Compare commits

..

5 Commits

SHA1 Message Date
655a7d9473 update workflow to update web app bundle correctly 2025-11-25 15:41:01 +00:00
a03af8e05a self-detection elides self url at startup, handles multiple DNS pointers 2025-11-25 13:26:37 +00:00
1522bfab2e add relay self-connection via authed pubkey 2025-11-25 12:54:37 +00:00
a457d22baf update go.yml workflow 2025-11-25 12:12:08 +00:00
2b8f359a83 fix workflow to fetch libsecp256k1.so 2025-11-25 11:04:04 +00:00
10 changed files with 315 additions and 56 deletions

View File

@@ -131,7 +131,8 @@
       "Bash(systemctl:*)",
       "Bash(systemctl show:*)",
       "Bash(ssh relay1:*)",
-      "Bash(done)"
+      "Bash(done)",
+      "Bash(go run:*)"
     ],
     "deny": [],
     "ask": []

View File

@@ -43,6 +43,27 @@ jobs:
           export PATH=/usr/local/go/bin:$PATH
           go version
+      - name: Set up Bun
+        run: |
+          echo "Installing Bun..."
+          curl -fsSL https://bun.sh/install | bash
+          export BUN_INSTALL="$HOME/.bun"
+          export PATH="$BUN_INSTALL/bin:$PATH"
+          bun --version
+      - name: Build Web UI
+        run: |
+          export BUN_INSTALL="$HOME/.bun"
+          export PATH="$BUN_INSTALL/bin:$PATH"
+          cd ${GITHUB_WORKSPACE}/app/web
+          echo "Installing frontend dependencies..."
+          bun install
+          echo "Building web app..."
+          bun run build
+          echo "Verifying dist directory was created..."
+          ls -lah dist/
+          echo "Web UI build complete"
       - name: Build (Pure Go + purego)
         run: |
           export PATH=/usr/local/go/bin:$PATH
@@ -129,3 +150,4 @@ jobs:
             --asset release-binaries/libsecp256k1-linux-amd64.so \
             --asset release-binaries/SHA256SUMS.txt \
             || echo "Release may already exist, updating..."

.idea/.gitignore generated vendored (5 deletions)
View File

@@ -1,5 +0,0 @@
-# Default ignored files
-/shelf/
-/workspace.xml
-# Editor-based HTTP Client requests
-/httpRequests/

.idea/modules.xml generated (8 deletions)
View File

@@ -1,8 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="ProjectModuleManager">
-    <modules>
-      <module fileurl="file://$PROJECT_DIR$/.idea/next.orly.dev.iml" filepath="$PROJECT_DIR$/.idea/next.orly.dev.iml" />
-    </modules>
-  </component>
-</project>

.idea/vcs.xml generated (6 deletions)
View File

@@ -1,6 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="VcsDirectoryMappings">
-    <mapping directory="" vcs="Git" />
-  </component>
-</project>

View File

@@ -1,6 +1,7 @@
 package app
 
 import (
+	"bytes"
 	"context"
 	"net/http"
 	"strings"
@@ -38,6 +39,7 @@ type Listener struct {
 	messageQueue    chan messageRequest // Buffered channel for message processing
 	processingDone  chan struct{}       // Closed when message processor exits
 	handlerWg       sync.WaitGroup      // Tracks spawned message handler goroutines
+	authProcessing  sync.RWMutex        // Ensures AUTH completes before other messages check authentication
 	// Flow control counters (atomic for concurrent access)
 	droppedMessages atomic.Int64 // Messages dropped due to full queue
 	// Diagnostics: per-connection counters
@@ -218,14 +220,32 @@ func (l *Listener) messageProcessor() {
 				return
 			}
 
-			// Process the message in a separate goroutine to avoid blocking
-			// This allows multiple messages to be processed concurrently (like khatru does)
-			// Track the goroutine so we can wait for it during cleanup
-			l.handlerWg.Add(1)
-			go func(data []byte, remote string) {
-				defer l.handlerWg.Done()
-				l.HandleMessage(data, remote)
-			}(req.data, req.remote)
+			// Lock immediately to ensure AUTH is processed before subsequent messages
+			// are dequeued. This prevents race conditions where EVENT checks authentication
+			// before AUTH completes.
+			l.authProcessing.Lock()
+
+			// Check if this is an AUTH message by looking for the ["AUTH" prefix
+			isAuthMessage := len(req.data) > 7 && bytes.HasPrefix(req.data, []byte(`["AUTH"`))
+
+			if isAuthMessage {
+				// Process AUTH message synchronously while holding lock
+				// This blocks the messageProcessor from dequeuing the next message
+				// until authentication is complete and authedPubkey is set
+				log.D.F("ws->%s processing AUTH synchronously with lock", req.remote)
+				l.HandleMessage(req.data, req.remote)
+				// Unlock after AUTH completes so subsequent messages see updated authedPubkey
+				l.authProcessing.Unlock()
+			} else {
+				// Not AUTH - unlock immediately and process concurrently
+				// The next message can now be dequeued (possibly another non-AUTH to process concurrently)
+				l.authProcessing.Unlock()
+
+				l.handlerWg.Add(1)
+				go func(data []byte, remote string) {
+					defer l.handlerWg.Done()
+					l.HandleMessage(data, remote)
+				}(req.data, req.remote)
+			}
 		}
 	}
 }
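Review note: the hunk above trades khatru-style full concurrency for ordered AUTH handling: an AUTH frame is processed inline under a write lock so its result is committed before the next frame is even dequeued, while everything else still fans out to goroutines. A minimal, self-contained sketch of the same gating pattern follows; the processor type, its handle method, and the frames in main are illustrative stand-ins, not code from this repository.

package main

import (
	"bytes"
	"fmt"
	"sync"
	"time"
)

// processor demonstrates the gating pattern from the diff: AUTH frames
// are handled synchronously under a lock, so authenticated state is
// visible before any later frame is dispatched; all other frames run
// in their own goroutines.
type processor struct {
	authGate  sync.RWMutex   // held exclusively while an AUTH frame is handled
	handlerWg sync.WaitGroup // tracks in-flight handler goroutines
}

// handle is a stand-in for the real message handler (HandleMessage).
func (p *processor) handle(data []byte) {
	time.Sleep(10 * time.Millisecond)
	fmt.Printf("handled: %s\n", data)
}

func (p *processor) run(queue <-chan []byte) {
	for data := range queue {
		p.authGate.Lock()
		if len(data) > 7 && bytes.HasPrefix(data, []byte(`["AUTH"`)) {
			// AUTH: process inline; the next frame is not dequeued
			// until the auth state is committed.
			p.handle(data)
			p.authGate.Unlock()
			continue
		}
		p.authGate.Unlock()
		p.handlerWg.Add(1)
		go func(d []byte) {
			defer p.handlerWg.Done()
			p.handle(d)
		}(data)
	}
	p.handlerWg.Wait()
}

func main() {
	q := make(chan []byte, 4)
	q <- []byte(`["AUTH","challenge-response"]`)
	q <- []byte(`["EVENT",{}]`)
	q <- []byte(`["REQ","sub1",{}]`)
	close(q)
	(&processor{}).run(q)
}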

View File

@@ -7,16 +7,18 @@ import (
 	"sync"
 	"time"
 
-	"lol.mleku.dev/chk"
-	"lol.mleku.dev/errorf"
-	"lol.mleku.dev/log"
-	"next.orly.dev/pkg/database"
 	"git.mleku.dev/mleku/nostr/crypto/keys"
 	"git.mleku.dev/mleku/nostr/encoders/filter"
 	"git.mleku.dev/mleku/nostr/encoders/hex"
 	"git.mleku.dev/mleku/nostr/encoders/tag"
 	"git.mleku.dev/mleku/nostr/encoders/timestamp"
-	"next.orly.dev/pkg/interfaces/publisher"
 	"git.mleku.dev/mleku/nostr/ws"
+	"lol.mleku.dev/chk"
+	"lol.mleku.dev/errorf"
+	"lol.mleku.dev/log"
+	"next.orly.dev/pkg/database"
+	"next.orly.dev/pkg/interfaces/publisher"
+	dsync "next.orly.dev/pkg/sync"
 )
const (
@@ -53,8 +55,10 @@ type Spider struct {
 	mode string
 
 	// Configuration
-	adminRelays []string
-	followList  [][]byte
+	adminRelays         []string
+	followList          [][]byte
+	relayIdentityPubkey string          // Our relay's identity pubkey (hex)
+	selfURLs            map[string]bool // URLs discovered to be ourselves (for fast lookups)
 
 	// State management
 	mu sync.RWMutex
@@ -129,14 +133,24 @@ func New(ctx context.Context, db *database.D, pub publisher.I, mode string) (s *
 	}
 
 	ctx, cancel := context.WithCancel(ctx)
+
+	// Get relay identity pubkey for self-detection
+	var relayPubkey string
+	if skb, err := db.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
+		pk, _ := keys.SecretBytesToPubKeyHex(skb)
+		relayPubkey = pk
+	}
+
 	s = &Spider{
-		ctx:               ctx,
-		cancel:            cancel,
-		db:                db,
-		pub:               pub,
-		mode:              mode,
-		connections:       make(map[string]*RelayConnection),
-		followListUpdated: make(chan struct{}, 1),
+		ctx:                 ctx,
+		cancel:              cancel,
+		db:                  db,
+		pub:                 pub,
+		mode:                mode,
+		relayIdentityPubkey: relayPubkey,
+		selfURLs:            make(map[string]bool),
+		connections:         make(map[string]*RelayConnection),
+		followListUpdated:   make(chan struct{}, 1),
 	}
 	return
@@ -254,9 +268,15 @@ func (s *Spider) updateConnections() {
 		return
 	}
 
-	// Update connections for current admin relays
+	// Update connections for current admin relays (filtering out self)
 	currentRelays := make(map[string]bool)
 	for _, url := range adminRelays {
+		// Check if this relay URL is ourselves
+		if s.isSelfRelay(url) {
+			log.D.F("spider: skipping self-relay: %s", url)
+			continue
+		}
 		currentRelays[url] = true
 		if conn, exists := s.connections[url]; exists {
@@ -804,3 +824,42 @@ func (rc *RelayConnection) close() {
 	rc.cancel()
 }
+
+// isSelfRelay checks if a relay URL is actually ourselves by comparing NIP-11 pubkeys
+func (s *Spider) isSelfRelay(relayURL string) bool {
+	// If we don't have a relay identity pubkey, can't compare
+	if s.relayIdentityPubkey == "" {
+		return false
+	}
+
+	s.mu.RLock()
+	// Fast path: check if we already know this URL is ours
+	if s.selfURLs[relayURL] {
+		s.mu.RUnlock()
+		log.D.F("spider: skipping self-relay (known URL): %s", relayURL)
+		return true
+	}
+	s.mu.RUnlock()
+
+	// Slow path: check via NIP-11 pubkey
+	nip11Cache := dsync.NewNIP11Cache(30 * time.Minute)
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	peerPubkey, err := nip11Cache.GetPubkey(ctx, relayURL)
+	if err != nil {
+		log.D.F("spider: couldn't fetch NIP-11 for %s: %v", relayURL, err)
+		return false
+	}
+
+	if peerPubkey == s.relayIdentityPubkey {
+		log.I.F("spider: discovered self-relay: %s (pubkey: %s)", relayURL, s.relayIdentityPubkey)
+		// Cache this URL as ours for future fast lookups
+		s.mu.Lock()
+		s.selfURLs[relayURL] = true
+		s.mu.Unlock()
+		return true
+	}
+
+	return false
+}
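Review note: isSelfRelay depends on NIP11Cache.GetPubkey, whose implementation sits outside this diff. For reference, here is a sketch of what that lookup involves per the NIP-11 spec: GET the relay's HTTP(S) URL with Accept: application/nostr+json and read the pubkey field of the returned relay information document. The function name and info struct below are illustrative, not the repository's actual cache internals.

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
	"time"
)

// fetchNIP11Pubkey is a hypothetical stand-in for what NIP11Cache.GetPubkey
// presumably does on a cache miss: fetch the relay information document
// (NIP-11) and return its "pubkey" field.
func fetchNIP11Pubkey(ctx context.Context, relayURL string) (string, error) {
	// NIP-11 documents are served over HTTP(S) at the relay's URL.
	httpURL := strings.Replace(relayURL, "ws://", "http://", 1)
	httpURL = strings.Replace(httpURL, "wss://", "https://", 1)

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, httpURL, nil)
	if err != nil {
		return "", err
	}
	// This header asks the relay for its information document.
	req.Header.Set("Accept", "application/nostr+json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	var info struct {
		Pubkey string `json:"pubkey"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		return "", err
	}
	return info.Pubkey, nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	pk, err := fetchNIP11Pubkey(ctx, "wss://relay.example.com")
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	// Two URLs belong to the same relay iff their NIP-11 pubkeys match.
	fmt.Println("peer pubkey:", pk)
}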

View File

@@ -13,6 +13,7 @@ import (
 	"lol.mleku.dev/log"
 	"next.orly.dev/pkg/database"
 	"next.orly.dev/pkg/database/indexes/types"
+	"git.mleku.dev/mleku/nostr/crypto/keys"
 	"git.mleku.dev/mleku/nostr/encoders/event"
 	"git.mleku.dev/mleku/nostr/encoders/hex"
 	"git.mleku.dev/mleku/nostr/encoders/kind"
@@ -23,6 +24,8 @@ type ClusterManager struct {
 	cancel                    context.CancelFunc
 	db                        *database.D
 	adminNpubs                []string
+	relayIdentityPubkey       string          // Our relay's identity pubkey (hex)
+	selfURLs                  map[string]bool // URLs discovered to be ourselves (for fast lookups)
 	members                   map[string]*ClusterMember // keyed by relay URL
 	membersMux                sync.RWMutex
 	pollTicker                *time.Ticker
@@ -30,6 +33,7 @@ type ClusterManager struct {
 	httpClient                *http.Client
 	propagatePrivilegedEvents bool
 	publisher                 interface{ Deliver(*event.E) }
+	nip11Cache                *NIP11Cache
 }
 
 type ClusterMember struct {
@@ -61,11 +65,21 @@ type EventInfo struct {
 func NewClusterManager(ctx context.Context, db *database.D, adminNpubs []string, propagatePrivilegedEvents bool, publisher interface{ Deliver(*event.E) }) *ClusterManager {
 	ctx, cancel := context.WithCancel(ctx)
+
+	// Get our relay identity pubkey
+	var relayPubkey string
+	if skb, err := db.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
+		if pk, err := keys.SecretBytesToPubKeyHex(skb); err == nil {
+			relayPubkey = pk
+		}
+	}
+
 	cm := &ClusterManager{
 		ctx:                       ctx,
 		cancel:                    cancel,
 		db:                        db,
 		adminNpubs:                adminNpubs,
+		relayIdentityPubkey:       relayPubkey,
+		selfURLs:                  make(map[string]bool),
 		members:                   make(map[string]*ClusterMember),
 		pollDone:                  make(chan struct{}),
 		propagatePrivilegedEvents: propagatePrivilegedEvents,
@@ -73,6 +87,7 @@ func NewClusterManager(ctx context.Context, db *database.D, adminNpubs []string,
 		httpClient: &http.Client{
 			Timeout: 30 * time.Second,
 		},
+		nip11Cache: NewNIP11Cache(30 * time.Minute),
 	}
 	return cm
@@ -252,20 +267,44 @@ func (cm *ClusterManager) UpdateMembership(relayURLs []string) {
 		}
 	}
 
-	// Add new members
+	// Add new members (filter out self once at this point)
 	for _, url := range relayURLs {
-		if _, exists := cm.members[url]; !exists {
-			// For simplicity, assume HTTP and WebSocket URLs are the same
-			// In practice, you'd need to parse these properly
-			member := &ClusterMember{
-				HTTPURL:      url,
-				WebSocketURL: url, // TODO: Convert to WebSocket URL
-				LastSerial:   0,
-				Status:       "unknown",
-			}
-			cm.members[url] = member
-			log.I.F("added cluster member: %s", url)
+		// Skip if already exists
+		if _, exists := cm.members[url]; exists {
+			continue
 		}
+
+		// Fast path: check if we already know this URL is ours
+		if cm.selfURLs[url] {
+			log.I.F("removed self from cluster members (known URL): %s", url)
+			continue
+		}
+
+		// Slow path: check via NIP-11 pubkey
+		if cm.relayIdentityPubkey != "" {
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			peerPubkey, err := cm.nip11Cache.GetPubkey(ctx, url)
+			cancel()
+			if err != nil {
+				log.D.F("couldn't fetch NIP-11 for %s, adding to cluster anyway: %v", url, err)
+			} else if peerPubkey == cm.relayIdentityPubkey {
+				log.I.F("removed self from cluster members (discovered): %s (pubkey: %s)", url, cm.relayIdentityPubkey)
+				// Cache this URL as ours for future fast lookups
+				cm.selfURLs[url] = true
+				continue
+			}
+		}
+
+		// Add member
+		member := &ClusterMember{
+			HTTPURL:      url,
+			WebSocketURL: url, // TODO: Convert to WebSocket URL
+			LastSerial:   0,
+			Status:       "unknown",
+		}
+		cm.members[url] = member
+		log.I.F("added cluster member: %s", url)
 	}
 }
@@ -313,6 +352,40 @@ func (cm *ClusterManager) HandleLatestSerial(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
+	// Check if request is from ourselves by examining the Referer or Origin header
+	// Note: Self-members are already filtered out, but this catches edge cases
+	origin := r.Header.Get("Origin")
+	referer := r.Header.Get("Referer")
+	if cm.relayIdentityPubkey != "" && (origin != "" || referer != "") {
+		checkURL := origin
+		if checkURL == "" {
+			checkURL = referer
+		}
+
+		// Fast path: check known self-URLs
+		if cm.selfURLs[checkURL] {
+			log.D.F("rejecting cluster latest request from self (known URL): %s", checkURL)
+			http.Error(w, "Cannot sync with self", http.StatusBadRequest)
+			return
+		}
+
+		// Slow path: verify via NIP-11
+		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+		peerPubkey, err := cm.nip11Cache.GetPubkey(ctx, checkURL)
+		cancel()
+		if err == nil && peerPubkey == cm.relayIdentityPubkey {
+			log.D.F("rejecting cluster latest request from self (discovered): %s", checkURL)
+			// Cache for future fast lookups
+			cm.membersMux.Lock()
+			cm.selfURLs[checkURL] = true
+			cm.membersMux.Unlock()
+			http.Error(w, "Cannot sync with self", http.StatusBadRequest)
+			return
+		}
+	}
+
 	// Get the latest serial from database by querying for the highest serial
 	latestSerial, err := cm.getLatestSerialFromDB()
 	if err != nil {
@@ -336,6 +409,40 @@ func (cm *ClusterManager) HandleEventsRange(w http.ResponseWriter, r *http.Reque
 		return
 	}
 
+	// Check if request is from ourselves by examining the Referer or Origin header
+	// Note: Self-members are already filtered out, but this catches edge cases
+	origin := r.Header.Get("Origin")
+	referer := r.Header.Get("Referer")
+	if cm.relayIdentityPubkey != "" && (origin != "" || referer != "") {
+		checkURL := origin
+		if checkURL == "" {
+			checkURL = referer
+		}
+
+		// Fast path: check known self-URLs
+		if cm.selfURLs[checkURL] {
+			log.D.F("rejecting cluster events request from self (known URL): %s", checkURL)
+			http.Error(w, "Cannot sync with self", http.StatusBadRequest)
+			return
+		}
+
+		// Slow path: verify via NIP-11
+		ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+		peerPubkey, err := cm.nip11Cache.GetPubkey(ctx, checkURL)
+		cancel()
+		if err == nil && peerPubkey == cm.relayIdentityPubkey {
+			log.D.F("rejecting cluster events request from self (discovered): %s", checkURL)
+			// Cache for future fast lookups
+			cm.membersMux.Lock()
+			cm.selfURLs[checkURL] = true
+			cm.membersMux.Unlock()
+			http.Error(w, "Cannot sync with self", http.StatusBadRequest)
+			return
+		}
+	}
+
 	// Parse query parameters
 	fromStr := r.URL.Query().Get("from")
 	toStr := r.URL.Query().Get("to")
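Review note: the Origin/Referer self-check is duplicated verbatim between HandleLatestSerial and HandleEventsRange, and both read selfURLs without holding membersMux for reading even though the cache writes take that lock. A possible follow-up is to factor the block into one helper that locks on both paths. The sketch below is hypothetical, written against the types in this diff rather than as standalone code:

// isSelfRequest is a hypothetical helper, not part of these commits: it
// factors out the duplicated Origin/Referer self-check above and takes
// membersMux on the read path as well, which the inline version skips.
func (cm *ClusterManager) isSelfRequest(r *http.Request) bool {
	if cm.relayIdentityPubkey == "" {
		return false
	}
	checkURL := r.Header.Get("Origin")
	if checkURL == "" {
		checkURL = r.Header.Get("Referer")
	}
	if checkURL == "" {
		return false
	}
	// Fast path: a URL already proven to be ours.
	cm.membersMux.RLock()
	known := cm.selfURLs[checkURL]
	cm.membersMux.RUnlock()
	if known {
		return true
	}
	// Slow path: compare NIP-11 pubkeys, caching a positive match.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	peerPubkey, err := cm.nip11Cache.GetPubkey(ctx, checkURL)
	if err != nil || peerPubkey != cm.relayIdentityPubkey {
		return false
	}
	cm.membersMux.Lock()
	cm.selfURLs[checkURL] = true
	cm.membersMux.Unlock()
	return true
}

Each handler would then open with a single guard: if cm.isSelfRequest(r) { http.Error(w, "Cannot sync with self", http.StatusBadRequest); return }.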

View File

@@ -26,6 +26,7 @@ type Manager struct {
 	nodeID        string
 	relayURL      string
 	peers         []string
+	selfURLs      map[string]bool // URLs discovered to be ourselves (for fast lookups)
 	currentSerial uint64
 	peerSerials   map[string]uint64 // peer URL -> latest serial seen
 	relayGroupMgr *RelayGroupManager
@@ -72,6 +73,7 @@ func NewManager(ctx context.Context, db *database.D, nodeID, relayURL string, pe
 		nodeID:        nodeID,
 		relayURL:      relayURL,
 		peers:         peers,
+		selfURLs:      make(map[string]bool),
 		currentSerial: 0,
 		peerSerials:   make(map[string]uint64),
 		relayGroupMgr: relayGroupMgr,
@@ -79,6 +81,44 @@ func NewManager(ctx context.Context, db *database.D, nodeID, relayURL string, pe
 		policyManager: policyManager,
 	}
 
+	// Add our configured relay URL to self-URLs cache if provided
+	if m.relayURL != "" {
+		m.selfURLs[m.relayURL] = true
+	}
+
+	// Remove self from peer list once at startup if we have a nodeID
+	if m.nodeID != "" {
+		filteredPeers := make([]string, 0, len(m.peers))
+		for _, peerURL := range m.peers {
+			// Fast path: check if we already know this URL is ours
+			if m.selfURLs[peerURL] {
+				log.I.F("removed self from sync peer list (known URL): %s", peerURL)
+				continue
+			}
+
+			// Slow path: check via NIP-11 pubkey
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			peerPubkey, err := m.nip11Cache.GetPubkey(ctx, peerURL)
+			cancel()
+			if err != nil {
+				log.D.F("couldn't fetch NIP-11 for %s, keeping in peer list: %v", peerURL, err)
+				filteredPeers = append(filteredPeers, peerURL)
+				continue
+			}
+			if peerPubkey == m.nodeID {
+				log.I.F("removed self from sync peer list (discovered): %s (pubkey: %s)", peerURL, m.nodeID)
+				// Cache this URL as ours for future fast lookups
+				m.selfURLs[peerURL] = true
+				continue
+			}
+			filteredPeers = append(filteredPeers, peerURL)
+		}
+		m.peers = filteredPeers
+	}
+
 	// Start sync routine
 	go m.syncRoutine()
@@ -173,6 +213,7 @@ func (m *Manager) syncRoutine() {
 // syncWithPeersSequentially syncs with all configured peers one at a time
 func (m *Manager) syncWithPeersSequentially() {
 	for _, peerURL := range m.peers {
+		// Self-peers are already filtered out during initialization/update
 		m.syncWithPeer(peerURL)
 		// Small delay between peers to avoid overwhelming
 		time.Sleep(100 * time.Millisecond)
@@ -390,6 +431,20 @@ func (m *Manager) HandleCurrentRequest(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	// Reject requests from ourselves (same nodeID)
+	if req.NodeID != "" && req.NodeID == m.nodeID {
+		log.D.F("rejecting sync current request from self (nodeID: %s)", req.NodeID)
+		// Cache the requesting relay URL as ours for future fast lookups
+		if req.RelayURL != "" {
+			m.mutex.Lock()
+			m.selfURLs[req.RelayURL] = true
+			m.mutex.Unlock()
+			log.D.F("cached self-URL from inbound request: %s", req.RelayURL)
+		}
+		http.Error(w, "Cannot sync with self", http.StatusBadRequest)
+		return
+	}
+
 	resp := CurrentResponse{
 		NodeID:   m.nodeID,
 		RelayURL: m.relayURL,
@@ -413,6 +468,20 @@ func (m *Manager) HandleEventIDsRequest(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
+	// Reject requests from ourselves (same nodeID)
+	if req.NodeID != "" && req.NodeID == m.nodeID {
+		log.D.F("rejecting sync event-ids request from self (nodeID: %s)", req.NodeID)
+		// Cache the requesting relay URL as ours for future fast lookups
+		if req.RelayURL != "" {
+			m.mutex.Lock()
+			m.selfURLs[req.RelayURL] = true
+			m.mutex.Unlock()
+			log.D.F("cached self-URL from inbound request: %s", req.RelayURL)
+		}
+		http.Error(w, "Cannot sync with self", http.StatusBadRequest)
+		return
+	}
+
 	// Get events with IDs in the requested range
 	eventMap, err := m.getEventsWithIDs(req.From, req.To)
 	if err != nil {
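Review note: the nodeID comparison above is the server side of the handshake; it only works because the requesting peer identifies itself. A hedged sketch of what the client side presumably looks like follows. The CurrentRequest field tags and the /sync/current path are assumptions for illustration, as neither appears in this diff; only the NodeID/RelayURL field names and the 400 "Cannot sync with self" response are taken from the code above.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// CurrentRequest mirrors the fields the handlers above read (req.NodeID,
// req.RelayURL); the wire format is assumed, since the type's definition
// is outside this diff.
type CurrentRequest struct {
	NodeID   string `json:"node_id"`
	RelayURL string `json:"relay_url"`
}

// askPeerCurrent is a hypothetical client-side counterpart to
// HandleCurrentRequest: it identifies itself with its own nodeID so a
// misconfigured peer list pointing back at us yields a 400 response
// instead of a relay syncing with itself in a loop.
func askPeerCurrent(peerURL, nodeID, relayURL string) error {
	body, err := json.Marshal(CurrentRequest{NodeID: nodeID, RelayURL: relayURL})
	if err != nil {
		return err
	}
	client := &http.Client{Timeout: 5 * time.Second}
	// The endpoint path is illustrative; the diff does not show the route.
	resp, err := client.Post(peerURL+"/sync/current", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusBadRequest {
		// The peer told us we are talking to ourselves; drop it.
		return fmt.Errorf("peer %s rejected us as self", peerURL)
	}
	return nil
}

func main() {
	if err := askPeerCurrent("https://relay.example.com", "deadbeef", "wss://relay.example.com"); err != nil {
		fmt.Println(err)
	}
}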

View File

@@ -1 +1 @@
-v0.29.16
+v0.29.20