add directory spider
@@ -68,6 +68,11 @@ type C struct {
 	// Spider settings
 	SpiderMode string `env:"ORLY_SPIDER_MODE" default:"none" usage:"spider mode for syncing events: none, follows"`
 
+	// Directory Spider settings
+	DirectorySpiderEnabled  bool          `env:"ORLY_DIRECTORY_SPIDER" default:"false" usage:"enable directory spider for metadata sync (kinds 0, 3, 10000, 10002)"`
+	DirectorySpiderInterval time.Duration `env:"ORLY_DIRECTORY_SPIDER_INTERVAL" default:"24h" usage:"how often to run directory spider"`
+	DirectorySpiderMaxHops  int           `env:"ORLY_DIRECTORY_SPIDER_HOPS" default:"3" usage:"maximum hops for relay discovery from seed users"`
+
 	PolicyEnabled bool `env:"ORLY_POLICY_ENABLED" default:"false" usage:"enable policy-based event processing (configuration found in $HOME/.config/ORLY/policy.json)"`
 
 	// NIP-43 Relay Access Metadata and Requests
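For reference, a minimal sketch of how a deployment might opt in to these settings. The variable names and defaults are taken verbatim from the struct tags above; the loader that reads them is ORLY's existing config plumbing, and this helper is purely illustrative:

	// Hypothetical snippet: opting in to the directory spider via the
	// environment variables declared in the struct tags above.
	package main

	import "os"

	func main() {
		os.Setenv("ORLY_DIRECTORY_SPIDER", "true")         // default "false"
		os.Setenv("ORLY_DIRECTORY_SPIDER_INTERVAL", "12h") // default "24h"
		os.Setenv("ORLY_DIRECTORY_SPIDER_HOPS", "2")       // default "3"
		// ... then start the relay, which reads these at startup ...
	}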
app/main.go (+44)
@@ -141,6 +141,44 @@ func Run(
 		}
 	}
 
+	// Initialize directory spider if enabled (only for Badger backend)
+	if badgerDB, ok := db.(*database.D); ok && cfg.DirectorySpiderEnabled {
+		if l.directorySpider, err = spider.NewDirectorySpider(
+			ctx,
+			badgerDB,
+			l.publishers,
+			cfg.DirectorySpiderInterval,
+			cfg.DirectorySpiderMaxHops,
+		); chk.E(err) {
+			log.E.F("failed to create directory spider: %v", err)
+		} else {
+			// Set up callback to get seed pubkeys (whitelisted users)
+			l.directorySpider.SetSeedCallback(func() [][]byte {
+				var pubkeys [][]byte
+				// Get followed pubkeys from follows ACL if available
+				for _, aclInstance := range acl.Registry.ACL {
+					if aclInstance.Type() == "follows" {
+						if follows, ok := aclInstance.(*acl.Follows); ok {
+							pubkeys = append(pubkeys, follows.GetFollowedPubkeys()...)
+						}
+					}
+				}
+				// Fall back to admin keys if no follows ACL
+				if len(pubkeys) == 0 {
+					pubkeys = adminKeys
+				}
+				return pubkeys
+			})
+
+			if err = l.directorySpider.Start(); chk.E(err) {
+				log.E.F("failed to start directory spider: %v", err)
+			} else {
+				log.I.F("directory spider started (interval: %v, max hops: %d)",
+					cfg.DirectorySpiderInterval, cfg.DirectorySpiderMaxHops)
+			}
+		}
+	}
+
 	// Initialize relay group manager (only for Badger backend)
 	if badgerDB, ok := db.(*database.D); ok {
 		l.relayGroupMgr = dsync.NewRelayGroupManager(badgerDB, cfg.RelayGroupAdmins)
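The seed callback keeps the spider decoupled from the ACL layer: rather than holding a snapshot of pubkeys, the spider re-queries its seeds on each run, so follow-list changes are picked up without a restart. A minimal self-contained sketch of that pattern, using hypothetical stand-in types (crawler and SeedFunc are not ORLY names; the real type is spider.DirectorySpider):

	// Sketch of the callback-based seed design, under assumed types.
	package main

	import "fmt"

	type SeedFunc func() [][]byte

	type crawler struct {
		seeds SeedFunc
	}

	func (c *crawler) SetSeedCallback(f SeedFunc) { c.seeds = f }

	// run re-queries seeds on every pass, so ACL changes between runs
	// are seen without restarting the crawler.
	func (c *crawler) run() {
		for _, pk := range c.seeds() {
			fmt.Printf("crawling from seed %x\n", pk)
		}
	}

	func main() {
		adminKeys := [][]byte{{0x01}, {0x02}} // stand-in for configured admin keys
		c := &crawler{}
		c.SetSeedCallback(func() [][]byte {
			var pubkeys [][]byte // would be filled from a follows ACL
			if len(pubkeys) == 0 {
				pubkeys = adminKeys // same fallback as in Run above
			}
			return pubkeys
		})
		c.run()
	}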
@@ -360,6 +398,12 @@ func Run(
 		log.I.F("spider manager stopped")
 	}
 
+	// Stop directory spider if running
+	if l.directorySpider != nil {
+		l.directorySpider.Stop()
+		log.I.F("directory spider stopped")
+	}
+
 	// Create shutdown context with timeout
 	shutdownCtx, cancelShutdown := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancelShutdown()
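Note the ordering: the directory spider is stopped before the 10-second shutdown context is created, so its goroutine is already winding down when the deadline starts. The commit does not show the spider's internals; a plausible sketch of the interval-driven loop behind Start and Stop, under assumed details, might look like:

	// Hypothetical ticker loop with a Stop method, illustrating the
	// run-every-interval lifecycle the diff configures above.
	package main

	import (
		"fmt"
		"time"
	)

	type loop struct {
		ticker *time.Ticker
		done   chan struct{}
	}

	func start(interval time.Duration, run func()) *loop {
		l := &loop{ticker: time.NewTicker(interval), done: make(chan struct{})}
		go func() {
			for {
				select {
				case <-l.ticker.C:
					run() // one spider pass per interval
				case <-l.done:
					return
				}
			}
		}()
		return l
	}

	// Stop halts the ticker and signals the goroutine to exit.
	func (l *loop) Stop() {
		l.ticker.Stop()
		close(l.done)
	}

	func main() {
		l := start(50*time.Millisecond, func() { fmt.Println("spider pass") })
		time.Sleep(120 * time.Millisecond)
		l.Stop() // same call shape as l.directorySpider.Stop() above
	}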
@@ -48,17 +48,22 @@ type Server struct {
 	challengeMutex sync.RWMutex
 	challenges     map[string][]byte
 
-	paymentProcessor *PaymentProcessor
-	sprocketManager  *SprocketManager
-	policyManager    *policy.P
-	spiderManager    *spider.Spider
-	syncManager      *dsync.Manager
-	relayGroupMgr    *dsync.RelayGroupManager
-	clusterManager   *dsync.ClusterManager
-	blossomServer    *blossom.Server
-	InviteManager    *nip43.InviteManager
-	cfg              *config.C
-	db               database.Database // Changed from *database.D to interface
+	// Message processing pause mutex for policy/follow list updates
+	// Use RLock() for normal message processing, Lock() for updates
+	messagePauseMutex sync.RWMutex
+
+	paymentProcessor *PaymentProcessor
+	sprocketManager  *SprocketManager
+	policyManager    *policy.P
+	spiderManager    *spider.Spider
+	directorySpider  *spider.DirectorySpider
+	syncManager      *dsync.Manager
+	relayGroupMgr    *dsync.RelayGroupManager
+	clusterManager   *dsync.ClusterManager
+	blossomServer    *blossom.Server
+	InviteManager    *nip43.InviteManager
+	cfg              *config.C
+	db               database.Database // Changed from *database.D to interface
 }
 
 // isIPBlacklisted checks if an IP address is blacklisted using the managed ACL system
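The new messagePauseMutex is coordination, not data protection: many handlers hold the read lock concurrently, and an updater taking the write lock waits for in-flight handlers to drain while blocking new ones. A small runnable demonstration of those RWMutex semantics:

	// Demonstrates the RWMutex pause pattern: readers run concurrently,
	// a writer (the "pause") excludes them until released.
	package main

	import (
		"fmt"
		"sync"
		"time"
	)

	func main() {
		var pause sync.RWMutex
		var wg sync.WaitGroup

		for i := 0; i < 3; i++ {
			wg.Add(1)
			go func(id int) {
				defer wg.Done()
				pause.RLock() // AcquireMessageProcessingLock
				fmt.Printf("handler %d processing\n", id)
				time.Sleep(50 * time.Millisecond)
				pause.RUnlock() // ReleaseMessageProcessingLock
			}(i)
		}

		time.Sleep(10 * time.Millisecond)
		pause.Lock() // PauseMessageProcessing: waits for handlers above
		fmt.Println("policy update while processing is paused")
		pause.Unlock() // ResumeMessageProcessing
		wg.Wait()
	}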
@@ -1135,3 +1140,32 @@ func (s *Server) updatePeerAdminACL(peerPubkey []byte) {
 		}
 	}
 }
+
+// =============================================================================
+// Message Processing Pause/Resume for Policy and Follow List Updates
+// =============================================================================
+
+// PauseMessageProcessing acquires an exclusive lock to pause all message processing.
+// This should be called before updating policy configuration or follow lists.
+// Call ResumeMessageProcessing to release the lock after updates are complete.
+func (s *Server) PauseMessageProcessing() {
+	s.messagePauseMutex.Lock()
+}
+
+// ResumeMessageProcessing releases the exclusive lock to resume message processing.
+// This should be called after policy configuration or follow list updates are complete.
+func (s *Server) ResumeMessageProcessing() {
+	s.messagePauseMutex.Unlock()
+}
+
+// AcquireMessageProcessingLock acquires a read lock for normal message processing.
+// This allows concurrent message processing while blocking during policy updates.
+// Call ReleaseMessageProcessingLock when message processing is complete.
+func (s *Server) AcquireMessageProcessingLock() {
+	s.messagePauseMutex.RLock()
+}
+
+// ReleaseMessageProcessingLock releases the read lock after message processing.
+func (s *Server) ReleaseMessageProcessingLock() {
+	s.messagePauseMutex.RUnlock()
+}
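A hedged sketch of how call sites would be expected to pair these methods; handleMessage and applyPolicyUpdate are hypothetical names, not methods introduced by this commit:

	// Hypothetical call sites for the pause/resume API defined above.

	// A message handler takes the shared read lock for the duration of one event:
	func (s *Server) handleMessage(raw []byte) {
		s.AcquireMessageProcessingLock()
		defer s.ReleaseMessageProcessingLock()
		// ... decode and process the event under the read lock ...
	}

	// A policy or follow-list update quiesces all handlers first:
	func (s *Server) applyPolicyUpdate(update func()) {
		s.PauseMessageProcessing()
		defer s.ResumeMessageProcessing()
		update() // runs with message processing fully paused
	}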