Compare commits

11 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 91e38edd2c | |
| | cb50a9c5c4 | |
| | c5be98bcaa | |
| | 417866ebf4 | |
| | 0e87337723 | |
| | b10851c209 | |
| | e68916ca5d | |
| | 0e30f7a697 | |
| | a0af5bb45e | |
| | 9da1784b1b | |
| | 205f23fc0c | |
```diff
@@ -22,6 +22,10 @@ func initializeBlossomServer(
 		MaxBlobSize:      100 * 1024 * 1024, // 100MB default
 		AllowedMimeTypes: nil, // Allow all MIME types by default
 		RequireAuth:      cfg.AuthRequired || cfg.AuthToWrite,
+		// Rate limiting for non-followed users
+		RateLimitEnabled: cfg.BlossomRateLimitEnabled,
+		DailyLimitMB:     cfg.BlossomDailyLimitMB,
+		BurstLimitMB:     cfg.BlossomBurstLimitMB,
 	}
 
 	// Create blossom server with relay's ACL registry
@@ -31,7 +35,12 @@ func initializeBlossomServer(
 	// We'll need to modify the handler to inject the baseURL per request
 	// For now, we'll use a middleware approach
 
+	if cfg.BlossomRateLimitEnabled {
+		log.I.F("blossom server initialized with ACL mode: %s, rate limit: %dMB/day (burst: %dMB)",
+			cfg.ACLMode, cfg.BlossomDailyLimitMB, cfg.BlossomBurstLimitMB)
+	} else {
 	log.I.F("blossom server initialized with ACL mode: %s", cfg.ACLMode)
+	}
 	return bs, nil
 }
```
```diff
@@ -69,13 +69,18 @@ type C struct {
 
 	// Progressive throttle for follows ACL mode - allows non-followed users to write with increasing delay
 	FollowsThrottleEnabled  bool          `env:"ORLY_FOLLOWS_THROTTLE" default:"false" usage:"enable progressive delay for non-followed users in follows ACL mode"`
-	FollowsThrottlePerEvent time.Duration `env:"ORLY_FOLLOWS_THROTTLE_INCREMENT" default:"200ms" usage:"delay added per event for non-followed users"`
+	FollowsThrottlePerEvent time.Duration `env:"ORLY_FOLLOWS_THROTTLE_INCREMENT" default:"25ms" usage:"delay added per event for non-followed users"`
 	FollowsThrottleMaxDelay time.Duration `env:"ORLY_FOLLOWS_THROTTLE_MAX" default:"60s" usage:"maximum throttle delay cap"`
 
 	// Blossom blob storage service settings
 	BlossomEnabled       bool   `env:"ORLY_BLOSSOM_ENABLED" default:"true" usage:"enable Blossom blob storage server (only works with Badger backend)"`
 	BlossomServiceLevels string `env:"ORLY_BLOSSOM_SERVICE_LEVELS" usage:"comma-separated list of service levels in format: name:storage_mb_per_sat_per_month (e.g., basic:1,premium:10)"`
 
+	// Blossom upload rate limiting (for non-followed users)
+	BlossomRateLimitEnabled bool  `env:"ORLY_BLOSSOM_RATE_LIMIT" default:"false" usage:"enable upload rate limiting for non-followed users"`
+	BlossomDailyLimitMB     int64 `env:"ORLY_BLOSSOM_DAILY_LIMIT_MB" default:"10" usage:"daily upload limit in MB for non-followed users (EMA averaged)"`
+	BlossomBurstLimitMB     int64 `env:"ORLY_BLOSSOM_BURST_LIMIT_MB" default:"50" usage:"max burst upload in MB (bucket cap)"`
+
 	// Web UI and dev mode settings
 	WebDisableEmbedded bool   `env:"ORLY_WEB_DISABLE" default:"false" usage:"disable serving the embedded web UI; useful for hot-reload during development"`
 	WebDevProxyURL     string `env:"ORLY_WEB_DEV_PROXY_URL" usage:"when ORLY_WEB_DISABLE is true, reverse-proxy non-API paths to this dev server URL (e.g. http://localhost:5173)"`
```
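The new `ORLY_BLOSSOM_*` settings describe a token-bucket-shaped limiter: a burst cap in MB and a daily refill rate, with usage smoothed over time (the usage text calls it "EMA averaged"). The limiter itself is not visible in this diff, so the following is only a minimal sketch of how such a per-pubkey bucket could behave; the package, type, and method names are hypothetical and not taken from the repository.

```go
// Hypothetical sketch of a per-pubkey upload bucket matching the
// ORLY_BLOSSOM_DAILY_LIMIT_MB / ORLY_BLOSSOM_BURST_LIMIT_MB semantics.
package blossomlimit

import (
	"sync"
	"time"
)

type bucket struct {
	remaining float64   // MB currently available, capped at the burst limit
	last      time.Time // last refill time
}

type Limiter struct {
	mu      sync.Mutex
	buckets map[string]*bucket
	dailyMB float64 // refill rate: dailyMB tokens per 24h
	burstMB float64 // bucket cap
}

func New(dailyMB, burstMB int64) *Limiter {
	return &Limiter{
		buckets: make(map[string]*bucket),
		dailyMB: float64(dailyMB),
		burstMB: float64(burstMB),
	}
}

// Allow reports whether pubkey may upload sizeMB now, debiting the
// bucket if so. Tokens refill continuously at dailyMB per 24 hours,
// which over time approximates the smoothed daily limit the config
// usage text describes.
func (l *Limiter) Allow(pubkey string, sizeMB float64) bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	now := time.Now()
	b, ok := l.buckets[pubkey]
	if !ok {
		b = &bucket{remaining: l.burstMB, last: now}
		l.buckets[pubkey] = b
	}
	b.remaining += now.Sub(b.last).Hours() / 24 * l.dailyMB
	if b.remaining > l.burstMB {
		b.remaining = l.burstMB
	}
	b.last = now
	if b.remaining < sizeMB {
		return false
	}
	b.remaining -= sizeMB
	return true
}
```

With the defaults (10 MB/day, 50 MB burst), a new non-followed uploader can push up to 50 MB immediately, then sustain roughly 10 MB per day thereafter.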
```diff
@@ -124,6 +124,17 @@ func (s *Server) handleCashuKeysets(w http.ResponseWriter, r *http.Request) {
 
 // handleCashuInfo handles GET /cashu/info - returns mint information.
 func (s *Server) handleCashuInfo(w http.ResponseWriter, r *http.Request) {
+	// CORS headers for browser-based CAT support detection
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+	w.Header().Set("Access-Control-Allow-Methods", "GET, OPTIONS")
+	w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Accept")
+
+	// Handle preflight
+	if r.Method == http.MethodOptions {
+		w.WriteHeader(http.StatusNoContent)
+		return
+	}
+
 	if s.CashuIssuer == nil {
 		http.Error(w, "Cashu tokens not enabled", http.StatusNotImplemented)
 		return
```
```diff
@@ -21,7 +21,7 @@ import (
 )
 
 func (l *Listener) HandleEvent(msg []byte) (err error) {
-	log.D.F("HandleEvent: START handling event: %s", msg)
+	log.I.F("HandleEvent: START handling event: %s", string(msg[:min(200, len(msg))]))
 
 	// 1. Raw JSON validation (before unmarshal) - use validation service
 	if result := l.eventValidator.ValidateRawJSON(msg); !result.Valid {
@@ -231,6 +231,11 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
 
 	// Authorization check (policy + ACL) - use authorization service
 	decision := l.eventAuthorizer.Authorize(env.E, l.authedPubkey.Load(), l.remote, env.E.Kind)
+	// Debug: log ephemeral event authorization
+	if env.E.Kind >= 20000 && env.E.Kind < 30000 {
+		log.I.F("ephemeral auth check: kind %d, allowed=%v, reason=%s",
+			env.E.Kind, decision.Allowed, decision.DenyReason)
+	}
 	if !decision.Allowed {
 		log.D.F("HandleEvent: authorization denied: %s (requireAuth=%v)", decision.DenyReason, decision.RequireAuth)
 		if decision.RequireAuth {
@@ -256,6 +261,8 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
 	log.I.F("HandleEvent: authorized with access level %s", decision.AccessLevel)
 
 	// Progressive throttle for follows ACL mode (delays non-followed users)
+	// Skip throttle if a Cashu Access Token is present (authenticated via CAT)
+	if l.cashuToken == nil {
 	if delay := l.getFollowsThrottleDelay(env.E); delay > 0 {
 		log.D.F("HandleEvent: applying progressive throttle delay of %v for %0x from %s",
 			delay, env.E.Pubkey, l.remote)
@@ -266,6 +273,7 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
 			// Delay completed, continue processing
 		}
 	}
+	}
 
 	// Route special event kinds (ephemeral, etc.) - use routing service
 	if routeResult := l.eventRouter.Route(env.E, l.authedPubkey.Load()); routeResult.Action != routing.Continue {
```
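The progressive throttle referenced here grows a delay per recent event from the non-followed pubkey, capped at a maximum. `getFollowsThrottleDelay` itself is not shown in this diff, so the following is only a sketch of that shape under the `ORLY_FOLLOWS_THROTTLE_*` settings; the package and function names are illustrative.

```go
package throttle

import "time"

// Delay is a hypothetical sketch of a progressive throttle for
// non-followed pubkeys: increment times the recent event count,
// capped at the configured maximum.
func Delay(recentEvents int, perEvent, maxDelay time.Duration) time.Duration {
	d := time.Duration(recentEvents) * perEvent
	if d > maxDelay {
		d = maxDelay
	}
	return d
}
```

Under this shape, the new 25ms default means a non-followed user's 40th event waits 40 × 25ms = 1s, and the 60s cap is only reached after 2400 events; the old 200ms default hit the cap eight times sooner.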
```diff
@@ -143,6 +143,12 @@ func (s *Server) handleCuratingNIP86Method(request NIP86Request, curatingACL *ac
 		return s.handleUnblockCuratingIP(request.Params, dbACL)
 	case "isconfigured":
 		return s.handleIsConfigured(dbACL)
+	case "scanpubkeys":
+		return s.handleScanPubkeys(dbACL)
+	case "geteventsforpubkey":
+		return s.handleGetEventsForPubkey(request.Params, dbACL)
+	case "deleteeventsforpubkey":
+		return s.handleDeleteEventsForPubkey(request.Params, dbACL)
 	default:
 		return NIP86Response{Error: "Unknown method: " + request.Method}
 	}
@@ -167,6 +173,9 @@ func (s *Server) handleCuratingSupportedMethods() NIP86Response {
 		"listblockedips",
 		"unblockip",
 		"isconfigured",
+		"scanpubkeys",
+		"geteventsforpubkey",
+		"deleteeventsforpubkey",
 	}
 	return NIP86Response{Result: methods}
 }
@@ -444,8 +453,11 @@ func (s *Server) handleGetCuratingConfig(dbACL *database.CuratingACL) NIP86Respo
 		"first_ban_hours":  config.FirstBanHours,
 		"second_ban_hours": config.SecondBanHours,
 		"allowed_kinds":    config.AllowedKinds,
+		"custom_kinds":     config.AllowedKinds, // Alias for frontend compatibility
 		"allowed_ranges":   config.AllowedRanges,
+		"kind_ranges":      config.AllowedRanges, // Alias for frontend compatibility
 		"kind_categories":  config.KindCategories,
+		"categories":       config.KindCategories, // Alias for frontend compatibility
 		"config_event_id":  config.ConfigEventID,
 		"config_pubkey":    config.ConfigPubkey,
 		"configured_at":    config.ConfiguredAt,
@@ -531,11 +543,23 @@ func GetKindCategoriesInfo() []map[string]interface{} {
 			"kinds":       []int{1063, 20, 21, 22},
 		},
 		{
-			"id":          "marketplace",
-			"name":        "Marketplace",
-			"description": "Product listings, stalls, auctions",
+			"id":          "marketplace_nip15",
+			"name":        "Marketplace (NIP-15)",
+			"description": "Legacy NIP-15 stalls and products",
 			"kinds":       []int{30017, 30018, 30019, 30020, 1021, 1022},
 		},
+		{
+			"id":          "marketplace_nip99",
+			"name":        "Marketplace (NIP-99/Gamma)",
+			"description": "NIP-99 classified listings, collections, shipping, reviews (Plebeian Market)",
+			"kinds":       []int{30402, 30403, 30405, 30406, 31555},
+		},
+		{
+			"id":          "order_communication",
+			"name":        "Order Communication",
+			"description": "Gamma Markets order messages and payment receipts",
+			"kinds":       []int{16, 17},
+		},
 		{
 			"id":          "groups_nip29",
 			"name":        "Group Messaging (NIP-29)",
@@ -591,3 +615,122 @@ func parseRange(s string, parts []int) (int, error) {
 	}
 	return 0, nil
 }
+
+// handleScanPubkeys scans the database for all pubkeys and populates event counts
+// This is used to retroactively populate the unclassified users list
+func (s *Server) handleScanPubkeys(dbACL *database.CuratingACL) NIP86Response {
+	result, err := dbACL.ScanAllPubkeys()
+	if chk.E(err) {
+		return NIP86Response{Error: "Failed to scan pubkeys: " + err.Error()}
+	}
+
+	return NIP86Response{Result: map[string]interface{}{
+		"total_pubkeys": result.TotalPubkeys,
+		"total_events":  result.TotalEvents,
+		"skipped":       result.Skipped,
+	}}
+}
+
+// handleGetEventsForPubkey returns events for a specific pubkey
+// Params: [pubkey, limit (optional, default 100), offset (optional, default 0)]
+func (s *Server) handleGetEventsForPubkey(params []interface{}, dbACL *database.CuratingACL) NIP86Response {
+	if len(params) < 1 {
+		return NIP86Response{Error: "Missing required parameter: pubkey"}
+	}
+
+	pubkey, ok := params[0].(string)
+	if !ok {
+		return NIP86Response{Error: "Invalid pubkey parameter"}
+	}
+
+	if len(pubkey) != 64 {
+		return NIP86Response{Error: "Invalid pubkey format (must be 64 hex characters)"}
+	}
+
+	// Parse optional limit (default 100)
+	limit := 100
+	if len(params) > 1 {
+		if l, ok := params[1].(float64); ok {
+			limit = int(l)
+			if limit > 500 {
+				limit = 500 // Cap at 500
+			}
+			if limit < 1 {
+				limit = 1
+			}
+		}
+	}
+
+	// Parse optional offset (default 0)
+	offset := 0
+	if len(params) > 2 {
+		if o, ok := params[2].(float64); ok {
+			offset = int(o)
+			if offset < 0 {
+				offset = 0
+			}
+		}
+	}
+
+	events, total, err := dbACL.GetEventsForPubkey(pubkey, limit, offset)
+	if chk.E(err) {
+		return NIP86Response{Error: "Failed to get events: " + err.Error()}
+	}
+
+	// Convert to response format
+	eventList := make([]map[string]interface{}, len(events))
+	for i, ev := range events {
+		eventList[i] = map[string]interface{}{
+			"id":         ev.ID,
+			"kind":       ev.Kind,
+			"content":    ev.Content,
+			"created_at": ev.CreatedAt,
+		}
+	}
+
+	return NIP86Response{Result: map[string]interface{}{
+		"events": eventList,
+		"total":  total,
+		"limit":  limit,
+		"offset": offset,
+	}}
+}
+
+// handleDeleteEventsForPubkey deletes all events for a specific pubkey
+// This is only allowed for blacklisted pubkeys as a safety measure
+// Params: [pubkey]
+func (s *Server) handleDeleteEventsForPubkey(params []interface{}, dbACL *database.CuratingACL) NIP86Response {
+	if len(params) < 1 {
+		return NIP86Response{Error: "Missing required parameter: pubkey"}
+	}
+
+	pubkey, ok := params[0].(string)
+	if !ok {
+		return NIP86Response{Error: "Invalid pubkey parameter"}
+	}
+
+	if len(pubkey) != 64 {
+		return NIP86Response{Error: "Invalid pubkey format (must be 64 hex characters)"}
+	}
+
+	// Safety check: only allow deletion of events from blacklisted users
+	isBlacklisted, err := dbACL.IsPubkeyBlacklisted(pubkey)
+	if chk.E(err) {
+		return NIP86Response{Error: "Failed to check blacklist status: " + err.Error()}
+	}
+
+	if !isBlacklisted {
+		return NIP86Response{Error: "Can only delete events from blacklisted users. Blacklist the user first."}
+	}
+
+	// Delete all events for this pubkey
+	deleted, err := dbACL.DeleteEventsForPubkey(pubkey)
+	if chk.E(err) {
+		return NIP86Response{Error: "Failed to delete events: " + err.Error()}
+	}
+
+	return NIP86Response{Result: map[string]interface{}{
+		"deleted": deleted,
+		"pubkey":  pubkey,
+	}}
+}
```
```diff
@@ -34,7 +34,6 @@ import (
 
 func (l *Listener) HandleReq(msg []byte) (err error) {
 	log.D.F("handling REQ: %s", msg)
-	log.T.F("HandleReq: START processing from %s", l.remote)
 	// var rem []byte
 	env := reqenvelope.New()
 	if _, err = env.Unmarshal(msg); chk.E(err) {
```
```diff
@@ -159,12 +159,26 @@ func (p *P) Deliver(ev *event.E) {
 		sub Subscription
 	}
 	var deliveries []delivery
+	// Debug: log ephemeral event delivery attempts
+	isEphemeral := ev.Kind >= 20000 && ev.Kind < 30000
+	if isEphemeral {
+		var tagInfo string
+		if ev.Tags != nil {
+			tagInfo = string(ev.Tags.Marshal(nil))
+		}
+		log.I.F("ephemeral event kind %d, id %0x, checking %d connections for matches, tags: %s",
+			ev.Kind, ev.ID[:8], len(p.Map), tagInfo)
+	}
 	for w, subs := range p.Map {
 		for id, subscriber := range subs {
 			if subscriber.Match(ev) {
 				deliveries = append(
 					deliveries, delivery{w: w, id: id, sub: subscriber},
 				)
+			} else if isEphemeral {
+				// Debug: log why ephemeral events don't match
+				log.I.F("ephemeral event kind %d did NOT match subscription %s (filters: %s)",
+					ev.Kind, id, string(subscriber.S.Marshal(nil)))
 			}
 		}
 	}
```
app/web/dist/bundle.css (vendored, 2 lines changed)
File diff suppressed because one or more lines are too long

app/web/dist/bundle.js (vendored, 28 lines changed)
File diff suppressed because one or more lines are too long

app/web/dist/bundle.js.map (vendored, 2 lines changed)
File diff suppressed because one or more lines are too long
```diff
@@ -13,6 +13,15 @@
 	let messageType = "info";
 	let isConfigured = false;
 
+	// User detail view state
+	let selectedUser = null;
+	let selectedUserType = null; // "trusted", "blacklisted", or "unclassified"
+	let userEvents = [];
+	let userEventsTotal = 0;
+	let userEventsOffset = 0;
+	let loadingEvents = false;
+	let expandedEvents = {}; // Track which events are expanded
+
 	// Configuration state
 	let config = {
 		daily_limit: 50,
@@ -186,6 +195,19 @@
 		}
 	}
 
+	// Scan database for all pubkeys
+	async function scanDatabase() {
+		try {
+			const result = await callNIP86API("scanpubkeys");
+			showMessage(`Database scanned: ${result.total_pubkeys} pubkeys, ${result.total_events} events (${result.skipped} skipped)`, "success");
+			// Refresh the unclassified users list
+			await loadUnclassifiedUsers();
+		} catch (error) {
+			console.error("Failed to scan database:", error);
+			showMessage("Failed to scan database: " + error.message, "error");
+		}
+	}
+
 	// Load spam events
 	async function loadSpamEvents() {
 		try {
```
```diff
@@ -430,6 +452,176 @@
 		if (!timestamp) return "";
 		return new Date(timestamp).toLocaleString();
 	}
+
+	// Show message helper
+	function showMessage(msg, type = "info") {
+		message = msg;
+		messageType = type;
+	}
+
+	// Open user detail view
+	async function openUserDetail(pubkey, type) {
+		console.log("openUserDetail called:", pubkey, type);
+		selectedUser = pubkey;
+		selectedUserType = type;
+		userEvents = [];
+		userEventsTotal = 0;
+		userEventsOffset = 0;
+		expandedEvents = {};
+		console.log("selectedUser set to:", selectedUser);
+		await loadUserEvents();
+	}
+
+	// Close user detail view
+	function closeUserDetail() {
+		selectedUser = null;
+		selectedUserType = null;
+		userEvents = [];
+		userEventsTotal = 0;
+		userEventsOffset = 0;
+		expandedEvents = {};
+	}
+
+	// Load events for selected user
+	async function loadUserEvents() {
+		console.log("loadUserEvents called, selectedUser:", selectedUser, "loadingEvents:", loadingEvents);
+		if (!selectedUser || loadingEvents) return;
+
+		try {
+			loadingEvents = true;
+			console.log("Calling geteventsforpubkey API...");
+			const result = await callNIP86API("geteventsforpubkey", [selectedUser, 100, userEventsOffset]);
+			console.log("API result:", result);
+			if (result) {
+				if (userEventsOffset === 0) {
+					userEvents = result.events || [];
+				} else {
+					userEvents = [...userEvents, ...(result.events || [])];
+				}
+				userEventsTotal = result.total || 0;
+			}
+		} catch (error) {
+			console.error("Failed to load user events:", error);
+			showMessage("Failed to load events: " + error.message, "error");
+		} finally {
+			loadingEvents = false;
+		}
+	}
+
+	// Load more events
+	async function loadMoreEvents() {
+		userEventsOffset = userEvents.length;
+		await loadUserEvents();
+	}
+
+	// Toggle event expansion
+	function toggleEventExpansion(eventId) {
+		expandedEvents = {
+			...expandedEvents,
+			[eventId]: !expandedEvents[eventId]
+		};
+	}
+
+	// Truncate content to 6 lines (approximately 100 chars per line)
+	function truncateContent(content, maxLines = 6) {
+		if (!content) return "";
+		const lines = content.split('\n');
+		if (lines.length <= maxLines && content.length <= maxLines * 100) {
+			return content;
+		}
+		// Truncate by lines or characters, whichever is smaller
+		let truncated = lines.slice(0, maxLines).join('\n');
+		if (truncated.length > maxLines * 100) {
+			truncated = truncated.substring(0, maxLines * 100);
+		}
+		return truncated;
+	}
+
+	// Check if content is truncated
+	function isContentTruncated(content, maxLines = 6) {
+		if (!content) return false;
+		const lines = content.split('\n');
+		return lines.length > maxLines || content.length > maxLines * 100;
+	}
+
+	// Trust user from detail view and refresh
+	async function trustUserFromDetail() {
+		await trustPubkey(selectedUser, "");
+		// Refresh list and go back
+		await loadAllData();
+		closeUserDetail();
+	}
+
+	// Blacklist user from detail view and refresh
+	async function blacklistUserFromDetail() {
+		await blacklistPubkey(selectedUser, "");
+		// Refresh list and go back
+		await loadAllData();
+		closeUserDetail();
+	}
+
+	// Untrust user from detail view and refresh
+	async function untrustUserFromDetail() {
+		await untrustPubkey(selectedUser);
+		await loadAllData();
+		closeUserDetail();
+	}
+
+	// Unblacklist user from detail view and refresh
+	async function unblacklistUserFromDetail() {
+		await unblacklistPubkey(selectedUser);
+		await loadAllData();
+		closeUserDetail();
+	}
+
+	// Delete all events for a blacklisted user
+	async function deleteAllEventsForUser() {
+		if (!confirm(`Delete ALL ${userEventsTotal} events from this user? This cannot be undone.`)) {
+			return;
+		}
+
+		try {
+			isLoading = true;
+			const result = await callNIP86API("deleteeventsforpubkey", [selectedUser]);
+			showMessage(`Deleted ${result.deleted} events`, "success");
+			// Refresh the events list
+			userEvents = [];
+			userEventsTotal = 0;
+			userEventsOffset = 0;
+			await loadUserEvents();
+		} catch (error) {
+			console.error("Failed to delete events:", error);
+			showMessage("Failed to delete events: " + error.message, "error");
+		} finally {
+			isLoading = false;
+		}
+	}
+
+	// Get kind name
+	function getKindName(kind) {
+		const kindNames = {
+			0: "Metadata",
+			1: "Text Note",
+			3: "Follow List",
+			4: "Encrypted DM",
+			6: "Repost",
+			7: "Reaction",
+			14: "Chat Message",
+			16: "Order Message",
+			17: "Payment Receipt",
+			1063: "File Metadata",
+			10002: "Relay List",
+			30017: "Stall",
+			30018: "Product (NIP-15)",
+			30023: "Long-form",
+			30078: "App Data",
+			30402: "Product (NIP-99)",
+			30405: "Collection",
+			30406: "Shipping",
+			31555: "Review",
+		};
+		return kindNames[kind] || `Kind ${kind}`;
+	}
 </script>
 
 <div class="curation-view">
```
```diff
@@ -531,6 +723,74 @@
 			<p class="publish-note">This will publish a kind 30078 event to activate curating mode.</p>
 		</div>
 	</div>
+{:else}
+	<!-- User Detail View -->
+	{#if selectedUser}
+		<div class="user-detail-view">
+			<div class="detail-header">
+				<div class="detail-header-left">
+					<button class="back-btn" on:click={closeUserDetail}>
+						← Back
+					</button>
+					<h3>User Events</h3>
+					<span class="detail-pubkey" title={selectedUser}>{formatPubkey(selectedUser)}</span>
+					<span class="detail-count">{userEventsTotal} events</span>
+				</div>
+				<div class="detail-header-right">
+					{#if selectedUserType === "trusted"}
+						<button class="btn-danger" on:click={untrustUserFromDetail}>Remove Trust</button>
+						<button class="btn-danger" on:click={blacklistUserFromDetail}>Blacklist</button>
+					{:else if selectedUserType === "blacklisted"}
+						<button class="btn-delete-all" on:click={deleteAllEventsForUser} disabled={isLoading || userEventsTotal === 0}>
+							Delete All Events
+						</button>
+						<button class="btn-success" on:click={unblacklistUserFromDetail}>Remove from Blacklist</button>
+						<button class="btn-success" on:click={trustUserFromDetail}>Trust</button>
+					{:else}
+						<button class="btn-success" on:click={trustUserFromDetail}>Trust</button>
+						<button class="btn-danger" on:click={blacklistUserFromDetail}>Blacklist</button>
+					{/if}
+				</div>
+			</div>
+
+			<div class="events-list">
+				{#if loadingEvents && userEvents.length === 0}
+					<div class="loading">Loading events...</div>
+				{:else if userEvents.length === 0}
+					<div class="empty">No events found for this user.</div>
+				{:else}
+					{#each userEvents as event}
+						<div class="event-item">
+							<div class="event-header">
+								<span class="event-kind">{getKindName(event.kind)}</span>
+								<span class="event-id" title={event.id}>{formatPubkey(event.id)}</span>
+								<span class="event-time">{formatDate(event.created_at * 1000)}</span>
+							</div>
+							<div class="event-content" class:expanded={expandedEvents[event.id]}>
+								{#if expandedEvents[event.id] || !isContentTruncated(event.content)}
+									<pre>{event.content || "(empty)"}</pre>
+								{:else}
+									<pre>{truncateContent(event.content)}...</pre>
+								{/if}
+							</div>
+							{#if isContentTruncated(event.content)}
+								<button class="expand-btn" on:click={() => toggleEventExpansion(event.id)}>
+									{expandedEvents[event.id] ? "Show less" : "Show more"}
+								</button>
+							{/if}
+						</div>
+					{/each}
+
+					{#if userEvents.length < userEventsTotal}
+						<div class="load-more">
+							<button on:click={loadMoreEvents} disabled={loadingEvents}>
+								{loadingEvents ? "Loading..." : `Load more (${userEvents.length} of ${userEventsTotal})`}
+							</button>
+						</div>
+					{/if}
+				{/if}
+			</div>
+		</div>
+	{:else}
 	<!-- Active Mode -->
 	<div class="tabs">
```
```diff
@@ -579,7 +839,7 @@
 				<div class="list">
 					{#if trustedPubkeys.length > 0}
 						{#each trustedPubkeys as item}
-							<div class="list-item">
+							<div class="list-item clickable" on:click={() => openUserDetail(item.pubkey, "trusted")}>
 								<div class="item-main">
 									<span class="pubkey" title={item.pubkey}>{formatPubkey(item.pubkey)}</span>
 									{#if item.note}
@@ -587,7 +847,7 @@
 									{/if}
 								</div>
 								<div class="item-actions">
-									<button class="btn-danger" on:click={() => untrustPubkey(item.pubkey)}>
+									<button class="btn-danger" on:click|stopPropagation={() => untrustPubkey(item.pubkey)}>
 										Remove
 									</button>
 								</div>
@@ -624,7 +884,7 @@
 				<div class="list">
 					{#if blacklistedPubkeys.length > 0}
 						{#each blacklistedPubkeys as item}
-							<div class="list-item">
+							<div class="list-item clickable" on:click={() => openUserDetail(item.pubkey, "blacklisted")}>
 								<div class="item-main">
 									<span class="pubkey" title={item.pubkey}>{formatPubkey(item.pubkey)}</span>
 									{#if item.reason}
@@ -632,7 +892,7 @@
 									{/if}
 								</div>
 								<div class="item-actions">
-									<button class="btn-success" on:click={() => unblacklistPubkey(item.pubkey)}>
+									<button class="btn-success" on:click|stopPropagation={() => unblacklistPubkey(item.pubkey)}>
 										Remove
 									</button>
 								</div>
```
```diff
@@ -650,23 +910,28 @@
 				<h3>Unclassified Users</h3>
 				<p class="help-text">Users who have posted events but haven't been classified. Sorted by event count.</p>
 
+				<div class="button-row">
 					<button class="refresh-btn" on:click={loadUnclassifiedUsers} disabled={isLoading}>
 						Refresh
 					</button>
+					<button class="scan-btn" on:click={scanDatabase} disabled={isLoading}>
+						Scan Database
+					</button>
+				</div>
+
 				<div class="list">
 					{#if unclassifiedUsers.length > 0}
 						{#each unclassifiedUsers as user}
-							<div class="list-item">
+							<div class="list-item clickable" on:click={() => openUserDetail(user.pubkey, "unclassified")}>
 								<div class="item-main">
 									<span class="pubkey" title={user.pubkey}>{formatPubkey(user.pubkey)}</span>
-									<span class="event-count">{user.total_events} events</span>
+									<span class="event-count">{user.event_count} events</span>
 								</div>
 								<div class="item-actions">
-									<button class="btn-success" on:click={() => trustPubkey(user.pubkey, "")}>
+									<button class="btn-success" on:click|stopPropagation={() => trustPubkey(user.pubkey, "")}>
 										Trust
 									</button>
-									<button class="btn-danger" on:click={() => blacklistPubkey(user.pubkey, "")}>
+									<button class="btn-danger" on:click|stopPropagation={() => blacklistPubkey(user.pubkey, "")}>
 										Blacklist
 									</button>
 								</div>
```
```diff
@@ -841,6 +1106,7 @@
 				{/if}
 			</div>
 		{/if}
+	{/if}
 </div>
 
 <style>
```
```diff
@@ -1149,6 +1415,26 @@
 		cursor: not-allowed;
 	}
 
+	.button-row {
+		display: flex;
+		gap: 0.5rem;
+		margin-bottom: 1rem;
+	}
+
+	.scan-btn {
+		padding: 0.5rem 1rem;
+		background: var(--warning, #f0ad4e);
+		color: var(--text-color);
+		border: none;
+		border-radius: 4px;
+		cursor: pointer;
+	}
+
+	.scan-btn:disabled {
+		opacity: 0.6;
+		cursor: not-allowed;
+	}
+
 	.list {
 		border: 1px solid var(--border-color);
 		border-radius: 4px;
```
```diff
@@ -1222,6 +1508,26 @@
 		font-size: 0.85em;
 	}
 
+	.btn-delete-all {
+		padding: 0.35rem 0.75rem;
+		background: #8B0000;
+		color: white;
+		border: none;
+		border-radius: 4px;
+		cursor: pointer;
+		font-size: 0.85em;
+		font-weight: 600;
+	}
+
+	.btn-delete-all:hover:not(:disabled) {
+		background: #660000;
+	}
+
+	.btn-delete-all:disabled {
+		opacity: 0.5;
+		cursor: not-allowed;
+	}
+
 	.empty {
 		padding: 2rem;
 		text-align: center;
```
```diff
@@ -1229,4 +1535,187 @@
 		opacity: 0.6;
 		font-style: italic;
 	}
+
+	/* Clickable list items */
+	.list-item.clickable {
+		cursor: pointer;
+		transition: background-color 0.2s;
+	}
+
+	.list-item.clickable:hover {
+		background-color: var(--button-hover-bg);
+	}
+
+	/* User Detail View */
+	.user-detail-view {
+		background: var(--card-bg);
+		border-radius: 8px;
+		padding: 1.5em;
+		border: 1px solid var(--border-color);
+	}
+
+	.detail-header {
+		display: flex;
+		justify-content: space-between;
+		align-items: center;
+		margin-bottom: 1.5rem;
+		padding-bottom: 1rem;
+		border-bottom: 1px solid var(--border-color);
+		flex-wrap: wrap;
+		gap: 1rem;
+	}
+
+	.detail-header-left {
+		display: flex;
+		align-items: center;
+		gap: 1rem;
+		flex-wrap: wrap;
+	}
+
+	.detail-header-left h3 {
+		margin: 0;
+		color: var(--text-color);
+	}
+
+	.detail-header-right {
+		display: flex;
+		gap: 0.5rem;
+	}
+
+	.back-btn {
+		padding: 0.5rem 1rem;
+		background: var(--bg-color);
+		color: var(--text-color);
+		border: 1px solid var(--border-color);
+		border-radius: 4px;
+		cursor: pointer;
+		font-size: 0.9em;
+	}
+
+	.back-btn:hover {
+		background: var(--button-hover-bg);
+	}
+
+	.detail-pubkey {
+		font-family: monospace;
+		font-size: 0.9em;
+		color: var(--text-color);
+		background: var(--bg-color);
+		padding: 0.25rem 0.5rem;
+		border-radius: 4px;
+	}
+
+	.detail-count {
+		font-size: 0.85em;
+		color: var(--success);
+		font-weight: 500;
+	}
+
+	/* Events List */
+	.events-list {
+		max-height: 600px;
+		overflow-y: auto;
+	}
+
+	.event-item {
+		background: var(--bg-color);
+		border: 1px solid var(--border-color);
+		border-radius: 6px;
+		padding: 1rem;
+		margin-bottom: 0.75rem;
+	}
+
+	.event-header {
+		display: flex;
+		gap: 1rem;
+		margin-bottom: 0.5rem;
+		flex-wrap: wrap;
+		align-items: center;
+	}
+
+	.event-kind {
+		background: var(--accent-color);
+		color: var(--text-color);
+		padding: 0.2rem 0.5rem;
+		border-radius: 4px;
+		font-size: 0.8em;
+		font-weight: 500;
+	}
+
+	.event-id {
+		font-family: monospace;
+		font-size: 0.8em;
+		color: var(--text-color);
+		opacity: 0.7;
+	}
+
+	.event-time {
+		font-size: 0.8em;
+		color: var(--text-color);
+		opacity: 0.6;
+	}
+
+	.event-content {
+		background: var(--card-bg);
+		border-radius: 4px;
+		padding: 0.75rem;
+		overflow: hidden;
+	}
+
+	.event-content pre {
+		margin: 0;
+		white-space: pre-wrap;
+		word-break: break-word;
+		font-family: inherit;
+		font-size: 0.9em;
+		color: var(--text-color);
+		max-height: 150px;
+		overflow: hidden;
+	}
+
+	.event-content.expanded pre {
+		max-height: none;
+	}
+
+	.expand-btn {
+		margin-top: 0.5rem;
+		padding: 0.25rem 0.5rem;
+		background: transparent;
+		color: var(--accent-color);
+		border: 1px solid var(--accent-color);
+		border-radius: 4px;
+		cursor: pointer;
+		font-size: 0.8em;
+	}
+
+	.expand-btn:hover {
+		background: var(--accent-color);
+		color: var(--text-color);
+	}
+
+	.load-more {
+		text-align: center;
+		padding: 1rem;
+	}
+
+	.load-more button {
+		padding: 0.5rem 1.5rem;
+		background: var(--info);
+		color: var(--text-color);
+		border: none;
+		border-radius: 4px;
+		cursor: pointer;
+	}
+
+	.load-more button:disabled {
+		opacity: 0.6;
+		cursor: not-allowed;
+	}
+
+	.loading {
+		padding: 2rem;
+		text-align: center;
+		color: var(--text-color);
+		opacity: 0.6;
+	}
 </style>
```
```diff
@@ -30,11 +30,23 @@ export const curationKindCategories = [
     kinds: [1063, 20, 21, 22],
   },
   {
-    id: "marketplace",
-    name: "Marketplace",
-    description: "Product listings, stalls, and marketplace events",
+    id: "marketplace_nip15",
+    name: "Marketplace (NIP-15)",
+    description: "Legacy NIP-15 stalls and products",
     kinds: [30017, 30018, 30019, 30020],
   },
+  {
+    id: "marketplace_nip99",
+    name: "Marketplace (NIP-99/Gamma)",
+    description: "NIP-99 classified listings, collections, shipping, reviews (Plebeian Market)",
+    kinds: [30402, 30403, 30405, 30406, 31555],
+  },
+  {
+    id: "order_communication",
+    name: "Order Communication",
+    description: "Gamma Markets order messages and payment receipts (kinds 16, 17)",
+    kinds: [16, 17],
+  },
   {
     id: "groups_nip29",
     name: "Group Messaging (NIP-29)",
```
docs/NIP-CURATION.md (new file, 290 lines)

# NIP-XX: Relay Curation Mode

`draft` `optional`

This NIP defines a relay operating mode where operators can curate content through a three-tier publisher classification system (trusted, blacklisted, unclassified) with rate limiting, IP-based flood protection, and event kind filtering. Configuration and management are performed through Nostr events and a NIP-86 JSON-RPC API.

## Motivation

Public relays face challenges managing spam, abuse, and resource consumption. Traditional approaches (pay-to-relay, invite-only, WoT-based) each have limitations. Curation mode provides relay operators with fine-grained control over who can publish what, while maintaining an open-by-default stance that allows unknown users to participate within limits.

## Overview

Curation mode introduces:

1. **Publisher Classification**: Three-tier system (trusted, blacklisted, unclassified)
2. **Rate Limiting**: Per-pubkey and per-IP daily event limits
3. **Kind Filtering**: Configurable allowed event kinds
4. **Configuration Event**: Kind 30078 replaceable event for relay configuration
5. **Management API**: NIP-86 JSON-RPC endpoints for administration

## Configuration Event (Kind 30078)

The relay MUST be configured with a kind 30078 replaceable event before accepting events from non-owner/admin pubkeys. This event uses the `d` tag value `curating-config`.

### Event Structure

```json
{
  "kind": 30078,
  "tags": [
    ["d", "curating-config"],
    ["daily_limit", "<number>"],
    ["ip_daily_limit", "<number>"],
    ["first_ban_hours", "<number>"],
    ["second_ban_hours", "<number>"],
    ["kind_category", "<category_id>"],
    ["kind", "<kind_number>"],
    ["kind_range", "<start>-<end>"]
  ],
  "content": "{}",
  "pubkey": "<owner_or_admin_pubkey>",
  "created_at": <unix_timestamp>
}
```

### Configuration Tags

| Tag | Description | Default |
|-----|-------------|---------|
| `d` | MUST be `"curating-config"` | Required |
| `daily_limit` | Max events per day for unclassified users | 50 |
| `ip_daily_limit` | Max events per day from a single IP | 500 |
| `first_ban_hours` | First offense IP ban duration (hours) | 1 |
| `second_ban_hours` | Subsequent offense IP ban duration (hours) | 168 |
| `kind_category` | Predefined kind category (repeatable) | - |
| `kind` | Individual allowed kind number (repeatable) | - |
| `kind_range` | Allowed kind range as "start-end" (repeatable) | - |

### Kind Categories

Relays SHOULD support these predefined categories:

| Category ID | Kinds | Description |
|-------------|-------|-------------|
| `social` | 0, 1, 3, 6, 7, 10002 | Profiles, notes, contacts, reposts, reactions, relay lists |
| `dm` | 4, 14, 1059 | Direct messages (NIP-04, NIP-17, gift wraps) |
| `longform` | 30023, 30024 | Long-form articles and drafts |
| `media` | 1063, 20, 21, 22 | File metadata, picture, video, audio events |
| `lists` | 10000, 10001, 10003, 30000, 30001, 30003 | Mute lists, pins, bookmarks, people lists |
| `groups_nip29` | 9-12, 9000-9002, 39000-39002 | NIP-29 relay-based groups |
| `groups_nip72` | 34550, 1111, 4550 | NIP-72 moderated communities |
| `marketplace_nip15` | 30017-30020, 1021, 1022 | NIP-15 stalls and products |
| `marketplace_nip99` | 30402, 30403, 30405, 30406, 31555 | NIP-99 classified listings |
| `order_communication` | 16, 17 | Marketplace order messages |

Relays MAY define additional categories.

### Example Configuration Event

```json
{
  "kind": 30078,
  "tags": [
    ["d", "curating-config"],
    ["daily_limit", "100"],
    ["ip_daily_limit", "1000"],
    ["first_ban_hours", "2"],
    ["second_ban_hours", "336"],
    ["kind_category", "social"],
    ["kind_category", "dm"],
    ["kind", "1984"],
    ["kind_range", "30000-39999"]
  ],
  "content": "{}",
  "pubkey": "a1b2c3...",
  "created_at": 1700000000
}
```

## Publisher Classification

### Trusted Publishers

- Unlimited publishing rights
- Bypass rate limiting and IP flood protection
- Events visible to all users

### Blacklisted Publishers

- Cannot publish any events
- Events rejected with `"blocked: pubkey is blacklisted"` notice
- Existing events hidden from queries (visible only to admins/owners)

### Unclassified Publishers (Default)

- Subject to daily event limit
- Subject to IP flood protection
- Events visible to all users
- Can be promoted to trusted or demoted to blacklisted

## Event Processing Flow

When an event is received, the relay MUST process it as follows (a code sketch of this flow appears after the IP flood protection rules below):

1. **Configuration Check**: Reject if relay is not configured (no kind 30078 event)
2. **Access Level Check**: Determine pubkey's access level
   - Owners and admins: always accept, bypass all limits
   - IP-blocked: reject with temporary block notice
   - Blacklisted: reject with blacklist notice
   - Trusted: accept, bypass rate limits
   - Unclassified: continue to rate limit checks
3. **Kind Filter**: Reject if event kind is not in allowed list
4. **Rate Limit Check**:
   - Check pubkey's daily event count against `daily_limit`
   - Check IP's daily event count against `ip_daily_limit`
5. **Accept or Reject**: Accept if all checks pass

### IP Flood Protection

When a pubkey exceeds `daily_limit`:

1. Record IP offense
2. If first offense: block IP for `first_ban_hours`
3. If subsequent offense: block IP for `second_ban_hours`
4. Track which pubkeys triggered the offense for admin review
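The acceptance decision above is a straight cascade, which the following Go sketch makes concrete. Only the flow itself comes from this specification; the type names and the database-backed lookups are illustrative stubs, not part of the reference implementation.

```go
package curation

type AccessLevel int

const (
	Unclassified AccessLevel = iota
	OwnerOrAdmin
	IPBlocked
	Blacklisted
	Trusted
)

type Event struct {
	Pubkey string
	Kind   int
}

type Config struct {
	Configured   bool
	DailyLimit   int
	IPDailyLimit int
}

// Stubs standing in for database-backed lookups.
func accessLevel(pubkey, ip string) AccessLevel { return Unclassified }
func kindAllowed(kind int, cfg Config) bool     { return true }
func dailyCount(pubkey string) int              { return 0 }
func ipDailyCount(ip string) int                { return 0 }
func recordIPOffense(ip, pubkey string)         {}

// acceptEvent runs the cascade from the Event Processing Flow section
// and returns an accept/reject decision plus a reject notice.
func acceptEvent(ev Event, ip string, cfg Config) (bool, string) {
	if !cfg.Configured {
		return false, "blocked: relay is not configured"
	}
	switch accessLevel(ev.Pubkey, ip) {
	case OwnerOrAdmin:
		return true, "" // bypasses all limits
	case IPBlocked:
		return false, "blocked: IP is temporarily blocked"
	case Blacklisted:
		return false, "blocked: pubkey is blacklisted"
	case Trusted:
		return true, "" // bypasses rate limits
	}
	// Unclassified: kind filter first, then both rate limits.
	if !kindAllowed(ev.Kind, cfg) {
		return false, "blocked: event kind not allowed"
	}
	if dailyCount(ev.Pubkey) >= cfg.DailyLimit {
		recordIPOffense(ip, ev.Pubkey) // starts first/second ban windows
		return false, "rate-limited: daily limit exceeded"
	}
	if ipDailyCount(ip) >= cfg.IPDailyLimit {
		return false, "rate-limited: IP daily limit exceeded"
	}
	return true, ""
}
```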
## Management API (NIP-86)

All management endpoints require NIP-98 HTTP authentication from an owner or admin pubkey.

### Trust Management

| Method | Parameters | Description |
|--------|------------|-------------|
| `trustpubkey` | `[pubkey_hex, note?]` | Add pubkey to trusted list |
| `untrustpubkey` | `[pubkey_hex]` | Remove pubkey from trusted list |
| `listtrustedpubkeys` | `[]` | List all trusted pubkeys |

### Blacklist Management

| Method | Parameters | Description |
|--------|------------|-------------|
| `blacklistpubkey` | `[pubkey_hex, reason?]` | Add pubkey to blacklist |
| `unblacklistpubkey` | `[pubkey_hex]` | Remove pubkey from blacklist |
| `listblacklistedpubkeys` | `[]` | List all blacklisted pubkeys |

### User Inspection

| Method | Parameters | Description |
|--------|------------|-------------|
| `listunclassifiedusers` | `[limit?]` | List unclassified users sorted by event count |
| `geteventsforpubkey` | `[pubkey_hex, limit?, offset?]` | Get events from a pubkey |
| `deleteeventsforpubkey` | `[pubkey_hex]` | Delete all events from a blacklisted pubkey |
| `scanpubkeys` | `[]` | Scan database to populate unclassified users list |

### Spam Management

| Method | Parameters | Description |
|--------|------------|-------------|
| `markspam` | `[event_id_hex, pubkey?, reason?]` | Flag event as spam (hides from queries) |
| `unmarkspam` | `[event_id_hex]` | Remove spam flag |
| `listspamevents` | `[]` | List spam-flagged events |
| `deleteevent` | `[event_id_hex]` | Permanently delete an event |

### IP Management

| Method | Parameters | Description |
|--------|------------|-------------|
| `listblockedips` | `[]` | List currently blocked IPs |
| `unblockip` | `[ip_address]` | Remove IP block |

### Configuration

| Method | Parameters | Description |
|--------|------------|-------------|
| `getcuratingconfig` | `[]` | Get current configuration |
| `isconfigured` | `[]` | Check if relay is configured |
| `supportedmethods` | `[]` | List available management methods |

### Example API Request

```http
POST /api HTTP/1.1
Host: relay.example.com
Authorization: Nostr <base64_nip98_event>
Content-Type: application/json

```

### Example API Response

```json
{
  "result": {
    "success": true,
    "message": "Pubkey added to trusted list"
  }
}
```
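The request body elided above is the standard NIP-86 JSON-RPC shape, an object with `method` and `params`. A minimal Go client sketch for calling one of these methods follows; the relay URL and pubkey are placeholders, and the NIP-98 token construction (a signed kind 27235 event over this URL and method, base64-encoded) is deliberately left out.

```go
// Hypothetical client for a curation-mode management call.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type nip86Request struct {
	Method string        `json:"method"`
	Params []interface{} `json:"params"`
}

func main() {
	body, _ := json.Marshal(nip86Request{
		Method: "trustpubkey",
		Params: []interface{}{"<64_hex_pubkey>", "well-behaved publisher"},
	})
	req, _ := http.NewRequest("POST", "https://relay.example.com/api", bytes.NewReader(body))
	// NIP-98 auth header; token construction elided in this sketch.
	req.Header.Set("Authorization", "Nostr <base64_nip98_event>")
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var out map[string]interface{}
	json.NewDecoder(resp.Body).Decode(&out)
	fmt.Println(out) // either {"result": ...} or {"error": "..."}
}
```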
## Event Visibility

| Viewer | Sees Trusted Events | Sees Blacklisted Events | Sees Spam-Flagged Events |
|--------|---------------------|-------------------------|--------------------------|
| Owner/Admin | Yes | Yes | Yes |
| Regular User | Yes | No | No |

## Relay Information Document

Relays implementing this NIP SHOULD advertise it in their NIP-11 relay information document:

```json
{
  "supported_nips": [11, 86, "XX"],
  "limitation": {
    "curation_mode": true,
    "daily_limit": 50,
    "ip_daily_limit": 500
  }
}
```

## Implementation Notes

### Rate Limit Reset

Daily counters SHOULD reset at UTC midnight (00:00:00 UTC).

### Caching

Implementations SHOULD cache trusted/blacklisted status and allowed kinds in memory for performance, refreshing periodically (e.g., hourly).

### Database Keys

Suggested key prefixes for persistent storage:

- `CURATING_ACL_CONFIG` - Current configuration
- `CURATING_ACL_TRUSTED_PUBKEY_{pubkey}` - Trusted publishers
- `CURATING_ACL_BLACKLISTED_PUBKEY_{pubkey}` - Blacklisted publishers
- `CURATING_ACL_EVENT_COUNT_{pubkey}_{date}` - Daily event counts
- `CURATING_ACL_IP_EVENT_COUNT_{ip}_{date}` - IP daily event counts
- `CURATING_ACL_IP_OFFENSE_{ip}` - IP offense tracking
- `CURATING_ACL_BLOCKED_IP_{ip}` - Active IP blocks
- `CURATING_ACL_SPAM_EVENT_{event_id}` - Spam-flagged events
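A brief sketch of how these prefixes might be assembled for a key-value store such as Badger; only the prefix formats come from the list above, the helper names are illustrative. Scoping the counter keys by UTC date means a new day naturally starts from zero, matching the reset rule.

```go
// Hypothetical key builders for the suggested CURATING_ACL_* prefixes.
package curationkeys

import "fmt"

func trustedKey(pubkey string) string {
	return fmt.Sprintf("CURATING_ACL_TRUSTED_PUBKEY_%s", pubkey)
}

func blacklistedKey(pubkey string) string {
	return fmt.Sprintf("CURATING_ACL_BLACKLISTED_PUBKEY_%s", pubkey)
}

// Daily counters are date-scoped (YYYY-MM-DD in UTC) so they roll over
// at UTC midnight without an explicit reset pass.
func eventCountKey(pubkey, date string) string {
	return fmt.Sprintf("CURATING_ACL_EVENT_COUNT_%s_%s", pubkey, date)
}

func ipEventCountKey(ip, date string) string {
	return fmt.Sprintf("CURATING_ACL_IP_EVENT_COUNT_%s_%s", ip, date)
}
```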
## Security Considerations

1. **NIP-98 Authentication**: All management API calls MUST require valid NIP-98 authentication from owner or admin pubkeys
2. **IP Spoofing**: Relays SHOULD use `X-Forwarded-For` or `X-Real-IP` headers carefully, only trusting them from known reverse proxies
3. **Rate Limit Bypass**: Trusted status should be granted carefully as it bypasses all rate limiting
4. **Event Deletion**: Deleted events cannot be recovered; implementations SHOULD consider soft-delete with admin recovery option

## Compatibility

This NIP is compatible with:

- NIP-42 (Authentication): Can require auth before accepting events
- NIP-86 (Relay Management API): Uses NIP-86 for management endpoints
- NIP-98 (HTTP Auth): Uses NIP-98 for API authentication

## Reference Implementation

- ORLY Relay: https://github.com/mleku/orly

## Changelog

- Initial draft
@@ -137,7 +137,7 @@ Where `payload` is the standard Nostr message array, e.g.:
 The encrypted content structure:
 ```json
 {
-  "type": "EVENT" | "OK" | "EOSE" | "NOTICE" | "CLOSED" | "COUNT" | "AUTH",
+  "type": "EVENT" | "OK" | "EOSE" | "NOTICE" | "CLOSED" | "COUNT" | "AUTH" | "CHUNK",
   "payload": <standard_nostr_response_array>
 }
 ```
@@ -150,6 +150,7 @@ Where `payload` is the standard Nostr response array, e.g.:
 - `["CLOSED", "<sub_id>", "<message>"]`
 - `["COUNT", "<sub_id>", {"count": <n>}]`
 - `["AUTH", "<challenge>"]`
+- `[<chunk_object>]` (for CHUNK type, see Message Segmentation)

 ### Session Management

@@ -168,6 +169,85 @@ The conversation key is derived from:
 - **Secret-based auth**: ECDH between client's secret key (derived from URI secret) and relay's public key
 - **CAT auth**: ECDH between client's Nostr key and relay's public key

+### Message Segmentation
+
+Some Nostr events exceed the typical relay message size limits (commonly 64KB). NRC supports message segmentation to handle large payloads by splitting them into multiple chunks.
+
+#### When to Chunk
+
+Senders SHOULD chunk messages when the JSON-serialized response exceeds 40KB. This threshold accounts for:
+
+- NIP-44 encryption overhead (~100 bytes)
+- Base64 encoding expansion (~33%)
+- Event wrapper overhead (tags, signature, etc.)
+
+#### Chunk Message Format
+
+When a response is too large, it is split into multiple CHUNK responses:
+
+```json
+{
+  "type": "CHUNK",
+  "payload": [{
+    "type": "CHUNK",
+    "messageId": "<uuid>",
+    "index": 0,
+    "total": 3,
+    "data": "<base64_encoded_chunk>"
+  }]
+}
+```
+
+Fields:
+
+- `messageId`: A unique identifier (UUID) for the chunked message, used to correlate chunks
+- `index`: Zero-based chunk index (0, 1, 2, ...)
+- `total`: Total number of chunks in this message
+- `data`: Base64-encoded segment of the original message
+
+#### Chunking Process (Sender)
+
+1. Serialize the original response message to JSON
+2. If the serialized length exceeds the threshold (40KB), proceed with chunking
+3. Encode the JSON string as UTF-8, then Base64 encode it
+4. Split the Base64 string into chunks of the maximum chunk size
+5. Generate a unique `messageId` (UUID recommended)
+6. Send each chunk as a separate CHUNK response event
+
+Example encoding (JavaScript):
+
+```javascript
+const encoded = btoa(unescape(encodeURIComponent(jsonString)))
+```
+
+#### Reassembly Process (Receiver)
+
+1. When receiving a CHUNK response, buffer it by `messageId`
+2. Track received chunks by `index`
+3. When all chunks are received (`chunks.size === total`):
+   a. Concatenate chunk data in index order (0, 1, 2, ...)
+   b. Base64 decode the concatenated string
+   c. Parse as UTF-8 JSON to recover the original response
+4. Process the reassembled response as normal
+5. Clean up the chunk buffer
+
+Example decoding (JavaScript):
+
+```javascript
+const jsonString = decodeURIComponent(escape(atob(concatenatedBase64)))
+const response = JSON.parse(jsonString)
+```
+
+#### Chunk Buffer Management
+
+Receivers MUST implement chunk buffer cleanup:
+
+- Discard incomplete chunk buffers after 60 seconds of inactivity
+- Limit the number of concurrent incomplete messages to prevent memory exhaustion
+- Log warnings when discarding stale buffers for debugging
+
+#### Ordering and Reliability
+
+- Chunks MAY arrive out of order; receivers MUST reassemble by index
+- Missing chunks result in message loss; the incomplete buffer is eventually discarded
+- Duplicate chunks (same messageId + index) SHOULD be ignored
+- Each chunk is sent as a separate encrypted NRC response event
+
 ### Authentication

 #### Secret-Based Authentication
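To make the sender and receiver steps above concrete, here is a hedged Go sketch of chunk splitting and reassembly with the 60-second stale-buffer cutoff. The spec's own examples are JavaScript; this sketch uses Go to match the rest of the repository. `Chunk` mirrors the payload fields above, while `SplitMessage` and `ChunkBuffer` are illustrative names, not part of the spec or the reference implementation; base64-encoding the raw UTF-8 bytes is equivalent to the `btoa`/`encodeURIComponent` dance shown earlier.

```go
package nrc

import (
	"encoding/base64"
	"fmt"
	"time"
)

// Chunk mirrors the CHUNK payload object defined in the spec.
type Chunk struct {
	MessageID string `json:"messageId"`
	Index     int    `json:"index"`
	Total     int    `json:"total"`
	Data      string `json:"data"`
}

// SplitMessage base64-encodes a serialized response and splits it into
// chunks of at most maxChunk characters (sender side, steps 3-5).
func SplitMessage(messageID string, jsonBytes []byte, maxChunk int) []Chunk {
	b64 := base64.StdEncoding.EncodeToString(jsonBytes)
	total := (len(b64) + maxChunk - 1) / maxChunk
	chunks := make([]Chunk, 0, total)
	for i := 0; i < total; i++ {
		end := (i + 1) * maxChunk
		if end > len(b64) {
			end = len(b64)
		}
		chunks = append(chunks, Chunk{messageID, i, total, b64[i*maxChunk : end]})
	}
	return chunks
}

// buffer holds partially received chunks for one messageId.
type buffer struct {
	parts    map[int]string
	total    int
	lastSeen time.Time
}

// ChunkBuffer reassembles chunks and drops stale incomplete buffers.
type ChunkBuffer struct{ bufs map[string]*buffer }

func NewChunkBuffer() *ChunkBuffer { return &ChunkBuffer{bufs: map[string]*buffer{}} }

// Add stores a chunk and returns the decoded message once all chunks arrived.
func (cb *ChunkBuffer) Add(c Chunk) ([]byte, error) {
	b, ok := cb.bufs[c.MessageID]
	if !ok {
		b = &buffer{parts: map[int]string{}, total: c.Total}
		cb.bufs[c.MessageID] = b
	}
	b.parts[c.Index] = c.Data // a duplicate index is harmlessly overwritten
	b.lastSeen = time.Now()
	if len(b.parts) < b.total {
		return nil, nil // still incomplete
	}
	var joined string
	for i := 0; i < b.total; i++ {
		part, ok := b.parts[i]
		if !ok {
			return nil, fmt.Errorf("missing chunk %d", i)
		}
		joined += part // concatenate in index order regardless of arrival order
	}
	delete(cb.bufs, c.MessageID)
	return base64.StdEncoding.DecodeString(joined)
}

// Sweep discards buffers idle longer than 60 seconds, per the spec.
func (cb *ChunkBuffer) Sweep() {
	for id, b := range cb.bufs {
		if time.Since(b.lastSeen) > 60*time.Second {
			delete(cb.bufs, id)
		}
	}
}
```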
@@ -208,6 +288,9 @@ The conversation key is derived from:
 4. Match responses using the `e` tag (references request event ID)
 5. Handle EOSE by waiting for kind 24892 with type "EOSE" in content
 6. For subscriptions, maintain mapping of internal sub IDs to tunnel session
+7. **Chunking**: Maintain a chunk buffer map keyed by `messageId`
+8. **Chunking**: When receiving CHUNK responses, buffer chunks and reassemble when complete
+9. **Chunking**: Implement 60-second timeout for incomplete chunk buffers

 ## Bridge Implementation Notes

@@ -217,10 +300,14 @@ The conversation key is derived from:
 4. Capture all relay responses and wrap in kind 24892
 5. Sign with relay's key and publish to rendezvous relay
 6. Maintain session state for subscription mapping
+7. **Chunking**: Check response size before sending; chunk if > 40KB
+8. **Chunking**: Use consistent messageId (UUID) across all chunks of a message
+9. **Chunking**: Send chunks in order (index 0, 1, 2, ...) for optimal reassembly

 ## Reference Implementations

-- ORLY Relay: [https://git.mleku.dev/mleku/next.orly.dev](https://git.mleku.dev/mleku/next.orly.dev)
+- ORLY Relay (Bridge): [https://git.mleku.dev/mleku/next.orly.dev](https://git.mleku.dev/mleku/next.orly.dev)
+- Smesh Client: [https://git.mleku.dev/mleku/smesh](https://git.mleku.dev/mleku/smesh)

 ## See Also
go.mod (8 lines changed)

@@ -3,12 +3,14 @@ module next.orly.dev
 go 1.25.3

 require (
-	git.mleku.dev/mleku/nostr v1.0.12
+	git.mleku.dev/mleku/nostr v1.0.13
 	github.com/adrg/xdg v0.5.3
 	github.com/alexflint/go-arg v1.6.1
 	github.com/aperturerobotics/go-indexeddb v0.2.3
+	github.com/bits-and-blooms/bloom/v3 v3.7.1
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0
 	github.com/dgraph-io/badger/v4 v4.8.0
+	github.com/google/uuid v1.6.0
 	github.com/gorilla/websocket v1.5.3
 	github.com/hack-pad/safejs v0.1.1
 	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
@@ -22,6 +24,7 @@ require (
 	github.com/stretchr/testify v1.11.1
 	github.com/vertex-lab/nostr-sqlite v0.3.2
 	go-simpler.org/env v0.12.0
+	go.etcd.io/bbolt v1.4.3
 	go.uber.org/atomic v1.11.0
 	golang.org/x/crypto v0.46.0
 	golang.org/x/lint v0.0.0-20241112194109-818c5a804067
@@ -37,7 +40,6 @@ require (
 	github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 // indirect
 	github.com/alexflint/go-scalar v1.2.0 // indirect
 	github.com/bits-and-blooms/bitset v1.24.2 // indirect
-	github.com/bits-and-blooms/bloom/v3 v3.7.1 // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect
 	github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
 	github.com/bytedance/sonic v1.13.1 // indirect
@@ -56,7 +58,6 @@ require (
 	github.com/google/btree v1.1.2 // indirect
 	github.com/google/flatbuffers v25.9.23+incompatible // indirect
 	github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
-	github.com/google/uuid v1.6.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/klauspost/cpuid/v2 v2.3.0 // indirect
@@ -72,7 +73,6 @@ require (
 	github.com/tidwall/match v1.1.1 // indirect
 	github.com/tidwall/pretty v1.2.1 // indirect
 	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
-	go.etcd.io/bbolt v1.4.3 // indirect
 	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
 	go.opentelemetry.io/otel v1.38.0 // indirect
 	go.opentelemetry.io/otel/metric v1.38.0 // indirect
go.sum (5 lines changed)

@@ -1,5 +1,5 @@
-git.mleku.dev/mleku/nostr v1.0.12 h1:bjsFUh1Q3fGpU7qsqxggGgrGGUt2OBdu1w8hjDM4gJE=
+git.mleku.dev/mleku/nostr v1.0.13 h1:FqeOQ9ZX8AFVsAI6XisQkB6cgmhn9DNQ2a8li9gx7aY=
-git.mleku.dev/mleku/nostr v1.0.12/go.mod h1:kJwSMmLRnAJ7QJtgXDv2wGgceFU0luwVqrgAL3MI93M=
+git.mleku.dev/mleku/nostr v1.0.13/go.mod h1:kJwSMmLRnAJ7QJtgXDv2wGgceFU0luwVqrgAL3MI93M=
 github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
 github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
 github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNNZqGeYq4PnYOlwlOVIvSyNaIy0ykg=
@@ -161,6 +161,7 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
 github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
 github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
 github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
+github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg=
 github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ=
 github.com/vertex-lab/nostr-sqlite v0.3.2 h1:8nZYYIwiKnWLA446qA/wL/Gy+bU0kuaxdLfUyfeTt/E=
 github.com/vertex-lab/nostr-sqlite v0.3.2/go.mod h1:5bw1wMgJhSdrumsZAWxqy+P0u1g+q02PnlGQn15dnSM=
@@ -138,7 +138,7 @@ func (f *Follows) Configure(cfg ...any) (err error) {
 	if f.cfg.FollowsThrottleEnabled {
 		perEvent := f.cfg.FollowsThrottlePerEvent
 		if perEvent == 0 {
-			perEvent = 200 * time.Millisecond
+			perEvent = 25 * time.Millisecond
 		}
 		maxDelay := f.cfg.FollowsThrottleMaxDelay
 		if maxDelay == 0 {
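To see what this default change means in practice, here is a hedged sketch of the progressive delay formula implied by these settings; the actual accumulation logic lives elsewhere in the Follows ACL, and `throttleDelay` is an illustrative model, not the shipped function.

```go
package main

import (
	"fmt"
	"time"
)

// throttleDelay models the progressive throttle: each event from a
// non-followed user adds perEvent of delay, capped at maxDelay.
func throttleDelay(eventCount int, perEvent, maxDelay time.Duration) time.Duration {
	d := time.Duration(eventCount) * perEvent
	if d > maxDelay {
		return maxDelay
	}
	return d
}

func main() {
	perEvent := 25 * time.Millisecond // new default (was 200ms)
	maxDelay := 60 * time.Second
	for _, n := range []int{1, 100, 2400, 5000} {
		fmt.Printf("%d events -> %s\n", n, throttleDelay(n, perEvent, maxDelay))
	}
	// With 25ms increments the 60s cap is reached after 2400 events,
	// versus 300 events at the old 200ms default.
}
```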
@@ -200,6 +200,12 @@ func (s *Server) handleUpload(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	// Check bandwidth rate limit (non-followed users)
+	if !s.checkBandwidthLimit(pubkey, remoteAddr, int64(len(body))) {
+		s.setErrorResponse(w, http.StatusTooManyRequests, "upload rate limit exceeded, try again later")
+		return
+	}
+
 	// Calculate SHA256 after auth check
 	sha256Hash := CalculateSHA256(body)
 	sha256Hex := hex.Enc(sha256Hash)
@@ -647,6 +653,12 @@ func (s *Server) handleMirror(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	// Check bandwidth rate limit (non-followed users)
+	if !s.checkBandwidthLimit(pubkey, remoteAddr, int64(len(body))) {
+		s.setErrorResponse(w, http.StatusTooManyRequests, "upload rate limit exceeded, try again later")
+		return
+	}
+
 	// Note: pubkey may be nil for anonymous uploads if ACL allows it

 	// Detect MIME type from remote response
@@ -726,6 +738,12 @@ func (s *Server) handleMediaUpload(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	// Check bandwidth rate limit (non-followed users)
+	if !s.checkBandwidthLimit(pubkey, remoteAddr, int64(len(body))) {
+		s.setErrorResponse(w, http.StatusTooManyRequests, "upload rate limit exceeded, try again later")
+		return
+	}
+
 	// Note: pubkey may be nil for anonymous uploads if ACL allows it

 	// Optimize media (placeholder - actual optimization would be implemented here)
pkg/blossom/ratelimit.go (new file, 131 lines)

@@ -0,0 +1,131 @@
package blossom

import (
	"sync"
	"time"
)

// BandwidthState tracks upload bandwidth for an identity
type BandwidthState struct {
	BucketBytes int64     // Current token bucket level (bytes available)
	LastUpdate  time.Time // Last time bucket was updated
}

// BandwidthLimiter implements token bucket rate limiting for uploads.
// Each identity gets a bucket that replenishes at dailyLimit/day rate.
// Uploads consume tokens from the bucket.
type BandwidthLimiter struct {
	mu         sync.Mutex
	states     map[string]*BandwidthState // keyed by pubkey hex or IP
	dailyLimit int64                      // bytes per day
	burstLimit int64                      // max bucket size (burst capacity)
	refillRate float64                    // bytes per second refill rate
}

// NewBandwidthLimiter creates a new bandwidth limiter.
// dailyLimitMB is the average daily limit in megabytes.
// burstLimitMB is the maximum burst capacity in megabytes.
func NewBandwidthLimiter(dailyLimitMB, burstLimitMB int64) *BandwidthLimiter {
	dailyBytes := dailyLimitMB * 1024 * 1024
	burstBytes := burstLimitMB * 1024 * 1024

	return &BandwidthLimiter{
		states:     make(map[string]*BandwidthState),
		dailyLimit: dailyBytes,
		burstLimit: burstBytes,
		refillRate: float64(dailyBytes) / 86400.0, // bytes per second
	}
}

// CheckAndConsume checks if an upload of the given size is allowed for the identity,
// and if so, consumes the tokens. Returns true if allowed, false if rate limited.
// The identity should be pubkey hex for authenticated users, or IP for anonymous.
func (bl *BandwidthLimiter) CheckAndConsume(identity string, sizeBytes int64) bool {
	bl.mu.Lock()
	defer bl.mu.Unlock()

	now := time.Now()
	state, exists := bl.states[identity]

	if !exists {
		// New identity starts with full burst capacity
		state = &BandwidthState{
			BucketBytes: bl.burstLimit,
			LastUpdate:  now,
		}
		bl.states[identity] = state
	} else {
		// Refill bucket based on elapsed time
		elapsed := now.Sub(state.LastUpdate).Seconds()
		refill := int64(elapsed * bl.refillRate)
		state.BucketBytes += refill
		if state.BucketBytes > bl.burstLimit {
			state.BucketBytes = bl.burstLimit
		}
		state.LastUpdate = now
	}

	// Check if upload fits in bucket
	if state.BucketBytes >= sizeBytes {
		state.BucketBytes -= sizeBytes
		return true
	}

	return false
}

// GetAvailable returns the currently available bytes for an identity.
func (bl *BandwidthLimiter) GetAvailable(identity string) int64 {
	bl.mu.Lock()
	defer bl.mu.Unlock()

	state, exists := bl.states[identity]
	if !exists {
		return bl.burstLimit // New users have full capacity
	}

	// Calculate current level with refill
	now := time.Now()
	elapsed := now.Sub(state.LastUpdate).Seconds()
	refill := int64(elapsed * bl.refillRate)
	available := state.BucketBytes + refill
	if available > bl.burstLimit {
		available = bl.burstLimit
	}

	return available
}

// GetTimeUntilAvailable returns how long until the given bytes will be available.
func (bl *BandwidthLimiter) GetTimeUntilAvailable(identity string, sizeBytes int64) time.Duration {
	available := bl.GetAvailable(identity)
	if available >= sizeBytes {
		return 0
	}

	needed := sizeBytes - available
	seconds := float64(needed) / bl.refillRate
	return time.Duration(seconds * float64(time.Second))
}

// Cleanup removes entries that have fully replenished (at burst limit).
func (bl *BandwidthLimiter) Cleanup() {
	bl.mu.Lock()
	defer bl.mu.Unlock()

	now := time.Now()
	for key, state := range bl.states {
		elapsed := now.Sub(state.LastUpdate).Seconds()
		refill := int64(elapsed * bl.refillRate)
		if state.BucketBytes+refill >= bl.burstLimit {
			delete(bl.states, key)
		}
	}
}

// Stats returns the number of tracked identities.
func (bl *BandwidthLimiter) Stats() int {
	bl.mu.Lock()
	defer bl.mu.Unlock()
	return len(bl.states)
}
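For orientation, a minimal usage sketch of this limiter; the numbers mirror the 10MB/50MB defaults applied in `NewServer` below, and the import path is an assumption based on this repository's module layout.

```go
package main

import (
	"fmt"
	"time"

	// assumed path: module next.orly.dev, package pkg/blossom (per this diff)
	"next.orly.dev/pkg/blossom"
)

func main() {
	// 10MB/day average, 50MB burst - the defaults applied in NewServer below.
	bl := blossom.NewBandwidthLimiter(10, 50)

	// A first 50MB upload drains the full burst bucket and is allowed.
	fmt.Println(bl.CheckAndConsume("203.0.113.7", 50*1024*1024)) // true

	// An immediate second upload is rejected; the bucket refills at roughly
	// 10MB/86400s, so the caller can report an estimated retry time.
	if !bl.CheckAndConsume("203.0.113.7", 1024*1024) {
		wait := bl.GetTimeUntilAvailable("203.0.113.7", 1024*1024)
		fmt.Printf("rate limited, retry in ~%s\n", wait.Round(time.Second))
	}
}
```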
@@ -19,6 +19,9 @@ type Server struct {
 	maxBlobSize      int64
 	allowedMimeTypes map[string]bool
 	requireAuth      bool
+
+	// Rate limiting for uploads
+	bandwidthLimiter *BandwidthLimiter
 }

 // Config holds configuration for the Blossom server
@@ -27,6 +30,11 @@ type Config struct {
 	MaxBlobSize      int64
 	AllowedMimeTypes []string
 	RequireAuth      bool
+
+	// Rate limiting (for non-followed users)
+	RateLimitEnabled bool
+	DailyLimitMB     int64
+	BurstLimitMB     int64
 }

 // NewServer creates a new Blossom server instance
@@ -48,6 +56,20 @@ func NewServer(db *database.D, aclRegistry *acl.S, cfg *Config) *Server {
 		}
 	}

+	// Initialize bandwidth limiter if enabled
+	var bwLimiter *BandwidthLimiter
+	if cfg.RateLimitEnabled {
+		dailyMB := cfg.DailyLimitMB
+		if dailyMB <= 0 {
+			dailyMB = 10 // 10MB default
+		}
+		burstMB := cfg.BurstLimitMB
+		if burstMB <= 0 {
+			burstMB = 50 // 50MB default burst
+		}
+		bwLimiter = NewBandwidthLimiter(dailyMB, burstMB)
+	}
+
 	return &Server{
 		db:      db,
 		storage: storage,
@@ -56,6 +78,7 @@ func NewServer(db *database.D, aclRegistry *acl.S, cfg *Config) *Server {
 		maxBlobSize:      cfg.MaxBlobSize,
 		allowedMimeTypes: allowedMap,
 		requireAuth:      cfg.RequireAuth,
+		bandwidthLimiter: bwLimiter,
 	}
 }
@@ -208,6 +231,44 @@ func (s *Server) checkACL(
 	return actual >= required
 }

+// isRateLimitExempt returns true if the user is exempt from rate limiting.
+// Users with write access or higher (followed users, admins, owners) are exempt.
+func (s *Server) isRateLimitExempt(pubkey []byte, remoteAddr string) bool {
+	if s.acl == nil {
+		return true // No ACL configured, no rate limiting
+	}
+
+	level := s.acl.GetAccessLevel(pubkey, remoteAddr)
+
+	// Followed users get "write" level, admins/owners get higher
+	// Only "read" and "none" are rate limited
+	return level == "write" || level == "admin" || level == "owner"
+}
+
+// checkBandwidthLimit checks if the upload is allowed under rate limits.
+// Returns true if allowed, false if rate limited.
+// Exempt users (followed, admin, owner) always return true.
+func (s *Server) checkBandwidthLimit(pubkey []byte, remoteAddr string, sizeBytes int64) bool {
+	if s.bandwidthLimiter == nil {
+		return true // No rate limiting configured
+	}
+
+	// Check if user is exempt
+	if s.isRateLimitExempt(pubkey, remoteAddr) {
+		return true
+	}
+
+	// Use pubkey hex if available, otherwise IP
+	var identity string
+	if len(pubkey) > 0 {
+		identity = string(pubkey) // Will be converted to hex in handler
+	} else {
+		identity = remoteAddr
+	}
+
+	return s.bandwidthLimiter.CheckAndConsume(identity, sizeBytes)
+}
+
 // BaseURLKey is the context key for the base URL (exported for use by app handler)
 type BaseURLKey struct{}
@@ -4,12 +4,17 @@ package database

 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"sort"
 	"time"

 	"github.com/dgraph-io/badger/v4"
+	"github.com/minio/sha256-simd"
+
+	"git.mleku.dev/mleku/nostr/encoders/filter"
+	"git.mleku.dev/mleku/nostr/encoders/hex"
+	"git.mleku.dev/mleku/nostr/encoders/tag"
 )

 // CuratingConfig represents the configuration for curating ACL mode
@@ -969,7 +974,10 @@ func kindInCategory(kind int, category string) bool {
 		"dm": {4, 14, 1059},
 		"longform": {30023, 30024},
 		"media": {1063, 20, 21, 22},
-		"marketplace": {30017, 30018, 30019, 30020, 1021, 1022},
+		"marketplace": {30017, 30018, 30019, 30020, 1021, 1022}, // Legacy alias
+		"marketplace_nip15": {30017, 30018, 30019, 30020, 1021, 1022},
+		"marketplace_nip99": {30402, 30403, 30405, 30406, 31555}, // NIP-99/Gamma Markets (Plebeian Market)
+		"order_communication": {16, 17}, // Gamma Markets order messages
 		"groups_nip29": {9, 10, 11, 12, 9000, 9001, 9002, 39000, 39001, 39002},
 		"groups_nip72": {34550, 1111, 4550},
 		"lists": {10000, 10001, 10003, 30000, 30001, 30003},
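A quick hedged illustration of how the new category names resolve. `kindInCategory` is unexported, so a sketch like this would have to live in the same package; the helper below is illustrative, not shipped code.

```go
package database

import "fmt"

// exampleKindCategories shows how the category names added above resolve.
func exampleKindCategories() {
	fmt.Println(kindInCategory(30402, "marketplace_nip99"))  // true: NIP-99 classified listing
	fmt.Println(kindInCategory(16, "order_communication"))   // true: Gamma Markets order message
	fmt.Println(kindInCategory(30017, "marketplace"))        // true: legacy alias still matches
	fmt.Println(kindInCategory(30017, "marketplace_nip99"))  // false: NIP-15 kind, not in NIP-99
}
```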
@@ -987,3 +995,236 @@ func kindInCategory(kind int, category string) bool {
 	}
 	return false
 }
+
+// ==================== Database Scanning ====================
+
+// ScanResult contains the results of scanning all pubkeys in the database
+type ScanResult struct {
+	TotalPubkeys int `json:"total_pubkeys"`
+	TotalEvents  int `json:"total_events"`
+	Skipped      int `json:"skipped"` // Trusted/blacklisted users skipped
+}
+
+// ScanAllPubkeys scans the database to find all unique pubkeys and count their events.
+// This populates the event count data needed for the unclassified users list.
+// It uses the SerialPubkey index to find all pubkeys, then counts events for each.
+func (c *CuratingACL) ScanAllPubkeys() (*ScanResult, error) {
+	result := &ScanResult{}
+
+	// First, get all trusted and blacklisted pubkeys to skip
+	trusted, err := c.ListTrustedPubkeys()
+	if err != nil {
+		return nil, err
+	}
+	blacklisted, err := c.ListBlacklistedPubkeys()
+	if err != nil {
+		return nil, err
+	}
+
+	excludeSet := make(map[string]struct{})
+	for _, t := range trusted {
+		excludeSet[t.Pubkey] = struct{}{}
+	}
+	for _, b := range blacklisted {
+		excludeSet[b.Pubkey] = struct{}{}
+	}
+
+	// Scan the SerialPubkey index to get all pubkeys
+	pubkeys := make(map[string]struct{})
+
+	err = c.View(func(txn *badger.Txn) error {
+		// SerialPubkey prefix is "spk"
+		prefix := []byte("spk")
+		it := txn.NewIterator(badger.IteratorOptions{Prefix: prefix})
+		defer it.Close()
+
+		for it.Rewind(); it.Valid(); it.Next() {
+			item := it.Item()
+			// The value contains the 32-byte pubkey
+			val, err := item.ValueCopy(nil)
+			if err != nil {
+				continue
+			}
+			if len(val) == 32 {
+				// Convert to hex
+				pubkeyHex := fmt.Sprintf("%x", val)
+				pubkeys[pubkeyHex] = struct{}{}
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	result.TotalPubkeys = len(pubkeys)
+
+	// For each pubkey, count events and store the count
+	today := time.Now().Format("2006-01-02")
+
+	for pubkeyHex := range pubkeys {
+		// Skip if trusted or blacklisted
+		if _, excluded := excludeSet[pubkeyHex]; excluded {
+			result.Skipped++
+			continue
+		}
+
+		// Count events for this pubkey using the Pubkey index
+		count, err := c.countEventsForPubkey(pubkeyHex)
+		if err != nil {
+			continue
+		}
+
+		if count > 0 {
+			result.TotalEvents += count
+
+			// Store the event count
+			ec := PubkeyEventCount{
+				Pubkey:    pubkeyHex,
+				Date:      today,
+				Count:     count,
+				LastEvent: time.Now(),
+			}
+
+			err = c.Update(func(txn *badger.Txn) error {
+				key := c.getEventCountKey(pubkeyHex, today)
+				data, err := json.Marshal(ec)
+				if err != nil {
+					return err
+				}
+				return txn.Set(key, data)
+			})
+			if err != nil {
+				continue
+			}
+		}
+	}
+
+	return result, nil
+}
+
+// EventSummary represents a simplified event for display in the UI
+type EventSummary struct {
+	ID        string `json:"id"`
+	Kind      int    `json:"kind"`
+	Content   string `json:"content"`
+	CreatedAt int64  `json:"created_at"`
+}
+
+// GetEventsForPubkey fetches events for a pubkey, returning simplified event data.
+// limit specifies the max events to return; offset is for pagination.
+func (c *CuratingACL) GetEventsForPubkey(pubkeyHex string, limit, offset int) ([]EventSummary, int, error) {
+	var events []EventSummary
+
+	// First, count total events for this pubkey
+	totalCount, err := c.countEventsForPubkey(pubkeyHex)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// Decode the pubkey hex to bytes
+	pubkeyBytes, err := hex.DecAppend(nil, []byte(pubkeyHex))
+	if err != nil {
+		return nil, 0, fmt.Errorf("invalid pubkey hex: %w", err)
+	}
+
+	// Create a filter to query events by author
+	// Use a larger limit to account for offset, then slice
+	queryLimit := uint(limit + offset)
+	f := &filter.F{
+		Authors: tag.NewFromBytesSlice(pubkeyBytes),
+		Limit:   &queryLimit,
+	}
+
+	// Query events using the database's QueryEvents method
+	ctx := context.Background()
+	evs, err := c.D.QueryEvents(ctx, f)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	// Apply offset and convert to EventSummary
+	for i, ev := range evs {
+		if i < offset {
+			continue
+		}
+		if len(events) >= limit {
+			break
+		}
+		events = append(events, EventSummary{
+			ID:        hex.Enc(ev.ID),
+			Kind:      int(ev.Kind),
+			Content:   string(ev.Content),
+			CreatedAt: ev.CreatedAt,
+		})
+	}
+
+	return events, totalCount, nil
+}
+
+// DeleteEventsForPubkey deletes all events for a given pubkey.
+// Returns the number of events deleted.
+func (c *CuratingACL) DeleteEventsForPubkey(pubkeyHex string) (int, error) {
+	// Decode the pubkey hex to bytes
+	pubkeyBytes, err := hex.DecAppend(nil, []byte(pubkeyHex))
+	if err != nil {
+		return 0, fmt.Errorf("invalid pubkey hex: %w", err)
+	}
+
+	// Create a filter to find all events by this author
+	f := &filter.F{
+		Authors: tag.NewFromBytesSlice(pubkeyBytes),
+	}
+
+	// Query all events for this pubkey
+	ctx := context.Background()
+	evs, err := c.D.QueryEvents(ctx, f)
+	if err != nil {
+		return 0, err
+	}
+
+	// Delete each event
+	deleted := 0
+	for _, ev := range evs {
+		if err := c.D.DeleteEvent(ctx, ev.ID); err != nil {
+			// Log error but continue deleting
+			continue
+		}
+		deleted++
+	}
+
+	return deleted, nil
+}
+
+// countEventsForPubkey counts events in the database for a given pubkey hex string
+func (c *CuratingACL) countEventsForPubkey(pubkeyHex string) (int, error) {
+	count := 0
+
+	// Decode the pubkey hex to bytes
+	pubkeyBytes := make([]byte, 32)
+	for i := 0; i < 32 && i*2+1 < len(pubkeyHex); i++ {
+		fmt.Sscanf(pubkeyHex[i*2:i*2+2], "%02x", &pubkeyBytes[i])
+	}
+
+	// Compute the pubkey hash (SHA256 of pubkey, first 8 bytes)
+	// This matches the PubHash type in indexes/types/pubhash.go
+	pkh := sha256.Sum256(pubkeyBytes)
+
+	// Scan the Pubkey index (prefix "pc-") for this pubkey
+	err := c.View(func(txn *badger.Txn) error {
+		// Build prefix: "pc-" + 8-byte SHA256 hash of pubkey
+		prefix := make([]byte, 3+8)
+		copy(prefix[:3], []byte("pc-"))
+		copy(prefix[3:], pkh[:8])
+
+		it := txn.NewIterator(badger.IteratorOptions{Prefix: prefix})
+		defer it.Close()
+
+		for it.Rewind(); it.Valid(); it.Next() {
+			count++
+		}
+		return nil
+	})
+
+	return count, err
+}
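For orientation, a hedged sketch of how a management endpoint might invoke the scan added above. Only `ScanAllPubkeys` and `ScanResult` come from this changeset; the HTTP plumbing, handler name, and import path are illustrative assumptions.

```go
package main

import (
	"encoding/json"
	"net/http"

	// assumed path for the package containing CuratingACL
	"next.orly.dev/pkg/database"
)

// handleScan is a hypothetical admin endpoint that triggers a full scan
// and returns the ScanResult as JSON.
func handleScan(w http.ResponseWriter, r *http.Request, cacl *database.CuratingACL) {
	res, err := cacl.ScanAllPubkeys()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	// ScanResult marshals to {"total_pubkeys":..,"total_events":..,"skipped":..}
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(res)
}
```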
@@ -6,6 +6,7 @@ import (
 	"sort"

 	"lol.mleku.dev/chk"
+	"lol.mleku.dev/errorf"
 	"lol.mleku.dev/log"
 	"next.orly.dev/pkg/database/indexes"
 	types2 "next.orly.dev/pkg/database/indexes/types"
@@ -44,6 +45,12 @@ func NormalizeTagValueForHash(key byte, valueBytes []byte) []byte {
 func CreateIdHashFromData(data []byte) (i *types2.IdHash, err error) {
 	i = new(types2.IdHash)
+
+	// Skip empty data to avoid noisy errors
+	if len(data) == 0 {
+		err = errorf.E("CreateIdHashFromData: empty ID provided")
+		return
+	}
+
 	// If data looks like hex string and has the right length for hex-encoded
 	// sha256
 	if len(data) == 64 {
@@ -95,6 +102,11 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 	// should be an error, but convention just ignores it.
 	if f.Ids.Len() > 0 {
 		for _, id := range f.Ids.T {
+			// Skip empty IDs - some filters have empty ID values
+			if len(id) == 0 {
+				log.D.F("GetIndexesFromFilter: skipping empty ID in filter (ids=%d)", f.Ids.Len())
+				continue
+			}
 			if err = func() (err error) {
 				var i *types2.IdHash
 				if i, err = CreateIdHashFromData(id); chk.E(err) {
@@ -20,6 +20,10 @@ import (

 func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
 	// log.T.F("GetSerialById: input id=%s", hex.Enc(id))
+	if len(id) == 0 {
+		err = errorf.E("GetSerialById: called with empty ID")
+		return
+	}
 	var idxs []Range
 	if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.NewFromBytesSlice(id)}); chk.E(err) {
 		return
@@ -102,6 +106,10 @@ func (d *D) GetSerialsByIdsWithFilter(

 	// Process each ID sequentially
 	for _, id := range ids.T {
+		// Skip empty IDs
+		if len(id) == 0 {
+			continue
+		}
 		// idHex := hex.Enc(id)

 		// Get the index prefix for this ID
@@ -24,8 +24,8 @@ func (i *IdHash) Set(idh []byte) {
 func (i *IdHash) FromId(id []byte) (err error) {
 	if len(id) != sha256.Size {
 		err = errorf.E(
-			"FromId: invalid ID length, got %d require %d", len(id),
-			sha256.Size,
+			"FromId: invalid ID length, got %d require %d (data=%x)", len(id),
+			sha256.Size, id,
 		)
 		return
 	}
@@ -3,6 +3,7 @@ package routing
 import (
 	"git.mleku.dev/mleku/nostr/encoders/event"
 	"git.mleku.dev/mleku/nostr/encoders/kind"
+	"lol.mleku.dev/log"
 )

 // Publisher abstracts event delivery to subscribers.
@@ -22,6 +23,7 @@ func IsEphemeral(k uint16) bool {
 // - Are immediately delivered to subscribers
 func MakeEphemeralHandler(publisher Publisher) Handler {
 	return func(ev *event.E, authedPubkey []byte) Result {
+		log.I.F("ephemeral handler received event kind %d, id %0x", ev.Kind, ev.ID[:8])
 		// Clone and deliver immediately without persistence
 		cloned := ev.Clone()
 		go publisher.Deliver(cloned)
@@ -10,12 +10,15 @@ import (
 	"git.mleku.dev/mleku/nostr/encoders/filter"
 	"git.mleku.dev/mleku/nostr/encoders/hex"
 	"git.mleku.dev/mleku/nostr/encoders/tag"
+	"lol.mleku.dev/log"
 	"next.orly.dev/pkg/database/indexes/types"
 	"next.orly.dev/pkg/interfaces/store"
 )

 // QueryEvents retrieves events matching the given filter
 func (n *N) QueryEvents(c context.Context, f *filter.F) (evs event.S, err error) {
+	log.T.F("Neo4j QueryEvents called with filter: kinds=%v, authors=%d, tags=%v",
+		f.Kinds != nil, f.Authors != nil && len(f.Authors.T) > 0, f.Tags != nil)
 	return n.QueryEventsWithOptions(c, f, false, false)
 }

@@ -101,6 +104,7 @@ func (n *N) buildCypherQuery(f *filter.F, includeDeleteEvents bool) (string, map
 		// Normalize to lowercase hex using our utility function
 		// This handles both binary-encoded pubkeys and hex string pubkeys (including uppercase)
 		hexAuthor := NormalizePubkeyHex(author)
+		log.T.F("Neo4j author filter: raw_len=%d, normalized=%q", len(author), hexAuthor)
 		if hexAuthor == "" {
 			continue
 		}
@@ -130,30 +134,39 @@ func (n *N) buildCypherQuery(f *filter.F, includeDeleteEvents bool) (string, map
 	}

 	// Time range filters - for temporal queries
-	if f.Since != nil {
+	// Note: Check both pointer and value - a zero timestamp (Unix epoch 1970) is almost
+	// certainly not a valid constraint as Nostr events didn't exist then
+	if f.Since != nil && f.Since.V > 0 {
 		params["since"] = f.Since.V
 		whereClauses = append(whereClauses, "e.created_at >= $since")
 	}
-	if f.Until != nil {
+	if f.Until != nil && f.Until.V > 0 {
 		params["until"] = f.Until.V
 		whereClauses = append(whereClauses, "e.created_at <= $until")
 	}

 	// Tag filters - this is where Neo4j's graph capabilities shine
-	// We can efficiently traverse tag relationships
+	// We use EXISTS subqueries to efficiently filter events by tags
+	// This ensures events are only returned if they have matching tags
 	tagIndex := 0
 	if f.Tags != nil {
 		for _, tagValues := range *f.Tags {
 			if len(tagValues.T) > 0 {
-				tagVarName := fmt.Sprintf("t%d", tagIndex)
 				tagTypeParam := fmt.Sprintf("tagType_%d", tagIndex)
 				tagValuesParam := fmt.Sprintf("tagValues_%d", tagIndex)

-				// Add tag relationship to MATCH clause
-				matchClause += fmt.Sprintf(" OPTIONAL MATCH (e)-[:TAGGED_WITH]->(%s:Tag)", tagVarName)
-
-				// The first element is the tag type (e.g., "e", "p", etc.)
-				tagType := string(tagValues.T[0])
+				// The first element is the tag type (e.g., "e", "p", "#e", "#p", etc.)
+				// Filter tags may have "#" prefix (e.g., "#d" for d-tag filters)
+				// Event tags are stored without prefix, so we must strip it
+				tagTypeBytes := tagValues.T[0]
+				var tagType string
+				if len(tagTypeBytes) > 0 && tagTypeBytes[0] == '#' {
+					tagType = string(tagTypeBytes[1:]) // Strip "#" prefix
+				} else {
+					tagType = string(tagTypeBytes)
+				}
+
+				log.T.F("Neo4j tag filter: type=%q (raw=%q, len=%d)", tagType, string(tagTypeBytes), len(tagTypeBytes))

 				// Convert remaining tag values to strings (skip first element which is the type)
 				// For e/p tags, use NormalizePubkeyHex to handle binary encoding and uppercase hex
@@ -162,26 +175,34 @@ func (n *N) buildCypherQuery(f *filter.F, includeDeleteEvents bool) (string, map
 					if tagType == "e" || tagType == "p" {
 						// Normalize e/p tag values to lowercase hex (handles binary encoding)
 						normalized := NormalizePubkeyHex(tv)
+						log.T.F("Neo4j tag filter: %s-tag value normalized: %q (raw len=%d, binary=%v)",
+							tagType, normalized, len(tv), IsBinaryEncoded(tv))
 						if normalized != "" {
 							tagValueStrings = append(tagValueStrings, normalized)
 						}
 					} else {
 						// For other tags, use direct string conversion
-						tagValueStrings = append(tagValueStrings, string(tv))
+						val := string(tv)
+						log.T.F("Neo4j tag filter: %s-tag value: %q (len=%d)", tagType, val, len(val))
+						tagValueStrings = append(tagValueStrings, val)
 					}
 				}

 				// Skip if no valid values after normalization
 				if len(tagValueStrings) == 0 {
+					log.W.F("Neo4j tag filter: no valid values for tag type %q, skipping", tagType)
 					continue
 				}

-				// Add WHERE conditions for this tag
+				log.T.F("Neo4j tag filter: type=%s, values=%v", tagType, tagValueStrings)
+
+				// Use EXISTS subquery to filter events that have matching tags
+				// This is more correct than OPTIONAL MATCH because it requires the tag to exist
 				params[tagTypeParam] = tagType
 				params[tagValuesParam] = tagValueStrings
 				whereClauses = append(whereClauses,
-					fmt.Sprintf("(%s.type = $%s AND %s.value IN $%s)",
-						tagVarName, tagTypeParam, tagVarName, tagValuesParam))
+					fmt.Sprintf("EXISTS { MATCH (e)-[:TAGGED_WITH]->(t:Tag) WHERE t.type = $%s AND t.value IN $%s }",
+						tagTypeParam, tagValuesParam))

 				tagIndex++
 			}
@@ -248,6 +269,26 @@ RETURN e.id AS id,
 	// Combine all parts
 	cypher := matchClause + whereClause + returnClause + orderClause + limitClause
+
+	// Log the generated query for debugging
+	log.T.F("Neo4j query: %s", cypher)
+	// Log params at trace level for debugging
+	var paramSummary strings.Builder
+	for k, v := range params {
+		switch val := v.(type) {
+		case []string:
+			if len(val) <= 3 {
+				paramSummary.WriteString(fmt.Sprintf("%s: %v ", k, val))
+			} else {
+				paramSummary.WriteString(fmt.Sprintf("%s: [%d values] ", k, len(val)))
+			}
+		case []int64:
+			paramSummary.WriteString(fmt.Sprintf("%s: %v ", k, val))
+		default:
+			paramSummary.WriteString(fmt.Sprintf("%s: %v ", k, v))
+		}
+	}
+	log.T.F("Neo4j params: %s", paramSummary.String())
+
 	return cypher, params
 }

@@ -300,19 +341,17 @@ func (n *N) parseEventsFromResult(result *CollectedResult) ([]*event.E, error) {
 			_ = tags.UnmarshalJSON([]byte(tagsStr))
 		}

-		// Create event
+		// Create event with decoded binary fields
 		e := &event.E{
+			ID:        id,
+			Pubkey:    pubkey,
 			Kind:      uint16(kind),
 			CreatedAt: createdAt,
 			Content:   []byte(content),
 			Tags:      tags,
+			Sig:       sig,
 		}

-		// Copy fixed-size arrays
-		copy(e.ID[:], id)
-		copy(e.Sig[:], sig)
-		copy(e.Pubkey[:], pubkey)
-
 		events = append(events, e)
 	}
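For reference, a hedged sketch of the shape of query the builder now emits for a `{"kinds":[30382],"#d":["id1"]}` filter. The literal below is hand-written for illustration; the real string, including the full RETURN column list, is assembled piecewise in buildCypherQuery above.

```go
package neo4j

// exampleCypher illustrates the EXISTS-based tag filtering after this change;
// clause order and the RETURN list are approximations, not the exact output.
const exampleCypher = `
MATCH (e:Event)
WHERE e.kind IN $kinds
  AND EXISTS { MATCH (e)-[:TAGGED_WITH]->(t:Tag) WHERE t.type = $tagType_0 AND t.value IN $tagValues_0 }
RETURN e.id AS id
ORDER BY e.created_at DESC
`
```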
@@ -462,3 +462,584 @@ func TestCountEvents(t *testing.T) {
|
|||||||
|
|
||||||
t.Logf("✓ Count events returned correct count: %d", count)
|
t.Logf("✓ Count events returned correct count: %d", count)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestQueryEventsByTagWithHashPrefix tests that tag filters with "#" prefix work correctly.
|
||||||
|
// This is a regression test for a bug where filter tags like "#d" were not being matched
|
||||||
|
// because the "#" prefix wasn't being stripped before comparison with stored tags.
|
||||||
|
func TestQueryEventsByTagWithHashPrefix(t *testing.T) {
|
||||||
|
if testDB == nil {
|
||||||
|
t.Skip("Neo4j not available")
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanTestDatabase()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
signer := createTestSignerLocal(t)
|
||||||
|
baseTs := timestamp.Now().V
|
||||||
|
|
||||||
|
// Create events with d-tags (parameterized replaceable kind)
|
||||||
|
createAndSaveEventLocal(t, ctx, signer, 30382, "Event with d=id1",
|
||||||
|
tag.NewS(tag.NewFromAny("d", "id1")), baseTs)
|
||||||
|
createAndSaveEventLocal(t, ctx, signer, 30382, "Event with d=id2",
|
||||||
|
tag.NewS(tag.NewFromAny("d", "id2")), baseTs+1)
|
||||||
|
createAndSaveEventLocal(t, ctx, signer, 30382, "Event with d=id3",
|
||||||
|
tag.NewS(tag.NewFromAny("d", "id3")), baseTs+2)
|
||||||
|
createAndSaveEventLocal(t, ctx, signer, 30382, "Event with d=other",
|
||||||
|
tag.NewS(tag.NewFromAny("d", "other")), baseTs+3)
|
||||||
|
|
||||||
|
// Query with "#d" prefix (as clients send it) - should match events with d=id1
|
||||||
|
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||||
|
Kinds: kind.NewS(kind.New(30382)),
|
||||||
|
Tags: tag.NewS(tag.NewFromAny("#d", "id1")),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to query events with #d tag: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(evs) != 1 {
|
||||||
|
t.Fatalf("Expected 1 event with d=id1, got %d", len(evs))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify the returned event has the correct d-tag
|
||||||
|
dTag := evs[0].Tags.GetFirst([]byte("d"))
|
||||||
|
if dTag == nil || string(dTag.Value()) != "id1" {
|
||||||
|
t.Fatalf("Expected d=id1, got d=%s", dTag.Value())
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("✓ Query with #d prefix returned correct event")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestQueryEventsByTagMultipleValues tests that tag filters with multiple values
|
||||||
|
// use OR logic (match events with ANY of the values).
|
||||||
|
func TestQueryEventsByTagMultipleValues(t *testing.T) {
|
||||||
|
if testDB == nil {
|
||||||
|
t.Skip("Neo4j not available")
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanTestDatabase()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
signer := createTestSignerLocal(t)
|
||||||
|
baseTs := timestamp.Now().V
|
||||||
|
|
||||||
|
// Create events with different d-tags
|
||||||
|
createAndSaveEventLocal(t, ctx, signer, 30382, "Event A",
|
||||||
|
tag.NewS(tag.NewFromAny("d", "target-1")), baseTs)
|
||||||
|
createAndSaveEventLocal(t, ctx, signer, 30382, "Event B",
|
||||||
|
tag.NewS(tag.NewFromAny("d", "target-2")), baseTs+1)
|
||||||
|
createAndSaveEventLocal(t, ctx, signer, 30382, "Event C",
|
||||||
|
tag.NewS(tag.NewFromAny("d", "target-3")), baseTs+2)
|
||||||
|
createAndSaveEventLocal(t, ctx, signer, 30382, "Event D (not target)",
|
||||||
|
tag.NewS(tag.NewFromAny("d", "other-value")), baseTs+3)
|
||||||
|
createAndSaveEventLocal(t, ctx, signer, 30382, "Event E (no match)",
|
||||||
|
tag.NewS(tag.NewFromAny("d", "different")), baseTs+4)
|
||||||
|
|
||||||
|
// Query with multiple d-tag values using "#d" prefix
|
||||||
|
// Should match events with d=target-1 OR d=target-2 OR d=target-3
|
||||||
|
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||||
|
Kinds: kind.NewS(kind.New(30382)),
|
||||||
|
Tags: tag.NewS(tag.NewFromAny("#d", "target-1", "target-2", "target-3")),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to query events with multiple #d values: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(evs) != 3 {
|
||||||
|
t.Fatalf("Expected 3 events matching the d-tag values, got %d", len(evs))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify returned events have correct d-tags
|
||||||
|
validDTags := map[string]bool{"target-1": false, "target-2": false, "target-3": false}
|
||||||
|
for _, ev := range evs {
|
||||||
|
dTag := ev.Tags.GetFirst([]byte("d"))
|
||||||
|
if dTag == nil {
|
||||||
|
t.Fatalf("Event missing d-tag")
|
||||||
|
}
|
||||||
|
dValue := string(dTag.Value())
|
||||||
|
if _, ok := validDTags[dValue]; !ok {
|
||||||
|
t.Fatalf("Unexpected d-tag value: %s", dValue)
|
||||||
|
}
|
||||||
|
validDTags[dValue] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify all expected d-tags were found
|
||||||
|
for dValue, found := range validDTags {
|
||||||
|
if !found {
|
||||||
|
t.Fatalf("Expected to find event with d=%s", dValue)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("✓ Query with multiple #d values returned correct events")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestQueryEventsByTagNoMatch tests that tag filters correctly return no results
|
||||||
|
// when no events match the filter.
|
||||||
|
func TestQueryEventsByTagNoMatch(t *testing.T) {
|
||||||
|
if testDB == nil {
|
||||||
|
t.Skip("Neo4j not available")
|
||||||
|
}
|
||||||
|
|
||||||
|
cleanTestDatabase()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
signer := createTestSignerLocal(t)
|
||||||
|
baseTs := timestamp.Now().V
|
||||||
|
|
||||||
|
// Create events with d-tags
|
||||||
|
createAndSaveEventLocal(t, ctx, signer, 30382, "Event",
|
||||||
|
tag.NewS(tag.NewFromAny("d", "existing-value")), baseTs)
|
||||||
|
|
||||||
|
// Query for d-tag value that doesn't exist
|
||||||
|
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||||
|
Kinds: kind.NewS(kind.New(30382)),
|
||||||
|
Tags: tag.NewS(tag.NewFromAny("#d", "non-existent-value")),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to query events: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(evs) != 0 {
|
||||||
|
t.Fatalf("Expected 0 events for non-matching d-tag, got %d", len(evs))
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("✓ Query with non-matching #d value returned no events")
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestQueryEventsByTagWithKindAndAuthor tests the combination of kind, author, and tag filters.
// This is the specific case reported by the user with kind 30382.
func TestQueryEventsByTagWithKindAndAuthor(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	cleanTestDatabase()

	ctx := context.Background()
	alice := createTestSignerLocal(t)
	bob := createTestSignerLocal(t)
	baseTs := timestamp.Now().V

	// Create events from different authors with d-tags
	createAndSaveEventLocal(t, ctx, alice, 30382, "Alice target 1",
		tag.NewS(tag.NewFromAny("d", "card-1")), baseTs)
	createAndSaveEventLocal(t, ctx, alice, 30382, "Alice target 2",
		tag.NewS(tag.NewFromAny("d", "card-2")), baseTs+1)
	createAndSaveEventLocal(t, ctx, alice, 30382, "Alice other",
		tag.NewS(tag.NewFromAny("d", "other-card")), baseTs+2)
	createAndSaveEventLocal(t, ctx, bob, 30382, "Bob target 1",
		tag.NewS(tag.NewFromAny("d", "card-1")), baseTs+3) // Same d-tag as Alice but different author

	// Query for Alice's events with specific d-tags
	evs, err := testDB.QueryEvents(ctx, &filter.F{
		Kinds:   kind.NewS(kind.New(30382)),
		Authors: tag.NewFromBytesSlice(alice.Pub()),
		Tags:    tag.NewS(tag.NewFromAny("#d", "card-1", "card-2")),
	})
	if err != nil {
		t.Fatalf("Failed to query events: %v", err)
	}

	// Should only return Alice's 2 events, not Bob's even though he has card-1
	if len(evs) != 2 {
		t.Fatalf("Expected 2 events from Alice with matching d-tags, got %d", len(evs))
	}

	alicePubkey := hex.Enc(alice.Pub())
	for _, ev := range evs {
		if hex.Enc(ev.Pubkey[:]) != alicePubkey {
			t.Fatalf("Expected author %s, got %s", alicePubkey, hex.Enc(ev.Pubkey[:]))
		}
		dTag := ev.Tags.GetFirst([]byte("d"))
		dValue := string(dTag.Value())
		if dValue != "card-1" && dValue != "card-2" {
			t.Fatalf("Expected d=card-1 or card-2, got d=%s", dValue)
		}
	}

	t.Logf("✓ Query with kind, author, and #d filter returned correct events")
}
// TestBinaryTagFilterRegression tests that queries with #e and #p tags work correctly
// even when tags are stored with binary-encoded values but filters come as hex strings.
// This mirrors the Badger database test for binary tag handling.
func TestBinaryTagFilterRegression(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	cleanTestDatabase()

	ctx := context.Background()
	author := createTestSignerLocal(t)
	referenced := createTestSignerLocal(t)
	baseTs := timestamp.Now().V

	// Create a referenced event to get a valid event ID for e-tag
	refEvent := createAndSaveEventLocal(t, ctx, referenced, 1, "Referenced event", nil, baseTs)

	// Get hex representations
	refEventIdHex := hex.Enc(refEvent.ID)
	refPubkeyHex := hex.Enc(referenced.Pub())

	// Create test event with e, p, d, and other tags
	testEvent := createAndSaveEventLocal(t, ctx, author, 30520, "Event with binary tags",
		tag.NewS(
			tag.NewFromAny("d", "test-d-value"),
			tag.NewFromAny("p", string(refPubkeyHex)),
			tag.NewFromAny("e", string(refEventIdHex)),
			tag.NewFromAny("t", "test-topic"),
		), baseTs+1)

	testEventIdHex := hex.Enc(testEvent.ID)

	// Test case 1: Query WITHOUT #e/#p tags (baseline - should work)
	t.Run("QueryWithoutEPTags", func(t *testing.T) {
		evs, err := testDB.QueryEvents(ctx, &filter.F{
			Kinds:   kind.NewS(kind.New(30520)),
			Authors: tag.NewFromBytesSlice(author.Pub()),
			Tags:    tag.NewS(tag.NewFromAny("#d", "test-d-value")),
		})
		if err != nil {
			t.Fatalf("Query without e/p tags failed: %v", err)
		}

		if len(evs) == 0 {
			t.Fatal("Expected to find event with d tag filter, got 0 results")
		}

		found := false
		for _, ev := range evs {
			if hex.Enc(ev.ID) == testEventIdHex {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("Expected event ID %s not found", testEventIdHex)
		}
	})

	// Test case 2: Query WITH #p tag
	t.Run("QueryWithPTag", func(t *testing.T) {
		evs, err := testDB.QueryEvents(ctx, &filter.F{
			Kinds:   kind.NewS(kind.New(30520)),
			Authors: tag.NewFromBytesSlice(author.Pub()),
			Tags: tag.NewS(
				tag.NewFromAny("#d", "test-d-value"),
				tag.NewFromAny("#p", string(refPubkeyHex)),
			),
		})
		if err != nil {
			t.Fatalf("Query with #p tag failed: %v", err)
		}

		if len(evs) == 0 {
			t.Fatalf("REGRESSION: Expected to find event with #p tag filter, got 0 results")
		}
	})

	// Test case 3: Query WITH #e tag
	t.Run("QueryWithETag", func(t *testing.T) {
		evs, err := testDB.QueryEvents(ctx, &filter.F{
			Kinds:   kind.NewS(kind.New(30520)),
			Authors: tag.NewFromBytesSlice(author.Pub()),
			Tags: tag.NewS(
				tag.NewFromAny("#d", "test-d-value"),
				tag.NewFromAny("#e", string(refEventIdHex)),
			),
		})
		if err != nil {
			t.Fatalf("Query with #e tag failed: %v", err)
		}

		if len(evs) == 0 {
			t.Fatalf("REGRESSION: Expected to find event with #e tag filter, got 0 results")
		}
	})

	// Test case 4: Query WITH BOTH #e AND #p tags
	t.Run("QueryWithBothEAndPTags", func(t *testing.T) {
		evs, err := testDB.QueryEvents(ctx, &filter.F{
			Kinds:   kind.NewS(kind.New(30520)),
			Authors: tag.NewFromBytesSlice(author.Pub()),
			Tags: tag.NewS(
				tag.NewFromAny("#d", "test-d-value"),
				tag.NewFromAny("#e", string(refEventIdHex)),
				tag.NewFromAny("#p", string(refPubkeyHex)),
			),
		})
		if err != nil {
			t.Fatalf("Query with both #e and #p tags failed: %v", err)
		}

		if len(evs) == 0 {
			t.Fatalf("REGRESSION: Expected to find event with #e and #p tag filters, got 0 results")
		}
	})

	t.Logf("✓ Binary tag filter regression tests passed")
}
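The regression above is a representation mismatch: e and p tag values name 32-byte event IDs and pubkeys, which a store may hold as raw binary while REQ filters deliver them as 64-character hex strings. The sketch below shows the kind of normalization that makes the two comparable; the helper and its decode-on-64-hex-chars heuristic are assumptions for illustration, not the backend's actual code path.

package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
)

// normalizeTagValue decodes a 64-character hex string to its 32 raw
// bytes so it can be compared against binary-stored tag values; other
// values pass through unchanged. A hypothetical helper for illustration.
func normalizeTagValue(v []byte) []byte {
	if len(v) == 64 {
		if raw, err := hex.DecodeString(string(v)); err == nil {
			return raw
		}
	}
	return v
}

func main() {
	stored := bytes.Repeat([]byte{0xab}, 32)      // binary form, as stored
	filterV := []byte(hex.EncodeToString(stored)) // hex form, as filtered
	fmt.Println(bytes.Equal(stored, filterV))                    // false: naive compare
	fmt.Println(bytes.Equal(stored, normalizeTagValue(filterV))) // true
}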
// TestParameterizedReplaceableEvents tests that parameterized replaceable events (kind 30000+)
// are handled correctly - only the newest version should be returned in queries by kind/author/d-tag.
func TestParameterizedReplaceableEvents(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	cleanTestDatabase()

	ctx := context.Background()
	signer := createTestSignerLocal(t)
	baseTs := timestamp.Now().V

	// Create older parameterized replaceable event
	createAndSaveEventLocal(t, ctx, signer, 30000, "Original event",
		tag.NewS(tag.NewFromAny("d", "test-param")), baseTs-7200) // 2 hours ago

	// Create newer event with same kind/author/d-tag
	createAndSaveEventLocal(t, ctx, signer, 30000, "Newer event",
		tag.NewS(tag.NewFromAny("d", "test-param")), baseTs-3600) // 1 hour ago

	// Create newest event with same kind/author/d-tag
	newestEvent := createAndSaveEventLocal(t, ctx, signer, 30000, "Newest event",
		tag.NewS(tag.NewFromAny("d", "test-param")), baseTs) // Now

	// Query for events - should only return the newest one
	evs, err := testDB.QueryEvents(ctx, &filter.F{
		Kinds:   kind.NewS(kind.New(30000)),
		Authors: tag.NewFromBytesSlice(signer.Pub()),
		Tags:    tag.NewS(tag.NewFromAny("#d", "test-param")),
	})
	if err != nil {
		t.Fatalf("Failed to query parameterized replaceable events: %v", err)
	}

	// Note: Neo4j backend may or may not automatically deduplicate replaceable events
	// depending on implementation. The important thing is that the newest is returned first.
	if len(evs) == 0 {
		t.Fatal("Expected at least 1 event")
	}

	// Verify the first (most recent) event is the newest one
	if hex.Enc(evs[0].ID) != hex.Enc(newestEvent.ID) {
		t.Logf("Note: Expected newest event first, got different order")
	}

	t.Logf("✓ Parameterized replaceable events test returned %d events", len(evs))
}
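The replacement rule for parameterized replaceable events keys on the (kind, pubkey, d-tag) address and keeps only the newest created_at. Here is a minimal sketch of the rule itself, assuming nothing about the Neo4j backend beyond what the test asserts; prEvent, address, and newestPerAddress are hypothetical names.

package main

import "fmt"

// prEvent is a pared-down stand-in for a parameterized replaceable
// event (kind 30000-39999), keeping only the fields the rule needs.
type prEvent struct {
	Kind      int
	Pubkey    string
	DTag      string
	CreatedAt int64
	Content   string
}

// address is the replaceable-event identity: same kind, author,
// and d-tag means a newer event replaces an older one.
type address struct {
	kind   int
	pubkey string
	dTag   string
}

// newestPerAddress keeps the newest event per address, which is the
// deduplication behavior the test above probes for.
func newestPerAddress(evs []prEvent) map[address]prEvent {
	best := map[address]prEvent{}
	for _, ev := range evs {
		k := address{ev.Kind, ev.Pubkey, ev.DTag}
		if cur, ok := best[k]; !ok || ev.CreatedAt > cur.CreatedAt {
			best[k] = ev
		}
	}
	return best
}

func main() {
	evs := []prEvent{
		{30000, "alice", "test-param", 100, "Original event"},
		{30000, "alice", "test-param", 200, "Newest event"},
	}
	for _, ev := range newestPerAddress(evs) {
		fmt.Println(ev.Content) // "Newest event"
	}
}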
// TestQueryForIds tests the QueryForIds method
func TestQueryForIds(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	cleanTestDatabase()

	ctx := context.Background()
	signer := createTestSignerLocal(t)
	baseTs := timestamp.Now().V

	// Create test events
	ev1 := createAndSaveEventLocal(t, ctx, signer, 1, "Event 1", nil, baseTs)
	ev2 := createAndSaveEventLocal(t, ctx, signer, 1, "Event 2", nil, baseTs+1)
	createAndSaveEventLocal(t, ctx, signer, 7, "Reaction", nil, baseTs+2)

	// Query for IDs of kind 1 events
	idPkTs, err := testDB.QueryForIds(ctx, &filter.F{
		Kinds: kind.NewS(kind.New(1)),
	})
	if err != nil {
		t.Fatalf("Failed to query for IDs: %v", err)
	}

	if len(idPkTs) != 2 {
		t.Fatalf("Expected 2 IDs for kind 1 events, got %d", len(idPkTs))
	}

	// Verify IDs match our events
	foundIds := make(map[string]bool)
	for _, r := range idPkTs {
		foundIds[hex.Enc(r.Id)] = true
	}

	if !foundIds[hex.Enc(ev1.ID)] {
		t.Error("Event 1 ID not found in results")
	}
	if !foundIds[hex.Enc(ev2.ID)] {
		t.Error("Event 2 ID not found in results")
	}

	t.Logf("✓ QueryForIds returned correct IDs")
}
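An id-only query path is useful wherever full event bodies are dead weight, for example counting matches or deduplicating results across overlapping filters. A small self-contained sketch under that assumption; the idPkTs struct below only mirrors the r.Id access in the test, not the backend's exact type.

package main

import "fmt"

// idPkTs mirrors the id/pubkey/timestamp tuples the test iterates over;
// only the Id field is modeled here.
type idPkTs struct {
	Id []byte
}

// countDistinct answers "how many distinct events match" from id-only
// results, without ever loading event content. Illustrative sketch.
func countDistinct(results []idPkTs) int {
	seen := map[string]bool{}
	for _, r := range results {
		seen[string(r.Id)] = true
	}
	return len(seen)
}

func main() {
	rs := []idPkTs{{Id: []byte{1}}, {Id: []byte{2}}, {Id: []byte{1}}}
	fmt.Println(countDistinct(rs)) // 2: duplicate id counted once
}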
// TestQueryForSerials tests the QueryForSerials method
func TestQueryForSerials(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	cleanTestDatabase()

	ctx := context.Background()
	signer := createTestSignerLocal(t)
	baseTs := timestamp.Now().V

	// Create test events
	createAndSaveEventLocal(t, ctx, signer, 1, "Event 1", nil, baseTs)
	createAndSaveEventLocal(t, ctx, signer, 1, "Event 2", nil, baseTs+1)
	createAndSaveEventLocal(t, ctx, signer, 1, "Event 3", nil, baseTs+2)

	// Query for serials
	serials, err := testDB.QueryForSerials(ctx, &filter.F{
		Kinds: kind.NewS(kind.New(1)),
	})
	if err != nil {
		t.Fatalf("Failed to query for serials: %v", err)
	}

	if len(serials) != 3 {
		t.Fatalf("Expected 3 serials, got %d", len(serials))
	}

	t.Logf("✓ QueryForSerials returned %d serials", len(serials))
}
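Assuming serials behave as in the Badger backend this suite mirrors, they are compact internal record numbers: an 8-byte integer is far cheaper to hold, sort, and intersect than a 32-byte event ID, so filter plumbing can run set operations on serials and materialize full events only at the end. A self-contained sketch of that intersection step, with all names and values hypothetical.

package main

import "fmt"

// intersectSerials returns the serials present in both candidate sets,
// e.g. records matching a kind filter AND an author filter. Running this
// on uint64 serials avoids touching full events until the final fetch.
func intersectSerials(a, b []uint64) []uint64 {
	inA := make(map[uint64]bool, len(a))
	for _, s := range a {
		inA[s] = true
	}
	var out []uint64
	for _, s := range b {
		if inA[s] {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	byKind := []uint64{101, 102, 103}   // hypothetical matches for a kind filter
	byAuthor := []uint64{102, 103, 104} // hypothetical matches for an author filter
	fmt.Println(intersectSerials(byKind, byAuthor)) // [102 103]
}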
// TestQueryEventsComplex tests complex filter combinations
func TestQueryEventsComplex(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	cleanTestDatabase()

	ctx := context.Background()
	alice := createTestSignerLocal(t)
	bob := createTestSignerLocal(t)
	baseTs := timestamp.Now().V

	// Create diverse set of events
	createAndSaveEventLocal(t, ctx, alice, 1, "Alice note with bitcoin tag",
		tag.NewS(tag.NewFromAny("t", "bitcoin")), baseTs)
	createAndSaveEventLocal(t, ctx, alice, 1, "Alice note with nostr tag",
		tag.NewS(tag.NewFromAny("t", "nostr")), baseTs+1)
	createAndSaveEventLocal(t, ctx, alice, 7, "Alice reaction",
		nil, baseTs+2)
	createAndSaveEventLocal(t, ctx, bob, 1, "Bob note with bitcoin tag",
		tag.NewS(tag.NewFromAny("t", "bitcoin")), baseTs+3)

	// Test: kinds + tags (no authors)
	t.Run("KindsAndTags", func(t *testing.T) {
		evs, err := testDB.QueryEvents(ctx, &filter.F{
			Kinds: kind.NewS(kind.New(1)),
			Tags:  tag.NewS(tag.NewFromAny("#t", "bitcoin")),
		})
		if err != nil {
			t.Fatalf("Query failed: %v", err)
		}
		if len(evs) != 2 {
			t.Fatalf("Expected 2 events with kind=1 and #t=bitcoin, got %d", len(evs))
		}
	})

	// Test: authors + tags (no kinds)
	t.Run("AuthorsAndTags", func(t *testing.T) {
		evs, err := testDB.QueryEvents(ctx, &filter.F{
			Authors: tag.NewFromBytesSlice(alice.Pub()),
			Tags:    tag.NewS(tag.NewFromAny("#t", "bitcoin")),
		})
		if err != nil {
			t.Fatalf("Query failed: %v", err)
		}
		if len(evs) != 1 {
			t.Fatalf("Expected 1 event from Alice with #t=bitcoin, got %d", len(evs))
		}
	})

	// Test: kinds + authors (no tags)
	t.Run("KindsAndAuthors", func(t *testing.T) {
		evs, err := testDB.QueryEvents(ctx, &filter.F{
			Kinds:   kind.NewS(kind.New(1)),
			Authors: tag.NewFromBytesSlice(alice.Pub()),
		})
		if err != nil {
			t.Fatalf("Query failed: %v", err)
		}
		if len(evs) != 2 {
			t.Fatalf("Expected 2 kind=1 events from Alice, got %d", len(evs))
		}
	})

	// Test: all three filters
	t.Run("AllFilters", func(t *testing.T) {
		evs, err := testDB.QueryEvents(ctx, &filter.F{
			Kinds:   kind.NewS(kind.New(1)),
			Authors: tag.NewFromBytesSlice(alice.Pub()),
			Tags:    tag.NewS(tag.NewFromAny("#t", "nostr")),
		})
		if err != nil {
			t.Fatalf("Query failed: %v", err)
		}
		if len(evs) != 1 {
			t.Fatalf("Expected 1 event (Alice kind=1 #t=nostr), got %d", len(evs))
		}
	})

	t.Logf("✓ Complex filter combination tests passed")
}
// TestQueryEventsMultipleTagTypes tests filtering with multiple different tag types
func TestQueryEventsMultipleTagTypes(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	cleanTestDatabase()

	ctx := context.Background()
	signer := createTestSignerLocal(t)
	baseTs := timestamp.Now().V

	// Create events with multiple tag types
	createAndSaveEventLocal(t, ctx, signer, 30382, "Event with d and client tags",
		tag.NewS(
			tag.NewFromAny("d", "user-1"),
			tag.NewFromAny("client", "app-a"),
		), baseTs)

	createAndSaveEventLocal(t, ctx, signer, 30382, "Event with d and different client",
		tag.NewS(
			tag.NewFromAny("d", "user-2"),
			tag.NewFromAny("client", "app-b"),
		), baseTs+1)

	createAndSaveEventLocal(t, ctx, signer, 30382, "Event with only d tag",
		tag.NewS(
			tag.NewFromAny("d", "user-3"),
		), baseTs+2)

	// Query with multiple tag types (should AND them together)
	evs, err := testDB.QueryEvents(ctx, &filter.F{
		Kinds: kind.NewS(kind.New(30382)),
		Tags: tag.NewS(
			tag.NewFromAny("#d", "user-1", "user-2"),
			tag.NewFromAny("#client", "app-a"),
		),
	})
	if err != nil {
		t.Fatalf("Query with multiple tag types failed: %v", err)
	}

	// Should match only the first event (user-1 with app-a)
	if len(evs) != 1 {
		t.Fatalf("Expected 1 event matching both #d and #client, got %d", len(evs))
	}

	dTag := evs[0].Tags.GetFirst([]byte("d"))
	if string(dTag.Value()) != "user-1" {
		t.Fatalf("Expected d=user-1, got d=%s", dTag.Value())
	}

	t.Logf("✓ Multiple tag types filter test passed")
}
@@ -377,16 +377,14 @@ func (l *Limiter) ComputeDelay(opType OperationType) time.Duration {
 
 	// In emergency mode, apply progressive throttling for writes
 	if inEmergency {
-		// Calculate how far above recovery threshold we are
-		// At emergency threshold, add 1x normal delay
-		// For every additional 10% above emergency, double the delay
-		excessPressure := metrics.MemoryPressure - l.config.RecoveryThreshold
-		if excessPressure > 0 {
-			// Progressive multiplier: starts at 2x, doubles every 10% excess
-			multiplier := 2.0
-			for excess := excessPressure; excess > 0.1; excess -= 0.1 {
-				multiplier *= 2
-			}
+		// Calculate how far above emergency threshold we are
+		// Linear scaling: multiplier = 1 + (excess * 5)
+		// At emergency threshold: 1x, at +20% above: 2x, at +40% above: 3x
+		excessPressure := metrics.MemoryPressure - l.config.EmergencyThreshold
+		if excessPressure < 0 {
+			excessPressure = 0
+		}
+		multiplier := 1.0 + excessPressure*5.0
 
 		emergencyDelaySec := delaySec * multiplier
 		maxEmergencySec := float64(l.config.EmergencyMaxDelayMs) / 1000.0
@@ -400,7 +398,6 @@ func (l *Limiter) ComputeDelay(opType OperationType) time.Duration {
 		}
 		delaySec = emergencyDelaySec
 	}
-	}
 
 	if delaySec > 0 {
 		l.writeThrottles.Add(1)
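The rewritten hunk trades exponential doubling for linear scaling, so the emergency delay now grows predictably: multiplier = 1 + excess*5, giving 1x at the emergency threshold, 2x at 20 points of excess pressure, 3x at 40. Below is a standalone check of that arithmetic; the 0.8 threshold is a made-up sample, not the configured default.

package main

import "fmt"

// emergencyMultiplier reproduces the linear scaling introduced in the
// diff above: clamp excess pressure at zero, then 1 + excess*5.
func emergencyMultiplier(pressure, emergencyThreshold float64) float64 {
	excess := pressure - emergencyThreshold
	if excess < 0 {
		excess = 0
	}
	return 1.0 + excess*5.0
}

func main() {
	const threshold = 0.8 // hypothetical emergency threshold
	for _, p := range []float64{0.8, 0.9, 1.0} {
		fmt.Printf("pressure %.1f -> %.1fx delay\n", p, emergencyMultiplier(p, threshold))
	}
	// pressure 0.8 -> 1.0x delay
	// pressure 0.9 -> 1.5x delay
	// pressure 1.0 -> 2.0x delay
}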
@@ -1 +1 @@
-v0.48.14
+v0.50.1