From 9e59d5f72b04a19452633fba86aa4820c212eacf Mon Sep 17 00:00:00 2001
From: mleku
Date: Wed, 10 Sep 2025 16:29:55 +0100
Subject: [PATCH] Set default value for `LogToStdout`, enhance logging for
 request handling, query events, and filters, fix deleted-event handling in
 ID queries, and increase the startup delay in `relaytester-test.sh`.

---
 app/config/config.go                    |  2 +-
 app/handle-req.go                       | 45 ++++++++++++++++++++--
 pkg/database/get-indexes-from-filter.go |  8 ++--
 pkg/database/get-serial-by-id.go        |  3 ++
 pkg/database/query-events.go            | 51 +++++++++++++++++--------
 scripts/relaytester-test.sh             |  2 +-
 6 files changed, 86 insertions(+), 25 deletions(-)

diff --git a/app/config/config.go b/app/config/config.go
index 596e30f..44a3e49 100644
--- a/app/config/config.go
+++ b/app/config/config.go
@@ -29,7 +29,7 @@ type C struct {
 	Port        int      `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
 	LogLevel    string   `env:"ORLY_LOG_LEVEL" default:"info" usage:"relay log level: fatal error warn info debug trace"`
 	DBLogLevel  string   `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
-	LogToStdout bool     `env:"ORLY_LOG_TO_STDOUT" usage:"log to stdout instead of stderr"`
+	LogToStdout bool     `env:"ORLY_LOG_TO_STDOUT" default:"false" usage:"log to stdout instead of stderr"`
 	Pprof       string   `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation"`
 	IPWhitelist []string `env:"ORLY_IP_WHITELIST" usage:"comma-separated list of IP addresses to allow access from, matches on prefixes to allow private subnets, eg 10.0.0 = 10.0.0.0/8"`
 	Admins      []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
diff --git a/app/handle-req.go b/app/handle-req.go
index 1428412..d76c185 100644
--- a/app/handle-req.go
+++ b/app/handle-req.go
@@ -25,13 +25,14 @@ import (
 )
 
 func (l *Listener) HandleReq(msg []byte) (err error) {
+	log.T.F("HandleReq: from %s", l.remote)
 	var rem []byte
 	env := reqenvelope.New()
 	if rem, err = env.Unmarshal(msg); chk.E(err) {
 		return normalize.Error.Errorf(err.Error())
 	}
 	if len(rem) > 0 {
-		log.I.F("extra '%s'", rem)
+		log.I.F("REQ extra bytes: '%s'", rem)
 	}
 	// send a challenge to the client to auth if an ACL is active
 	if acl.Registry.Active.Load() != "none" {
@@ -55,8 +56,40 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
 		// user has read access or better, continue
 		log.D.F("user has %s access", accessLevel)
 	}
-	var events event.S
-	for _, f := range *env.Filters {
+	var events event.S
+	for _, f := range *env.Filters {
+		idsLen := 0; kindsLen := 0; authorsLen := 0; tagsLen := 0
+		if f != nil {
+			if f.Ids != nil { idsLen = f.Ids.Len() }
+			if f.Kinds != nil { kindsLen = f.Kinds.Len() }
+			if f.Authors != nil { authorsLen = f.Authors.Len() }
+			if f.Tags != nil { tagsLen = f.Tags.Len() }
+		}
+		log.T.F("REQ %s: filter summary ids=%d kinds=%d authors=%d tags=%d", env.Subscription, idsLen, kindsLen, authorsLen, tagsLen)
+		if f != nil && f.Authors != nil && f.Authors.Len() > 0 {
+			var authors []string
+			for _, a := range f.Authors.T { authors = append(authors, hex.Enc(a)) }
+			log.T.F("REQ %s: authors=%v", env.Subscription, authors)
+		}
+		if f != nil && f.Kinds != nil && f.Kinds.Len() > 0 {
+			log.T.F("REQ %s: kinds=%v", env.Subscription, f.Kinds.ToUint16())
+		}
+		if f != nil && f.Ids != nil && f.Ids.Len() > 0 {
+			var ids []string
+			for _, id := range f.Ids.T {
+				ids = append(ids, hex.Enc(id))
+			}
+			var lim any
+			if pointers.Present(f.Limit) {
+				lim = *f.Limit
+			} else {
+				lim = nil
+			}
+			log.T.F(
+				"REQ %s: ids filter count=%d ids=%v limit=%v", env.Subscription,
+				f.Ids.Len(), ids, lim,
+			)
+		}
 		if pointers.Present(f.Limit) {
 			if *f.Limit == 0 {
 				continue
@@ -113,6 +146,10 @@ privCheck:
 	events = tmp
 	seen := make(map[string]struct{})
 	for _, ev := range events {
+		log.T.F(
+			"REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
+			hex.Enc(ev.ID), ev.Kind,
+		)
 		var res *eventenvelope.Result
 		if res, err = eventenvelope.NewResultWith(
 			env.Subscription, ev,
@@ -135,6 +172,7 @@ privCheck:
 	// if the query was for just Ids, we know there can't be any more results,
 	// so cancel the subscription.
 	cancel := true
+	log.T.F("REQ %s: computing cancel/subscription; events_sent=%d", env.Subscription, len(events))
 	var subbedFilters filter.S
 	for _, f := range *env.Filters {
 		if f.Ids.Len() < 1 {
@@ -149,6 +187,7 @@ privCheck:
 			}
 			notFounds = append(notFounds, id)
 		}
+		log.T.F("REQ %s: ids outstanding=%d of %d", env.Subscription, len(notFounds), f.Ids.Len())
 		// if all were found, don't add to subbedFilters
 		if len(notFounds) == 0 {
 			continue
diff --git a/pkg/database/get-indexes-from-filter.go b/pkg/database/get-indexes-from-filter.go
index ef1c8e7..db7a2c2 100644
--- a/pkg/database/get-indexes-from-filter.go
+++ b/pkg/database/get-indexes-from-filter.go
@@ -306,8 +306,8 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 			for _, author := range f.Authors.T {
 				kind := new(types2.Uint16)
 				kind.Set(k)
-				p := new(types2.PubHash)
-				if err = p.FromPubkey(author); chk.E(err) {
+				var p *types2.PubHash
+				if p, err = CreatePubHashFromData(author); chk.E(err) {
 					return
 				}
 				start, end := new(bytes.Buffer), new(bytes.Buffer)
@@ -351,8 +351,8 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 	// Pubkey pc-
 	if f.Authors != nil && f.Authors.Len() > 0 {
 		for _, author := range f.Authors.T {
-			p := new(types2.PubHash)
-			if err = p.FromPubkey(author); chk.E(err) {
+			var p *types2.PubHash
+			if p, err = CreatePubHashFromData(author); chk.E(err) {
 				return
 			}
 			start, end := new(bytes.Buffer), new(bytes.Buffer)
diff --git a/pkg/database/get-serial-by-id.go b/pkg/database/get-serial-by-id.go
index 3eac61e..07f91b6 100644
--- a/pkg/database/get-serial-by-id.go
+++ b/pkg/database/get-serial-by-id.go
@@ -6,12 +6,15 @@ import (
 	"database.orly/indexes/types"
 	"encoders.orly/filter"
 	"encoders.orly/tag"
+	"encoders.orly/hex"
 	"github.com/dgraph-io/badger/v4"
 	"lol.mleku.dev/chk"
 	"lol.mleku.dev/errorf"
+	"lol.mleku.dev/log"
 )
 
 func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
+	log.T.F("GetSerialById: input id=%s", hex.Enc(id))
 	var idxs []Range
 	if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.NewFromBytesSlice(id)}); chk.E(err) {
 		return
diff --git a/pkg/database/query-events.go b/pkg/database/query-events.go
index e588df6..f6fff65 100644
--- a/pkg/database/query-events.go
+++ b/pkg/database/query-events.go
@@ -42,23 +42,33 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 	var expDeletes types.Uint40s
 	var expEvs event.S
 	if f.Ids != nil && f.Ids.Len() > 0 {
-		for _, idx := range f.Ids.T {
+		log.T.F("QueryEvents: ids path, count=%d", f.Ids.Len())
+		for _, idx := range f.Ids.T {
+			log.T.F("QueryEvents: lookup id=%s", hex.Enc(idx))
 			// we know there is only Ids in this, so run the ID query and fetch.
 			var ser *types.Uint40
 			if ser, err = d.GetSerialById(idx); chk.E(err) {
+				log.T.F("QueryEvents: id miss or error id=%s err=%v", hex.Enc(idx), err)
 				continue
 			}
 			// fetch the events
 			var ev *event.E
 			if ev, err = d.FetchEventBySerial(ser); err != nil {
+				log.T.F("QueryEvents: fetch by serial failed for id=%s ser=%v err=%v", hex.Enc(idx), ser, err)
 				continue
 			}
+			log.T.F("QueryEvents: found id=%s kind=%d created_at=%d", hex.Enc(ev.ID), ev.Kind, ev.CreatedAt)
 			// check for an expiration tag and delete after returning the result
 			if CheckExpiration(ev) {
 				expDeletes = append(expDeletes, ser)
 				expEvs = append(expEvs, ev)
 				continue
 			}
+			// skip events that have been deleted by a proper deletion event
+			if derr := d.CheckForDeleted(ev, nil); derr != nil {
+				log.T.F("QueryEvents: id=%s filtered out due to deletion: %v", hex.Enc(ev.ID), derr)
+				continue
+			}
 			evs = append(evs, ev)
 		}
 		// sort the events by timestamp
@@ -68,10 +78,15 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 			},
 		)
 	} else {
+		// non-IDs path
 		var idPkTs []*store.IdPkTs
+		if f.Authors != nil && f.Authors.Len() > 0 && f.Kinds != nil && f.Kinds.Len() > 0 {
+			log.T.F("QueryEvents: authors+kinds path, authors=%d kinds=%d", f.Authors.Len(), f.Kinds.Len())
+		}
 		if idPkTs, err = d.QueryForIds(c, f); chk.E(err) {
 			return
 		}
+		log.T.F("QueryEvents: QueryForIds returned %d candidates", len(idPkTs))
 		// Create a map to store the latest version of replaceable events
 		replaceableEvents := make(map[string]*event.E)
 		// Create a map to store the latest version of parameterized replaceable
@@ -83,8 +98,8 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 		// events)
 		deletionsByKindPubkey := make(map[string]bool)
 		// Map to track deletion events by kind, pubkey, and d-tag (for
-		// parameterized replaceable events)
-		deletionsByKindPubkeyDTag := make(map[string]map[string]bool)
+		// parameterized replaceable events). We store the newest delete timestamp per d-tag.
+		deletionsByKindPubkeyDTag := make(map[string]map[string]int64)
 		// Map to track specific event IDs that have been deleted
 		deletedEventIds := make(map[string]bool)
 		// Query for deletion events separately if we have authors in the filter
@@ -169,11 +184,13 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 					key := hex.Enc(pk) + ":" + strconv.Itoa(int(kk.K))
 					// Initialize the inner map if it doesn't exist
 					if _, exists := deletionsByKindPubkeyDTag[key]; !exists {
-						deletionsByKindPubkeyDTag[key] = make(map[string]bool)
+						deletionsByKindPubkeyDTag[key] = make(map[string]int64)
 					}
-					// Mark this d-tag as deleted
+					// Record the newest delete timestamp for this d-tag
 					dValue := string(split[2])
-					deletionsByKindPubkeyDTag[key][dValue] = true
+					if ts, ok := deletionsByKindPubkeyDTag[key][dValue]; !ok || ev.CreatedAt > ts {
+						deletionsByKindPubkeyDTag[key][dValue] = ev.CreatedAt
+					}
 					// Debug logging
 				}
 				// For replaceable events, we need to check if there are any
@@ -223,12 +240,14 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 							// If no 'd' tag, use empty string
 							dValue = ""
 						}
-						// Initialize the inner map if it doesn't exist
-						if _, exists := deletionsByKindPubkeyDTag[key]; !exists {
-							deletionsByKindPubkeyDTag[key] = make(map[string]bool)
-						}
-						// Mark this d-tag as deleted
-						deletionsByKindPubkeyDTag[key][dValue] = true
+						// Initialize the inner map if it doesn't exist
+						if _, exists := deletionsByKindPubkeyDTag[key]; !exists {
+							deletionsByKindPubkeyDTag[key] = make(map[string]int64)
+						}
+						// Record the newest delete timestamp for this d-tag
+						if ts, ok := deletionsByKindPubkeyDTag[key][dValue]; !ok || ev.CreatedAt > ts {
+							deletionsByKindPubkeyDTag[key][dValue] = ev.CreatedAt
+						}
 					}
 				}
 			}
@@ -308,10 +327,10 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 
 			// Check if this event has been deleted via an a-tag
 			if deletionMap, exists := deletionsByKindPubkeyDTag[key]; exists {
-				// If the d-tag value is in the deletion map and this event
-				// is not specifically requested by ID, skip it
-				if deletionMap[dValue] && !isIdInFilter {
-					log.T.F("Debug: Event deleted - skipping")
+				// If there is a deletion timestamp and this event is older than the deletion,
+				// and this event is not specifically requested by ID, skip it
+				if delTs, ok := deletionMap[dValue]; ok && ev.CreatedAt < delTs && !isIdInFilter {
+					log.T.F("Debug: Event deleted by a-tag (older than delete) - skipping")
 					continue
 				}
 			}
diff --git a/scripts/relaytester-test.sh b/scripts/relaytester-test.sh
index cfea322..cc2f179 100755
--- a/scripts/relaytester-test.sh
+++ b/scripts/relaytester-test.sh
@@ -16,6 +16,6 @@ export ORLY_IP_WHITELIST=127.0.0
 export ORLY_ADMINS=8118b9201de133912079652601863a69fdd0cac7f3eb15a38ae410c3f364269c,57eaff2aec61241dd8925b4d3a9cc93824a2944ed3f9e6b5143c15d0dd911864
 export ORLY_ACL_MODE=none
 go run . &
-sleep 2
+sleep 5
 relay-tester ws://127.0.0.1:3334 nsec12l4072hvvyjpmkyjtdxn48xf8qj299zw60u7ddg58s2aphv3rpjqtg0tvr nsec1syvtjgqauyeezgrev5nqrp36d87apjk87043tgu2usgv8umyy6wq4yl6tu
 killall next.orly.dev
\ No newline at end of file
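
Note (appended commentary, not part of the patch to apply): the query-events.go hunks above replace the boolean "d-tag was deleted" flag with the newest deletion timestamp, so a parameterized replaceable event is only dropped when a matching deletion event is newer than it, and never when the event was explicitly requested by ID. Below is a minimal, self-contained Go sketch of that rule, under the assumption that the key is "<pubkey-hex>:<kind>"; the names dTagDeletes, record, and suppressed are illustrative only and do not exist in the relay codebase.

    package main

    import "fmt"

    // dTagDeletes maps "<pubkey-hex>:<kind>" to the newest deletion
    // timestamp seen for each d-tag value.
    type dTagDeletes map[string]map[string]int64

    // record stores a deletion, keeping only the newest created_at per d-tag.
    func (d dTagDeletes) record(key, dValue string, createdAt int64) {
        if _, ok := d[key]; !ok {
            d[key] = make(map[string]int64)
        }
        if ts, ok := d[key][dValue]; !ok || createdAt > ts {
            d[key][dValue] = createdAt
        }
    }

    // suppressed reports whether an event with this key, d-tag value, and
    // timestamp should be dropped: only when a newer deletion exists and the
    // event was not explicitly requested by ID.
    func (d dTagDeletes) suppressed(key, dValue string, createdAt int64, requestedByID bool) bool {
        if requestedByID {
            return false
        }
        m, ok := d[key]
        if !ok {
            return false
        }
        delTs, ok := m[dValue]
        return ok && createdAt < delTs
    }

    func main() {
        del := dTagDeletes{}
        del.record("pubkey:30023", "my-article", 1700000100)
        // older than the delete: suppressed
        fmt.Println(del.suppressed("pubkey:30023", "my-article", 1700000000, false)) // true
        // republished after the delete: kept
        fmt.Println(del.suppressed("pubkey:30023", "my-article", 1700000200, false)) // false
    }

Keeping the timestamp rather than a bool is what lets an author republish a parameterized replaceable event after deleting an older version without the new copy being filtered out of query results.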