Remove debug logging across the codebase and update version to v0.3.0.
Some checks failed: Go / build (push) has been cancelled.
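The change is mechanical throughout the diff below: debug- and trace-level calls (log.D.F, log.T.F, log.I.F, log.D.C) are commented out rather than deleted, error handling and error-level logging stay in place, and imports that become unused as a result (fmt, lol.mleku.dev/log) are dropped. A minimal sketch of that pattern follows; it uses a hypothetical stand-in logger so it compiles on its own, since the real code depends on lol.mleku.dev/log.

package main

import "fmt"

// level is a hypothetical stand-in for the leveled printf-style loggers
// (log.D, log.T, log.E, ...) that the real code gets from lol.mleku.dev/log.
type level struct{ prefix string }

// F mirrors the call shape seen at the call sites in the diff: log.D.F(format, args...).
func (l level) F(format string, args ...any) {
	fmt.Printf(l.prefix+" "+format+"\n", args...)
}

// log is a package-level stand-in so the call sites read like the diff.
var log = struct{ D, T, E level }{
	D: level{"DEBUG"},
	T: level{"TRACE"},
	E: level{"ERROR"},
}

// handle shows the commit's pattern: the per-message debug line is commented
// out, while error handling and error-level logging are kept.
func handle(remote string, msg []byte) error {
	// log.D.F("%s received message:\n%s", remote, msg)
	if len(msg) == 0 {
		err := fmt.Errorf("empty message from %s", remote)
		log.E.F("%v", err)
		return err
	}
	return nil
}

func main() {
	_ = handle("127.0.0.1:3334", []byte(`["EVENT",{}]`))
}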
@@ -46,7 +46,7 @@ func (l *Listener) HandleAuth(b []byte) (err error) {
 return
 }
 log.D.F(
-"%s authed to pubkey,%0x", l.remote,
+"%s authed to pubkey %0x", l.remote,
 env.Event.Pubkey,
 )
 l.authedPubkey.Store(env.Event.Pubkey)
@@ -1,8 +1,6 @@
 package app
 
 import (
-"fmt"
-
 "encoders.orly/envelopes"
 "encoders.orly/envelopes/authenvelope"
 "encoders.orly/envelopes/closeenvelope"
@@ -11,40 +9,39 @@ import (
 "encoders.orly/envelopes/reqenvelope"
 "lol.mleku.dev/chk"
 "lol.mleku.dev/errorf"
-"lol.mleku.dev/log"
 )
 
 func (l *Listener) HandleMessage(msg []byte, remote string) {
-log.D.F("%s received message:\n%s", remote, msg)
+// log.D.F("%s received message:\n%s", remote, msg)
 var err error
 var t string
 var rem []byte
 if t, rem, err = envelopes.Identify(msg); !chk.E(err) {
 switch t {
 case eventenvelope.L:
-log.D.F("eventenvelope: %s %s", remote, rem)
+// log.D.F("eventenvelope: %s %s", remote, rem)
 err = l.HandleEvent(rem)
 case reqenvelope.L:
-log.D.F("reqenvelope: %s %s", remote, rem)
+// log.D.F("reqenvelope: %s %s", remote, rem)
 err = l.HandleReq(rem)
 case closeenvelope.L:
-log.D.F("closeenvelope: %s %s", remote, rem)
+// log.D.F("closeenvelope: %s %s", remote, rem)
 err = l.HandleClose(rem)
 case authenvelope.L:
-log.D.F("authenvelope: %s %s", remote, rem)
+// log.D.F("authenvelope: %s %s", remote, rem)
 err = l.HandleAuth(rem)
 default:
 err = errorf.E("unknown envelope type %s\n%s", t, rem)
 }
 }
 if err != nil {
-log.D.C(
-func() string {
-return fmt.Sprintf(
-"notice->%s %s", remote, err,
-)
-},
-)
+// log.D.C(
+// func() string {
+// return fmt.Sprintf(
+// "notice->%s %s", remote, err,
+// )
+// },
+// )
 if err = noticeenvelope.NewFrom(err.Error()).Write(l); chk.E(err) {
 return
 }
@@ -25,7 +25,7 @@ import (
 )
 
 func (l *Listener) HandleReq(msg []byte) (err error) {
-log.T.F("HandleReq: from %s", l.remote)
+// log.T.F("HandleReq: from %s", l.remote)
 var rem []byte
 env := reqenvelope.New()
 if rem, err = env.Unmarshal(msg); chk.E(err) {
@@ -54,42 +54,42 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
 return
 default:
 // user has read access or better, continue
-log.D.F("user has %s access", accessLevel)
+// log.D.F("user has %s access", accessLevel)
 }
 var events event.S
 for _, f := range *env.Filters {
-idsLen := 0; kindsLen := 0; authorsLen := 0; tagsLen := 0
-if f != nil {
-if f.Ids != nil { idsLen = f.Ids.Len() }
-if f.Kinds != nil { kindsLen = f.Kinds.Len() }
-if f.Authors != nil { authorsLen = f.Authors.Len() }
-if f.Tags != nil { tagsLen = f.Tags.Len() }
-}
-log.T.F("REQ %s: filter summary ids=%d kinds=%d authors=%d tags=%d", env.Subscription, idsLen, kindsLen, authorsLen, tagsLen)
-if f != nil && f.Authors != nil && f.Authors.Len() > 0 {
-var authors []string
-for _, a := range f.Authors.T { authors = append(authors, hex.Enc(a)) }
-log.T.F("REQ %s: authors=%v", env.Subscription, authors)
-}
-if f != nil && f.Kinds != nil && f.Kinds.Len() > 0 {
-log.T.F("REQ %s: kinds=%v", env.Subscription, f.Kinds.ToUint16())
-}
-if f != nil && f.Ids != nil && f.Ids.Len() > 0 {
-var ids []string
-for _, id := range f.Ids.T {
-ids = append(ids, hex.Enc(id))
-}
-var lim any
-if pointers.Present(f.Limit) {
-lim = *f.Limit
-} else {
-lim = nil
-}
-log.T.F(
-"REQ %s: ids filter count=%d ids=%v limit=%v", env.Subscription,
-f.Ids.Len(), ids, lim,
-)
-}
+// idsLen := 0; kindsLen := 0; authorsLen := 0; tagsLen := 0
+// if f != nil {
+// if f.Ids != nil { idsLen = f.Ids.Len() }
+// if f.Kinds != nil { kindsLen = f.Kinds.Len() }
+// if f.Authors != nil { authorsLen = f.Authors.Len() }
+// if f.Tags != nil { tagsLen = f.Tags.Len() }
+// }
+// log.T.F("REQ %s: filter summary ids=%d kinds=%d authors=%d tags=%d", env.Subscription, idsLen, kindsLen, authorsLen, tagsLen)
+// if f != nil && f.Authors != nil && f.Authors.Len() > 0 {
+// var authors []string
+// for _, a := range f.Authors.T { authors = append(authors, hex.Enc(a)) }
+// log.T.F("REQ %s: authors=%v", env.Subscription, authors)
+// }
+// if f != nil && f.Kinds != nil && f.Kinds.Len() > 0 {
+// log.T.F("REQ %s: kinds=%v", env.Subscription, f.Kinds.ToUint16())
+// }
+// if f != nil && f.Ids != nil && f.Ids.Len() > 0 {
+// var ids []string
+// for _, id := range f.Ids.T {
+// ids = append(ids, hex.Enc(id))
+// }
+// var lim any
+// if pointers.Present(f.Limit) {
+// lim = *f.Limit
+// } else {
+// lim = nil
+// }
+// log.T.F(
+// "REQ %s: ids filter count=%d ids=%v limit=%v", env.Subscription,
+// f.Ids.Len(), ids, lim,
+// )
+// }
 if pointers.Present(f.Limit) {
 if *f.Limit == 0 {
 continue
@@ -107,16 +107,16 @@ privCheck:
 for _, ev := range events {
 if kind.IsPrivileged(ev.Kind) &&
 accessLevel != "admin" { // admins can see all events
-log.I.F("checking privileged event %s", ev.ID)
+// log.I.F("checking privileged event %s", ev.ID)
 pk := l.authedPubkey.Load()
 if pk == nil {
 continue
 }
 if utils.FastEqual(ev.Pubkey, pk) {
-log.I.F(
-"privileged event %s is for logged in pubkey %0x", ev.ID,
-pk,
-)
+// log.I.F(
+// "privileged event %s is for logged in pubkey %0x", ev.ID,
+// pk,
+// )
 tmp = append(tmp, ev)
 continue
 }
@@ -127,10 +127,10 @@ privCheck:
 continue
 }
 if utils.FastEqual(pt, pk) {
-log.I.F(
-"privileged event %s is for logged in pubkey %0x",
-ev.ID, pk,
-)
+// log.I.F(
+// "privileged event %s is for logged in pubkey %0x",
+// ev.ID, pk,
+// )
 tmp = append(tmp, ev)
 continue privCheck
 }
@@ -146,10 +146,10 @@ privCheck:
 events = tmp
 seen := make(map[string]struct{})
 for _, ev := range events {
-log.T.F(
-"REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
-hex.Enc(ev.ID), ev.Kind,
-)
+// log.T.F(
+// "REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
+// hex.Enc(ev.ID), ev.Kind,
+// )
 var res *eventenvelope.Result
 if res, err = eventenvelope.NewResultWith(
 env.Subscription, ev,
@@ -164,7 +164,7 @@ privCheck:
 }
 // write the EOSE to signal to the client that all events found have been
 // sent.
-log.T.F("sending EOSE to %s", l.remote)
+// log.T.F("sending EOSE to %s", l.remote)
 if err = eoseenvelope.NewFrom(env.Subscription).
 Write(l); chk.E(err) {
 return
@@ -172,7 +172,10 @@ privCheck:
 // if the query was for just Ids, we know there can't be any more results,
 // so cancel the subscription.
 cancel := true
-log.T.F("REQ %s: computing cancel/subscription; events_sent=%d", env.Subscription, len(events))
+// log.T.F(
+// "REQ %s: computing cancel/subscription; events_sent=%d",
+// env.Subscription, len(events),
+// )
 var subbedFilters filter.S
 for _, f := range *env.Filters {
 if f.Ids.Len() < 1 {
@@ -187,7 +190,10 @@ privCheck:
 }
 notFounds = append(notFounds, id)
 }
-log.T.F("REQ %s: ids outstanding=%d of %d", env.Subscription, len(notFounds), f.Ids.Len())
+// log.T.F(
+// "REQ %s: ids outstanding=%d of %d", env.Subscription,
+// len(notFounds), f.Ids.Len(),
+// )
 // if all were found, don't add to subbedFilters
 if len(notFounds) == 0 {
 continue
@@ -73,7 +73,7 @@ whitelist:
 listener.challenge.Store([]byte(hex.Enc(chal)))
 // If admins are configured, immediately prompt client to AUTH (NIP-42)
 if len(s.Config.Admins) > 0 {
-log.D.F("sending initial AUTH challenge to %s", remote)
+// log.D.F("sending initial AUTH challenge to %s", remote)
 if err = authenvelope.NewChallengeWith(listener.challenge.Load()).
 Write(listener); chk.E(err) {
 return
@@ -82,7 +82,7 @@ whitelist:
 ticker := time.NewTicker(DefaultPingWait)
 go s.Pinger(ctx, conn, ticker, cancel)
 defer func() {
-log.D.F("closing websocket connection from %s", remote)
+// log.D.F("closing websocket connection from %s", remote)
 cancel()
 ticker.Stop()
 listener.publishers.Receive(&W{Cancel: true})
@@ -95,7 +95,7 @@ whitelist:
 }
 var typ websocket.MessageType
 var msg []byte
-log.T.F("waiting for message from %s", remote)
+// log.T.F("waiting for message from %s", remote)
 if typ, msg, err = conn.Read(ctx); chk.E(err) {
 if strings.Contains(
 err.Error(), "use of closed network connection",
@@ -101,17 +101,17 @@ func (p *P) Receive(msg typer.T) {
 if m.Cancel {
 if m.Id == "" {
 p.removeSubscriber(m.Conn)
-log.D.F("removed listener %s", m.remote)
+// log.D.F("removed listener %s", m.remote)
 } else {
 p.removeSubscriberId(m.Conn, m.Id)
-log.D.C(
-func() string {
-return fmt.Sprintf(
-"removed subscription %s for %s", m.Id,
-m.remote,
-)
-},
-)
+// log.D.C(
+// func() string {
+// return fmt.Sprintf(
+// "removed subscription %s for %s", m.Id,
+// m.remote,
+// )
+// },
+// )
 }
 return
 }
@@ -119,27 +119,31 @@ func (p *P) Receive(msg typer.T) {
 defer p.Mx.Unlock()
 if subs, ok := p.Map[m.Conn]; !ok {
 subs = make(map[string]Subscription)
-subs[m.Id] = Subscription{S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey}
+subs[m.Id] = Subscription{
+S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
+}
 p.Map[m.Conn] = subs
-log.D.C(
-func() string {
-return fmt.Sprintf(
-"created new subscription for %s, %s",
-m.remote,
-m.Filters.Marshal(nil),
-)
-},
-)
+// log.D.C(
+// func() string {
+// return fmt.Sprintf(
+// "created new subscription for %s, %s",
+// m.remote,
+// m.Filters.Marshal(nil),
+// )
+// },
+// )
 } else {
-subs[m.Id] = Subscription{S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey}
-log.D.C(
-func() string {
-return fmt.Sprintf(
-"added subscription %s for %s", m.Id,
-m.remote,
-)
-},
-)
+subs[m.Id] = Subscription{
+S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
+}
+// log.D.C(
+// func() string {
+// return fmt.Sprintf(
+// "added subscription %s for %s", m.Id,
+// m.remote,
+// )
+// },
+// )
 }
 }
 }
@@ -168,7 +172,9 @@ func (p *P) Deliver(ev *event.E) {
 for w, subs := range p.Map {
 for id, subscriber := range subs {
 if subscriber.Match(ev) {
-deliveries = append(deliveries, delivery{w: w, id: id, sub: subscriber})
+deliveries = append(
+deliveries, delivery{w: w, id: id, sub: subscriber},
+)
 }
 }
 }
@@ -2,14 +2,12 @@ package app
 
 import (
 "context"
-"fmt"
 "net/http"
 "strconv"
 "strings"
 
 "database.orly"
 "lol.mleku.dev/chk"
-"lol.mleku.dev/log"
 "next.orly.dev/app/config"
 "protocol.orly/publish"
 )
@@ -25,11 +23,11 @@ type Server struct {
 }
 
 func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-log.T.C(
-func() string {
-return fmt.Sprintf("path %v header %v", r.URL, r.Header)
-},
-)
+// log.T.C(
+// func() string {
+// return fmt.Sprintf("path %v header %v", r.URL, r.Header)
+// },
+// )
 if r.Header.Get("Upgrade") == "websocket" {
 s.HandleWebsocket(w, r)
 } else if r.Header.Get("Accept") == "application/nostr+json" {
@@ -45,13 +45,13 @@ func (f *Follows) Configure(cfg ...any) (err error) {
 for _, ca := range cfg {
 switch c := ca.(type) {
 case *config.C:
-log.D.F("setting ACL config: %v", c)
+// log.D.F("setting ACL config: %v", c)
 f.cfg = c
 case *database.D:
-log.D.F("setting ACL database: %s", c.Path())
+// log.D.F("setting ACL database: %s", c.Path())
 f.D = c
 case context.Context:
-log.D.F("setting ACL context: %s", c.Value("id"))
+// log.D.F("setting ACL context: %s", c.Value("id"))
 f.Ctx = c
 default:
 err = errorf.E("invalid type: %T", reflect.TypeOf(ca))
@@ -64,10 +64,10 @@ func (f *Follows) Configure(cfg ...any) (err error) {
 // find admin follow lists
 f.followsMx.Lock()
 defer f.followsMx.Unlock()
-log.I.F("finding admins")
+// log.I.F("finding admins")
 f.follows, f.admins = nil, nil
 for _, admin := range f.cfg.Admins {
-log.I.F("%s", admin)
+// log.I.F("%s", admin)
 var adm []byte
 if a, e := bech32encoding.NpubOrHexToPublicKeyBinary(admin); chk.E(e) {
 continue
@@ -207,7 +207,7 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
 log.W.F("follows syncer: no admin relays found in DB (kind 10002)")
 return
 }
-log.I.F(
+log.T.F(
 "follows syncer: subscribing to %d relays for %d authors", len(urls),
 len(authors),
 )
@@ -244,13 +244,13 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
 }
 *ff = append(*ff, f1)
 req := reqenvelope.NewFrom([]byte("follows-sync"), ff)
-if err := c.Write(
+if err = c.Write(
 ctx, websocket.MessageText, req.Marshal(nil),
 ); chk.E(err) {
 _ = c.Close(websocket.StatusInternalError, "write failed")
 continue
 }
-log.I.F("sent REQ to %s for follows subscription", u)
+log.T.F("sent REQ to %s for follows subscription", u)
 // read loop
 for {
 select {
@@ -337,6 +337,7 @@ func (f *Follows) Syncer() {
 }
 }
 }()
+f.updated <- struct{}{}
 }
 
 func init() {
@@ -5,8 +5,8 @@ import (
 
 "database.orly/indexes/types"
 "encoders.orly/filter"
-"encoders.orly/tag"
 "encoders.orly/hex"
+"encoders.orly/tag"
 "github.com/dgraph-io/badger/v4"
 "lol.mleku.dev/chk"
 "lol.mleku.dev/errorf"
@@ -19,15 +19,18 @@ func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
 if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.NewFromBytesSlice(id)}); chk.E(err) {
 return
 }
 
 for i, idx := range idxs {
-log.T.F("GetSerialById: searching range %d: start=%x, end=%x", i, idx.Start, idx.End)
+log.T.F(
+"GetSerialById: searching range %d: start=%x, end=%x", i, idx.Start,
+idx.End,
+)
 }
 if len(idxs) == 0 {
 err = errorf.E("no indexes found for id %0x", id)
 return
 }
 
 idFound := false
 if err = d.View(
 func(txn *badger.Txn) (err error) {
@@ -46,19 +49,21 @@ func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
 idFound = true
 } else {
 // Item not found in database
-log.T.F("GetSerialById: ID not found in database: %s", hex.Enc(id))
+log.T.F(
+"GetSerialById: ID not found in database: %s", hex.Enc(id),
+)
 }
 return
 },
 ); chk.E(err) {
 return
 }
 
 if !idFound {
-err = errorf.E("id not found in database: %s", hex.Enc(id))
+err = errorf.T("id not found in database: %s", hex.Enc(id))
 return
 }
 
 return
 }
 
@@ -47,18 +47,24 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 log.T.F("QueryEvents: looking for ID=%s", hex.Enc(id))
 }
 log.T.F("QueryEvents: ids path, count=%d", f.Ids.Len())
 for _, idx := range f.Ids.T {
 log.T.F("QueryEvents: lookup id=%s", hex.Enc(idx))
 // we know there is only Ids in this, so run the ID query and fetch.
 var ser *types.Uint40
 var idErr error
 if ser, idErr = d.GetSerialById(idx); idErr != nil {
 // Check if this is a "not found" error which is expected for IDs we don't have
 if strings.Contains(idErr.Error(), "id not found in database") {
-log.T.F("QueryEvents: ID not found in database: %s", hex.Enc(idx))
+log.T.F(
+"QueryEvents: ID not found in database: %s",
+hex.Enc(idx),
+)
 } else {
 // Log unexpected errors but continue processing other IDs
-log.E.F("QueryEvents: error looking up id=%s err=%v", hex.Enc(idx), idErr)
+log.E.F(
+"QueryEvents: error looking up id=%s err=%v",
+hex.Enc(idx), idErr,
+)
 }
 continue
 }
@@ -70,23 +76,38 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 // fetch the events
 var ev *event.E
 if ev, err = d.FetchEventBySerial(ser); err != nil {
-log.T.F("QueryEvents: fetch by serial failed for id=%s ser=%v err=%v", hex.Enc(idx), ser, err)
+log.T.F(
+"QueryEvents: fetch by serial failed for id=%s ser=%v err=%v",
+hex.Enc(idx), ser, err,
+)
 continue
 }
-log.T.F("QueryEvents: found id=%s kind=%d created_at=%d", hex.Enc(ev.ID), ev.Kind, ev.CreatedAt)
+log.T.F(
+"QueryEvents: found id=%s kind=%d created_at=%d",
+hex.Enc(ev.ID), ev.Kind, ev.CreatedAt,
+)
 // check for an expiration tag and delete after returning the result
 if CheckExpiration(ev) {
-log.T.F("QueryEvents: id=%s filtered out due to expiration", hex.Enc(ev.ID))
+log.T.F(
+"QueryEvents: id=%s filtered out due to expiration",
+hex.Enc(ev.ID),
+)
 expDeletes = append(expDeletes, ser)
 expEvs = append(expEvs, ev)
 continue
 }
 // skip events that have been deleted by a proper deletion event
 if derr := d.CheckForDeleted(ev, nil); derr != nil {
-log.T.F("QueryEvents: id=%s filtered out due to deletion: %v", hex.Enc(ev.ID), derr)
+log.T.F(
+"QueryEvents: id=%s filtered out due to deletion: %v",
+hex.Enc(ev.ID), derr,
+)
 continue
 }
-log.T.F("QueryEvents: id=%s SUCCESSFULLY FOUND, adding to results", hex.Enc(ev.ID))
+log.T.F(
+"QueryEvents: id=%s SUCCESSFULLY FOUND, adding to results",
+hex.Enc(ev.ID),
+)
 evs = append(evs, ev)
 }
 // sort the events by timestamp
@@ -98,13 +119,13 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 } else {
 // non-IDs path
 var idPkTs []*store.IdPkTs
-if f.Authors != nil && f.Authors.Len() > 0 && f.Kinds != nil && f.Kinds.Len() > 0 {
-log.T.F("QueryEvents: authors+kinds path, authors=%d kinds=%d", f.Authors.Len(), f.Kinds.Len())
-}
+// if f.Authors != nil && f.Authors.Len() > 0 && f.Kinds != nil && f.Kinds.Len() > 0 {
+// log.T.F("QueryEvents: authors+kinds path, authors=%d kinds=%d", f.Authors.Len(), f.Kinds.Len())
+// }
 if idPkTs, err = d.QueryForIds(c, f); chk.E(err) {
 return
 }
-log.T.F("QueryEvents: QueryForIds returned %d candidates", len(idPkTs))
+// log.T.F("QueryEvents: QueryForIds returned %d candidates", len(idPkTs))
 // Create a map to store the latest version of replaceable events
 replaceableEvents := make(map[string]*event.E)
 // Create a map to store the latest version of parameterized replaceable
@@ -258,19 +279,18 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 // If no 'd' tag, use empty string
 dValue = ""
 }
 // Initialize the inner map if it doesn't exist
 if _, exists := deletionsByKindPubkeyDTag[key]; !exists {
 deletionsByKindPubkeyDTag[key] = make(map[string]int64)
 }
 // Record the newest delete timestamp for this d-tag
 if ts, ok := deletionsByKindPubkeyDTag[key][dValue]; !ok || ev.CreatedAt > ts {
 deletionsByKindPubkeyDTag[key][dValue] = ev.CreatedAt
 }
 }
 }
 }
 }
-
 // Second pass: process all events, filtering out deleted ones
 for _, idpk := range idPkTs {
 var ev *event.E
@@ -281,19 +301,24 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 if ev, err = d.FetchEventBySerial(ser); err != nil {
 continue
 }
-
 // Add logging for tag filter debugging
 if f.Tags != nil && f.Tags.Len() > 0 {
 var eventTags []string
 if ev.Tags != nil && ev.Tags.Len() > 0 {
 for _, t := range *ev.Tags {
 if t.Len() >= 2 {
-eventTags = append(eventTags, string(t.Key())+"="+string(t.Value()))
+eventTags = append(
+eventTags,
+string(t.Key())+"="+string(t.Value()),
+)
 }
 }
 }
-log.T.F("QueryEvents: processing event ID=%s kind=%d tags=%v", hex.Enc(ev.ID), ev.Kind, eventTags)
+// log.T.F(
+// "QueryEvents: processing event ID=%s kind=%d tags=%v",
+// hex.Enc(ev.ID), ev.Kind, eventTags,
+// )
 // Check if this event matches ALL required tags in the filter
 tagMatches := 0
 for _, filterTag := range *f.Tags {
@@ -306,15 +331,18 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 } else {
 actualKey = filterKey
 }
-
 // Check if event has this tag key with any of the filter's values
 eventHasTag := false
 if ev.Tags != nil {
 for _, eventTag := range *ev.Tags {
-if eventTag.Len() >= 2 && bytes.Equal(eventTag.Key(), actualKey) {
+if eventTag.Len() >= 2 && bytes.Equal(
+eventTag.Key(), actualKey,
+) {
 // Check if the event's tag value matches any of the filter's values
 for _, filterValue := range filterTag.T[1:] {
-if bytes.Equal(eventTag.Value(), filterValue) {
+if bytes.Equal(
+eventTag.Value(), filterValue,
+) {
 eventHasTag = true
 break
 }
@@ -328,20 +356,28 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 if eventHasTag {
 tagMatches++
 }
-log.T.F("QueryEvents: tag filter %s (actual key: %s) matches: %v (total matches: %d/%d)",
-string(filterKey), string(actualKey), eventHasTag, tagMatches, f.Tags.Len())
+// log.T.F(
+// "QueryEvents: tag filter %s (actual key: %s) matches: %v (total matches: %d/%d)",
+// string(filterKey), string(actualKey), eventHasTag,
+// tagMatches, f.Tags.Len(),
+// )
 }
 }
 
 // If not all tags match, skip this event
 if tagMatches < f.Tags.Len() {
-log.T.F("QueryEvents: event ID=%s SKIPPED - only matches %d/%d required tags",
-hex.Enc(ev.ID), tagMatches, f.Tags.Len())
+// log.T.F(
+// "QueryEvents: event ID=%s SKIPPED - only matches %d/%d required tags",
+// hex.Enc(ev.ID), tagMatches, f.Tags.Len(),
+// )
 continue
 }
-log.T.F("QueryEvents: event ID=%s PASSES all tag filters", hex.Enc(ev.ID))
+// log.T.F(
+// "QueryEvents: event ID=%s PASSES all tag filters",
+// hex.Enc(ev.ID),
+// )
 }
 
 // Skip events with kind 5 (Deletion)
 if ev.Kind == kind.Deletion.K {
 continue
@@ -11,7 +11,6 @@ import (
 "interfaces.orly/codec"
 "lol.mleku.dev/chk"
 "lol.mleku.dev/errorf"
-"lol.mleku.dev/log"
 "utils.orly/constraints"
 "utils.orly/units"
 )
@@ -55,7 +54,7 @@ func (en *Challenge) Label() string { return L }
 func (en *Challenge) Write(w io.Writer) (err error) {
 var b []byte
 b = en.Marshal(b)
-log.D.F("writing out challenge envelope: '%s'", b)
+// log.D.F("writing out challenge envelope: '%s'", b)
 _, err = w.Write(b)
 return
 }
@@ -1 +1 @@
-v0.2.1
+v0.3.0
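Aside from the version bump, a few log calls demoted from info to trace (log.I.F to log.T.F, errorf.E to errorf.T), and formatting-only line splits, the one behavioural addition above is the f.updated <- struct{}{} send at the end of the @@ -337,6 +337,7 @@ hunk in Follows.Syncer. A minimal sketch of that signal-channel pattern follows; the type and channel setup here are hypothetical, and the hunk does not show how orly sizes or consumes the channel.

package main

import "fmt"

// follows is a hypothetical cut-down stand-in for the ACL type in the diff;
// only the updated channel matters for the pattern.
type follows struct {
	updated chan struct{}
}

func main() {
	f := follows{updated: make(chan struct{})}
	// Producer side (what Syncer does after rebuilding its follow lists):
	// send an empty struct to say "new data is ready", carrying no payload.
	go func() {
		f.updated <- struct{}{}
	}()
	// Consumer side: block until a refresh is announced, then react.
	<-f.updated
	fmt.Println("follow list updated")
}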