Compare commits

7 Commits

| Author | SHA1 | Date |
|---|---|---|
| | d1316a5b7a | |
| | b45f0a2c51 | |
| | e2b7152221 | |
| | bf7ca1da43 | |
| | bb8998fef6 | |
| | 57ac3667e6 | |
| | cb54891473 | |
```diff
@@ -24,13 +24,13 @@ func (l *Listener) GetSerialsFromFilter(f *filter.F) (
     }
 }
 
 func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
-    log.I.C(
-        func() string {
-            return fmt.Sprintf(
-                "delete event\n%s", env.E.Serialize(),
-            )
-        },
-    )
+    // log.I.C(
+    //     func() string {
+    //         return fmt.Sprintf(
+    //             "delete event\n%s", env.E.Serialize(),
+    //         )
+    //     },
+    // )
     var ownerDelete bool
     for _, pk := range l.Admins {
         if utils.FastEqual(pk, env.E.Pubkey) {
@@ -77,26 +77,37 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
         if kind.IsParameterizedReplaceable(ev.Kind) {
             // For parameterized replaceable, we need a DTag to match
             if len(at.DTag) == 0 {
-                log.I.F("HandleDelete: skipping parameterized replaceable event %s - no DTag in a-tag", hex.Enc(ev.ID))
+                log.I.F(
+                    "HandleDelete: skipping parameterized replaceable event %s - no DTag in a-tag",
+                    hex.Enc(ev.ID),
+                )
                 continue
             }
         } else if !kind.IsReplaceable(ev.Kind) {
             // For non-replaceable events, a-tags don't apply
-            log.I.F("HandleDelete: skipping non-replaceable event %s - a-tags only apply to replaceable events", hex.Enc(ev.ID))
+            log.I.F(
+                "HandleDelete: skipping non-replaceable event %s - a-tags only apply to replaceable events",
+                hex.Enc(ev.ID),
+            )
             continue
         }
 
         // Only delete events that are older than or equal to the delete event timestamp
         if ev.CreatedAt > env.E.CreatedAt {
-            log.I.F("HandleDelete: skipping newer event %s (created_at=%d) - delete event timestamp is %d",
-                hex.Enc(ev.ID), ev.CreatedAt, env.E.CreatedAt)
+            log.I.F(
+                "HandleDelete: skipping newer event %s (created_at=%d) - delete event timestamp is %d",
+                hex.Enc(ev.ID), ev.CreatedAt, env.E.CreatedAt,
+            )
             continue
         }
 
-        log.I.F("HandleDelete: deleting event %s via a-tag %d:%s:%s (event_time=%d, delete_time=%d)",
-            hex.Enc(ev.ID), at.Kind.K, hex.Enc(at.Pubkey), string(at.DTag), ev.CreatedAt, env.E.CreatedAt)
+        log.I.F(
+            "HandleDelete: deleting event %s via a-tag %d:%s:%s (event_time=%d, delete_time=%d)",
+            hex.Enc(ev.ID), at.Kind.K, hex.Enc(at.Pubkey),
+            string(at.DTag), ev.CreatedAt, env.E.CreatedAt,
+        )
         if err = l.DeleteEventBySerial(
-            l.Ctx, s, ev,
+            l.Ctx(), s, ev,
         ); chk.E(err) {
             continue
         }
@@ -138,8 +149,11 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
         // delete, for the e tag case the author is the signer of
         // the event.
         if !utils.FastEqual(env.E.Pubkey, ev.Pubkey) {
-            log.W.F("HandleDelete: attempted deletion of event %s by different user - delete pubkey=%s, event pubkey=%s",
-                hex.Enc(ev.ID), hex.Enc(env.E.Pubkey), hex.Enc(ev.Pubkey))
+            log.W.F(
+                "HandleDelete: attempted deletion of event %s by different user - delete pubkey=%s, event pubkey=%s",
+                hex.Enc(ev.ID), hex.Enc(env.E.Pubkey),
+                hex.Enc(ev.Pubkey),
+            )
             continue
         }
         validDeletionFound = true
@@ -147,9 +161,11 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
         if ev.Kind == kind.EventDeletion.K {
             continue
         }
-        log.I.F("HandleDelete: deleting event %s by authorized user %s",
-            hex.Enc(ev.ID), hex.Enc(env.E.Pubkey))
-        if err = l.DeleteEventBySerial(l.Ctx, s, ev); chk.E(err) {
+        log.I.F(
+            "HandleDelete: deleting event %s by authorized user %s",
+            hex.Enc(ev.ID), hex.Enc(env.E.Pubkey),
+        )
+        if err = l.DeleteEventBySerial(l.Ctx(), s, ev); chk.E(err) {
             continue
         }
     }
@@ -196,11 +212,11 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
         }
         continue
     }
 
     // If no valid deletions were found, return an error
     if !validDeletionFound {
         return fmt.Errorf("blocked: cannot delete events that belong to other users")
     }
 
     return
 }
```
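The a-tag branch of HandleDelete above enforces three gates before deleting: a parameterized replaceable target needs a d-tag, a-tags only apply to replaceable kinds, and only events at or before the deletion event's timestamp are removed. A condensed, hedged sketch of the same gates, using simplified illustrative types rather than the relay's actual structs:

```go
package main

import "fmt"

// aTagTarget is an illustrative stand-in for the event an "a" tag points at.
type aTagTarget struct {
	dTag                       []byte
	createdAt, deleteCreatedAt int64
	isParamReplaceable         bool
	isReplaceable              bool
}

// deletableViaATag mirrors the checks in the diff: require a d-tag for
// parameterized replaceable kinds, reject non-replaceable kinds, and never
// delete an event newer than the delete request itself.
func deletableViaATag(t aTagTarget) (bool, string) {
	if t.isParamReplaceable && len(t.dTag) == 0 {
		return false, "no DTag in a-tag"
	}
	if !t.isParamReplaceable && !t.isReplaceable {
		return false, "a-tags only apply to replaceable events"
	}
	if t.createdAt > t.deleteCreatedAt {
		return false, "event is newer than the delete event"
	}
	return true, ""
}

func main() {
	ok, reason := deletableViaATag(aTagTarget{isParamReplaceable: true})
	fmt.Println(ok, reason) // false no DTag in a-tag
}
```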
```diff
@@ -1,8 +1,10 @@
 package app
 
 import (
+    "context"
     "fmt"
     "strings"
+    "time"
 
     acl "acl.orly"
     "encoders.orly/envelopes/authenvelope"
@@ -99,7 +101,7 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
         return
     default:
         // user has write access or better, continue
-        log.D.F("user has %s access", accessLevel)
+        // log.D.F("user has %s access", accessLevel)
     }
     // if the event is a delete, process the delete
     if env.E.Kind == kind.EventDeletion.K {
@@ -127,9 +129,11 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
             }
         }
     }
-    // store the event
-    log.I.F("saving event %0x, %s", env.E.ID, env.E.Serialize())
-    if _, _, err = l.SaveEvent(l.Ctx, env.E); err != nil {
+    // store the event - use a separate context to prevent cancellation issues
+    saveCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+    defer cancel()
+    // log.I.F("saving event %0x, %s", env.E.ID, env.E.Serialize())
+    if _, _, err = l.SaveEvent(saveCtx, env.E); err != nil {
         if strings.HasPrefix(err.Error(), "blocked:") {
             errStr := err.Error()[len("blocked: "):len(err.Error())]
             if err = Ok.Error(
```
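The SaveEvent call above is now made with a context that is detached from the websocket connection and bounded by a 30-second timeout, so a client disconnect mid-request cannot cancel the write. A minimal sketch of the pattern, where the `persist` callback stands in for the real SaveEvent:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// saveDetached runs a persistence step under its own deadline, independent of
// the caller's (per-connection) context: cancellation of the connection no
// longer aborts the write, while the timeout still bounds the work.
func saveDetached(persist func(context.Context) error) error {
	saveCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return persist(saveCtx)
}

func main() {
	err := saveDetached(func(ctx context.Context) error { return nil })
	fmt.Println(err) // <nil>
}
```

The cost of detaching is that a save can outlive its connection by up to the timeout, which is normally the desired behaviour for durability.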
```diff
@@ -146,7 +150,8 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
     if err = Ok.Ok(l, env, ""); chk.E(err) {
         return
     }
-    defer l.publishers.Deliver(env.E)
+    // Deliver the event to subscribers immediately after sending OK response
+    l.publishers.Deliver(env.E)
     log.D.F("saved event %0x", env.E.ID)
     var isNewFromAdmin bool
     for _, admin := range l.Admins {
@@ -156,11 +161,16 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
         }
     }
     if isNewFromAdmin {
+        log.I.F("new event from admin %0x", env.E.Pubkey)
         // if a follow list was saved, reconfigure ACLs now that it is persisted
         if env.E.Kind == kind.FollowList.K ||
             env.E.Kind == kind.RelayListMetadata.K {
-            if err = acl.Registry.Configure(); chk.E(err) {
-            }
+            // Run ACL reconfiguration asynchronously to prevent blocking websocket operations
+            go func() {
+                if err := acl.Registry.Configure(); chk.E(err) {
+                    log.E.F("failed to reconfigure ACL: %v", err)
+                }
+            }()
         }
     }
     return
```
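The hunk above moves ACL reconfiguration into a goroutine so a slow Configure cannot block the websocket handler, and scopes the error with `:=` so the handler's own `err` return value is not written from another goroutine. A small sketch of the same fire-and-forget shape, with illustrative parameters:

```go
package main

import (
	"fmt"
	"time"
)

// reconfigureAsync runs a potentially slow configuration step off the caller's
// goroutine; failures are reported via the logging callback rather than
// through the caller's error return, avoiding a race on that value.
func reconfigureAsync(configure func() error, logf func(string, ...any)) {
	go func() {
		if err := configure(); err != nil {
			logf("failed to reconfigure ACL: %v", err)
		}
	}()
}

func main() {
	reconfigureAsync(
		func() error { return fmt.Errorf("boom") },
		func(format string, args ...any) { fmt.Printf(format+"\n", args...) },
	)
	time.Sleep(50 * time.Millisecond) // give the goroutine time to log in this demo
}
```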
```diff
@@ -9,10 +9,11 @@ import (
     "encoders.orly/envelopes/reqenvelope"
     "lol.mleku.dev/chk"
     "lol.mleku.dev/errorf"
+    "lol.mleku.dev/log"
 )
 
 func (l *Listener) HandleMessage(msg []byte, remote string) {
-    // log.D.F("%s received message:\n%s", remote, msg)
+    log.D.F("%s received message:\n%s", remote, msg)
     var err error
     var t string
     var rem []byte
```
```diff
@@ -1,7 +1,10 @@
 package app
 
 import (
+    "context"
     "errors"
+    "fmt"
+    "time"
 
     acl "acl.orly"
     "encoders.orly/envelopes/authenvelope"
@@ -25,7 +28,7 @@ import (
 )
 
 func (l *Listener) HandleReq(msg []byte) (err error) {
-    // log.T.F("HandleReq: from %s", l.remote)
+    log.T.F("HandleReq: START processing from %s\n%s\n", l.remote, msg)
     var rem []byte
     env := reqenvelope.New()
     if rem, err = env.Unmarshal(msg); chk.E(err) {
@@ -54,69 +57,91 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
         return
     default:
         // user has read access or better, continue
-        // log.D.F("user has %s access", accessLevel)
+        log.D.F("user has %s access", accessLevel)
     }
     var events event.S
     for _, f := range *env.Filters {
-        // idsLen := 0; kindsLen := 0; authorsLen := 0; tagsLen := 0
-        // if f != nil {
-        //     if f.Ids != nil { idsLen = f.Ids.Len() }
-        //     if f.Kinds != nil { kindsLen = f.Kinds.Len() }
-        //     if f.Authors != nil { authorsLen = f.Authors.Len() }
-        //     if f.Tags != nil { tagsLen = f.Tags.Len() }
-        // }
-        // log.T.F("REQ %s: filter summary ids=%d kinds=%d authors=%d tags=%d", env.Subscription, idsLen, kindsLen, authorsLen, tagsLen)
-        // if f != nil && f.Authors != nil && f.Authors.Len() > 0 {
-        //     var authors []string
-        //     for _, a := range f.Authors.T { authors = append(authors, hex.Enc(a)) }
-        //     log.T.F("REQ %s: authors=%v", env.Subscription, authors)
-        // }
-        // if f != nil && f.Kinds != nil && f.Kinds.Len() > 0 {
-        //     log.T.F("REQ %s: kinds=%v", env.Subscription, f.Kinds.ToUint16())
-        // }
-        // if f != nil && f.Ids != nil && f.Ids.Len() > 0 {
-        //     var ids []string
-        //     for _, id := range f.Ids.T {
-        //         ids = append(ids, hex.Enc(id))
-        //     }
-        //     var lim any
-        //     if pointers.Present(f.Limit) {
-        //         lim = *f.Limit
-        //     } else {
-        //         lim = nil
-        //     }
-        //     log.T.F(
-        //         "REQ %s: ids filter count=%d ids=%v limit=%v", env.Subscription,
-        //         f.Ids.Len(), ids, lim,
-        //     )
-        // }
+        idsLen := 0
+        kindsLen := 0
+        authorsLen := 0
+        tagsLen := 0
+        if f != nil {
+            if f.Ids != nil {
+                idsLen = f.Ids.Len()
+            }
+            if f.Kinds != nil {
+                kindsLen = f.Kinds.Len()
+            }
+            if f.Authors != nil {
+                authorsLen = f.Authors.Len()
+            }
+            if f.Tags != nil {
+                tagsLen = f.Tags.Len()
+            }
+        }
+        log.T.F(
+            "REQ %s: filter summary ids=%d kinds=%d authors=%d tags=%d",
+            env.Subscription, idsLen, kindsLen, authorsLen, tagsLen,
+        )
+        if f != nil && f.Authors != nil && f.Authors.Len() > 0 {
+            var authors []string
+            for _, a := range f.Authors.T {
+                authors = append(authors, hex.Enc(a))
+            }
+            log.T.F("REQ %s: authors=%v", env.Subscription, authors)
+        }
+        if f != nil && f.Kinds != nil && f.Kinds.Len() > 0 {
+            log.T.F("REQ %s: kinds=%v", env.Subscription, f.Kinds.ToUint16())
+        }
+        if f != nil && f.Ids != nil && f.Ids.Len() > 0 {
+            var ids []string
+            for _, id := range f.Ids.T {
+                ids = append(ids, hex.Enc(id))
+            }
+            var lim any
+            if pointers.Present(f.Limit) {
+                lim = *f.Limit
+            } else {
+                lim = nil
+            }
+            log.T.F(
+                "REQ %s: ids filter count=%d ids=%v limit=%v", env.Subscription,
+                f.Ids.Len(), ids, lim,
+            )
+        }
         if pointers.Present(f.Limit) {
             if *f.Limit == 0 {
                 continue
             }
         }
-        if events, err = l.QueryEvents(l.Ctx, f); chk.E(err) {
-            if errors.Is(err, badger.ErrDBClosed) {
-                return
-            }
-            err = nil
-        }
+        // Use a separate context for QueryEvents to prevent cancellation issues
+        queryCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+        defer cancel()
+        log.T.F("HandleReq: About to QueryEvents for %s, main context done: %v", l.remote, l.ctx.Err() != nil)
+        if events, err = l.QueryEvents(queryCtx, f); chk.E(err) {
+            if errors.Is(err, badger.ErrDBClosed) {
+                return
+            }
+            log.T.F("HandleReq: QueryEvents error for %s: %v", l.remote, err)
+            err = nil
+        }
+        log.T.F("HandleReq: QueryEvents completed for %s, found %d events", l.remote, len(events))
     }
     var tmp event.S
 privCheck:
     for _, ev := range events {
         if kind.IsPrivileged(ev.Kind) &&
             accessLevel != "admin" { // admins can see all events
-            // log.I.F("checking privileged event %s", ev.ID)
+            log.I.F("checking privileged event %s", ev.ID)
             pk := l.authedPubkey.Load()
             if pk == nil {
                 continue
             }
             if utils.FastEqual(ev.Pubkey, pk) {
-                // log.I.F(
-                //     "privileged event %s is for logged in pubkey %0x", ev.ID,
-                //     pk,
-                // )
+                log.I.F(
+                    "privileged event %s is for logged in pubkey %0x", ev.ID,
+                    pk,
+                )
                 tmp = append(tmp, ev)
                 continue
             }
```
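The restored filter-summary logging guards every field with a nil check before calling Len(), because a REQ filter can arrive with nil Ids, Kinds, Authors, or Tags, and calling Len() on a nil pointer panics as soon as the method touches its receiver's fields. A tiny illustration with a stand-in field type:

```go
package main

import "fmt"

// ids is an illustrative stand-in for a filter field with a Len method.
type ids struct{ vals [][]byte }

func (i *ids) Len() int { return len(i.vals) } // dereferences the receiver

// countIds mirrors the nil-guarded counting in the diff above.
func countIds(f *ids) int {
	idsLen := 0
	if f != nil {
		idsLen = f.Len()
	}
	return idsLen
}

func main() {
	var missing *ids
	fmt.Println(countIds(missing))                      // 0, no panic
	fmt.Println(countIds(&ids{vals: [][]byte{{0x01}}})) // 1
}
```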
```diff
@@ -127,10 +152,10 @@ privCheck:
                 continue
             }
             if utils.FastEqual(pt, pk) {
-                // log.I.F(
-                //     "privileged event %s is for logged in pubkey %0x",
-                //     ev.ID, pk,
-                // )
+                log.I.F(
+                    "privileged event %s is for logged in pubkey %0x",
+                    ev.ID, pk,
+                )
                 tmp = append(tmp, ev)
                 continue privCheck
             }
@@ -146,10 +171,15 @@ privCheck:
     events = tmp
     seen := make(map[string]struct{})
     for _, ev := range events {
-        // log.T.F(
-        //     "REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
-        //     hex.Enc(ev.ID), ev.Kind,
-        // )
+        log.T.F(
+            "REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
+            hex.Enc(ev.ID), ev.Kind,
+        )
+        log.T.C(
+            func() string {
+                return fmt.Sprintf("event:\n%s\n", ev.Serialize())
+            },
+        )
         var res *eventenvelope.Result
         if res, err = eventenvelope.NewResultWith(
             env.Subscription, ev,
@@ -164,7 +194,7 @@ privCheck:
     }
     // write the EOSE to signal to the client that all events found have been
     // sent.
-    // log.T.F("sending EOSE to %s", l.remote)
+    log.T.F("sending EOSE to %s", l.remote)
     if err = eoseenvelope.NewFrom(env.Subscription).
         Write(l); chk.E(err) {
         return
@@ -172,10 +202,10 @@ privCheck:
     // if the query was for just Ids, we know there can't be any more results,
     // so cancel the subscription.
     cancel := true
-    // log.T.F(
-    //     "REQ %s: computing cancel/subscription; events_sent=%d",
-    //     env.Subscription, len(events),
-    // )
+    log.T.F(
+        "REQ %s: computing cancel/subscription; events_sent=%d",
+        env.Subscription, len(events),
+    )
     var subbedFilters filter.S
     for _, f := range *env.Filters {
         if f.Ids.Len() < 1 {
@@ -190,10 +220,10 @@ privCheck:
             }
             notFounds = append(notFounds, id)
         }
-        // log.T.F(
-        //     "REQ %s: ids outstanding=%d of %d", env.Subscription,
-        //     len(notFounds), f.Ids.Len(),
-        // )
+        log.T.F(
+            "REQ %s: ids outstanding=%d of %d", env.Subscription,
+            len(notFounds), f.Ids.Len(),
+        )
         // if all were found, don't add to subbedFilters
         if len(notFounds) == 0 {
             continue
@@ -230,5 +260,6 @@ privCheck:
             return
         }
     }
+    log.T.F("HandleReq: COMPLETED processing from %s", l.remote)
     return
 }
```
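The privCheck loop above only releases a privileged-kind event to an admin, to the event's author, or to an authenticated pubkey listed in one of the event's "p" tags. That rule, condensed into a standalone predicate with simplified types (not the relay's API):

```go
package main

import (
	"bytes"
	"fmt"
)

// canSeePrivileged mirrors the privCheck rule in simplified form: a privileged
// event is only released to an admin, to its author, or to a pubkey listed in
// one of the event's "p" tags.
func canSeePrivileged(isAdmin bool, authedPubkey, eventPubkey []byte, pTags [][]byte) bool {
	if isAdmin {
		return true
	}
	if authedPubkey == nil {
		return false
	}
	if bytes.Equal(authedPubkey, eventPubkey) {
		return true
	}
	for _, pt := range pTags {
		if bytes.Equal(pt, authedPubkey) {
			return true
		}
	}
	return false
}

func main() {
	author := []byte{0xaa}
	fmt.Println(canSeePrivileged(false, author, author, nil))                    // true: author sees own event
	fmt.Println(canSeePrivileged(false, []byte{0xbb}, author, [][]byte{{0xbb}})) // true: p-tagged recipient
	fmt.Println(canSeePrivileged(false, []byte{0xcc}, author, nil))              // false
}
```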
```diff
@@ -19,6 +19,8 @@ const (
     DefaultWriteWait = 10 * time.Second
     DefaultPongWait = 60 * time.Second
     DefaultPingWait = DefaultPongWait / 2
+    DefaultReadTimeout = 3 * time.Second // Read timeout to detect stalled connections
+    DefaultWriteTimeout = 3 * time.Second
     DefaultMaxMessageSize = 1 * units.Mb
 
     // CloseMessage denotes a close control message. The optional message
@@ -95,13 +97,34 @@ whitelist:
         }
         var typ websocket.MessageType
         var msg []byte
-        // log.T.F("waiting for message from %s", remote)
-        if typ, msg, err = conn.Read(ctx); chk.E(err) {
+        log.T.F("waiting for message from %s", remote)
+
+        // Create a read context with timeout to prevent indefinite blocking
+        readCtx, readCancel := context.WithTimeout(ctx, DefaultReadTimeout)
+        typ, msg, err = conn.Read(readCtx)
+        readCancel()
+
+        if err != nil {
             if strings.Contains(
                 err.Error(), "use of closed network connection",
             ) {
                 return
             }
+            // Handle timeout errors - occurs when client becomes unresponsive
+            if strings.Contains(err.Error(), "context deadline exceeded") {
+                log.T.F(
+                    "connection from %s timed out after %v", remote,
+                    DefaultReadTimeout,
+                )
+                return
+            }
+            // Handle EOF errors gracefully - these occur when client closes connection
+            // or sends incomplete/malformed WebSocket frames
+            if strings.Contains(err.Error(), "EOF") ||
+                strings.Contains(err.Error(), "failed to read frame header") {
+                log.T.F("connection from %s closed: %v", remote, err)
+                return
+            }
             status := websocket.CloseStatus(err)
             switch status {
             case websocket.StatusNormalClosure,
@@ -109,17 +132,27 @@ whitelist:
                 websocket.StatusNoStatusRcvd,
                 websocket.StatusAbnormalClosure,
                 websocket.StatusProtocolError:
+                log.T.F(
+                    "connection from %s closed with status: %v", remote, status,
+                )
             default:
                 log.E.F("unexpected close error from %s: %v", remote, err)
             }
             return
         }
         if typ == PingMessage {
-            if err = conn.Write(ctx, PongMessage, msg); chk.E(err) {
+            // Create a write context with timeout for pong response
+            writeCtx, writeCancel := context.WithTimeout(
+                ctx, DefaultWriteTimeout,
+            )
+            if err = conn.Write(writeCtx, PongMessage, msg); chk.E(err) {
+                writeCancel()
                 return
             }
+            writeCancel()
             continue
         }
+        log.T.F("received message from %s: %s", remote, string(msg))
         go listener.HandleMessage(msg, remote)
     }
 }
@@ -136,9 +169,13 @@ func (s *Server) Pinger(
     for {
         select {
         case <-ticker.C:
-            if err = conn.Ping(ctx); chk.E(err) {
+            // Create a write context with timeout for ping operation
+            pingCtx, pingCancel := context.WithTimeout(ctx, DefaultWriteTimeout)
+            if err = conn.Ping(pingCtx); chk.E(err) {
+                pingCancel()
                 return
             }
+            pingCancel()
         case <-ctx.Done():
             return
         }
```
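The read loop above now bounds each `conn.Read` with its own timeout context and classifies the error before deciding whether to drop the connection. A minimal sketch of that shape, written against a small interface so it stays independent of the concrete websocket package:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// reader is the part of a websocket connection the loop needs.
type reader interface {
	Read(ctx context.Context) (msg []byte, err error)
}

// readWithTimeout wraps a single blocking read in a context that expires after
// d, so a stalled peer cannot hold the loop forever.
func readWithTimeout(ctx context.Context, c reader, d time.Duration) ([]byte, error) {
	readCtx, cancel := context.WithTimeout(ctx, d)
	defer cancel()
	msg, err := c.Read(readCtx)
	if errors.Is(err, context.DeadlineExceeded) {
		return nil, fmt.Errorf("read timed out after %v: %w", d, err)
	}
	return msg, err
}

type fakeConn struct{}

func (fakeConn) Read(ctx context.Context) ([]byte, error) {
	<-ctx.Done() // simulate a peer that never sends anything
	return nil, ctx.Err()
}

func main() {
	_, err := readWithTimeout(context.Background(), fakeConn{}, 50*time.Millisecond)
	fmt.Println(err)
}
```

One consequence worth noting is that an otherwise healthy but idle client will also hit this deadline, so the read timeout has to be chosen with the ping interval in mind.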
```diff
@@ -19,8 +19,21 @@ type Listener struct {
     authedPubkey atomic.Bytes
 }
 
+// Ctx returns the listener's context, but creates a new context for each operation
+// to prevent cancellation from affecting subsequent operations
+func (l *Listener) Ctx() context.Context {
+    return l.ctx
+}
+
 func (l *Listener) Write(p []byte) (n int, err error) {
-    if err = l.conn.Write(l.ctx, websocket.MessageText, p); chk.E(err) {
+    // Use a separate context with timeout for writes to prevent race conditions
+    // where the main connection context gets cancelled while writing events
+    writeCtx, cancel := context.WithTimeout(
+        context.Background(), DefaultWriteTimeout,
+    )
+    defer cancel()
+
+    if err = l.conn.Write(writeCtx, websocket.MessageText, p); chk.E(err) {
         return
     }
     n = len(p)
```
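Listener.Write above detaches each write from the connection context and bounds it with DefaultWriteTimeout. The same shape in isolation, against an illustrative interface:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// writer is the subset of a websocket connection used here.
type writer interface {
	Write(ctx context.Context, p []byte) error
}

const defaultWriteTimeout = 3 * time.Second

// writeDetached performs one write under its own deadline, independent of the
// caller's context, so a cancelled connection context cannot abort an
// in-flight response.
func writeDetached(w writer, p []byte) (int, error) {
	ctx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
	defer cancel()
	if err := w.Write(ctx, p); err != nil {
		return 0, err
	}
	return len(p), nil
}

type discardConn struct{}

func (discardConn) Write(ctx context.Context, p []byte) error { return nil }

func main() {
	n, err := writeDetached(discardConn{}, []byte(`["NOTICE","hello"]`))
	fmt.Println(n, err) // 18 <nil>
}
```

The tradeoff of using context.Background() here is that a write no longer stops when the connection's context is cancelled; it only stops when the deadline fires or the underlying connection errors.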
```diff
@@ -28,6 +28,9 @@ func Run(
     var err error
     var adminKeys [][]byte
     for _, admin := range cfg.Admins {
+        if len(admin) == 0 {
+            continue
+        }
         var pk []byte
         if pk, err = bech32encoding.NpubOrHexToPublicKeyBinary(admin); chk.E(err) {
             continue
```
```diff
@@ -101,17 +101,17 @@ func (p *P) Receive(msg typer.T) {
     if m.Cancel {
         if m.Id == "" {
             p.removeSubscriber(m.Conn)
-            // log.D.F("removed listener %s", m.remote)
+            log.D.F("removed listener %s", m.remote)
         } else {
             p.removeSubscriberId(m.Conn, m.Id)
-            // log.D.C(
-            //     func() string {
-            //         return fmt.Sprintf(
-            //             "removed subscription %s for %s", m.Id,
-            //             m.remote,
-            //         )
-            //     },
-            // )
+            log.D.C(
+                func() string {
+                    return fmt.Sprintf(
+                        "removed subscription %s for %s", m.Id,
+                        m.remote,
+                    )
+                },
+            )
         }
         return
     }
@@ -123,27 +123,27 @@ func (p *P) Receive(msg typer.T) {
             S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
         }
         p.Map[m.Conn] = subs
-        // log.D.C(
-        //     func() string {
-        //         return fmt.Sprintf(
-        //             "created new subscription for %s, %s",
-        //             m.remote,
-        //             m.Filters.Marshal(nil),
-        //         )
-        //     },
-        // )
+        log.D.C(
+            func() string {
+                return fmt.Sprintf(
+                    "created new subscription for %s, %s",
+                    m.remote,
+                    m.Filters.Marshal(nil),
+                )
+            },
+        )
     } else {
         subs[m.Id] = Subscription{
             S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
         }
-        // log.D.C(
-        //     func() string {
-        //         return fmt.Sprintf(
-        //             "added subscription %s for %s", m.Id,
-        //             m.remote,
-        //         )
-        //     },
-        // )
+        log.D.C(
+            func() string {
+                return fmt.Sprintf(
+                    "added subscription %s for %s", m.Id,
+                    m.remote,
+                )
+            },
+        )
     }
     }
 }
@@ -179,14 +179,16 @@ func (p *P) Deliver(ev *event.E) {
         }
     }
     p.Mx.RUnlock()
-    log.D.C(
-        func() string {
-            return fmt.Sprintf(
-                "delivering event %0x to websocket subscribers %d", ev.ID,
-                len(deliveries),
-            )
-        },
-    )
+    if len(deliveries) > 0 {
+        log.D.C(
+            func() string {
+                return fmt.Sprintf(
+                    "delivering event %0x to websocket subscribers %d", ev.ID,
+                    len(deliveries),
+                )
+            },
+        )
+    }
     for _, d := range deliveries {
         // If the event is privileged, enforce that the subscriber's authed pubkey matches
         // either the event pubkey or appears in any 'p' tag of the event.
@@ -218,8 +220,15 @@ func (p *P) Deliver(ev *event.E) {
         if res, err = eventenvelope.NewResultWith(d.id, ev); chk.E(err) {
             continue
         }
+        // Use a separate context with timeout for writes to prevent race conditions
+        // where the publisher context gets cancelled while writing events
+        writeCtx, cancel := context.WithTimeout(
+            context.Background(), WriteTimeout,
+        )
+        defer cancel()
+
         if err = d.w.Write(
-            p.c, websocket.MessageText, res.Marshal(nil),
+            writeCtx, websocket.MessageText, res.Marshal(nil),
         ); chk.E(err) {
             // On error, remove the subscriber connection safely
             p.removeSubscriber(d.w)
```
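The delivery loop above gives each subscriber write its own detached timeout context. Because a `defer cancel()` placed inside a loop only runs when the surrounding function returns, the timers for all deliveries stay held until Deliver finishes; moving each write into a small helper (or cancelling at the end of the iteration) releases them sooner. A sketch of the per-delivery form, with illustrative names:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

const writeTimeout = 3 * time.Second

// deliverOne writes a single payload under its own deadline and releases the
// timer immediately, instead of deferring the cancel to the end of the
// surrounding loop.
func deliverOne(write func(context.Context, []byte) error, payload []byte) error {
	ctx, cancel := context.WithTimeout(context.Background(), writeTimeout)
	defer cancel() // safe here: this function returns once per delivery
	return write(ctx, payload)
}

func main() {
	err := deliverOne(func(ctx context.Context, p []byte) error { return nil }, []byte("EVENT"))
	fmt.Println(err) // <nil>
}
```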
```diff
@@ -245,9 +254,9 @@ func (p *P) removeSubscriberId(ws *websocket.Conn, id string) {
     var subs map[string]Subscription
     var ok bool
     if subs, ok = p.Map[ws]; ok {
-        delete(p.Map[ws], id)
-        _ = subs
-        if len(subs) == 0 {
+        delete(subs, id)
+        // Check the actual map after deletion, not the original reference
+        if len(p.Map[ws]) == 0 {
             delete(p.Map, ws)
         }
     }
```
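The removeSubscriberId change deletes through the local `subs` reference and re-checks `p.Map[ws]`. Since a Go map value is a reference to the same underlying table, either spelling observes the deletion; a runnable illustration:

```go
package main

import "fmt"

func main() {
	outer := map[string]map[string]int{
		"conn": {"sub1": 1, "sub2": 2},
	}
	subs := outer["conn"] // same underlying map, not a copy

	delete(subs, "sub1")
	fmt.Println(len(outer["conn"]), len(subs)) // 1 1 - both names see the deletion

	// Drop the connection entry once its last subscription is gone.
	delete(subs, "sub2")
	if len(outer["conn"]) == 0 {
		delete(outer, "conn")
	}
	fmt.Println(len(outer)) // 0
}
```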
```diff
@@ -98,9 +98,9 @@ func (f *Follows) Configure(cfg ...any) (err error) {
         if ev, err = f.D.FetchEventBySerial(s); chk.E(err) {
             continue
         }
-        log.I.F("admin follow list:\n%s", ev.Serialize())
+        // log.I.F("admin follow list:\n%s", ev.Serialize())
         for _, v := range ev.Tags.GetAll([]byte("p")) {
-            log.I.F("adding follow: %s", v.Value())
+            // log.I.F("adding follow: %s", v.Value())
             var a []byte
             if b, e := hex.Dec(string(v.Value())); chk.E(e) {
                 continue
@@ -282,7 +282,7 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
                     ctx, res.Event,
                 ); err != nil {
                     if !strings.HasPrefix(
-                        err.Error(), "event already exists",
+                        err.Error(), "blocked:",
                     ) {
                         log.W.F(
                             "follows syncer: save event failed: %v",
```
```diff
@@ -9,6 +9,7 @@ import (
     types2 "database.orly/indexes/types"
     "encoders.orly/filter"
     "lol.mleku.dev/chk"
+    "lol.mleku.dev/log"
 )
 
 type Range struct {
@@ -95,16 +96,13 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
             return
         }
         b := buf.Bytes()
-
         // Create range that will match any serial value with this ID prefix
         end := make([]byte, len(b))
         copy(end, b)
-
         // Fill the end range with 0xff bytes to match all possible serial values
         for i := 0; i < 5; i++ {
             end = append(end, 0xff)
         }
-
         r := Range{b, end}
         idxs = append(idxs, r)
         return
@@ -241,6 +239,7 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
         for _, t := range *f.Tags {
             if t.Len() >= 2 && (len(t.Key()) == 1 || (len(t.Key()) == 2 && t.Key()[0] == '#')) {
                 var p *types2.PubHash
+                log.I.S(author)
                 if p, err = CreatePubHashFromData(author); chk.E(err) {
                     return
                 }
@@ -363,6 +362,7 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
     if f.Authors != nil && f.Authors.Len() > 0 {
         for _, author := range f.Authors.T {
             var p *types2.PubHash
+            log.I.S(author)
             if p, err = CreatePubHashFromData(author); chk.E(err) {
                 return
             }
```
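GetIndexesFromFilter builds a key range that matches every serial stored under an ID prefix by copying the prefix and padding the upper bound with 0xff bytes. A standalone sketch of that construction, taking the 5-byte serial width from the loop above:

```go
package main

import "fmt"

const serialLen = 5 // width of the serial suffix appended to index keys

// prefixRange returns [start, end] bounds that cover every key beginning with
// prefix followed by any serial value: start is the prefix itself and end is
// the prefix padded with 0xff bytes, the largest possible serial suffix.
func prefixRange(prefix []byte) (start, end []byte) {
	start = prefix
	end = make([]byte, len(prefix), len(prefix)+serialLen)
	copy(end, prefix)
	for i := 0; i < serialLen; i++ {
		end = append(end, 0xff)
	}
	return start, end
}

func main() {
	s, e := prefixRange([]byte{0x01, 0xab})
	fmt.Printf("%x .. %x\n", s, e) // 01ab .. 01abffffffffff
}
```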
```diff
@@ -15,10 +15,13 @@ const PubHashLen = 8
 type PubHash struct{ val [PubHashLen]byte }
 
 func (ph *PubHash) FromPubkey(pk []byte) (err error) {
+    if len(pk) == 0 {
+        panic("nil pubkey")
+    }
     if len(pk) != schnorr.PubKeyBytesLen {
         err = errorf.E(
-            "invalid Pubkey length, got %d require %d",
-            len(pk), schnorr.PubKeyBytesLen,
+            "invalid Pubkey length, got %d require %d %0x",
+            len(pk), schnorr.PubKeyBytesLen, pk,
         )
         return
     }
```
```diff
@@ -3,6 +3,7 @@ package database
 import (
     "bytes"
     "context"
+    "fmt"
     "strings"
 
     "database.orly/indexes"
@@ -235,5 +236,10 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
         "total data written: %d bytes keys %d bytes values for event ID %s", kc,
         vc, hex.Enc(ev.ID),
     )
+    log.T.C(
+        func() string {
+            return fmt.Sprintf("event:\n%s\n", ev.Serialize())
+        },
+    )
     return
 }
```
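Both this hunk and the HandleReq one log the full serialized event through a closure-taking variant of the logger, so the potentially large serialization only runs when that log level is actually enabled. The idea in isolation, with a stand-in logger:

```go
package main

import "fmt"

// logC calls the message constructor only when the level is enabled, which is
// what makes expensive formatting (like serializing a whole event) free when
// tracing is off.
func logC(enabled bool, msg func() string) {
	if !enabled {
		return
	}
	fmt.Println(msg())
}

func main() {
	serialize := func() []byte { return []byte(`{"id":"…"}`) } // stand-in for ev.Serialize()
	logC(false, func() string { return fmt.Sprintf("event:\n%s\n", serialize()) }) // closure never runs
	logC(true, func() string { return fmt.Sprintf("event:\n%s\n", serialize()) })  // built and printed
}
```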
```diff
@@ -5,7 +5,6 @@ import (
     "encoders.orly/hex"
     "encoders.orly/ints"
     "encoders.orly/text"
-    "lol.mleku.dev/log"
 )
 
 // ToCanonical converts the event to the canonical encoding used to derive the
@@ -23,7 +22,7 @@ func (ev *E) ToCanonical(dst []byte) (b []byte) {
     b = append(b, ',')
     b = text.AppendQuote(b, ev.Content, text.NostrEscape)
     b = append(b, ']')
-    log.D.F("canonical: %s", b)
+    // log.D.F("canonical: %s", b)
     return
 }
 
```
```diff
@@ -333,7 +333,7 @@ var (
     CommunityDefinition = &K{34550}
     ACLEvent = &K{39998}
     // ParameterizedReplaceableEnd is an event type that...
-    ParameterizedReplaceableEnd = &K{39999}
+    ParameterizedReplaceableEnd = &K{40000}
 )
 
 var MapMx sync.RWMutex
```
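Assuming ParameterizedReplaceableEnd is used as an exclusive upper bound, raising it from 39999 to 40000 brings the range check in line with NIP-01, which defines addressable (parameterized replaceable) kinds as 30000 <= kind < 40000; with 39999 as the bound, kind 39999 itself would be wrongly excluded. A sketch of such a check (not the relay's actual implementation):

```go
package main

import "fmt"

const (
	parameterizedReplaceableStart = 30000
	parameterizedReplaceableEnd   = 40000 // exclusive upper bound
)

// isParameterizedReplaceable reports whether a kind falls in the addressable
// range 30000 <= k < 40000 defined by NIP-01.
func isParameterizedReplaceable(k uint16) bool {
	return k >= parameterizedReplaceableStart && k < parameterizedReplaceableEnd
}

func main() {
	fmt.Println(isParameterizedReplaceable(39998)) // true (e.g. the ACLEvent kind above)
	fmt.Println(isParameterizedReplaceable(39999)) // true only because the bound is 40000
	fmt.Println(isParameterizedReplaceable(40000)) // false
}
```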
```diff
@@ -1 +1 @@
-v0.3.1
+v0.4.1
```
```diff
@@ -16,4 +16,4 @@ cd relay-tester
 cargo build -r
 cp target/release/relay-tester $GOBIN/
 cd ..
-rm -rf relay-tester
+#rm -rf relay-tester
```
```diff
@@ -7,7 +7,7 @@ if ! command -v "relay-tester" &> /dev/null; then
   echo "./scripts/relaytester-install.sh"
   exit
 fi
-rm -rf ~/.local/share/ORLY
+rm -rf /tmp/orlytest
 export ORLY_LOG_LEVEL=trace
 export ORLY_LOG_TO_STDOUT=true
 export ORLY_LISTEN=127.0.0.1
@@ -15,7 +15,9 @@ export ORLY_PORT=3334
 export ORLY_IP_WHITELIST=127.0.0
 export ORLY_ADMINS=6d9b216ec1dc329ca43c56634e0dba6aaaf3d45ab878bdf4fa910c7117db0bfa,c284f03a874668eded145490e436b87f1a1fc565cf320e7dea93a7e96e3629d7
 export ORLY_ACL_MODE=none
+export ORLY_DATA_DIR=/tmp/orlytest
 go run . &
 sleep 5
 relay-tester ws://127.0.0.1:3334 nsec12l4072hvvyjpmkyjtdxn48xf8qj299zw60u7ddg58s2aphv3rpjqtg0tvr nsec1syvtjgqauyeezgrev5nqrp36d87apjk87043tgu2usgv8umyy6wq4yl6tu
 killall next.orly.dev
+rm -rf /tmp/orlytest
```
stacktrace.txt (new file, 14972 lines): file diff suppressed because it is too large.