forked from mleku/next.orly.dev
Compare commits
41 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
bc1527e6cf
|
|||
|
45c31795e7
|
|||
|
3ec2f60e0b
|
|||
|
110223fc4e
|
|||
|
2dd119401b
|
|||
|
6e06905773
|
|||
|
d1316a5b7a
|
|||
|
b45f0a2c51
|
|||
|
e2b7152221
|
|||
|
bf7ca1da43
|
|||
|
bb8998fef6
|
|||
|
57ac3667e6
|
|||
|
cb54891473
|
|||
|
fdcfd863e0
|
|||
|
4e96c9e2f7
|
|||
|
fb956ff09c
|
|||
|
eac6ba1410
|
|||
|
6b4b035f0c
|
|||
|
c2c6720e01
|
|||
|
dddcc682b9
|
|||
|
ddaab70d2b
|
|||
|
61cec63ca9
|
|||
|
b063dab2a3
|
|||
|
9e59d5f72b
|
|||
|
fe3893addf
|
|||
|
5eb192f208
|
|||
|
2385d1f752
|
|||
|
faad7ddc93
|
|||
|
c9314bdbd0
|
|||
|
85d806b157
|
|||
|
6207f9d426
|
|||
|
ebb5e2c0f3
|
|||
|
9dec51cd40
|
|||
|
f570660f37
|
|||
|
3d3a0fa520
|
|||
|
8ddc34d202
|
|||
|
eaa4006a75
|
|||
|
f102c205f8
|
|||
|
135508c390
|
|||
|
2491fd2738
|
|||
|
5a068378fa
|
@@ -89,3 +89,7 @@ A good typical example:
|
|||||||
// - Initializes the relay, starting its operation in a separate goroutine.
|
// - Initializes the relay, starting its operation in a separate goroutine.
|
||||||
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
use the source of the relay-tester to help guide what expectations the test has,
|
||||||
|
and use context7 for information about the nostr protocol, and use additional
|
||||||
|
log statements to help locate the cause of bugs
|
||||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -29,6 +29,7 @@ node_modules/**
|
|||||||
# and others
|
# and others
|
||||||
/go.work.sum
|
/go.work.sum
|
||||||
/secp256k1/
|
/secp256k1/
|
||||||
|
cmd/benchmark/external
|
||||||
|
|
||||||
# But not these files...
|
# But not these files...
|
||||||
!/.gitignore
|
!/.gitignore
|
||||||
@@ -87,6 +88,9 @@ node_modules/**
|
|||||||
!.gitignore
|
!.gitignore
|
||||||
!version
|
!version
|
||||||
!out.jsonl
|
!out.jsonl
|
||||||
|
!Dockerfile*
|
||||||
|
!strfry.conf
|
||||||
|
!config.toml
|
||||||
# ...even if they are in subdirectories
|
# ...even if they are in subdirectories
|
||||||
!*/
|
!*/
|
||||||
/blocklist.json
|
/blocklist.json
|
||||||
@@ -108,3 +112,4 @@ pkg/database/testrealy
|
|||||||
/.idea/inspectionProfiles/Project_Default.xml
|
/.idea/inspectionProfiles/Project_Default.xml
|
||||||
/.idea/.name
|
/.idea/.name
|
||||||
/ctxproxy.config.yml
|
/ctxproxy.config.yml
|
||||||
|
cmd/benchmark/external/**
|
||||||
@@ -29,6 +29,7 @@ type C struct {
|
|||||||
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
|
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
|
||||||
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"relay log level: fatal error warn info debug trace"`
|
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"relay log level: fatal error warn info debug trace"`
|
||||||
DBLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
|
DBLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
|
||||||
|
LogToStdout bool `env:"ORLY_LOG_TO_STDOUT" default:"false" usage:"log to stdout instead of stderr"`
|
||||||
Pprof string `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation"`
|
Pprof string `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation"`
|
||||||
IPWhitelist []string `env:"ORLY_IP_WHITELIST" usage:"comma-separated list of IP addresses to allow access from, matches on prefixes to allow private subnets, eg 10.0.0 = 10.0.0.0/8"`
|
IPWhitelist []string `env:"ORLY_IP_WHITELIST" usage:"comma-separated list of IP addresses to allow access from, matches on prefixes to allow private subnets, eg 10.0.0 = 10.0.0.0/8"`
|
||||||
Admins []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
|
Admins []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
|
||||||
@@ -73,6 +74,9 @@ func New() (cfg *C, err error) {
|
|||||||
PrintHelp(cfg, os.Stderr)
|
PrintHelp(cfg, os.Stderr)
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
if cfg.LogToStdout {
|
||||||
|
lol.Writer = os.Stdout
|
||||||
|
}
|
||||||
lol.SetLogLevel(cfg.LogLevel)
|
lol.SetLogLevel(cfg.LogLevel)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
package app
|
package app
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoders.orly/envelopes/authenvelope"
|
|
||||||
"encoders.orly/envelopes/okenvelope"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
"protocol.orly/auth"
|
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/okenvelope"
|
||||||
|
"next.orly.dev/pkg/protocol/auth"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (l *Listener) HandleAuth(b []byte) (err error) {
|
func (l *Listener) HandleAuth(b []byte) (err error) {
|
||||||
@@ -14,6 +14,11 @@ func (l *Listener) HandleAuth(b []byte) (err error) {
|
|||||||
if rem, err = env.Unmarshal(b); chk.E(err) {
|
if rem, err = env.Unmarshal(b); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
defer func() {
|
||||||
|
if env != nil && env.Event != nil {
|
||||||
|
env.Event.Free()
|
||||||
|
}
|
||||||
|
}()
|
||||||
if len(rem) > 0 {
|
if len(rem) > 0 {
|
||||||
log.I.F("extra '%s'", rem)
|
log.I.F("extra '%s'", rem)
|
||||||
}
|
}
|
||||||
@@ -41,7 +46,7 @@ func (l *Listener) HandleAuth(b []byte) (err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
log.D.F(
|
log.D.F(
|
||||||
"%s authed to pubkey,%0x", l.remote,
|
"%s authed to pubkey %0x", l.remote,
|
||||||
env.Event.Pubkey,
|
env.Event.Pubkey,
|
||||||
)
|
)
|
||||||
l.authedPubkey.Store(env.Event.Pubkey)
|
l.authedPubkey.Store(env.Event.Pubkey)
|
||||||
|
|||||||
@@ -3,9 +3,9 @@ package app
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
|
||||||
"encoders.orly/envelopes/closeenvelope"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/closeenvelope"
|
||||||
)
|
)
|
||||||
|
|
||||||
// HandleClose processes a CLOSE envelope by unmarshalling the request,
|
// HandleClose processes a CLOSE envelope by unmarshalling the request,
|
||||||
|
|||||||
@@ -3,18 +3,18 @@ package app
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"database.orly/indexes/types"
|
|
||||||
"encoders.orly/envelopes/eventenvelope"
|
|
||||||
"encoders.orly/event"
|
|
||||||
"encoders.orly/filter"
|
|
||||||
"encoders.orly/hex"
|
|
||||||
"encoders.orly/ints"
|
|
||||||
"encoders.orly/kind"
|
|
||||||
"encoders.orly/tag"
|
|
||||||
"encoders.orly/tag/atag"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
utils "utils.orly"
|
"next.orly.dev/pkg/database/indexes/types"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/event"
|
||||||
|
"next.orly.dev/pkg/encoders/filter"
|
||||||
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
|
"next.orly.dev/pkg/encoders/ints"
|
||||||
|
"next.orly.dev/pkg/encoders/kind"
|
||||||
|
"next.orly.dev/pkg/encoders/tag"
|
||||||
|
"next.orly.dev/pkg/encoders/tag/atag"
|
||||||
|
utils "next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (l *Listener) GetSerialsFromFilter(f *filter.F) (
|
func (l *Listener) GetSerialsFromFilter(f *filter.F) (
|
||||||
@@ -23,14 +23,14 @@ func (l *Listener) GetSerialsFromFilter(f *filter.F) (
|
|||||||
return l.D.GetSerialsFromFilter(f)
|
return l.D.GetSerialsFromFilter(f)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *Listener) HandleDelete(env *eventenvelope.Submission) {
|
func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
|
||||||
log.I.C(
|
// log.I.C(
|
||||||
func() string {
|
// func() string {
|
||||||
return fmt.Sprintf(
|
// return fmt.Sprintf(
|
||||||
"delete event\n%s", env.E.Serialize(),
|
// "delete event\n%s", env.E.Serialize(),
|
||||||
)
|
// )
|
||||||
},
|
// },
|
||||||
)
|
// )
|
||||||
var ownerDelete bool
|
var ownerDelete bool
|
||||||
for _, pk := range l.Admins {
|
for _, pk := range l.Admins {
|
||||||
if utils.FastEqual(pk, env.E.Pubkey) {
|
if utils.FastEqual(pk, env.E.Pubkey) {
|
||||||
@@ -39,15 +39,17 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// process the tags in the delete event
|
// process the tags in the delete event
|
||||||
var err error
|
var deleteErr error
|
||||||
|
var validDeletionFound bool
|
||||||
for _, t := range *env.E.Tags {
|
for _, t := range *env.E.Tags {
|
||||||
// first search for a tags, as these are the simplest to process
|
// first search for a tags, as these are the simplest to process
|
||||||
if utils.FastEqual(t.Key(), []byte("a")) {
|
if utils.FastEqual(t.Key(), []byte("a")) {
|
||||||
at := new(atag.T)
|
at := new(atag.T)
|
||||||
if _, err = at.Unmarshal(t.Value()); chk.E(err) {
|
if _, deleteErr = at.Unmarshal(t.Value()); chk.E(deleteErr) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if ownerDelete || utils.FastEqual(env.E.Pubkey, at.Pubkey) {
|
if ownerDelete || utils.FastEqual(env.E.Pubkey, at.Pubkey) {
|
||||||
|
validDeletionFound = true
|
||||||
// find the event and delete it
|
// find the event and delete it
|
||||||
f := &filter.F{
|
f := &filter.F{
|
||||||
Authors: tag.NewFromBytesSlice(at.Pubkey),
|
Authors: tag.NewFromBytesSlice(at.Pubkey),
|
||||||
@@ -69,13 +71,43 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) {
|
|||||||
if ev, err = l.FetchEventBySerial(s); chk.E(err) {
|
if ev, err = l.FetchEventBySerial(s); chk.E(err) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !(kind.IsReplaceable(ev.Kind) && len(at.DTag) == 0) {
|
// Only delete events that match the a-tag criteria:
|
||||||
// skip a tags with no dtag if the kind is not
|
// - For parameterized replaceable events: must have matching d-tag
|
||||||
// replaceable.
|
// - For regular replaceable events: should not have d-tag constraint
|
||||||
|
if kind.IsParameterizedReplaceable(ev.Kind) {
|
||||||
|
// For parameterized replaceable, we need a DTag to match
|
||||||
|
if len(at.DTag) == 0 {
|
||||||
|
log.I.F(
|
||||||
|
"HandleDelete: skipping parameterized replaceable event %s - no DTag in a-tag",
|
||||||
|
hex.Enc(ev.ID),
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
} else if !kind.IsReplaceable(ev.Kind) {
|
||||||
|
// For non-replaceable events, a-tags don't apply
|
||||||
|
log.I.F(
|
||||||
|
"HandleDelete: skipping non-replaceable event %s - a-tags only apply to replaceable events",
|
||||||
|
hex.Enc(ev.ID),
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Only delete events that are older than or equal to the delete event timestamp
|
||||||
|
if ev.CreatedAt > env.E.CreatedAt {
|
||||||
|
log.I.F(
|
||||||
|
"HandleDelete: skipping newer event %s (created_at=%d) - delete event timestamp is %d",
|
||||||
|
hex.Enc(ev.ID), ev.CreatedAt, env.E.CreatedAt,
|
||||||
|
)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
log.I.F(
|
||||||
|
"HandleDelete: deleting event %s via a-tag %d:%s:%s (event_time=%d, delete_time=%d)",
|
||||||
|
hex.Enc(ev.ID), at.Kind.K, hex.Enc(at.Pubkey),
|
||||||
|
string(at.DTag), ev.CreatedAt, env.E.CreatedAt,
|
||||||
|
)
|
||||||
if err = l.DeleteEventBySerial(
|
if err = l.DeleteEventBySerial(
|
||||||
l.Ctx, s, ev,
|
l.Ctx(), s, ev,
|
||||||
); chk.E(err) {
|
); chk.E(err) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -87,10 +119,16 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) {
|
|||||||
// if e tags are found, delete them if the author is signer, or one of
|
// if e tags are found, delete them if the author is signer, or one of
|
||||||
// the owners is signer
|
// the owners is signer
|
||||||
if utils.FastEqual(t.Key(), []byte("e")) {
|
if utils.FastEqual(t.Key(), []byte("e")) {
|
||||||
var dst []byte
|
val := t.Value()
|
||||||
if _, err = hex.DecBytes(dst, t.Value()); chk.E(err) {
|
if len(val) == 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
var dst []byte
|
||||||
|
if b, e := hex.Dec(string(val)); chk.E(e) {
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
dst = b
|
||||||
|
}
|
||||||
f := &filter.F{
|
f := &filter.F{
|
||||||
Ids: tag.NewFromBytesSlice(dst),
|
Ids: tag.NewFromBytesSlice(dst),
|
||||||
}
|
}
|
||||||
@@ -108,16 +146,26 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
// check that the author is the same as the signer of the
|
// check that the author is the same as the signer of the
|
||||||
// delete, for the k tag case the author is the signer of
|
// delete, for the e tag case the author is the signer of
|
||||||
// the event.
|
// the event.
|
||||||
if !utils.FastEqual(env.E.Pubkey, ev.Pubkey) {
|
if !utils.FastEqual(env.E.Pubkey, ev.Pubkey) {
|
||||||
|
log.W.F(
|
||||||
|
"HandleDelete: attempted deletion of event %s by different user - delete pubkey=%s, event pubkey=%s",
|
||||||
|
hex.Enc(ev.ID), hex.Enc(env.E.Pubkey),
|
||||||
|
hex.Enc(ev.Pubkey),
|
||||||
|
)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
validDeletionFound = true
|
||||||
// exclude delete events
|
// exclude delete events
|
||||||
if ev.Kind == kind.EventDeletion.K {
|
if ev.Kind == kind.EventDeletion.K {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err = l.DeleteEventBySerial(l.Ctx, s, ev); chk.E(err) {
|
log.I.F(
|
||||||
|
"HandleDelete: deleting event %s by authorized user %s",
|
||||||
|
hex.Enc(ev.ID), hex.Enc(env.E.Pubkey),
|
||||||
|
)
|
||||||
|
if err = l.DeleteEventBySerial(l.Ctx(), s, ev); chk.E(err) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -164,5 +212,11 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) {
|
|||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If no valid deletions were found, return an error
|
||||||
|
if !validDeletionFound {
|
||||||
|
return fmt.Errorf("blocked: cannot delete events that belong to other users")
|
||||||
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,18 +1,20 @@
|
|||||||
package app
|
package app
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
acl "acl.orly"
|
|
||||||
"encoders.orly/envelopes/authenvelope"
|
|
||||||
"encoders.orly/envelopes/eventenvelope"
|
|
||||||
"encoders.orly/envelopes/okenvelope"
|
|
||||||
"encoders.orly/kind"
|
|
||||||
"encoders.orly/reason"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
utils "utils.orly"
|
acl "next.orly.dev/pkg/acl"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/okenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/kind"
|
||||||
|
"next.orly.dev/pkg/encoders/reason"
|
||||||
|
utils "next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (l *Listener) HandleEvent(msg []byte) (err error) {
|
func (l *Listener) HandleEvent(msg []byte) (err error) {
|
||||||
@@ -21,6 +23,11 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
|
|||||||
if msg, err = env.Unmarshal(msg); chk.E(err) {
|
if msg, err = env.Unmarshal(msg); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
defer func() {
|
||||||
|
if env != nil && env.E != nil {
|
||||||
|
env.E.Free()
|
||||||
|
}
|
||||||
|
}()
|
||||||
if len(msg) > 0 {
|
if len(msg) > 0 {
|
||||||
log.I.F("extra '%s'", msg)
|
log.I.F("extra '%s'", msg)
|
||||||
}
|
}
|
||||||
@@ -56,21 +63,13 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
|
|||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// // send a challenge to the client to auth if an ACL is active and not authed
|
|
||||||
// if acl.Registry.Active.Load() != "none" && l.authedPubkey.Load() == nil {
|
|
||||||
// log.D.F("sending challenge to %s", l.remote)
|
|
||||||
// if err = authenvelope.NewChallengeWith(l.challenge.Load()).
|
|
||||||
// Write(l); chk.E(err) {
|
|
||||||
// // return
|
|
||||||
// }
|
|
||||||
// // ACL is enabled so return and wait for auth
|
|
||||||
// // return
|
|
||||||
// }
|
|
||||||
// check permissions of user
|
// check permissions of user
|
||||||
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load())
|
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
|
||||||
switch accessLevel {
|
switch accessLevel {
|
||||||
case "none":
|
case "none":
|
||||||
log.D.F("handle event: sending CLOSED to %s", l.remote)
|
log.D.F(
|
||||||
|
"handle event: sending 'OK,false,auth-required...' to %s", l.remote,
|
||||||
|
)
|
||||||
if err = okenvelope.NewFrom(
|
if err = okenvelope.NewFrom(
|
||||||
env.Id(), false,
|
env.Id(), false,
|
||||||
reason.AuthRequired.F("auth required for write access"),
|
reason.AuthRequired.F("auth required for write access"),
|
||||||
@@ -84,26 +83,39 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
|
|||||||
}
|
}
|
||||||
return
|
return
|
||||||
case "read":
|
case "read":
|
||||||
log.D.F("handle event: sending CLOSED to %s", l.remote)
|
log.D.F(
|
||||||
|
"handle event: sending 'OK,false,auth-required:...' to %s",
|
||||||
|
l.remote,
|
||||||
|
)
|
||||||
if err = okenvelope.NewFrom(
|
if err = okenvelope.NewFrom(
|
||||||
env.Id(), false,
|
env.Id(), false,
|
||||||
reason.AuthRequired.F("auth required for write access"),
|
reason.AuthRequired.F("auth required for write access"),
|
||||||
).Write(l); chk.E(err) {
|
).Write(l); chk.E(err) {
|
||||||
// return
|
return
|
||||||
}
|
}
|
||||||
log.D.F("handle event: sending challenge to %s", l.remote)
|
log.D.F("handle event: sending challenge to %s", l.remote)
|
||||||
if err = authenvelope.NewChallengeWith(l.challenge.Load()).
|
if err = authenvelope.NewChallengeWith(l.challenge.Load()).
|
||||||
Write(l); chk.E(err) {
|
Write(l); chk.E(err) {
|
||||||
// return
|
return
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
// user has write access or better, continue
|
// user has write access or better, continue
|
||||||
log.D.F("user has %s access", accessLevel)
|
// log.D.F("user has %s access", accessLevel)
|
||||||
}
|
}
|
||||||
// if the event is a delete, process the delete
|
// if the event is a delete, process the delete
|
||||||
if env.E.Kind == kind.EventDeletion.K {
|
if env.E.Kind == kind.EventDeletion.K {
|
||||||
l.HandleDelete(env)
|
if err = l.HandleDelete(env); err != nil {
|
||||||
|
if strings.HasPrefix(err.Error(), "blocked:") {
|
||||||
|
errStr := err.Error()[len("blocked: "):len(err.Error())]
|
||||||
|
if err = Ok.Error(
|
||||||
|
l, env, errStr,
|
||||||
|
); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
// check if the event was deleted
|
// check if the event was deleted
|
||||||
if err = l.CheckForDeleted(env.E, l.Admins); err != nil {
|
if err = l.CheckForDeleted(env.E, l.Admins); err != nil {
|
||||||
@@ -117,21 +129,49 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// store the event
|
// store the event - use a separate context to prevent cancellation issues
|
||||||
log.I.F("saving event %0x, %s", env.E.ID, env.E.Serialize())
|
saveCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||||
if _, _, err = l.SaveEvent(l.Ctx, env.E); chk.E(err) {
|
defer cancel()
|
||||||
|
// log.I.F("saving event %0x, %s", env.E.ID, env.E.Serialize())
|
||||||
|
if _, _, err = l.SaveEvent(saveCtx, env.E); err != nil {
|
||||||
|
if strings.HasPrefix(err.Error(), "blocked:") {
|
||||||
|
errStr := err.Error()[len("blocked: "):len(err.Error())]
|
||||||
|
if err = Ok.Error(
|
||||||
|
l, env, errStr,
|
||||||
|
); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
chk.E(err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// if a follow list was saved, reconfigure ACLs now that it is persisted
|
|
||||||
if env.E.Kind == kind.FollowList.K {
|
|
||||||
if err = acl.Registry.Configure(); chk.E(err) {
|
|
||||||
}
|
|
||||||
}
|
|
||||||
l.publishers.Deliver(env.E)
|
|
||||||
// Send a success response storing
|
// Send a success response storing
|
||||||
if err = Ok.Ok(l, env, ""); chk.E(err) {
|
if err = Ok.Ok(l, env, ""); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
// Deliver the event to subscribers immediately after sending OK response
|
||||||
|
l.publishers.Deliver(env.E)
|
||||||
log.D.F("saved event %0x", env.E.ID)
|
log.D.F("saved event %0x", env.E.ID)
|
||||||
|
var isNewFromAdmin bool
|
||||||
|
for _, admin := range l.Admins {
|
||||||
|
if utils.FastEqual(admin, env.E.Pubkey) {
|
||||||
|
isNewFromAdmin = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if isNewFromAdmin {
|
||||||
|
log.I.F("new event from admin %0x", env.E.Pubkey)
|
||||||
|
// if a follow list was saved, reconfigure ACLs now that it is persisted
|
||||||
|
if env.E.Kind == kind.FollowList.K ||
|
||||||
|
env.E.Kind == kind.RelayListMetadata.K {
|
||||||
|
// Run ACL reconfiguration asynchronously to prevent blocking websocket operations
|
||||||
|
go func() {
|
||||||
|
if err := acl.Registry.Configure(); chk.E(err) {
|
||||||
|
log.E.F("failed to reconfigure ACL: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,56 +1,48 @@
|
|||||||
package app
|
package app
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"encoders.orly/envelopes"
|
|
||||||
"encoders.orly/envelopes/authenvelope"
|
|
||||||
"encoders.orly/envelopes/closeenvelope"
|
|
||||||
"encoders.orly/envelopes/eventenvelope"
|
|
||||||
"encoders.orly/envelopes/noticeenvelope"
|
|
||||||
"encoders.orly/envelopes/reqenvelope"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/errorf"
|
"lol.mleku.dev/errorf"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/closeenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/noticeenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (l *Listener) HandleMessage(msg []byte, remote string) {
|
func (l *Listener) HandleMessage(msg []byte, remote string) {
|
||||||
log.D.C(
|
log.D.F("%s received message:\n%s", remote, msg)
|
||||||
func() string {
|
|
||||||
return fmt.Sprintf(
|
|
||||||
"%s received message:\n%s", remote, msg,
|
|
||||||
)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
var err error
|
var err error
|
||||||
var t string
|
var t string
|
||||||
var rem []byte
|
var rem []byte
|
||||||
if t, rem, err = envelopes.Identify(msg); !chk.E(err) {
|
if t, rem, err = envelopes.Identify(msg); !chk.E(err) {
|
||||||
switch t {
|
switch t {
|
||||||
case eventenvelope.L:
|
case eventenvelope.L:
|
||||||
log.D.F("eventenvelope: %s", rem)
|
// log.D.F("eventenvelope: %s %s", remote, rem)
|
||||||
err = l.HandleEvent(rem)
|
err = l.HandleEvent(rem)
|
||||||
case reqenvelope.L:
|
case reqenvelope.L:
|
||||||
log.D.F("reqenvelope: %s", rem)
|
// log.D.F("reqenvelope: %s %s", remote, rem)
|
||||||
err = l.HandleReq(rem)
|
err = l.HandleReq(rem)
|
||||||
case closeenvelope.L:
|
case closeenvelope.L:
|
||||||
log.D.F("closeenvelope: %s", rem)
|
// log.D.F("closeenvelope: %s %s", remote, rem)
|
||||||
err = l.HandleClose(rem)
|
err = l.HandleClose(rem)
|
||||||
case authenvelope.L:
|
case authenvelope.L:
|
||||||
log.D.F("authenvelope: %s", rem)
|
// log.D.F("authenvelope: %s %s", remote, rem)
|
||||||
err = l.HandleAuth(rem)
|
err = l.HandleAuth(rem)
|
||||||
default:
|
default:
|
||||||
err = errorf.E("unknown envelope type %s\n%s", t, rem)
|
err = errorf.E("unknown envelope type %s\n%s", t, rem)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.D.C(
|
// log.D.C(
|
||||||
func() string {
|
// func() string {
|
||||||
return fmt.Sprintf(
|
// return fmt.Sprintf(
|
||||||
"notice->%s %s", remote, err,
|
// "notice->%s %s", remote, err,
|
||||||
)
|
// )
|
||||||
},
|
// },
|
||||||
)
|
// )
|
||||||
if err = noticeenvelope.NewFrom(err.Error()).Write(l); chk.E(err) {
|
if err = noticeenvelope.NewFrom(err.Error()).Write(l); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,8 +7,8 @@ import (
|
|||||||
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
|
"next.orly.dev/pkg/protocol/relayinfo"
|
||||||
"next.orly.dev/pkg/version"
|
"next.orly.dev/pkg/version"
|
||||||
"protocol.orly/relayinfo"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// HandleRelayInfo generates and returns a relay information document in JSON
|
// HandleRelayInfo generates and returns a relay information document in JSON
|
||||||
@@ -33,32 +33,32 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
|
|||||||
relayinfo.BasicProtocol,
|
relayinfo.BasicProtocol,
|
||||||
// relayinfo.Authentication,
|
// relayinfo.Authentication,
|
||||||
// relayinfo.EncryptedDirectMessage,
|
// relayinfo.EncryptedDirectMessage,
|
||||||
// relayinfo.EventDeletion,
|
relayinfo.EventDeletion,
|
||||||
relayinfo.RelayInformationDocument,
|
relayinfo.RelayInformationDocument,
|
||||||
// relayinfo.GenericTagQueries,
|
// relayinfo.GenericTagQueries,
|
||||||
// relayinfo.NostrMarketplace,
|
// relayinfo.NostrMarketplace,
|
||||||
// relayinfo.EventTreatment,
|
relayinfo.EventTreatment,
|
||||||
// relayinfo.CommandResults,
|
// relayinfo.CommandResults,
|
||||||
// relayinfo.ParameterizedReplaceableEvents,
|
relayinfo.ParameterizedReplaceableEvents,
|
||||||
// relayinfo.ExpirationTimestamp,
|
// relayinfo.ExpirationTimestamp,
|
||||||
// relayinfo.ProtectedEvents,
|
relayinfo.ProtectedEvents,
|
||||||
// relayinfo.RelayListMetadata,
|
relayinfo.RelayListMetadata,
|
||||||
)
|
)
|
||||||
if s.Config.ACLMode != "none" {
|
if s.Config.ACLMode != "none" {
|
||||||
supportedNIPs = relayinfo.GetList(
|
supportedNIPs = relayinfo.GetList(
|
||||||
relayinfo.BasicProtocol,
|
relayinfo.BasicProtocol,
|
||||||
relayinfo.Authentication,
|
relayinfo.Authentication,
|
||||||
// relayinfo.EncryptedDirectMessage,
|
// relayinfo.EncryptedDirectMessage,
|
||||||
// relayinfo.EventDeletion,
|
relayinfo.EventDeletion,
|
||||||
relayinfo.RelayInformationDocument,
|
relayinfo.RelayInformationDocument,
|
||||||
// relayinfo.GenericTagQueries,
|
// relayinfo.GenericTagQueries,
|
||||||
// relayinfo.NostrMarketplace,
|
// relayinfo.NostrMarketplace,
|
||||||
// relayinfo.EventTreatment,
|
relayinfo.EventTreatment,
|
||||||
// relayinfo.CommandResults,
|
// relayinfo.CommandResults,
|
||||||
// relayinfo.ParameterizedReplaceableEvents,
|
// relayinfo.ParameterizedReplaceableEvents,
|
||||||
// relayinfo.ExpirationTimestamp,
|
// relayinfo.ExpirationTimestamp,
|
||||||
// relayinfo.ProtectedEvents,
|
relayinfo.ProtectedEvents,
|
||||||
// relayinfo.RelayListMetadata,
|
relayinfo.RelayListMetadata,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
sort.Sort(supportedNIPs)
|
sort.Sort(supportedNIPs)
|
||||||
|
|||||||
@@ -1,68 +1,51 @@
|
|||||||
package app
|
package app
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
acl "acl.orly"
|
|
||||||
"encoders.orly/envelopes/authenvelope"
|
|
||||||
"encoders.orly/envelopes/closedenvelope"
|
|
||||||
"encoders.orly/envelopes/eoseenvelope"
|
|
||||||
"encoders.orly/envelopes/eventenvelope"
|
|
||||||
"encoders.orly/envelopes/okenvelope"
|
|
||||||
"encoders.orly/envelopes/reqenvelope"
|
|
||||||
"encoders.orly/event"
|
|
||||||
"encoders.orly/filter"
|
|
||||||
"encoders.orly/reason"
|
|
||||||
"encoders.orly/tag"
|
|
||||||
"github.com/dgraph-io/badger/v4"
|
"github.com/dgraph-io/badger/v4"
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
"utils.orly/normalize"
|
acl "next.orly.dev/pkg/acl"
|
||||||
"utils.orly/pointers"
|
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/closedenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/eoseenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/okenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/event"
|
||||||
|
"next.orly.dev/pkg/encoders/filter"
|
||||||
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
|
"next.orly.dev/pkg/encoders/kind"
|
||||||
|
"next.orly.dev/pkg/encoders/reason"
|
||||||
|
"next.orly.dev/pkg/encoders/tag"
|
||||||
|
utils "next.orly.dev/pkg/utils"
|
||||||
|
"next.orly.dev/pkg/utils/normalize"
|
||||||
|
"next.orly.dev/pkg/utils/pointers"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (l *Listener) HandleReq(msg []byte) (
|
func (l *Listener) HandleReq(msg []byte) (err error) {
|
||||||
err error,
|
log.T.F("HandleReq: START processing from %s\n%s\n", l.remote, msg)
|
||||||
) {
|
|
||||||
var rem []byte
|
var rem []byte
|
||||||
env := reqenvelope.New()
|
env := reqenvelope.New()
|
||||||
if rem, err = env.Unmarshal(msg); chk.E(err) {
|
if rem, err = env.Unmarshal(msg); chk.E(err) {
|
||||||
return normalize.Error.Errorf(err.Error())
|
return normalize.Error.Errorf(err.Error())
|
||||||
}
|
}
|
||||||
if len(rem) > 0 {
|
if len(rem) > 0 {
|
||||||
log.I.F("extra '%s'", rem)
|
log.I.F("REQ extra bytes: '%s'", rem)
|
||||||
}
|
}
|
||||||
// // send a challenge to the client to auth if an ACL is active and not authed
|
|
||||||
// if acl.Registry.Active.Load() != "none" && l.authedPubkey.Load() == nil {
|
|
||||||
// log.D.F("sending challenge to %s", l.remote)
|
|
||||||
// if err = authenvelope.NewChallengeWith(l.challenge.Load()).
|
|
||||||
// Write(l); chk.E(err) {
|
|
||||||
// // return
|
|
||||||
// }
|
|
||||||
// log.D.F("sending CLOSED to %s", l.remote)
|
|
||||||
// if err = closedenvelope.NewFrom(
|
|
||||||
// env.Subscription, reason.AuthRequired.F("auth required for access"),
|
|
||||||
// ).Write(l); chk.E(err) {
|
|
||||||
// return
|
|
||||||
// }
|
|
||||||
// // ACL is enabled so return and wait for auth
|
|
||||||
// // return
|
|
||||||
// }
|
|
||||||
// send a challenge to the client to auth if an ACL is active
|
// send a challenge to the client to auth if an ACL is active
|
||||||
if acl.Registry.Active.Load() != "none" {
|
if acl.Registry.Active.Load() != "none" {
|
||||||
// log.D.F("sending CLOSED to %s", l.remote)
|
|
||||||
// if err = closedenvelope.NewFrom(
|
|
||||||
// env.Subscription, reason.AuthRequired.F("auth required for access"),
|
|
||||||
// ).Write(l); chk.E(err) {
|
|
||||||
// // return
|
|
||||||
// }
|
|
||||||
if err = authenvelope.NewChallengeWith(l.challenge.Load()).
|
if err = authenvelope.NewChallengeWith(l.challenge.Load()).
|
||||||
Write(l); chk.E(err) {
|
Write(l); chk.E(err) {
|
||||||
// return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// check permissions of user
|
// check permissions of user
|
||||||
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load())
|
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
|
||||||
switch accessLevel {
|
switch accessLevel {
|
||||||
case "none":
|
case "none":
|
||||||
if err = okenvelope.NewFrom(
|
if err = okenvelope.NewFrom(
|
||||||
@@ -78,23 +61,160 @@ func (l *Listener) HandleReq(msg []byte) (
|
|||||||
}
|
}
|
||||||
var events event.S
|
var events event.S
|
||||||
for _, f := range *env.Filters {
|
for _, f := range *env.Filters {
|
||||||
|
idsLen := 0
|
||||||
|
kindsLen := 0
|
||||||
|
authorsLen := 0
|
||||||
|
tagsLen := 0
|
||||||
|
if f != nil {
|
||||||
|
if f.Ids != nil {
|
||||||
|
idsLen = f.Ids.Len()
|
||||||
|
}
|
||||||
|
if f.Kinds != nil {
|
||||||
|
kindsLen = f.Kinds.Len()
|
||||||
|
}
|
||||||
|
if f.Authors != nil {
|
||||||
|
authorsLen = f.Authors.Len()
|
||||||
|
}
|
||||||
|
if f.Tags != nil {
|
||||||
|
tagsLen = f.Tags.Len()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.T.F(
|
||||||
|
"REQ %s: filter summary ids=%d kinds=%d authors=%d tags=%d",
|
||||||
|
env.Subscription, idsLen, kindsLen, authorsLen, tagsLen,
|
||||||
|
)
|
||||||
|
if f != nil && f.Authors != nil && f.Authors.Len() > 0 {
|
||||||
|
var authors []string
|
||||||
|
for _, a := range f.Authors.T {
|
||||||
|
authors = append(authors, hex.Enc(a))
|
||||||
|
}
|
||||||
|
log.T.F("REQ %s: authors=%v", env.Subscription, authors)
|
||||||
|
}
|
||||||
|
if f != nil && f.Kinds != nil && f.Kinds.Len() > 0 {
|
||||||
|
log.T.F("REQ %s: kinds=%v", env.Subscription, f.Kinds.ToUint16())
|
||||||
|
}
|
||||||
|
if f != nil && f.Ids != nil && f.Ids.Len() > 0 {
|
||||||
|
var ids []string
|
||||||
|
for _, id := range f.Ids.T {
|
||||||
|
ids = append(ids, hex.Enc(id))
|
||||||
|
}
|
||||||
|
var lim any
|
||||||
|
if pointers.Present(f.Limit) {
|
||||||
|
lim = *f.Limit
|
||||||
|
} else {
|
||||||
|
lim = nil
|
||||||
|
}
|
||||||
|
log.T.F(
|
||||||
|
"REQ %s: ids filter count=%d ids=%v limit=%v", env.Subscription,
|
||||||
|
f.Ids.Len(), ids, lim,
|
||||||
|
)
|
||||||
|
}
|
||||||
if pointers.Present(f.Limit) {
|
if pointers.Present(f.Limit) {
|
||||||
if *f.Limit == 0 {
|
if *f.Limit == 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if events, err = l.QueryEvents(l.Ctx, f); chk.E(err) {
|
// Use a separate context for QueryEvents to prevent cancellation issues
|
||||||
|
queryCtx, cancel := context.WithTimeout(
|
||||||
|
context.Background(), 30*time.Second,
|
||||||
|
)
|
||||||
|
defer cancel()
|
||||||
|
log.T.F(
|
||||||
|
"HandleReq: About to QueryEvents for %s, main context done: %v",
|
||||||
|
l.remote, l.ctx.Err() != nil,
|
||||||
|
)
|
||||||
|
if events, err = l.QueryEvents(queryCtx, f); chk.E(err) {
|
||||||
if errors.Is(err, badger.ErrDBClosed) {
|
if errors.Is(err, badger.ErrDBClosed) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
log.T.F("HandleReq: QueryEvents error for %s: %v", l.remote, err)
|
||||||
err = nil
|
err = nil
|
||||||
}
|
}
|
||||||
|
defer func() {
|
||||||
|
for _, ev := range events {
|
||||||
|
ev.Free()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
log.T.F(
|
||||||
|
"HandleReq: QueryEvents completed for %s, found %d events",
|
||||||
|
l.remote, len(events),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
// write out the events to the socket
|
var tmp event.S
|
||||||
|
privCheck:
|
||||||
|
for _, ev := range events {
|
||||||
|
if kind.IsPrivileged(ev.Kind) &&
|
||||||
|
accessLevel != "admin" { // admins can see all events
|
||||||
|
log.T.C(
|
||||||
|
func() string {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"checking privileged event %0x", ev.ID,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
pk := l.authedPubkey.Load()
|
||||||
|
if pk == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if utils.FastEqual(ev.Pubkey, pk) {
|
||||||
|
log.T.C(
|
||||||
|
func() string {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"privileged event %s is for logged in pubkey %0x",
|
||||||
|
ev.ID, pk,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
tmp = append(tmp, ev)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
pTags := ev.Tags.GetAll([]byte("p"))
|
||||||
|
for _, pTag := range pTags {
|
||||||
|
var pt []byte
|
||||||
|
if pt, err = hex.Dec(string(pTag.Value())); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if utils.FastEqual(pt, pk) {
|
||||||
|
log.T.C(
|
||||||
|
func() string {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"privileged event %s is for logged in pubkey %0x",
|
||||||
|
ev.ID, pk,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
tmp = append(tmp, ev)
|
||||||
|
continue privCheck
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.T.C(
|
||||||
|
func() string {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"privileged event %s does not contain the logged in pubkey %0x",
|
||||||
|
ev.ID, pk,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
tmp = append(tmp, ev)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
events = tmp
|
||||||
seen := make(map[string]struct{})
|
seen := make(map[string]struct{})
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
// track the IDs we've sent
|
log.D.C(
|
||||||
seen[string(ev.ID)] = struct{}{}
|
func() string {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
|
||||||
|
hex.Enc(ev.ID), ev.Kind,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
log.T.C(
|
||||||
|
func() string {
|
||||||
|
return fmt.Sprintf("event:\n%s\n", ev.Serialize())
|
||||||
|
},
|
||||||
|
)
|
||||||
var res *eventenvelope.Result
|
var res *eventenvelope.Result
|
||||||
if res, err = eventenvelope.NewResultWith(
|
if res, err = eventenvelope.NewResultWith(
|
||||||
env.Subscription, ev,
|
env.Subscription, ev,
|
||||||
@@ -104,6 +224,8 @@ func (l *Listener) HandleReq(msg []byte) (
|
|||||||
if err = res.Write(l); chk.E(err) {
|
if err = res.Write(l); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
// track the IDs we've sent (use hex encoding for stable key)
|
||||||
|
seen[hex.Enc(ev.ID)] = struct{}{}
|
||||||
}
|
}
|
||||||
// write the EOSE to signal to the client that all events found have been
|
// write the EOSE to signal to the client that all events found have been
|
||||||
// sent.
|
// sent.
|
||||||
@@ -115,6 +237,10 @@ func (l *Listener) HandleReq(msg []byte) (
|
|||||||
// if the query was for just Ids, we know there can't be any more results,
|
// if the query was for just Ids, we know there can't be any more results,
|
||||||
// so cancel the subscription.
|
// so cancel the subscription.
|
||||||
cancel := true
|
cancel := true
|
||||||
|
log.T.F(
|
||||||
|
"REQ %s: computing cancel/subscription; events_sent=%d",
|
||||||
|
env.Subscription, len(events),
|
||||||
|
)
|
||||||
var subbedFilters filter.S
|
var subbedFilters filter.S
|
||||||
for _, f := range *env.Filters {
|
for _, f := range *env.Filters {
|
||||||
if f.Ids.Len() < 1 {
|
if f.Ids.Len() < 1 {
|
||||||
@@ -123,12 +249,16 @@ func (l *Listener) HandleReq(msg []byte) (
|
|||||||
} else {
|
} else {
|
||||||
// remove the IDs that we already sent
|
// remove the IDs that we already sent
|
||||||
var notFounds [][]byte
|
var notFounds [][]byte
|
||||||
for _, ev := range events {
|
for _, id := range f.Ids.T {
|
||||||
if _, ok := seen[string(ev.ID)]; ok {
|
if _, ok := seen[hex.Enc(id)]; ok {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
notFounds = append(notFounds, ev.ID)
|
notFounds = append(notFounds, id)
|
||||||
}
|
}
|
||||||
|
log.T.F(
|
||||||
|
"REQ %s: ids outstanding=%d of %d", env.Subscription,
|
||||||
|
len(notFounds), f.Ids.Len(),
|
||||||
|
)
|
||||||
// if all were found, don't add to subbedFilters
|
// if all were found, don't add to subbedFilters
|
||||||
if len(notFounds) == 0 {
|
if len(notFounds) == 0 {
|
||||||
continue
|
continue
|
||||||
@@ -150,11 +280,12 @@ func (l *Listener) HandleReq(msg []byte) (
|
|||||||
if !cancel {
|
if !cancel {
|
||||||
l.publishers.Receive(
|
l.publishers.Receive(
|
||||||
&W{
|
&W{
|
||||||
Conn: l.conn,
|
Conn: l.conn,
|
||||||
remote: l.remote,
|
remote: l.remote,
|
||||||
Id: string(env.Subscription),
|
Id: string(env.Subscription),
|
||||||
Receiver: receiver,
|
Receiver: receiver,
|
||||||
Filters: env.Filters,
|
Filters: env.Filters,
|
||||||
|
AuthedPubkey: l.authedPubkey.Load(),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
@@ -164,5 +295,6 @@ func (l *Listener) HandleReq(msg []byte) (
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
log.T.F("HandleReq: COMPLETED processing from %s", l.remote)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,17 +7,20 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"encoders.orly/hex"
|
|
||||||
"github.com/coder/websocket"
|
"github.com/coder/websocket"
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
"utils.orly/units"
|
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
|
"next.orly.dev/pkg/utils/units"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
DefaultWriteWait = 10 * time.Second
|
DefaultWriteWait = 10 * time.Second
|
||||||
DefaultPongWait = 60 * time.Second
|
DefaultPongWait = 60 * time.Second
|
||||||
DefaultPingWait = DefaultPongWait / 2
|
DefaultPingWait = DefaultPongWait / 2
|
||||||
|
DefaultReadTimeout = 3 * time.Second // Read timeout to detect stalled connections
|
||||||
|
DefaultWriteTimeout = 3 * time.Second
|
||||||
DefaultMaxMessageSize = 1 * units.Mb
|
DefaultMaxMessageSize = 1 * units.Mb
|
||||||
|
|
||||||
// CloseMessage denotes a close control message. The optional message
|
// CloseMessage denotes a close control message. The optional message
|
||||||
@@ -70,10 +73,18 @@ whitelist:
|
|||||||
chal := make([]byte, 32)
|
chal := make([]byte, 32)
|
||||||
rand.Read(chal)
|
rand.Read(chal)
|
||||||
listener.challenge.Store([]byte(hex.Enc(chal)))
|
listener.challenge.Store([]byte(hex.Enc(chal)))
|
||||||
|
// If admins are configured, immediately prompt client to AUTH (NIP-42)
|
||||||
|
if len(s.Config.Admins) > 0 {
|
||||||
|
// log.D.F("sending initial AUTH challenge to %s", remote)
|
||||||
|
if err = authenvelope.NewChallengeWith(listener.challenge.Load()).
|
||||||
|
Write(listener); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
ticker := time.NewTicker(DefaultPingWait)
|
ticker := time.NewTicker(DefaultPingWait)
|
||||||
go s.Pinger(ctx, conn, ticker, cancel)
|
go s.Pinger(ctx, conn, ticker, cancel)
|
||||||
defer func() {
|
defer func() {
|
||||||
log.D.F("closing websocket connection from %s", remote)
|
// log.D.F("closing websocket connection from %s", remote)
|
||||||
cancel()
|
cancel()
|
||||||
ticker.Stop()
|
ticker.Stop()
|
||||||
listener.publishers.Receive(&W{Cancel: true})
|
listener.publishers.Receive(&W{Cancel: true})
|
||||||
@@ -87,12 +98,33 @@ whitelist:
|
|||||||
var typ websocket.MessageType
|
var typ websocket.MessageType
|
||||||
var msg []byte
|
var msg []byte
|
||||||
log.T.F("waiting for message from %s", remote)
|
log.T.F("waiting for message from %s", remote)
|
||||||
if typ, msg, err = conn.Read(ctx); chk.E(err) {
|
|
||||||
|
// Create a read context with timeout to prevent indefinite blocking
|
||||||
|
readCtx, readCancel := context.WithTimeout(ctx, DefaultReadTimeout)
|
||||||
|
typ, msg, err = conn.Read(readCtx)
|
||||||
|
readCancel()
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
if strings.Contains(
|
if strings.Contains(
|
||||||
err.Error(), "use of closed network connection",
|
err.Error(), "use of closed network connection",
|
||||||
) {
|
) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
// Handle timeout errors - occurs when client becomes unresponsive
|
||||||
|
if strings.Contains(err.Error(), "context deadline exceeded") {
|
||||||
|
log.T.F(
|
||||||
|
"connection from %s timed out after %v", remote,
|
||||||
|
DefaultReadTimeout,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Handle EOF errors gracefully - these occur when client closes connection
|
||||||
|
// or sends incomplete/malformed WebSocket frames
|
||||||
|
if strings.Contains(err.Error(), "EOF") ||
|
||||||
|
strings.Contains(err.Error(), "failed to read frame header") {
|
||||||
|
log.T.F("connection from %s closed: %v", remote, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
status := websocket.CloseStatus(err)
|
status := websocket.CloseStatus(err)
|
||||||
switch status {
|
switch status {
|
||||||
case websocket.StatusNormalClosure,
|
case websocket.StatusNormalClosure,
|
||||||
@@ -100,17 +132,27 @@ whitelist:
|
|||||||
websocket.StatusNoStatusRcvd,
|
websocket.StatusNoStatusRcvd,
|
||||||
websocket.StatusAbnormalClosure,
|
websocket.StatusAbnormalClosure,
|
||||||
websocket.StatusProtocolError:
|
websocket.StatusProtocolError:
|
||||||
|
log.T.F(
|
||||||
|
"connection from %s closed with status: %v", remote, status,
|
||||||
|
)
|
||||||
default:
|
default:
|
||||||
log.E.F("unexpected close error from %s: %v", remote, err)
|
log.E.F("unexpected close error from %s: %v", remote, err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if typ == PingMessage {
|
if typ == PingMessage {
|
||||||
if err = conn.Write(ctx, PongMessage, msg); chk.E(err) {
|
// Create a write context with timeout for pong response
|
||||||
|
writeCtx, writeCancel := context.WithTimeout(
|
||||||
|
ctx, DefaultWriteTimeout,
|
||||||
|
)
|
||||||
|
if err = conn.Write(writeCtx, PongMessage, msg); chk.E(err) {
|
||||||
|
writeCancel()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
writeCancel()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
log.T.F("received message from %s: %s", remote, string(msg))
|
||||||
go listener.HandleMessage(msg, remote)
|
go listener.HandleMessage(msg, remote)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -127,9 +169,13 @@ func (s *Server) Pinger(
|
|||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
if err = conn.Ping(ctx); chk.E(err) {
|
// Create a write context with timeout for ping operation
|
||||||
|
pingCtx, pingCancel := context.WithTimeout(ctx, DefaultWriteTimeout)
|
||||||
|
if err = conn.Ping(pingCtx); chk.E(err) {
|
||||||
|
pingCancel()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
pingCancel()
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import (
|
|||||||
|
|
||||||
"github.com/coder/websocket"
|
"github.com/coder/websocket"
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"utils.orly/atomic"
|
"next.orly.dev/pkg/utils/atomic"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Listener struct {
|
type Listener struct {
|
||||||
@@ -19,8 +19,21 @@ type Listener struct {
|
|||||||
authedPubkey atomic.Bytes
|
authedPubkey atomic.Bytes
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Ctx returns the listener's context, but creates a new context for each operation
|
||||||
|
// to prevent cancellation from affecting subsequent operations
|
||||||
|
func (l *Listener) Ctx() context.Context {
|
||||||
|
return l.ctx
|
||||||
|
}
|
||||||
|
|
||||||
func (l *Listener) Write(p []byte) (n int, err error) {
|
func (l *Listener) Write(p []byte) (n int, err error) {
|
||||||
if err = l.conn.Write(l.ctx, websocket.MessageText, p); chk.E(err) {
|
// Use a separate context with timeout for writes to prevent race conditions
|
||||||
|
// where the main connection context gets cancelled while writing events
|
||||||
|
writeCtx, cancel := context.WithTimeout(
|
||||||
|
context.Background(), DefaultWriteTimeout,
|
||||||
|
)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
if err = l.conn.Write(writeCtx, websocket.MessageText, p); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
n = len(p)
|
n = len(p)
|
||||||
|
|||||||
10
app/main.go
10
app/main.go
@@ -5,12 +5,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
database "database.orly"
|
|
||||||
"encoders.orly/bech32encoding"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
"next.orly.dev/app/config"
|
"next.orly.dev/app/config"
|
||||||
"protocol.orly/publish"
|
database "next.orly.dev/pkg/database"
|
||||||
|
"next.orly.dev/pkg/encoders/bech32encoding"
|
||||||
|
"next.orly.dev/pkg/protocol/publish"
|
||||||
)
|
)
|
||||||
|
|
||||||
func Run(
|
func Run(
|
||||||
@@ -28,13 +28,15 @@ func Run(
|
|||||||
var err error
|
var err error
|
||||||
var adminKeys [][]byte
|
var adminKeys [][]byte
|
||||||
for _, admin := range cfg.Admins {
|
for _, admin := range cfg.Admins {
|
||||||
|
if len(admin) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
var pk []byte
|
var pk []byte
|
||||||
if pk, err = bech32encoding.NpubOrHexToPublicKeyBinary(admin); chk.E(err) {
|
if pk, err = bech32encoding.NpubOrHexToPublicKeyBinary(admin); chk.E(err) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
adminKeys = append(adminKeys, pk)
|
adminKeys = append(adminKeys, pk)
|
||||||
}
|
}
|
||||||
|
|
||||||
// start listener
|
// start listener
|
||||||
l := &Server{
|
l := &Server{
|
||||||
Ctx: ctx,
|
Ctx: ctx,
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
package app
|
package app
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoders.orly/envelopes/eventenvelope"
|
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||||
"encoders.orly/envelopes/okenvelope"
|
"next.orly.dev/pkg/encoders/envelopes/okenvelope"
|
||||||
"encoders.orly/reason"
|
"next.orly.dev/pkg/encoders/reason"
|
||||||
)
|
)
|
||||||
|
|
||||||
// OK represents a function that processes events or operations, using provided
|
// OK represents a function that processes events or operations, using provided
|
||||||
|
|||||||
155
app/publisher.go
155
app/publisher.go
@@ -5,20 +5,24 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"encoders.orly/envelopes/eventenvelope"
|
|
||||||
"encoders.orly/event"
|
|
||||||
"encoders.orly/filter"
|
|
||||||
"github.com/coder/websocket"
|
"github.com/coder/websocket"
|
||||||
"interfaces.orly/publisher"
|
|
||||||
"interfaces.orly/typer"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/event"
|
||||||
|
"next.orly.dev/pkg/encoders/filter"
|
||||||
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
|
"next.orly.dev/pkg/encoders/kind"
|
||||||
|
"next.orly.dev/pkg/interfaces/publisher"
|
||||||
|
"next.orly.dev/pkg/interfaces/typer"
|
||||||
|
utils "next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
const Type = "socketapi"
|
const Type = "socketapi"
|
||||||
|
|
||||||
type Subscription struct {
|
type Subscription struct {
|
||||||
remote string
|
remote string
|
||||||
|
AuthedPubkey []byte
|
||||||
*filter.S
|
*filter.S
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -46,6 +50,9 @@ type W struct {
|
|||||||
// associated with this WebSocket connection. It is used to determine which
|
// associated with this WebSocket connection. It is used to determine which
|
||||||
// notifications or data should be received by the subscriber.
|
// notifications or data should be received by the subscriber.
|
||||||
Filters *filter.S
|
Filters *filter.S
|
||||||
|
|
||||||
|
// AuthedPubkey is the authenticated pubkey associated with the listener (if any).
|
||||||
|
AuthedPubkey []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func (w *W) Type() (typeName string) { return Type }
|
func (w *W) Type() (typeName string) { return Type }
|
||||||
@@ -56,7 +63,7 @@ func (w *W) Type() (typeName string) { return Type }
|
|||||||
type P struct {
|
type P struct {
|
||||||
c context.Context
|
c context.Context
|
||||||
// Mx is the mutex for the Map.
|
// Mx is the mutex for the Map.
|
||||||
Mx sync.Mutex
|
Mx sync.RWMutex
|
||||||
// Map is the map of subscribers and subscriptions from the websocket api.
|
// Map is the map of subscribers and subscriptions from the websocket api.
|
||||||
Map
|
Map
|
||||||
}
|
}
|
||||||
@@ -112,7 +119,9 @@ func (p *P) Receive(msg typer.T) {
|
|||||||
defer p.Mx.Unlock()
|
defer p.Mx.Unlock()
|
||||||
if subs, ok := p.Map[m.Conn]; !ok {
|
if subs, ok := p.Map[m.Conn]; !ok {
|
||||||
subs = make(map[string]Subscription)
|
subs = make(map[string]Subscription)
|
||||||
subs[m.Id] = Subscription{S: m.Filters, remote: m.remote}
|
subs[m.Id] = Subscription{
|
||||||
|
S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
|
||||||
|
}
|
||||||
p.Map[m.Conn] = subs
|
p.Map[m.Conn] = subs
|
||||||
log.D.C(
|
log.D.C(
|
||||||
func() string {
|
func() string {
|
||||||
@@ -124,7 +133,9 @@ func (p *P) Receive(msg typer.T) {
|
|||||||
},
|
},
|
||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
subs[m.Id] = Subscription{S: m.Filters, remote: m.remote}
|
subs[m.Id] = Subscription{
|
||||||
|
S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
|
||||||
|
}
|
||||||
log.D.C(
|
log.D.C(
|
||||||
func() string {
|
func() string {
|
||||||
return fmt.Sprintf(
|
return fmt.Sprintf(
|
||||||
@@ -150,71 +161,111 @@ func (p *P) Receive(msg typer.T) {
|
|||||||
// for unauthenticated users when events are privileged.
|
// for unauthenticated users when events are privileged.
|
||||||
func (p *P) Deliver(ev *event.E) {
|
func (p *P) Deliver(ev *event.E) {
|
||||||
var err error
|
var err error
|
||||||
p.Mx.Lock()
|
// Snapshot the deliveries under read lock to avoid holding locks during I/O
|
||||||
defer p.Mx.Unlock()
|
p.Mx.RLock()
|
||||||
log.D.C(
|
type delivery struct {
|
||||||
func() string {
|
w *websocket.Conn
|
||||||
return fmt.Sprintf(
|
id string
|
||||||
"delivering event %0x to websocket subscribers %d", ev.ID,
|
sub Subscription
|
||||||
len(p.Map),
|
}
|
||||||
)
|
var deliveries []delivery
|
||||||
},
|
|
||||||
)
|
|
||||||
for w, subs := range p.Map {
|
for w, subs := range p.Map {
|
||||||
for id, subscriber := range subs {
|
for id, subscriber := range subs {
|
||||||
if !subscriber.Match(ev) {
|
if subscriber.Match(ev) {
|
||||||
continue
|
deliveries = append(
|
||||||
|
deliveries, delivery{w: w, id: id, sub: subscriber},
|
||||||
|
)
|
||||||
}
|
}
|
||||||
// if p.Server.AuthRequired() {
|
|
||||||
// if !auth.CheckPrivilege(w.AuthedPubkey(), ev) {
|
|
||||||
// continue
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
var res *eventenvelope.Result
|
|
||||||
if res, err = eventenvelope.NewResultWith(id, ev); chk.E(err) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err = w.Write(
|
|
||||||
p.c, websocket.MessageText, res.Marshal(nil),
|
|
||||||
); chk.E(err) {
|
|
||||||
p.removeSubscriber(w)
|
|
||||||
if err = w.CloseNow(); chk.E(err) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
log.D.C(
|
|
||||||
func() string {
|
|
||||||
return fmt.Sprintf(
|
|
||||||
"dispatched event %0x to subscription %s, %s",
|
|
||||||
ev.ID, id, subscriber.remote,
|
|
||||||
)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
p.Mx.RUnlock()
|
||||||
|
if len(deliveries) > 0 {
|
||||||
|
log.D.C(
|
||||||
|
func() string {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"delivering event %0x to websocket subscribers %d", ev.ID,
|
||||||
|
len(deliveries),
|
||||||
|
)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
for _, d := range deliveries {
|
||||||
|
// If the event is privileged, enforce that the subscriber's authed pubkey matches
|
||||||
|
// either the event pubkey or appears in any 'p' tag of the event.
|
||||||
|
if kind.IsPrivileged(ev.Kind) && len(d.sub.AuthedPubkey) > 0 {
|
||||||
|
pk := d.sub.AuthedPubkey
|
||||||
|
allowed := false
|
||||||
|
// Direct author match
|
||||||
|
if utils.FastEqual(ev.Pubkey, pk) {
|
||||||
|
allowed = true
|
||||||
|
} else if ev.Tags != nil {
|
||||||
|
for _, pTag := range ev.Tags.GetAll([]byte("p")) {
|
||||||
|
// pTag.Value() returns []byte hex string; decode to bytes
|
||||||
|
dec, derr := hex.Dec(string(pTag.Value()))
|
||||||
|
if derr != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if utils.FastEqual(dec, pk) {
|
||||||
|
allowed = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !allowed {
|
||||||
|
// Skip delivery for this subscriber
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var res *eventenvelope.Result
|
||||||
|
if res, err = eventenvelope.NewResultWith(d.id, ev); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Use a separate context with timeout for writes to prevent race conditions
|
||||||
|
// where the publisher context gets cancelled while writing events
|
||||||
|
writeCtx, cancel := context.WithTimeout(
|
||||||
|
context.Background(), DefaultWriteTimeout,
|
||||||
|
)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
if err = d.w.Write(
|
||||||
|
writeCtx, websocket.MessageText, res.Marshal(nil),
|
||||||
|
); chk.E(err) {
|
||||||
|
// On error, remove the subscriber connection safely
|
||||||
|
p.removeSubscriber(d.w)
|
||||||
|
_ = d.w.CloseNow()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
log.D.C(
|
||||||
|
func() string {
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"dispatched event %0x to subscription %s, %s",
|
||||||
|
ev.ID, d.id, d.sub.remote,
|
||||||
|
)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// removeSubscriberId removes a specific subscription from a subscriber
|
// removeSubscriberId removes a specific subscription from a subscriber
|
||||||
// websocket.
|
// websocket.
|
||||||
func (p *P) removeSubscriberId(ws *websocket.Conn, id string) {
|
func (p *P) removeSubscriberId(ws *websocket.Conn, id string) {
|
||||||
p.Mx.Lock()
|
p.Mx.Lock()
|
||||||
|
defer p.Mx.Unlock()
|
||||||
var subs map[string]Subscription
|
var subs map[string]Subscription
|
||||||
var ok bool
|
var ok bool
|
||||||
if subs, ok = p.Map[ws]; ok {
|
if subs, ok = p.Map[ws]; ok {
|
||||||
delete(p.Map[ws], id)
|
delete(subs, id)
|
||||||
_ = subs
|
// Check the actual map after deletion, not the original reference
|
||||||
if len(subs) == 0 {
|
if len(p.Map[ws]) == 0 {
|
||||||
delete(p.Map, ws)
|
delete(p.Map, ws)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
p.Mx.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// removeSubscriber removes a websocket from the P collection.
|
// removeSubscriber removes a websocket from the P collection.
|
||||||
func (p *P) removeSubscriber(ws *websocket.Conn) {
|
func (p *P) removeSubscriber(ws *websocket.Conn) {
|
||||||
p.Mx.Lock()
|
p.Mx.Lock()
|
||||||
|
defer p.Mx.Unlock()
|
||||||
clear(p.Map[ws])
|
clear(p.Map[ws])
|
||||||
delete(p.Map, ws)
|
delete(p.Map, ws)
|
||||||
p.Mx.Unlock()
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,16 +2,14 @@ package app
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"database.orly"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/log"
|
|
||||||
"next.orly.dev/app/config"
|
"next.orly.dev/app/config"
|
||||||
"protocol.orly/publish"
|
"next.orly.dev/pkg/database"
|
||||||
|
"next.orly.dev/pkg/protocol/publish"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Server struct {
|
type Server struct {
|
||||||
@@ -25,11 +23,11 @@ type Server struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
log.T.C(
|
// log.T.C(
|
||||||
func() string {
|
// func() string {
|
||||||
return fmt.Sprintf("path %v header %v", r.URL, r.Header)
|
// return fmt.Sprintf("path %v header %v", r.URL, r.Header)
|
||||||
},
|
// },
|
||||||
)
|
// )
|
||||||
if r.Header.Get("Upgrade") == "websocket" {
|
if r.Header.Get("Upgrade") == "websocket" {
|
||||||
s.HandleWebsocket(w, r)
|
s.HandleWebsocket(w, r)
|
||||||
} else if r.Header.Get("Accept") == "application/nostr+json" {
|
} else if r.Header.Get("Accept") == "application/nostr+json" {
|
||||||
|
|||||||
46
cmd/benchmark/Dockerfile.benchmark
Normal file
46
cmd/benchmark/Dockerfile.benchmark
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
# Dockerfile for benchmark runner
|
||||||
|
FROM golang:1.25-alpine AS builder
|
||||||
|
|
||||||
|
# Install build dependencies
|
||||||
|
RUN apk add --no-cache git ca-certificates
|
||||||
|
|
||||||
|
# Set working directory
|
||||||
|
WORKDIR /build
|
||||||
|
|
||||||
|
# Copy go modules
|
||||||
|
COPY go.mod go.sum ./
|
||||||
|
RUN go mod download
|
||||||
|
|
||||||
|
# Copy source code
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Build the benchmark tool
|
||||||
|
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o benchmark cmd/benchmark/main.go
|
||||||
|
|
||||||
|
# Final stage
|
||||||
|
FROM alpine:latest
|
||||||
|
|
||||||
|
# Install runtime dependencies
|
||||||
|
RUN apk --no-cache add ca-certificates curl wget
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Copy benchmark binary
|
||||||
|
COPY --from=builder /build/benchmark /app/benchmark
|
||||||
|
|
||||||
|
# Copy benchmark runner script
|
||||||
|
COPY cmd/benchmark/benchmark-runner.sh /app/benchmark-runner
|
||||||
|
|
||||||
|
# Make scripts executable
|
||||||
|
RUN chmod +x /app/benchmark-runner
|
||||||
|
|
||||||
|
# Create reports directory
|
||||||
|
RUN mkdir -p /reports
|
||||||
|
|
||||||
|
# Environment variables
|
||||||
|
ENV BENCHMARK_EVENTS=10000
|
||||||
|
ENV BENCHMARK_WORKERS=8
|
||||||
|
ENV BENCHMARK_DURATION=60s
|
||||||
|
|
||||||
|
# Run the benchmark runner
|
||||||
|
CMD ["/app/benchmark-runner"]
|
||||||
23
cmd/benchmark/Dockerfile.khatru-badger
Normal file
23
cmd/benchmark/Dockerfile.khatru-badger
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
FROM golang:1.25-alpine AS builder
|
||||||
|
|
||||||
|
RUN apk add --no-cache git ca-certificates
|
||||||
|
|
||||||
|
WORKDIR /build
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Build the basic-badger example
|
||||||
|
RUN cd examples/basic-badger && \
|
||||||
|
go mod tidy && \
|
||||||
|
CGO_ENABLED=0 go build -o khatru-badger .
|
||||||
|
|
||||||
|
FROM alpine:latest
|
||||||
|
RUN apk --no-cache add ca-certificates wget
|
||||||
|
WORKDIR /app
|
||||||
|
COPY --from=builder /build/examples/basic-badger/khatru-badger /app/
|
||||||
|
RUN mkdir -p /data
|
||||||
|
EXPOSE 8080
|
||||||
|
ENV DATABASE_PATH=/data/badger
|
||||||
|
ENV PORT=8080
|
||||||
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||||
|
CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
|
||||||
|
CMD ["/app/khatru-badger"]
|
||||||
23
cmd/benchmark/Dockerfile.khatru-sqlite
Normal file
23
cmd/benchmark/Dockerfile.khatru-sqlite
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
FROM golang:1.25-alpine AS builder
|
||||||
|
|
||||||
|
RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev
|
||||||
|
|
||||||
|
WORKDIR /build
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Build the basic-sqlite example
|
||||||
|
RUN cd examples/basic-sqlite3 && \
|
||||||
|
go mod tidy && \
|
||||||
|
CGO_ENABLED=1 go build -o khatru-sqlite .
|
||||||
|
|
||||||
|
FROM alpine:latest
|
||||||
|
RUN apk --no-cache add ca-certificates sqlite wget
|
||||||
|
WORKDIR /app
|
||||||
|
COPY --from=builder /build/examples/basic-sqlite3/khatru-sqlite /app/
|
||||||
|
RUN mkdir -p /data
|
||||||
|
EXPOSE 8080
|
||||||
|
ENV DATABASE_PATH=/data/khatru.db
|
||||||
|
ENV PORT=8080
|
||||||
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||||
|
CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
|
||||||
|
CMD ["/app/khatru-sqlite"]
|
||||||
80
cmd/benchmark/Dockerfile.next-orly
Normal file
80
cmd/benchmark/Dockerfile.next-orly
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
# Dockerfile for next.orly.dev relay
|
||||||
|
FROM ubuntu:22.04 as builder
|
||||||
|
|
||||||
|
# Set environment variables
|
||||||
|
ARG GOLANG_VERSION=1.25.1
|
||||||
|
|
||||||
|
# Update package list and install dependencies
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y wget ca-certificates && \
|
||||||
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Download Go binary
|
||||||
|
RUN wget https://go.dev/dl/go${GOLANG_VERSION}.linux-amd64.tar.gz && \
|
||||||
|
rm -rf /usr/local/go && \
|
||||||
|
tar -C /usr/local -xzf go${GOLANG_VERSION}.linux-amd64.tar.gz && \
|
||||||
|
rm go${GOLANG_VERSION}.linux-amd64.tar.gz
|
||||||
|
|
||||||
|
# Set PATH environment variable
|
||||||
|
ENV PATH="/usr/local/go/bin:${PATH}"
|
||||||
|
|
||||||
|
# Verify installation
|
||||||
|
RUN go version
|
||||||
|
|
||||||
|
RUN apt update && \
|
||||||
|
apt -y install build-essential autoconf libtool git wget
|
||||||
|
RUN cd /tmp && \
|
||||||
|
rm -rf secp256k1 && \
|
||||||
|
git clone https://github.com/bitcoin-core/secp256k1.git && \
|
||||||
|
cd secp256k1 && \
|
||||||
|
git checkout v0.6.0 && \
|
||||||
|
git submodule init && \
|
||||||
|
git submodule update && \
|
||||||
|
./autogen.sh && \
|
||||||
|
./configure --enable-module-schnorrsig --enable-module-ecdh --prefix=/usr && \
|
||||||
|
make -j1 && \
|
||||||
|
make install
|
||||||
|
|
||||||
|
# Set working directory
|
||||||
|
WORKDIR /build
|
||||||
|
|
||||||
|
# Copy go modules
|
||||||
|
COPY go.mod go.sum ./
|
||||||
|
RUN go mod download
|
||||||
|
|
||||||
|
# Copy source code
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Build the relay
|
||||||
|
RUN CGO_ENABLED=1 GOOS=linux go build -o relay .
|
||||||
|
|
||||||
|
# Final stage
|
||||||
|
FROM ubuntu:22.04
|
||||||
|
|
||||||
|
# Install runtime dependencies
|
||||||
|
RUN apt-get update && apt-get install -y ca-certificates curl libsecp256k1-0 libsecp256k1-dev && rm -rf /var/lib/apt/lists/* && \
|
||||||
|
ln -sf /usr/lib/x86_64-linux-gnu/libsecp256k1.so.0 /usr/lib/x86_64-linux-gnu/libsecp256k1.so.5
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Copy binary from builder
|
||||||
|
COPY --from=builder /build/relay /app/relay
|
||||||
|
|
||||||
|
# Create data directory
|
||||||
|
RUN mkdir -p /data
|
||||||
|
|
||||||
|
# Expose port
|
||||||
|
EXPOSE 8080
|
||||||
|
|
||||||
|
# Set environment variables
|
||||||
|
ENV DATA_DIR=/data
|
||||||
|
ENV LISTEN=0.0.0.0
|
||||||
|
ENV PORT=8080
|
||||||
|
ENV LOG_LEVEL=info
|
||||||
|
|
||||||
|
# Health check
|
||||||
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||||
|
CMD curl -f http://localhost:8080 || exit 1
|
||||||
|
|
||||||
|
# Run the relay
|
||||||
|
CMD ["/app/relay"]
|
||||||
33
cmd/benchmark/Dockerfile.nostr-rs-relay
Normal file
33
cmd/benchmark/Dockerfile.nostr-rs-relay
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
FROM ubuntu:22.04 AS builder
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install -y \
|
||||||
|
curl \
|
||||||
|
build-essential \
|
||||||
|
libsqlite3-dev \
|
||||||
|
pkg-config \
|
||||||
|
protobuf-compiler \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Install Rust
|
||||||
|
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||||
|
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||||
|
|
||||||
|
WORKDIR /build
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Build the relay
|
||||||
|
RUN cargo build --release
|
||||||
|
|
||||||
|
FROM ubuntu:22.04
|
||||||
|
RUN apt-get update && apt-get install -y ca-certificates sqlite3 wget && rm -rf /var/lib/apt/lists/*
|
||||||
|
WORKDIR /app
|
||||||
|
COPY --from=builder /build/target/release/nostr-rs-relay /app/
|
||||||
|
RUN mkdir -p /data
|
||||||
|
|
||||||
|
EXPOSE 8080
|
||||||
|
ENV RUST_LOG=info
|
||||||
|
|
||||||
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||||
|
CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
|
||||||
|
|
||||||
|
CMD ["/app/nostr-rs-relay"]
|
||||||
23
cmd/benchmark/Dockerfile.relayer-basic
Normal file
23
cmd/benchmark/Dockerfile.relayer-basic
Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
FROM golang:1.25-alpine AS builder
|
||||||
|
|
||||||
|
RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev
|
||||||
|
|
||||||
|
WORKDIR /build
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Build the basic example
|
||||||
|
RUN cd examples/basic && \
|
||||||
|
go mod tidy && \
|
||||||
|
CGO_ENABLED=1 go build -o relayer-basic .
|
||||||
|
|
||||||
|
FROM alpine:latest
|
||||||
|
RUN apk --no-cache add ca-certificates sqlite wget
|
||||||
|
WORKDIR /app
|
||||||
|
COPY --from=builder /build/examples/basic/relayer-basic /app/
|
||||||
|
RUN mkdir -p /data
|
||||||
|
EXPOSE 8080
|
||||||
|
ENV DATABASE_PATH=/data/relayer.db
|
||||||
|
ENV PORT=8080
|
||||||
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||||
|
CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
|
||||||
|
CMD ["/app/relayer-basic"]
|
||||||
54
cmd/benchmark/Dockerfile.strfry
Normal file
54
cmd/benchmark/Dockerfile.strfry
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
FROM ubuntu:22.04 AS builder
|
||||||
|
|
||||||
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
||||||
|
# Install build dependencies
|
||||||
|
RUN apt-get update && apt-get install -y git g++ make libssl-dev zlib1g-dev liblmdb-dev libflatbuffers-dev libsecp256k1-dev libzstd-dev \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
WORKDIR /build
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Initialize git submodules
|
||||||
|
RUN git submodule update --init --recursive
|
||||||
|
|
||||||
|
# Build strfry
|
||||||
|
RUN make setup-golpe && \
|
||||||
|
make -j$(nproc)
|
||||||
|
|
||||||
|
FROM ubuntu:22.04
|
||||||
|
RUN apt-get update && apt-get install -y \
|
||||||
|
liblmdb0 \
|
||||||
|
libsecp256k1-0 \
|
||||||
|
curl \
|
||||||
|
bash \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
COPY --from=builder /build/strfry /app/
|
||||||
|
COPY --from=builder /build/strfry.conf /app/
|
||||||
|
|
||||||
|
# Create the data directory placeholder (may be masked by volume at runtime)
|
||||||
|
RUN mkdir -p /data && \
|
||||||
|
chmod 755 /data
|
||||||
|
|
||||||
|
# Update strfry.conf to bind to all interfaces and use port 8080
|
||||||
|
RUN sed -i 's/bind = "127.0.0.1"/bind = "0.0.0.0"/' /app/strfry.conf && \
|
||||||
|
sed -i 's/port = 7777/port = 8080/' /app/strfry.conf
|
||||||
|
|
||||||
|
# Entrypoint ensures the LMDB directory exists inside the mounted volume before starting
|
||||||
|
ENV STRFRY_DB_PATH=/data/strfry.lmdb
|
||||||
|
RUN echo '#!/usr/bin/env bash' > /entrypoint.sh && \
|
||||||
|
echo 'set -euo pipefail' >> /entrypoint.sh && \
|
||||||
|
echo 'DB_PATH="${STRFRY_DB_PATH:-/data/strfry.lmdb}"' >> /entrypoint.sh && \
|
||||||
|
echo 'mkdir -p "$DB_PATH"' >> /entrypoint.sh && \
|
||||||
|
echo 'chown -R root:root "$(dirname "$DB_PATH")"' >> /entrypoint.sh && \
|
||||||
|
echo 'exec /app/strfry relay' >> /entrypoint.sh && \
|
||||||
|
chmod +x /entrypoint.sh
|
||||||
|
|
||||||
|
EXPOSE 8080
|
||||||
|
|
||||||
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||||
|
CMD curl -f http://localhost:8080 || exit 1
|
||||||
|
|
||||||
|
ENTRYPOINT ["/entrypoint.sh"]
|
||||||
260
cmd/benchmark/README.md
Normal file
260
cmd/benchmark/README.md
Normal file
@@ -0,0 +1,260 @@
|
|||||||
|
# Nostr Relay Benchmark Suite
|
||||||
|
|
||||||
|
A comprehensive benchmarking system for testing and comparing the performance of multiple Nostr relay implementations, including:
|
||||||
|
|
||||||
|
- **next.orly.dev** (this repository) - BadgerDB-based relay
|
||||||
|
- **Khatru** - SQLite and Badger variants
|
||||||
|
- **Relayer** - Basic example implementation
|
||||||
|
- **Strfry** - C++ LMDB-based relay
|
||||||
|
- **nostr-rs-relay** - Rust-based relay with SQLite
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
### Benchmark Tests
|
||||||
|
|
||||||
|
1. **Peak Throughput Test**
|
||||||
|
- Tests maximum event ingestion rate
|
||||||
|
- Concurrent workers pushing events as fast as possible
|
||||||
|
- Measures events/second, latency distribution, success rate
|
||||||
|
|
||||||
|
2. **Burst Pattern Test**
|
||||||
|
- Simulates real-world traffic patterns
|
||||||
|
- Alternating high-activity bursts and quiet periods
|
||||||
|
- Tests relay behavior under varying loads
|
||||||
|
|
||||||
|
3. **Mixed Read/Write Test**
|
||||||
|
- Concurrent read and write operations
|
||||||
|
- Tests query performance while events are being ingested
|
||||||
|
- Measures combined throughput and latency
|
||||||
|
|
||||||
|
### Performance Metrics
|
||||||
|
|
||||||
|
- **Throughput**: Events processed per second
|
||||||
|
- **Latency**: Average, P95, and P99 response times
|
||||||
|
- **Success Rate**: Percentage of successful operations
|
||||||
|
- **Memory Usage**: Peak memory consumption during tests
|
||||||
|
- **Error Analysis**: Detailed error reporting and categorization
|
||||||
|
|
||||||
|
### Reporting
|
||||||
|
|
||||||
|
- Individual relay reports with detailed metrics
|
||||||
|
- Aggregate comparison report across all relays
|
||||||
|
- Comparison tables for easy performance analysis
|
||||||
|
- Timestamped results for tracking improvements over time
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
### 1. Setup External Relays
|
||||||
|
|
||||||
|
Run the setup script to download and configure all external relay repositories:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd cmd/benchmark
|
||||||
|
./setup-external-relays.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
This will:
|
||||||
|
- Clone all external relay repositories
|
||||||
|
- Create Docker configurations for each relay
|
||||||
|
- Set up configuration files
|
||||||
|
- Create data and report directories
|
||||||
|
|
||||||
|
### 2. Run Benchmarks
|
||||||
|
|
||||||
|
Start all relays and run the benchmark suite:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
docker compose up --build
|
||||||
|
```
|
||||||
|
|
||||||
|
The system will:
|
||||||
|
- Build and start all relay containers
|
||||||
|
- Wait for all relays to become healthy
|
||||||
|
- Run benchmarks against each relay sequentially
|
||||||
|
- Generate individual and aggregate reports
|
||||||
|
|
||||||
|
### 3. View Results
|
||||||
|
|
||||||
|
Results are stored in the `reports/` directory with timestamps:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# View the aggregate report
|
||||||
|
cat reports/run_YYYYMMDD_HHMMSS/aggregate_report.txt
|
||||||
|
|
||||||
|
# View individual relay results
|
||||||
|
ls reports/run_YYYYMMDD_HHMMSS/
|
||||||
|
```
|
||||||
|
|
||||||
|
## Architecture
|
||||||
|
|
||||||
|
### Docker Compose Services
|
||||||
|
|
||||||
|
| Service | Port | Description |
|
||||||
|
|---------|------|-------------|
|
||||||
|
| next-orly | 8001 | This repository's BadgerDB relay |
|
||||||
|
| khatru-sqlite | 8002 | Khatru with SQLite backend |
|
||||||
|
| khatru-badger | 8003 | Khatru with Badger backend |
|
||||||
|
| relayer-basic | 8004 | Basic relayer example |
|
||||||
|
| strfry | 8005 | Strfry C++ LMDB relay |
|
||||||
|
| nostr-rs-relay | 8006 | Rust SQLite relay |
|
||||||
|
| benchmark-runner | - | Orchestrates tests and aggregates results |
|
||||||
|
|
||||||
|
### File Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
cmd/benchmark/
|
||||||
|
├── main.go # Benchmark tool implementation
|
||||||
|
├── docker-compose.yml # Service orchestration
|
||||||
|
├── setup-external-relays.sh # Repository setup script
|
||||||
|
├── benchmark-runner.sh # Test orchestration script
|
||||||
|
├── Dockerfile.next-orly # This repo's relay container
|
||||||
|
├── Dockerfile.benchmark # Benchmark runner container
|
||||||
|
├── Dockerfile.khatru-sqlite # Khatru SQLite variant
|
||||||
|
├── Dockerfile.khatru-badger # Khatru Badger variant
|
||||||
|
├── Dockerfile.relayer-basic # Relayer basic example
|
||||||
|
├── Dockerfile.strfry # Strfry relay
|
||||||
|
├── Dockerfile.nostr-rs-relay # Rust relay
|
||||||
|
├── configs/
|
||||||
|
│ ├── strfry.conf # Strfry configuration
|
||||||
|
│ └── config.toml # nostr-rs-relay configuration
|
||||||
|
├── external/ # External relay repositories
|
||||||
|
├── data/ # Persistent data for each relay
|
||||||
|
└── reports/ # Benchmark results
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
### Environment Variables
|
||||||
|
|
||||||
|
The benchmark can be configured via environment variables in `docker-compose.yml`:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
environment:
|
||||||
|
- BENCHMARK_EVENTS=10000 # Number of events per test
|
||||||
|
- BENCHMARK_WORKERS=8 # Concurrent workers
|
||||||
|
- BENCHMARK_DURATION=60s # Test duration
|
||||||
|
- BENCHMARK_TARGETS=... # Relay endpoints to test
|
||||||
|
```
|
||||||
|
|
||||||
|
### Custom Configuration
|
||||||
|
|
||||||
|
1. **Modify test parameters**: Edit environment variables in `docker-compose.yml`
|
||||||
|
2. **Add new relays**:
|
||||||
|
- Add service to `docker-compose.yml`
|
||||||
|
- Create appropriate Dockerfile
|
||||||
|
- Update `BENCHMARK_TARGETS` environment variable
|
||||||
|
3. **Adjust relay configs**: Edit files in `configs/` directory
|
||||||
|
|
||||||
|
## Manual Usage
|
||||||
|
|
||||||
|
### Run Individual Relay
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build and run a specific relay
|
||||||
|
docker-compose up next-orly
|
||||||
|
|
||||||
|
# Run benchmark against specific endpoint
|
||||||
|
./benchmark -datadir=/tmp/test -events=1000 -workers=4
|
||||||
|
```
|
||||||
|
|
||||||
|
### Run Benchmark Tool Directly
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build the benchmark tool
|
||||||
|
go build -o benchmark main.go
|
||||||
|
|
||||||
|
# Run with custom parameters
|
||||||
|
./benchmark \
|
||||||
|
-datadir=/tmp/benchmark_db \
|
||||||
|
-events=5000 \
|
||||||
|
-workers=4 \
|
||||||
|
-duration=30s
|
||||||
|
```
|
||||||
|
|
||||||
|
## Benchmark Results Interpretation
|
||||||
|
|
||||||
|
### Peak Throughput Test
|
||||||
|
- **High events/sec**: Good write performance
|
||||||
|
- **Low latency**: Efficient event processing
|
||||||
|
- **High success rate**: Stable under load
|
||||||
|
|
||||||
|
### Burst Pattern Test
|
||||||
|
- **Consistent performance**: Good handling of variable loads
|
||||||
|
- **Low P95/P99 latency**: Predictable response times
|
||||||
|
- **No errors during bursts**: Robust queuing/buffering
|
||||||
|
|
||||||
|
### Mixed Read/Write Test
|
||||||
|
- **Balanced throughput**: Good concurrent operation handling
|
||||||
|
- **Low read latency**: Efficient query processing
|
||||||
|
- **Stable write performance**: Queries don't significantly impact writes
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
### Adding New Tests
|
||||||
|
|
||||||
|
1. Extend the `Benchmark` struct in `main.go`
|
||||||
|
2. Add new test method following existing patterns
|
||||||
|
3. Update `main()` function to call new test
|
||||||
|
4. Update result aggregation in `benchmark-runner.sh`
|
||||||
|
|
||||||
|
### Modifying Relay Configurations
|
||||||
|
|
||||||
|
Each relay's Dockerfile and configuration can be customized:
|
||||||
|
- **Resource limits**: Adjust memory/CPU limits in docker-compose.yml
|
||||||
|
- **Database settings**: Modify configuration files in `configs/`
|
||||||
|
- **Network settings**: Update port mappings and health checks
|
||||||
|
|
||||||
|
### Debugging
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# View logs for specific relay
|
||||||
|
docker-compose logs next-orly
|
||||||
|
|
||||||
|
# Run benchmark with debug output
|
||||||
|
docker-compose up --build benchmark-runner
|
||||||
|
|
||||||
|
# Check individual container health
|
||||||
|
docker-compose ps
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Common Issues
|
||||||
|
|
||||||
|
1. **Relay fails to start**: Check logs with `docker-compose logs <service>`
|
||||||
|
2. **Connection refused**: Ensure relay health checks are passing
|
||||||
|
3. **Build failures**: Verify external repositories were cloned correctly
|
||||||
|
4. **Permission errors**: Ensure setup script is executable
|
||||||
|
|
||||||
|
### Performance Issues
|
||||||
|
|
||||||
|
- **Low throughput**: Check resource limits and concurrent worker count
|
||||||
|
- **High memory usage**: Monitor container resource consumption
|
||||||
|
- **Network bottlenecks**: Test on different host configurations
|
||||||
|
|
||||||
|
### Reset Environment
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Clean up everything
|
||||||
|
docker-compose down -v
|
||||||
|
docker system prune -f
|
||||||
|
rm -rf external/ data/ reports/
|
||||||
|
|
||||||
|
# Start fresh
|
||||||
|
./setup-external-relays.sh
|
||||||
|
docker-compose up --build
|
||||||
|
```
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
To add support for new relay implementations:
|
||||||
|
|
||||||
|
1. Create appropriate Dockerfile following existing patterns
|
||||||
|
2. Add service definition to `docker-compose.yml`
|
||||||
|
3. Update `BENCHMARK_TARGETS` environment variable
|
||||||
|
4. Test the new relay integration
|
||||||
|
5. Update documentation
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
This benchmark suite is part of the next.orly.dev project and follows the same licensing terms.
|
||||||
265
cmd/benchmark/benchmark-runner.sh
Normal file
265
cmd/benchmark/benchmark-runner.sh
Normal file
@@ -0,0 +1,265 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
# Benchmark runner script for testing multiple Nostr relay implementations
|
||||||
|
# This script coordinates testing all relays and aggregates results
|
||||||
|
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# Configuration from environment variables
|
||||||
|
BENCHMARK_EVENTS="${BENCHMARK_EVENTS:-10000}"
|
||||||
|
BENCHMARK_WORKERS="${BENCHMARK_WORKERS:-8}"
|
||||||
|
BENCHMARK_DURATION="${BENCHMARK_DURATION:-60s}"
|
||||||
|
BENCHMARK_TARGETS="${BENCHMARK_TARGETS:-next-orly:8001,khatru-sqlite:8002,khatru-badger:8003,relayer-basic:8004,strfry:8005,nostr-rs-relay:8006}"
|
||||||
|
OUTPUT_DIR="${OUTPUT_DIR:-/reports}"
|
||||||
|
|
||||||
|
# Create output directory
|
||||||
|
mkdir -p "${OUTPUT_DIR}"
|
||||||
|
|
||||||
|
# Generate timestamp for this benchmark run
|
||||||
|
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
|
||||||
|
RUN_DIR="${OUTPUT_DIR}/run_${TIMESTAMP}"
|
||||||
|
mkdir -p "${RUN_DIR}"
|
||||||
|
|
||||||
|
echo "=================================================="
|
||||||
|
echo "Nostr Relay Benchmark Suite"
|
||||||
|
echo "=================================================="
|
||||||
|
echo "Timestamp: $(date)"
|
||||||
|
echo "Events per test: ${BENCHMARK_EVENTS}"
|
||||||
|
echo "Concurrent workers: ${BENCHMARK_WORKERS}"
|
||||||
|
echo "Test duration: ${BENCHMARK_DURATION}"
|
||||||
|
echo "Output directory: ${RUN_DIR}"
|
||||||
|
echo "=================================================="
|
||||||
|
|
||||||
|
# Function to wait for relay to be ready
|
||||||
|
wait_for_relay() {
|
||||||
|
local name="$1"
|
||||||
|
local url="$2"
|
||||||
|
local max_attempts=60
|
||||||
|
local attempt=0
|
||||||
|
|
||||||
|
echo "Waiting for ${name} to be ready at ${url}..."
|
||||||
|
|
||||||
|
while [ $attempt -lt $max_attempts ]; do
|
||||||
|
if wget --quiet --tries=1 --spider --timeout=5 "http://${url}" 2>/dev/null || \
|
||||||
|
curl -f --connect-timeout 5 --max-time 5 "http://${url}" >/dev/null 2>&1; then
|
||||||
|
echo "${name} is ready!"
|
||||||
|
return 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
attempt=$((attempt + 1))
|
||||||
|
echo " Attempt ${attempt}/${max_attempts}: ${name} not ready yet..."
|
||||||
|
sleep 2
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "ERROR: ${name} failed to become ready after ${max_attempts} attempts"
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to run benchmark against a specific relay
|
||||||
|
run_benchmark() {
|
||||||
|
local relay_name="$1"
|
||||||
|
local relay_url="$2"
|
||||||
|
local output_file="$3"
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "=================================================="
|
||||||
|
echo "Testing ${relay_name} at ws://${relay_url}"
|
||||||
|
echo "=================================================="
|
||||||
|
|
||||||
|
# Wait for relay to be ready
|
||||||
|
if ! wait_for_relay "${relay_name}" "${relay_url}"; then
|
||||||
|
echo "ERROR: ${relay_name} is not responding, skipping..."
|
||||||
|
echo "RELAY: ${relay_name}" > "${output_file}"
|
||||||
|
echo "STATUS: FAILED - Relay not responding" >> "${output_file}"
|
||||||
|
echo "ERROR: Connection failed" >> "${output_file}"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Run the benchmark
|
||||||
|
echo "Running benchmark against ${relay_name}..."
|
||||||
|
|
||||||
|
# Create temporary directory for this relay's data
|
||||||
|
TEMP_DATA_DIR="/tmp/benchmark_${relay_name}_$$"
|
||||||
|
mkdir -p "${TEMP_DATA_DIR}"
|
||||||
|
|
||||||
|
# Run benchmark and capture both stdout and stderr
|
||||||
|
if /app/benchmark \
|
||||||
|
-datadir="${TEMP_DATA_DIR}" \
|
||||||
|
-events="${BENCHMARK_EVENTS}" \
|
||||||
|
-workers="${BENCHMARK_WORKERS}" \
|
||||||
|
-duration="${BENCHMARK_DURATION}" \
|
||||||
|
> "${output_file}" 2>&1; then
|
||||||
|
|
||||||
|
echo "✓ Benchmark completed successfully for ${relay_name}"
|
||||||
|
|
||||||
|
# Add relay identification to the report
|
||||||
|
echo "" >> "${output_file}"
|
||||||
|
echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
|
||||||
|
echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
|
||||||
|
echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
|
||||||
|
echo "BENCHMARK_CONFIG:" >> "${output_file}"
|
||||||
|
echo " Events: ${BENCHMARK_EVENTS}" >> "${output_file}"
|
||||||
|
echo " Workers: ${BENCHMARK_WORKERS}" >> "${output_file}"
|
||||||
|
echo " Duration: ${BENCHMARK_DURATION}" >> "${output_file}"
|
||||||
|
|
||||||
|
else
|
||||||
|
echo "✗ Benchmark failed for ${relay_name}"
|
||||||
|
echo "" >> "${output_file}"
|
||||||
|
echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
|
||||||
|
echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
|
||||||
|
echo "STATUS: FAILED" >> "${output_file}"
|
||||||
|
echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Clean up temporary data
|
||||||
|
rm -rf "${TEMP_DATA_DIR}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Function to generate aggregate report
|
||||||
|
generate_aggregate_report() {
|
||||||
|
local aggregate_file="${RUN_DIR}/aggregate_report.txt"
|
||||||
|
|
||||||
|
echo "Generating aggregate report..."
|
||||||
|
|
||||||
|
cat > "${aggregate_file}" << EOF
|
||||||
|
================================================================
|
||||||
|
NOSTR RELAY BENCHMARK AGGREGATE REPORT
|
||||||
|
================================================================
|
||||||
|
Generated: $(date -Iseconds)
|
||||||
|
Benchmark Configuration:
|
||||||
|
Events per test: ${BENCHMARK_EVENTS}
|
||||||
|
Concurrent workers: ${BENCHMARK_WORKERS}
|
||||||
|
Test duration: ${BENCHMARK_DURATION}
|
||||||
|
|
||||||
|
Relays tested: $(echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | wc -l)
|
||||||
|
|
||||||
|
================================================================
|
||||||
|
SUMMARY BY RELAY
|
||||||
|
================================================================
|
||||||
|
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Process each relay's results
|
||||||
|
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
|
||||||
|
if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
relay_file="${RUN_DIR}/${relay_name}_results.txt"
|
||||||
|
|
||||||
|
echo "Relay: ${relay_name}" >> "${aggregate_file}"
|
||||||
|
echo "----------------------------------------" >> "${aggregate_file}"
|
||||||
|
|
||||||
|
if [ -f "${relay_file}" ]; then
|
||||||
|
# Extract key metrics from the relay's report
|
||||||
|
if grep -q "STATUS: FAILED" "${relay_file}"; then
|
||||||
|
echo "Status: FAILED" >> "${aggregate_file}"
|
||||||
|
grep "ERROR:" "${relay_file}" | head -1 >> "${aggregate_file}" || echo "Error: Unknown failure" >> "${aggregate_file}"
|
||||||
|
else
|
||||||
|
echo "Status: COMPLETED" >> "${aggregate_file}"
|
||||||
|
|
||||||
|
# Extract performance metrics
|
||||||
|
grep "Events/sec:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
|
||||||
|
grep "Success Rate:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
|
||||||
|
grep "Avg Latency:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
|
||||||
|
grep "P95 Latency:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
|
||||||
|
grep "Memory:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Status: NO RESULTS FILE" >> "${aggregate_file}"
|
||||||
|
echo "Error: Results file not found" >> "${aggregate_file}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "" >> "${aggregate_file}"
|
||||||
|
done
|
||||||
|
|
||||||
|
cat >> "${aggregate_file}" << EOF
|
||||||
|
|
||||||
|
================================================================
|
||||||
|
DETAILED RESULTS
|
||||||
|
================================================================
|
||||||
|
|
||||||
|
Individual relay reports are available in:
|
||||||
|
$(ls "${RUN_DIR}"/*_results.txt 2>/dev/null | sed 's|^| - |' || echo " No individual reports found")
|
||||||
|
|
||||||
|
================================================================
|
||||||
|
BENCHMARK COMPARISON TABLE
|
||||||
|
================================================================
|
||||||
|
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# Create a comparison table
|
||||||
|
printf "%-20s %-10s %-15s %-15s %-15s\n" "Relay" "Status" "Peak Tput/s" "Avg Latency" "Success Rate" >> "${aggregate_file}"
|
||||||
|
printf "%-20s %-10s %-15s %-15s %-15s\n" "----" "------" "-----------" "-----------" "------------" >> "${aggregate_file}"
|
||||||
|
|
||||||
|
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
|
||||||
|
if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
relay_file="${RUN_DIR}/${relay_name}_results.txt"
|
||||||
|
|
||||||
|
if [ -f "${relay_file}" ]; then
|
||||||
|
if grep -q "STATUS: FAILED" "${relay_file}"; then
|
||||||
|
printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "FAILED" "-" "-" "-" >> "${aggregate_file}"
|
||||||
|
else
|
||||||
|
# Extract metrics for the table
|
||||||
|
peak_tput=$(grep "Events/sec:" "${relay_file}" | head -1 | awk '{print $2}' || echo "-")
|
||||||
|
avg_latency=$(grep "Avg Latency:" "${relay_file}" | head -1 | awk '{print $3}' || echo "-")
|
||||||
|
success_rate=$(grep "Success Rate:" "${relay_file}" | head -1 | awk '{print $3}' || echo "-")
|
||||||
|
|
||||||
|
printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "OK" "${peak_tput}" "${avg_latency}" "${success_rate}" >> "${aggregate_file}"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "NO DATA" "-" "-" "-" >> "${aggregate_file}"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
echo "" >> "${aggregate_file}"
|
||||||
|
echo "================================================================" >> "${aggregate_file}"
|
||||||
|
echo "End of Report" >> "${aggregate_file}"
|
||||||
|
echo "================================================================" >> "${aggregate_file}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Main execution
|
||||||
|
echo "Starting relay benchmark suite..."
|
||||||
|
|
||||||
|
# Parse targets and run benchmarks
|
||||||
|
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
|
||||||
|
if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
|
||||||
|
echo "WARNING: Skipping invalid target: ${relay_name}:${relay_port}"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
|
||||||
|
relay_url="${relay_name}:${relay_port}"
|
||||||
|
output_file="${RUN_DIR}/${relay_name}_results.txt"
|
||||||
|
|
||||||
|
run_benchmark "${relay_name}" "${relay_url}" "${output_file}"
|
||||||
|
|
||||||
|
# Small delay between tests
|
||||||
|
sleep 5
|
||||||
|
done
|
||||||
|
|
||||||
|
# Generate aggregate report
|
||||||
|
generate_aggregate_report
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "=================================================="
|
||||||
|
echo "Benchmark Suite Completed!"
|
||||||
|
echo "=================================================="
|
||||||
|
echo "Results directory: ${RUN_DIR}"
|
||||||
|
echo "Aggregate report: ${RUN_DIR}/aggregate_report.txt"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# Display summary
|
||||||
|
if [ -f "${RUN_DIR}/aggregate_report.txt" ]; then
|
||||||
|
echo "Quick Summary:"
|
||||||
|
echo "=============="
|
||||||
|
grep -A 10 "BENCHMARK COMPARISON TABLE" "${RUN_DIR}/aggregate_report.txt" | tail -n +4
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "All benchmark files:"
|
||||||
|
ls -la "${RUN_DIR}/"
|
||||||
|
echo ""
|
||||||
|
echo "Benchmark suite finished at: $(date)"
|
||||||
36
cmd/benchmark/configs/config.toml
Normal file
36
cmd/benchmark/configs/config.toml
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
[info]
|
||||||
|
relay_url = "ws://localhost:8080"
|
||||||
|
name = "nostr-rs-relay benchmark"
|
||||||
|
description = "A nostr-rs-relay for benchmarking"
|
||||||
|
pubkey = ""
|
||||||
|
contact = ""
|
||||||
|
|
||||||
|
[database]
|
||||||
|
data_directory = "/data"
|
||||||
|
in_memory = false
|
||||||
|
engine = "sqlite"
|
||||||
|
|
||||||
|
[network]
|
||||||
|
port = 8080
|
||||||
|
address = "0.0.0.0"
|
||||||
|
|
||||||
|
[limits]
|
||||||
|
messages_per_sec = 0
|
||||||
|
subscriptions_per_min = 0
|
||||||
|
max_event_bytes = 65535
|
||||||
|
max_ws_message_bytes = 131072
|
||||||
|
max_ws_frame_bytes = 131072
|
||||||
|
|
||||||
|
[authorization]
|
||||||
|
pubkey_whitelist = []
|
||||||
|
|
||||||
|
[verified_users]
|
||||||
|
mode = "passive"
|
||||||
|
domain_whitelist = []
|
||||||
|
domain_blacklist = []
|
||||||
|
|
||||||
|
[pay_to_relay]
|
||||||
|
enabled = false
|
||||||
|
|
||||||
|
[options]
|
||||||
|
reject_future_seconds = 30
|
||||||
101
cmd/benchmark/configs/strfry.conf
Normal file
101
cmd/benchmark/configs/strfry.conf
Normal file
@@ -0,0 +1,101 @@
|
|||||||
|
##
## Default strfry config, tuned only minimally for benchmarking.
## Mounted into the container at /etc/strfry.conf (see docker-compose.yml).
##

# Directory that contains the strfry LMDB database (restart required)
db = "/data/strfry.lmdb"

dbParams {
    # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
    maxreaders = 256

    # Size of mmap to use when loading LMDB (default is 1TB, which is probably reasonable) (restart required)
    mapsize = 1099511627776
}

relay {
    # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
    bind = "0.0.0.0"

    # Port to open for the nostr websocket protocol (restart required)
    port = 8080

    # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
    nofiles = 1000000

    # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
    realIpHeader = ""

    info {
        # NIP-11: Name of this server. Short/descriptive (< 30 characters)
        name = "strfry benchmark"

        # NIP-11: Detailed description of this server, free-form
        description = "A strfry relay for benchmarking"

        # NIP-11: Administrative pubkey, for contact purposes
        pubkey = ""

        # NIP-11: Alternative contact for this server
        contact = ""
    }

    # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
    maxWebsocketPayloadSize = 131072

    # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
    autoPingSeconds = 55

    # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy) (restart required)
    enableTcpKeepalive = false

    # How much uninterrupted CPU time a REQ query should get during its DB scan
    queryTimesliceBudgetMicroseconds = 10000

    # Maximum records that can be returned per filter
    maxFilterLimit = 500

    # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
    maxSubsPerConnection = 20

    writePolicy {
        # If non-empty, path to an executable script that implements the writePolicy plugin logic
        plugin = ""
    }

    compression {
        # Use permessage-deflate compression if supported by client. Reduces bandwidth, but uses more CPU (restart required)
        enabled = true

        # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
        slidingWindow = true
    }

    logging {
        # Dump all incoming messages
        dumpInAll = false

        # Dump all incoming EVENT messages
        dumpInEvents = false

        # Dump all incoming REQ/CLOSE messages
        dumpInReqs = false

        # Log performance metrics for initial REQ database scans
        dbScanPerf = false
    }

    numThreads {
        # Ingester threads: route incoming requests, validate events/sigs (restart required)
        ingester = 3

        # reqWorker threads: Handle initial DB scan for events (restart required)
        reqWorker = 3

        # reqMonitor threads: Handle filtering of new events (restart required)
        reqMonitor = 3

        # yesstr threads: experimental yesstr protocol (restart required)
        yesstr = 1
    }
}
|
||||||
183
cmd/benchmark/docker-compose.yml
Normal file
183
cmd/benchmark/docker-compose.yml
Normal file
@@ -0,0 +1,183 @@
|
|||||||
|
# Benchmark stack: builds six nostr relays, exposes each on its own host
# port (8001-8006), and runs a benchmark-runner container against them once
# all healthchecks pass.
# NOTE(review): the top-level `version` key is obsolete in Compose v2 and
# only produces a warning — safe to drop once older tooling is ruled out.
version: '3.8'

services:
  # Next.orly.dev relay (this repository)
  next-orly:
    build:
      context: ../..
      dockerfile: cmd/benchmark/Dockerfile.next-orly
    container_name: benchmark-next-orly
    environment:
      - DATA_DIR=/data
      - LISTEN=0.0.0.0
      - PORT=8080
      - LOG_LEVEL=info
    volumes:
      - ./data/next-orly:/data
    ports:
      - "8001:8080"
    networks:
      - benchmark-net
    healthcheck:
      # NOTE(review): curl must exist in the image for this check to pass;
      # the other services use wget — confirm the chosen binary is present.
      test: ["CMD", "curl", "-f", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Khatru with SQLite
  khatru-sqlite:
    build:
      context: ./external/khatru
      dockerfile: ../../Dockerfile.khatru-sqlite
    container_name: benchmark-khatru-sqlite
    environment:
      - DATABASE_TYPE=sqlite
      - DATABASE_PATH=/data/khatru.db
      - PORT=8080
    volumes:
      - ./data/khatru-sqlite:/data
    ports:
      - "8002:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Khatru with Badger
  khatru-badger:
    build:
      context: ./external/khatru
      dockerfile: ../../Dockerfile.khatru-badger
    container_name: benchmark-khatru-badger
    environment:
      - DATABASE_TYPE=badger
      - DATABASE_PATH=/data/badger
      - PORT=8080
    volumes:
      - ./data/khatru-badger:/data
    ports:
      - "8003:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Relayer basic example
  relayer-basic:
    build:
      context: ./external/relayer
      dockerfile: ../../Dockerfile.relayer-basic
    container_name: benchmark-relayer-basic
    environment:
      - PORT=8080
      - DATABASE_PATH=/data/relayer.db
    volumes:
      - ./data/relayer-basic:/data
    ports:
      - "8004:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Strfry
  strfry:
    build:
      context: ./external/strfry
      dockerfile: ../../Dockerfile.strfry
    container_name: benchmark-strfry
    environment:
      - STRFRY_DB_PATH=/data/strfry.lmdb
      - STRFRY_RELAY_PORT=8080
    volumes:
      - ./data/strfry:/data
      - ./configs/strfry.conf:/etc/strfry.conf
    ports:
      - "8005:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Nostr-rs-relay
  nostr-rs-relay:
    build:
      context: ./external/nostr-rs-relay
      dockerfile: ../../Dockerfile.nostr-rs-relay
    container_name: benchmark-nostr-rs-relay
    environment:
      - RUST_LOG=info
    volumes:
      - ./data/nostr-rs-relay:/data
      - ./configs/config.toml:/app/config.toml
    ports:
      - "8006:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Benchmark runner: waits for every relay's healthcheck, then an extra 30s
  # grace period, before driving load against all six targets.
  benchmark-runner:
    build:
      context: ../..
      dockerfile: cmd/benchmark/Dockerfile.benchmark
    container_name: benchmark-runner
    depends_on:
      next-orly:
        condition: service_healthy
      khatru-sqlite:
        condition: service_healthy
      khatru-badger:
        condition: service_healthy
      relayer-basic:
        condition: service_healthy
      strfry:
        condition: service_healthy
      nostr-rs-relay:
        condition: service_healthy
    environment:
      - BENCHMARK_TARGETS=next-orly:8001,khatru-sqlite:8002,khatru-badger:8003,relayer-basic:8004,strfry:8005,nostr-rs-relay:8006
      - BENCHMARK_EVENTS=10000
      - BENCHMARK_WORKERS=8
      - BENCHMARK_DURATION=60s
    volumes:
      - ./reports:/reports
    networks:
      - benchmark-net
    command: >
      sh -c "
      echo 'Waiting for all relays to be ready...' &&
      sleep 30 &&
      echo 'Starting benchmark tests...' &&
      /app/benchmark-runner --output-dir=/reports
      "

networks:
  benchmark-net:
    driver: bridge

volumes:
  benchmark-data:
    driver: local
|
||||||
1
cmd/benchmark/external/khatru
vendored
Submodule
1
cmd/benchmark/external/khatru
vendored
Submodule
Submodule cmd/benchmark/external/khatru added at 668c41b988
573
cmd/benchmark/main.go
Normal file
573
cmd/benchmark/main.go
Normal file
@@ -0,0 +1,573 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
	"context"
	"crypto/rand"
	"flag"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"runtime"
	"slices"
	"strings"
	"sync"
	"time"

	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/encoders/event"
	"next.orly.dev/pkg/encoders/filter"
	"next.orly.dev/pkg/encoders/kind"
	"next.orly.dev/pkg/encoders/tag"
	"next.orly.dev/pkg/encoders/timestamp"
)
|
||||||
|
|
||||||
|
// BenchmarkConfig holds the knobs for a benchmark run, populated from
// command-line flags by parseFlags.
type BenchmarkConfig struct {
	DataDir           string        // directory for the database (wiped on startup by NewBenchmark)
	NumEvents         int           // number of events generated per test
	ConcurrentWorkers int           // number of concurrent writer/reader goroutines
	TestDuration      time.Duration // wall-clock cap for duration-bound tests
	BurstPattern      bool          // whether burst-pattern testing is enabled
	ReportInterval    time.Duration // interval for periodic reporting
}
|
||||||
|
|
||||||
|
// BenchmarkResult captures the outcome of a single benchmark test for
// inclusion in the final report.
type BenchmarkResult struct {
	TestName          string        // human-readable test name
	Duration          time.Duration // total wall-clock time of the test
	TotalEvents       int           // operations that completed successfully
	EventsPerSecond   float64       // TotalEvents divided by Duration in seconds
	AvgLatency        time.Duration // mean per-operation latency
	P95Latency        time.Duration // 95th-percentile latency
	P99Latency        time.Duration // 99th-percentile latency
	SuccessRate       float64       // successful operations as a percentage of attempts
	ConcurrentWorkers int           // worker count the test ran with
	MemoryUsed        uint64        // heap bytes allocated when the test finished
	Errors            []string      // messages from failed operations
}
|
||||||
|
|
||||||
|
// Benchmark owns the database under test and accumulates per-test results.
type Benchmark struct {
	config  *BenchmarkConfig
	db      *database.D        // event store being benchmarked
	results []*BenchmarkResult // guarded by mu
	mu      sync.RWMutex       // protects results
}
|
||||||
|
|
||||||
|
// main wires the flag-driven configuration into a Benchmark, runs the three
// test scenarios in sequence, and prints and saves the final report.
func main() {
	config := parseFlags()

	fmt.Printf("Starting Nostr Relay Benchmark\n")
	fmt.Printf("Data Directory: %s\n", config.DataDir)
	fmt.Printf(
		"Events: %d, Workers: %d, Duration: %v\n",
		config.NumEvents, config.ConcurrentWorkers, config.TestDuration,
	)

	benchmark := NewBenchmark(config)
	defer benchmark.Close()

	// Run benchmark tests in a fixed order; each appends to benchmark.results.
	benchmark.RunPeakThroughputTest()
	benchmark.RunBurstPatternTest()
	benchmark.RunMixedReadWriteTest()

	// Generate report (stdout plus a text file under the data directory).
	benchmark.GenerateReport()
}
|
||||||
|
|
||||||
|
func parseFlags() *BenchmarkConfig {
|
||||||
|
config := &BenchmarkConfig{}
|
||||||
|
|
||||||
|
flag.StringVar(
|
||||||
|
&config.DataDir, "datadir", "/tmp/benchmark_db", "Database directory",
|
||||||
|
)
|
||||||
|
flag.IntVar(
|
||||||
|
&config.NumEvents, "events", 10000, "Number of events to generate",
|
||||||
|
)
|
||||||
|
flag.IntVar(
|
||||||
|
&config.ConcurrentWorkers, "workers", runtime.NumCPU(),
|
||||||
|
"Number of concurrent workers",
|
||||||
|
)
|
||||||
|
flag.DurationVar(
|
||||||
|
&config.TestDuration, "duration", 60*time.Second, "Test duration",
|
||||||
|
)
|
||||||
|
flag.BoolVar(
|
||||||
|
&config.BurstPattern, "burst", true, "Enable burst pattern testing",
|
||||||
|
)
|
||||||
|
flag.DurationVar(
|
||||||
|
&config.ReportInterval, "report-interval", 10*time.Second,
|
||||||
|
"Report interval",
|
||||||
|
)
|
||||||
|
|
||||||
|
flag.Parse()
|
||||||
|
return config
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewBenchmark deletes any existing database at config.DataDir, opens a
// fresh store there, and returns a Benchmark ready to run tests. It exits
// the process via log.Fatalf if the database cannot be created.
func NewBenchmark(config *BenchmarkConfig) *Benchmark {
	// Clean up existing data directory so each run starts cold.
	os.RemoveAll(config.DataDir)

	ctx := context.Background()
	// NOTE(review): a no-op cancel func is handed to database.New —
	// presumably the database only invokes it on shutdown; confirm nothing
	// in the database layer relies on real context cancellation here.
	cancel := func() {}

	db, err := database.New(ctx, cancel, config.DataDir, "info")
	if err != nil {
		log.Fatalf("Failed to create database: %v", err)
	}

	return &Benchmark{
		config:  config,
		db:      db,
		results: make([]*BenchmarkResult, 0),
	}
}
|
||||||
|
|
||||||
|
func (b *Benchmark) Close() {
|
||||||
|
if b.db != nil {
|
||||||
|
b.db.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunPeakThroughputTest measures maximum sustained write throughput: all
// configured events are pre-generated and queued on a buffered channel,
// then ConcurrentWorkers goroutines drain it, calling SaveEvent as fast as
// possible. Per-event latencies, errors, and the success count are gathered
// under a single mutex and folded into a BenchmarkResult on b.results.
func (b *Benchmark) RunPeakThroughputTest() {
	fmt.Println("\n=== Peak Throughput Test ===")

	start := time.Now()
	var wg sync.WaitGroup
	var totalEvents int64
	var errors []error
	var latencies []time.Duration
	var mu sync.Mutex // guards totalEvents, errors, latencies

	events := b.generateEvents(b.config.NumEvents)
	// Buffered to hold every event so the fill loop below never blocks.
	eventChan := make(chan *event.E, len(events))

	// Fill event channel, then close it so workers exit when drained.
	for _, ev := range events {
		eventChan <- ev
	}
	close(eventChan)

	// Start workers
	for i := 0; i < b.config.ConcurrentWorkers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()

			ctx := context.Background()
			for ev := range eventChan {
				eventStart := time.Now()

				_, _, err := b.db.SaveEvent(ctx, ev)
				latency := time.Since(eventStart)

				// NOTE: the shared mutex on this hot path serializes
				// bookkeeping across workers; latency numbers include a
				// small amount of that contention.
				mu.Lock()
				if err != nil {
					errors = append(errors, err)
				} else {
					totalEvents++
					latencies = append(latencies, latency)
				}
				mu.Unlock()
			}
		}(i)
	}

	wg.Wait()
	duration := time.Since(start)

	// Calculate metrics
	result := &BenchmarkResult{
		TestName:          "Peak Throughput",
		Duration:          duration,
		TotalEvents:       int(totalEvents),
		EventsPerSecond:   float64(totalEvents) / duration.Seconds(),
		ConcurrentWorkers: b.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
	}

	if len(latencies) > 0 {
		result.AvgLatency = calculateAvgLatency(latencies)
		result.P95Latency = calculatePercentileLatency(latencies, 0.95)
		result.P99Latency = calculatePercentileLatency(latencies, 0.99)
	}

	result.SuccessRate = float64(totalEvents) / float64(b.config.NumEvents) * 100

	for _, err := range errors {
		result.Errors = append(result.Errors, err.Error())
	}

	b.mu.Lock()
	b.results = append(b.results, result)
	b.mu.Unlock()

	fmt.Printf(
		"Events saved: %d/%d (%.1f%%)\n", totalEvents, b.config.NumEvents,
		result.SuccessRate,
	)
	fmt.Printf("Duration: %v\n", duration)
	fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
	fmt.Printf("Avg latency: %v\n", result.AvgLatency)
	fmt.Printf("P95 latency: %v\n", result.P95Latency)
	fmt.Printf("P99 latency: %v\n", result.P99Latency)
}
|
||||||
|
|
||||||
|
// RunBurstPatternTest simulates bursty client traffic: rapid bursts of
// concurrent SaveEvent calls (10% of NumEvents per burst, paced across a
// 100ms window) separated by 500ms quiet periods, until either all events
// are sent or TestDuration elapses. Results are appended to b.results.
//
// NOTE(review): if NumEvents < 10, burstSize is 0, so the inner loop never
// advances eventIndex — the outer loop then spins (print + sleep) until
// TestDuration expires, and the SuccessRate division by eventIndex yields
// NaN. Guard burstSize with a minimum of 1 to fix; confirm intended.
func (b *Benchmark) RunBurstPatternTest() {
	fmt.Println("\n=== Burst Pattern Test ===")

	start := time.Now()
	var totalEvents int64
	var errors []error
	var latencies []time.Duration
	var mu sync.Mutex // guards totalEvents, errors, latencies

	// Generate events for burst pattern
	events := b.generateEvents(b.config.NumEvents)

	// Simulate burst pattern: high activity periods followed by quiet periods
	burstSize := b.config.NumEvents / 10 // 10% of events in each burst
	quietPeriod := 500 * time.Millisecond
	burstPeriod := 100 * time.Millisecond

	ctx := context.Background()
	eventIndex := 0

	for eventIndex < len(events) && time.Since(start) < b.config.TestDuration {
		// Burst period - send events rapidly
		burstStart := time.Now()
		var wg sync.WaitGroup

		for i := 0; i < burstSize && eventIndex < len(events); i++ {
			wg.Add(1)
			// The event is passed as an argument so each goroutine writes
			// its own event even though eventIndex advances underneath.
			go func(ev *event.E) {
				defer wg.Done()

				eventStart := time.Now()
				_, _, err := b.db.SaveEvent(ctx, ev)
				latency := time.Since(eventStart)

				mu.Lock()
				if err != nil {
					errors = append(errors, err)
				} else {
					totalEvents++
					latencies = append(latencies, latency)
				}
				mu.Unlock()
			}(events[eventIndex])

			eventIndex++
			// Spread the burst's launches evenly across burstPeriod.
			time.Sleep(burstPeriod / time.Duration(burstSize))
		}

		wg.Wait()
		fmt.Printf(
			"Burst completed: %d events in %v\n", burstSize,
			time.Since(burstStart),
		)

		// Quiet period
		time.Sleep(quietPeriod)
	}

	duration := time.Since(start)

	// Calculate metrics
	result := &BenchmarkResult{
		TestName:          "Burst Pattern",
		Duration:          duration,
		TotalEvents:       int(totalEvents),
		EventsPerSecond:   float64(totalEvents) / duration.Seconds(),
		ConcurrentWorkers: b.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
	}

	if len(latencies) > 0 {
		result.AvgLatency = calculateAvgLatency(latencies)
		result.P95Latency = calculatePercentileLatency(latencies, 0.95)
		result.P99Latency = calculatePercentileLatency(latencies, 0.99)
	}

	// Success rate is relative to events actually attempted (eventIndex),
	// not NumEvents, since the duration cap may stop the test early.
	result.SuccessRate = float64(totalEvents) / float64(eventIndex) * 100

	for _, err := range errors {
		result.Errors = append(result.Errors, err.Error())
	}

	b.mu.Lock()
	b.results = append(b.results, result)
	b.mu.Unlock()

	fmt.Printf("Burst test completed: %d events in %v\n", totalEvents, duration)
	fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
}
|
||||||
|
|
||||||
|
// RunMixedReadWriteTest exercises interleaved reads and writes: after
// seeding the database with 1000 events, each of ConcurrentWorkers
// goroutines walks a strided slice of fresh events, alternating SaveEvent
// (even indices) with a limit-10 kind-1 filter query (odd indices), with a
// 10ms pause between operations, until TestDuration elapses or the events
// run out. Combined read+write metrics are appended to b.results.
func (b *Benchmark) RunMixedReadWriteTest() {
	fmt.Println("\n=== Mixed Read/Write Test ===")

	start := time.Now()
	var totalWrites, totalReads int64
	var writeLatencies, readLatencies []time.Duration
	var errors []error
	var mu sync.Mutex // guards the counters, latencies, and errors above

	// Pre-populate with some events for reading
	seedEvents := b.generateEvents(1000)
	ctx := context.Background()

	fmt.Println("Pre-populating database for read tests...")
	for _, ev := range seedEvents {
		// NOTE(review): seed errors are silently discarded; if seeding
		// fails the read side measures an emptier database than intended.
		b.db.SaveEvent(ctx, ev)
	}

	events := b.generateEvents(b.config.NumEvents)
	var wg sync.WaitGroup

	// Start mixed read/write workers
	for i := 0; i < b.config.ConcurrentWorkers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()

			// Each worker handles indices workerID, workerID+W, ... so
			// the workers partition the event slice without overlap.
			eventIndex := workerID
			for time.Since(start) < b.config.TestDuration && eventIndex < len(events) {
				// Alternate between write and read operations
				if eventIndex%2 == 0 {
					// Write operation
					writeStart := time.Now()
					_, _, err := b.db.SaveEvent(ctx, events[eventIndex])
					writeLatency := time.Since(writeStart)

					mu.Lock()
					if err != nil {
						errors = append(errors, err)
					} else {
						totalWrites++
						writeLatencies = append(writeLatencies, writeLatency)
					}
					mu.Unlock()
				} else {
					// Read operation: latest 10 text-note events.
					readStart := time.Now()
					f := filter.New()
					f.Kinds = kind.NewS(kind.TextNote)
					limit := uint(10)
					f.Limit = &limit
					_, err := b.db.GetSerialsFromFilter(f)
					readLatency := time.Since(readStart)

					mu.Lock()
					if err != nil {
						errors = append(errors, err)
					} else {
						totalReads++
						readLatencies = append(readLatencies, readLatency)
					}
					mu.Unlock()
				}

				eventIndex += b.config.ConcurrentWorkers
				time.Sleep(10 * time.Millisecond) // Small delay between operations
			}
		}(i)
	}

	wg.Wait()
	duration := time.Since(start)

	// Calculate metrics
	result := &BenchmarkResult{
		TestName:          "Mixed Read/Write",
		Duration:          duration,
		TotalEvents:       int(totalWrites + totalReads),
		EventsPerSecond:   float64(totalWrites+totalReads) / duration.Seconds(),
		ConcurrentWorkers: b.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
	}

	// Calculate combined latencies for overall metrics (safe to append
	// here: all workers have finished, so nothing else aliases the slices).
	allLatencies := append(writeLatencies, readLatencies...)
	if len(allLatencies) > 0 {
		result.AvgLatency = calculateAvgLatency(allLatencies)
		result.P95Latency = calculatePercentileLatency(allLatencies, 0.95)
		result.P99Latency = calculatePercentileLatency(allLatencies, 0.99)
	}

	result.SuccessRate = float64(totalWrites+totalReads) / float64(len(events)) * 100

	for _, err := range errors {
		result.Errors = append(result.Errors, err.Error())
	}

	b.mu.Lock()
	b.results = append(b.results, result)
	b.mu.Unlock()

	fmt.Printf(
		"Mixed test completed: %d writes, %d reads in %v\n", totalWrites,
		totalReads, duration,
	)
	fmt.Printf("Combined ops/sec: %.2f\n", result.EventsPerSecond)
}
|
||||||
|
|
||||||
|
// generateEvents builds count synthetic kind-1 (text note) events stamped
// with the current time. Each event gets a random 32-byte ID and pubkey, a
// "t" tag ("benchmark"), and an "e" tag cycling through 50 reference values
// so tag-filtered queries have something to match.
//
// NOTE(review): events are not signed and IDs are random rather than
// derived from the serialized event — presumably the database layer does
// not verify them in this path; confirm if verification is ever enabled.
func (b *Benchmark) generateEvents(count int) []*event.E {
	events := make([]*event.E, count)
	now := timestamp.Now()

	for i := 0; i < count; i++ {
		ev := event.New()

		// Generate random 32-byte ID. crypto/rand.Read always fills the
		// buffer (it does not fail on modern Go), so the return values
		// are deliberately ignored.
		ev.ID = make([]byte, 32)
		rand.Read(ev.ID)

		// Generate random 32-byte pubkey
		ev.Pubkey = make([]byte, 32)
		rand.Read(ev.Pubkey)

		ev.CreatedAt = now.I64()
		ev.Kind = kind.TextNote.K
		ev.Content = []byte(fmt.Sprintf(
			"This is test event number %d with some content", i,
		))

		// Create tags using NewFromBytesSlice
		ev.Tags = tag.NewS(
			tag.NewFromBytesSlice([]byte("t"), []byte("benchmark")),
			tag.NewFromBytesSlice(
				[]byte("e"), []byte(fmt.Sprintf("ref_%d", i%50)),
			),
		)

		events[i] = ev
	}

	return events
}
|
||||||
|
|
||||||
|
func (b *Benchmark) GenerateReport() {
|
||||||
|
fmt.Println("\n" + strings.Repeat("=", 80))
|
||||||
|
fmt.Println("BENCHMARK REPORT")
|
||||||
|
fmt.Println(strings.Repeat("=", 80))
|
||||||
|
|
||||||
|
b.mu.RLock()
|
||||||
|
defer b.mu.RUnlock()
|
||||||
|
|
||||||
|
for _, result := range b.results {
|
||||||
|
fmt.Printf("\nTest: %s\n", result.TestName)
|
||||||
|
fmt.Printf("Duration: %v\n", result.Duration)
|
||||||
|
fmt.Printf("Total Events: %d\n", result.TotalEvents)
|
||||||
|
fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
|
||||||
|
fmt.Printf("Success Rate: %.1f%%\n", result.SuccessRate)
|
||||||
|
fmt.Printf("Concurrent Workers: %d\n", result.ConcurrentWorkers)
|
||||||
|
fmt.Printf("Memory Used: %d MB\n", result.MemoryUsed/(1024*1024))
|
||||||
|
fmt.Printf("Avg Latency: %v\n", result.AvgLatency)
|
||||||
|
fmt.Printf("P95 Latency: %v\n", result.P95Latency)
|
||||||
|
fmt.Printf("P99 Latency: %v\n", result.P99Latency)
|
||||||
|
|
||||||
|
if len(result.Errors) > 0 {
|
||||||
|
fmt.Printf("Errors (%d):\n", len(result.Errors))
|
||||||
|
for i, err := range result.Errors {
|
||||||
|
if i < 5 { // Show first 5 errors
|
||||||
|
fmt.Printf(" - %s\n", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(result.Errors) > 5 {
|
||||||
|
fmt.Printf(" ... and %d more errors\n", len(result.Errors)-5)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fmt.Println(strings.Repeat("-", 40))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save report to file
|
||||||
|
reportPath := filepath.Join(b.config.DataDir, "benchmark_report.txt")
|
||||||
|
b.saveReportToFile(reportPath)
|
||||||
|
fmt.Printf("\nReport saved to: %s\n", reportPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b *Benchmark) saveReportToFile(path string) error {
|
||||||
|
file, err := os.Create(path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
file.WriteString("NOSTR RELAY BENCHMARK REPORT\n")
|
||||||
|
file.WriteString("============================\n\n")
|
||||||
|
file.WriteString(
|
||||||
|
fmt.Sprintf(
|
||||||
|
"Generated: %s\n", time.Now().Format(time.RFC3339),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
file.WriteString(fmt.Sprintf("Relay: next.orly.dev\n"))
|
||||||
|
file.WriteString(fmt.Sprintf("Database: BadgerDB\n"))
|
||||||
|
file.WriteString(fmt.Sprintf("Workers: %d\n", b.config.ConcurrentWorkers))
|
||||||
|
file.WriteString(
|
||||||
|
fmt.Sprintf(
|
||||||
|
"Test Duration: %v\n\n", b.config.TestDuration,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
|
||||||
|
b.mu.RLock()
|
||||||
|
defer b.mu.RUnlock()
|
||||||
|
|
||||||
|
for _, result := range b.results {
|
||||||
|
file.WriteString(fmt.Sprintf("Test: %s\n", result.TestName))
|
||||||
|
file.WriteString(fmt.Sprintf("Duration: %v\n", result.Duration))
|
||||||
|
file.WriteString(fmt.Sprintf("Events: %d\n", result.TotalEvents))
|
||||||
|
file.WriteString(
|
||||||
|
fmt.Sprintf(
|
||||||
|
"Events/sec: %.2f\n", result.EventsPerSecond,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
file.WriteString(
|
||||||
|
fmt.Sprintf(
|
||||||
|
"Success Rate: %.1f%%\n", result.SuccessRate,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
file.WriteString(fmt.Sprintf("Avg Latency: %v\n", result.AvgLatency))
|
||||||
|
file.WriteString(fmt.Sprintf("P95 Latency: %v\n", result.P95Latency))
|
||||||
|
file.WriteString(fmt.Sprintf("P99 Latency: %v\n", result.P99Latency))
|
||||||
|
file.WriteString(
|
||||||
|
fmt.Sprintf(
|
||||||
|
"Memory: %d MB\n", result.MemoryUsed/(1024*1024),
|
||||||
|
),
|
||||||
|
)
|
||||||
|
file.WriteString("\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helper functions
|
||||||
|
|
||||||
|
func calculateAvgLatency(latencies []time.Duration) time.Duration {
|
||||||
|
if len(latencies) == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
var total time.Duration
|
||||||
|
for _, l := range latencies {
|
||||||
|
total += l
|
||||||
|
}
|
||||||
|
return total / time.Duration(len(latencies))
|
||||||
|
}
|
||||||
|
|
||||||
|
func calculatePercentileLatency(
|
||||||
|
latencies []time.Duration, percentile float64,
|
||||||
|
) time.Duration {
|
||||||
|
if len(latencies) == 0 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Simple percentile calculation - in production would sort first
|
||||||
|
index := int(float64(len(latencies)) * percentile)
|
||||||
|
if index >= len(latencies) {
|
||||||
|
index = len(latencies) - 1
|
||||||
|
}
|
||||||
|
return latencies[index]
|
||||||
|
}
|
||||||
|
|
||||||
|
// getMemUsage reports the number of bytes currently allocated on the Go heap.
func getMemUsage() uint64 {
	ms := runtime.MemStats{}
	runtime.ReadMemStats(&ms)
	return ms.Alloc
}
|
||||||
368
cmd/benchmark/setup-external-relays.sh
Executable file
368
cmd/benchmark/setup-external-relays.sh
Executable file
@@ -0,0 +1,368 @@
|
|||||||
|
#!/bin/bash

# Setup script for downloading and configuring external relay repositories
# for benchmarking

set -e

# Resolve the directory this script lives in so all paths are absolute.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
EXTERNAL_DIR="${SCRIPT_DIR}/external"

echo "Setting up external relay repositories for benchmarking..."

# Create external directory
mkdir -p "${EXTERNAL_DIR}"

# Function to clone or update repository
# $1: git URL  $2: destination directory  $3: human-readable name
clone_or_update() {
    local repo_url="$1"
    local repo_dir="$2"
    local repo_name="$3"

    echo "Setting up ${repo_name}..."

    if [ -d "${repo_dir}" ]; then
        echo " ${repo_name} already exists, updating..."
        cd "${repo_dir}"
        # Default branch name varies between repos; try main then master,
        # and tolerate failure (e.g. offline or detached HEAD).
        git pull origin main 2>/dev/null || git pull origin master 2>/dev/null || true
        cd - > /dev/null
    else
        echo " Cloning ${repo_name}..."
        git clone "${repo_url}" "${repo_dir}"
    fi
}

# Clone khatru
clone_or_update "https://github.com/fiatjaf/khatru.git" "${EXTERNAL_DIR}/khatru" "Khatru"

# Clone relayer
clone_or_update "https://github.com/fiatjaf/relayer.git" "${EXTERNAL_DIR}/relayer" "Relayer"

# Clone strfry
clone_or_update "https://github.com/hoytech/strfry.git" "${EXTERNAL_DIR}/strfry" "Strfry"

# Clone nostr-rs-relay
clone_or_update "https://git.sr.ht/~gheartsfield/nostr-rs-relay" "${EXTERNAL_DIR}/nostr-rs-relay" "Nostr-rs-relay"

echo "Creating Dockerfiles for external relays..."

# Create Dockerfile for Khatru SQLite
cat > "${SCRIPT_DIR}/Dockerfile.khatru-sqlite" << 'EOF'
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic-sqlite example
RUN cd examples/basic-sqlite && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o khatru-sqlite .

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic-sqlite/khatru-sqlite /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/khatru.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-sqlite"]
EOF

# Create Dockerfile for Khatru Badger
cat > "${SCRIPT_DIR}/Dockerfile.khatru-badger" << 'EOF'
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates

WORKDIR /build
COPY . .

# Build the basic-badger example
RUN cd examples/basic-badger && \
    go mod tidy && \
    CGO_ENABLED=0 go build -o khatru-badger .

FROM alpine:latest
RUN apk --no-cache add ca-certificates wget
WORKDIR /app
COPY --from=builder /build/examples/basic-badger/khatru-badger /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/badger
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-badger"]
EOF

# Create Dockerfile for Relayer basic example
cat > "${SCRIPT_DIR}/Dockerfile.relayer-basic" << 'EOF'
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic example
RUN cd examples/basic && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o relayer-basic .

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic/relayer-basic /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/relayer.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/relayer-basic"]
EOF

# Create Dockerfile for Strfry
cat > "${SCRIPT_DIR}/Dockerfile.strfry" << 'EOF'
FROM ubuntu:22.04 AS builder

ENV DEBIAN_FRONTEND=noninteractive

# Install build dependencies
RUN apt-get update && apt-get install -y \
    git \
    build-essential \
    liblmdb-dev \
    libsecp256k1-dev \
    pkg-config \
    libtool \
    autoconf \
    automake \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build
COPY . .

# Build strfry
RUN make setup-golpe && \
    make -j$(nproc)

FROM ubuntu:22.04
RUN apt-get update && apt-get install -y \
    liblmdb0 \
    libsecp256k1-0 \
    curl \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
COPY --from=builder /build/strfry /app/
RUN mkdir -p /data

EXPOSE 8080
ENV STRFRY_DB_PATH=/data/strfry.lmdb
ENV STRFRY_RELAY_PORT=8080

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8080 || exit 1

CMD ["/app/strfry", "relay"]
EOF

# Create Dockerfile for nostr-rs-relay
cat > "${SCRIPT_DIR}/Dockerfile.nostr-rs-relay" << 'EOF'
FROM rust:1.70-alpine AS builder

RUN apk add --no-cache musl-dev sqlite-dev

WORKDIR /build
COPY . .

# Build the relay
RUN cargo build --release

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/target/release/nostr-rs-relay /app/
RUN mkdir -p /data

EXPOSE 8080
ENV RUST_LOG=info

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1

CMD ["/app/nostr-rs-relay"]
EOF

echo "Creating configuration files..."

# Create configs directory
mkdir -p "${SCRIPT_DIR}/configs"

# Create strfry configuration
cat > "${SCRIPT_DIR}/configs/strfry.conf" << 'EOF'
##
## Default strfry config
##

# Directory that contains the strfry LMDB database (restart required)
db = "/data/strfry.lmdb"

dbParams {
    # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
    maxreaders = 256

    # Size of mmap to use when loading LMDB (default is 1TB, which is probably reasonable) (restart required)
    mapsize = 1099511627776
}

relay {
    # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
    bind = "0.0.0.0"

    # Port to open for the nostr websocket protocol (restart required)
    port = 8080

    # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
    nofiles = 1000000

    # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
    realIpHeader = ""

    info {
        # NIP-11: Name of this server. Short/descriptive (< 30 characters)
        name = "strfry benchmark"

        # NIP-11: Detailed description of this server, free-form
        description = "A strfry relay for benchmarking"

        # NIP-11: Administrative pubkey, for contact purposes
        pubkey = ""

        # NIP-11: Alternative contact for this server
        contact = ""
    }

    # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
    maxWebsocketPayloadSize = 131072

    # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
    autoPingSeconds = 55

    # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy) (restart required)
    enableTcpKeepalive = false

    # How much uninterrupted CPU time a REQ query should get during its DB scan
    queryTimesliceBudgetMicroseconds = 10000

    # Maximum records that can be returned per filter
    maxFilterLimit = 500

    # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
    maxSubsPerConnection = 20

    writePolicy {
        # If non-empty, path to an executable script that implements the writePolicy plugin logic
        plugin = ""
    }

    compression {
        # Use permessage-deflate compression if supported by client. Reduces bandwidth, but uses more CPU (restart required)
        enabled = true

        # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
        slidingWindow = true
    }

    logging {
        # Dump all incoming messages
        dumpInAll = false

        # Dump all incoming EVENT messages
        dumpInEvents = false

        # Dump all incoming REQ/CLOSE messages
        dumpInReqs = false

        # Log performance metrics for initial REQ database scans
        dbScanPerf = false
    }

    numThreads {
        # Ingester threads: route incoming requests, validate events/sigs (restart required)
        ingester = 3

        # reqWorker threads: Handle initial DB scan for events (restart required)
        reqWorker = 3

        # reqMonitor threads: Handle filtering of new events (restart required)
        reqMonitor = 3

        # yesstr threads: experimental yesstr protocol (restart required)
        yesstr = 1
    }
}
EOF

# Create nostr-rs-relay configuration
cat > "${SCRIPT_DIR}/configs/config.toml" << 'EOF'
[info]
relay_url = "ws://localhost:8080"
name = "nostr-rs-relay benchmark"
description = "A nostr-rs-relay for benchmarking"
pubkey = ""
contact = ""

[database]
data_directory = "/data"
in_memory = false
engine = "sqlite"

[network]
port = 8080
address = "0.0.0.0"

[limits]
messages_per_sec = 0
subscriptions_per_min = 0
max_event_bytes = 65535
max_ws_message_bytes = 131072
max_ws_frame_bytes = 131072

[authorization]
pubkey_whitelist = []

[verified_users]
mode = "passive"
domain_whitelist = []
domain_blacklist = []

[pay_to_relay]
enabled = false

[options]
reject_future_seconds = 30
EOF

echo "Creating data directories..."
# One data dir per relay under test, plus a shared reports dir.
mkdir -p "${SCRIPT_DIR}/data"/{next-orly,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay}
mkdir -p "${SCRIPT_DIR}/reports"

echo "Setup complete!"
echo ""
echo "External relay repositories have been cloned to: ${EXTERNAL_DIR}"
echo "Dockerfiles have been created for all relay implementations"
echo "Configuration files have been created in: ${SCRIPT_DIR}/configs"
echo "Data directories have been created in: ${SCRIPT_DIR}/data"
echo ""
echo "To run the benchmark:"
echo " cd ${SCRIPT_DIR}"
echo " docker-compose up --build"
echo ""
echo "Reports will be generated in: ${SCRIPT_DIR}/reports"
||||||
@@ -6,10 +6,10 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"crypto.orly/ec/schnorr"
|
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
b32 "encoders.orly/bech32encoding"
|
b32 "next.orly.dev/pkg/encoders/bech32encoding"
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
func usage() {
|
func usage() {
|
||||||
|
|||||||
45
go.mod
45
go.mod
@@ -3,24 +3,29 @@ module next.orly.dev
|
|||||||
go 1.25.0
|
go 1.25.0
|
||||||
|
|
||||||
require (
|
require (
|
||||||
acl.orly v0.0.0-00010101000000-000000000000
|
|
||||||
crypto.orly v0.0.0-00010101000000-000000000000
|
|
||||||
database.orly v0.0.0-00010101000000-000000000000
|
|
||||||
encoders.orly v0.0.0-00010101000000-000000000000
|
|
||||||
github.com/adrg/xdg v0.5.3
|
github.com/adrg/xdg v0.5.3
|
||||||
github.com/coder/websocket v1.8.13
|
github.com/coder/websocket v1.8.13
|
||||||
|
github.com/davecgh/go-spew v1.1.1
|
||||||
github.com/dgraph-io/badger/v4 v4.8.0
|
github.com/dgraph-io/badger/v4 v4.8.0
|
||||||
|
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
|
||||||
|
github.com/klauspost/cpuid/v2 v2.3.0
|
||||||
github.com/pkg/profile v1.7.0
|
github.com/pkg/profile v1.7.0
|
||||||
|
github.com/puzpuzpuz/xsync/v3 v3.5.1
|
||||||
|
github.com/stretchr/testify v1.10.0
|
||||||
|
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b
|
||||||
go-simpler.org/env v0.12.0
|
go-simpler.org/env v0.12.0
|
||||||
interfaces.orly v0.0.0-00010101000000-000000000000
|
go.uber.org/atomic v1.11.0
|
||||||
|
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b
|
||||||
|
golang.org/x/lint v0.0.0-20241112194109-818c5a804067
|
||||||
|
golang.org/x/net v0.43.0
|
||||||
|
honnef.co/go/tools v0.6.1
|
||||||
lol.mleku.dev v1.0.2
|
lol.mleku.dev v1.0.2
|
||||||
protocol.orly v0.0.0-00010101000000-000000000000
|
lukechampine.com/frand v1.5.1
|
||||||
utils.orly v0.0.0-00010101000000-000000000000
|
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
|
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
|
||||||
github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
|
github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
|
||||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||||
github.com/fatih/color v1.18.0 // indirect
|
github.com/fatih/color v1.18.0 // indirect
|
||||||
@@ -29,32 +34,20 @@ require (
|
|||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
github.com/google/flatbuffers v25.2.10+incompatible // indirect
|
github.com/google/flatbuffers v25.2.10+incompatible // indirect
|
||||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
|
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
|
||||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
|
|
||||||
github.com/klauspost/compress v1.18.0 // indirect
|
github.com/klauspost/compress v1.18.0 // indirect
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
|
||||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||||
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
github.com/templexxx/cpu v0.0.1 // indirect
|
github.com/templexxx/cpu v0.0.1 // indirect
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect
|
|
||||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||||
go.uber.org/atomic v1.11.0 // indirect
|
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect
|
||||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect
|
golang.org/x/mod v0.27.0 // indirect
|
||||||
golang.org/x/net v0.41.0 // indirect
|
golang.org/x/sync v0.16.0 // indirect
|
||||||
golang.org/x/sys v0.35.0 // indirect
|
golang.org/x/sys v0.35.0 // indirect
|
||||||
|
golang.org/x/tools v0.36.0 // indirect
|
||||||
google.golang.org/protobuf v1.36.6 // indirect
|
google.golang.org/protobuf v1.36.6 // indirect
|
||||||
lukechampine.com/frand v1.5.1 // indirect
|
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||||
)
|
|
||||||
|
|
||||||
replace (
|
|
||||||
acl.orly => ./pkg/acl
|
|
||||||
crypto.orly => ./pkg/crypto
|
|
||||||
database.orly => ./pkg/database
|
|
||||||
encoders.orly => ./pkg/encoders
|
|
||||||
interfaces.orly => ./pkg/interfaces
|
|
||||||
next.orly.dev => ../../
|
|
||||||
protocol.orly => ./pkg/protocol
|
|
||||||
utils.orly => ./pkg/utils
|
|
||||||
)
|
)
|
||||||
|
|||||||
45
go.sum
45
go.sum
@@ -1,3 +1,5 @@
|
|||||||
|
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
|
||||||
|
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||||
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
|
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
|
||||||
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
|
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||||
@@ -40,6 +42,10 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt
|
|||||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||||
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
@@ -48,12 +54,16 @@ github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
|||||||
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
|
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
|
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||||
|
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||||
|
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||||
|
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||||
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
|
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
|
||||||
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
|
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
|
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
|
||||||
@@ -70,20 +80,47 @@ go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mx
|
|||||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||||
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
|
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
|
||||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
|
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
|
||||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ=
|
||||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||||
|
golang.org/x/lint v0.0.0-20241112194109-818c5a804067 h1:adDmSQyFTCiv19j015EGKJBoaa7ElV0Q1Wovb/4G7NA=
|
||||||
|
golang.org/x/lint v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
|
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||||
|
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||||
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
|
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
|
||||||
|
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
||||||
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||||
|
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||||
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||||
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
|
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||||
|
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||||
|
golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
|
||||||
|
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
|
||||||
|
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
|
||||||
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c=
|
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c=
|
||||||
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
|
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
|
||||||
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
|
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
|
||||||
|
|||||||
7
main.go
7
main.go
@@ -6,12 +6,12 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
|
|
||||||
acl "acl.orly"
|
|
||||||
database "database.orly"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
"next.orly.dev/app"
|
"next.orly.dev/app"
|
||||||
"next.orly.dev/app/config"
|
"next.orly.dev/app/config"
|
||||||
|
acl "next.orly.dev/pkg/acl"
|
||||||
|
database "next.orly.dev/pkg/database"
|
||||||
"next.orly.dev/pkg/version"
|
"next.orly.dev/pkg/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -30,9 +30,10 @@ func main() {
|
|||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
acl.Registry.Active.Store(cfg.ACLMode)
|
acl.Registry.Active.Store(cfg.ACLMode)
|
||||||
if err = acl.Registry.Configure(cfg, db); chk.E(err) {
|
if err = acl.Registry.Configure(cfg, db, ctx); chk.E(err) {
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
acl.Registry.Syncer()
|
||||||
quit := app.Run(ctx, cfg, db)
|
quit := app.Run(ctx, cfg, db)
|
||||||
sigs := make(chan os.Signal, 1)
|
sigs := make(chan os.Signal, 1)
|
||||||
signal.Notify(sigs, os.Interrupt)
|
signal.Notify(sigs, os.Interrupt)
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
package acl
|
package acl
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"interfaces.orly/acl"
|
"next.orly.dev/pkg/interfaces/acl"
|
||||||
"utils.orly/atomic"
|
"next.orly.dev/pkg/utils/atomic"
|
||||||
)
|
)
|
||||||
|
|
||||||
var Registry = &S{}
|
var Registry = &S{}
|
||||||
@@ -28,10 +28,10 @@ func (s *S) Configure(cfg ...any) (err error) {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *S) GetAccessLevel(pub []byte) (level string) {
|
func (s *S) GetAccessLevel(pub []byte, address string) (level string) {
|
||||||
for _, i := range s.ACL {
|
for _, i := range s.ACL {
|
||||||
if i.Type() == s.Active.Load() {
|
if i.Type() == s.Active.Load() {
|
||||||
level = i.GetAccessLevel(pub)
|
level = i.GetAccessLevel(pub, address)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -48,6 +48,15 @@ func (s *S) GetACLInfo() (name, description, documentation string) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *S) Syncer() {
|
||||||
|
for _, i := range s.ACL {
|
||||||
|
if i.Type() == s.Active.Load() {
|
||||||
|
i.Syncer()
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (s *S) Type() (typ string) {
|
func (s *S) Type() (typ string) {
|
||||||
for _, i := range s.ACL {
|
for _, i := range s.ACL {
|
||||||
if i.Type() == s.Active.Load() {
|
if i.Type() == s.Active.Load() {
|
||||||
|
|||||||
@@ -1,30 +1,43 @@
|
|||||||
package acl
|
package acl
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
database "database.orly"
|
"github.com/coder/websocket"
|
||||||
"database.orly/indexes/types"
|
|
||||||
"encoders.orly/bech32encoding"
|
|
||||||
"encoders.orly/event"
|
|
||||||
"encoders.orly/filter"
|
|
||||||
"encoders.orly/hex"
|
|
||||||
"encoders.orly/kind"
|
|
||||||
"encoders.orly/tag"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/errorf"
|
"lol.mleku.dev/errorf"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
"next.orly.dev/app/config"
|
"next.orly.dev/app/config"
|
||||||
utils "utils.orly"
|
database "next.orly.dev/pkg/database"
|
||||||
|
"next.orly.dev/pkg/database/indexes/types"
|
||||||
|
"next.orly.dev/pkg/encoders/bech32encoding"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/eoseenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
|
||||||
|
"next.orly.dev/pkg/encoders/event"
|
||||||
|
"next.orly.dev/pkg/encoders/filter"
|
||||||
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
|
"next.orly.dev/pkg/encoders/kind"
|
||||||
|
"next.orly.dev/pkg/encoders/tag"
|
||||||
|
utils "next.orly.dev/pkg/utils"
|
||||||
|
"next.orly.dev/pkg/utils/normalize"
|
||||||
|
"next.orly.dev/pkg/utils/values"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Follows struct {
|
type Follows struct {
|
||||||
|
Ctx context.Context
|
||||||
cfg *config.C
|
cfg *config.C
|
||||||
*database.D
|
*database.D
|
||||||
followsMx sync.RWMutex
|
followsMx sync.RWMutex
|
||||||
admins [][]byte
|
admins [][]byte
|
||||||
follows [][]byte
|
follows [][]byte
|
||||||
|
updated chan struct{}
|
||||||
|
subsCancel context.CancelFunc
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Follows) Configure(cfg ...any) (err error) {
|
func (f *Follows) Configure(cfg ...any) (err error) {
|
||||||
@@ -32,11 +45,14 @@ func (f *Follows) Configure(cfg ...any) (err error) {
|
|||||||
for _, ca := range cfg {
|
for _, ca := range cfg {
|
||||||
switch c := ca.(type) {
|
switch c := ca.(type) {
|
||||||
case *config.C:
|
case *config.C:
|
||||||
log.D.F("setting ACL config: %v", c)
|
// log.D.F("setting ACL config: %v", c)
|
||||||
f.cfg = c
|
f.cfg = c
|
||||||
case *database.D:
|
case *database.D:
|
||||||
log.D.F("setting ACL database: %s", c.Path())
|
// log.D.F("setting ACL database: %s", c.Path())
|
||||||
f.D = c
|
f.D = c
|
||||||
|
case context.Context:
|
||||||
|
// log.D.F("setting ACL context: %s", c.Value("id"))
|
||||||
|
f.Ctx = c
|
||||||
default:
|
default:
|
||||||
err = errorf.E("invalid type: %T", reflect.TypeOf(ca))
|
err = errorf.E("invalid type: %T", reflect.TypeOf(ca))
|
||||||
}
|
}
|
||||||
@@ -48,13 +64,15 @@ func (f *Follows) Configure(cfg ...any) (err error) {
|
|||||||
// find admin follow lists
|
// find admin follow lists
|
||||||
f.followsMx.Lock()
|
f.followsMx.Lock()
|
||||||
defer f.followsMx.Unlock()
|
defer f.followsMx.Unlock()
|
||||||
log.I.F("finding admins")
|
// log.I.F("finding admins")
|
||||||
f.follows, f.admins = nil, nil
|
f.follows, f.admins = nil, nil
|
||||||
for _, admin := range f.cfg.Admins {
|
for _, admin := range f.cfg.Admins {
|
||||||
log.I.F("%s", admin)
|
// log.I.F("%s", admin)
|
||||||
var adm []byte
|
var adm []byte
|
||||||
if adm, err = bech32encoding.NpubOrHexToPublicKeyBinary(admin); chk.E(err) {
|
if a, e := bech32encoding.NpubOrHexToPublicKeyBinary(admin); chk.E(e) {
|
||||||
continue
|
continue
|
||||||
|
} else {
|
||||||
|
adm = a
|
||||||
}
|
}
|
||||||
log.I.F("admin: %0x", adm)
|
log.I.F("admin: %0x", adm)
|
||||||
f.admins = append(f.admins, adm)
|
f.admins = append(f.admins, adm)
|
||||||
@@ -80,22 +98,29 @@ func (f *Follows) Configure(cfg ...any) (err error) {
|
|||||||
if ev, err = f.D.FetchEventBySerial(s); chk.E(err) {
|
if ev, err = f.D.FetchEventBySerial(s); chk.E(err) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
log.I.F("admin follow list:\n%s", ev.Serialize())
|
// log.I.F("admin follow list:\n%s", ev.Serialize())
|
||||||
for _, v := range ev.Tags.GetAll([]byte("p")) {
|
for _, v := range ev.Tags.GetAll([]byte("p")) {
|
||||||
log.I.F("adding follow: %s", v.Value())
|
// log.I.F("adding follow: %s", v.Value())
|
||||||
var a []byte
|
var a []byte
|
||||||
if a, err = hex.Dec(string(v.Value())); chk.E(err) {
|
if b, e := hex.Dec(string(v.Value())); chk.E(e) {
|
||||||
continue
|
continue
|
||||||
|
} else {
|
||||||
|
a = b
|
||||||
}
|
}
|
||||||
f.follows = append(f.follows, a)
|
f.follows = append(f.follows, a)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if f.updated == nil {
|
||||||
|
f.updated = make(chan struct{})
|
||||||
|
} else {
|
||||||
|
f.updated <- struct{}{}
|
||||||
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Follows) GetAccessLevel(pub []byte) (level string) {
|
func (f *Follows) GetAccessLevel(pub []byte, address string) (level string) {
|
||||||
if f.cfg == nil {
|
if f.cfg == nil {
|
||||||
return "write"
|
return "write"
|
||||||
}
|
}
|
||||||
@@ -121,6 +146,200 @@ func (f *Follows) GetACLInfo() (name, description, documentation string) {
|
|||||||
|
|
||||||
func (f *Follows) Type() string { return "follows" }
|
func (f *Follows) Type() string { return "follows" }
|
||||||
|
|
||||||
|
func (f *Follows) adminRelays() (urls []string) {
|
||||||
|
f.followsMx.RLock()
|
||||||
|
admins := make([][]byte, len(f.admins))
|
||||||
|
copy(admins, f.admins)
|
||||||
|
f.followsMx.RUnlock()
|
||||||
|
seen := make(map[string]struct{})
|
||||||
|
for _, adm := range admins {
|
||||||
|
fl := &filter.F{
|
||||||
|
Authors: tag.NewFromAny(adm),
|
||||||
|
Kinds: kind.NewS(kind.New(kind.RelayListMetadata.K)),
|
||||||
|
}
|
||||||
|
idxs, err := database.GetIndexesFromFilter(fl)
|
||||||
|
if chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var sers types.Uint40s
|
||||||
|
for _, idx := range idxs {
|
||||||
|
s, err := f.D.GetSerialsByRange(idx)
|
||||||
|
if chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
sers = append(sers, s...)
|
||||||
|
}
|
||||||
|
for _, s := range sers {
|
||||||
|
ev, err := f.D.FetchEventBySerial(s)
|
||||||
|
if chk.E(err) || ev == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, v := range ev.Tags.GetAll([]byte("r")) {
|
||||||
|
u := string(v.Value())
|
||||||
|
n := string(normalize.URL(u))
|
||||||
|
if n == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := seen[n]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seen[n] = struct{}{}
|
||||||
|
urls = append(urls, n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Follows) startSubscriptions(ctx context.Context) {
|
||||||
|
// build authors list: admins + follows
|
||||||
|
f.followsMx.RLock()
|
||||||
|
authors := make([][]byte, 0, len(f.admins)+len(f.follows))
|
||||||
|
authors = append(authors, f.admins...)
|
||||||
|
authors = append(authors, f.follows...)
|
||||||
|
f.followsMx.RUnlock()
|
||||||
|
if len(authors) == 0 {
|
||||||
|
log.W.F("follows syncer: no authors (admins+follows) to subscribe to")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
urls := f.adminRelays()
|
||||||
|
if len(urls) == 0 {
|
||||||
|
log.W.F("follows syncer: no admin relays found in DB (kind 10002)")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.T.F(
|
||||||
|
"follows syncer: subscribing to %d relays for %d authors", len(urls),
|
||||||
|
len(authors),
|
||||||
|
)
|
||||||
|
for _, u := range urls {
|
||||||
|
u := u
|
||||||
|
go func() {
|
||||||
|
backoff := time.Second
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
c, _, err := websocket.Dial(ctx, u, nil)
|
||||||
|
if err != nil {
|
||||||
|
log.W.F("follows syncer: dial %s failed: %v", u, err)
|
||||||
|
timer := time.NewTimer(backoff)
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case <-timer.C:
|
||||||
|
}
|
||||||
|
if backoff < 30*time.Second {
|
||||||
|
backoff *= 2
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
backoff = time.Second
|
||||||
|
// send REQ
|
||||||
|
ff := &filter.S{}
|
||||||
|
f1 := &filter.F{
|
||||||
|
Authors: tag.NewFromBytesSlice(authors...),
|
||||||
|
Limit: values.ToUintPointer(0),
|
||||||
|
}
|
||||||
|
*ff = append(*ff, f1)
|
||||||
|
req := reqenvelope.NewFrom([]byte("follows-sync"), ff)
|
||||||
|
if err = c.Write(
|
||||||
|
ctx, websocket.MessageText, req.Marshal(nil),
|
||||||
|
); chk.E(err) {
|
||||||
|
_ = c.Close(websocket.StatusInternalError, "write failed")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
log.T.F("sent REQ to %s for follows subscription", u)
|
||||||
|
// read loop
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
_ = c.Close(websocket.StatusNormalClosure, "ctx done")
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
_, data, err := c.Read(ctx)
|
||||||
|
if err != nil {
|
||||||
|
_ = c.Close(websocket.StatusNormalClosure, "read err")
|
||||||
|
break
|
||||||
|
}
|
||||||
|
label, rem, err := envelopes.Identify(data)
|
||||||
|
if chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch label {
|
||||||
|
case eventenvelope.L:
|
||||||
|
res, _, err := eventenvelope.ParseResult(rem)
|
||||||
|
if chk.E(err) || res == nil || res.Event == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// verify signature before saving
|
||||||
|
if ok, err := res.Event.Verify(); chk.T(err) || !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, _, err = f.D.SaveEvent(
|
||||||
|
ctx, res.Event,
|
||||||
|
); err != nil {
|
||||||
|
if !strings.HasPrefix(
|
||||||
|
err.Error(), "blocked:",
|
||||||
|
) {
|
||||||
|
log.W.F(
|
||||||
|
"follows syncer: save event failed: %v",
|
||||||
|
err,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
// ignore duplicates and continue
|
||||||
|
}
|
||||||
|
log.I.F(
|
||||||
|
"saved new event from follows syncer: %0x",
|
||||||
|
res.Event.ID,
|
||||||
|
)
|
||||||
|
case eoseenvelope.L:
|
||||||
|
// ignore, continue subscription
|
||||||
|
default:
|
||||||
|
// ignore other labels
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// loop reconnect
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *Follows) Syncer() {
|
||||||
|
log.I.F("starting follows syncer")
|
||||||
|
go func() {
|
||||||
|
// start immediately if Configure already ran
|
||||||
|
for {
|
||||||
|
var innerCancel context.CancelFunc
|
||||||
|
select {
|
||||||
|
case <-f.Ctx.Done():
|
||||||
|
if f.subsCancel != nil {
|
||||||
|
f.subsCancel()
|
||||||
|
}
|
||||||
|
return
|
||||||
|
case <-f.updated:
|
||||||
|
// close and reopen subscriptions to users on the follow list and admins
|
||||||
|
if f.subsCancel != nil {
|
||||||
|
log.I.F("follows syncer: cancelling existing subscriptions")
|
||||||
|
f.subsCancel()
|
||||||
|
}
|
||||||
|
ctx, cancel := context.WithCancel(f.Ctx)
|
||||||
|
f.subsCancel = cancel
|
||||||
|
innerCancel = cancel
|
||||||
|
log.I.F("follows syncer: (re)opening subscriptions")
|
||||||
|
f.startSubscriptions(ctx)
|
||||||
|
}
|
||||||
|
// small sleep to avoid tight loop if updated fires rapidly
|
||||||
|
if innerCancel == nil {
|
||||||
|
time.Sleep(50 * time.Millisecond)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
f.updated <- struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
log.T.F("registering follows ACL")
|
log.T.F("registering follows ACL")
|
||||||
Registry.Register(new(Follows))
|
Registry.Register(new(Follows))
|
||||||
|
|||||||
@@ -1,54 +0,0 @@
|
|||||||
module acl.orly
|
|
||||||
|
|
||||||
go 1.25.0
|
|
||||||
|
|
||||||
replace (
|
|
||||||
acl.orly => ../acl
|
|
||||||
crypto.orly => ../crypto
|
|
||||||
database.orly => ../database
|
|
||||||
encoders.orly => ../encoders
|
|
||||||
interfaces.orly => ../interfaces
|
|
||||||
next.orly.dev => ../../
|
|
||||||
protocol.orly => ../protocol
|
|
||||||
utils.orly => ../utils
|
|
||||||
)
|
|
||||||
|
|
||||||
require (
|
|
||||||
database.orly v0.0.0-00010101000000-000000000000
|
|
||||||
encoders.orly v0.0.0-00010101000000-000000000000
|
|
||||||
interfaces.orly v0.0.0-00010101000000-000000000000
|
|
||||||
lol.mleku.dev v1.0.2
|
|
||||||
next.orly.dev v0.0.0-00010101000000-000000000000
|
|
||||||
utils.orly v0.0.0-00010101000000-000000000000
|
|
||||||
)
|
|
||||||
|
|
||||||
require (
|
|
||||||
crypto.orly v0.0.0-00010101000000-000000000000 // indirect
|
|
||||||
github.com/adrg/xdg v0.5.3 // indirect
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
|
||||||
github.com/dgraph-io/badger/v4 v4.8.0 // indirect
|
|
||||||
github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
|
|
||||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
|
||||||
github.com/fatih/color v1.18.0 // indirect
|
|
||||||
github.com/go-logr/logr v1.4.3 // indirect
|
|
||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
|
||||||
github.com/google/flatbuffers v25.2.10+incompatible // indirect
|
|
||||||
github.com/klauspost/compress v1.18.0 // indirect
|
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
|
||||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
|
||||||
github.com/templexxx/cpu v0.0.1 // indirect
|
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect
|
|
||||||
go-simpler.org/env v0.12.0 // indirect
|
|
||||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
|
||||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
|
||||||
go.uber.org/atomic v1.11.0 // indirect
|
|
||||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect
|
|
||||||
golang.org/x/net v0.41.0 // indirect
|
|
||||||
golang.org/x/sys v0.35.0 // indirect
|
|
||||||
google.golang.org/protobuf v1.36.6 // indirect
|
|
||||||
lukechampine.com/frand v1.5.1 // indirect
|
|
||||||
)
|
|
||||||
@@ -1,68 +0,0 @@
|
|||||||
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
|
|
||||||
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
|
||||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
|
|
||||||
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
|
|
||||||
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
|
|
||||||
github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI=
|
|
||||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
|
|
||||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
|
||||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
|
||||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
|
||||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
|
||||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
|
||||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
|
||||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
|
||||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
|
||||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
|
||||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
|
||||||
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
|
|
||||||
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
|
||||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
|
||||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
|
||||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
|
||||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
|
||||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
|
||||||
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
|
|
||||||
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
|
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
|
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ=
|
|
||||||
go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs=
|
|
||||||
go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI=
|
|
||||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
|
||||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
|
||||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
|
||||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
|
||||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
|
||||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
|
||||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
|
||||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
|
||||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
|
||||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
|
||||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
|
|
||||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
|
|
||||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
|
||||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
|
||||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
|
||||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
|
||||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c=
|
|
||||||
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
|
|
||||||
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
|
|
||||||
lukechampine.com/frand v1.5.1/go.mod h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q=
|
|
||||||
@@ -8,7 +8,7 @@ type None struct{}
|
|||||||
|
|
||||||
func (n None) Configure(cfg ...any) (err error) { return }
|
func (n None) Configure(cfg ...any) (err error) { return }
|
||||||
|
|
||||||
func (n None) GetAccessLevel(pub []byte) (level string) {
|
func (n None) GetAccessLevel(pub []byte, address string) (level string) {
|
||||||
return "write"
|
return "write"
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -20,6 +20,8 @@ func (n None) Type() string {
|
|||||||
return "none"
|
return "none"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (n None) Syncer() {}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
log.T.F("registering none ACL")
|
log.T.F("registering none ACL")
|
||||||
Registry.Register(new(None))
|
Registry.Register(new(None))
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ import (
|
|||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"crypto.orly/ec/base58"
|
"next.orly.dev/pkg/crypto/ec/base58"
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
var stringTests = []struct {
|
var stringTests = []struct {
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"crypto.orly/ec/base58"
|
"next.orly.dev/pkg/crypto/ec/base58"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ package base58
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
|
||||||
"crypto.orly/sha256"
|
"next.orly.dev/pkg/crypto/sha256"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ErrChecksum indicates that the checksum of a check-encoded string does not verify against
|
// ErrChecksum indicates that the checksum of a check-encoded string does not verify against
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ package base58_test
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"crypto.orly/ec/base58"
|
"next.orly.dev/pkg/crypto/ec/base58"
|
||||||
)
|
)
|
||||||
|
|
||||||
var checkEncodingStringTests = []struct {
|
var checkEncodingStringTests = []struct {
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ package base58_test
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"crypto.orly/ec/base58"
|
"next.orly.dev/pkg/crypto/ec/base58"
|
||||||
)
|
)
|
||||||
|
|
||||||
// This example demonstrates how to decode modified base58 encoded data.
|
// This example demonstrates how to decode modified base58 encoded data.
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestBech32 tests whether decoding and re-encoding the valid BIP-173 test
|
// TestBech32 tests whether decoding and re-encoding the valid BIP-173 test
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ import (
|
|||||||
"math/big"
|
"math/big"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
// setHex decodes the passed big-endian hex string into the internal field value
|
// setHex decodes the passed big-endian hex string into the internal field value
|
||||||
|
|||||||
@@ -20,7 +20,7 @@ package btcec
|
|||||||
// reverse the transform than to operate in affine coordinates.
|
// reverse the transform than to operate in affine coordinates.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// KoblitzCurve provides an implementation for secp256k1 that fits the ECC
|
// KoblitzCurve provides an implementation for secp256k1 that fits the ECC
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"crypto.orly/ec/wire"
|
"next.orly.dev/pkg/crypto/ec/wire"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
@@ -3,8 +3,8 @@ package chaincfg
|
|||||||
import (
|
import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"crypto.orly/ec/chainhash"
|
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||||
"crypto.orly/ec/wire"
|
"next.orly.dev/pkg/crypto/ec/wire"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
@@ -5,8 +5,8 @@ import (
|
|||||||
"math/big"
|
"math/big"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"crypto.orly/ec/chainhash"
|
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||||
"crypto.orly/ec/wire"
|
"next.orly.dev/pkg/crypto/ec/wire"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
@@ -9,8 +9,8 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"crypto.orly/sha256"
|
"next.orly.dev/pkg/crypto/sha256"
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ package chainhash
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// mainNetGenesisHash is the hash of the first block in the block chain for the
|
// mainNetGenesisHash is the hash of the first block in the block chain for the
|
||||||
|
|||||||
@@ -6,7 +6,7 @@
|
|||||||
package chainhash
|
package chainhash
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto.orly/sha256"
|
"next.orly.dev/pkg/crypto/sha256"
|
||||||
)
|
)
|
||||||
|
|
||||||
// HashB calculates hash(b) and returns the resulting bytes.
|
// HashB calculates hash(b) and returns the resulting bytes.
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
package btcec
|
package btcec
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// GenerateSharedSecret generates a shared secret based on a secret key and a
|
// GenerateSharedSecret generates a shared secret based on a secret key and a
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ package btcec
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestGenerateSharedSecret(t *testing.T) {
|
func TestGenerateSharedSecret(t *testing.T) {
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ package btcec
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// JacobianPoint is an element of the group formed by the secp256k1 curve in
|
// JacobianPoint is an element of the group formed by the secp256k1 curve in
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ package ecdsa
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
// hexToModNScalar converts the passed hex string into a ModNScalar and will
|
// hexToModNScalar converts the passed hex string into a ModNScalar and will
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ package ecdsa
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// References:
|
// References:
|
||||||
|
|||||||
@@ -14,10 +14,10 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"crypto.orly/ec/secp256k1"
|
|
||||||
"encoders.orly/hex"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// hexToBytes converts the passed hex string into bytes and will panic if there
|
// hexToBytes converts the passed hex string into bytes and will panic if there
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
package btcec
|
package btcec
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Error identifies an error related to public key cryptography using a
|
// Error identifies an error related to public key cryptography using a
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
package btcec
|
package btcec
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// FieldVal implements optimized fixed-precision arithmetic over the secp256k1
|
// FieldVal implements optimized fixed-precision arithmetic over the secp256k1
|
||||||
|
|||||||
@@ -9,8 +9,8 @@ import (
|
|||||||
"math/rand"
|
"math/rand"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"encoders.orly/hex"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestIsZero ensures that checking if a field IsZero works as expected.
|
// TestIsZero ensures that checking if a field IsZero works as expected.
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ package btcec
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
func FuzzParsePubKey(f *testing.F) {
|
func FuzzParsePubKey(f *testing.F) {
|
||||||
|
|||||||
@@ -4,7 +4,7 @@
|
|||||||
package btcec
|
package btcec
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ModNScalar implements optimized 256-bit constant-time fixed-precision
|
// ModNScalar implements optimized 256-bit constant-time fixed-precision
|
||||||
|
|||||||
@@ -8,9 +8,9 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"crypto.orly/ec"
|
"next.orly.dev/pkg/crypto/ec"
|
||||||
"crypto.orly/ec/schnorr"
|
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
@@ -5,9 +5,9 @@ package musig2
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"crypto.orly/ec"
|
|
||||||
"crypto.orly/ec/schnorr"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
|
"next.orly.dev/pkg/crypto/ec"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
@@ -7,12 +7,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"sort"
|
"sort"
|
||||||
|
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/utils"
|
||||||
|
|
||||||
"crypto.orly/ec"
|
"next.orly.dev/pkg/crypto/ec"
|
||||||
"crypto.orly/ec/chainhash"
|
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||||
"crypto.orly/ec/schnorr"
|
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
@@ -10,10 +10,10 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"crypto.orly/ec"
|
"next.orly.dev/pkg/crypto/ec"
|
||||||
"crypto.orly/ec/schnorr"
|
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -8,9 +8,9 @@ import (
|
|||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"crypto.orly/ec"
|
"next.orly.dev/pkg/crypto/ec"
|
||||||
"crypto.orly/sha256"
|
"next.orly.dev/pkg/crypto/sha256"
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
@@ -9,10 +9,10 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
"crypto.orly/ec"
|
|
||||||
"crypto.orly/ec/chainhash"
|
|
||||||
"crypto.orly/ec/schnorr"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
|
"next.orly.dev/pkg/crypto/ec"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
@@ -9,9 +9,9 @@ import (
|
|||||||
"path"
|
"path"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"encoders.orly/hex"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
type nonceGenTestCase struct {
|
type nonceGenTestCase struct {
|
||||||
|
|||||||
@@ -7,12 +7,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
|
||||||
"crypto.orly/ec"
|
|
||||||
"crypto.orly/ec/chainhash"
|
|
||||||
"crypto.orly/ec/schnorr"
|
|
||||||
"crypto.orly/ec/secp256k1"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/crypto/ec"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
|||||||
@@ -11,10 +11,10 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"crypto.orly/ec"
|
|
||||||
"crypto.orly/ec/secp256k1"
|
|
||||||
"encoders.orly/hex"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
"next.orly.dev/pkg/crypto/ec"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
package btcec
|
package btcec
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// These constants define the lengths of serialized public keys.
|
// These constants define the lengths of serialized public keys.
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ package btcec
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/utils"
|
||||||
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
"github.com/davecgh/go-spew/spew"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -9,10 +9,10 @@ import (
|
|||||||
"math/big"
|
"math/big"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"crypto.orly/ec"
|
"next.orly.dev/pkg/crypto/ec"
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
"crypto.orly/sha256"
|
"next.orly.dev/pkg/crypto/sha256"
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
// hexToBytes converts the passed hex string into bytes and will panic if there
|
// hexToBytes converts the passed hex string into bytes and will panic if there
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ package schnorr
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"crypto.orly/ec"
|
"next.orly.dev/pkg/crypto/ec"
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// These constants define the lengths of serialized public keys.
|
// These constants define the lengths of serialized public keys.
|
||||||
|
|||||||
@@ -5,10 +5,10 @@ package schnorr
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"crypto.orly/ec"
|
|
||||||
"crypto.orly/ec/chainhash"
|
|
||||||
"crypto.orly/ec/secp256k1"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
|
"next.orly.dev/pkg/crypto/ec"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
@@ -11,10 +11,10 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"testing/quick"
|
"testing/quick"
|
||||||
|
|
||||||
"crypto.orly/ec"
|
|
||||||
"crypto.orly/ec/secp256k1"
|
|
||||||
"encoders.orly/hex"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
|
"next.orly.dev/pkg/crypto/ec"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
|
|
||||||
"github.com/davecgh/go-spew/spew"
|
"github.com/davecgh/go-spew/spew"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
package btcec
|
package btcec
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SecretKey wraps an ecdsa.SecretKey as a convenience mainly for signing things with the secret key without having to
|
// SecretKey wraps an ecdsa.SecretKey as a convenience mainly for signing things with the secret key without having to
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ package secp256k1
|
|||||||
import (
|
import (
|
||||||
"math/bits"
|
"math/bits"
|
||||||
|
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
// References:
|
// References:
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ package secp256k1
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestGenerateSharedSecret(t *testing.T) {
|
func TestGenerateSharedSecret(t *testing.T) {
|
||||||
|
|||||||
@@ -11,9 +11,9 @@ import (
|
|||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"crypto.orly/ec/secp256k1"
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
"crypto.orly/sha256"
|
"next.orly.dev/pkg/crypto/sha256"
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
// This example demonstrates use of GenerateSharedSecret to encrypt a message
|
// This example demonstrates use of GenerateSharedSecret to encrypt a message
|
||||||
|
|||||||
@@ -52,7 +52,7 @@ package secp256k1
|
|||||||
// ordinarily would. See the documentation for FieldVal for more details.
|
// ordinarily would. See the documentation for FieldVal for more details.
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Constants used to make the code more readable.
|
// Constants used to make the code more readable.
|
||||||
|
|||||||
@@ -14,9 +14,9 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"encoders.orly/hex"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SetHex decodes the passed big-endian hex string into the internal field value
|
// SetHex decodes the passed big-endian hex string into the internal field value
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ package secp256k1
|
|||||||
import (
|
import (
|
||||||
"math/big"
|
"math/big"
|
||||||
|
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
)
|
)
|
||||||
|
|
||||||
// References:
|
// References:
|
||||||
|
|||||||
@@ -12,9 +12,9 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"encoders.orly/hex"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// SetHex interprets the provided hex string as a 256-bit big-endian unsigned
|
// SetHex interprets the provided hex string as a 256-bit big-endian unsigned
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"hash"
|
"hash"
|
||||||
|
|
||||||
"crypto.orly/sha256"
|
"next.orly.dev/pkg/crypto/sha256"
|
||||||
)
|
)
|
||||||
|
|
||||||
// References:
|
// References:
|
||||||
|
|||||||
@@ -8,9 +8,9 @@ package secp256k1
|
|||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"crypto.orly/sha256"
|
"next.orly.dev/pkg/crypto/sha256"
|
||||||
"encoders.orly/hex"
|
"next.orly.dev/pkg/encoders/hex"
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// hexToBytes converts the passed hex string into bytes and will panic if there
|
// hexToBytes converts the passed hex string into bytes and will panic if there
|
||||||
|
|||||||
@@ -13,9 +13,9 @@ import (
|
|||||||
"math/big"
|
"math/big"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"crypto.orly/ec/secp256k1"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// curveParams houses the secp256k1 curve parameters for convenient access.
|
// curveParams houses the secp256k1 curve parameters for convenient access.
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestParsePubKey ensures that public keys are properly parsed according
|
// TestParsePubKey ensures that public keys are properly parsed according
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ import (
|
|||||||
"math/big"
|
"math/big"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestGenerateSecretKey ensures the key generation works as expected.
|
// TestGenerateSecretKey ensures the key generation works as expected.
|
||||||
|
|||||||
@@ -7,10 +7,10 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"crypto.orly/ec/bech32"
|
|
||||||
"crypto.orly/ec/chaincfg"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"utils.orly"
|
"next.orly.dev/pkg/crypto/ec/bech32"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/chaincfg"
|
||||||
|
"next.orly.dev/pkg/utils"
|
||||||
)
|
)
|
||||||
|
|
||||||
// AddressSegWit is the base address type for all SegWit addresses.
|
// AddressSegWit is the base address type for all SegWit addresses.
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ package wire
|
|||||||
import (
|
import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"crypto.orly/ec/chainhash"
|
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||||
)
|
)
|
||||||
|
|
||||||
// BlockHeader defines information about a block and is used in the bitcoin
|
// BlockHeader defines information about a block and is used in the bitcoin
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
package wire
|
package wire
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto.orly/ec/chainhash"
|
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||||
)
|
)
|
||||||
|
|
||||||
// OutPoint defines a bitcoin data type that is used to track previous
|
// OutPoint defines a bitcoin data type that is used to track previous
|
||||||
|
|||||||
@@ -1,35 +0,0 @@
|
|||||||
module crypto.orly
|
|
||||||
|
|
||||||
go 1.25.0
|
|
||||||
|
|
||||||
require (
|
|
||||||
encoders.orly v0.0.0-00010101000000-000000000000
|
|
||||||
github.com/davecgh/go-spew v1.1.1
|
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0
|
|
||||||
github.com/stretchr/testify v1.11.1
|
|
||||||
interfaces.orly v0.0.0-00010101000000-000000000000
|
|
||||||
lol.mleku.dev v1.0.2
|
|
||||||
utils.orly v0.0.0-00010101000000-000000000000
|
|
||||||
)
|
|
||||||
|
|
||||||
require (
|
|
||||||
github.com/fatih/color v1.18.0 // indirect
|
|
||||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
|
||||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
|
||||||
github.com/templexxx/cpu v0.0.1 // indirect
|
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect
|
|
||||||
golang.org/x/sys v0.35.0 // indirect
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
|
||||||
)
|
|
||||||
|
|
||||||
replace (
|
|
||||||
acl.orly => ../acl
|
|
||||||
crypto.orly => ../crypto
|
|
||||||
database.orly => ../database
|
|
||||||
encoders.orly => ../encoders
|
|
||||||
interfaces.orly => ../interfaces
|
|
||||||
next.orly.dev => ../../
|
|
||||||
protocol.orly => ../protocol
|
|
||||||
utils.orly => ../utils
|
|
||||||
)
|
|
||||||
@@ -1,27 +0,0 @@
|
|||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
|
||||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
|
||||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
|
||||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
|
||||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
|
||||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
|
||||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
|
||||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
|
||||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
|
||||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
|
||||||
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
|
|
||||||
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
|
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
|
|
||||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ=
|
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|
||||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
|
||||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|
||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
|
||||||
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c=
|
|
||||||
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
|
|
||||||
@@ -3,8 +3,8 @@
|
|||||||
package p256k
|
package p256k
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto.orly/p256k/btcec"
|
|
||||||
"lol.mleku.dev/log"
|
"lol.mleku.dev/log"
|
||||||
|
"next.orly.dev/pkg/crypto/p256k/btcec"
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
|||||||
@@ -4,18 +4,18 @@
|
|||||||
package btcec
|
package btcec
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"crypto.orly/ec/schnorr"
|
|
||||||
"crypto.orly/ec/secp256k1"
|
|
||||||
"interfaces.orly/signer"
|
|
||||||
"lol.mleku.dev/chk"
|
"lol.mleku.dev/chk"
|
||||||
"lol.mleku.dev/errorf"
|
"lol.mleku.dev/errorf"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||||
|
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
|
"next.orly.dev/pkg/interfaces/signer"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Signer is an implementation of signer.I that uses the btcec library.
|
// Signer is an implementation of signer.I that uses the btcec library.
|
||||||
type Signer struct {
|
type Signer struct {
|
||||||
SecretKey *secp256k1.SecretKey
|
SecretKey *secp256k1.SecretKey
|
||||||
PublicKey *secp256k1.PublicKey
|
PublicKey *secp256k1.PublicKey
|
||||||
BTCECSec *ec.SecretKey
|
BTCECSec *secp256k1.SecretKey
|
||||||
pkb, skb []byte
|
pkb, skb []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -23,11 +23,11 @@ var _ signer.I = &Signer{}
|
|||||||
|
|
||||||
// Generate creates a new Signer.
|
// Generate creates a new Signer.
|
||||||
func (s *Signer) Generate() (err error) {
|
func (s *Signer) Generate() (err error) {
|
||||||
if s.SecretKey, err = ec.NewSecretKey(); chk.E(err) {
|
if s.SecretKey, err = secp256k1.GenerateSecretKey(); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
s.skb = s.SecretKey.Serialize()
|
s.skb = s.SecretKey.Serialize()
|
||||||
s.BTCECSec, _ = ec.PrivKeyFromBytes(s.skb)
|
s.BTCECSec = secp256k1.PrivKeyFromBytes(s.skb)
|
||||||
s.PublicKey = s.SecretKey.PubKey()
|
s.PublicKey = s.SecretKey.PubKey()
|
||||||
s.pkb = schnorr.SerializePubKey(s.PublicKey)
|
s.pkb = schnorr.SerializePubKey(s.PublicKey)
|
||||||
return
|
return
|
||||||
@@ -43,7 +43,7 @@ func (s *Signer) InitSec(sec []byte) (err error) {
|
|||||||
s.SecretKey = secp256k1.SecKeyFromBytes(sec)
|
s.SecretKey = secp256k1.SecKeyFromBytes(sec)
|
||||||
s.PublicKey = s.SecretKey.PubKey()
|
s.PublicKey = s.SecretKey.PubKey()
|
||||||
s.pkb = schnorr.SerializePubKey(s.PublicKey)
|
s.pkb = schnorr.SerializePubKey(s.PublicKey)
|
||||||
s.BTCECSec, _ = ec.PrivKeyFromBytes(s.skb)
|
s.BTCECSec = secp256k1.PrivKeyFromBytes(s.skb)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -142,7 +142,7 @@ func (s *Signer) ECDH(pubkeyBytes []byte) (secret []byte, err error) {
|
|||||||
); chk.E(err) {
|
); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
secret = ec.GenerateSharedSecret(s.BTCECSec, pub)
|
secret = secp256k1.GenerateSharedSecret(s.BTCECSec, pub)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -154,7 +154,7 @@ type Keygen struct {
|
|||||||
// Generate a new key pair. If the result is suitable, the embedded Signer can have its contents
|
// Generate a new key pair. If the result is suitable, the embedded Signer can have its contents
|
||||||
// extracted.
|
// extracted.
|
||||||
func (k *Keygen) Generate() (pubBytes []byte, err error) {
|
func (k *Keygen) Generate() (pubBytes []byte, err error) {
|
||||||
if k.Signer.SecretKey, err = ec.NewSecretKey(); chk.E(err) {
|
if k.Signer.SecretKey, err = secp256k1.GenerateSecretKey(); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
k.Signer.PublicKey = k.SecretKey.PubKey()
|
k.Signer.PublicKey = k.SecretKey.PubKey()
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user