forked from mleku/next.orly.dev
Compare commits
14 Commits
| Author | SHA1 | Date |
|---|---|---|
| | a9893a0918 | |
| | 8290e1ae0e | |
| | fc546ddc0b | |
| | c45276ef08 | |
| | fefa4d202e | |
| | bf062a4a46 | |
| | 246591b60b | |
| | 098595717f | |
| | bc1527e6cf | |
| | 45c31795e7 | |
| | 3ec2f60e0b | |
| | 110223fc4e | |
| | 2dd119401b | |
| | 6e06905773 | |
@@ -92,4 +92,6 @@ A good typical example:

use the source of the relay-tester to help guide what expectations the test has,
and use context7 for information about the nostr protocol, and use additional
log statements to help locate the cause of bugs

always use Go v1.25.1 for everything involving Go
7 .gitignore (vendored)
@@ -29,7 +29,8 @@ node_modules/**
# and others
/go.work.sum
/secp256k1/

cmd/benchmark/external
cmd/benchmark/data
# But not these files...
!/.gitignore
!*.go
@@ -87,6 +88,9 @@ node_modules/**
!.gitignore
!version
!out.jsonl
!Dockerfile*
!strfry.conf
!config.toml
# ...even if they are in subdirectories
!*/
/blocklist.json
@@ -108,3 +112,4 @@ pkg/database/testrealy
/.idea/inspectionProfiles/Project_Default.xml
/.idea/.name
/ctxproxy.config.yml
cmd/benchmark/external/**
@@ -27,6 +27,7 @@ type C struct {
DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/share/ORLY"`
Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
HealthPort int `env:"ORLY_HEALTH_PORT" default:"0" usage:"optional health check HTTP port; 0 disables"`
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"relay log level: fatal error warn info debug trace"`
DBLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
LogToStdout bool `env:"ORLY_LOG_TO_STDOUT" default:"false" usage:"log to stdout instead of stderr"`
@@ -34,7 +35,7 @@ type C struct {
IPWhitelist []string `env:"ORLY_IP_WHITELIST" usage:"comma-separated list of IP addresses to allow access from, matches on prefixes to allow private subnets, eg 10.0.0 = 10.0.0.0/8"`
Admins []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
Owners []string `env:"ORLY_OWNERS" usage:"comma-separated list of owner npubs, who have full control of the relay for wipe and restart and other functions"`
ACLMode string `env:"ORLY_ACL_MODE" usage:"ACL mode: follows,none" default:"follows"`
ACLMode string `env:"ORLY_ACL_MODE" usage:"ACL mode: follows,none" default:"none"`
}

// New creates and initializes a new configuration object for the relay
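The only behavioral change in this hunk is the `ORLY_ACL_MODE` default moving from "follows" to "none". As a hedged sketch (the project resolves these values through its struct tags, not through this helper), the effective resolution order for such an env-tagged default looks like:

```go
package main

import (
    "fmt"
    "os"
)

// getEnvDefault is a hypothetical stand-in for how an `env:"..."` tag with a
// `default:"..."` value resolves: an explicitly set environment variable wins,
// otherwise the tag's default is used.
func getEnvDefault(key, def string) string {
    if v, ok := os.LookupEnv(key); ok && v != "" {
        return v
    }
    return def
}

func main() {
    // After this change, a relay started without ORLY_ACL_MODE set runs with
    // ACL mode "none" rather than "follows".
    fmt.Println("ACL mode:", getEnvDefault("ORLY_ACL_MODE", "none"))
}
```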
@@ -1,11 +1,11 @@
package app

import (
"encoders.orly/envelopes/authenvelope"
"encoders.orly/envelopes/okenvelope"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"protocol.orly/auth"
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
"next.orly.dev/pkg/encoders/envelopes/okenvelope"
"next.orly.dev/pkg/protocol/auth"
)

func (l *Listener) HandleAuth(b []byte) (err error) {
@@ -3,9 +3,9 @@ package app
import (
"errors"

"encoders.orly/envelopes/closeenvelope"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/encoders/envelopes/closeenvelope"
)

// HandleClose processes a CLOSE envelope by unmarshalling the request,
@@ -3,18 +3,18 @@ package app
import (
"fmt"

"database.orly/indexes/types"
"encoders.orly/envelopes/eventenvelope"
"encoders.orly/event"
"encoders.orly/filter"
"encoders.orly/hex"
"encoders.orly/ints"
"encoders.orly/kind"
"encoders.orly/tag"
"encoders.orly/tag/atag"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
utils "utils.orly"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/ints"
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/encoders/tag/atag"
utils "next.orly.dev/pkg/utils"
)

func (l *Listener) GetSerialsFromFilter(f *filter.F) (
@@ -6,15 +6,15 @@ import (
"strings"
"time"

acl "acl.orly"
"encoders.orly/envelopes/authenvelope"
"encoders.orly/envelopes/eventenvelope"
"encoders.orly/envelopes/okenvelope"
"encoders.orly/kind"
"encoders.orly/reason"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
utils "utils.orly"
"next.orly.dev/pkg/acl"
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
"next.orly.dev/pkg/encoders/envelopes/okenvelope"
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/reason"
"next.orly.dev/pkg/utils"
)

func (l *Listener) HandleEvent(msg []byte) (err error) {
@@ -151,7 +151,7 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
return
}
// Deliver the event to subscribers immediately after sending OK response
l.publishers.Deliver(env.E)
go l.publishers.Deliver(env.E)
log.D.F("saved event %0x", env.E.ID)
var isNewFromAdmin bool
for _, admin := range l.Admins {
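The second hunk changes `l.publishers.Deliver(env.E)` to run on its own goroutine, so a slow subscriber cannot delay the OK acknowledgement. A minimal, self-contained sketch of that pattern (types and names here are illustrative, not the relay's API):

```go
package main

import (
    "fmt"
    "time"
)

type event struct{ id string }

// deliver stands in for fanning an event out to subscribers; it may block on
// slow connections.
func deliver(ev event) {
    time.Sleep(50 * time.Millisecond)
    fmt.Println("delivered", ev.id)
}

func main() {
    ev := event{id: "abc123"}
    // Fire-and-forget delivery: the handler can acknowledge the event with OK
    // immediately, at the cost of losing strict ordering between the
    // acknowledgement and subscriber delivery.
    go deliver(ev)
    fmt.Println("OK sent for", ev.id)
    time.Sleep(100 * time.Millisecond) // keep the sketch alive long enough to observe delivery
}
```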
@@ -1,15 +1,15 @@
package app

import (
"encoders.orly/envelopes"
"encoders.orly/envelopes/authenvelope"
"encoders.orly/envelopes/closeenvelope"
"encoders.orly/envelopes/eventenvelope"
"encoders.orly/envelopes/noticeenvelope"
"encoders.orly/envelopes/reqenvelope"
"lol.mleku.dev/chk"
"lol.mleku.dev/errorf"
"lol.mleku.dev/log"
"next.orly.dev/pkg/encoders/envelopes"
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
"next.orly.dev/pkg/encoders/envelopes/closeenvelope"
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
"next.orly.dev/pkg/encoders/envelopes/noticeenvelope"
"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
)

func (l *Listener) HandleMessage(msg []byte, remote string) {
@@ -7,8 +7,8 @@ import (

"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/protocol/relayinfo"
"next.orly.dev/pkg/version"
"protocol.orly/relayinfo"
)

// HandleRelayInfo generates and returns a relay information document in JSON
@@ -6,25 +6,25 @@ import (
"fmt"
"time"

acl "acl.orly"
"encoders.orly/envelopes/authenvelope"
"encoders.orly/envelopes/closedenvelope"
"encoders.orly/envelopes/eoseenvelope"
"encoders.orly/envelopes/eventenvelope"
"encoders.orly/envelopes/okenvelope"
"encoders.orly/envelopes/reqenvelope"
"encoders.orly/event"
"encoders.orly/filter"
"encoders.orly/hex"
"encoders.orly/kind"
"encoders.orly/reason"
"encoders.orly/tag"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
utils "utils.orly"
"utils.orly/normalize"
"utils.orly/pointers"
acl "next.orly.dev/pkg/acl"
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
"next.orly.dev/pkg/encoders/envelopes/closedenvelope"
"next.orly.dev/pkg/encoders/envelopes/eoseenvelope"
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
"next.orly.dev/pkg/encoders/envelopes/okenvelope"
"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/reason"
"next.orly.dev/pkg/encoders/tag"
utils "next.orly.dev/pkg/utils"
"next.orly.dev/pkg/utils/normalize"
"next.orly.dev/pkg/utils/pointers"
)

func (l *Listener) HandleReq(msg []byte) (err error) {
@@ -114,33 +114,56 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
continue
}
}
// Use a separate context for QueryEvents to prevent cancellation issues
queryCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
log.T.F("HandleReq: About to QueryEvents for %s, main context done: %v", l.remote, l.ctx.Err() != nil)
if events, err = l.QueryEvents(queryCtx, f); chk.E(err) {
if errors.Is(err, badger.ErrDBClosed) {
return
}
log.T.F("HandleReq: QueryEvents error for %s: %v", l.remote, err)
err = nil
}
log.T.F("HandleReq: QueryEvents completed for %s, found %d events", l.remote, len(events))
// Use a separate context for QueryEvents to prevent cancellation issues
queryCtx, cancel := context.WithTimeout(
context.Background(), 30*time.Second,
)
defer cancel()
log.T.F(
"HandleReq: About to QueryEvents for %s, main context done: %v",
l.remote, l.ctx.Err() != nil,
)
if events, err = l.QueryEvents(queryCtx, f); chk.E(err) {
if errors.Is(err, badger.ErrDBClosed) {
return
}
log.T.F("HandleReq: QueryEvents error for %s: %v", l.remote, err)
err = nil
}
defer func() {
for _, ev := range events {
ev.Free()
}
}()
log.T.F(
"HandleReq: QueryEvents completed for %s, found %d events",
l.remote, len(events),
)
}
var tmp event.S
privCheck:
for _, ev := range events {
if kind.IsPrivileged(ev.Kind) &&
accessLevel != "admin" { // admins can see all events
log.I.F("checking privileged event %s", ev.ID)
log.T.C(
func() string {
return fmt.Sprintf(
"checking privileged event %0x", ev.ID,
)
},
)
pk := l.authedPubkey.Load()
if pk == nil {
continue
}
if utils.FastEqual(ev.Pubkey, pk) {
log.I.F(
"privileged event %s is for logged in pubkey %0x", ev.ID,
pk,
log.T.C(
func() string {
return fmt.Sprintf(
"privileged event %s is for logged in pubkey %0x",
ev.ID, pk,
)
},
)
tmp = append(tmp, ev)
continue
@@ -152,17 +175,25 @@ privCheck:
continue
}
if utils.FastEqual(pt, pk) {
log.I.F(
"privileged event %s is for logged in pubkey %0x",
ev.ID, pk,
log.T.C(
func() string {
return fmt.Sprintf(
"privileged event %s is for logged in pubkey %0x",
ev.ID, pk,
)
},
)
tmp = append(tmp, ev)
continue privCheck
}
}
log.W.F(
"privileged event %s does not contain the logged in pubkey %0x",
ev.ID, pk,
log.T.C(
func() string {
return fmt.Sprintf(
"privileged event %s does not contain the logged in pubkey %0x",
ev.ID, pk,
)
},
)
} else {
tmp = append(tmp, ev)
@@ -171,9 +202,13 @@ privCheck:
events = tmp
seen := make(map[string]struct{})
for _, ev := range events {
log.T.F(
"REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
hex.Enc(ev.ID), ev.Kind,
log.D.C(
func() string {
return fmt.Sprintf(
"REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
hex.Enc(ev.ID), ev.Kind,
)
},
)
log.T.C(
func() string {
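The reflowed hunk keeps QueryEvents running under its own 30-second deadline derived from `context.Background()` rather than the connection's context, and adds a deferred `ev.Free()` over the results. A minimal sketch of the context-decoupling idea (`queryEvents` below is a stand-in, not the relay's implementation):

```go
package main

import (
    "context"
    "fmt"
    "time"
)

// queryEvents simulates a database scan that respects context cancellation.
func queryEvents(ctx context.Context) ([]string, error) {
    select {
    case <-time.After(10 * time.Millisecond):
        return []string{"event-1", "event-2"}, nil
    case <-ctx.Done():
        return nil, ctx.Err()
    }
}

func main() {
    // Deriving the query context from context.Background() with a 30s timeout
    // means a cancelled connection context cannot abort a query that is
    // already in flight, while still bounding how long the scan may run.
    queryCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    events, err := queryEvents(queryCtx)
    fmt.Println(events, err)
}
```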
@@ -7,12 +7,12 @@ import (
"strings"
"time"

"encoders.orly/envelopes/authenvelope"
"encoders.orly/hex"
"github.com/coder/websocket"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"utils.orly/units"
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/utils/units"
)

const (
@@ -6,7 +6,7 @@ import (

"github.com/coder/websocket"
"lol.mleku.dev/chk"
"utils.orly/atomic"
"next.orly.dev/pkg/utils/atomic"
)

type Listener struct {
11 app/main.go
@@ -5,12 +5,13 @@ import (
"fmt"
"net/http"

database "database.orly"
"encoders.orly/bech32encoding"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/app/config"
"protocol.orly/publish"
acl "next.orly.dev/pkg/acl"
database "next.orly.dev/pkg/database"
"next.orly.dev/pkg/encoders/bech32encoding"
"next.orly.dev/pkg/protocol/publish"
)

func Run(
@@ -45,6 +46,10 @@ func Run(
publishers: publish.New(NewPublisher(ctx)),
Admins: adminKeys,
}
// provide publisher to ACL so background sync can dispatch events
if err := acl.Registry.Configure(cfg, db, ctx, l.publishers); chk.E(err) {
// if configuration fails, proceed but log; ACL might be 'none'
}
addr := fmt.Sprintf("%s:%d", cfg.Listen, cfg.Port)
log.I.F("starting listener on http://%s", addr)
go func() {
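The added lines hand the publisher to the ACL registry so its background sync can dispatch events, and treat a configuration failure as non-fatal. A generic sketch of that dependency-injection shape (all names below are hypothetical; only the idea mirrors the `acl.Registry.Configure` call above):

```go
package main

import (
    "context"
    "fmt"
)

// publisher is a minimal stand-in for the component that fans events out to
// connected clients.
type publisher interface {
    Deliver(event string)
}

type printPublisher struct{}

func (printPublisher) Deliver(event string) { fmt.Println("deliver:", event) }

// registry mimics an ACL registry that, once configured with a publisher, can
// dispatch events produced by its background sync.
type registry struct{ pub publisher }

func (r *registry) Configure(ctx context.Context, pub publisher) error {
    r.pub = pub
    return nil
}

func main() {
    r := &registry{}
    if err := r.Configure(context.Background(), printPublisher{}); err != nil {
        // a configuration failure is logged but not fatal, mirroring the
        // comment in the diff ("ACL might be 'none'")
        fmt.Println("acl configure:", err)
    }
    r.pub.Deliver("follow-list updated")
}
```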
@@ -1,9 +1,9 @@
package app

import (
"encoders.orly/envelopes/eventenvelope"
"encoders.orly/envelopes/okenvelope"
"encoders.orly/reason"
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
"next.orly.dev/pkg/encoders/envelopes/okenvelope"
"next.orly.dev/pkg/encoders/reason"
)

// OK represents a function that processes events or operations, using provided
@@ -5,17 +5,17 @@ import (
"fmt"
"sync"

"encoders.orly/envelopes/eventenvelope"
"encoders.orly/event"
"encoders.orly/filter"
"encoders.orly/hex"
"encoders.orly/kind"
"github.com/coder/websocket"
"interfaces.orly/publisher"
"interfaces.orly/typer"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
utils "utils.orly"
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/interfaces/publisher"
"next.orly.dev/pkg/interfaces/typer"
"next.orly.dev/pkg/utils"
)

const Type = "socketapi"
@@ -223,13 +223,13 @@ func (p *P) Deliver(ev *event.E) {
// Use a separate context with timeout for writes to prevent race conditions
// where the publisher context gets cancelled while writing events
writeCtx, cancel := context.WithTimeout(
context.Background(), WriteTimeout,
context.Background(), DefaultWriteTimeout,
)
defer cancel()

if err = d.w.Write(
writeCtx, websocket.MessageText, res.Marshal(nil),
); chk.E(err) {
); err != nil {
// On error, remove the subscriber connection safely
p.removeSubscriber(d.w)
_ = d.w.CloseNow()
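This hunk renames the write timeout constant to `DefaultWriteTimeout` and switches the error check to a plain `err != nil` so a failed write removes the subscriber. A generic, hedged sketch of that delivery pattern (the `writer` interface and helper names are hypothetical, not the package's API):

```go
package main

import (
    "context"
    "fmt"
    "time"
)

// writer abstracts a subscriber connection; in the relay this is a websocket.
type writer interface {
    Write(ctx context.Context, p []byte) error
}

// stalledWriter simulates a peer that never drains its socket.
type stalledWriter struct{}

func (stalledWriter) Write(ctx context.Context, p []byte) error {
    select {
    case <-time.After(2 * time.Second):
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}

// deliverOne bounds each write with its own deadline derived from
// context.Background(), so cancelling the publisher context cannot interrupt
// a write already in flight, and drops the subscriber when the write fails.
func deliverOne(w writer, payload []byte, timeout time.Duration, drop func()) {
    ctx, cancel := context.WithTimeout(context.Background(), timeout)
    defer cancel()
    if err := w.Write(ctx, payload); err != nil {
        drop()
        fmt.Println("write failed, subscriber removed:", err)
    }
}

func main() {
    deliverOne(stalledWriter{}, []byte(`["EVENT",...]`), 100*time.Millisecond, func() {
        fmt.Println("closing stalled connection")
    })
}
```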
@@ -6,10 +6,10 @@ import (
"strconv"
"strings"

"database.orly"
"lol.mleku.dev/chk"
"next.orly.dev/app/config"
"protocol.orly/publish"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/protocol/publish"
)

type Server struct {
46 cmd/benchmark/Dockerfile.benchmark (Normal file)
@@ -0,0 +1,46 @@
# Dockerfile for benchmark runner
FROM golang:1.25-alpine AS builder

# Install build dependencies
RUN apk add --no-cache git ca-certificates

# Set working directory
WORKDIR /build

# Copy go modules
COPY go.mod go.sum ./
RUN go mod download

# Copy source code
COPY . .

# Build the benchmark tool
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o benchmark cmd/benchmark/main.go

# Final stage
FROM alpine:latest

# Install runtime dependencies
RUN apk --no-cache add ca-certificates curl wget

WORKDIR /app

# Copy benchmark binary
COPY --from=builder /build/benchmark /app/benchmark

# Copy benchmark runner script
COPY cmd/benchmark/benchmark-runner.sh /app/benchmark-runner

# Make scripts executable
RUN chmod +x /app/benchmark-runner

# Create reports directory
RUN mkdir -p /reports

# Environment variables
ENV BENCHMARK_EVENTS=10000
ENV BENCHMARK_WORKERS=8
ENV BENCHMARK_DURATION=60s

# Run the benchmark runner
CMD ["/app/benchmark-runner"]
22 cmd/benchmark/Dockerfile.khatru-badger (Normal file)
@@ -0,0 +1,22 @@
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates

WORKDIR /build
COPY . .

# Build the basic-badger example
RUN cd examples/basic-badger && \
    go mod tidy && \
    CGO_ENABLED=0 go build -o khatru-badger .

FROM alpine:latest
RUN apk --no-cache add ca-certificates wget
WORKDIR /app
COPY --from=builder /build/examples/basic-badger/khatru-badger /app/
RUN mkdir -p /data
EXPOSE 3334
ENV DATABASE_PATH=/data/badger
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:3334 || exit 1
CMD ["/app/khatru-badger"]
22 cmd/benchmark/Dockerfile.khatru-sqlite (Normal file)
@@ -0,0 +1,22 @@
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic-sqlite3 example
RUN cd examples/basic-sqlite3 && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o khatru-sqlite .

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic-sqlite3/khatru-sqlite /app/
RUN mkdir -p /data
EXPOSE 3334
ENV DATABASE_PATH=/data/khatru.db
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:3334 || exit 1
CMD ["/app/khatru-sqlite"]
80 cmd/benchmark/Dockerfile.next-orly (Normal file)
@@ -0,0 +1,80 @@
# Dockerfile for next.orly.dev relay
FROM ubuntu:22.04 as builder

# Set environment variables
ARG GOLANG_VERSION=1.22.5

# Update package list and install dependencies
RUN apt-get update && \
    apt-get install -y wget ca-certificates && \
    rm -rf /var/lib/apt/lists/*

# Download Go binary
RUN wget https://go.dev/dl/go${GOLANG_VERSION}.linux-amd64.tar.gz && \
    rm -rf /usr/local/go && \
    tar -C /usr/local -xzf go${GOLANG_VERSION}.linux-amd64.tar.gz && \
    rm go${GOLANG_VERSION}.linux-amd64.tar.gz

# Set PATH environment variable
ENV PATH="/usr/local/go/bin:${PATH}"

# Verify installation
RUN go version

RUN apt update && \
    apt -y install build-essential autoconf libtool git wget
RUN cd /tmp && \
    rm -rf secp256k1 && \
    git clone https://github.com/bitcoin-core/secp256k1.git && \
    cd secp256k1 && \
    git checkout v0.6.0 && \
    git submodule init && \
    git submodule update && \
    ./autogen.sh && \
    ./configure --enable-module-schnorrsig --enable-module-ecdh --prefix=/usr && \
    make -j1 && \
    make install

# Set working directory
WORKDIR /build

# Copy go modules
COPY go.mod go.sum ./
RUN go mod download

# Copy source code
COPY . .

# Build the relay
RUN CGO_ENABLED=1 GOOS=linux go build -o relay .

# Final stage
FROM ubuntu:22.04

# Install runtime dependencies
RUN apt-get update && apt-get install -y ca-certificates curl libsecp256k1-0 libsecp256k1-dev && rm -rf /var/lib/apt/lists/* && \
    ln -sf /usr/lib/x86_64-linux-gnu/libsecp256k1.so.0 /usr/lib/x86_64-linux-gnu/libsecp256k1.so.5

WORKDIR /app

# Copy binary from builder
COPY --from=builder /build/relay /app/relay

# Create data directory
RUN mkdir -p /data

# Expose port
EXPOSE 8080

# Set environment variables
ENV ORLY_DATA_DIR=/data
ENV ORLY_LISTEN=0.0.0.0
ENV ORLY_PORT=8080
ENV ORLY_LOG_LEVEL=info

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD bash -lc "code=\$(curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:8080 || echo 000); echo \$code | grep -E '^(101|200|400|404|426)$' >/dev/null || exit 1"

# Run the relay
CMD ["/app/relay"]
23 cmd/benchmark/Dockerfile.nostr-rs-relay (Normal file)
@@ -0,0 +1,23 @@
FROM rust:1.81-alpine AS builder

RUN apk add --no-cache musl-dev sqlite-dev build-base bash perl protobuf

WORKDIR /build
COPY . .

# Build the relay
RUN cargo build --release

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/target/release/nostr-rs-relay /app/
RUN mkdir -p /data

EXPOSE 8080
ENV RUST_LOG=info

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1

CMD ["/app/nostr-rs-relay"]
23 cmd/benchmark/Dockerfile.relayer-basic (Normal file)
@@ -0,0 +1,23 @@
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic example
RUN cd examples/basic && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o relayer-basic .

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic/relayer-basic /app/
RUN mkdir -p /data
EXPOSE 7447
ENV DATABASE_PATH=/data/relayer.db
# PORT env is not used by relayer-basic; it always binds to 7447 in code.
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:7447 || exit 1
CMD ["/app/relayer-basic"]
44 cmd/benchmark/Dockerfile.strfry (Normal file)
@@ -0,0 +1,44 @@
FROM ubuntu:22.04 AS builder

ENV DEBIAN_FRONTEND=noninteractive

# Install build dependencies
RUN apt-get update && apt-get install -y \
    git \
    build-essential \
    liblmdb-dev \
    libsecp256k1-dev \
    pkg-config \
    libtool \
    autoconf \
    automake \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build

# Fetch strfry source with submodules to ensure golpe is present
RUN git clone --recurse-submodules https://github.com/hoytech/strfry .

# Build strfry
RUN make setup-golpe && \
    make -j$(nproc)

FROM ubuntu:22.04
RUN apt-get update && apt-get install -y \
    liblmdb0 \
    libsecp256k1-0 \
    curl \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
COPY --from=builder /build/strfry /app/
RUN mkdir -p /data

EXPOSE 8080
ENV STRFRY_DB_PATH=/data/strfry.lmdb
ENV STRFRY_RELAY_PORT=8080

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8080 || exit 1

CMD ["/app/strfry", "relay"]
260 cmd/benchmark/README.md (Normal file)
@@ -0,0 +1,260 @@
# Nostr Relay Benchmark Suite

A comprehensive benchmarking system for testing and comparing the performance of multiple Nostr relay implementations, including:

- **next.orly.dev** (this repository) - BadgerDB-based relay
- **Khatru** - SQLite and Badger variants
- **Relayer** - Basic example implementation
- **Strfry** - C++ LMDB-based relay
- **nostr-rs-relay** - Rust-based relay with SQLite

## Features

### Benchmark Tests

1. **Peak Throughput Test**
   - Tests maximum event ingestion rate
   - Concurrent workers pushing events as fast as possible
   - Measures events/second, latency distribution, success rate

2. **Burst Pattern Test**
   - Simulates real-world traffic patterns
   - Alternating high-activity bursts and quiet periods
   - Tests relay behavior under varying loads

3. **Mixed Read/Write Test**
   - Concurrent read and write operations
   - Tests query performance while events are being ingested
   - Measures combined throughput and latency

### Performance Metrics

- **Throughput**: Events processed per second
- **Latency**: Average, P95, and P99 response times
- **Success Rate**: Percentage of successful operations
- **Memory Usage**: Peak memory consumption during tests
- **Error Analysis**: Detailed error reporting and categorization

### Reporting

- Individual relay reports with detailed metrics
- Aggregate comparison report across all relays
- Comparison tables for easy performance analysis
- Timestamped results for tracking improvements over time

## Quick Start

### 1. Setup External Relays

Run the setup script to download and configure all external relay repositories:

```bash
cd cmd/benchmark
./setup-external-relays.sh
```

This will:
- Clone all external relay repositories
- Create Docker configurations for each relay
- Set up configuration files
- Create data and report directories

### 2. Run Benchmarks

Start all relays and run the benchmark suite:

```bash
docker compose up --build
```

The system will:
- Build and start all relay containers
- Wait for all relays to become healthy
- Run benchmarks against each relay sequentially
- Generate individual and aggregate reports

### 3. View Results

Results are stored in the `reports/` directory with timestamps:

```bash
# View the aggregate report
cat reports/run_YYYYMMDD_HHMMSS/aggregate_report.txt

# View individual relay results
ls reports/run_YYYYMMDD_HHMMSS/
```
## Architecture

### Docker Compose Services

| Service | Port | Description |
|---------|------|-------------|
| next-orly | 8001 | This repository's BadgerDB relay |
| khatru-sqlite | 8002 | Khatru with SQLite backend |
| khatru-badger | 8003 | Khatru with Badger backend |
| relayer-basic | 8004 | Basic relayer example |
| strfry | 8005 | Strfry C++ LMDB relay |
| nostr-rs-relay | 8006 | Rust SQLite relay |
| benchmark-runner | - | Orchestrates tests and aggregates results |

### File Structure

```
cmd/benchmark/
├── main.go                    # Benchmark tool implementation
├── docker-compose.yml         # Service orchestration
├── setup-external-relays.sh   # Repository setup script
├── benchmark-runner.sh        # Test orchestration script
├── Dockerfile.next-orly       # This repo's relay container
├── Dockerfile.benchmark       # Benchmark runner container
├── Dockerfile.khatru-sqlite   # Khatru SQLite variant
├── Dockerfile.khatru-badger   # Khatru Badger variant
├── Dockerfile.relayer-basic   # Relayer basic example
├── Dockerfile.strfry          # Strfry relay
├── Dockerfile.nostr-rs-relay  # Rust relay
├── configs/
│   ├── strfry.conf            # Strfry configuration
│   └── config.toml            # nostr-rs-relay configuration
├── external/                  # External relay repositories
├── data/                      # Persistent data for each relay
└── reports/                   # Benchmark results
```

## Configuration

### Environment Variables

The benchmark can be configured via environment variables in `docker-compose.yml`:

```yaml
environment:
  - BENCHMARK_EVENTS=10000   # Number of events per test
  - BENCHMARK_WORKERS=8      # Concurrent workers
  - BENCHMARK_DURATION=60s   # Test duration
  - BENCHMARK_TARGETS=...    # Relay endpoints to test
```
### Custom Configuration

1. **Modify test parameters**: Edit environment variables in `docker-compose.yml`
2. **Add new relays**:
   - Add service to `docker-compose.yml`
   - Create appropriate Dockerfile
   - Update `BENCHMARK_TARGETS` environment variable
3. **Adjust relay configs**: Edit files in `configs/` directory

## Manual Usage

### Run Individual Relay

```bash
# Build and run a specific relay
docker-compose up next-orly

# Run benchmark against specific endpoint
./benchmark -datadir=/tmp/test -events=1000 -workers=4
```

### Run Benchmark Tool Directly

```bash
# Build the benchmark tool
go build -o benchmark main.go

# Run with custom parameters
./benchmark \
  -datadir=/tmp/benchmark_db \
  -events=5000 \
  -workers=4 \
  -duration=30s
```

## Benchmark Results Interpretation

### Peak Throughput Test
- **High events/sec**: Good write performance
- **Low latency**: Efficient event processing
- **High success rate**: Stable under load

### Burst Pattern Test
- **Consistent performance**: Good handling of variable loads
- **Low P95/P99 latency**: Predictable response times
- **No errors during bursts**: Robust queuing/buffering

### Mixed Read/Write Test
- **Balanced throughput**: Good concurrent operation handling
- **Low read latency**: Efficient query processing
- **Stable write performance**: Queries don't significantly impact writes

## Development

### Adding New Tests

1. Extend the `Benchmark` struct in `main.go`
2. Add new test method following existing patterns
3. Update `main()` function to call new test
4. Update result aggregation in `benchmark-runner.sh`
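As a hedged illustration of the steps above (the method name and its body are hypothetical; only the `Benchmark` and `BenchmarkResult` types are taken from `main.go`), a new test could be wired in like this:

```go
// RunPingTest is a hypothetical example of a new test method. It follows the
// existing pattern: time some work, fill in a BenchmarkResult, and append it
// to the shared results slice under the mutex.
func (b *Benchmark) RunPingTest() *BenchmarkResult {
    start := time.Now()
    // ... drive whatever load the new test needs here ...
    result := &BenchmarkResult{
        TestName:          "Ping Test",
        Duration:          time.Since(start),
        ConcurrentWorkers: b.config.ConcurrentWorkers,
    }
    b.mu.Lock()
    b.results = append(b.results, result)
    b.mu.Unlock()
    return result
}
```

`main()` would then call the new method alongside the existing suite, and `benchmark-runner.sh` would need a matching `grep` when aggregating its metrics.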
### Modifying Relay Configurations

Each relay's Dockerfile and configuration can be customized:
- **Resource limits**: Adjust memory/CPU limits in docker-compose.yml
- **Database settings**: Modify configuration files in `configs/`
- **Network settings**: Update port mappings and health checks

### Debugging

```bash
# View logs for specific relay
docker-compose logs next-orly

# Run benchmark with debug output
docker-compose up --build benchmark-runner

# Check individual container health
docker-compose ps
```

## Troubleshooting

### Common Issues

1. **Relay fails to start**: Check logs with `docker-compose logs <service>`
2. **Connection refused**: Ensure relay health checks are passing
3. **Build failures**: Verify external repositories were cloned correctly
4. **Permission errors**: Ensure setup script is executable

### Performance Issues

- **Low throughput**: Check resource limits and concurrent worker count
- **High memory usage**: Monitor container resource consumption
- **Network bottlenecks**: Test on different host configurations

### Reset Environment

```bash
# Clean up everything
docker-compose down -v
docker system prune -f
rm -rf external/ data/ reports/

# Start fresh
./setup-external-relays.sh
docker-compose up --build
```

## Contributing

To add support for new relay implementations:

1. Create appropriate Dockerfile following existing patterns
2. Add service definition to `docker-compose.yml`
3. Update `BENCHMARK_TARGETS` environment variable
4. Test the new relay integration
5. Update documentation

## License

This benchmark suite is part of the next.orly.dev project and follows the same licensing terms.
275 cmd/benchmark/benchmark-runner.sh (Normal file)
@@ -0,0 +1,275 @@
#!/bin/sh

# Benchmark runner script for testing multiple Nostr relay implementations
# This script coordinates testing all relays and aggregates results

set -e

# Configuration from environment variables
BENCHMARK_EVENTS="${BENCHMARK_EVENTS:-10000}"
BENCHMARK_WORKERS="${BENCHMARK_WORKERS:-8}"
BENCHMARK_DURATION="${BENCHMARK_DURATION:-60s}"
BENCHMARK_TARGETS="${BENCHMARK_TARGETS:-next-orly:8080,khatru-sqlite:3334,khatru-badger:3334,relayer-basic:7447,strfry:8080,nostr-rs-relay:8080}"
OUTPUT_DIR="${OUTPUT_DIR:-/reports}"

# Create output directory
mkdir -p "${OUTPUT_DIR}"

# Generate timestamp for this benchmark run
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
RUN_DIR="${OUTPUT_DIR}/run_${TIMESTAMP}"
mkdir -p "${RUN_DIR}"

echo "=================================================="
echo "Nostr Relay Benchmark Suite"
echo "=================================================="
echo "Timestamp: $(date)"
echo "Events per test: ${BENCHMARK_EVENTS}"
echo "Concurrent workers: ${BENCHMARK_WORKERS}"
echo "Test duration: ${BENCHMARK_DURATION}"
echo "Output directory: ${RUN_DIR}"
echo "=================================================="

# Function to wait for relay to be ready
wait_for_relay() {
    local name="$1"
    local url="$2"
    local max_attempts=60
    local attempt=0

    echo "Waiting for ${name} to be ready at ${url}..."

    while [ $attempt -lt $max_attempts ]; do
        # Try wget first to obtain an HTTP status code
        local status=""
        status=$(wget --quiet --server-response --tries=1 --timeout=5 "http://${url}" 2>&1 | awk '/^ HTTP\//{print $2; exit}')

        # Fallback to curl to obtain an HTTP status code
        if [ -z "$status" ]; then
            status=$(curl -s -o /dev/null -w "%{http_code}" --connect-timeout 5 --max-time 5 "http://${url}" || echo 000)
        fi

        case "$status" in
            101|200|400|404|426)
                echo "${name} is ready! (HTTP ${status})"
                return 0
                ;;
        esac

        attempt=$((attempt + 1))
        echo " Attempt ${attempt}/${max_attempts}: ${name} not ready yet (HTTP ${status:-none})..."
        sleep 2
    done

    echo "ERROR: ${name} failed to become ready after ${max_attempts} attempts"
    return 1
}

# Function to run benchmark against a specific relay
run_benchmark() {
    local relay_name="$1"
    local relay_url="$2"
    local output_file="$3"

    echo ""
    echo "=================================================="
    echo "Testing ${relay_name} at ws://${relay_url}"
    echo "=================================================="

    # Wait for relay to be ready
    if ! wait_for_relay "${relay_name}" "${relay_url}"; then
        echo "ERROR: ${relay_name} is not responding, skipping..."
        echo "RELAY: ${relay_name}" > "${output_file}"
        echo "STATUS: FAILED - Relay not responding" >> "${output_file}"
        echo "ERROR: Connection failed" >> "${output_file}"
        return 1
    fi

    # Run the benchmark
    echo "Running benchmark against ${relay_name}..."

    # Create temporary directory for this relay's data
    TEMP_DATA_DIR="/tmp/benchmark_${relay_name}_$$"
    mkdir -p "${TEMP_DATA_DIR}"

    # Run benchmark and capture both stdout and stderr
    if /app/benchmark \
        -datadir="${TEMP_DATA_DIR}" \
        -events="${BENCHMARK_EVENTS}" \
        -workers="${BENCHMARK_WORKERS}" \
        -duration="${BENCHMARK_DURATION}" \
        > "${output_file}" 2>&1; then

        echo "✓ Benchmark completed successfully for ${relay_name}"

        # Add relay identification to the report
        echo "" >> "${output_file}"
        echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
        echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
        echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
        echo "BENCHMARK_CONFIG:" >> "${output_file}"
        echo " Events: ${BENCHMARK_EVENTS}" >> "${output_file}"
        echo " Workers: ${BENCHMARK_WORKERS}" >> "${output_file}"
        echo " Duration: ${BENCHMARK_DURATION}" >> "${output_file}"

    else
        echo "✗ Benchmark failed for ${relay_name}"
        echo "" >> "${output_file}"
        echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
        echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
        echo "STATUS: FAILED" >> "${output_file}"
        echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
    fi

    # Clean up temporary data
    rm -rf "${TEMP_DATA_DIR}"
}

# Function to generate aggregate report
generate_aggregate_report() {
    local aggregate_file="${RUN_DIR}/aggregate_report.txt"

    echo "Generating aggregate report..."

    cat > "${aggregate_file}" << EOF
================================================================
NOSTR RELAY BENCHMARK AGGREGATE REPORT
================================================================
Generated: $(date -Iseconds)
Benchmark Configuration:
Events per test: ${BENCHMARK_EVENTS}
Concurrent workers: ${BENCHMARK_WORKERS}
Test duration: ${BENCHMARK_DURATION}

Relays tested: $(echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | wc -l)

================================================================
SUMMARY BY RELAY
================================================================

EOF

    # Process each relay's results
    echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
        if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
            continue
        fi

        relay_file="${RUN_DIR}/${relay_name}_results.txt"

        echo "Relay: ${relay_name}" >> "${aggregate_file}"
        echo "----------------------------------------" >> "${aggregate_file}"

        if [ -f "${relay_file}" ]; then
            # Extract key metrics from the relay's report
            if grep -q "STATUS: FAILED" "${relay_file}"; then
                echo "Status: FAILED" >> "${aggregate_file}"
                grep "ERROR:" "${relay_file}" | head -1 >> "${aggregate_file}" || echo "Error: Unknown failure" >> "${aggregate_file}"
            else
                echo "Status: COMPLETED" >> "${aggregate_file}"

                # Extract performance metrics
                grep "Events/sec:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
                grep "Success Rate:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
                grep "Avg Latency:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
                grep "P95 Latency:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
                grep "Memory:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
            fi
        else
            echo "Status: NO RESULTS FILE" >> "${aggregate_file}"
            echo "Error: Results file not found" >> "${aggregate_file}"
        fi

        echo "" >> "${aggregate_file}"
    done

    cat >> "${aggregate_file}" << EOF

================================================================
DETAILED RESULTS
================================================================

Individual relay reports are available in:
$(ls "${RUN_DIR}"/*_results.txt 2>/dev/null | sed 's|^| - |' || echo " No individual reports found")

================================================================
BENCHMARK COMPARISON TABLE
================================================================

EOF

    # Create a comparison table
    printf "%-20s %-10s %-15s %-15s %-15s\n" "Relay" "Status" "Peak Tput/s" "Avg Latency" "Success Rate" >> "${aggregate_file}"
    printf "%-20s %-10s %-15s %-15s %-15s\n" "----" "------" "-----------" "-----------" "------------" >> "${aggregate_file}"

    echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
        if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
            continue
        fi

        relay_file="${RUN_DIR}/${relay_name}_results.txt"

        if [ -f "${relay_file}" ]; then
            if grep -q "STATUS: FAILED" "${relay_file}"; then
                printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "FAILED" "-" "-" "-" >> "${aggregate_file}"
            else
                # Extract metrics for the table
                peak_tput=$(grep "Events/sec:" "${relay_file}" | head -1 | awk '{print $2}' || echo "-")
                avg_latency=$(grep "Avg Latency:" "${relay_file}" | head -1 | awk '{print $3}' || echo "-")
                success_rate=$(grep "Success Rate:" "${relay_file}" | head -1 | awk '{print $3}' || echo "-")

                printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "OK" "${peak_tput}" "${avg_latency}" "${success_rate}" >> "${aggregate_file}"
            fi
        else
            printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "NO DATA" "-" "-" "-" >> "${aggregate_file}"
        fi
    done

    echo "" >> "${aggregate_file}"
    echo "================================================================" >> "${aggregate_file}"
    echo "End of Report" >> "${aggregate_file}"
    echo "================================================================" >> "${aggregate_file}"
}

# Main execution
echo "Starting relay benchmark suite..."

# Parse targets and run benchmarks
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
    if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
        echo "WARNING: Skipping invalid target: ${relay_name}:${relay_port}"
        continue
    fi

    relay_url="${relay_name}:${relay_port}"
    output_file="${RUN_DIR}/${relay_name}_results.txt"

    run_benchmark "${relay_name}" "${relay_url}" "${output_file}"

    # Small delay between tests
    sleep 5
done

# Generate aggregate report
generate_aggregate_report

echo ""
echo "=================================================="
echo "Benchmark Suite Completed!"
echo "=================================================="
echo "Results directory: ${RUN_DIR}"
echo "Aggregate report: ${RUN_DIR}/aggregate_report.txt"
echo ""

# Display summary
if [ -f "${RUN_DIR}/aggregate_report.txt" ]; then
    echo "Quick Summary:"
    echo "=============="
    grep -A 10 "BENCHMARK COMPARISON TABLE" "${RUN_DIR}/aggregate_report.txt" | tail -n +4
fi

echo ""
echo "All benchmark files:"
ls -la "${RUN_DIR}/"
echo ""
echo "Benchmark suite finished at: $(date)"
36 cmd/benchmark/configs/config.toml (Normal file)
@@ -0,0 +1,36 @@
[info]
relay_url = "ws://localhost:8080"
name = "nostr-rs-relay benchmark"
description = "A nostr-rs-relay for benchmarking"
pubkey = ""
contact = ""

[database]
data_directory = "/data"
in_memory = false
engine = "sqlite"

[network]
port = 8080
address = "0.0.0.0"

[limits]
messages_per_sec = 0
subscriptions_per_min = 0
max_event_bytes = 65535
max_ws_message_bytes = 131072
max_ws_frame_bytes = 131072

[authorization]
pubkey_whitelist = []

[verified_users]
mode = "passive"
domain_whitelist = []
domain_blacklist = []

[pay_to_relay]
enabled = false

[options]
reject_future_seconds = 30
101 cmd/benchmark/configs/strfry.conf (Normal file)
@@ -0,0 +1,101 @@
##
## Default strfry config
##

# Directory that contains the strfry LMDB database (restart required)
db = "/data/strfry.lmdb"

dbParams {
    # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
    maxreaders = 256

    # Size of mmap to use when loading LMDB (default is 1TB, which is probably reasonable) (restart required)
    mapsize = 1099511627776
}

relay {
    # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
    bind = "0.0.0.0"

    # Port to open for the nostr websocket protocol (restart required)
    port = 8080

    # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
    nofiles = 1000000

    # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
    realIpHeader = ""

    info {
        # NIP-11: Name of this server. Short/descriptive (< 30 characters)
        name = "strfry benchmark"

        # NIP-11: Detailed description of this server, free-form
        description = "A strfry relay for benchmarking"

        # NIP-11: Administrative pubkey, for contact purposes
        pubkey = ""

        # NIP-11: Alternative contact for this server
        contact = ""
    }

    # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
    maxWebsocketPayloadSize = 131072

    # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
    autoPingSeconds = 55

    # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy) (restart required)
    enableTcpKeepalive = false

    # How much uninterrupted CPU time a REQ query should get during its DB scan
    queryTimesliceBudgetMicroseconds = 10000

    # Maximum records that can be returned per filter
    maxFilterLimit = 500

    # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
    maxSubsPerConnection = 20

    writePolicy {
        # If non-empty, path to an executable script that implements the writePolicy plugin logic
        plugin = ""
    }

    compression {
        # Use permessage-deflate compression if supported by client. Reduces bandwidth, but uses more CPU (restart required)
        enabled = true

        # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
        slidingWindow = true
    }

    logging {
        # Dump all incoming messages
        dumpInAll = false

        # Dump all incoming EVENT messages
        dumpInEvents = false

        # Dump all incoming REQ/CLOSE messages
        dumpInReqs = false

        # Log performance metrics for initial REQ database scans
        dbScanPerf = false
    }

    numThreads {
        # Ingester threads: route incoming requests, validate events/sigs (restart required)
        ingester = 3

        # reqWorker threads: Handle initial DB scan for events (restart required)
        reqWorker = 3

        # reqMonitor threads: Handle filtering of new events (restart required)
        reqMonitor = 3

        # yesstr threads: experimental yesstr protocol (restart required)
        yesstr = 1
    }
}
200 cmd/benchmark/docker-compose.yml (Normal file)
@@ -0,0 +1,200 @@
version: '3.8'

services:
  # Next.orly.dev relay (this repository)
  next-orly:
    build:
      context: ../..
      dockerfile: cmd/benchmark/Dockerfile.next-orly
    container_name: benchmark-next-orly
    environment:
      - ORLY_DATA_DIR=/data
      - ORLY_LISTEN=0.0.0.0
      - ORLY_PORT=8080
      - ORLY_LOG_LEVEL=info
    volumes:
      - ./data/next-orly:/data
    ports:
      - "8001:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD-SHELL", "code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Khatru with SQLite
  khatru-sqlite:
    build:
      context: ./external/khatru
      dockerfile: ../../Dockerfile.khatru-sqlite
    container_name: benchmark-khatru-sqlite
    environment:
      - DATABASE_TYPE=sqlite
      - DATABASE_PATH=/data/khatru.db
    volumes:
      - ./data/khatru-sqlite:/data
    ports:
      - "8002:3334"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Khatru with Badger
  khatru-badger:
    build:
      context: ./external/khatru
      dockerfile: ../../Dockerfile.khatru-badger
    container_name: benchmark-khatru-badger
    environment:
      - DATABASE_TYPE=badger
      - DATABASE_PATH=/data/badger
    volumes:
      - ./data/khatru-badger:/data
    ports:
      - "8003:3334"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Relayer basic example
  relayer-basic:
    build:
      context: ./external/relayer
      dockerfile: ../../Dockerfile.relayer-basic
    container_name: benchmark-relayer-basic
    environment:
      - POSTGRESQL_DATABASE=postgres://relayer:relayerpass@postgres:5432/relayerdb?sslmode=disable
    volumes:
      - ./data/relayer-basic:/data
    ports:
      - "8004:7447"
    networks:
      - benchmark-net
    depends_on:
      postgres:
        condition: service_healthy
    healthcheck:
      test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://localhost:7447 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Strfry
  strfry:
    image: ghcr.io/hoytech/strfry:latest
    container_name: benchmark-strfry
    environment:
      - STRFRY_DB_PATH=/data/strfry.lmdb
      - STRFRY_RELAY_PORT=8080
    volumes:
      - ./data/strfry:/data
      - ./configs/strfry.conf:/etc/strfry.conf
    ports:
      - "8005:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://127.0.0.1:8080 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404|426)' >/dev/null"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Nostr-rs-relay
  nostr-rs-relay:
    build:
      context: ./external/nostr-rs-relay
      dockerfile: ../../Dockerfile.nostr-rs-relay
    container_name: benchmark-nostr-rs-relay
    environment:
      - RUST_LOG=info
    volumes:
      - ./data/nostr-rs-relay:/data
      - ./configs/config.toml:/app/config.toml
    ports:
      - "8006:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Benchmark runner
  benchmark-runner:
    build:
      context: ../..
      dockerfile: cmd/benchmark/Dockerfile.benchmark
    container_name: benchmark-runner
    depends_on:
      next-orly:
        condition: service_healthy
      khatru-sqlite:
        condition: service_healthy
      khatru-badger:
        condition: service_healthy
      relayer-basic:
        condition: service_healthy
      strfry:
        condition: service_healthy
      nostr-rs-relay:
        condition: service_healthy
    environment:
      - BENCHMARK_TARGETS=next-orly:8080,khatru-sqlite:3334,khatru-badger:3334,relayer-basic:7447,strfry:8080,nostr-rs-relay:8080
      - BENCHMARK_EVENTS=10000
      - BENCHMARK_WORKERS=8
      - BENCHMARK_DURATION=60s
    volumes:
      - ./reports:/reports
    networks:
      - benchmark-net
    command: >
      sh -c "
      echo 'Waiting for all relays to be ready...' &&
      sleep 30 &&
      echo 'Starting benchmark tests...' &&
      /app/benchmark-runner --output-dir=/reports
      "

  # PostgreSQL for relayer-basic
  postgres:
    image: postgres:16-alpine
    container_name: benchmark-postgres
    environment:
      - POSTGRES_DB=relayerdb
      - POSTGRES_USER=relayer
      - POSTGRES_PASSWORD=relayerpass
    volumes:
      - ./data/postgres:/var/lib/postgresql/data
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U relayer -d relayerdb"]
      interval: 10s
      timeout: 5s
      retries: 5
      start_period: 20s

networks:
  benchmark-net:
    driver: bridge

volumes:
  benchmark-data:
    driver: local
1 cmd/benchmark/external/khatru (vendored Submodule)
Submodule cmd/benchmark/external/khatru added at 668c41b988
838 cmd/benchmark/main.go (Normal file)
@@ -0,0 +1,838 @@
package main

import (
	"context"
	"crypto/rand"
	"flag"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"sync"
	"time"

	"next.orly.dev/pkg/crypto/p256k"
	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
	"next.orly.dev/pkg/encoders/event"
	"next.orly.dev/pkg/encoders/filter"
	"next.orly.dev/pkg/encoders/kind"
	"next.orly.dev/pkg/encoders/tag"
	"next.orly.dev/pkg/encoders/timestamp"
	"next.orly.dev/pkg/protocol/ws"
)

type BenchmarkConfig struct {
	DataDir           string
	NumEvents         int
	ConcurrentWorkers int
	TestDuration      time.Duration
	BurstPattern      bool
	ReportInterval    time.Duration

	// Network load options
	RelayURL   string
	NetWorkers int
	NetRate    int // events/sec per worker
}

type BenchmarkResult struct {
	TestName          string
	Duration          time.Duration
	TotalEvents       int
	EventsPerSecond   float64
	AvgLatency        time.Duration
	P90Latency        time.Duration
	P95Latency        time.Duration
	P99Latency        time.Duration
	Bottom10Avg       time.Duration
	SuccessRate       float64
	ConcurrentWorkers int
	MemoryUsed        uint64
	Errors            []string
}

type Benchmark struct {
	config  *BenchmarkConfig
	db      *database.D
	results []*BenchmarkResult
	mu      sync.RWMutex
}

func main() {
	config := parseFlags()

	if config.RelayURL != "" {
		// Network mode: connect to relay and generate traffic
		runNetworkLoad(config)
		return
	}

	fmt.Printf("Starting Nostr Relay Benchmark\n")
	fmt.Printf("Data Directory: %s\n", config.DataDir)
	fmt.Printf(
		"Events: %d, Workers: %d, Duration: %v\n",
		config.NumEvents, config.ConcurrentWorkers, config.TestDuration,
	)

	benchmark := NewBenchmark(config)
	defer benchmark.Close()

	// Run benchmark suite twice with pauses
	benchmark.RunSuite()

	// Generate reports
	benchmark.GenerateReport()
	benchmark.GenerateAsciidocReport()
}

func parseFlags() *BenchmarkConfig {
	config := &BenchmarkConfig{}

	flag.StringVar(
		&config.DataDir, "datadir", "/tmp/benchmark_db", "Database directory",
	)
	flag.IntVar(
		&config.NumEvents, "events", 100000, "Number of events to generate",
	)
	flag.IntVar(
		&config.ConcurrentWorkers, "workers", runtime.NumCPU(),
		"Number of concurrent workers",
	)
	flag.DurationVar(
		&config.TestDuration, "duration", 60*time.Second, "Test duration",
	)
	flag.BoolVar(
		&config.BurstPattern, "burst", true, "Enable burst pattern testing",
	)
	flag.DurationVar(
		&config.ReportInterval, "report-interval", 10*time.Second,
		"Report interval",
	)

	// Network mode flags
	flag.StringVar(
		&config.RelayURL, "relay-url", "",
		"Relay WebSocket URL (enables network mode if set)",
	)
	flag.IntVar(
		&config.NetWorkers, "net-workers", runtime.NumCPU(),
		"Network workers (connections)",
	)
	flag.IntVar(&config.NetRate, "net-rate", 20, "Events per second per worker")

	flag.Parse()
	return config
}

func runNetworkLoad(cfg *BenchmarkConfig) {
	fmt.Printf(
		"Network mode: relay=%s workers=%d rate=%d ev/s per worker duration=%s\n",
		cfg.RelayURL, cfg.NetWorkers, cfg.NetRate, cfg.TestDuration,
	)
	ctx, cancel := context.WithTimeout(context.Background(), cfg.TestDuration)
	defer cancel()
	var wg sync.WaitGroup
	if cfg.NetWorkers <= 0 {
		cfg.NetWorkers = 1
	}
	if cfg.NetRate <= 0 {
		cfg.NetRate = 1
	}
	for i := 0; i < cfg.NetWorkers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()
			// Connect to relay
			rl, err := ws.RelayConnect(ctx, cfg.RelayURL)
			if err != nil {
				fmt.Printf(
					"worker %d: failed to connect to %s: %v\n", workerID,
					cfg.RelayURL, err,
				)
				return
			}
			defer rl.Close()
			fmt.Printf("worker %d: connected to %s\n", workerID, cfg.RelayURL)

			// Signer for this worker
			var keys p256k.Signer
			if err := keys.Generate(); err != nil {
				fmt.Printf("worker %d: keygen failed: %v\n", workerID, err)
				return
			}

			// Start a concurrent subscriber that listens for events published by this worker
			// Build a filter that matches this worker's pubkey and kind=1, since now
			since := time.Now().Unix()
			go func() {
				f := filter.New()
				f.Kinds = kind.NewS(kind.TextNote)
				f.Authors = tag.NewWithCap(1)
				f.Authors.T = append(f.Authors.T, keys.Pub())
				f.Since = timestamp.FromUnix(since)
				sub, err := rl.Subscribe(ctx, filter.NewS(f))
				if err != nil {
					fmt.Printf("worker %d: subscribe error: %v\n", workerID, err)
					return
				}
				defer sub.Unsub()
				recv := 0
				for {
					select {
					case <-ctx.Done():
						fmt.Printf("worker %d: subscriber exiting after %d events\n", workerID, recv)
						return
					case <-sub.EndOfStoredEvents:
						// continue streaming live events
					case ev := <-sub.Events:
						if ev == nil {
							continue
						}
						recv++
						if recv%100 == 0 {
							fmt.Printf("worker %d: received %d matching events\n", workerID, recv)
						}
						ev.Free()
					}
				}
			}()

			interval := time.Second / time.Duration(cfg.NetRate)
			ticker := time.NewTicker(interval)
			defer ticker.Stop()
			count := 0
			for {
				select {
				case <-ctx.Done():
					fmt.Printf(
						"worker %d: stopping after %d publishes\n", workerID,
						count,
					)
					return
				case <-ticker.C:
					// Build and sign a simple text note event
					ev := event.New()
					ev.Kind = uint16(1)
					ev.CreatedAt = time.Now().Unix()
					ev.Tags = tag.NewS()
					ev.Content = []byte(fmt.Sprintf(
						"bench worker=%d n=%d", workerID, count,
					))
					if err := ev.Sign(&keys); err != nil {
						fmt.Printf("worker %d: sign error: %v\n", workerID, err)
						ev.Free()
						continue
					}
					// Async publish: don't wait for OK; this greatly increases throughput
					ch := rl.Write(eventenvelope.NewSubmissionWith(ev).Marshal(nil))
					// Non-blocking error check
					select {
					case err := <-ch:
						if err != nil {
							fmt.Printf("worker %d: write error: %v\n", workerID, err)
						}
					default:
					}
					if count%100 == 0 {
						fmt.Printf("worker %d: sent %d events\n", workerID, count)
					}
					ev.Free()
					count++
				}
			}
		}(i)
	}
	wg.Wait()
}

func NewBenchmark(config *BenchmarkConfig) *Benchmark {
	// Clean up existing data directory
	os.RemoveAll(config.DataDir)

	ctx := context.Background()
	cancel := func() {}

	db, err := database.New(ctx, cancel, config.DataDir, "info")
	if err != nil {
		log.Fatalf("Failed to create database: %v", err)
	}

	b := &Benchmark{
		config:  config,
		db:      db,
		results: make([]*BenchmarkResult, 0),
	}

	// Trigger compaction/GC before starting tests
	b.compactDatabase()

	return b
}

func (b *Benchmark) Close() {
	if b.db != nil {
		b.db.Close()
	}
}

// RunSuite runs the three tests with a 10s pause between them and repeats the
// set twice with a 10s pause between rounds.
func (b *Benchmark) RunSuite() {
	for round := 1; round <= 2; round++ {
		fmt.Printf("\n=== Starting test round %d/2 ===\n", round)
		b.RunPeakThroughputTest()
		time.Sleep(10 * time.Second)
		b.RunBurstPatternTest()
		time.Sleep(10 * time.Second)
		b.RunMixedReadWriteTest()
		if round < 2 {
			fmt.Println("\nPausing 10s before next round...")
			time.Sleep(10 * time.Second)
		}
	}
}

// compactDatabase triggers a Badger value log GC before starting tests.
func (b *Benchmark) compactDatabase() {
	if b.db == nil || b.db.DB == nil {
		return
	}
	// Attempt value log GC. Ignore errors; this is best-effort.
	_ = b.db.DB.RunValueLogGC(0.5)
}

func (b *Benchmark) RunPeakThroughputTest() {
	fmt.Println("\n=== Peak Throughput Test ===")

	start := time.Now()
	var wg sync.WaitGroup
	var totalEvents int64
	var errors []error
	var latencies []time.Duration
	var mu sync.Mutex

	events := b.generateEvents(b.config.NumEvents)
	eventChan := make(chan *event.E, len(events))

	// Fill event channel
	for _, ev := range events {
		eventChan <- ev
	}
	close(eventChan)

	// Start workers
	for i := 0; i < b.config.ConcurrentWorkers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()

			ctx := context.Background()
			for ev := range eventChan {
				eventStart := time.Now()

				_, _, err := b.db.SaveEvent(ctx, ev)
				latency := time.Since(eventStart)

				mu.Lock()
				if err != nil {
					errors = append(errors, err)
				} else {
					totalEvents++
					latencies = append(latencies, latency)
				}
				mu.Unlock()
			}
		}(i)
	}

	wg.Wait()
	duration := time.Since(start)

	// Calculate metrics
	result := &BenchmarkResult{
		TestName:          "Peak Throughput",
		Duration:          duration,
		TotalEvents:       int(totalEvents),
		EventsPerSecond:   float64(totalEvents) / duration.Seconds(),
		ConcurrentWorkers: b.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
	}

	if len(latencies) > 0 {
		result.AvgLatency = calculateAvgLatency(latencies)
		result.P90Latency = calculatePercentileLatency(latencies, 0.90)
		result.P95Latency = calculatePercentileLatency(latencies, 0.95)
		result.P99Latency = calculatePercentileLatency(latencies, 0.99)
		result.Bottom10Avg = calculateBottom10Avg(latencies)
	}

	result.SuccessRate = float64(totalEvents) / float64(b.config.NumEvents) * 100

	for _, err := range errors {
		result.Errors = append(result.Errors, err.Error())
	}

	b.mu.Lock()
	b.results = append(b.results, result)
	b.mu.Unlock()

	fmt.Printf(
		"Events saved: %d/%d (%.1f%%)\n", totalEvents, b.config.NumEvents,
		result.SuccessRate,
	)
	fmt.Printf("Duration: %v\n", duration)
	fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
	fmt.Printf("Avg latency: %v\n", result.AvgLatency)
	fmt.Printf("P90 latency: %v\n", result.P90Latency)
	fmt.Printf("P95 latency: %v\n", result.P95Latency)
	fmt.Printf("P99 latency: %v\n", result.P99Latency)
	fmt.Printf("Bottom 10%% Avg latency: %v\n", result.Bottom10Avg)
}

func (b *Benchmark) RunBurstPatternTest() {
	fmt.Println("\n=== Burst Pattern Test ===")

	start := time.Now()
	var totalEvents int64
	var errors []error
	var latencies []time.Duration
	var mu sync.Mutex

	// Generate events for burst pattern
	events := b.generateEvents(b.config.NumEvents)

	// Simulate burst pattern: high activity periods followed by quiet periods
	burstSize := b.config.NumEvents / 10 // 10% of events in each burst
	quietPeriod := 500 * time.Millisecond
	burstPeriod := 100 * time.Millisecond

	ctx := context.Background()
	eventIndex := 0

	for eventIndex < len(events) && time.Since(start) < b.config.TestDuration {
		// Burst period - send events rapidly
		burstStart := time.Now()
		var wg sync.WaitGroup

		for i := 0; i < burstSize && eventIndex < len(events); i++ {
			wg.Add(1)
			go func(ev *event.E) {
				defer wg.Done()

				eventStart := time.Now()
				_, _, err := b.db.SaveEvent(ctx, ev)
				latency := time.Since(eventStart)

				mu.Lock()
				if err != nil {
					errors = append(errors, err)
				} else {
					totalEvents++
					latencies = append(latencies, latency)
				}
				mu.Unlock()
			}(events[eventIndex])

			eventIndex++
			time.Sleep(burstPeriod / time.Duration(burstSize))
		}

		wg.Wait()
		fmt.Printf(
			"Burst completed: %d events in %v\n", burstSize,
			time.Since(burstStart),
		)

		// Quiet period
		time.Sleep(quietPeriod)
	}

	duration := time.Since(start)

	// Calculate metrics
	result := &BenchmarkResult{
		TestName:          "Burst Pattern",
		Duration:          duration,
		TotalEvents:       int(totalEvents),
		EventsPerSecond:   float64(totalEvents) / duration.Seconds(),
		ConcurrentWorkers: b.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
	}

	if len(latencies) > 0 {
		result.AvgLatency = calculateAvgLatency(latencies)
		result.P90Latency = calculatePercentileLatency(latencies, 0.90)
		result.P95Latency = calculatePercentileLatency(latencies, 0.95)
		result.P99Latency = calculatePercentileLatency(latencies, 0.99)
		result.Bottom10Avg = calculateBottom10Avg(latencies)
	}

	result.SuccessRate = float64(totalEvents) / float64(eventIndex) * 100

	for _, err := range errors {
		result.Errors = append(result.Errors, err.Error())
	}

	b.mu.Lock()
	b.results = append(b.results, result)
	b.mu.Unlock()

	fmt.Printf("Burst test completed: %d events in %v\n", totalEvents, duration)
	fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
}

func (b *Benchmark) RunMixedReadWriteTest() {
	fmt.Println("\n=== Mixed Read/Write Test ===")

	start := time.Now()
	var totalWrites, totalReads int64
	var writeLatencies, readLatencies []time.Duration
	var errors []error
	var mu sync.Mutex

	// Pre-populate with some events for reading
	seedEvents := b.generateEvents(1000)
	ctx := context.Background()

	fmt.Println("Pre-populating database for read tests...")
	for _, ev := range seedEvents {
		b.db.SaveEvent(ctx, ev)
	}

	events := b.generateEvents(b.config.NumEvents)
	var wg sync.WaitGroup

	// Start mixed read/write workers
	for i := 0; i < b.config.ConcurrentWorkers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()

			eventIndex := workerID
			for time.Since(start) < b.config.TestDuration && eventIndex < len(events) {
				// Alternate between write and read operations
				if eventIndex%2 == 0 {
					// Write operation
					writeStart := time.Now()
					_, _, err := b.db.SaveEvent(ctx, events[eventIndex])
					writeLatency := time.Since(writeStart)

					mu.Lock()
					if err != nil {
						errors = append(errors, err)
					} else {
						totalWrites++
						writeLatencies = append(writeLatencies, writeLatency)
					}
					mu.Unlock()
				} else {
					// Read operation
					readStart := time.Now()
					f := filter.New()
					f.Kinds = kind.NewS(kind.TextNote)
					limit := uint(10)
					f.Limit = &limit
					_, err := b.db.GetSerialsFromFilter(f)
					readLatency := time.Since(readStart)

					mu.Lock()
					if err != nil {
						errors = append(errors, err)
					} else {
						totalReads++
						readLatencies = append(readLatencies, readLatency)
					}
					mu.Unlock()
				}

				eventIndex += b.config.ConcurrentWorkers
				time.Sleep(10 * time.Millisecond) // Small delay between operations
			}
		}(i)
	}

	wg.Wait()
	duration := time.Since(start)

	// Calculate metrics
	result := &BenchmarkResult{
		TestName:          "Mixed Read/Write",
		Duration:          duration,
		TotalEvents:       int(totalWrites + totalReads),
		EventsPerSecond:   float64(totalWrites+totalReads) / duration.Seconds(),
		ConcurrentWorkers: b.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
	}

	// Calculate combined latencies for overall metrics
	allLatencies := append(writeLatencies, readLatencies...)
	if len(allLatencies) > 0 {
		result.AvgLatency = calculateAvgLatency(allLatencies)
		result.P90Latency = calculatePercentileLatency(allLatencies, 0.90)
		result.P95Latency = calculatePercentileLatency(allLatencies, 0.95)
		result.P99Latency = calculatePercentileLatency(allLatencies, 0.99)
		result.Bottom10Avg = calculateBottom10Avg(allLatencies)
	}

	result.SuccessRate = float64(totalWrites+totalReads) / float64(len(events)) * 100

	for _, err := range errors {
		result.Errors = append(result.Errors, err.Error())
	}

	b.mu.Lock()
	b.results = append(b.results, result)
	b.mu.Unlock()

	fmt.Printf(
		"Mixed test completed: %d writes, %d reads in %v\n", totalWrites,
		totalReads, duration,
	)
	fmt.Printf("Combined ops/sec: %.2f\n", result.EventsPerSecond)
}

func (b *Benchmark) generateEvents(count int) []*event.E {
	events := make([]*event.E, count)
	now := timestamp.Now()

	for i := 0; i < count; i++ {
		ev := event.New()

		// Generate random 32-byte ID
		ev.ID = make([]byte, 32)
		rand.Read(ev.ID)

		// Generate random 32-byte pubkey
		ev.Pubkey = make([]byte, 32)
		rand.Read(ev.Pubkey)

		ev.CreatedAt = now.I64()
		ev.Kind = kind.TextNote.K
		ev.Content = []byte(fmt.Sprintf(
			"This is test event number %d with some content", i,
		))

		// Create tags using NewFromBytesSlice
		ev.Tags = tag.NewS(
			tag.NewFromBytesSlice([]byte("t"), []byte("benchmark")),
			tag.NewFromBytesSlice(
				[]byte("e"), []byte(fmt.Sprintf("ref_%d", i%50)),
			),
		)

		events[i] = ev
	}

	return events
}

func (b *Benchmark) GenerateReport() {
	fmt.Println("\n" + strings.Repeat("=", 80))
	fmt.Println("BENCHMARK REPORT")
	fmt.Println(strings.Repeat("=", 80))

	b.mu.RLock()
	defer b.mu.RUnlock()

	for _, result := range b.results {
		fmt.Printf("\nTest: %s\n", result.TestName)
		fmt.Printf("Duration: %v\n", result.Duration)
		fmt.Printf("Total Events: %d\n", result.TotalEvents)
		fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
		fmt.Printf("Success Rate: %.1f%%\n", result.SuccessRate)
		fmt.Printf("Concurrent Workers: %d\n", result.ConcurrentWorkers)
		fmt.Printf("Memory Used: %d MB\n", result.MemoryUsed/(1024*1024))
		fmt.Printf("Avg Latency: %v\n", result.AvgLatency)
		fmt.Printf("P90 Latency: %v\n", result.P90Latency)
		fmt.Printf("P95 Latency: %v\n", result.P95Latency)
		fmt.Printf("P99 Latency: %v\n", result.P99Latency)
		fmt.Printf("Bottom 10%% Avg Latency: %v\n", result.Bottom10Avg)

		if len(result.Errors) > 0 {
			fmt.Printf("Errors (%d):\n", len(result.Errors))
			for i, err := range result.Errors {
				if i < 5 { // Show first 5 errors
					fmt.Printf("  - %s\n", err)
				}
			}
			if len(result.Errors) > 5 {
				fmt.Printf("  ... and %d more errors\n", len(result.Errors)-5)
			}
		}
		fmt.Println(strings.Repeat("-", 40))
	}

	// Save report to file
	reportPath := filepath.Join(b.config.DataDir, "benchmark_report.txt")
	b.saveReportToFile(reportPath)
	fmt.Printf("\nReport saved to: %s\n", reportPath)
}

func (b *Benchmark) saveReportToFile(path string) error {
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	defer file.Close()

	file.WriteString("NOSTR RELAY BENCHMARK REPORT\n")
	file.WriteString("============================\n\n")
	file.WriteString(
		fmt.Sprintf(
			"Generated: %s\n", time.Now().Format(time.RFC3339),
		),
	)
	file.WriteString(fmt.Sprintf("Relay: next.orly.dev\n"))
	file.WriteString(fmt.Sprintf("Database: BadgerDB\n"))
	file.WriteString(fmt.Sprintf("Workers: %d\n", b.config.ConcurrentWorkers))
	file.WriteString(
		fmt.Sprintf(
			"Test Duration: %v\n\n", b.config.TestDuration,
		),
	)

	b.mu.RLock()
	defer b.mu.RUnlock()

	for _, result := range b.results {
		file.WriteString(fmt.Sprintf("Test: %s\n", result.TestName))
		file.WriteString(fmt.Sprintf("Duration: %v\n", result.Duration))
		file.WriteString(fmt.Sprintf("Events: %d\n", result.TotalEvents))
		file.WriteString(
			fmt.Sprintf(
				"Events/sec: %.2f\n", result.EventsPerSecond,
			),
		)
		file.WriteString(
			fmt.Sprintf(
				"Success Rate: %.1f%%\n", result.SuccessRate,
			),
		)
		file.WriteString(fmt.Sprintf("Avg Latency: %v\n", result.AvgLatency))
		file.WriteString(fmt.Sprintf("P90 Latency: %v\n", result.P90Latency))
		file.WriteString(fmt.Sprintf("P95 Latency: %v\n", result.P95Latency))
		file.WriteString(fmt.Sprintf("P99 Latency: %v\n", result.P99Latency))
		file.WriteString(
			fmt.Sprintf(
				"Bottom 10%% Avg Latency: %v\n", result.Bottom10Avg,
			),
		)
		file.WriteString(
			fmt.Sprintf(
				"Memory: %d MB\n", result.MemoryUsed/(1024*1024),
			),
		)
		file.WriteString("\n")
	}

	return nil
}

// GenerateAsciidocReport creates a simple AsciiDoc report alongside the text report.
func (b *Benchmark) GenerateAsciidocReport() error {
	path := filepath.Join(b.config.DataDir, "benchmark_report.adoc")
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	defer file.Close()

	file.WriteString("= NOSTR Relay Benchmark Results\n\n")
	file.WriteString(
		fmt.Sprintf(
			"Generated: %s\n\n", time.Now().Format(time.RFC3339),
		),
	)
	file.WriteString("[cols=\"1,^1,^1,^1,^1,^1\",options=\"header\"]\n")
	file.WriteString("|===\n")
	file.WriteString("| Test | Events/sec | Avg Latency | P90 | P95 | Bottom 10% Avg\n")

	b.mu.RLock()
	defer b.mu.RUnlock()
	for _, r := range b.results {
		file.WriteString(fmt.Sprintf("| %s\n", r.TestName))
		file.WriteString(fmt.Sprintf("| %.2f\n", r.EventsPerSecond))
		file.WriteString(fmt.Sprintf("| %v\n", r.AvgLatency))
		file.WriteString(fmt.Sprintf("| %v\n", r.P90Latency))
		file.WriteString(fmt.Sprintf("| %v\n", r.P95Latency))
		file.WriteString(fmt.Sprintf("| %v\n", r.Bottom10Avg))
	}
	file.WriteString("|===\n")

	fmt.Printf("AsciiDoc report saved to: %s\n", path)
	return nil
}

// Helper functions

func calculateAvgLatency(latencies []time.Duration) time.Duration {
	if len(latencies) == 0 {
		return 0
	}

	var total time.Duration
	for _, l := range latencies {
		total += l
	}
	return total / time.Duration(len(latencies))
}

func calculatePercentileLatency(
	latencies []time.Duration, percentile float64,
) time.Duration {
	if len(latencies) == 0 {
		return 0
	}
	// Sort a copy to avoid mutating caller slice
	copySlice := make([]time.Duration, len(latencies))
	copy(copySlice, latencies)
	sort.Slice(
		copySlice, func(i, j int) bool { return copySlice[i] < copySlice[j] },
	)
	index := int(float64(len(copySlice)-1) * percentile)
	if index < 0 {
		index = 0
	}
	if index >= len(copySlice) {
		index = len(copySlice) - 1
	}
	return copySlice[index]
}

// calculateBottom10Avg returns the average latency of the slowest 10% of samples.
func calculateBottom10Avg(latencies []time.Duration) time.Duration {
	if len(latencies) == 0 {
		return 0
	}
	copySlice := make([]time.Duration, len(latencies))
	copy(copySlice, latencies)
	sort.Slice(
		copySlice, func(i, j int) bool { return copySlice[i] < copySlice[j] },
	)
	start := int(float64(len(copySlice)) * 0.9)
	if start < 0 {
		start = 0
	}
	if start >= len(copySlice) {
		start = len(copySlice) - 1
	}
	var total time.Duration
	for i := start; i < len(copySlice); i++ {
		total += copySlice[i]
	}
	count := len(copySlice) - start
	if count <= 0 {
		return 0
	}
	return total / time.Duration(count)
}

func getMemUsage() uint64 {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	return m.Alloc
}
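Note: the latency helpers above are plain sorted-slice arithmetic. As a quick sanity check of the cut-off behaviour, a throwaway test like the following could sit next to main.go; the file name and sample values are illustrative and not part of this commit.

// benchmark_helpers_test.go (illustrative; not part of this commit)
package main

import (
	"testing"
	"time"
)

func TestLatencyHelpers(t *testing.T) {
	// 1ms..100ms in 1ms steps.
	var lat []time.Duration
	for i := 1; i <= 100; i++ {
		lat = append(lat, time.Duration(i)*time.Millisecond)
	}
	// index = int(99 * 0.90) = 89, so the p90 sample is 90ms.
	if got := calculatePercentileLatency(lat, 0.90); got != 90*time.Millisecond {
		t.Fatalf("p90 = %v, want 90ms", got)
	}
	// Bottom 10% is the slowest ten samples (91ms..100ms), averaging 95.5ms.
	if got := calculateBottom10Avg(lat); got != 95500*time.Microsecond {
		t.Fatalf("bottom10 = %v, want 95.5ms", got)
	}
}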
156 cmd/benchmark/profile.sh Executable file
@@ -0,0 +1,156 @@
#!/usr/bin/env bash
set -euo pipefail

# Runs the ORLY relay with CPU profiling enabled and opens the resulting
# pprof profile in a local web UI.
#
# Usage:
#   ./profile.sh [duration_seconds]
#
# - Builds the relay.
# - Starts it with ORLY_PPROF=cpu and minimal logging.
# - Waits for the profile path printed at startup.
# - Runs for DURATION seconds (default 10), then stops the relay to flush the
#   CPU profile to disk.
# - Launches `go tool pprof -http=:8000` for convenient browsing.
#
# Notes:
# - The profile file path is detected from the relay's stdout/stderr lines
#   emitted by github.com/pkg/profile, typically like:
#     profile: cpu profiling enabled, path: /tmp/profile123456/cpu.pprof
# - You can change DURATION by passing a number of seconds as the first arg
#   or by setting DURATION env var.

SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd -- "${SCRIPT_DIR}/../.." && pwd)"
cd "$REPO_ROOT"

DURATION="${1:-${DURATION:-10}}"
PPROF_HTTP_PORT="${PPROF_HTTP_PORT:-8000}"

# Load generation controls
LOAD_ENABLED="${LOAD_ENABLED:-1}" # set to 0 to disable load
# Use the benchmark main package in cmd/benchmark as the load generator
BENCHMARK_PKG_DIR="$REPO_ROOT/cmd/benchmark"
BENCHMARK_BIN="${BENCHMARK_BIN:-}" # if empty, we will build to $RUN_DIR/benchmark
BENCHMARK_EVENTS="${BENCHMARK_EVENTS:-}" # optional override for -events
BENCHMARK_DURATION="${BENCHMARK_DURATION:-}" # optional override for -duration (e.g. 30s); defaults to DURATION seconds

BIN="$REPO_ROOT/next.orly.dev"
LOG_DIR="${LOG_DIR:-$REPO_ROOT/cmd/benchmark/reports}"
mkdir -p "$LOG_DIR"
RUN_TS="$(date +%Y%m%d_%H%M%S)"
RUN_DIR="$LOG_DIR/profile_run_${RUN_TS}"
mkdir -p "$RUN_DIR"
LOG_FILE="$RUN_DIR/relay.log"
LOAD_LOG_FILE="$RUN_DIR/load.log"

echo "[profile.sh] Building relay binary ..."
go build -o "$BIN" .

# Ensure we clean up the child processes on exit
RELAY_PID=""
LOAD_PID=""
cleanup() {
  if [[ -n "$LOAD_PID" ]] && kill -0 "$LOAD_PID" 2>/dev/null; then
    echo "[profile.sh] Stopping load generator (pid=$LOAD_PID) ..."
    kill -INT "$LOAD_PID" 2>/dev/null || true
    sleep 0.5
    kill -TERM "$LOAD_PID" 2>/dev/null || true
  fi
  if [[ -n "$RELAY_PID" ]] && kill -0 "$RELAY_PID" 2>/dev/null; then
    echo "[profile.sh] Stopping relay (pid=$RELAY_PID) ..."
    kill -INT "$RELAY_PID" 2>/dev/null || true
    # give it a moment to exit and flush profile
    sleep 1
    kill -TERM "$RELAY_PID" 2>/dev/null || true
  fi
}
trap cleanup EXIT

# Start the relay with CPU profiling enabled. Capture both stdout and stderr.
echo "[profile.sh] Starting relay with CPU profiling enabled ..."
(
  ORLY_LOG_LEVEL=off \
  ORLY_LISTEN="${ORLY_LISTEN:-127.0.0.1}" \
  ORLY_PORT="${ORLY_PORT:-3334}" \
  ORLY_PPROF=cpu \
  "$BIN"
) >"$LOG_FILE" 2>&1 &
RELAY_PID=$!
echo "[profile.sh] Relay started with pid $RELAY_PID; logging to $LOG_FILE"

# Wait until the profile path is printed. Time out after a reasonable period.
PPROF_FILE=""
START_TIME=$(date +%s)
TIMEOUT=30

echo "[profile.sh] Waiting for profile path to appear in relay output ..."
while :; do
  if grep -Eo "/tmp/profile[^ ]+/cpu\.pprof" "$LOG_FILE" >/dev/null 2>&1; then
    PPROF_FILE=$(grep -Eo "/tmp/profile[^ ]+/cpu\.pprof" "$LOG_FILE" | tail -n1)
    break
  fi
  NOW=$(date +%s)
  if (( NOW - START_TIME > TIMEOUT )); then
    echo "[profile.sh] ERROR: Timed out waiting for profile path in $LOG_FILE" >&2
    echo "Last 50 log lines:" >&2
    tail -n 50 "$LOG_FILE" >&2
    exit 1
  fi
  sleep 0.3
done

echo "[profile.sh] Detected profile file: $PPROF_FILE"

# Optionally start load generator to exercise the relay
if [[ "$LOAD_ENABLED" == "1" ]]; then
  # Build benchmark binary if not provided
  if [[ -z "$BENCHMARK_BIN" ]]; then
    BENCHMARK_BIN="$RUN_DIR/benchmark"
    echo "[profile.sh] Building benchmark load generator ($BENCHMARK_PKG_DIR) ..."
    go build -o "$BENCHMARK_BIN" "$BENCHMARK_PKG_DIR"
  fi
  BENCH_DB_DIR="$RUN_DIR/benchdb"
  mkdir -p "$BENCH_DB_DIR"
  DURATION_ARG="${BENCHMARK_DURATION:-${DURATION}s}"
  EXTRA_EVENTS=""
  if [[ -n "$BENCHMARK_EVENTS" ]]; then
    EXTRA_EVENTS="-events=$BENCHMARK_EVENTS"
  fi
  echo "[profile.sh] Starting benchmark load generator for duration $DURATION_ARG ..."
  RELAY_URL="ws://${ORLY_LISTEN:-127.0.0.1}:${ORLY_PORT:-3334}"
  echo "[profile.sh] Using relay URL: $RELAY_URL"
  # Background the load generator in the current shell (not a subshell) so
  # that $! really is the load generator's PID for cleanup().
  "$BENCHMARK_BIN" -relay-url="$RELAY_URL" -net-workers="${NET_WORKERS:-2}" -net-rate="${NET_RATE:-20}" -duration="$DURATION_ARG" $EXTRA_EVENTS \
    >"$LOAD_LOG_FILE" 2>&1 &
  LOAD_PID=$!
  echo "[profile.sh] Load generator started (pid=$LOAD_PID); logging to $LOAD_LOG_FILE"
else
  echo "[profile.sh] LOAD_ENABLED=0; not starting load generator."
fi

echo "[profile.sh] Letting the relay run for ${DURATION}s to collect CPU samples ..."
sleep "$DURATION"

# Stop the relay to flush the CPU profile
cleanup
# Disable trap so we don't double-kill
trap - EXIT

# Wait briefly to ensure the profile file is finalized
for i in {1..20}; do
  if [[ -s "$PPROF_FILE" ]]; then
    break
  fi
  sleep 0.2
done

if [[ ! -s "$PPROF_FILE" ]]; then
  echo "[profile.sh] WARNING: Profile file is empty or missing: $PPROF_FILE" >&2
fi

# Launch pprof HTTP UI
echo "[profile.sh] Launching pprof web UI (http://localhost:${PPROF_HTTP_PORT}) ..."
exec go tool pprof -http=":${PPROF_HTTP_PORT}" "$BIN" "$PPROF_FILE"
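Note: the grep in profile.sh relies on the startup line printed by github.com/pkg/profile. The relay's actual ORLY_PPROF wiring is outside this diff; the sketch below only shows the usual pattern with that package which produces such a line, and is not the relay's own code.

// Sketch of the github.com/pkg/profile pattern whose startup line
// ("profile: cpu profiling enabled, ...") the script above greps for.
// This is not the relay's actual ORLY_PPROF handling.
package main

import (
	"os"

	"github.com/pkg/profile"
)

func main() {
	if os.Getenv("ORLY_PPROF") == "cpu" {
		// Writes cpu.pprof into a fresh /tmp/profileNNNN directory, logs the
		// path on startup, and flushes the profile when Stop() runs at exit.
		defer profile.Start(profile.CPUProfile).Stop()
	}
	runServer() // placeholder for the real server loop
}

func runServer() {}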
140 cmd/benchmark/reports/run_20250912_195729/aggregate_report.txt Normal file
@@ -0,0 +1,140 @@
|
||||
================================================================
|
||||
NOSTR RELAY BENCHMARK AGGREGATE REPORT
|
||||
================================================================
|
||||
Generated: 2025-09-12T20:02:26+00:00
|
||||
Benchmark Configuration:
|
||||
Events per test: 10000
|
||||
Concurrent workers: 8
|
||||
Test duration: 60s
|
||||
|
||||
Relays tested: 6
|
||||
|
||||
================================================================
|
||||
SUMMARY BY RELAY
|
||||
================================================================
|
||||
|
||||
Relay: next-orly
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 17901.30
|
||||
Events/sec: 1504.52
|
||||
Events/sec: 17901.30
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 433.058µs
|
||||
Avg Latency: 182.813µs
|
||||
Avg Latency: 9.086952ms
|
||||
P95 Latency: 456.738µs
|
||||
P95 Latency: 152.86µs
|
||||
P95 Latency: 18.156339ms
|
||||
|
||||
Relay: khatru-sqlite
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 14291.70
|
||||
Events/sec: 1530.29
|
||||
Events/sec: 14291.70
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 545.724µs
|
||||
Avg Latency: 205.962µs
|
||||
Avg Latency: 9.092604ms
|
||||
P95 Latency: 473.43µs
|
||||
P95 Latency: 165.525µs
|
||||
P95 Latency: 19.302571ms
|
||||
|
||||
Relay: khatru-badger
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 16351.11
|
||||
Events/sec: 1539.25
|
||||
Events/sec: 16351.11
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 474.016µs
|
||||
Avg Latency: 226.602µs
|
||||
Avg Latency: 9.930935ms
|
||||
P95 Latency: 479.03µs
|
||||
P95 Latency: 239.525µs
|
||||
P95 Latency: 17.75358ms
|
||||
|
||||
Relay: relayer-basic
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 16522.60
|
||||
Events/sec: 1537.71
|
||||
Events/sec: 16522.60
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 466.066µs
|
||||
Avg Latency: 215.609µs
|
||||
Avg Latency: 9.851217ms
|
||||
P95 Latency: 514.849µs
|
||||
P95 Latency: 141.91µs
|
||||
P95 Latency: 23.101412ms
|
||||
|
||||
Relay: strfry
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 15346.12
|
||||
Events/sec: 1534.88
|
||||
Events/sec: 15346.12
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 506.51µs
|
||||
Avg Latency: 216.564µs
|
||||
Avg Latency: 9.938991ms
|
||||
P95 Latency: 590.442µs
|
||||
P95 Latency: 267.91µs
|
||||
P95 Latency: 19.784708ms
|
||||
|
||||
Relay: nostr-rs-relay
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 15199.95
|
||||
Events/sec: 1533.87
|
||||
Events/sec: 15199.95
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 508.699µs
|
||||
Avg Latency: 217.187µs
|
||||
Avg Latency: 9.38757ms
|
||||
P95 Latency: 1.011413ms
|
||||
P95 Latency: 130.018µs
|
||||
P95 Latency: 19.250416ms
|
||||
|
||||
|
||||
================================================================
|
||||
DETAILED RESULTS
|
||||
================================================================
|
||||
|
||||
Individual relay reports are available in:
|
||||
- /reports/run_20250912_195729/khatru-badger_results.txt
|
||||
- /reports/run_20250912_195729/khatru-sqlite_results.txt
|
||||
- /reports/run_20250912_195729/next-orly_results.txt
|
||||
- /reports/run_20250912_195729/nostr-rs-relay_results.txt
|
||||
- /reports/run_20250912_195729/relayer-basic_results.txt
|
||||
- /reports/run_20250912_195729/strfry_results.txt
|
||||
|
||||
================================================================
|
||||
BENCHMARK COMPARISON TABLE
|
||||
================================================================
|
||||
|
||||
Relay Status Peak Tput/s Avg Latency Success Rate
|
||||
---- ------ ----------- ----------- ------------
|
||||
next-orly OK 17901.30 433.058µs 100.0%
|
||||
khatru-sqlite OK 14291.70 545.724µs 100.0%
|
||||
khatru-badger OK 16351.11 474.016µs 100.0%
|
||||
relayer-basic OK 16522.60 466.066µs 100.0%
|
||||
strfry OK 15346.12 506.51µs 100.0%
|
||||
nostr-rs-relay OK 15199.95 508.699µs 100.0%
|
||||
|
||||
================================================================
|
||||
End of Report
|
||||
================================================================
|
||||
@@ -0,0 +1,104 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_khatru-badger_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912195906053114 INF /tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912195906053741 INF /tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912195906053768 INF /tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912195906054020 INF (*types.Uint32)(0xc00570406c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912195906054071 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 611.579176ms
|
||||
Events/sec: 16351.11
|
||||
Avg latency: 474.016µs
|
||||
P95 latency: 479.03µs
|
||||
P99 latency: 594.73µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 160.976517ms
|
||||
Burst completed: 1000 events in 153.010415ms
|
||||
Burst completed: 1000 events in 146.10015ms
|
||||
Burst completed: 1000 events in 148.403729ms
|
||||
Burst completed: 1000 events in 141.681801ms
|
||||
Burst completed: 1000 events in 154.663067ms
|
||||
Burst completed: 1000 events in 135.960988ms
|
||||
Burst completed: 1000 events in 136.240589ms
|
||||
Burst completed: 1000 events in 141.75454ms
|
||||
Burst completed: 1000 events in 152.485379ms
|
||||
Burst test completed: 10000 events in 6.496690038s
|
||||
Events/sec: 1539.25
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 37.695370694s
|
||||
Combined ops/sec: 265.28
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 611.579176ms
|
||||
Total Events: 10000
|
||||
Events/sec: 16351.11
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 474.016µs
|
||||
P95 Latency: 479.03µs
|
||||
P99 Latency: 594.73µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.496690038s
|
||||
Total Events: 10000
|
||||
Events/sec: 1539.25
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 207 MB
|
||||
Avg Latency: 226.602µs
|
||||
P95 Latency: 239.525µs
|
||||
P99 Latency: 168.561µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 37.695370694s
|
||||
Total Events: 10000
|
||||
Events/sec: 265.28
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 132 MB
|
||||
Avg Latency: 9.930935ms
|
||||
P95 Latency: 17.75358ms
|
||||
P99 Latency: 24.256293ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
|
||||
20250912195950858706 INF /tmp/benchmark_khatru-badger_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912195951643646 INF /tmp/benchmark_khatru-badger_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912195951645255 INF /tmp/benchmark_khatru-badger_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: khatru-badger
|
||||
RELAY_URL: ws://khatru-badger:3334
|
||||
TEST_TIMESTAMP: 2025-09-12T19:59:51+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,104 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_khatru-sqlite_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912195817361580 INF /tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912195817362030 INF /tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912195817362064 INF /tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912195817362711 INF (*types.Uint32)(0xc00000005c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912195817362777 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 699.706889ms
|
||||
Events/sec: 14291.70
|
||||
Avg latency: 545.724µs
|
||||
P95 latency: 473.43µs
|
||||
P99 latency: 478.349µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 138.253122ms
|
||||
Burst completed: 1000 events in 153.709429ms
|
||||
Burst completed: 1000 events in 158.711026ms
|
||||
Burst completed: 1000 events in 152.54677ms
|
||||
Burst completed: 1000 events in 144.735244ms
|
||||
Burst completed: 1000 events in 153.236893ms
|
||||
Burst completed: 1000 events in 150.180515ms
|
||||
Burst completed: 1000 events in 154.733588ms
|
||||
Burst completed: 1000 events in 151.252182ms
|
||||
Burst completed: 1000 events in 150.610613ms
|
||||
Burst test completed: 10000 events in 6.534724469s
|
||||
Events/sec: 1530.29
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 35.563312501s
|
||||
Combined ops/sec: 281.19
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 699.706889ms
|
||||
Total Events: 10000
|
||||
Events/sec: 14291.70
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 545.724µs
|
||||
P95 Latency: 473.43µs
|
||||
P99 Latency: 478.349µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.534724469s
|
||||
Total Events: 10000
|
||||
Events/sec: 1530.29
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 208 MB
|
||||
Avg Latency: 205.962µs
|
||||
P95 Latency: 165.525µs
|
||||
P99 Latency: 253.411µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 35.563312501s
|
||||
Total Events: 10000
|
||||
Events/sec: 281.19
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 146 MB
|
||||
Avg Latency: 9.092604ms
|
||||
P95 Latency: 19.302571ms
|
||||
P99 Latency: 16.944829ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
|
||||
20250912195900161526 INF /tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912195900909573 INF /tmp/benchmark_khatru-sqlite_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912195900911092 INF /tmp/benchmark_khatru-sqlite_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: khatru-sqlite
|
||||
RELAY_URL: ws://khatru-sqlite:3334
|
||||
TEST_TIMESTAMP: 2025-09-12T19:59:01+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
104 cmd/benchmark/reports/run_20250912_195729/next-orly_results.txt Normal file
@@ -0,0 +1,104 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_next-orly_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912195729240522 INF /tmp/benchmark_next-orly_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912195729241087 INF /tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912195729241168 INF /tmp/benchmark_next-orly_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912195729241759 INF (*types.Uint32)(0xc0001de49c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912195729241847 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 558.618706ms
|
||||
Events/sec: 17901.30
|
||||
Avg latency: 433.058µs
|
||||
P95 latency: 456.738µs
|
||||
P99 latency: 337.231µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 172.949275ms
|
||||
Burst completed: 1000 events in 175.209401ms
|
||||
Burst completed: 1000 events in 156.532197ms
|
||||
Burst completed: 1000 events in 157.913421ms
|
||||
Burst completed: 1000 events in 151.37659ms
|
||||
Burst completed: 1000 events in 161.938783ms
|
||||
Burst completed: 1000 events in 168.47761ms
|
||||
Burst completed: 1000 events in 159.951768ms
|
||||
Burst completed: 1000 events in 170.308111ms
|
||||
Burst completed: 1000 events in 146.767432ms
|
||||
Burst test completed: 10000 events in 6.646634323s
|
||||
Events/sec: 1504.52
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 35.548232107s
|
||||
Combined ops/sec: 281.31
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 558.618706ms
|
||||
Total Events: 10000
|
||||
Events/sec: 17901.30
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 433.058µs
|
||||
P95 Latency: 456.738µs
|
||||
P99 Latency: 337.231µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.646634323s
|
||||
Total Events: 10000
|
||||
Events/sec: 1504.52
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 207 MB
|
||||
Avg Latency: 182.813µs
|
||||
P95 Latency: 152.86µs
|
||||
P99 Latency: 204.198µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 35.548232107s
|
||||
Total Events: 10000
|
||||
Events/sec: 281.31
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 215 MB
|
||||
Avg Latency: 9.086952ms
|
||||
P95 Latency: 18.156339ms
|
||||
P99 Latency: 24.346573ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly_8/benchmark_report.txt
|
||||
20250912195811996353 INF /tmp/benchmark_next-orly_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912195812308400 INF /tmp/benchmark_next-orly_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912195812310341 INF /tmp/benchmark_next-orly_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: next-orly
|
||||
RELAY_URL: ws://next-orly:8080
|
||||
TEST_TIMESTAMP: 2025-09-12T19:58:12+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,104 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_nostr-rs-relay_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912200137539643 INF /tmp/benchmark_nostr-rs-relay_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912200137540391 INF /tmp/benchmark_nostr-rs-relay_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912200137540449 INF /tmp/benchmark_nostr-rs-relay_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912200137540903 INF (*types.Uint32)(0xc0001c24cc)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912200137540961 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 657.896815ms
|
||||
Events/sec: 15199.95
|
||||
Avg latency: 508.699µs
|
||||
P95 latency: 1.011413ms
|
||||
P99 latency: 710.782µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 149.389787ms
|
||||
Burst completed: 1000 events in 138.154354ms
|
||||
Burst completed: 1000 events in 139.952633ms
|
||||
Burst completed: 1000 events in 148.684306ms
|
||||
Burst completed: 1000 events in 154.779586ms
|
||||
Burst completed: 1000 events in 163.72717ms
|
||||
Burst completed: 1000 events in 142.665132ms
|
||||
Burst completed: 1000 events in 151.637082ms
|
||||
Burst completed: 1000 events in 143.018896ms
|
||||
Burst completed: 1000 events in 157.963802ms
|
||||
Burst test completed: 10000 events in 6.519459944s
|
||||
Events/sec: 1533.87
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 36.26569002s
|
||||
Combined ops/sec: 275.74
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 657.896815ms
|
||||
Total Events: 10000
|
||||
Events/sec: 15199.95
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 153 MB
|
||||
Avg Latency: 508.699µs
|
||||
P95 Latency: 1.011413ms
|
||||
P99 Latency: 710.782µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.519459944s
|
||||
Total Events: 10000
|
||||
Events/sec: 1533.87
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 206 MB
|
||||
Avg Latency: 217.187µs
|
||||
P95 Latency: 130.018µs
|
||||
P99 Latency: 261.728µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 36.26569002s
|
||||
Total Events: 10000
|
||||
Events/sec: 275.74
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 225 MB
|
||||
Avg Latency: 9.38757ms
|
||||
P95 Latency: 19.250416ms
|
||||
P99 Latency: 20.049957ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
|
||||
20250912200220985006 INF /tmp/benchmark_nostr-rs-relay_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912200221295381 INF /tmp/benchmark_nostr-rs-relay_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912200221297677 INF /tmp/benchmark_nostr-rs-relay_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: nostr-rs-relay
|
||||
RELAY_URL: ws://nostr-rs-relay:8080
|
||||
TEST_TIMESTAMP: 2025-09-12T20:02:21+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,104 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_relayer-basic_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912195956808180 INF /tmp/benchmark_relayer-basic_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912195956808720 INF /tmp/benchmark_relayer-basic_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912195956808755 INF /tmp/benchmark_relayer-basic_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912195956809102 INF (*types.Uint32)(0xc0001bc04c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912195956809190 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 605.231707ms
|
||||
Events/sec: 16522.60
|
||||
Avg latency: 466.066µs
|
||||
P95 latency: 514.849µs
|
||||
P99 latency: 451.358µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 149.715312ms
|
||||
Burst completed: 1000 events in 146.385191ms
|
||||
Burst completed: 1000 events in 147.010481ms
|
||||
Burst completed: 1000 events in 151.671062ms
|
||||
Burst completed: 1000 events in 143.215087ms
|
||||
Burst completed: 1000 events in 137.331431ms
|
||||
Burst completed: 1000 events in 155.735079ms
|
||||
Burst completed: 1000 events in 161.246126ms
|
||||
Burst completed: 1000 events in 140.174417ms
|
||||
Burst completed: 1000 events in 144.819799ms
|
||||
Burst test completed: 10000 events in 6.503155987s
|
||||
Events/sec: 1537.71
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 37.45410417s
|
||||
Combined ops/sec: 266.99
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 605.231707ms
|
||||
Total Events: 10000
|
||||
Events/sec: 16522.60
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 152 MB
|
||||
Avg Latency: 466.066µs
|
||||
P95 Latency: 514.849µs
|
||||
P99 Latency: 451.358µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.503155987s
|
||||
Total Events: 10000
|
||||
Events/sec: 1537.71
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 203 MB
|
||||
Avg Latency: 215.609µs
|
||||
P95 Latency: 141.91µs
|
||||
P99 Latency: 204.819µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 37.45410417s
|
||||
Total Events: 10000
|
||||
Events/sec: 266.99
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 148 MB
|
||||
Avg Latency: 9.851217ms
|
||||
P95 Latency: 23.101412ms
|
||||
P99 Latency: 17.889412ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
|
||||
20250912200041372670 INF /tmp/benchmark_relayer-basic_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912200041686782 INF /tmp/benchmark_relayer-basic_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912200041689009 INF /tmp/benchmark_relayer-basic_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: relayer-basic
|
||||
RELAY_URL: ws://relayer-basic:7447
|
||||
TEST_TIMESTAMP: 2025-09-12T20:00:41+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
35
cmd/benchmark/reports/run_20250912_195729/results.adoc
Normal file
35
cmd/benchmark/reports/run_20250912_195729/results.adoc
Normal file
@@ -0,0 +1,35 @@
|
||||
= NOSTR Relay Benchmark Results
|
||||
|
||||
Generated from: aggregate_report.txt
|
||||
|
||||
[cols="1,^1,^1,^1,^1,^1,^1",options="header"]
|
||||
|===
|
||||
| Metric | next-orly | khatru-sqlite | khatru-badger | relayer-basic | strfry | nostr-rs-relay
|
||||
|
||||
| Store Events/sec
|
||||
| 17901.30 | 14291.70 | 16351.11 | 16522.60 | 15346.12 | 15199.95
|
||||
|
||||
| Store Avg Latency #1
|
||||
| 433.058µs | 545.724µs | 474.016µs | 466.066µs | 506.51µs | 508.699µs
|
||||
|
||||
| Store P95 Latency #1
|
||||
| 456.738µs | 473.43µs | 479.03µs | 514.849µs | 590.442µs | 1.011413ms
|
||||
|
||||
| Query Events/sec #2
|
||||
| 1504.52 | 1530.29 | 1539.25 | 1537.71 | 1534.88 | 1533.87
|
||||
|
||||
| Query Avg Latency #2
|
||||
| 182.813µs | 205.962µs | 226.602µs | 215.609µs | 216.564µs | 217.187µs
|
||||
|
||||
| Query P95 Latency #2
|
||||
| 152.86µs | 165.525µs | 239.525µs | 141.91µs | 267.91µs | 130.018µs
|
||||
|
||||
| Concurrent Store/Query Events/sec #3
|
||||
| 17901.30 | 14291.70 | 16351.11 | 16522.60 | 15346.12 | 15199.95
|
||||
|
||||
| Concurrent Store/Query Avg Latency #3
|
||||
| 9.086952ms | 9.092604ms | 9.930935ms | 9.851217ms | 9.938991ms | 9.38757ms
|
||||
|
||||
| Concurrent Store/Query P95 Latency #3
|
||||
| 18.156339ms | 19.302571ms | 17.75358ms | 23.101412ms | 19.784708ms | 19.250416ms
|
||||
|===
|
||||
104
cmd/benchmark/reports/run_20250912_195729/strfry_results.txt
Normal file
104
cmd/benchmark/reports/run_20250912_195729/strfry_results.txt
Normal file
@@ -0,0 +1,104 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_strfry_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912200046745432 INF /tmp/benchmark_strfry_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912200046746116 INF /tmp/benchmark_strfry_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912200046746193 INF /tmp/benchmark_strfry_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912200046746576 INF (*types.Uint32)(0xc0002a9c4c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912200046746636 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 651.630667ms
|
||||
Events/sec: 15346.12
|
||||
Avg latency: 506.51µs
|
||||
P95 latency: 590.442µs
|
||||
P99 latency: 278.399µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 148.701372ms
|
||||
Burst completed: 1000 events in 161.333951ms
|
||||
Burst completed: 1000 events in 146.993646ms
|
||||
Burst completed: 1000 events in 155.768019ms
|
||||
Burst completed: 1000 events in 143.83944ms
|
||||
Burst completed: 1000 events in 156.208347ms
|
||||
Burst completed: 1000 events in 150.769887ms
|
||||
Burst completed: 1000 events in 140.217044ms
|
||||
Burst completed: 1000 events in 150.831164ms
|
||||
Burst completed: 1000 events in 135.759058ms
|
||||
Burst test completed: 10000 events in 6.515183689s
|
||||
Events/sec: 1534.88
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 37.667054484s
|
||||
Combined ops/sec: 265.48
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 651.630667ms
|
||||
Total Events: 10000
|
||||
Events/sec: 15346.12
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 152 MB
|
||||
Avg Latency: 506.51µs
|
||||
P95 Latency: 590.442µs
|
||||
P99 Latency: 278.399µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.515183689s
|
||||
Total Events: 10000
|
||||
Events/sec: 1534.88
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 203 MB
|
||||
Avg Latency: 216.564µs
|
||||
P95 Latency: 267.91µs
|
||||
P99 Latency: 310.46µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 37.667054484s
|
||||
Total Events: 10000
|
||||
Events/sec: 265.48
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 136 MB
|
||||
Avg Latency: 9.938991ms
|
||||
P95 Latency: 19.784708ms
|
||||
P99 Latency: 18.788985ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
|
||||
20250912200131581470 INF /tmp/benchmark_strfry_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912200132372653 INF /tmp/benchmark_strfry_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912200132384548 INF /tmp/benchmark_strfry_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: strfry
|
||||
RELAY_URL: ws://strfry:8080
|
||||
TEST_TIMESTAMP: 2025-09-12T20:01:32+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
140
cmd/benchmark/reports/run_20250912_222649/aggregate_report.txt
Normal file
140
cmd/benchmark/reports/run_20250912_222649/aggregate_report.txt
Normal file
@@ -0,0 +1,140 @@
|
||||
================================================================
|
||||
NOSTR RELAY BENCHMARK AGGREGATE REPORT
|
||||
================================================================
|
||||
Generated: 2025-09-12T22:43:29+00:00
|
||||
Benchmark Configuration:
|
||||
Events per test: 10000
|
||||
Concurrent workers: 8
|
||||
Test duration: 60s
|
||||
|
||||
Relays tested: 6
|
||||
|
||||
================================================================
|
||||
SUMMARY BY RELAY
|
||||
================================================================
|
||||
|
||||
Relay: next-orly
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 18056.94
|
||||
Events/sec: 1492.32
|
||||
Events/sec: 16750.82
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 428.869µs
|
||||
Bottom 10% Avg Latency: 643.51µs
|
||||
Avg Latency: 178.04µs
|
||||
P95 Latency: 607.997µs
|
||||
P95 Latency: 243.954µs
|
||||
P95 Latency: 21.665387ms
|
||||
|
||||
Relay: khatru-sqlite
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 17635.76
|
||||
Events/sec: 1510.39
|
||||
Events/sec: 16509.10
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 437.941µs
|
||||
Bottom 10% Avg Latency: 659.71µs
|
||||
Avg Latency: 203.563µs
|
||||
P95 Latency: 621.964µs
|
||||
P95 Latency: 330.729µs
|
||||
P95 Latency: 21.838576ms
|
||||
|
||||
Relay: khatru-badger
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 17312.60
|
||||
Events/sec: 1508.54
|
||||
Events/sec: 15933.99
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 448.778µs
|
||||
Bottom 10% Avg Latency: 664.268µs
|
||||
Avg Latency: 196.38µs
|
||||
P95 Latency: 633.085µs
|
||||
P95 Latency: 293.579µs
|
||||
P95 Latency: 22.727378ms
|
||||
|
||||
Relay: relayer-basic
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 15155.00
|
||||
Events/sec: 1545.44
|
||||
Events/sec: 14255.58
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 513.243µs
|
||||
Bottom 10% Avg Latency: 864.746µs
|
||||
Avg Latency: 273.645µs
|
||||
P95 Latency: 792.685µs
|
||||
P95 Latency: 498.989µs
|
||||
P95 Latency: 22.924497ms
|
||||
|
||||
Relay: strfry
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 15245.05
|
||||
Events/sec: 1533.59
|
||||
Events/sec: 15507.07
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 510.383µs
|
||||
Bottom 10% Avg Latency: 831.211µs
|
||||
Avg Latency: 223.359µs
|
||||
P95 Latency: 769.085µs
|
||||
P95 Latency: 378.145µs
|
||||
P95 Latency: 22.152884ms
|
||||
|
||||
Relay: nostr-rs-relay
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 16312.24
|
||||
Events/sec: 1502.05
|
||||
Events/sec: 14131.23
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 476.418µs
|
||||
Bottom 10% Avg Latency: 722.179µs
|
||||
Avg Latency: 182.765µs
|
||||
P95 Latency: 686.836µs
|
||||
P95 Latency: 257.082µs
|
||||
P95 Latency: 20.680962ms
|
||||
|
||||
|
||||
================================================================
|
||||
DETAILED RESULTS
|
||||
================================================================
|
||||
|
||||
Individual relay reports are available in:
|
||||
- /reports/run_20250912_222649/khatru-badger_results.txt
|
||||
- /reports/run_20250912_222649/khatru-sqlite_results.txt
|
||||
- /reports/run_20250912_222649/next-orly_results.txt
|
||||
- /reports/run_20250912_222649/nostr-rs-relay_results.txt
|
||||
- /reports/run_20250912_222649/relayer-basic_results.txt
|
||||
- /reports/run_20250912_222649/strfry_results.txt
|
||||
|
||||
================================================================
|
||||
BENCHMARK COMPARISON TABLE
|
||||
================================================================
|
||||
|
||||
Relay Status Peak Tput/s Avg Latency Success Rate
|
||||
---- ------ ----------- ----------- ------------
|
||||
next-orly OK 18056.94 428.869µs 100.0%
|
||||
khatru-sqlite OK 17635.76 437.941µs 100.0%
|
||||
khatru-badger OK 17312.60 448.778µs 100.0%
|
||||
relayer-basic OK 15155.00 513.243µs 100.0%
|
||||
strfry OK 15245.05 510.383µs 100.0%
|
||||
nostr-rs-relay OK 16312.24 476.418µs 100.0%
|
||||
|
||||
================================================================
|
||||
End of Report
|
||||
================================================================
|
||||
@@ -0,0 +1,190 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_khatru-badger_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912223222496620 INF /tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912223222497154 INF /tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912223222497184 INF /tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912223222497402 INF (*types.Uint32)(0xc0000100fc)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912223222497454 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 577.614152ms
|
||||
Events/sec: 17312.60
|
||||
Avg latency: 448.778µs
|
||||
P90 latency: 584.783µs
|
||||
P95 latency: 633.085µs
|
||||
P99 latency: 749.537µs
|
||||
Bottom 10% Avg latency: 664.268µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 161.62554ms
|
||||
Burst completed: 1000 events in 154.666063ms
|
||||
Burst completed: 1000 events in 149.999903ms
|
||||
Burst completed: 1000 events in 169.141205ms
|
||||
Burst completed: 1000 events in 153.987041ms
|
||||
Burst completed: 1000 events in 141.227756ms
|
||||
Burst completed: 1000 events in 168.989116ms
|
||||
Burst completed: 1000 events in 161.032171ms
|
||||
Burst completed: 1000 events in 182.128996ms
|
||||
Burst completed: 1000 events in 161.86147ms
|
||||
Burst test completed: 10000 events in 6.628942674s
|
||||
Events/sec: 1508.54
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 36.466065909s
|
||||
Combined ops/sec: 274.23
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 627.589155ms
|
||||
Events/sec: 15933.99
|
||||
Avg latency: 489.881µs
|
||||
P90 latency: 628.857µs
|
||||
P95 latency: 679.363µs
|
||||
P99 latency: 828.307µs
|
||||
Bottom 10% Avg latency: 716.862µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 150.262543ms
|
||||
Burst completed: 1000 events in 148.027109ms
|
||||
Burst completed: 1000 events in 139.184066ms
|
||||
Burst completed: 1000 events in 147.196277ms
|
||||
Burst completed: 1000 events in 141.143557ms
|
||||
Burst completed: 1000 events in 138.727197ms
|
||||
Burst completed: 1000 events in 143.014207ms
|
||||
Burst completed: 1000 events in 143.355055ms
|
||||
Burst completed: 1000 events in 162.573956ms
|
||||
Burst completed: 1000 events in 142.875393ms
|
||||
Burst test completed: 10000 events in 6.475822519s
|
||||
Events/sec: 1544.21
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4742 reads in 1m0.036644794s
|
||||
Combined ops/sec: 162.27
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 577.614152ms
|
||||
Total Events: 10000
|
||||
Events/sec: 17312.60
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 152 MB
|
||||
Avg Latency: 448.778µs
|
||||
P90 Latency: 584.783µs
|
||||
P95 Latency: 633.085µs
|
||||
P99 Latency: 749.537µs
|
||||
Bottom 10% Avg Latency: 664.268µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.628942674s
|
||||
Total Events: 10000
|
||||
Events/sec: 1508.54
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 204 MB
|
||||
Avg Latency: 196.38µs
|
||||
P90 Latency: 260.706µs
|
||||
P95 Latency: 293.579µs
|
||||
P99 Latency: 385.694µs
|
||||
Bottom 10% Avg Latency: 317.532µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 36.466065909s
|
||||
Total Events: 10000
|
||||
Events/sec: 274.23
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 128 MB
|
||||
Avg Latency: 9.448363ms
|
||||
P90 Latency: 20.988228ms
|
||||
P95 Latency: 22.727378ms
|
||||
P99 Latency: 25.094784ms
|
||||
Bottom 10% Avg Latency: 23.01277ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 627.589155ms
|
||||
Total Events: 10000
|
||||
Events/sec: 15933.99
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 124 MB
|
||||
Avg Latency: 489.881µs
|
||||
P90 Latency: 628.857µs
|
||||
P95 Latency: 679.363µs
|
||||
P99 Latency: 828.307µs
|
||||
Bottom 10% Avg Latency: 716.862µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.475822519s
|
||||
Total Events: 10000
|
||||
Events/sec: 1544.21
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 170 MB
|
||||
Avg Latency: 215.418µs
|
||||
P90 Latency: 287.237µs
|
||||
P95 Latency: 339.025µs
|
||||
P99 Latency: 510.682µs
|
||||
Bottom 10% Avg Latency: 378.172µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.036644794s
|
||||
Total Events: 9742
|
||||
Events/sec: 162.27
|
||||
Success Rate: 97.4%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 181 MB
|
||||
Avg Latency: 19.714686ms
|
||||
P90 Latency: 44.573506ms
|
||||
P95 Latency: 46.895555ms
|
||||
P99 Latency: 50.425027ms
|
||||
Bottom 10% Avg Latency: 47.384489ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.adoc
|
||||
20250912223503335481 INF /tmp/benchmark_khatru-badger_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912223504473151 INF /tmp/benchmark_khatru-badger_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912223504475627 INF /tmp/benchmark_khatru-badger_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: khatru-badger
|
||||
RELAY_URL: ws://khatru-badger:3334
|
||||
TEST_TIMESTAMP: 2025-09-12T22:35:04+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,190 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_khatru-sqlite_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912222936300616 INF /tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912222936301606 INF /tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912222936301647 INF /tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912222936301987 INF (*types.Uint32)(0xc0001c23f0)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912222936302060 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 567.02963ms
|
||||
Events/sec: 17635.76
|
||||
Avg latency: 437.941µs
|
||||
P90 latency: 574.133µs
|
||||
P95 latency: 621.964µs
|
||||
P99 latency: 768.473µs
|
||||
Bottom 10% Avg latency: 659.71µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 172.012448ms
|
||||
Burst completed: 1000 events in 145.502701ms
|
||||
Burst completed: 1000 events in 153.928098ms
|
||||
Burst completed: 1000 events in 169.995269ms
|
||||
Burst completed: 1000 events in 147.617375ms
|
||||
Burst completed: 1000 events in 157.211387ms
|
||||
Burst completed: 1000 events in 153.332744ms
|
||||
Burst completed: 1000 events in 172.374938ms
|
||||
Burst completed: 1000 events in 167.518935ms
|
||||
Burst completed: 1000 events in 155.211871ms
|
||||
Burst test completed: 10000 events in 6.620785215s
|
||||
Events/sec: 1510.39
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 35.700582016s
|
||||
Combined ops/sec: 280.11
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 605.726547ms
|
||||
Events/sec: 16509.10
|
||||
Avg latency: 470.577µs
|
||||
P90 latency: 609.791µs
|
||||
P95 latency: 660.256µs
|
||||
P99 latency: 788.641µs
|
||||
Bottom 10% Avg latency: 687.847µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 135.310723ms
|
||||
Burst completed: 1000 events in 166.604305ms
|
||||
Burst completed: 1000 events in 141.453184ms
|
||||
Burst completed: 1000 events in 146.579351ms
|
||||
Burst completed: 1000 events in 154.453638ms
|
||||
Burst completed: 1000 events in 156.212516ms
|
||||
Burst completed: 1000 events in 142.309354ms
|
||||
Burst completed: 1000 events in 152.268188ms
|
||||
Burst completed: 1000 events in 144.187829ms
|
||||
Burst completed: 1000 events in 147.609002ms
|
||||
Burst test completed: 10000 events in 6.508461808s
|
||||
Events/sec: 1536.46
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4662 reads in 1m0.040595326s
|
||||
Combined ops/sec: 160.92
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 567.02963ms
|
||||
Total Events: 10000
|
||||
Events/sec: 17635.76
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 437.941µs
|
||||
P90 Latency: 574.133µs
|
||||
P95 Latency: 621.964µs
|
||||
P99 Latency: 768.473µs
|
||||
Bottom 10% Avg Latency: 659.71µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.620785215s
|
||||
Total Events: 10000
|
||||
Events/sec: 1510.39
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 208 MB
|
||||
Avg Latency: 203.563µs
|
||||
P90 Latency: 274.152µs
|
||||
P95 Latency: 330.729µs
|
||||
P99 Latency: 521.483µs
|
||||
Bottom 10% Avg Latency: 378.237µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 35.700582016s
|
||||
Total Events: 10000
|
||||
Events/sec: 280.11
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 232 MB
|
||||
Avg Latency: 9.150925ms
|
||||
P90 Latency: 20.1434ms
|
||||
P95 Latency: 21.838576ms
|
||||
P99 Latency: 24.0106ms
|
||||
Bottom 10% Avg Latency: 22.04901ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 605.726547ms
|
||||
Total Events: 10000
|
||||
Events/sec: 16509.10
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 139 MB
|
||||
Avg Latency: 470.577µs
|
||||
P90 Latency: 609.791µs
|
||||
P95 Latency: 660.256µs
|
||||
P99 Latency: 788.641µs
|
||||
Bottom 10% Avg Latency: 687.847µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.508461808s
|
||||
Total Events: 10000
|
||||
Events/sec: 1536.46
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 182 MB
|
||||
Avg Latency: 199.49µs
|
||||
P90 Latency: 261.427µs
|
||||
P95 Latency: 294.771µs
|
||||
P99 Latency: 406.814µs
|
||||
Bottom 10% Avg Latency: 332.083µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.040595326s
|
||||
Total Events: 9662
|
||||
Events/sec: 160.92
|
||||
Success Rate: 96.6%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 204 MB
|
||||
Avg Latency: 19.935937ms
|
||||
P90 Latency: 44.802034ms
|
||||
P95 Latency: 48.282589ms
|
||||
P99 Latency: 52.169026ms
|
||||
Bottom 10% Avg Latency: 48.641697ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc
|
||||
20250912223216370778 INF /tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912223217349356 INF /tmp/benchmark_khatru-sqlite_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912223217352393 INF /tmp/benchmark_khatru-sqlite_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: khatru-sqlite
|
||||
RELAY_URL: ws://khatru-sqlite:3334
|
||||
TEST_TIMESTAMP: 2025-09-12T22:32:17+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
190
cmd/benchmark/reports/run_20250912_222649/next-orly_results.txt
Normal file
190
cmd/benchmark/reports/run_20250912_222649/next-orly_results.txt
Normal file
@@ -0,0 +1,190 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_next-orly_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912222650025765 INF /tmp/benchmark_next-orly_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912222650026455 INF /tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912222650026497 INF /tmp/benchmark_next-orly_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912222650026747 INF (*types.Uint32)(0xc0001f63cc)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912222650026778 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 553.803776ms
|
||||
Events/sec: 18056.94
|
||||
Avg latency: 428.869µs
|
||||
P90 latency: 558.663µs
|
||||
P95 latency: 607.997µs
|
||||
P99 latency: 749.787µs
|
||||
Bottom 10% Avg latency: 643.51µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 190.801687ms
|
||||
Burst completed: 1000 events in 168.170564ms
|
||||
Burst completed: 1000 events in 161.16591ms
|
||||
Burst completed: 1000 events in 161.43364ms
|
||||
Burst completed: 1000 events in 148.293941ms
|
||||
Burst completed: 1000 events in 172.875177ms
|
||||
Burst completed: 1000 events in 178.930553ms
|
||||
Burst completed: 1000 events in 161.052715ms
|
||||
Burst completed: 1000 events in 162.071335ms
|
||||
Burst completed: 1000 events in 171.849756ms
|
||||
Burst test completed: 10000 events in 6.70096222s
|
||||
Events/sec: 1492.32
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 35.645619485s
|
||||
Combined ops/sec: 280.54
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 596.985601ms
|
||||
Events/sec: 16750.82
|
||||
Avg latency: 465.438µs
|
||||
P90 latency: 594.151µs
|
||||
P95 latency: 636.592µs
|
||||
P99 latency: 757.953µs
|
||||
Bottom 10% Avg latency: 672.673µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 152.121077ms
|
||||
Burst completed: 1000 events in 160.774367ms
|
||||
Burst completed: 1000 events in 137.913676ms
|
||||
Burst completed: 1000 events in 142.916647ms
|
||||
Burst completed: 1000 events in 166.771131ms
|
||||
Burst completed: 1000 events in 160.016244ms
|
||||
Burst completed: 1000 events in 156.369302ms
|
||||
Burst completed: 1000 events in 158.850666ms
|
||||
Burst completed: 1000 events in 154.842287ms
|
||||
Burst completed: 1000 events in 146.828122ms
|
||||
Burst test completed: 10000 events in 6.557799732s
|
||||
Events/sec: 1524.90
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4782 reads in 1m0.043775785s
|
||||
Combined ops/sec: 162.91
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 553.803776ms
|
||||
Total Events: 10000
|
||||
Events/sec: 18056.94
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 153 MB
|
||||
Avg Latency: 428.869µs
|
||||
P90 Latency: 558.663µs
|
||||
P95 Latency: 607.997µs
|
||||
P99 Latency: 749.787µs
|
||||
Bottom 10% Avg Latency: 643.51µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.70096222s
|
||||
Total Events: 10000
|
||||
Events/sec: 1492.32
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 204 MB
|
||||
Avg Latency: 178.04µs
|
||||
P90 Latency: 224.367µs
|
||||
P95 Latency: 243.954µs
|
||||
P99 Latency: 318.225µs
|
||||
Bottom 10% Avg Latency: 264.418µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 35.645619485s
|
||||
Total Events: 10000
|
||||
Events/sec: 280.54
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 120 MB
|
||||
Avg Latency: 9.118653ms
|
||||
P90 Latency: 19.852346ms
|
||||
P95 Latency: 21.665387ms
|
||||
P99 Latency: 23.946919ms
|
||||
Bottom 10% Avg Latency: 21.867062ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 596.985601ms
|
||||
Total Events: 10000
|
||||
Events/sec: 16750.82
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 121 MB
|
||||
Avg Latency: 465.438µs
|
||||
P90 Latency: 594.151µs
|
||||
P95 Latency: 636.592µs
|
||||
P99 Latency: 757.953µs
|
||||
Bottom 10% Avg Latency: 672.673µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.557799732s
|
||||
Total Events: 10000
|
||||
Events/sec: 1524.90
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 167 MB
|
||||
Avg Latency: 189.538µs
|
||||
P90 Latency: 247.511µs
|
||||
P95 Latency: 274.011µs
|
||||
P99 Latency: 360.977µs
|
||||
Bottom 10% Avg Latency: 296.967µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.043775785s
|
||||
Total Events: 9782
|
||||
Events/sec: 162.91
|
||||
Success Rate: 97.8%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 193 MB
|
||||
Avg Latency: 19.562536ms
|
||||
P90 Latency: 43.431835ms
|
||||
P95 Latency: 46.326204ms
|
||||
P99 Latency: 50.533302ms
|
||||
Bottom 10% Avg Latency: 46.979603ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_next-orly_8/benchmark_report.adoc
|
||||
20250912222930150767 INF /tmp/benchmark_next-orly_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912222931147258 INF /tmp/benchmark_next-orly_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912222931149928 INF /tmp/benchmark_next-orly_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: next-orly
|
||||
RELAY_URL: ws://next-orly:8080
|
||||
TEST_TIMESTAMP: 2025-09-12T22:29:31+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,190 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_nostr-rs-relay_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912224044213613 INF /tmp/benchmark_nostr-rs-relay_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912224044214094 INF /tmp/benchmark_nostr-rs-relay_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912224044214130 INF /tmp/benchmark_nostr-rs-relay_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912224044214381 INF (*types.Uint32)(0xc000233c3c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912224044214413 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 613.036589ms
|
||||
Events/sec: 16312.24
|
||||
Avg latency: 476.418µs
|
||||
P90 latency: 627.852µs
|
||||
P95 latency: 686.836µs
|
||||
P99 latency: 841.471µs
|
||||
Bottom 10% Avg latency: 722.179µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 156.218882ms
|
||||
Burst completed: 1000 events in 170.25756ms
|
||||
Burst completed: 1000 events in 164.944293ms
|
||||
Burst completed: 1000 events in 162.767866ms
|
||||
Burst completed: 1000 events in 148.744622ms
|
||||
Burst completed: 1000 events in 163.556351ms
|
||||
Burst completed: 1000 events in 172.007512ms
|
||||
Burst completed: 1000 events in 159.806858ms
|
||||
Burst completed: 1000 events in 168.086258ms
|
||||
Burst completed: 1000 events in 164.931889ms
|
||||
Burst test completed: 10000 events in 6.657581804s
|
||||
Events/sec: 1502.05
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 34.850355805s
|
||||
Combined ops/sec: 286.94
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 707.652249ms
|
||||
Events/sec: 14131.23
|
||||
Avg latency: 551.706µs
|
||||
P90 latency: 724.937µs
|
||||
P95 latency: 790.563µs
|
||||
P99 latency: 980.677µs
|
||||
Bottom 10% Avg latency: 836.659µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 164.62419ms
|
||||
Burst completed: 1000 events in 155.938167ms
|
||||
Burst completed: 1000 events in 132.903056ms
|
||||
Burst completed: 1000 events in 142.377596ms
|
||||
Burst completed: 1000 events in 155.024184ms
|
||||
Burst completed: 1000 events in 147.095521ms
|
||||
Burst completed: 1000 events in 150.027389ms
|
||||
Burst completed: 1000 events in 152.873043ms
|
||||
Burst completed: 1000 events in 150.635479ms
|
||||
Burst completed: 1000 events in 146.45553ms
|
||||
Burst test completed: 10000 events in 6.519122877s
|
||||
Events/sec: 1533.95
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4806 reads in 1m0.03930731s
|
||||
Combined ops/sec: 163.33
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 613.036589ms
|
||||
Total Events: 10000
|
||||
Events/sec: 16312.24
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 476.418µs
|
||||
P90 Latency: 627.852µs
|
||||
P95 Latency: 686.836µs
|
||||
P99 Latency: 841.471µs
|
||||
Bottom 10% Avg Latency: 722.179µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.657581804s
|
||||
Total Events: 10000
|
||||
Events/sec: 1502.05
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 209 MB
|
||||
Avg Latency: 182.765µs
|
||||
P90 Latency: 234.409µs
|
||||
P95 Latency: 257.082µs
|
||||
P99 Latency: 330.764µs
|
||||
Bottom 10% Avg Latency: 277.843µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 34.850355805s
|
||||
Total Events: 10000
|
||||
Events/sec: 286.94
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 221 MB
|
||||
Avg Latency: 8.802188ms
|
||||
P90 Latency: 19.075904ms
|
||||
P95 Latency: 20.680962ms
|
||||
P99 Latency: 22.78326ms
|
||||
Bottom 10% Avg Latency: 20.897398ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 707.652249ms
|
||||
Total Events: 10000
|
||||
Events/sec: 14131.23
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 120 MB
|
||||
Avg Latency: 551.706µs
|
||||
P90 Latency: 724.937µs
|
||||
P95 Latency: 790.563µs
|
||||
P99 Latency: 980.677µs
|
||||
Bottom 10% Avg Latency: 836.659µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.519122877s
|
||||
Total Events: 10000
|
||||
Events/sec: 1533.95
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 168 MB
|
||||
Avg Latency: 204.873µs
|
||||
P90 Latency: 271.569µs
|
||||
P95 Latency: 329.28µs
|
||||
P99 Latency: 558.829µs
|
||||
Bottom 10% Avg Latency: 380.136µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.03930731s
|
||||
Total Events: 9806
|
||||
Events/sec: 163.33
|
||||
Success Rate: 98.1%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 164 MB
|
||||
Avg Latency: 19.506135ms
|
||||
P90 Latency: 43.206775ms
|
||||
P95 Latency: 45.944446ms
|
||||
P99 Latency: 49.910436ms
|
||||
Bottom 10% Avg Latency: 46.417943ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.adoc
|
||||
20250912224323628137 INF /tmp/benchmark_nostr-rs-relay_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912224324180883 INF /tmp/benchmark_nostr-rs-relay_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912224324184069 INF /tmp/benchmark_nostr-rs-relay_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: nostr-rs-relay
|
||||
RELAY_URL: ws://nostr-rs-relay:8080
|
||||
TEST_TIMESTAMP: 2025-09-12T22:43:24+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,190 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_relayer-basic_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912223509638362 INF /tmp/benchmark_relayer-basic_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912223509638864 INF /tmp/benchmark_relayer-basic_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912223509638903 INF /tmp/benchmark_relayer-basic_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912223509639558 INF (*types.Uint32)(0xc00570005c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912223509639620 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 659.848301ms
|
||||
Events/sec: 15155.00
|
||||
Avg latency: 513.243µs
|
||||
P90 latency: 706.89µs
|
||||
P95 latency: 792.685µs
|
||||
P99 latency: 1.089215ms
|
||||
Bottom 10% Avg latency: 864.746µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 142.551144ms
|
||||
Burst completed: 1000 events in 137.426595ms
|
||||
Burst completed: 1000 events in 139.51501ms
|
||||
Burst completed: 1000 events in 143.683041ms
|
||||
Burst completed: 1000 events in 136.500167ms
|
||||
Burst completed: 1000 events in 139.573844ms
|
||||
Burst completed: 1000 events in 145.873173ms
|
||||
Burst completed: 1000 events in 144.256594ms
|
||||
Burst completed: 1000 events in 157.89329ms
|
||||
Burst completed: 1000 events in 153.882313ms
|
||||
Burst test completed: 10000 events in 6.47066659s
|
||||
Events/sec: 1545.44
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 37.483034098s
|
||||
Combined ops/sec: 266.79
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 701.479526ms
|
||||
Events/sec: 14255.58
|
||||
Avg latency: 544.692µs
|
||||
P90 latency: 742.997µs
|
||||
P95 latency: 845.975µs
|
||||
P99 latency: 1.147624ms
|
||||
Bottom 10% Avg latency: 913.45µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 143.063212ms
|
||||
Burst completed: 1000 events in 139.422008ms
|
||||
Burst completed: 1000 events in 138.184516ms
|
||||
Burst completed: 1000 events in 148.207616ms
|
||||
Burst completed: 1000 events in 137.663883ms
|
||||
Burst completed: 1000 events in 141.607643ms
|
||||
Burst completed: 1000 events in 143.668551ms
|
||||
Burst completed: 1000 events in 140.467359ms
|
||||
Burst completed: 1000 events in 139.860509ms
|
||||
Burst completed: 1000 events in 138.328306ms
|
||||
Burst test completed: 10000 events in 6.43971118s
|
||||
Events/sec: 1552.86
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4870 reads in 1m0.034216467s
|
||||
Combined ops/sec: 164.41
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 659.848301ms
|
||||
Total Events: 10000
|
||||
Events/sec: 15155.00
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 153 MB
|
||||
Avg Latency: 513.243µs
|
||||
P90 Latency: 706.89µs
|
||||
P95 Latency: 792.685µs
|
||||
P99 Latency: 1.089215ms
|
||||
Bottom 10% Avg Latency: 864.746µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.47066659s
|
||||
Total Events: 10000
|
||||
Events/sec: 1545.44
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 206 MB
|
||||
Avg Latency: 273.645µs
|
||||
P90 Latency: 407.483µs
|
||||
P95 Latency: 498.989µs
|
||||
P99 Latency: 772.406µs
|
||||
Bottom 10% Avg Latency: 574.801µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 37.483034098s
|
||||
Total Events: 10000
|
||||
Events/sec: 266.79
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 163 MB
|
||||
Avg Latency: 9.873363ms
|
||||
P90 Latency: 21.643466ms
|
||||
P95 Latency: 22.924497ms
|
||||
P99 Latency: 24.961324ms
|
||||
Bottom 10% Avg Latency: 23.201171ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 701.479526ms
|
||||
Total Events: 10000
|
||||
Events/sec: 14255.58
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 153 MB
|
||||
Avg Latency: 544.692µs
|
||||
P90 Latency: 742.997µs
|
||||
P95 Latency: 845.975µs
|
||||
P99 Latency: 1.147624ms
|
||||
Bottom 10% Avg Latency: 913.45µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.43971118s
|
||||
Total Events: 10000
|
||||
Events/sec: 1552.86
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 204 MB
|
||||
Avg Latency: 266.006µs
|
||||
P90 Latency: 402.683µs
|
||||
P95 Latency: 491.253µs
|
||||
P99 Latency: 715.735µs
|
||||
Bottom 10% Avg Latency: 553.762µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.034216467s
|
||||
Total Events: 9870
|
||||
Events/sec: 164.41
|
||||
Success Rate: 98.7%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 184 MB
|
||||
Avg Latency: 19.308183ms
|
||||
P90 Latency: 42.766459ms
|
||||
P95 Latency: 45.372157ms
|
||||
P99 Latency: 49.993951ms
|
||||
Bottom 10% Avg Latency: 46.189525ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.adoc
|
||||
20250912223751453794 INF /tmp/benchmark_relayer-basic_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912223752488197 INF /tmp/benchmark_relayer-basic_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912223752491495 INF /tmp/benchmark_relayer-basic_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: relayer-basic
|
||||
RELAY_URL: ws://relayer-basic:7447
|
||||
TEST_TIMESTAMP: 2025-09-12T22:37:52+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
190
cmd/benchmark/reports/run_20250912_222649/strfry_results.txt
Normal file
190
cmd/benchmark/reports/run_20250912_222649/strfry_results.txt
Normal file
@@ -0,0 +1,190 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_strfry_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912223757656112 INF /tmp/benchmark_strfry_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912223757657685 INF /tmp/benchmark_strfry_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912223757657767 INF /tmp/benchmark_strfry_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912223757658314 INF (*types.Uint32)(0xc0055c63ac)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912223757658385 INF migrating to version 1... /build/pkg/database/migrations.go:79

=== Starting test round 1/2 ===

=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 655.950723ms
Events/sec: 15245.05
Avg latency: 510.383µs
P90 latency: 690.815µs
P95 latency: 769.085µs
P99 latency: 1.000349ms
Bottom 10% Avg latency: 831.211µs

=== Burst Pattern Test ===
Burst completed: 1000 events in 168.844089ms
Burst completed: 1000 events in 138.644286ms
Burst completed: 1000 events in 167.717113ms
Burst completed: 1000 events in 141.566337ms
Burst completed: 1000 events in 141.186447ms
Burst completed: 1000 events in 145.845582ms
Burst completed: 1000 events in 142.834263ms
Burst completed: 1000 events in 144.707595ms
Burst completed: 1000 events in 144.096361ms
Burst completed: 1000 events in 158.524931ms
Burst test completed: 10000 events in 6.520630606s
Events/sec: 1533.59

=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 36.04854491s
Combined ops/sec: 277.40

Pausing 10s before next round...

=== Starting test round 2/2 ===

=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 644.867085ms
Events/sec: 15507.07
Avg latency: 501.972µs
P90 latency: 650.197µs
P95 latency: 709.37µs
P99 latency: 914.673µs
Bottom 10% Avg latency: 754.969µs

=== Burst Pattern Test ===
Burst completed: 1000 events in 133.763626ms
Burst completed: 1000 events in 135.289448ms
Burst completed: 1000 events in 136.874215ms
Burst completed: 1000 events in 135.118277ms
Burst completed: 1000 events in 139.247778ms
Burst completed: 1000 events in 142.262475ms
Burst completed: 1000 events in 141.21783ms
Burst completed: 1000 events in 143.089554ms
Burst completed: 1000 events in 148.027057ms
Burst completed: 1000 events in 150.006497ms
Burst test completed: 10000 events in 6.429121967s
Events/sec: 1555.42

=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4857 reads in 1m0.047885362s
Combined ops/sec: 164.15

================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 655.950723ms
Total Events: 10000
Events/sec: 15245.05
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 154 MB
Avg Latency: 510.383µs
P90 Latency: 690.815µs
P95 Latency: 769.085µs
P99 Latency: 1.000349ms
Bottom 10% Avg Latency: 831.211µs
----------------------------------------

Test: Burst Pattern
Duration: 6.520630606s
Total Events: 10000
Events/sec: 1533.59
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 208 MB
Avg Latency: 223.359µs
P90 Latency: 321.256µs
P95 Latency: 378.145µs
P99 Latency: 530.597µs
Bottom 10% Avg Latency: 412.953µs
----------------------------------------

Test: Mixed Read/Write
Duration: 36.04854491s
Total Events: 10000
Events/sec: 277.40
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 222 MB
Avg Latency: 9.309397ms
P90 Latency: 20.403594ms
P95 Latency: 22.152884ms
P99 Latency: 24.513304ms
Bottom 10% Avg Latency: 22.447709ms
----------------------------------------

Test: Peak Throughput
Duration: 644.867085ms
Total Events: 10000
Events/sec: 15507.07
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 125 MB
Avg Latency: 501.972µs
P90 Latency: 650.197µs
P95 Latency: 709.37µs
P99 Latency: 914.673µs
Bottom 10% Avg Latency: 754.969µs
----------------------------------------

Test: Burst Pattern
Duration: 6.429121967s
Total Events: 10000
Events/sec: 1555.42
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 170 MB
Avg Latency: 239.454µs
P90 Latency: 335.133µs
P95 Latency: 408.012µs
P99 Latency: 593.458µs
Bottom 10% Avg Latency: 446.804µs
----------------------------------------

Test: Mixed Read/Write
Duration: 1m0.047885362s
Total Events: 9857
Events/sec: 164.15
Success Rate: 98.6%
Concurrent Workers: 8
Memory Used: 189 MB
Avg Latency: 19.373297ms
P90 Latency: 42.953055ms
P95 Latency: 45.636867ms
P99 Latency: 49.71977ms
Bottom 10% Avg Latency: 46.144029ms
----------------------------------------

Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_strfry_8/benchmark_report.adoc
20250912224038033173 INF /tmp/benchmark_strfry_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912224039055498 INF /tmp/benchmark_strfry_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912224039058214 INF /tmp/benchmark_strfry_8: database closed /build/pkg/database/database.go:134

RELAY_NAME: strfry
RELAY_URL: ws://strfry:8080
TEST_TIMESTAMP: 2025-09-12T22:40:39+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
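The per-test P90/P95/P99 and "Bottom 10% Avg" lines above are rank statistics over the recorded per-event latencies. The Go sketch below shows one conventional way such figures can be derived (nearest-rank percentile plus an average of the slowest decile); it is an illustration under that assumption, not the benchmark tool's actual code, and all names in it are hypothetical.

package main

import (
	"fmt"
	"math"
	"sort"
	"time"
)

// percentile picks the nearest-rank percentile from an ascending-sorted slice.
func percentile(sorted []time.Duration, p float64) time.Duration {
	if len(sorted) == 0 {
		return 0
	}
	rank := int(math.Ceil(float64(len(sorted)) * p / 100.0))
	if rank < 1 {
		rank = 1
	}
	return sorted[rank-1]
}

// slowestDecileAvg averages the slowest 10% of samples, one plausible reading
// of the report's "Bottom 10% Avg latency" figure.
func slowestDecileAvg(sorted []time.Duration) time.Duration {
	if len(sorted) == 0 {
		return 0
	}
	n := len(sorted) / 10
	if n == 0 {
		n = 1
	}
	var sum time.Duration
	for _, d := range sorted[len(sorted)-n:] {
		sum += d
	}
	return sum / time.Duration(n)
}

func main() {
	// Hypothetical latency samples, only to exercise the helpers above.
	samples := []time.Duration{
		420 * time.Microsecond, 510 * time.Microsecond, 640 * time.Microsecond,
		690 * time.Microsecond, 770 * time.Microsecond, 1 * time.Millisecond,
	}
	sort.Slice(samples, func(i, j int) bool { return samples[i] < samples[j] })
	fmt.Println("P90:", percentile(samples, 90))
	fmt.Println("P99:", percentile(samples, 99))
	fmt.Println("Bottom 10% avg:", slowestDecileAvg(samples))
}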
368 cmd/benchmark/setup-external-relays.sh Executable file
@@ -0,0 +1,368 @@
#!/bin/bash

# Setup script for downloading and configuring external relay repositories
# for benchmarking

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
EXTERNAL_DIR="${SCRIPT_DIR}/external"

echo "Setting up external relay repositories for benchmarking..."

# Create external directory
mkdir -p "${EXTERNAL_DIR}"

# Function to clone or update repository
clone_or_update() {
    local repo_url="$1"
    local repo_dir="$2"
    local repo_name="$3"

    echo "Setting up ${repo_name}..."

    if [ -d "${repo_dir}" ]; then
        echo " ${repo_name} already exists, updating..."
        cd "${repo_dir}"
        git pull origin main 2>/dev/null || git pull origin master 2>/dev/null || true
        cd - > /dev/null
    else
        echo " Cloning ${repo_name}..."
        git clone "${repo_url}" "${repo_dir}"
    fi
}

# Clone khatru
clone_or_update "https://github.com/fiatjaf/khatru.git" "${EXTERNAL_DIR}/khatru" "Khatru"

# Clone relayer
clone_or_update "https://github.com/fiatjaf/relayer.git" "${EXTERNAL_DIR}/relayer" "Relayer"

# Clone strfry
clone_or_update "https://github.com/hoytech/strfry.git" "${EXTERNAL_DIR}/strfry" "Strfry"

# Clone nostr-rs-relay
clone_or_update "https://git.sr.ht/~gheartsfield/nostr-rs-relay" "${EXTERNAL_DIR}/nostr-rs-relay" "Nostr-rs-relay"

echo "Creating Dockerfiles for external relays..."

# Create Dockerfile for Khatru SQLite
cat > "${SCRIPT_DIR}/Dockerfile.khatru-sqlite" << 'EOF'
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic-sqlite example
RUN cd examples/basic-sqlite && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o khatru-sqlite .

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic-sqlite/khatru-sqlite /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/khatru.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-sqlite"]
EOF

# Create Dockerfile for Khatru Badger
cat > "${SCRIPT_DIR}/Dockerfile.khatru-badger" << 'EOF'
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates

WORKDIR /build
COPY . .

# Build the basic-badger example
RUN cd examples/basic-badger && \
    go mod tidy && \
    CGO_ENABLED=0 go build -o khatru-badger .

FROM alpine:latest
RUN apk --no-cache add ca-certificates wget
WORKDIR /app
COPY --from=builder /build/examples/basic-badger/khatru-badger /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/badger
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-badger"]
EOF

# Create Dockerfile for Relayer basic example
cat > "${SCRIPT_DIR}/Dockerfile.relayer-basic" << 'EOF'
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic example
RUN cd examples/basic && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o relayer-basic .

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic/relayer-basic /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/relayer.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/relayer-basic"]
EOF

# Create Dockerfile for Strfry
cat > "${SCRIPT_DIR}/Dockerfile.strfry" << 'EOF'
FROM ubuntu:22.04 AS builder

ENV DEBIAN_FRONTEND=noninteractive

# Install build dependencies
RUN apt-get update && apt-get install -y \
    git \
    build-essential \
    liblmdb-dev \
    libsecp256k1-dev \
    pkg-config \
    libtool \
    autoconf \
    automake \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build
COPY . .

# Build strfry
RUN make setup-golpe && \
    make -j$(nproc)

FROM ubuntu:22.04
RUN apt-get update && apt-get install -y \
    liblmdb0 \
    libsecp256k1-0 \
    curl \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
COPY --from=builder /build/strfry /app/
RUN mkdir -p /data

EXPOSE 8080
ENV STRFRY_DB_PATH=/data/strfry.lmdb
ENV STRFRY_RELAY_PORT=8080

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8080 || exit 1

CMD ["/app/strfry", "relay"]
EOF

# Create Dockerfile for nostr-rs-relay
cat > "${SCRIPT_DIR}/Dockerfile.nostr-rs-relay" << 'EOF'
FROM rust:1.70-alpine AS builder

RUN apk add --no-cache musl-dev sqlite-dev

WORKDIR /build
COPY . .

# Build the relay
RUN cargo build --release

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/target/release/nostr-rs-relay /app/
RUN mkdir -p /data

EXPOSE 8080
ENV RUST_LOG=info

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1

CMD ["/app/nostr-rs-relay"]
EOF

echo "Creating configuration files..."

# Create configs directory
mkdir -p "${SCRIPT_DIR}/configs"

# Create strfry configuration
cat > "${SCRIPT_DIR}/configs/strfry.conf" << 'EOF'
##
## Default strfry config
##

# Directory that contains the strfry LMDB database (restart required)
db = "/data/strfry.lmdb"

dbParams {
    # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
    maxreaders = 256

    # Size of mmap to use when loading LMDB (default is 1TB, which is probably reasonable) (restart required)
    mapsize = 1099511627776
}

relay {
    # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
    bind = "0.0.0.0"

    # Port to open for the nostr websocket protocol (restart required)
    port = 8080

    # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
    nofiles = 1000000

    # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
    realIpHeader = ""

    info {
        # NIP-11: Name of this server. Short/descriptive (< 30 characters)
        name = "strfry benchmark"

        # NIP-11: Detailed description of this server, free-form
        description = "A strfry relay for benchmarking"

        # NIP-11: Administrative pubkey, for contact purposes
        pubkey = ""

        # NIP-11: Alternative contact for this server
        contact = ""
    }

    # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
    maxWebsocketPayloadSize = 131072

    # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
    autoPingSeconds = 55

    # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy) (restart required)
    enableTcpKeepalive = false

    # How much uninterrupted CPU time a REQ query should get during its DB scan
    queryTimesliceBudgetMicroseconds = 10000

    # Maximum records that can be returned per filter
    maxFilterLimit = 500

    # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
    maxSubsPerConnection = 20

    writePolicy {
        # If non-empty, path to an executable script that implements the writePolicy plugin logic
        plugin = ""
    }

    compression {
        # Use permessage-deflate compression if supported by client. Reduces bandwidth, but uses more CPU (restart required)
        enabled = true

        # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
        slidingWindow = true
    }

    logging {
        # Dump all incoming messages
        dumpInAll = false

        # Dump all incoming EVENT messages
        dumpInEvents = false

        # Dump all incoming REQ/CLOSE messages
        dumpInReqs = false

        # Log performance metrics for initial REQ database scans
        dbScanPerf = false
    }

    numThreads {
        # Ingester threads: route incoming requests, validate events/sigs (restart required)
        ingester = 3

        # reqWorker threads: Handle initial DB scan for events (restart required)
        reqWorker = 3

        # reqMonitor threads: Handle filtering of new events (restart required)
        reqMonitor = 3

        # yesstr threads: experimental yesstr protocol (restart required)
        yesstr = 1
    }
}
EOF

# Create nostr-rs-relay configuration
cat > "${SCRIPT_DIR}/configs/config.toml" << 'EOF'
[info]
relay_url = "ws://localhost:8080"
name = "nostr-rs-relay benchmark"
description = "A nostr-rs-relay for benchmarking"
pubkey = ""
contact = ""

[database]
data_directory = "/data"
in_memory = false
engine = "sqlite"

[network]
port = 8080
address = "0.0.0.0"

[limits]
messages_per_sec = 0
subscriptions_per_min = 0
max_event_bytes = 65535
max_ws_message_bytes = 131072
max_ws_frame_bytes = 131072

[authorization]
pubkey_whitelist = []

[verified_users]
mode = "passive"
domain_whitelist = []
domain_blacklist = []

[pay_to_relay]
enabled = false

[options]
reject_future_seconds = 30
EOF

echo "Creating data directories..."
mkdir -p "${SCRIPT_DIR}/data"/{next-orly,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay}
mkdir -p "${SCRIPT_DIR}/reports"

echo "Setup complete!"
echo ""
echo "External relay repositories have been cloned to: ${EXTERNAL_DIR}"
echo "Dockerfiles have been created for all relay implementations"
echo "Configuration files have been created in: ${SCRIPT_DIR}/configs"
echo "Data directories have been created in: ${SCRIPT_DIR}/data"
echo ""
echo "To run the benchmark:"
echo " cd ${SCRIPT_DIR}"
echo " docker-compose up --build"
echo ""
echo "Reports will be generated in: ${SCRIPT_DIR}/reports"
@@ -6,10 +6,10 @@ import (
	"os"
	"strings"

	"crypto.orly/ec/schnorr"
	"crypto.orly/ec/secp256k1"
	b32 "encoders.orly/bech32encoding"
	"encoders.orly/hex"
	"next.orly.dev/pkg/crypto/ec/schnorr"
	"next.orly.dev/pkg/crypto/ec/secp256k1"
	b32 "next.orly.dev/pkg/encoders/bech32encoding"
	"next.orly.dev/pkg/encoders/hex"
)

func usage() {
45 go.mod
@@ -3,24 +3,29 @@ module next.orly.dev
go 1.25.0

require (
	acl.orly v0.0.0-00010101000000-000000000000
	crypto.orly v0.0.0-00010101000000-000000000000
	database.orly v0.0.0-00010101000000-000000000000
	encoders.orly v0.0.0-00010101000000-000000000000
	github.com/adrg/xdg v0.5.3
	github.com/coder/websocket v1.8.13
	github.com/davecgh/go-spew v1.1.1
	github.com/dgraph-io/badger/v4 v4.8.0
	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
	github.com/klauspost/cpuid/v2 v2.3.0
	github.com/pkg/profile v1.7.0
	github.com/puzpuzpuz/xsync/v3 v3.5.1
	github.com/stretchr/testify v1.10.0
	github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b
	go-simpler.org/env v0.12.0
	interfaces.orly v0.0.0-00010101000000-000000000000
	go.uber.org/atomic v1.11.0
	golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b
	golang.org/x/lint v0.0.0-20241112194109-818c5a804067
	golang.org/x/net v0.43.0
	honnef.co/go/tools v0.6.1
	lol.mleku.dev v1.0.2
	protocol.orly v0.0.0-00010101000000-000000000000
	utils.orly v0.0.0-00010101000000-000000000000
	lukechampine.com/frand v1.5.1
)

require (
	github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/fatih/color v1.18.0 // indirect
@@ -29,32 +34,20 @@ require (
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/google/flatbuffers v25.2.10+incompatible // indirect
	github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/klauspost/cpuid/v2 v2.3.0 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/templexxx/cpu v0.0.1 // indirect
	github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/otel v1.37.0 // indirect
	go.opentelemetry.io/otel/metric v1.37.0 // indirect
	go.opentelemetry.io/otel/trace v1.37.0 // indirect
	go.uber.org/atomic v1.11.0 // indirect
	golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect
	golang.org/x/net v0.41.0 // indirect
	golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect
	golang.org/x/mod v0.27.0 // indirect
	golang.org/x/sync v0.16.0 // indirect
	golang.org/x/sys v0.35.0 // indirect
	golang.org/x/tools v0.36.0 // indirect
	google.golang.org/protobuf v1.36.6 // indirect
	lukechampine.com/frand v1.5.1 // indirect
)

replace (
	acl.orly => ./pkg/acl
	crypto.orly => ./pkg/crypto
	database.orly => ./pkg/database
	encoders.orly => ./pkg/encoders
	interfaces.orly => ./pkg/interfaces
	next.orly.dev => ../../
	protocol.orly => ./pkg/protocol
	utils.orly => ./pkg/utils
	gopkg.in/yaml.v3 v3.0.1 // indirect
)
45 go.sum
@@ -1,3 +1,5 @@
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
|
||||
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
@@ -40,6 +42,10 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
@@ -48,12 +54,16 @@ github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
|
||||
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
|
||||
@@ -70,20 +80,47 @@ go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mx
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ=
|
||||
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/lint v0.0.0-20241112194109-818c5a804067 h1:adDmSQyFTCiv19j015EGKJBoaa7ElV0Q1Wovb/4G7NA=
|
||||
golang.org/x/lint v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
|
||||
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
|
||||
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
|
||||
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
|
||||
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c=
|
||||
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
|
||||
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
|
||||
|
||||
54 main.go
@@ -3,25 +3,40 @@ package main
import (
	"context"
	"fmt"
	"net/http"
	"os"
	"os/signal"
	"runtime"
	"time"

	acl "acl.orly"
	database "database.orly"
	"github.com/pkg/profile"
	"lol.mleku.dev/chk"
	"lol.mleku.dev/log"
	"next.orly.dev/app"
	"next.orly.dev/app/config"
	"next.orly.dev/pkg/acl"
	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/version"
)

func main() {
	runtime.GOMAXPROCS(runtime.NumCPU() * 4)
	var err error
	var cfg *config.C
	if cfg, err = config.New(); chk.T(err) {
	}
	log.I.F("starting %s %s", cfg.AppName, version.V)
	startProfiler(cfg.Pprof)
	switch cfg.Pprof {
	case "cpu":
		prof := profile.Start(profile.CPUProfile)
		defer prof.Stop()
	case "memory":
		prof := profile.Start(profile.MemProfile)
		defer prof.Stop()
	case "allocation":
		prof := profile.Start(profile.MemProfileAllocs)
		defer prof.Stop()
	}
	ctx, cancel := context.WithCancel(context.Background())
	var db *database.D
	if db, err = database.New(
@@ -34,6 +49,39 @@ func main() {
		os.Exit(1)
	}
	acl.Registry.Syncer()

	// Start health check HTTP server if configured
	var healthSrv *http.Server
	if cfg.HealthPort > 0 {
		mux := http.NewServeMux()
		mux.HandleFunc(
			"/healthz", func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
				_, _ = w.Write([]byte("ok"))
				log.I.F("health check ok")
			},
		)
		healthSrv = &http.Server{
			Addr: fmt.Sprintf(
				"%s:%d", cfg.Listen, cfg.HealthPort,
			), Handler: mux,
		}
		go func() {
			log.I.F("health check server listening on %s", healthSrv.Addr)
			if err := healthSrv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
				log.E.F("health server error: %v", err)
			}
		}()
		go func() {
			<-ctx.Done()
			shutdownCtx, cancelShutdown := context.WithTimeout(
				context.Background(), 2*time.Second,
			)
			defer cancelShutdown()
			_ = healthSrv.Shutdown(shutdownCtx)
		}()
	}

	quit := app.Run(ctx, cfg, db)
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt)
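The hunk above adds an optional health check listener guarded by cfg.HealthPort, serving HTTP 200 with the body "ok" at /healthz. As a hedged illustration only, a client-side probe could look like the Go sketch below; the host and port are assumptions for the example, not values taken from the repository.

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	// Assumes the relay's health listener is reachable on 127.0.0.1:8081;
	// both values are hypothetical and only for illustration.
	client := &http.Client{Timeout: 2 * time.Second}
	resp, err := client.Get("http://127.0.0.1:8081/healthz")
	if err != nil {
		fmt.Println("relay not healthy:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	// The handler in the diff responds with status 200 and the body "ok".
	fmt.Printf("status=%d body=%q\n", resp.StatusCode, string(body))
}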
@@ -1,8 +1,8 @@
package acl

import (
	"interfaces.orly/acl"
	"utils.orly/atomic"
	"next.orly.dev/pkg/interfaces/acl"
	"next.orly.dev/pkg/utils/atomic"
)

var Registry = &S{}
@@ -7,32 +7,34 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
database "database.orly"
|
||||
"database.orly/indexes/types"
|
||||
"encoders.orly/bech32encoding"
|
||||
"encoders.orly/envelopes"
|
||||
"encoders.orly/envelopes/eoseenvelope"
|
||||
"encoders.orly/envelopes/eventenvelope"
|
||||
"encoders.orly/envelopes/reqenvelope"
|
||||
"encoders.orly/event"
|
||||
"encoders.orly/filter"
|
||||
"encoders.orly/hex"
|
||||
"encoders.orly/kind"
|
||||
"encoders.orly/tag"
|
||||
"github.com/coder/websocket"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/errorf"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/app/config"
|
||||
utils "utils.orly"
|
||||
"utils.orly/normalize"
|
||||
"utils.orly/values"
|
||||
"next.orly.dev/pkg/database"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
"next.orly.dev/pkg/encoders/bech32encoding"
|
||||
"next.orly.dev/pkg/encoders/envelopes"
|
||||
"next.orly.dev/pkg/encoders/envelopes/eoseenvelope"
|
||||
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||
"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/filter"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/encoders/kind"
|
||||
"next.orly.dev/pkg/encoders/tag"
|
||||
"next.orly.dev/pkg/protocol/publish"
|
||||
"next.orly.dev/pkg/utils"
|
||||
"next.orly.dev/pkg/utils/normalize"
|
||||
"next.orly.dev/pkg/utils/values"
|
||||
)
|
||||
|
||||
type Follows struct {
|
||||
Ctx context.Context
|
||||
cfg *config.C
|
||||
*database.D
|
||||
pubs *publish.S
|
||||
followsMx sync.RWMutex
|
||||
admins [][]byte
|
||||
follows [][]byte
|
||||
@@ -53,6 +55,9 @@ func (f *Follows) Configure(cfg ...any) (err error) {
|
||||
case context.Context:
|
||||
// log.D.F("setting ACL context: %s", c.Value("id"))
|
||||
f.Ctx = c
|
||||
case *publish.S:
|
||||
// set publisher for dispatching new events
|
||||
f.pubs = c
|
||||
default:
|
||||
err = errorf.E("invalid type: %T", reflect.TypeOf(ca))
|
||||
}
|
||||
@@ -74,7 +79,7 @@ func (f *Follows) Configure(cfg ...any) (err error) {
|
||||
} else {
|
||||
adm = a
|
||||
}
|
||||
log.I.F("admin: %0x", adm)
|
||||
// log.I.F("admin: %0x", adm)
|
||||
f.admins = append(f.admins, adm)
|
||||
fl := &filter.F{
|
||||
Authors: tag.NewFromAny(adm),
|
||||
@@ -224,6 +229,15 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
|
||||
c, _, err := websocket.Dial(ctx, u, nil)
|
||||
if err != nil {
|
||||
log.W.F("follows syncer: dial %s failed: %v", u, err)
|
||||
if strings.Contains(
|
||||
err.Error(), "response status code 101 but got 403",
|
||||
) {
|
||||
// 403 means the relay is not accepting connections from
|
||||
// us. Forbidden is the meaning, usually used to
|
||||
// indicate either the IP or user is blocked. so stop
|
||||
// trying this one.
|
||||
return
|
||||
}
|
||||
timer := time.NewTimer(backoff)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
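The hunk above makes the follows syncer treat a 403 during the websocket handshake as permanent (it stops retrying that relay) and otherwise fall back to a timer-based retry. A minimal Go sketch of that reconnect pattern follows; the dial helper, backoff values, and URL are hypothetical and make no claim to match the relay's actual code.

package main

import (
	"context"
	"errors"
	"strings"
	"time"
)

// dialOnce is a stand-in for the real websocket dial; it is hypothetical and
// always fails here so the example terminates quickly.
func dialOnce(ctx context.Context, url string) error {
	_ = ctx
	_ = url
	return errors.New("failed to WebSocket dial: expected handshake response status code 101 but got 403")
}

// connectWithBackoff retries until the context ends, doubling the wait after
// each failure, and gives up immediately on a 403-style rejection.
func connectWithBackoff(ctx context.Context, url string) {
	backoff := time.Second
	for {
		err := dialOnce(ctx, url)
		if err == nil {
			return
		}
		if strings.Contains(err.Error(), "status code 101 but got 403") {
			// Forbidden: the relay is refusing us, so stop retrying this one.
			return
		}
		timer := time.NewTimer(backoff)
		select {
		case <-ctx.Done():
			timer.Stop()
			return
		case <-timer.C:
		}
		if backoff < time.Minute {
			backoff *= 2
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	connectWithBackoff(ctx, "ws://relay.example/")
}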
@@ -290,11 +304,16 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
|
||||
)
|
||||
}
|
||||
// ignore duplicates and continue
|
||||
} else {
|
||||
// Only dispatch if the event was newly saved (no error)
|
||||
if f.pubs != nil {
|
||||
go f.pubs.Deliver(res.Event)
|
||||
}
|
||||
// log.I.F(
|
||||
// "saved new event from follows syncer: %0x",
|
||||
// res.Event.ID,
|
||||
// )
|
||||
}
|
||||
log.I.F(
|
||||
"saved new event from follows syncer: %0x",
|
||||
res.Event.ID,
|
||||
)
|
||||
case eoseenvelope.L:
|
||||
// ignore, continue subscription
|
||||
default:
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
module acl.orly
|
||||
|
||||
go 1.25.0
|
||||
|
||||
replace (
|
||||
acl.orly => ../acl
|
||||
crypto.orly => ../crypto
|
||||
database.orly => ../database
|
||||
encoders.orly => ../encoders
|
||||
interfaces.orly => ../interfaces
|
||||
next.orly.dev => ../../
|
||||
protocol.orly => ../protocol
|
||||
utils.orly => ../utils
|
||||
)
|
||||
|
||||
require (
|
||||
database.orly v0.0.0-00010101000000-000000000000
|
||||
encoders.orly v0.0.0-00010101000000-000000000000
|
||||
interfaces.orly v0.0.0-00010101000000-000000000000
|
||||
lol.mleku.dev v1.0.2
|
||||
next.orly.dev v0.0.0-00010101000000-000000000000
|
||||
utils.orly v0.0.0-00010101000000-000000000000
|
||||
)
|
||||
|
||||
require (
|
||||
crypto.orly v0.0.0-00010101000000-000000000000 // indirect
|
||||
github.com/adrg/xdg v0.5.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dgraph-io/badger/v4 v4.8.0 // indirect
|
||||
github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/google/flatbuffers v25.2.10+incompatible // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/templexxx/cpu v0.0.1 // indirect
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect
|
||||
go-simpler.org/env v0.12.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect
|
||||
golang.org/x/net v0.41.0 // indirect
|
||||
golang.org/x/sys v0.35.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
lukechampine.com/frand v1.5.1 // indirect
|
||||
)
|
||||
@@ -1,68 +0,0 @@
|
||||
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
|
||||
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
|
||||
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
|
||||
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
|
||||
github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI=
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
|
||||
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ=
|
||||
go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs=
|
||||
go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c=
|
||||
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
|
||||
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
|
||||
lukechampine.com/frand v1.5.1/go.mod h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q=
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec/base58"
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/crypto/ec/base58"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
var stringTests = []struct {
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec/base58"
|
||||
"next.orly.dev/pkg/crypto/ec/base58"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -7,7 +7,7 @@ package base58
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"crypto.orly/sha256"
|
||||
"next.orly.dev/pkg/crypto/sha256"
|
||||
)
|
||||
|
||||
// ErrChecksum indicates that the checksum of a check-encoded string does not verify against
|
||||
|
||||
@@ -7,7 +7,7 @@ package base58_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec/base58"
|
||||
"next.orly.dev/pkg/crypto/ec/base58"
|
||||
)
|
||||
|
||||
var checkEncodingStringTests = []struct {
|
||||
|
||||
@@ -7,7 +7,7 @@ package base58_test
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"crypto.orly/ec/base58"
|
||||
"next.orly.dev/pkg/crypto/ec/base58"
|
||||
)
|
||||
|
||||
// This example demonstrates how to decode modified base58 encoded data.
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
// TestBech32 tests whether decoding and re-encoding the valid BIP-173 test
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
// setHex decodes the passed big-endian hex string into the internal field value
|
||||
|
||||
@@ -20,7 +20,7 @@ package btcec
|
||||
// reverse the transform than to operate in affine coordinates.
|
||||
|
||||
import (
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// KoblitzCurve provides an implementation for secp256k1 that fits the ECC
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"crypto.orly/ec/wire"
|
||||
"next.orly.dev/pkg/crypto/ec/wire"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -3,8 +3,8 @@ package chaincfg
|
||||
import (
|
||||
"time"
|
||||
|
||||
"crypto.orly/ec/chainhash"
|
||||
"crypto.orly/ec/wire"
|
||||
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||
"next.orly.dev/pkg/crypto/ec/wire"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"crypto.orly/ec/chainhash"
|
||||
"crypto.orly/ec/wire"
|
||||
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||
"next.orly.dev/pkg/crypto/ec/wire"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -9,8 +9,8 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"crypto.orly/sha256"
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/crypto/sha256"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -7,7 +7,7 @@ package chainhash
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
// mainNetGenesisHash is the hash of the first block in the block chain for the
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
package chainhash
|
||||
|
||||
import (
|
||||
"crypto.orly/sha256"
|
||||
"next.orly.dev/pkg/crypto/sha256"
|
||||
)
|
||||
|
||||
// HashB calculates hash(b) and returns the resulting bytes.
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
package btcec
|
||||
|
||||
import (
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// GenerateSharedSecret generates a shared secret based on a secret key and a
|
||||
|
||||
@@ -7,7 +7,7 @@ package btcec
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
func TestGenerateSharedSecret(t *testing.T) {
|
||||
|
||||
@@ -6,7 +6,7 @@ package btcec
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// JacobianPoint is an element of the group formed by the secp256k1 curve in
|
||||
|
||||
@@ -8,8 +8,8 @@ package ecdsa
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
// hexToModNScalar converts the passed hex string into a ModNScalar and will
|
||||
|
||||
@@ -8,7 +8,7 @@ package ecdsa
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// References:
|
||||
|
||||
@@ -14,10 +14,10 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"encoders.orly/hex"
|
||||
"lol.mleku.dev/chk"
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
// hexToBytes converts the passed hex string into bytes and will panic if there
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
package btcec
|
||||
|
||||
import (
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// Error identifies an error related to public key cryptography using a
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package btcec
|
||||
|
||||
import (
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// FieldVal implements optimized fixed-precision arithmetic over the secp256k1
|
||||
|
||||
@@ -9,8 +9,8 @@ import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"encoders.orly/hex"
|
||||
"lol.mleku.dev/chk"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
// TestIsZero ensures that checking if a field IsZero works as expected.
|
||||
|
||||
@@ -11,7 +11,7 @@ package btcec
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
func FuzzParsePubKey(f *testing.F) {
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
package btcec
|
||||
|
||||
import (
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// ModNScalar implements optimized 256-bit constant-time fixed-precision
|
||||
|
||||
@@ -8,9 +8,9 @@ import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec"
|
||||
"crypto.orly/ec/schnorr"
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -5,9 +5,9 @@ package musig2
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"crypto.orly/ec"
|
||||
"crypto.orly/ec/schnorr"
|
||||
"lol.mleku.dev/chk"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -7,12 +7,12 @@ import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/utils"
|
||||
|
||||
"crypto.orly/ec"
|
||||
"crypto.orly/ec/chainhash"
|
||||
"crypto.orly/ec/schnorr"
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -10,10 +10,10 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec"
|
||||
"crypto.orly/ec/schnorr"
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -8,9 +8,9 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec"
|
||||
"crypto.orly/sha256"
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/sha256"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -9,10 +9,10 @@ import (
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"crypto.orly/ec"
|
||||
"crypto.orly/ec/chainhash"
|
||||
"crypto.orly/ec/schnorr"
|
||||
"lol.mleku.dev/chk"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -9,9 +9,9 @@ import (
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"encoders.orly/hex"
|
||||
"github.com/stretchr/testify/require"
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
type nonceGenTestCase struct {
|
||||
|
||||
@@ -7,12 +7,12 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"crypto.orly/ec"
|
||||
"crypto.orly/ec/chainhash"
|
||||
"crypto.orly/ec/schnorr"
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"lol.mleku.dev/chk"
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -11,10 +11,10 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec"
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"encoders.orly/hex"
|
||||
"github.com/stretchr/testify/require"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
package btcec
|
||||
|
||||
import (
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// These constants define the lengths of serialized public keys.
|
||||
|
||||
@@ -7,7 +7,7 @@ package btcec
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/utils"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
@@ -9,10 +9,10 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec"
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"crypto.orly/sha256"
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/sha256"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
// hexToBytes converts the passed hex string into bytes and will panic if there
|
||||
|
||||
@@ -8,8 +8,8 @@ package schnorr
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"crypto.orly/ec"
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// These constants define the lengths of serialized public keys.
|
||||
|
||||
@@ -5,10 +5,10 @@ package schnorr
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"crypto.orly/ec"
|
||||
"crypto.orly/ec/chainhash"
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"lol.mleku.dev/chk"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -11,10 +11,10 @@ import (
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
"crypto.orly/ec"
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"encoders.orly/hex"
|
||||
"lol.mleku.dev/chk"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
package btcec
|
||||
|
||||
import (
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// SecretKey wraps an ecdsa.SecretKey as a convenience mainly for signing things with the secret key without having to
|
||||
|
||||
@@ -8,7 +8,7 @@ package secp256k1
|
||||
import (
|
||||
"math/bits"
|
||||
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
// References:
|
||||
|
||||
@@ -8,7 +8,7 @@ package secp256k1
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
func TestGenerateSharedSecret(t *testing.T) {
|
||||
|
||||
@@ -11,9 +11,9 @@ import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"crypto.orly/sha256"
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/sha256"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
// This example demonstrates use of GenerateSharedSecret to encrypt a message
|
||||
|
||||
@@ -52,7 +52,7 @@ package secp256k1
|
||||
// ordinarily would. See the documentation for FieldVal for more details.
|
||||
|
||||
import (
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
// Constants used to make the code more readable.
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.