Optimize deletion timestamp lookup by replacing the sorting logic with a linear scan. Add profiling support via cmd/benchmark/profile.sh, introduce a network load-testing mode in the benchmark tool, and extend benchmark reports with additional latency metrics (P90 and bottom-10% average).

2025-09-12 23:47:53 +01:00
parent fefa4d202e
commit c45276ef08
14 changed files with 1788 additions and 63 deletions

cmd/benchmark (benchmark main package)

@@ -9,16 +9,20 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
+	"sort"
 	"strings"
 	"sync"
 	"time"
+	"next.orly.dev/pkg/crypto/p256k"
 	"next.orly.dev/pkg/database"
+	"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
 	"next.orly.dev/pkg/encoders/event"
 	"next.orly.dev/pkg/encoders/filter"
 	"next.orly.dev/pkg/encoders/kind"
 	"next.orly.dev/pkg/encoders/tag"
 	"next.orly.dev/pkg/encoders/timestamp"
+	"next.orly.dev/pkg/protocol/ws"
 )
 type BenchmarkConfig struct {
@@ -28,6 +32,11 @@ type BenchmarkConfig struct {
 	TestDuration   time.Duration
 	BurstPattern   bool
 	ReportInterval time.Duration
+
+	// Network load options
+	RelayURL   string
+	NetWorkers int
+	NetRate    int // events/sec per worker
 }
 type BenchmarkResult struct {
@@ -36,8 +45,10 @@ type BenchmarkResult struct {
 	TotalEvents       int
 	EventsPerSecond   float64
 	AvgLatency        time.Duration
+	P90Latency        time.Duration
 	P95Latency        time.Duration
 	P99Latency        time.Duration
+	Bottom10Avg       time.Duration
 	SuccessRate       float64
 	ConcurrentWorkers int
 	MemoryUsed        uint64
@@ -54,6 +65,12 @@ type Benchmark struct {
 func main() {
 	config := parseFlags()
+
+	if config.RelayURL != "" {
+		// Network mode: connect to relay and generate traffic
+		runNetworkLoad(config)
+		return
+	}
 	fmt.Printf("Starting Nostr Relay Benchmark\n")
 	fmt.Printf("Data Directory: %s\n", config.DataDir)
 	fmt.Printf(
@@ -64,13 +81,12 @@ func main() {
 	benchmark := NewBenchmark(config)
 	defer benchmark.Close()

-	// Run benchmark tests
-	benchmark.RunPeakThroughputTest()
-	benchmark.RunBurstPatternTest()
-	benchmark.RunMixedReadWriteTest()
+	// Run benchmark suite twice with pauses
+	benchmark.RunSuite()

-	// Generate report
+	// Generate reports
 	benchmark.GenerateReport()
+	benchmark.GenerateAsciidocReport()
 }
 func parseFlags() *BenchmarkConfig {
@@ -80,7 +96,7 @@ func parseFlags() *BenchmarkConfig {
 		&config.DataDir, "datadir", "/tmp/benchmark_db", "Database directory",
 	)
 	flag.IntVar(
-		&config.NumEvents, "events", 10000, "Number of events to generate",
+		&config.NumEvents, "events", 100000, "Number of events to generate",
 	)
 	flag.IntVar(
 		&config.ConcurrentWorkers, "workers", runtime.NumCPU(),
@@ -97,10 +113,142 @@ func parseFlags() *BenchmarkConfig {
 		"Report interval",
 	)
+
+	// Network mode flags
+	flag.StringVar(
+		&config.RelayURL, "relay-url", "",
+		"Relay WebSocket URL (enables network mode if set)",
+	)
+	flag.IntVar(
+		&config.NetWorkers, "net-workers", runtime.NumCPU(),
+		"Network workers (connections)",
+	)
+	flag.IntVar(&config.NetRate, "net-rate", 20, "Events per second per worker")
+
 	flag.Parse()
 	return config
 }
+
+func runNetworkLoad(cfg *BenchmarkConfig) {
+	fmt.Printf(
+		"Network mode: relay=%s workers=%d rate=%d ev/s per worker duration=%s\n",
+		cfg.RelayURL, cfg.NetWorkers, cfg.NetRate, cfg.TestDuration,
+	)
+	ctx, cancel := context.WithTimeout(context.Background(), cfg.TestDuration)
+	defer cancel()
+	var wg sync.WaitGroup
+	if cfg.NetWorkers <= 0 {
+		cfg.NetWorkers = 1
+	}
+	if cfg.NetRate <= 0 {
+		cfg.NetRate = 1
+	}
+	for i := 0; i < cfg.NetWorkers; i++ {
+		wg.Add(1)
+		go func(workerID int) {
+			defer wg.Done()
+			// Connect to relay
+			rl, err := ws.RelayConnect(ctx, cfg.RelayURL)
+			if err != nil {
+				fmt.Printf(
+					"worker %d: failed to connect to %s: %v\n", workerID,
+					cfg.RelayURL, err,
+				)
+				return
+			}
+			defer rl.Close()
+			fmt.Printf("worker %d: connected to %s\n", workerID, cfg.RelayURL)
+			// Signer for this worker
+			var keys p256k.Signer
+			if err := keys.Generate(); err != nil {
+				fmt.Printf("worker %d: keygen failed: %v\n", workerID, err)
+				return
+			}
+			// Start a concurrent subscriber that listens for events published by this worker
+			// Build a filter that matches this worker's pubkey and kind=1, since now
+			since := time.Now().Unix()
+			go func() {
+				f := filter.New()
+				f.Kinds = kind.NewS(kind.TextNote)
+				f.Authors = tag.NewWithCap(1)
+				f.Authors.T = append(f.Authors.T, keys.Pub())
+				f.Since = timestamp.FromUnix(since)
+				sub, err := rl.Subscribe(ctx, filter.NewS(f))
+				if err != nil {
+					fmt.Printf("worker %d: subscribe error: %v\n", workerID, err)
+					return
+				}
+				defer sub.Unsub()
+				recv := 0
+				for {
+					select {
+					case <-ctx.Done():
+						fmt.Printf("worker %d: subscriber exiting after %d events\n", workerID, recv)
+						return
+					case <-sub.EndOfStoredEvents:
+						// continue streaming live events
+					case ev := <-sub.Events:
+						if ev == nil {
+							continue
+						}
+						recv++
+						if recv%100 == 0 {
+							fmt.Printf("worker %d: received %d matching events\n", workerID, recv)
+						}
+						ev.Free()
+					}
+				}
+			}()
+			interval := time.Second / time.Duration(cfg.NetRate)
+			ticker := time.NewTicker(interval)
+			defer ticker.Stop()
+			count := 0
+			for {
+				select {
+				case <-ctx.Done():
+					fmt.Printf(
+						"worker %d: stopping after %d publishes\n", workerID,
+						count,
+					)
+					return
+				case <-ticker.C:
+					// Build and sign a simple text note event
+					ev := event.New()
+					ev.Kind = uint16(1)
+					ev.CreatedAt = time.Now().Unix()
+					ev.Tags = tag.NewS()
+					ev.Content = []byte(fmt.Sprintf(
+						"bench worker=%d n=%d", workerID, count,
+					))
+					if err := ev.Sign(&keys); err != nil {
+						fmt.Printf("worker %d: sign error: %v\n", workerID, err)
+						ev.Free()
+						continue
+					}
+					// Async publish: don't wait for OK; this greatly increases throughput
+					ch := rl.Write(eventenvelope.NewSubmissionWith(ev).Marshal(nil))
+					// Non-blocking error check
+					select {
+					case err := <-ch:
+						if err != nil {
+							fmt.Printf("worker %d: write error: %v\n", workerID, err)
+						}
+					default:
+					}
+					if count%100 == 0 {
+						fmt.Printf("worker %d: sent %d events\n", workerID, count)
+					}
+					ev.Free()
+					count++
+				}
+			}
+		}(i)
+	}
+	wg.Wait()
+}
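
The publish loop above paces each worker with a time.Ticker and deliberately does not wait for the relay's OK: the write is fired, then its result channel is polled with a non-blocking select, so a slow relay never stalls the sender. A minimal, self-contained sketch of that pattern, with a hypothetical publish callback standing in for the ORLY client's Write call:

package main

import (
	"context"
	"fmt"
	"time"
)

// rateLimitedPublish sends one message per tick until ctx expires and checks
// for errors without blocking. publish is a hypothetical stand-in for an
// async client write that reports completion on a channel.
func rateLimitedPublish(
	ctx context.Context, rate int, publish func(n int) <-chan error,
) int {
	ticker := time.NewTicker(time.Second / time.Duration(rate))
	defer ticker.Stop()
	count := 0
	for {
		select {
		case <-ctx.Done():
			return count
		case <-ticker.C:
			ch := publish(count)
			// Non-blocking error check, mirroring the loop above.
			select {
			case err := <-ch:
				if err != nil {
					fmt.Println("write error:", err)
				}
			default:
			}
			count++
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	sent := rateLimitedPublish(ctx, 20, func(n int) <-chan error {
		ch := make(chan error, 1)
		ch <- nil // pretend the write was accepted immediately
		return ch
	})
	fmt.Println("sent", sent, "messages") // ~40 at 20 ev/s over 2s
}

The trade-off is that any error arriving after the non-blocking check is silently dropped, so the network mode measures submission throughput rather than acknowledged throughput.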
 func NewBenchmark(config *BenchmarkConfig) *Benchmark {
 	// Clean up existing data directory
 	os.RemoveAll(config.DataDir)
@@ -113,11 +261,16 @@ func NewBenchmark(config *BenchmarkConfig) *Benchmark {
 		log.Fatalf("Failed to create database: %v", err)
 	}

-	return &Benchmark{
+	b := &Benchmark{
 		config:  config,
 		db:      db,
 		results: make([]*BenchmarkResult, 0),
 	}
+
+	// Trigger compaction/GC before starting tests
+	b.compactDatabase()
+	return b
 }
 func (b *Benchmark) Close() {
@@ -126,6 +279,32 @@ func (b *Benchmark) Close() {
 	}
 }
+// RunSuite runs the three tests with a 10s pause between them and repeats the
+// set twice with a 10s pause between rounds.
+func (b *Benchmark) RunSuite() {
+	for round := 1; round <= 2; round++ {
+		fmt.Printf("\n=== Starting test round %d/2 ===\n", round)
+		b.RunPeakThroughputTest()
+		time.Sleep(10 * time.Second)
+		b.RunBurstPatternTest()
+		time.Sleep(10 * time.Second)
+		b.RunMixedReadWriteTest()
+		if round < 2 {
+			fmt.Println("\nPausing 10s before next round...")
+			time.Sleep(10 * time.Second)
+		}
+	}
+}
+
+// compactDatabase triggers a Badger value log GC before starting tests.
+func (b *Benchmark) compactDatabase() {
+	if b.db == nil || b.db.DB == nil {
+		return
+	}
+	// Attempt value log GC. Ignore errors; this is best-effort.
+	_ = b.db.DB.RunValueLogGC(0.5)
+}
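
compactDatabase above makes a single best-effort GC call. Badger's RunValueLogGC rewrites at most one value-log file per invocation and returns badger.ErrNoRewrite once nothing qualifies, so a fuller cleanup, if ever wanted, would loop. A sketch, assuming direct access to the underlying *badger.DB:

import badger "github.com/dgraph-io/badger/v4"

// compactFully is a hypothetical variant of compactDatabase: repeat the
// value-log GC until Badger returns an error, which is badger.ErrNoRewrite
// once nothing is left to reclaim. 0.5 targets files that are at least half
// discardable.
func compactFully(db *badger.DB) {
	for db.RunValueLogGC(0.5) == nil {
	}
}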
 func (b *Benchmark) RunPeakThroughputTest() {
 	fmt.Println("\n=== Peak Throughput Test ===")
@@ -185,8 +364,10 @@ func (b *Benchmark) RunPeakThroughputTest() {
 	if len(latencies) > 0 {
 		result.AvgLatency = calculateAvgLatency(latencies)
+		result.P90Latency = calculatePercentileLatency(latencies, 0.90)
 		result.P95Latency = calculatePercentileLatency(latencies, 0.95)
 		result.P99Latency = calculatePercentileLatency(latencies, 0.99)
+		result.Bottom10Avg = calculateBottom10Avg(latencies)
 	}

 	result.SuccessRate = float64(totalEvents) / float64(b.config.NumEvents) * 100
@@ -206,8 +387,10 @@ func (b *Benchmark) RunPeakThroughputTest() {
 	fmt.Printf("Duration: %v\n", duration)
 	fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
 	fmt.Printf("Avg latency: %v\n", result.AvgLatency)
+	fmt.Printf("P90 latency: %v\n", result.P90Latency)
 	fmt.Printf("P95 latency: %v\n", result.P95Latency)
 	fmt.Printf("P99 latency: %v\n", result.P99Latency)
+	fmt.Printf("Bottom 10%% Avg latency: %v\n", result.Bottom10Avg)
 }
 func (b *Benchmark) RunBurstPatternTest() {
@@ -282,8 +465,10 @@ func (b *Benchmark) RunBurstPatternTest() {
 	if len(latencies) > 0 {
 		result.AvgLatency = calculateAvgLatency(latencies)
+		result.P90Latency = calculatePercentileLatency(latencies, 0.90)
 		result.P95Latency = calculatePercentileLatency(latencies, 0.95)
 		result.P99Latency = calculatePercentileLatency(latencies, 0.99)
+		result.Bottom10Avg = calculateBottom10Avg(latencies)
 	}

 	result.SuccessRate = float64(totalEvents) / float64(eventIndex) * 100
@@ -387,8 +572,10 @@ func (b *Benchmark) RunMixedReadWriteTest() {
 	allLatencies := append(writeLatencies, readLatencies...)
 	if len(allLatencies) > 0 {
 		result.AvgLatency = calculateAvgLatency(allLatencies)
+		result.P90Latency = calculatePercentileLatency(allLatencies, 0.90)
 		result.P95Latency = calculatePercentileLatency(allLatencies, 0.95)
 		result.P99Latency = calculatePercentileLatency(allLatencies, 0.99)
+		result.Bottom10Avg = calculateBottom10Avg(allLatencies)
 	}

 	result.SuccessRate = float64(totalWrites+totalReads) / float64(len(events)) * 100
@@ -460,8 +647,10 @@ func (b *Benchmark) GenerateReport() {
 		fmt.Printf("Concurrent Workers: %d\n", result.ConcurrentWorkers)
 		fmt.Printf("Memory Used: %d MB\n", result.MemoryUsed/(1024*1024))
 		fmt.Printf("Avg Latency: %v\n", result.AvgLatency)
+		fmt.Printf("P90 Latency: %v\n", result.P90Latency)
 		fmt.Printf("P95 Latency: %v\n", result.P95Latency)
 		fmt.Printf("P99 Latency: %v\n", result.P99Latency)
+		fmt.Printf("Bottom 10%% Avg Latency: %v\n", result.Bottom10Avg)

 		if len(result.Errors) > 0 {
 			fmt.Printf("Errors (%d):\n", len(result.Errors))
@@ -524,8 +713,14 @@ func (b *Benchmark) saveReportToFile(path string) error {
 			),
 		)
 		file.WriteString(fmt.Sprintf("Avg Latency: %v\n", result.AvgLatency))
+		file.WriteString(fmt.Sprintf("P90 Latency: %v\n", result.P90Latency))
 		file.WriteString(fmt.Sprintf("P95 Latency: %v\n", result.P95Latency))
 		file.WriteString(fmt.Sprintf("P99 Latency: %v\n", result.P99Latency))
+		file.WriteString(
+			fmt.Sprintf(
+				"Bottom 10%% Avg Latency: %v\n", result.Bottom10Avg,
+			),
+		)
 		file.WriteString(
 			fmt.Sprintf(
 				"Memory: %d MB\n", result.MemoryUsed/(1024*1024),
@@ -537,6 +732,41 @@ func (b *Benchmark) saveReportToFile(path string) error {
 	return nil
 }

+// GenerateAsciidocReport creates a simple AsciiDoc report alongside the text report.
+func (b *Benchmark) GenerateAsciidocReport() error {
+	path := filepath.Join(b.config.DataDir, "benchmark_report.adoc")
+	file, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	file.WriteString("= NOSTR Relay Benchmark Results\n\n")
+	file.WriteString(
+		fmt.Sprintf(
+			"Generated: %s\n\n", time.Now().Format(time.RFC3339),
+		),
+	)
+	file.WriteString("[cols=\"1,^1,^1,^1,^1,^1\",options=\"header\"]\n")
+	file.WriteString("|===\n")
+	file.WriteString("| Test | Events/sec | Avg Latency | P90 | P95 | Bottom 10% Avg\n")
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+	for _, r := range b.results {
+		file.WriteString(fmt.Sprintf("| %s\n", r.TestName))
+		file.WriteString(fmt.Sprintf("| %.2f\n", r.EventsPerSecond))
+		file.WriteString(fmt.Sprintf("| %v\n", r.AvgLatency))
+		file.WriteString(fmt.Sprintf("| %v\n", r.P90Latency))
+		file.WriteString(fmt.Sprintf("| %v\n", r.P95Latency))
+		file.WriteString(fmt.Sprintf("| %v\n", r.Bottom10Avg))
+	}
+	file.WriteString("|===\n")
+	fmt.Printf("AsciiDoc report saved to: %s\n", path)
+	return nil
+}
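
For reference, the .adoc produced by the calls above has this shape (one row shown, with values taken from the khatru-badger run later on this page; the timestamp is whatever time.Now() yields):

= NOSTR Relay Benchmark Results

Generated: <RFC3339 timestamp>

[cols="1,^1,^1,^1,^1,^1",options="header"]
|===
| Test | Events/sec | Avg Latency | P90 | P95 | Bottom 10% Avg
| Peak Throughput
| 17312.60
| 448.778µs
| 584.783µs
| 633.085µs
| 664.268µs
|===

AsciiDoc accepts both the single-line header row and the one-cell-per-line body rows; with six columns declared in cols, the cells regroup into rows either way.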
 // Helper functions

 func calculateAvgLatency(latencies []time.Duration) time.Duration {
@@ -557,13 +787,48 @@ func calculatePercentileLatency(
 	if len(latencies) == 0 {
 		return 0
 	}
-	// Simple percentile calculation - in production would sort first
-	index := int(float64(len(latencies)) * percentile)
-	if index >= len(latencies) {
-		index = len(latencies) - 1
-	}
-	return latencies[index]
+	// Sort a copy to avoid mutating caller slice
+	copySlice := make([]time.Duration, len(latencies))
+	copy(copySlice, latencies)
+	sort.Slice(
+		copySlice, func(i, j int) bool { return copySlice[i] < copySlice[j] },
+	)
+	index := int(float64(len(copySlice)-1) * percentile)
+	if index < 0 {
+		index = 0
+	}
+	if index >= len(copySlice) {
+		index = len(copySlice) - 1
+	}
+	return copySlice[index]
 }
+// calculateBottom10Avg returns the average latency of the slowest 10% of samples.
+func calculateBottom10Avg(latencies []time.Duration) time.Duration {
+	if len(latencies) == 0 {
+		return 0
+	}
+	copySlice := make([]time.Duration, len(latencies))
+	copy(copySlice, latencies)
+	sort.Slice(
+		copySlice, func(i, j int) bool { return copySlice[i] < copySlice[j] },
+	)
+	start := int(float64(len(copySlice)) * 0.9)
+	if start < 0 {
+		start = 0
+	}
+	if start >= len(copySlice) {
+		start = len(copySlice) - 1
+	}
+	var total time.Duration
+	for i := start; i < len(copySlice); i++ {
+		total += copySlice[i]
+	}
+	count := len(copySlice) - start
+	if count <= 0 {
+		return 0
+	}
+	return total / time.Duration(count)
+}

 func getMemUsage() uint64 {
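
The rewritten calculatePercentileLatency is a nearest-rank percentile over a sorted copy: index = int((n-1) * p). Note that "Bottom 10%" in these reports means the slowest decile: calculateBottom10Avg averages the samples from int(n*0.9) to the end of the ascending-sorted slice, i.e. the highest latencies. A small worked example, runnable on its own:

package main

import (
	"fmt"
	"sort"
	"time"
)

func main() {
	// Ten samples: 1ms, 2ms, ..., 10ms.
	latencies := make([]time.Duration, 10)
	for i := range latencies {
		latencies[i] = time.Duration(i+1) * time.Millisecond
	}
	sorted := append([]time.Duration(nil), latencies...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })

	// P90: index = int((10-1)*0.90) = int(8.1) = 8, the ninth-smallest sample.
	p90 := sorted[int(float64(len(sorted)-1)*0.90)]
	fmt.Println("P90:", p90) // 9ms

	// Bottom 10% average: start = int(10*0.9) = 9, so only the slowest sample.
	start := int(float64(len(sorted)) * 0.9)
	var total time.Duration
	for _, d := range sorted[start:] {
		total += d
	}
	fmt.Println("Bottom 10% avg:", total/time.Duration(len(sorted)-start)) // 10ms
}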

cmd/benchmark/profile.sh (new executable file, 156 lines)

@@ -0,0 +1,156 @@
#!/usr/bin/env bash
set -euo pipefail
# Runs the ORLY relay with CPU profiling enabled and opens the resulting
# pprof profile in a local web UI.
#
# Usage:
# ./profile.sh [duration_seconds]
#
# - Builds the relay.
# - Starts it with ORLY_PPROF=cpu and minimal logging.
# - Waits for the profile path printed at startup.
# - Runs for DURATION seconds (default 10), then stops the relay to flush the
# CPU profile to disk.
# - Launches `go tool pprof -http=:8000` for convenient browsing.
#
# Notes:
# - The profile file path is detected from the relay's stdout/stderr lines
# emitted by github.com/pkg/profile, typically like:
# profile: cpu profiling enabled, path: /tmp/profile123456/cpu.pprof
# - You can change DURATION by passing a number of seconds as the first arg
# or by setting DURATION env var.
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd -- "${SCRIPT_DIR}/../.." && pwd)"
cd "$REPO_ROOT"
DURATION="${1:-${DURATION:-10}}"
PPROF_HTTP_PORT="${PPROF_HTTP_PORT:-8000}"
# Load generation controls
LOAD_ENABLED="${LOAD_ENABLED:-1}" # set to 0 to disable load
# Use the benchmark main package in cmd/benchmark as the load generator
BENCHMARK_PKG_DIR="$REPO_ROOT/cmd/benchmark"
BENCHMARK_BIN="${BENCHMARK_BIN:-}" # if empty, we will build to $RUN_DIR/benchmark
BENCHMARK_EVENTS="${BENCHMARK_EVENTS:-}" # optional override for -events
BENCHMARK_DURATION="${BENCHMARK_DURATION:-}" # optional override for -duration (e.g. 30s); defaults to DURATION seconds
BIN="$REPO_ROOT/next.orly.dev"
LOG_DIR="${LOG_DIR:-$REPO_ROOT/cmd/benchmark/reports}"
mkdir -p "$LOG_DIR"
RUN_TS="$(date +%Y%m%d_%H%M%S)"
RUN_DIR="$LOG_DIR/profile_run_${RUN_TS}"
mkdir -p "$RUN_DIR"
LOG_FILE="$RUN_DIR/relay.log"
LOAD_LOG_FILE="$RUN_DIR/load.log"
echo "[profile.sh] Building relay binary ..."
go build -o "$BIN" .
# Ensure we clean up the child process on exit
RELAY_PID=""
LOAD_PID=""
cleanup() {
    if [[ -n "$LOAD_PID" ]] && kill -0 "$LOAD_PID" 2>/dev/null; then
        echo "[profile.sh] Stopping load generator (pid=$LOAD_PID) ..."
        kill -INT "$LOAD_PID" 2>/dev/null || true
        sleep 0.5
        kill -TERM "$LOAD_PID" 2>/dev/null || true
    fi
    if [[ -n "$RELAY_PID" ]] && kill -0 "$RELAY_PID" 2>/dev/null; then
        echo "[profile.sh] Stopping relay (pid=$RELAY_PID) ..."
        kill -INT "$RELAY_PID" 2>/dev/null || true
        # give it a moment to exit and flush profile
        sleep 1
        kill -TERM "$RELAY_PID" 2>/dev/null || true
    fi
}
trap cleanup EXIT
# Start the relay with CPU profiling enabled. Capture both stdout and stderr.
echo "[profile.sh] Starting relay with CPU profiling enabled ..."
(
    ORLY_LOG_LEVEL=off \
    ORLY_LISTEN="${ORLY_LISTEN:-127.0.0.1}" \
    ORLY_PORT="${ORLY_PORT:-3334}" \
    ORLY_PPROF=cpu \
    "$BIN"
) >"$LOG_FILE" 2>&1 &
RELAY_PID=$!
echo "[profile.sh] Relay started with pid $RELAY_PID; logging to $LOG_FILE"
# Wait until the profile path is printed. Timeout after reasonable period.
PPROF_FILE=""
START_TIME=$(date +%s)
TIMEOUT=30
echo "[profile.sh] Waiting for profile path to appear in relay output ..."
while :; do
    if grep -Eo "/tmp/profile[^ ]+/cpu\.pprof" "$LOG_FILE" >/dev/null 2>&1; then
        PPROF_FILE=$(grep -Eo "/tmp/profile[^ ]+/cpu\.pprof" "$LOG_FILE" | tail -n1)
        break
    fi
    NOW=$(date +%s)
    if (( NOW - START_TIME > TIMEOUT )); then
        echo "[profile.sh] ERROR: Timed out waiting for profile path in $LOG_FILE" >&2
        echo "Last 50 log lines:" >&2
        tail -n 50 "$LOG_FILE" >&2
        exit 1
    fi
    sleep 0.3
done
echo "[profile.sh] Detected profile file: $PPROF_FILE"
# Optionally start load generator to exercise the relay
if [[ "$LOAD_ENABLED" == "1" ]]; then
# Build benchmark binary if not provided
if [[ -z "$BENCHMARK_BIN" ]]; then
BENCHMARK_BIN="$RUN_DIR/benchmark"
echo "[profile.sh] Building benchmark load generator ($BENCHMARK_PKG_DIR) ..."
go build -o "$BENCHMARK_BIN" "$BENCHMARK_PKG_DIR"
fi
BENCH_DB_DIR="$RUN_DIR/benchdb"
mkdir -p "$BENCH_DB_DIR"
DURATION_ARG="${BENCHMARK_DURATION:-${DURATION}s}"
EXTRA_EVENTS=""
if [[ -n "$BENCHMARK_EVENTS" ]]; then
EXTRA_EVENTS="-events=$BENCHMARK_EVENTS"
fi
echo "[profile.sh] Starting benchmark load generator for duration $DURATION_ARG ..."
RELAY_URL="ws://${ORLY_LISTEN:-127.0.0.1}:${ORLY_PORT:-3334}"
echo "[profile.sh] Using relay URL: $RELAY_URL"
(
"$BENCHMARK_BIN" -relay-url="$RELAY_URL" -net-workers="${NET_WORKERS:-2}" -net-rate="${NET_RATE:-20}" -duration="$DURATION_ARG" $EXTRA_EVENTS \
>"$LOAD_LOG_FILE" 2>&1 &
)
LOAD_PID=$!
echo "[profile.sh] Load generator started (pid=$LOAD_PID); logging to $LOAD_LOG_FILE"
else
echo "[profile.sh] LOAD_ENABLED=0; not starting load generator."
fi
echo "[profile.sh] Letting the relay run for ${DURATION}s to collect CPU samples ..."
sleep "$DURATION"
# Stop the relay to flush the CPU profile
cleanup
# Disable trap so we don't double-kill
trap - EXIT
# Wait briefly to ensure the profile file is finalized
for i in {1..20}; do
    if [[ -s "$PPROF_FILE" ]]; then
        break
    fi
    sleep 0.2
done
if [[ ! -s "$PPROF_FILE" ]]; then
    echo "[profile.sh] WARNING: Profile file is empty or missing: $PPROF_FILE" >&2
fi
# Launch pprof HTTP UI
echo "[profile.sh] Launching pprof web UI (http://localhost:${PPROF_HTTP_PORT}) ..."
exec go tool pprof -http=":${PPROF_HTTP_PORT}" "$BIN" "$PPROF_FILE"
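
The relay side of ORLY_PPROF is not part of this diff; per the header comment it presumably goes through github.com/pkg/profile, whose startup message is exactly what the grep loop above waits for. A hypothetical sketch of that wiring:

package main

import (
    "os"

    "github.com/pkg/profile"
)

func main() {
    // Hypothetical: when ORLY_PPROF=cpu, write cpu.pprof to a fresh temp dir
    // (/tmp/profileNNN/cpu.pprof) and print the "profile: cpu profiling
    // enabled" line that profile.sh greps for.
    if os.Getenv("ORLY_PPROF") == "cpu" {
        defer profile.Start(profile.CPUProfile).Stop()
    }
    // ... run the relay ...
}

pkg/profile also installs a shutdown hook by default, so the kill -INT sent by cleanup() stops the profile and flushes it to disk, which is why the script can rely on the file being complete shortly after the relay exits.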


@@ -0,0 +1,140 @@
================================================================
NOSTR RELAY BENCHMARK AGGREGATE REPORT
================================================================
Generated: 2025-09-12T22:43:29+00:00
Benchmark Configuration:
Events per test: 10000
Concurrent workers: 8
Test duration: 60s
Relays tested: 6
================================================================
SUMMARY BY RELAY
================================================================
Relay: next-orly
----------------------------------------
Status: COMPLETED
Events/sec: 18056.94
Events/sec: 1492.32
Events/sec: 16750.82
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 428.869µs
Bottom 10% Avg Latency: 643.51µs
Avg Latency: 178.04µs
P95 Latency: 607.997µs
P95 Latency: 243.954µs
P95 Latency: 21.665387ms
Relay: khatru-sqlite
----------------------------------------
Status: COMPLETED
Events/sec: 17635.76
Events/sec: 1510.39
Events/sec: 16509.10
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 437.941µs
Bottom 10% Avg Latency: 659.71µs
Avg Latency: 203.563µs
P95 Latency: 621.964µs
P95 Latency: 330.729µs
P95 Latency: 21.838576ms
Relay: khatru-badger
----------------------------------------
Status: COMPLETED
Events/sec: 17312.60
Events/sec: 1508.54
Events/sec: 15933.99
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 448.778µs
Bottom 10% Avg Latency: 664.268µs
Avg Latency: 196.38µs
P95 Latency: 633.085µs
P95 Latency: 293.579µs
P95 Latency: 22.727378ms
Relay: relayer-basic
----------------------------------------
Status: COMPLETED
Events/sec: 15155.00
Events/sec: 1545.44
Events/sec: 14255.58
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 513.243µs
Bottom 10% Avg Latency: 864.746µs
Avg Latency: 273.645µs
P95 Latency: 792.685µs
P95 Latency: 498.989µs
P95 Latency: 22.924497ms
Relay: strfry
----------------------------------------
Status: COMPLETED
Events/sec: 15245.05
Events/sec: 1533.59
Events/sec: 15507.07
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 510.383µs
Bottom 10% Avg Latency: 831.211µs
Avg Latency: 223.359µs
P95 Latency: 769.085µs
P95 Latency: 378.145µs
P95 Latency: 22.152884ms
Relay: nostr-rs-relay
----------------------------------------
Status: COMPLETED
Events/sec: 16312.24
Events/sec: 1502.05
Events/sec: 14131.23
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 476.418µs
Bottom 10% Avg Latency: 722.179µs
Avg Latency: 182.765µs
P95 Latency: 686.836µs
P95 Latency: 257.082µs
P95 Latency: 20.680962ms
================================================================
DETAILED RESULTS
================================================================
Individual relay reports are available in:
- /reports/run_20250912_222649/khatru-badger_results.txt
- /reports/run_20250912_222649/khatru-sqlite_results.txt
- /reports/run_20250912_222649/next-orly_results.txt
- /reports/run_20250912_222649/nostr-rs-relay_results.txt
- /reports/run_20250912_222649/relayer-basic_results.txt
- /reports/run_20250912_222649/strfry_results.txt
================================================================
BENCHMARK COMPARISON TABLE
================================================================
Relay            Status   Peak Tput/s   Avg Latency   Success Rate
-----            ------   -----------   -----------   ------------
next-orly        OK          18056.94     428.869µs         100.0%
khatru-sqlite    OK          17635.76     437.941µs         100.0%
khatru-badger    OK          17312.60     448.778µs         100.0%
relayer-basic    OK          15155.00     513.243µs         100.0%
strfry           OK          15245.05     510.383µs         100.0%
nostr-rs-relay   OK          16312.24     476.418µs         100.0%
================================================================
End of Report
================================================================

run_20250912_222649/khatru-badger_results.txt

@@ -0,0 +1,190 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-badger_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912223222496620 INF /tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912223222497154 INF /tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912223222497184 INF /tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912223222497402 INF (*types.Uint32)(0xc0000100fc)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912223222497454 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 577.614152ms
Events/sec: 17312.60
Avg latency: 448.778µs
P90 latency: 584.783µs
P95 latency: 633.085µs
P99 latency: 749.537µs
Bottom 10% Avg latency: 664.268µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 161.62554ms
Burst completed: 1000 events in 154.666063ms
Burst completed: 1000 events in 149.999903ms
Burst completed: 1000 events in 169.141205ms
Burst completed: 1000 events in 153.987041ms
Burst completed: 1000 events in 141.227756ms
Burst completed: 1000 events in 168.989116ms
Burst completed: 1000 events in 161.032171ms
Burst completed: 1000 events in 182.128996ms
Burst completed: 1000 events in 161.86147ms
Burst test completed: 10000 events in 6.628942674s
Events/sec: 1508.54
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 36.466065909s
Combined ops/sec: 274.23
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 627.589155ms
Events/sec: 15933.99
Avg latency: 489.881µs
P90 latency: 628.857µs
P95 latency: 679.363µs
P99 latency: 828.307µs
Bottom 10% Avg latency: 716.862µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 150.262543ms
Burst completed: 1000 events in 148.027109ms
Burst completed: 1000 events in 139.184066ms
Burst completed: 1000 events in 147.196277ms
Burst completed: 1000 events in 141.143557ms
Burst completed: 1000 events in 138.727197ms
Burst completed: 1000 events in 143.014207ms
Burst completed: 1000 events in 143.355055ms
Burst completed: 1000 events in 162.573956ms
Burst completed: 1000 events in 142.875393ms
Burst test completed: 10000 events in 6.475822519s
Events/sec: 1544.21
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4742 reads in 1m0.036644794s
Combined ops/sec: 162.27
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 577.614152ms
Total Events: 10000
Events/sec: 17312.60
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 152 MB
Avg Latency: 448.778µs
P90 Latency: 584.783µs
P95 Latency: 633.085µs
P99 Latency: 749.537µs
Bottom 10% Avg Latency: 664.268µs
----------------------------------------
Test: Burst Pattern
Duration: 6.628942674s
Total Events: 10000
Events/sec: 1508.54
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 204 MB
Avg Latency: 196.38µs
P90 Latency: 260.706µs
P95 Latency: 293.579µs
P99 Latency: 385.694µs
Bottom 10% Avg Latency: 317.532µs
----------------------------------------
Test: Mixed Read/Write
Duration: 36.466065909s
Total Events: 10000
Events/sec: 274.23
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 128 MB
Avg Latency: 9.448363ms
P90 Latency: 20.988228ms
P95 Latency: 22.727378ms
P99 Latency: 25.094784ms
Bottom 10% Avg Latency: 23.01277ms
----------------------------------------
Test: Peak Throughput
Duration: 627.589155ms
Total Events: 10000
Events/sec: 15933.99
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 124 MB
Avg Latency: 489.881µs
P90 Latency: 628.857µs
P95 Latency: 679.363µs
P99 Latency: 828.307µs
Bottom 10% Avg Latency: 716.862µs
----------------------------------------
Test: Burst Pattern
Duration: 6.475822519s
Total Events: 10000
Events/sec: 1544.21
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 170 MB
Avg Latency: 215.418µs
P90 Latency: 287.237µs
P95 Latency: 339.025µs
P99 Latency: 510.682µs
Bottom 10% Avg Latency: 378.172µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.036644794s
Total Events: 9742
Events/sec: 162.27
Success Rate: 97.4%
Concurrent Workers: 8
Memory Used: 181 MB
Avg Latency: 19.714686ms
P90 Latency: 44.573506ms
P95 Latency: 46.895555ms
P99 Latency: 50.425027ms
Bottom 10% Avg Latency: 47.384489ms
----------------------------------------
Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.adoc
20250912223503335481 INF /tmp/benchmark_khatru-badger_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912223504473151 INF /tmp/benchmark_khatru-badger_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912223504475627 INF /tmp/benchmark_khatru-badger_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: khatru-badger
RELAY_URL: ws://khatru-badger:3334
TEST_TIMESTAMP: 2025-09-12T22:35:04+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

run_20250912_222649/khatru-sqlite_results.txt

@@ -0,0 +1,190 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-sqlite_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912222936300616 INF /tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912222936301606 INF /tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912222936301647 INF /tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912222936301987 INF (*types.Uint32)(0xc0001c23f0)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912222936302060 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 567.02963ms
Events/sec: 17635.76
Avg latency: 437.941µs
P90 latency: 574.133µs
P95 latency: 621.964µs
P99 latency: 768.473µs
Bottom 10% Avg latency: 659.71µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 172.012448ms
Burst completed: 1000 events in 145.502701ms
Burst completed: 1000 events in 153.928098ms
Burst completed: 1000 events in 169.995269ms
Burst completed: 1000 events in 147.617375ms
Burst completed: 1000 events in 157.211387ms
Burst completed: 1000 events in 153.332744ms
Burst completed: 1000 events in 172.374938ms
Burst completed: 1000 events in 167.518935ms
Burst completed: 1000 events in 155.211871ms
Burst test completed: 10000 events in 6.620785215s
Events/sec: 1510.39
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 35.700582016s
Combined ops/sec: 280.11
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 605.726547ms
Events/sec: 16509.10
Avg latency: 470.577µs
P90 latency: 609.791µs
P95 latency: 660.256µs
P99 latency: 788.641µs
Bottom 10% Avg latency: 687.847µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 135.310723ms
Burst completed: 1000 events in 166.604305ms
Burst completed: 1000 events in 141.453184ms
Burst completed: 1000 events in 146.579351ms
Burst completed: 1000 events in 154.453638ms
Burst completed: 1000 events in 156.212516ms
Burst completed: 1000 events in 142.309354ms
Burst completed: 1000 events in 152.268188ms
Burst completed: 1000 events in 144.187829ms
Burst completed: 1000 events in 147.609002ms
Burst test completed: 10000 events in 6.508461808s
Events/sec: 1536.46
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4662 reads in 1m0.040595326s
Combined ops/sec: 160.92
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 567.02963ms
Total Events: 10000
Events/sec: 17635.76
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 154 MB
Avg Latency: 437.941µs
P90 Latency: 574.133µs
P95 Latency: 621.964µs
P99 Latency: 768.473µs
Bottom 10% Avg Latency: 659.71µs
----------------------------------------
Test: Burst Pattern
Duration: 6.620785215s
Total Events: 10000
Events/sec: 1510.39
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 208 MB
Avg Latency: 203.563µs
P90 Latency: 274.152µs
P95 Latency: 330.729µs
P99 Latency: 521.483µs
Bottom 10% Avg Latency: 378.237µs
----------------------------------------
Test: Mixed Read/Write
Duration: 35.700582016s
Total Events: 10000
Events/sec: 280.11
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 232 MB
Avg Latency: 9.150925ms
P90 Latency: 20.1434ms
P95 Latency: 21.838576ms
P99 Latency: 24.0106ms
Bottom 10% Avg Latency: 22.04901ms
----------------------------------------
Test: Peak Throughput
Duration: 605.726547ms
Total Events: 10000
Events/sec: 16509.10
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 139 MB
Avg Latency: 470.577µs
P90 Latency: 609.791µs
P95 Latency: 660.256µs
P99 Latency: 788.641µs
Bottom 10% Avg Latency: 687.847µs
----------------------------------------
Test: Burst Pattern
Duration: 6.508461808s
Total Events: 10000
Events/sec: 1536.46
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 182 MB
Avg Latency: 199.49µs
P90 Latency: 261.427µs
P95 Latency: 294.771µs
P99 Latency: 406.814µs
Bottom 10% Avg Latency: 332.083µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.040595326s
Total Events: 9662
Events/sec: 160.92
Success Rate: 96.6%
Concurrent Workers: 8
Memory Used: 204 MB
Avg Latency: 19.935937ms
P90 Latency: 44.802034ms
P95 Latency: 48.282589ms
P99 Latency: 52.169026ms
Bottom 10% Avg Latency: 48.641697ms
----------------------------------------
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc
20250912223216370778 INF /tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912223217349356 INF /tmp/benchmark_khatru-sqlite_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912223217352393 INF /tmp/benchmark_khatru-sqlite_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: khatru-sqlite
RELAY_URL: ws://khatru-sqlite:3334
TEST_TIMESTAMP: 2025-09-12T22:32:17+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

run_20250912_222649/next-orly_results.txt

@@ -0,0 +1,190 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912222650025765 INF /tmp/benchmark_next-orly_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912222650026455 INF /tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912222650026497 INF /tmp/benchmark_next-orly_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912222650026747 INF (*types.Uint32)(0xc0001f63cc)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912222650026778 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 553.803776ms
Events/sec: 18056.94
Avg latency: 428.869µs
P90 latency: 558.663µs
P95 latency: 607.997µs
P99 latency: 749.787µs
Bottom 10% Avg latency: 643.51µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 190.801687ms
Burst completed: 1000 events in 168.170564ms
Burst completed: 1000 events in 161.16591ms
Burst completed: 1000 events in 161.43364ms
Burst completed: 1000 events in 148.293941ms
Burst completed: 1000 events in 172.875177ms
Burst completed: 1000 events in 178.930553ms
Burst completed: 1000 events in 161.052715ms
Burst completed: 1000 events in 162.071335ms
Burst completed: 1000 events in 171.849756ms
Burst test completed: 10000 events in 6.70096222s
Events/sec: 1492.32
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 35.645619485s
Combined ops/sec: 280.54
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 596.985601ms
Events/sec: 16750.82
Avg latency: 465.438µs
P90 latency: 594.151µs
P95 latency: 636.592µs
P99 latency: 757.953µs
Bottom 10% Avg latency: 672.673µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 152.121077ms
Burst completed: 1000 events in 160.774367ms
Burst completed: 1000 events in 137.913676ms
Burst completed: 1000 events in 142.916647ms
Burst completed: 1000 events in 166.771131ms
Burst completed: 1000 events in 160.016244ms
Burst completed: 1000 events in 156.369302ms
Burst completed: 1000 events in 158.850666ms
Burst completed: 1000 events in 154.842287ms
Burst completed: 1000 events in 146.828122ms
Burst test completed: 10000 events in 6.557799732s
Events/sec: 1524.90
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4782 reads in 1m0.043775785s
Combined ops/sec: 162.91
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 553.803776ms
Total Events: 10000
Events/sec: 18056.94
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 153 MB
Avg Latency: 428.869µs
P90 Latency: 558.663µs
P95 Latency: 607.997µs
P99 Latency: 749.787µs
Bottom 10% Avg Latency: 643.51µs
----------------------------------------
Test: Burst Pattern
Duration: 6.70096222s
Total Events: 10000
Events/sec: 1492.32
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 204 MB
Avg Latency: 178.04µs
P90 Latency: 224.367µs
P95 Latency: 243.954µs
P99 Latency: 318.225µs
Bottom 10% Avg Latency: 264.418µs
----------------------------------------
Test: Mixed Read/Write
Duration: 35.645619485s
Total Events: 10000
Events/sec: 280.54
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 120 MB
Avg Latency: 9.118653ms
P90 Latency: 19.852346ms
P95 Latency: 21.665387ms
P99 Latency: 23.946919ms
Bottom 10% Avg Latency: 21.867062ms
----------------------------------------
Test: Peak Throughput
Duration: 596.985601ms
Total Events: 10000
Events/sec: 16750.82
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 121 MB
Avg Latency: 465.438µs
P90 Latency: 594.151µs
P95 Latency: 636.592µs
P99 Latency: 757.953µs
Bottom 10% Avg Latency: 672.673µs
----------------------------------------
Test: Burst Pattern
Duration: 6.557799732s
Total Events: 10000
Events/sec: 1524.90
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 167 MB
Avg Latency: 189.538µs
P90 Latency: 247.511µs
P95 Latency: 274.011µs
P99 Latency: 360.977µs
Bottom 10% Avg Latency: 296.967µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.043775785s
Total Events: 9782
Events/sec: 162.91
Success Rate: 97.8%
Concurrent Workers: 8
Memory Used: 193 MB
Avg Latency: 19.562536ms
P90 Latency: 43.431835ms
P95 Latency: 46.326204ms
P99 Latency: 50.533302ms
Bottom 10% Avg Latency: 46.979603ms
----------------------------------------
Report saved to: /tmp/benchmark_next-orly_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly_8/benchmark_report.adoc
20250912222930150767 INF /tmp/benchmark_next-orly_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912222931147258 INF /tmp/benchmark_next-orly_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912222931149928 INF /tmp/benchmark_next-orly_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: next-orly
RELAY_URL: ws://next-orly:8080
TEST_TIMESTAMP: 2025-09-12T22:29:31+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

run_20250912_222649/nostr-rs-relay_results.txt

@@ -0,0 +1,190 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_nostr-rs-relay_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912224044213613 INF /tmp/benchmark_nostr-rs-relay_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912224044214094 INF /tmp/benchmark_nostr-rs-relay_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912224044214130 INF /tmp/benchmark_nostr-rs-relay_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912224044214381 INF (*types.Uint32)(0xc000233c3c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912224044214413 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 613.036589ms
Events/sec: 16312.24
Avg latency: 476.418µs
P90 latency: 627.852µs
P95 latency: 686.836µs
P99 latency: 841.471µs
Bottom 10% Avg latency: 722.179µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 156.218882ms
Burst completed: 1000 events in 170.25756ms
Burst completed: 1000 events in 164.944293ms
Burst completed: 1000 events in 162.767866ms
Burst completed: 1000 events in 148.744622ms
Burst completed: 1000 events in 163.556351ms
Burst completed: 1000 events in 172.007512ms
Burst completed: 1000 events in 159.806858ms
Burst completed: 1000 events in 168.086258ms
Burst completed: 1000 events in 164.931889ms
Burst test completed: 10000 events in 6.657581804s
Events/sec: 1502.05
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 34.850355805s
Combined ops/sec: 286.94
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 707.652249ms
Events/sec: 14131.23
Avg latency: 551.706µs
P90 latency: 724.937µs
P95 latency: 790.563µs
P99 latency: 980.677µs
Bottom 10% Avg latency: 836.659µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 164.62419ms
Burst completed: 1000 events in 155.938167ms
Burst completed: 1000 events in 132.903056ms
Burst completed: 1000 events in 142.377596ms
Burst completed: 1000 events in 155.024184ms
Burst completed: 1000 events in 147.095521ms
Burst completed: 1000 events in 150.027389ms
Burst completed: 1000 events in 152.873043ms
Burst completed: 1000 events in 150.635479ms
Burst completed: 1000 events in 146.45553ms
Burst test completed: 10000 events in 6.519122877s
Events/sec: 1533.95
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4806 reads in 1m0.03930731s
Combined ops/sec: 163.33
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 613.036589ms
Total Events: 10000
Events/sec: 16312.24
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 154 MB
Avg Latency: 476.418µs
P90 Latency: 627.852µs
P95 Latency: 686.836µs
P99 Latency: 841.471µs
Bottom 10% Avg Latency: 722.179µs
----------------------------------------
Test: Burst Pattern
Duration: 6.657581804s
Total Events: 10000
Events/sec: 1502.05
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 209 MB
Avg Latency: 182.765µs
P90 Latency: 234.409µs
P95 Latency: 257.082µs
P99 Latency: 330.764µs
Bottom 10% Avg Latency: 277.843µs
----------------------------------------
Test: Mixed Read/Write
Duration: 34.850355805s
Total Events: 10000
Events/sec: 286.94
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 221 MB
Avg Latency: 8.802188ms
P90 Latency: 19.075904ms
P95 Latency: 20.680962ms
P99 Latency: 22.78326ms
Bottom 10% Avg Latency: 20.897398ms
----------------------------------------
Test: Peak Throughput
Duration: 707.652249ms
Total Events: 10000
Events/sec: 14131.23
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 120 MB
Avg Latency: 551.706µs
P90 Latency: 724.937µs
P95 Latency: 790.563µs
P99 Latency: 980.677µs
Bottom 10% Avg Latency: 836.659µs
----------------------------------------
Test: Burst Pattern
Duration: 6.519122877s
Total Events: 10000
Events/sec: 1533.95
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 168 MB
Avg Latency: 204.873µs
P90 Latency: 271.569µs
P95 Latency: 329.28µs
P99 Latency: 558.829µs
Bottom 10% Avg Latency: 380.136µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.03930731s
Total Events: 9806
Events/sec: 163.33
Success Rate: 98.1%
Concurrent Workers: 8
Memory Used: 164 MB
Avg Latency: 19.506135ms
P90 Latency: 43.206775ms
P95 Latency: 45.944446ms
P99 Latency: 49.910436ms
Bottom 10% Avg Latency: 46.417943ms
----------------------------------------
Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.adoc
20250912224323628137 INF /tmp/benchmark_nostr-rs-relay_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912224324180883 INF /tmp/benchmark_nostr-rs-relay_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912224324184069 INF /tmp/benchmark_nostr-rs-relay_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: nostr-rs-relay
RELAY_URL: ws://nostr-rs-relay:8080
TEST_TIMESTAMP: 2025-09-12T22:43:24+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

run_20250912_222649/relayer-basic_results.txt

@@ -0,0 +1,190 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_relayer-basic_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912223509638362 INF /tmp/benchmark_relayer-basic_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912223509638864 INF /tmp/benchmark_relayer-basic_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912223509638903 INF /tmp/benchmark_relayer-basic_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912223509639558 INF (*types.Uint32)(0xc00570005c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912223509639620 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 659.848301ms
Events/sec: 15155.00
Avg latency: 513.243µs
P90 latency: 706.89µs
P95 latency: 792.685µs
P99 latency: 1.089215ms
Bottom 10% Avg latency: 864.746µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 142.551144ms
Burst completed: 1000 events in 137.426595ms
Burst completed: 1000 events in 139.51501ms
Burst completed: 1000 events in 143.683041ms
Burst completed: 1000 events in 136.500167ms
Burst completed: 1000 events in 139.573844ms
Burst completed: 1000 events in 145.873173ms
Burst completed: 1000 events in 144.256594ms
Burst completed: 1000 events in 157.89329ms
Burst completed: 1000 events in 153.882313ms
Burst test completed: 10000 events in 6.47066659s
Events/sec: 1545.44
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 37.483034098s
Combined ops/sec: 266.79
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 701.479526ms
Events/sec: 14255.58
Avg latency: 544.692µs
P90 latency: 742.997µs
P95 latency: 845.975µs
P99 latency: 1.147624ms
Bottom 10% Avg latency: 913.45µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 143.063212ms
Burst completed: 1000 events in 139.422008ms
Burst completed: 1000 events in 138.184516ms
Burst completed: 1000 events in 148.207616ms
Burst completed: 1000 events in 137.663883ms
Burst completed: 1000 events in 141.607643ms
Burst completed: 1000 events in 143.668551ms
Burst completed: 1000 events in 140.467359ms
Burst completed: 1000 events in 139.860509ms
Burst completed: 1000 events in 138.328306ms
Burst test completed: 10000 events in 6.43971118s
Events/sec: 1552.86
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4870 reads in 1m0.034216467s
Combined ops/sec: 164.41
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 659.848301ms
Total Events: 10000
Events/sec: 15155.00
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 153 MB
Avg Latency: 513.243µs
P90 Latency: 706.89µs
P95 Latency: 792.685µs
P99 Latency: 1.089215ms
Bottom 10% Avg Latency: 864.746µs
----------------------------------------
Test: Burst Pattern
Duration: 6.47066659s
Total Events: 10000
Events/sec: 1545.44
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 206 MB
Avg Latency: 273.645µs
P90 Latency: 407.483µs
P95 Latency: 498.989µs
P99 Latency: 772.406µs
Bottom 10% Avg Latency: 574.801µs
----------------------------------------
Test: Mixed Read/Write
Duration: 37.483034098s
Total Events: 10000
Events/sec: 266.79
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 163 MB
Avg Latency: 9.873363ms
P90 Latency: 21.643466ms
P95 Latency: 22.924497ms
P99 Latency: 24.961324ms
Bottom 10% Avg Latency: 23.201171ms
----------------------------------------
Test: Peak Throughput
Duration: 701.479526ms
Total Events: 10000
Events/sec: 14255.58
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 153 MB
Avg Latency: 544.692µs
P90 Latency: 742.997µs
P95 Latency: 845.975µs
P99 Latency: 1.147624ms
Bottom 10% Avg Latency: 913.45µs
----------------------------------------
Test: Burst Pattern
Duration: 6.43971118s
Total Events: 10000
Events/sec: 1552.86
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 204 MB
Avg Latency: 266.006µs
P90 Latency: 402.683µs
P95 Latency: 491.253µs
P99 Latency: 715.735µs
Bottom 10% Avg Latency: 553.762µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.034216467s
Total Events: 9870
Events/sec: 164.41
Success Rate: 98.7%
Concurrent Workers: 8
Memory Used: 184 MB
Avg Latency: 19.308183ms
P90 Latency: 42.766459ms
P95 Latency: 45.372157ms
P99 Latency: 49.993951ms
Bottom 10% Avg Latency: 46.189525ms
----------------------------------------
Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.adoc
20250912223751453794 INF /tmp/benchmark_relayer-basic_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912223752488197 INF /tmp/benchmark_relayer-basic_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912223752491495 INF /tmp/benchmark_relayer-basic_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: relayer-basic
RELAY_URL: ws://relayer-basic:7447
TEST_TIMESTAMP: 2025-09-12T22:37:52+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
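The reports above carry two latency figures beyond the usual P95/P99: a P90 line and a "Bottom 10% Avg Latency" line. A minimal sketch of how such figures can be derived from raw samples follows; this is illustrative only, not the benchmark's actual code, and it assumes "bottom 10%" means the mean of the slowest decile:

package main

import (
	"fmt"
	"sort"
	"time"
)

// percentileStats sorts a copy of the samples and reads percentiles by
// rank. The bottom-10% figure is computed here as the mean of the
// slowest decile, which is an assumption about what the report means.
func percentileStats(samples []time.Duration) (p90, p95, p99, bottom10Avg time.Duration) {
	if len(samples) == 0 {
		return
	}
	sorted := make([]time.Duration, len(samples))
	copy(sorted, samples)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	rank := func(p float64) int {
		i := int(p * float64(len(sorted)))
		if i >= len(sorted) {
			i = len(sorted) - 1
		}
		return i
	}
	p90 = sorted[rank(0.90)]
	p95 = sorted[rank(0.95)]
	p99 = sorted[rank(0.99)]
	tail := sorted[rank(0.90):] // the slowest 10% of samples
	var sum time.Duration
	for _, d := range tail {
		sum += d
	}
	bottom10Avg = sum / time.Duration(len(tail))
	return
}

func main() {
	samples := []time.Duration{
		500 * time.Microsecond, 550 * time.Microsecond,
		700 * time.Microsecond, 900 * time.Microsecond,
		1200 * time.Microsecond,
	}
	p90, p95, p99, b10 := percentileStats(samples)
	fmt.Println(p90, p95, p99, b10)
}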

@@ -0,0 +1,190 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_strfry_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912223757656112 INF /tmp/benchmark_strfry_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912223757657685 INF /tmp/benchmark_strfry_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912223757657767 INF /tmp/benchmark_strfry_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912223757658314 INF (*types.Uint32)(0xc0055c63ac)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912223757658385 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 655.950723ms
Events/sec: 15245.05
Avg latency: 510.383µs
P90 latency: 690.815µs
P95 latency: 769.085µs
P99 latency: 1.000349ms
Bottom 10% Avg latency: 831.211µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 168.844089ms
Burst completed: 1000 events in 138.644286ms
Burst completed: 1000 events in 167.717113ms
Burst completed: 1000 events in 141.566337ms
Burst completed: 1000 events in 141.186447ms
Burst completed: 1000 events in 145.845582ms
Burst completed: 1000 events in 142.834263ms
Burst completed: 1000 events in 144.707595ms
Burst completed: 1000 events in 144.096361ms
Burst completed: 1000 events in 158.524931ms
Burst test completed: 10000 events in 6.520630606s
Events/sec: 1533.59
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 36.04854491s
Combined ops/sec: 277.40
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 644.867085ms
Events/sec: 15507.07
Avg latency: 501.972µs
P90 latency: 650.197µs
P95 latency: 709.37µs
P99 latency: 914.673µs
Bottom 10% Avg latency: 754.969µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 133.763626ms
Burst completed: 1000 events in 135.289448ms
Burst completed: 1000 events in 136.874215ms
Burst completed: 1000 events in 135.118277ms
Burst completed: 1000 events in 139.247778ms
Burst completed: 1000 events in 142.262475ms
Burst completed: 1000 events in 141.21783ms
Burst completed: 1000 events in 143.089554ms
Burst completed: 1000 events in 148.027057ms
Burst completed: 1000 events in 150.006497ms
Burst test completed: 10000 events in 6.429121967s
Events/sec: 1555.42
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4857 reads in 1m0.047885362s
Combined ops/sec: 164.15
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 655.950723ms
Total Events: 10000
Events/sec: 15245.05
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 154 MB
Avg Latency: 510.383µs
P90 Latency: 690.815µs
P95 Latency: 769.085µs
P99 Latency: 1.000349ms
Bottom 10% Avg Latency: 831.211µs
----------------------------------------
Test: Burst Pattern
Duration: 6.520630606s
Total Events: 10000
Events/sec: 1533.59
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 208 MB
Avg Latency: 223.359µs
P90 Latency: 321.256µs
P95 Latency: 378.145µs
P99 Latency: 530.597µs
Bottom 10% Avg Latency: 412.953µs
----------------------------------------
Test: Mixed Read/Write
Duration: 36.04854491s
Total Events: 10000
Events/sec: 277.40
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 222 MB
Avg Latency: 9.309397ms
P90 Latency: 20.403594ms
P95 Latency: 22.152884ms
P99 Latency: 24.513304ms
Bottom 10% Avg Latency: 22.447709ms
----------------------------------------
Test: Peak Throughput
Duration: 644.867085ms
Total Events: 10000
Events/sec: 15507.07
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 125 MB
Avg Latency: 501.972µs
P90 Latency: 650.197µs
P95 Latency: 709.37µs
P99 Latency: 914.673µs
Bottom 10% Avg Latency: 754.969µs
----------------------------------------
Test: Burst Pattern
Duration: 6.429121967s
Total Events: 10000
Events/sec: 1555.42
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 170 MB
Avg Latency: 239.454µs
P90 Latency: 335.133µs
P95 Latency: 408.012µs
P99 Latency: 593.458µs
Bottom 10% Avg Latency: 446.804µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.047885362s
Total Events: 9857
Events/sec: 164.15
Success Rate: 98.6%
Concurrent Workers: 8
Memory Used: 189 MB
Avg Latency: 19.373297ms
P90 Latency: 42.953055ms
P95 Latency: 45.636867ms
P99 Latency: 49.71977ms
Bottom 10% Avg Latency: 46.144029ms
----------------------------------------
Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_strfry_8/benchmark_report.adoc
20250912224038033173 INF /tmp/benchmark_strfry_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912224039055498 INF /tmp/benchmark_strfry_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912224039058214 INF /tmp/benchmark_strfry_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: strfry
RELAY_URL: ws://strfry:8080
TEST_TIMESTAMP: 2025-09-12T22:40:39+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

main.go
@@ -8,6 +8,7 @@ import (
 	"os/signal"
 	"time"
 
+	"github.com/pkg/profile"
 	"lol.mleku.dev/chk"
 	"lol.mleku.dev/log"
 	"next.orly.dev/app"
@@ -23,7 +24,17 @@ func main() {
 	if cfg, err = config.New(); chk.T(err) {
 	}
 	log.I.F("starting %s %s", cfg.AppName, version.V)
-	startProfiler(cfg.Pprof)
+	switch cfg.Pprof {
+	case "cpu":
+		prof := profile.Start(profile.CPUProfile)
+		defer prof.Stop()
+	case "memory":
+		prof := profile.Start(profile.MemProfile)
+		defer prof.Stop()
+	case "allocation":
+		prof := profile.Start(profile.MemProfileAllocs)
+		defer prof.Stop()
+	}
 	ctx, cancel := context.WithCancel(context.Background())
 	var db *database.D
 	if db, err = database.New(
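The inlined switch replaces the startProfiler helper that this commit deletes (see the final hunk below). The practical difference is lifecycle: Stop is now deferred, so the profile is flushed when main returns, where the old helper registered Stop with an interrupt handler. A minimal sketch of the defer pattern, which is github.com/pkg/profile's documented usage:

package main

import "github.com/pkg/profile"

func main() {
	// Start a CPU profile; deferring Stop flushes the profile when
	// main returns rather than relying on a signal handler.
	defer profile.Start(profile.CPUProfile).Stop()

	// ... application work ...
}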

@@ -2,7 +2,6 @@ package database
 
 import (
 	"fmt"
-	"sort"
 
 	"lol.mleku.dev/chk"
 	"lol.mleku.dev/errorf"
@@ -64,17 +63,17 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
 			return
 		}
 		idPkTss = append(idPkTss, tmp...)
-		// sort by timestamp, so the first is the newest, which the event
-		// must be newer to not be deleted.
-		sort.Slice(
-			idPkTss, func(i, j int) bool {
-				return idPkTss[i].Ts > idPkTss[j].Ts
-			},
-		)
-		if ev.CreatedAt < idPkTss[0].Ts {
+		// find the newest deletion timestamp without sorting to reduce cost
+		maxTs := idPkTss[0].Ts
+		for i := 1; i < len(idPkTss); i++ {
+			if idPkTss[i].Ts > maxTs {
+				maxTs = idPkTss[i].Ts
+			}
+		}
+		if ev.CreatedAt < maxTs {
 			err = errorf.E(
 				"blocked: %0x was deleted by address %s because it is older than the delete: event: %d delete: %d",
-				ev.ID, at, ev.CreatedAt, idPkTss[0].Ts,
+				ev.ID, at, ev.CreatedAt, maxTs,
 			)
 			return
 		}
@@ -114,17 +113,19 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
 			return
 		}
 		idPkTss = append(idPkTss, tmp...)
-		// sort by timestamp, so the first is the newest, which the event
-		// must be newer to not be deleted.
-		sort.Slice(
-			idPkTss, func(i, j int) bool {
-				return idPkTss[i].Ts > idPkTss[j].Ts
-			},
-		)
-		if ev.CreatedAt < idPkTss[0].Ts {
+		// find the newest deletion without sorting to reduce cost
+		maxTs := idPkTss[0].Ts
+		maxId := idPkTss[0].Id
+		for i := 1; i < len(idPkTss); i++ {
+			if idPkTss[i].Ts > maxTs {
+				maxTs = idPkTss[i].Ts
+				maxId = idPkTss[i].Id
+			}
+		}
+		if ev.CreatedAt < maxTs {
 			err = errorf.E(
 				"blocked: %0x was deleted: the event is older than the delete event %0x: event: %d delete: %d",
-				ev.ID, idPkTss[0].Id, ev.CreatedAt, idPkTss[0].Ts,
+				ev.ID, maxId, ev.CreatedAt, maxTs,
 			)
 			return
 		}
@@ -162,20 +163,25 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
 				return
 			}
 			idPkTss = append(idPkTss, tmp...)
-			// sort by timestamp, so the first is the newest
-			sort.Slice(
-				idPkTss, func(i, j int) bool {
-					return idPkTss[i].Ts > idPkTss[j].Ts
-				},
-			)
-			if ev.CreatedAt < idPkTss[0].Ts {
+			// find the newest deletion without sorting to reduce cost
+			maxTs := idPkTss[0].Ts
+			maxId := idPkTss[0].Id
+			for i := 1; i < len(idPkTss); i++ {
+				if idPkTss[i].Ts > maxTs {
+					maxTs = idPkTss[i].Ts
+					maxId = idPkTss[i].Id
+				}
+			}
+			if ev.CreatedAt < maxTs {
 				err = errorf.E(
 					"blocked: %0x was deleted by address %s: event is older than the delete: event: %d delete: %d",
-					ev.ID, at, idPkTss[0].Id, ev.CreatedAt, idPkTss[0].Ts,
+					ev.ID, at, maxId, ev.CreatedAt, maxTs,
 				)
 				return
 			}
+			return
 		}
+		return
 	}
 	// otherwise we check for a delete by event id
 	var idxs []Range
@@ -196,7 +202,15 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
 		if s, err = d.GetSerialsByRange(idx); chk.E(err) {
 			return
 		}
-		sers = append(sers, s...)
+		if len(s) > 0 {
+			// For e-tag deletions (delete by ID), any deletion event means the event cannot be resubmitted
+			// regardless of timestamp, since it's a specific deletion of this exact event
+			err = errorf.E(
+				"blocked: %0x was deleted by ID and cannot be resubmitted",
+				ev.ID,
+			)
+			return
+		}
 	}
 	if len(sers) > 0 {
 		// For e-tag deletions (delete by ID), any deletion event means the event cannot be resubmitted
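All three rewritten branches in this file apply the same change: the sort.Slice call, whose result was only ever read at index 0, is replaced with a single-pass maximum scan, turning O(n log n) work into O(n). Isolated as a standalone sketch, where the IdPkTs struct below and its field types are assumptions inferred from the diff, not the package's actual definition:

package main

import "fmt"

// IdPkTs is an assumed shape inferred from the diff: each entry pairs a
// deletion event's id with its created_at timestamp.
type IdPkTs struct {
	Id []byte
	Ts int64
}

// newestDeletion returns the timestamp and id of the newest entry in one
// pass. Like the code above, it expects a non-empty slice.
func newestDeletion(idPkTss []IdPkTs) (maxTs int64, maxId []byte) {
	maxTs, maxId = idPkTss[0].Ts, idPkTss[0].Id
	for i := 1; i < len(idPkTss); i++ {
		if idPkTss[i].Ts > maxTs {
			maxTs, maxId = idPkTss[i].Ts, idPkTss[i].Id
		}
	}
	return
}

func main() {
	entries := []IdPkTs{
		{Id: []byte{0x01}, Ts: 100},
		{Id: []byte{0x02}, Ts: 300},
		{Id: []byte{0x03}, Ts: 200},
	}
	ts, id := newestDeletion(entries)
	fmt.Printf("newest delete: %0x at %d\n", id, ts) // newest delete: 02 at 300
}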

@@ -359,7 +359,7 @@ func (r *Client) ConnectWithTLS(
 				okCallback(env.OK, env.ReasonString())
 			} else {
 				log.I.F(
-					"{%s} got an unexpected OK message for event %s",
+					"{%s} got an unexpected OK message for event %0x",
 					r.URL,
 					env.EventID,
 				)
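This one-character change is a logging fix: env.EventID holds raw bytes, so %s prints them as mostly unprintable text, while %0x renders lowercase hex. A quick illustration:

package main

import "fmt"

func main() {
	id := []byte{0xde, 0xad, 0xbe, 0xef}
	fmt.Printf("%s\n", id)  // raw bytes interpreted as text: unreadable
	fmt.Printf("%0x\n", id) // deadbeef: hex, readable in logs
}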

@@ -15,3 +15,22 @@ func FastEqual[A constraints.Bytes, B constraints.Bytes](a A, b B) (same bool) {
 	}
 	return true
 }
+
+func FastCompare[A constraints.Bytes, B constraints.Bytes](
+	a A, b B,
+) (diff int) {
+	if len(a) != len(b) {
+		return
+	}
+	ab := []byte(a)
+	bb := []byte(b)
+	for i, v := range ab {
+		if v != bb[i] {
+			if v > bb[i] {
+				return 1
+			}
+			return -1
+		}
+	}
+	return 0
+}
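FastCompare mirrors bytes.Compare for equal-length inputs, but note the early return: mismatched lengths also yield 0, the same value that signals equality, so the function is only meaningful for fixed-width keys such as 32-byte ids. A usage sketch under that assumption (it presumes FastCompare is in scope from the package above):

	a := []byte{0x01, 0x02}
	b := []byte{0x01, 0x03}
	fmt.Println(FastCompare(a, b))            // -1: a sorts before b
	fmt.Println(FastCompare(b, a))            // 1: b sorts after a
	fmt.Println(FastCompare(a, a))            // 0: equal
	fmt.Println(FastCompare(a, []byte{0x01})) // also 0: length mismatch, not equality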

@@ -1,20 +0,0 @@
-package main
-
-import (
-	"github.com/pkg/profile"
-	"next.orly.dev/pkg/utils/interrupt"
-)
-
-func startProfiler(mode string) {
-	switch mode {
-	case "cpu":
-		prof := profile.Start(profile.CPUProfile)
-		interrupt.AddHandler(prof.Stop)
-	case "memory":
-		prof := profile.Start(profile.MemProfile)
-		interrupt.AddHandler(prof.Stop)
-	case "allocation":
-		prof := profile.Start(profile.MemProfileAllocs)
-		interrupt.AddHandler(prof.Stop)
-	}
-}