added profiler tooling to enable automated generation of profile reports

2025-09-13 13:06:52 +01:00
parent 509aac3819
commit 935eb1fb0b
11 changed files with 1067 additions and 58 deletions


@@ -23,19 +23,21 @@ import (
// and default values. It defines parameters for app behaviour, storage
// locations, logging, and network settings used across the relay service.
type C struct {
AppName string `env:"ORLY_APP_NAME" usage:"set a name to display on information about the relay" default:"ORLY"`
DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/share/ORLY"`
Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
HealthPort int `env:"ORLY_HEALTH_PORT" default:"0" usage:"optional health check HTTP port; 0 disables"`
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"relay log level: fatal error warn info debug trace"`
DBLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
LogToStdout bool `env:"ORLY_LOG_TO_STDOUT" default:"false" usage:"log to stdout instead of stderr"`
Pprof string `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation"`
IPWhitelist []string `env:"ORLY_IP_WHITELIST" usage:"comma-separated list of IP addresses to allow access from, matches on prefixes to allow private subnets, e.g. 10.0.0 = 10.0.0.0/24"`
Admins []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
Owners []string `env:"ORLY_OWNERS" usage:"comma-separated list of owner npubs, who have full control of the relay for wipe and restart and other functions"`
ACLMode string `env:"ORLY_ACL_MODE" usage:"ACL mode: follows,none" default:"none"`
AppName string `env:"ORLY_APP_NAME" usage:"set a name to display on information about the relay" default:"ORLY"`
DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/share/ORLY"`
Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
HealthPort int `env:"ORLY_HEALTH_PORT" default:"0" usage:"optional health check HTTP port; 0 disables"`
EnableShutdown bool `env:"ORLY_ENABLE_SHUTDOWN" default:"false" usage:"if true, expose /shutdown on the health port to gracefully stop the process (for profiling)"`
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"relay log level: fatal error warn info debug trace"`
DBLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
LogToStdout bool `env:"ORLY_LOG_TO_STDOUT" default:"false" usage:"log to stdout instead of stderr"`
Pprof string `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation"`
PprofPath string `env:"ORLY_PPROF_PATH" usage:"optional directory to write pprof profiles into (inside container); default is temporary dir"`
IPWhitelist []string `env:"ORLY_IP_WHITELIST" usage:"comma-separated list of IP addresses to allow access from, matches on prefixes to allow private subnets, e.g. 10.0.0 = 10.0.0.0/24"`
Admins []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
Owners []string `env:"ORLY_OWNERS" usage:"comma-separated list of owner npubs, who have full control of the relay for wipe and restart and other functions"`
ACLMode string `env:"ORLY_ACL_MODE" usage:"ACL mode: follows,none" default:"none"`
}
// New creates and initializes a new configuration object for the relay
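
The two new fields, PprofPath and EnableShutdown, complement the existing Pprof mode switch: ORLY_PPROF selects what to profile, ORLY_PPROF_PATH pins the output directory instead of a temporary one, and ORLY_ENABLE_SHUTDOWN exposes /shutdown on the health port so the process can be stopped cleanly and the profile flushed. A minimal local invocation might look like the following sketch; the relay binary name and output path are assumptions, not part of this commit:

# Hypothetical local run with CPU profiling written to a known directory
ORLY_PPROF=cpu \
ORLY_PPROF_PATH=/tmp/orly-profiles \
ORLY_HEALTH_PORT=18080 \
ORLY_ENABLE_SHUTDOWN=true \
./relay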


@@ -101,17 +101,17 @@ func (p *P) Receive(msg typer.T) {
if m.Cancel {
if m.Id == "" {
p.removeSubscriber(m.Conn)
log.D.F("removed listener %s", m.remote)
// log.D.F("removed listener %s", m.remote)
} else {
p.removeSubscriberId(m.Conn, m.Id)
log.D.C(
func() string {
return fmt.Sprintf(
"removed subscription %s for %s", m.Id,
m.remote,
)
},
)
// log.D.C(
// func() string {
// return fmt.Sprintf(
// "removed subscription %s for %s", m.Id,
// m.remote,
// )
// },
// )
}
return
}
@@ -123,27 +123,27 @@ func (p *P) Receive(msg typer.T) {
S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
}
p.Map[m.Conn] = subs
log.D.C(
func() string {
return fmt.Sprintf(
"created new subscription for %s, %s",
m.remote,
m.Filters.Marshal(nil),
)
},
)
// log.D.C(
// func() string {
// return fmt.Sprintf(
// "created new subscription for %s, %s",
// m.remote,
// m.Filters.Marshal(nil),
// )
// },
// )
} else {
subs[m.Id] = Subscription{
S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
}
log.D.C(
func() string {
return fmt.Sprintf(
"added subscription %s for %s", m.Id,
m.remote,
)
},
)
// log.D.C(
// func() string {
// return fmt.Sprintf(
// "added subscription %s for %s", m.Id,
// m.remote,
// )
// },
// )
}
}
}

main.go

@@ -28,14 +28,29 @@ func main() {
log.I.F("starting %s %s", cfg.AppName, version.V)
switch cfg.Pprof {
case "cpu":
prof := profile.Start(profile.CPUProfile)
defer prof.Stop()
if cfg.PprofPath != "" {
prof := profile.Start(profile.CPUProfile, profile.ProfilePath(cfg.PprofPath))
defer prof.Stop()
} else {
prof := profile.Start(profile.CPUProfile)
defer prof.Stop()
}
case "memory":
prof := profile.Start(profile.MemProfile)
defer prof.Stop()
if cfg.PprofPath != "" {
prof := profile.Start(profile.MemProfile, profile.ProfilePath(cfg.PprofPath))
defer prof.Stop()
} else {
prof := profile.Start(profile.MemProfile)
defer prof.Stop()
}
case "allocation":
prof := profile.Start(profile.MemProfileAllocs)
defer prof.Stop()
if cfg.PprofPath != "" {
prof := profile.Start(profile.MemProfileAllocs, profile.ProfilePath(cfg.PprofPath))
defer prof.Stop()
} else {
prof := profile.Start(profile.MemProfileAllocs)
defer prof.Stop()
}
}
ctx, cancel := context.WithCancel(context.Background())
var db *database.D
@@ -53,7 +68,7 @@ func main() {
// Start health check HTTP server if configured
var healthSrv *http.Server
if cfg.HealthPort > 0 {
mux := http.NewServeMux()
mux := http.NewServeMux()
mux.HandleFunc(
"/healthz", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
@@ -61,6 +76,18 @@ func main() {
log.I.F("health check ok")
},
)
// Optional shutdown endpoint to gracefully stop the process so that the deferred profile writers run
if cfg.EnableShutdown {
mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte("shutting down"))
log.I.F("shutdown requested via /shutdown; sending SIGINT to self")
go func() {
p, _ := os.FindProcess(os.Getpid())
_ = p.Signal(os.Interrupt)
}()
})
}
healthSrv = &http.Server{
Addr: fmt.Sprintf(
"%s:%d", cfg.Listen, cfg.HealthPort,


@@ -173,10 +173,10 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
}
}
if ev.CreatedAt < maxTs {
err = fmt.Errorf(
"blocked: was deleted by address %s: event is older than the delete: event: %d delete: %d",
at, ev.CreatedAt, maxTs,
)
// err = fmt.Errorf(
// "blocked: was deleted by address %s: event is older than the delete: event: %d delete: %d",
// at, ev.CreatedAt, maxTs,
// )
return
}
return
@@ -205,20 +205,20 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
if len(s) > 0 {
// For e-tag deletions (delete by ID), any deletion event means the event cannot be resubmitted
// regardless of timestamp, since it's a specific deletion of this exact event
err = errorf.E(
"blocked: was deleted by ID and cannot be resubmitted",
// ev.ID,
)
// err = errorf.E(
// "blocked: was deleted by ID and cannot be resubmitted",
// // ev.ID,
// )
return
}
}
if len(sers) > 0 {
// For e-tag deletions (delete by ID), any deletion event means the event cannot be resubmitted
// regardless of timestamp, since it's a specific deletion of this exact event
err = errorf.E(
"blocked: was deleted by ID and cannot be resubmitted",
// ev.ID,
)
// err = errorf.E(
// "blocked: was deleted by ID and cannot be resubmitted",
// // ev.ID,
// )
return
}

profiler/analyze.sh (new file)

@@ -0,0 +1,82 @@
#!/usr/bin/env bash
set -euo pipefail
OUT_DIR="/work/reports"
BIN="/build/relay"
CPU_DIR="/profiles/cpu"
MEM_DIR="/profiles/mem"
ALLOC_DIR="/profiles/alloc"
REPORT="$OUT_DIR/profile-analysis.txt"
mkdir -p "$OUT_DIR"
# Helper: wait for any file matching a glob in a directory to exist and be non-empty
wait_for_glob() {
  local dir="$1"; local pattern="$2"; local timeout=${3:-180}; local waited=0
  echo "[analyze] Waiting for profiles in ${dir}/${pattern} (timeout ${timeout}s)..." >&2
  while [[ $waited -lt $timeout ]]; do
    # shellcheck disable=SC2086
    local f
    f=$(ls -1 ${dir}/${pattern} 2>/dev/null | head -n1 || true)
    if [[ -n "${f}" && -s "${f}" ]]; then
      echo "[analyze] Found: ${f}" >&2
      echo -n "${f}"
      return 0
    fi
    sleep 3; waited=$((waited+3))
  done
  echo "" # return empty string
  return 1
}
CPU_FILE=$(wait_for_glob "$CPU_DIR" "cpu*.pprof" 180 || true)
MEM_FILE=$(wait_for_glob "$MEM_DIR" "*.pprof" 180 || true)
ALLOC_FILE=$(wait_for_glob "$ALLOC_DIR" "*.pprof" 180 || true)
if [[ -z "$CPU_FILE" ]]; then echo "[analyze] WARNING: CPU profile not found at $CPU_DIR" >&2; fi
if [[ -z "$MEM_FILE" ]]; then echo "[analyze] WARNING: Mem profile not found at $MEM_DIR" >&2; fi
if [[ -z "$ALLOC_FILE" ]]; then echo "[analyze] WARNING: Alloc profile not found at $ALLOC_DIR" >&2; fi
{
  echo "==== next.orly.dev Profiling Analysis ===="
  date
  echo
  if [[ -n "$CPU_FILE" && -s "$CPU_FILE" ]]; then
    echo "-- CPU Hotspots (top by flat CPU) --"
    go tool pprof -top -nodecount=15 "$BIN" "$CPU_FILE" 2>/dev/null | sed '1,2d'
    echo
  else
    echo "CPU profile: not available"
    echo
  fi
  if [[ -n "$MEM_FILE" && -s "$MEM_FILE" ]]; then
    echo "-- Memory (In-Use Space) Hotspots --"
    go tool pprof -top -sample_index=inuse_space -nodecount=15 "$BIN" "$MEM_FILE" 2>/dev/null | sed '1,2d'
    echo
  else
    echo "Memory (in-use) profile: not available"
    echo
  fi
  if [[ -n "$ALLOC_FILE" && -s "$ALLOC_FILE" ]]; then
    echo "-- Allocations (Total Alloc Space) Hotspots --"
    go tool pprof -top -sample_index=alloc_space -nodecount=15 "$BIN" "$ALLOC_FILE" 2>/dev/null | sed '1,2d'
    echo
    echo "-- Allocation Frequency (Alloc Objects) --"
    go tool pprof -top -sample_index=alloc_objects -nodecount=15 "$BIN" "$ALLOC_FILE" 2>/dev/null | sed '1,2d'
    echo
  else
    echo "Allocation profile: not available"
    echo
  fi
  echo "Notes:"
  echo "- CPU section identifies functions using the most CPU time."
  echo "- Memory section identifies which functions retain the most memory (in-use)."
  echo "- Allocations sections identify functions responsible for the most allocation volume and count, which correlates with GC pressure."
  echo "- Profiles are created by github.com/pkg/profile and may only be flushed when the relay process receives a shutdown; CPU profile often requires process exit."
} > "$REPORT"
echo "[analyze] Wrote report to $REPORT" >&2

profiler/docker-compose.yml (new file)

@@ -0,0 +1,158 @@
services:
  # Build image for next.orly.dev relay (same as benchmark build)
  next-orly-image:
    build:
      context: ..
      dockerfile: cmd/benchmark/Dockerfile.next-orly
    image: next-orly:profiler

  # Run relay with CPU profiling
  next-orly-cpu:
    image: next-orly:profiler
    build:
      context: ..
      dockerfile: cmd/benchmark/Dockerfile.next-orly
    container_name: profiler-next-orly-cpu
    environment:
      - ORLY_DATA_DIR=/data
      - ORLY_LISTEN=0.0.0.0
      - ORLY_PORT=8080
      - ORLY_LOG_LEVEL=info
      - ORLY_PPROF=cpu
      - ORLY_PPROF_PATH=/profiles
      - ORLY_HEALTH_PORT=18080
      - ORLY_ENABLE_SHUTDOWN=true
    volumes:
      - ./data/cpu:/data
      - ./profiles/cpu:/profiles
    ports:
      - "8101:8080"
      - "18081:18080"
    networks:
      - profiler-net
    healthcheck:
      test: ["CMD-SHELL", "code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null"]
      interval: 15s
      timeout: 10s
      retries: 5
      start_period: 20s

  # Run relay with in-use memory profiling
  next-orly-mem:
    image: next-orly:profiler
    build:
      context: ..
      dockerfile: cmd/benchmark/Dockerfile.next-orly
    container_name: profiler-next-orly-mem
    environment:
      - ORLY_DATA_DIR=/data
      - ORLY_LISTEN=0.0.0.0
      - ORLY_PORT=8080
      - ORLY_LOG_LEVEL=info
      - ORLY_PPROF=memory
      - ORLY_PPROF_PATH=/profiles
      - ORLY_HEALTH_PORT=18080
      - ORLY_ENABLE_SHUTDOWN=true
    volumes:
      - ./data/mem:/data
      - ./profiles/mem:/profiles
    ports:
      - "8102:8080"
      - "18082:18080"
    networks:
      - profiler-net
    healthcheck:
      test: ["CMD-SHELL", "code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null"]
      interval: 15s
      timeout: 10s
      retries: 5
      start_period: 20s

  # Run relay with allocation profiling (alloc_space/alloc_objects)
  next-orly-alloc:
    image: next-orly:profiler
    build:
      context: ..
      dockerfile: cmd/benchmark/Dockerfile.next-orly
    container_name: profiler-next-orly-alloc
    environment:
      - ORLY_DATA_DIR=/data
      - ORLY_LISTEN=0.0.0.0
      - ORLY_PORT=8080
      - ORLY_LOG_LEVEL=info
      - ORLY_PPROF=allocation
      - ORLY_PPROF_PATH=/profiles
      - ORLY_HEALTH_PORT=18080
      - ORLY_ENABLE_SHUTDOWN=true
    volumes:
      - ./data/alloc:/data
      - ./profiles/alloc:/profiles
    ports:
      - "8103:8080"
      - "18083:18080"
    networks:
      - profiler-net
    healthcheck:
      test: ["CMD-SHELL", "code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null"]
      interval: 15s
      timeout: 10s
      retries: 5
      start_period: 20s

  # Load generator using existing benchmark runner
  loadgen:
    build:
      context: ..
      dockerfile: cmd/benchmark/Dockerfile.benchmark
    container_name: profiler-loadgen
    depends_on:
      next-orly-cpu:
        condition: service_healthy
      next-orly-mem:
        condition: service_healthy
      next-orly-alloc:
        condition: service_healthy
    environment:
      - BENCHMARK_TARGETS=next-orly-cpu:8080,next-orly-mem:8080,next-orly-alloc:8080
      - BENCHMARK_EVENTS=5000
      - BENCHMARK_WORKERS=4
      - BENCHMARK_DURATION=30s
    volumes:
      - ./reports:/reports
    networks:
      - profiler-net
    command: >
      sh -c "
      echo 'Waiting a bit before starting load...' && \
      sleep 10 && \
      /app/benchmark-runner --output-dir=/reports && \
      echo 'Triggering relay shutdowns to flush profiles...' && \
      curl -sS -X POST http://next-orly-cpu:18080/shutdown || true && \
      curl -sS -X POST http://next-orly-mem:18080/shutdown || true && \
      curl -sS -X POST http://next-orly-alloc:18080/shutdown || true && \
      echo 'Load generation complete. Waiting for relays to exit...' && \
      sleep 5
      "

  # Analyzer service: use the builder stage so Go toolchain is available
  analyzer:
    build:
      context: ..
      dockerfile: cmd/benchmark/Dockerfile.next-orly
      target: builder
    container_name: profiler-analyzer
    depends_on:
      loadgen:
        condition: service_completed_successfully
    working_dir: /work
    volumes:
      - ./profiles:/profiles:ro
      - ./reports:/work/reports
      - ./analyze.sh:/work/analyze.sh:ro
    networks:
      - profiler-net
    command: ["bash", "-lc", "bash /work/analyze.sh"]

networks:
  profiler-net:
    driver: bridge
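
Taken together, the stack builds the relay image, runs three profiled relay instances, drives load against them, triggers /shutdown to flush the profiles, and then runs analyze.sh to produce the report. A possible end-to-end invocation from the repository root (standard docker compose flags; not defined by this commit):

# Bring up the full profiling pipeline and let it run to completion
docker compose -f profiler/docker-compose.yml up --build
# After the analyzer finishes, the summary lands in the bind-mounted reports directory
cat profiler/reports/profile-analysis.txt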


@@ -0,0 +1,84 @@
==== next.orly.dev Profiling Analysis ====
Sat Sep 13 11:46:40 UTC 2025
-- CPU Hotspots (top by flat CPU) --
Type: cpu
Time: Sep 13, 2025 at 11:39am (UTC)
Duration: 400.39s, Total samples = 2620ms ( 0.65%)
Showing nodes accounting for 1950ms, 74.43% of 2620ms total
Dropped 58 nodes (cum <= 13.10ms)
Showing top 15 nodes out of 78
flat flat% sum% cum cum%
870ms 33.21% 33.21% 870ms 33.21% internal/runtime/syscall.Syscall6
220ms 8.40% 41.60% 220ms 8.40% runtime.futex
210ms 8.02% 49.62% 400ms 15.27% runtime.stealWork
160ms 6.11% 55.73% 160ms 6.11% runtime.write1
70ms 2.67% 58.40% 70ms 2.67% runtime.nanotime1
60ms 2.29% 60.69% 60ms 2.29% runtime.pMask.read (inline)
50ms 1.91% 62.60% 380ms 14.50% github.com/dgraph-io/badger/v4.(*levelsController).runCompactor
50ms 1.91% 64.50% 50ms 1.91% runtime.(*randomEnum).next (inline)
50ms 1.91% 66.41% 1980ms 75.57% runtime.findRunnable
40ms 1.53% 67.94% 40ms 1.53% runtime.memclrNoHeapPointers
40ms 1.53% 69.47% 930ms 35.50% runtime.netpoll
40ms 1.53% 70.99% 150ms 5.73% runtime.selectgo
30ms 1.15% 72.14% 860ms 32.82% internal/runtime/syscall.EpollWait
30ms 1.15% 73.28% 40ms 1.53% runtime.(*mcache).prepareForSweep
30ms 1.15% 74.43% 30ms 1.15% runtime.cheaprand (inline)
-- Memory (In-Use Space) Hotspots --
Type: inuse_space
Time: Sep 13, 2025 at 11:46am (UTC)
Showing nodes accounting for 85209.25kB, 99.24% of 85865.58kB total
Dropped 112 nodes (cum <= 429.33kB)
flat flat% sum% cum cum%
85200kB 99.22% 99.22% 85200kB 99.22% github.com/dgraph-io/badger/v4/skl.newArena (inline)
9.25kB 0.011% 99.24% 85654.66kB 99.75% github.com/dgraph-io/badger/v4.Open
0 0% 99.24% 85200kB 99.22% github.com/dgraph-io/badger/v4.(*DB).newMemTable
0 0% 99.24% 85200kB 99.22% github.com/dgraph-io/badger/v4.(*DB).openMemTable
0 0% 99.24% 85200kB 99.22% github.com/dgraph-io/badger/v4/skl.NewSkiplist
0 0% 99.24% 85654.66kB 99.75% main.main
0 0% 99.24% 85654.66kB 99.75% next.orly.dev/pkg/database.New
0 0% 99.24% 85654.66kB 99.75% runtime.main
-- Allocations (Total Alloc Space) Hotspots --
Type: alloc_space
Time: Sep 13, 2025 at 11:46am (UTC)
Showing nodes accounting for 83.20MB, 100% of 83.21MB total
Dropped 26 nodes (cum <= 0.42MB)
flat flat% sum% cum cum%
83.20MB 100% 100% 83.20MB 100% github.com/dgraph-io/badger/v4/skl.newArena (inline)
0 0% 100% 83.20MB 100% github.com/dgraph-io/badger/v4.(*DB).newMemTable
0 0% 100% 83.20MB 100% github.com/dgraph-io/badger/v4.(*DB).openMemTable
0 0% 100% 83.20MB 100% github.com/dgraph-io/badger/v4.Open
0 0% 100% 83.20MB 100% github.com/dgraph-io/badger/v4/skl.NewSkiplist
0 0% 100% 83.20MB 100% main.main
0 0% 100% 83.20MB 100% next.orly.dev/pkg/database.New
0 0% 100% 83.20MB 100% runtime.main
-- Allocation Frequency (Alloc Objects) --
Type: alloc_objects
Time: Sep 13, 2025 at 11:46am (UTC)
Showing nodes accounting for 7, 100% of 7 total
Showing top 15 nodes out of 34
flat flat% sum% cum cum%
2 28.57% 28.57% 2 28.57% regexp/syntax.(*parser).newRegexp (inline)
1 14.29% 42.86% 1 14.29% github.com/dgraph-io/badger/v4.(*levelsController).levelTargets
1 14.29% 57.14% 1 14.29% github.com/dgraph-io/badger/v4/skl.newArena (inline)
1 14.29% 71.43% 1 14.29% github.com/pkg/profile.Start
1 14.29% 85.71% 1 14.29% github.com/pkg/profile.Start.func11
1 14.29% 100% 1 14.29% runtime.allocm
0 0% 100% 1 14.29% github.com/dgraph-io/badger/v4.(*DB).newMemTable
0 0% 100% 1 14.29% github.com/dgraph-io/badger/v4.(*DB).openMemTable
0 0% 100% 1 14.29% github.com/dgraph-io/badger/v4.(*levelsController).pickCompactLevels
0 0% 100% 1 14.29% github.com/dgraph-io/badger/v4.(*levelsController).runCompactor
0 0% 100% 1 14.29% github.com/dgraph-io/badger/v4.(*levelsController).runCompactor.func3
0 0% 100% 1 14.29% github.com/dgraph-io/badger/v4.Open
0 0% 100% 1 14.29% github.com/dgraph-io/badger/v4/skl.NewSkiplist
0 0% 100% 2 28.57% github.com/google/pprof/profile.init
0 0% 100% 2 28.57% main.main
Notes:
- CPU section identifies functions using the most CPU time.
- Memory section identifies which functions retain the most memory (in-use).
- Allocations sections identify functions responsible for the most allocation volume and count, which correlates with GC pressure.
- Profiles are created by github.com/pkg/profile and may only be flushed when the relay process receives a shutdown; CPU profile often requires process exit.


@@ -0,0 +1,86 @@
================================================================
NOSTR RELAY BENCHMARK AGGREGATE REPORT
================================================================
Generated: 2025-09-13T11:46:34+00:00
Benchmark Configuration:
Events per test: 5000
Concurrent workers: 4
Test duration: 30s
Relays tested: 3
================================================================
SUMMARY BY RELAY
================================================================
Relay: next-orly-cpu
----------------------------------------
Status: COMPLETED
Events/sec: 13750.67
Events/sec: 578.63
Events/sec: 13169.21
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 283.533µs
Bottom 10% Avg Latency: 455.08µs
Avg Latency: 214.446µs
P95 Latency: 413.136µs
P95 Latency: 335.028µs
P95 Latency: 10.080396ms
Relay: next-orly-mem
----------------------------------------
Status: COMPLETED
Events/sec: 14128.71
Events/sec: 524.40
Events/sec: 13535.38
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 273.836µs
Bottom 10% Avg Latency: 417.518µs
Avg Latency: 179.076µs
P95 Latency: 397.186µs
P95 Latency: 255.006µs
P95 Latency: 11.501888ms
Relay: next-orly-alloc
----------------------------------------
Status: COMPLETED
Events/sec: 14708.43
Events/sec: 526.24
Events/sec: 12079.01
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 264.237µs
Bottom 10% Avg Latency: 399.688µs
Avg Latency: 182.339µs
P95 Latency: 383.139µs
P95 Latency: 270.195µs
P95 Latency: 10.117687ms
================================================================
DETAILED RESULTS
================================================================
Individual relay reports are available in:
- /reports/run_20250913_114010/next-orly-alloc_results.txt
- /reports/run_20250913_114010/next-orly-cpu_results.txt
- /reports/run_20250913_114010/next-orly-mem_results.txt
================================================================
BENCHMARK COMPARISON TABLE
================================================================
Relay Status Peak Tput/s Avg Latency Success Rate
---- ------ ----------- ----------- ------------
next-orly-cpu OK 13750.67 283.533µs 100.0%
next-orly-mem OK 14128.71 273.836µs 100.0%
next-orly-alloc OK 14708.43 264.237µs 100.0%
================================================================
End of Report
================================================================


@@ -0,0 +1,190 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly-alloc_8
Events: 5000, Workers: 4, Duration: 30s
20250913114427235918 INF /tmp/benchmark_next-orly-alloc_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250913114427236368 INF /tmp/benchmark_next-orly-alloc_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250913114427236420 INF /tmp/benchmark_next-orly-alloc_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250913114427237054 INF (*types.Uint32)(0xc0000100f0)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250913114427237113 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 5000/5000 (100.0%)
Duration: 339.941208ms
Events/sec: 14708.43
Avg latency: 264.237µs
P90 latency: 347.802µs
P95 latency: 383.139µs
P99 latency: 447.141µs
Bottom 10% Avg latency: 399.688µs
=== Burst Pattern Test ===
Burst completed: 500 events in 496.639004ms
Burst completed: 500 events in 395.431081ms
Burst completed: 500 events in 411.025866ms
Burst completed: 500 events in 419.57678ms
Burst completed: 500 events in 489.269517ms
Burst completed: 500 events in 428.035789ms
Burst completed: 500 events in 476.345049ms
Burst completed: 500 events in 515.739514ms
Burst completed: 500 events in 414.702914ms
Burst completed: 500 events in 440.760343ms
Burst test completed: 5000 events in 9.501434093s
Events/sec: 526.24
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 2500 writes, 2500 reads in 23.63556576s
Combined ops/sec: 211.55
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 5000/5000 (100.0%)
Duration: 413.941287ms
Events/sec: 12079.01
Avg latency: 323.873µs
P90 latency: 436.851µs
P95 latency: 488.881µs
P99 latency: 650.429µs
Bottom 10% Avg latency: 522.984µs
=== Burst Pattern Test ===
Burst completed: 500 events in 271.506837ms
Burst completed: 500 events in 361.931625ms
Burst completed: 500 events in 381.160975ms
Burst completed: 500 events in 310.403881ms
Burst completed: 500 events in 388.695936ms
Burst completed: 500 events in 323.300662ms
Burst completed: 500 events in 249.590375ms
Burst completed: 500 events in 318.935421ms
Burst completed: 500 events in 361.329644ms
Burst completed: 500 events in 474.806655ms
Burst test completed: 5000 events in 8.457868323s
Events/sec: 591.17
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 2500 writes, 2152 reads in 30.024551334s
Combined ops/sec: 154.94
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 339.941208ms
Total Events: 5000
Events/sec: 14708.43
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 142 MB
Avg Latency: 264.237µs
P90 Latency: 347.802µs
P95 Latency: 383.139µs
P99 Latency: 447.141µs
Bottom 10% Avg Latency: 399.688µs
----------------------------------------
Test: Burst Pattern
Duration: 9.501434093s
Total Events: 5000
Events/sec: 526.24
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 192 MB
Avg Latency: 182.339µs
P90 Latency: 240.387µs
P95 Latency: 270.195µs
P99 Latency: 333.575µs
Bottom 10% Avg Latency: 284.192µs
----------------------------------------
Test: Mixed Read/Write
Duration: 23.63556576s
Total Events: 5000
Events/sec: 211.55
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 199 MB
Avg Latency: 4.300403ms
P90 Latency: 8.510786ms
P95 Latency: 10.117687ms
P99 Latency: 12.490155ms
Bottom 10% Avg Latency: 10.404696ms
----------------------------------------
Test: Peak Throughput
Duration: 413.941287ms
Total Events: 5000
Events/sec: 12079.01
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 134 MB
Avg Latency: 323.873µs
P90 Latency: 436.851µs
P95 Latency: 488.881µs
P99 Latency: 650.429µs
Bottom 10% Avg Latency: 522.984µs
----------------------------------------
Test: Burst Pattern
Duration: 8.457868323s
Total Events: 5000
Events/sec: 591.17
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 185 MB
Avg Latency: 213.464µs
P90 Latency: 290.884µs
P95 Latency: 333.555µs
P99 Latency: 446.27µs
Bottom 10% Avg Latency: 360.86µs
----------------------------------------
Test: Mixed Read/Write
Duration: 30.024551334s
Total Events: 4652
Events/sec: 154.94
Success Rate: 93.0%
Concurrent Workers: 4
Memory Used: 137 MB
Avg Latency: 8.164383ms
P90 Latency: 17.857487ms
P95 Latency: 19.02409ms
P99 Latency: 21.306135ms
Bottom 10% Avg Latency: 19.344124ms
----------------------------------------
Report saved to: /tmp/benchmark_next-orly-alloc_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly-alloc_8/benchmark_report.adoc
20250913114629624246 INF /tmp/benchmark_next-orly-alloc_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250913114629944720 INF /tmp/benchmark_next-orly-alloc_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 01. Size: 22 MiB of 22 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250913114629946593 INF /tmp/benchmark_next-orly-alloc_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: next-orly-alloc
RELAY_URL: ws://next-orly-alloc:8080
TEST_TIMESTAMP: 2025-09-13T11:46:29+00:00
BENCHMARK_CONFIG:
Events: 5000
Workers: 4
Duration: 30s


@@ -0,0 +1,190 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly-cpu_8
Events: 5000, Workers: 4, Duration: 30s
20250913114010336003 INF /tmp/benchmark_next-orly-cpu_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250913114010336560 INF /tmp/benchmark_next-orly-cpu_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250913114010336587 INF /tmp/benchmark_next-orly-cpu_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250913114010336810 INF (*types.Uint32)(0xc00001084c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250913114010336840 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 5000/5000 (100.0%)
Duration: 363.618589ms
Events/sec: 13750.67
Avg latency: 283.533µs
P90 latency: 378.941µs
P95 latency: 413.136µs
P99 latency: 527.123µs
Bottom 10% Avg latency: 455.08µs
=== Burst Pattern Test ===
Burst completed: 500 events in 383.290907ms
Burst completed: 500 events in 379.055138ms
Burst completed: 500 events in 344.482762ms
Burst completed: 500 events in 304.126389ms
Burst completed: 500 events in 274.619763ms
Burst completed: 500 events in 322.887417ms
Burst completed: 500 events in 277.199103ms
Burst completed: 500 events in 425.906745ms
Burst completed: 500 events in 471.907615ms
Burst completed: 500 events in 444.002409ms
Burst test completed: 5000 events in 8.641128603s
Events/sec: 578.63
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 2500 writes, 2500 reads in 23.666052402s
Combined ops/sec: 211.27
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 5000/5000 (100.0%)
Duration: 379.673375ms
Events/sec: 13169.21
Avg latency: 297.243µs
P90 latency: 393.52µs
P95 latency: 426.161µs
P99 latency: 508.367µs
Bottom 10% Avg latency: 445.997µs
=== Burst Pattern Test ===
Burst completed: 500 events in 375.562676ms
Burst completed: 500 events in 379.688521ms
Burst completed: 500 events in 339.807492ms
Burst completed: 500 events in 407.502127ms
Burst completed: 500 events in 347.61805ms
Burst completed: 500 events in 338.215243ms
Burst completed: 500 events in 420.579964ms
Burst completed: 500 events in 339.263061ms
Burst completed: 500 events in 318.379922ms
Burst completed: 500 events in 300.546668ms
Burst test completed: 5000 events in 8.580483954s
Events/sec: 582.72
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 2500 writes, 2094 reads in 30.014099513s
Combined ops/sec: 153.06
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 363.618589ms
Total Events: 5000
Events/sec: 13750.67
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 143 MB
Avg Latency: 283.533µs
P90 Latency: 378.941µs
P95 Latency: 413.136µs
P99 Latency: 527.123µs
Bottom 10% Avg Latency: 455.08µs
----------------------------------------
Test: Burst Pattern
Duration: 8.641128603s
Total Events: 5000
Events/sec: 578.63
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 193 MB
Avg Latency: 214.446µs
P90 Latency: 286.796µs
P95 Latency: 335.028µs
P99 Latency: 615.301µs
Bottom 10% Avg Latency: 421.658µs
----------------------------------------
Test: Mixed Read/Write
Duration: 23.666052402s
Total Events: 5000
Events/sec: 211.27
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 102 MB
Avg Latency: 4.328995ms
P90 Latency: 8.541367ms
P95 Latency: 10.080396ms
P99 Latency: 12.597948ms
Bottom 10% Avg Latency: 10.363345ms
----------------------------------------
Test: Peak Throughput
Duration: 379.673375ms
Total Events: 5000
Events/sec: 13169.21
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 119 MB
Avg Latency: 297.243µs
P90 Latency: 393.52µs
P95 Latency: 426.161µs
P99 Latency: 508.367µs
Bottom 10% Avg Latency: 445.997µs
----------------------------------------
Test: Burst Pattern
Duration: 8.580483954s
Total Events: 5000
Events/sec: 582.72
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 171 MB
Avg Latency: 202.362µs
P90 Latency: 263.111µs
P95 Latency: 296.545µs
P99 Latency: 359.544µs
Bottom 10% Avg Latency: 307.91µs
----------------------------------------
Test: Mixed Read/Write
Duration: 30.014099513s
Total Events: 4594
Events/sec: 153.06
Success Rate: 91.9%
Concurrent Workers: 4
Memory Used: 138 MB
Avg Latency: 8.407338ms
P90 Latency: 17.892951ms
P95 Latency: 21.106647ms
P99 Latency: 26.270739ms
Bottom 10% Avg Latency: 21.626824ms
----------------------------------------
Report saved to: /tmp/benchmark_next-orly-cpu_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly-cpu_8/benchmark_report.adoc
20250913114211995528 INF /tmp/benchmark_next-orly-cpu_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250913114212319238 INF /tmp/benchmark_next-orly-cpu_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 01. Size: 22 MiB of 22 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250913114212321065 INF /tmp/benchmark_next-orly-cpu_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: next-orly-cpu
RELAY_URL: ws://next-orly-cpu:8080
TEST_TIMESTAMP: 2025-09-13T11:42:12+00:00
BENCHMARK_CONFIG:
Events: 5000
Workers: 4
Duration: 30s


@@ -0,0 +1,190 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly-mem_8
Events: 5000, Workers: 4, Duration: 30s
20250913114217371197 INF /tmp/benchmark_next-orly-mem_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250913114217371717 INF /tmp/benchmark_next-orly-mem_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250913114217371747 INF /tmp/benchmark_next-orly-mem_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250913114217372014 INF (*types.Uint32)(0xc0000100fc)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250913114217372100 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 5000/5000 (100.0%)
Duration: 353.88923ms
Events/sec: 14128.71
Avg latency: 273.836µs
P90 latency: 365.325µs
P95 latency: 397.186µs
P99 latency: 469.604µs
Bottom 10% Avg latency: 417.518µs
=== Burst Pattern Test ===
Burst completed: 500 events in 375.971522ms
Burst completed: 500 events in 446.782317ms
Burst completed: 500 events in 470.554433ms
Burst completed: 500 events in 491.456408ms
Burst completed: 500 events in 468.172794ms
Burst completed: 500 events in 408.391929ms
Burst completed: 500 events in 447.580014ms
Burst completed: 500 events in 457.169251ms
Burst completed: 500 events in 479.515284ms
Burst completed: 500 events in 474.094546ms
Burst test completed: 5000 events in 9.534688959s
Events/sec: 524.40
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 2500 writes, 2500 reads in 25.54544997s
Combined ops/sec: 195.73
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 5000/5000 (100.0%)
Duration: 369.402349ms
Events/sec: 13535.38
Avg latency: 289.113µs
P90 latency: 379.151µs
P95 latency: 412.575µs
P99 latency: 486.947µs
Bottom 10% Avg latency: 429.135µs
=== Burst Pattern Test ===
Burst completed: 500 events in 388.47726ms
Burst completed: 500 events in 367.258061ms
Burst completed: 500 events in 443.655199ms
Burst completed: 500 events in 364.303548ms
Burst completed: 500 events in 306.776349ms
Burst completed: 500 events in 305.273459ms
Burst completed: 500 events in 379.233497ms
Burst completed: 500 events in 377.586001ms
Burst completed: 500 events in 369.113691ms
Burst completed: 500 events in 333.996551ms
Burst test completed: 5000 events in 8.649351111s
Events/sec: 578.08
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 2500 writes, 1996 reads in 30.025357572s
Combined ops/sec: 149.74
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 353.88923ms
Total Events: 5000
Events/sec: 14128.71
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 143 MB
Avg Latency: 273.836µs
P90 Latency: 365.325µs
P95 Latency: 397.186µs
P99 Latency: 469.604µs
Bottom 10% Avg Latency: 417.518µs
----------------------------------------
Test: Burst Pattern
Duration: 9.534688959s
Total Events: 5000
Events/sec: 524.40
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 192 MB
Avg Latency: 179.076µs
P90 Latency: 231.24µs
P95 Latency: 255.006µs
P99 Latency: 303.558µs
Bottom 10% Avg Latency: 269.293µs
----------------------------------------
Test: Mixed Read/Write
Duration: 25.54544997s
Total Events: 5000
Events/sec: 195.73
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 114 MB
Avg Latency: 5.044988ms
P90 Latency: 10.156837ms
P95 Latency: 11.501888ms
P99 Latency: 14.645443ms
Bottom 10% Avg Latency: 11.993477ms
----------------------------------------
Test: Peak Throughput
Duration: 369.402349ms
Total Events: 5000
Events/sec: 13535.38
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 132 MB
Avg Latency: 289.113µs
P90 Latency: 379.151µs
P95 Latency: 412.575µs
P99 Latency: 486.947µs
Bottom 10% Avg Latency: 429.135µs
----------------------------------------
Test: Burst Pattern
Duration: 8.649351111s
Total Events: 5000
Events/sec: 578.08
Success Rate: 100.0%
Concurrent Workers: 4
Memory Used: 183 MB
Avg Latency: 201.741µs
P90 Latency: 263.752µs
P95 Latency: 297.136µs
P99 Latency: 358.212µs
Bottom 10% Avg Latency: 310.062µs
----------------------------------------
Test: Mixed Read/Write
Duration: 30.025357572s
Total Events: 4496
Events/sec: 149.74
Success Rate: 89.9%
Concurrent Workers: 4
Memory Used: 173 MB
Avg Latency: 8.802374ms
P90 Latency: 19.907605ms
P95 Latency: 22.490193ms
P99 Latency: 26.079898ms
Bottom 10% Avg Latency: 22.803343ms
----------------------------------------
Report saved to: /tmp/benchmark_next-orly-mem_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly-mem_8/benchmark_report.adoc
20250913114421864166 INF /tmp/benchmark_next-orly-mem_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250913114422185339 INF /tmp/benchmark_next-orly-mem_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 01. Size: 22 MiB of 22 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250913114422187124 INF /tmp/benchmark_next-orly-mem_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: next-orly-mem
RELAY_URL: ws://next-orly-mem:8080
TEST_TIMESTAMP: 2025-09-13T11:44:22+00:00
BENCHMARK_CONFIG:
Events: 5000
Workers: 4
Duration: 30s