Delete outdated benchmark reports and results.
Removed old benchmark reports and detailed logs from the repository to clean up unnecessary files. These reports appear to be auto-generated and no longer relevant for ongoing development.
This commit is contained in:
18
.dockerignore
Normal file
18
.dockerignore
Normal file
@@ -0,0 +1,18 @@
|
||||
# Exclude heavy or host-specific data from Docker build context
|
||||
# Fixes: failed to solve: error from sender: open cmd/benchmark/data/postgres: permission denied
|
||||
|
||||
# Benchmark data and reports (mounted at runtime via volumes)
|
||||
cmd/benchmark/data/
|
||||
cmd/benchmark/reports/
|
||||
|
||||
# VCS and OS cruft
|
||||
.git
|
||||
.gitignore
|
||||
**/.DS_Store
|
||||
**/Thumbs.db
|
||||
|
||||
# Go build cache and binaries
|
||||
**/bin/
|
||||
**/dist/
|
||||
**/build/
|
||||
**/*.out
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -91,6 +91,7 @@ cmd/benchmark/data
|
||||
!Dockerfile*
|
||||
!strfry.conf
|
||||
!config.toml
|
||||
!.dockerignore
|
||||
# ...even if they are in subdirectories
|
||||
!*/
|
||||
/blocklist.json
|
||||
|
||||
@@ -32,8 +32,10 @@ type C struct {
|
||||
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"relay log level: fatal error warn info debug trace"`
|
||||
DBLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
|
||||
LogToStdout bool `env:"ORLY_LOG_TO_STDOUT" default:"false" usage:"log to stdout instead of stderr"`
|
||||
Pprof string `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation"`
|
||||
Pprof string `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation,heap,block,goroutine,threadcreate,mutex"`
|
||||
PprofPath string `env:"ORLY_PPROF_PATH" usage:"optional directory to write pprof profiles into (inside container); default is temporary dir"`
|
||||
PprofHTTP bool `env:"ORLY_PPROF_HTTP" default:"false" usage:"if true, expose net/http/pprof on port 6060"`
|
||||
OpenPprofWeb bool `env:"ORLY_OPEN_PPROF_WEB" default:"false" usage:"if true, automatically open the pprof web viewer when profiling is enabled"`
|
||||
IPWhitelist []string `env:"ORLY_IP_WHITELIST" usage:"comma-separated list of IP addresses to allow access from, matches on prefixes to allow private subnets, eg 10.0.0 = 10.0.0.0/8"`
|
||||
Admins []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
|
||||
Owners []string `env:"ORLY_OWNERS" usage:"comma-separated list of owner npubs, who have full control of the relay for wipe and restart and other functions"`
|
||||
|
||||
@@ -19,7 +19,7 @@ const (
|
||||
DefaultWriteWait = 10 * time.Second
|
||||
DefaultPongWait = 60 * time.Second
|
||||
DefaultPingWait = DefaultPongWait / 2
|
||||
DefaultReadTimeout = 3 * time.Second // Read timeout to detect stalled connections
|
||||
DefaultReadTimeout = 7 * time.Second // Read timeout to detect stalled connections
|
||||
DefaultWriteTimeout = 3 * time.Second
|
||||
DefaultMaxMessageSize = 1 * units.Mb
|
||||
|
||||
|
||||
@@ -34,13 +34,18 @@ COPY cmd/benchmark/benchmark-runner.sh /app/benchmark-runner
|
||||
# Make scripts executable
|
||||
RUN chmod +x /app/benchmark-runner
|
||||
|
||||
# Create reports directory
|
||||
RUN mkdir -p /reports
|
||||
# Create runtime user and reports directory owned by uid 1000
|
||||
RUN adduser -u 1000 -D appuser && \
|
||||
mkdir -p /reports && \
|
||||
chown -R 1000:1000 /app /reports
|
||||
|
||||
# Environment variables
|
||||
ENV BENCHMARK_EVENTS=10000
|
||||
ENV BENCHMARK_WORKERS=8
|
||||
ENV BENCHMARK_DURATION=60s
|
||||
|
||||
# Drop privileges: run as uid 1000
|
||||
USER 1000:1000
|
||||
|
||||
# Run the benchmark runner
|
||||
CMD ["/app/benchmark-runner"]
|
||||
@@ -46,7 +46,13 @@ RUN go mod download
|
||||
COPY . .
|
||||
|
||||
# Build the relay
|
||||
RUN CGO_ENABLED=1 GOOS=linux go build -o relay .
|
||||
RUN CGO_ENABLED=1 GOOS=linux go build -gcflags "all=-N -l" -o relay .
|
||||
|
||||
# Create non-root user (uid 1000) for runtime in builder stage (used by analyzer)
|
||||
RUN useradd -u 1000 -m -s /bin/bash appuser && \
|
||||
chown -R 1000:1000 /build
|
||||
# Switch to uid 1000 for any subsequent runtime use of this stage
|
||||
USER 1000:1000
|
||||
|
||||
# Final stage
|
||||
FROM ubuntu:22.04
|
||||
@@ -60,8 +66,10 @@ WORKDIR /app
|
||||
# Copy binary from builder
|
||||
COPY --from=builder /build/relay /app/relay
|
||||
|
||||
# Create data directory
|
||||
RUN mkdir -p /data
|
||||
# Create runtime user and writable directories
|
||||
RUN useradd -u 1000 -m -s /bin/bash appuser && \
|
||||
mkdir -p /data /profiles /app && \
|
||||
chown -R 1000:1000 /data /profiles /app
|
||||
|
||||
# Expose port
|
||||
EXPOSE 8080
|
||||
@@ -76,5 +84,8 @@ ENV ORLY_LOG_LEVEL=info
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD bash -lc "code=\$(curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:8080 || echo 000); echo \$code | grep -E '^(101|200|400|404|426)$' >/dev/null || exit 1"
|
||||
|
||||
# Drop privileges: run as uid 1000
|
||||
USER 1000:1000
|
||||
|
||||
# Run the relay
|
||||
CMD ["/app/relay"]
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
lol "lol.mleku.dev"
|
||||
"next.orly.dev/pkg/crypto/p256k"
|
||||
"next.orly.dev/pkg/database"
|
||||
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||
@@ -63,6 +64,7 @@ type Benchmark struct {
|
||||
}
|
||||
|
||||
func main() {
|
||||
lol.SetLogLevel("trace")
|
||||
config := parseFlags()
|
||||
|
||||
if config.RelayURL != "" {
|
||||
@@ -96,7 +98,7 @@ func parseFlags() *BenchmarkConfig {
|
||||
&config.DataDir, "datadir", "/tmp/benchmark_db", "Database directory",
|
||||
)
|
||||
flag.IntVar(
|
||||
&config.NumEvents, "events", 100000, "Number of events to generate",
|
||||
&config.NumEvents, "events", 10000, "Number of events to generate",
|
||||
)
|
||||
flag.IntVar(
|
||||
&config.ConcurrentWorkers, "workers", runtime.NumCPU(),
|
||||
@@ -133,8 +135,14 @@ func runNetworkLoad(cfg *BenchmarkConfig) {
|
||||
"Network mode: relay=%s workers=%d rate=%d ev/s per worker duration=%s\n",
|
||||
cfg.RelayURL, cfg.NetWorkers, cfg.NetRate, cfg.TestDuration,
|
||||
)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), cfg.TestDuration)
|
||||
// Create a timeout context for benchmark control only, not for connections
|
||||
timeoutCtx, cancel := context.WithTimeout(context.Background(), cfg.TestDuration)
|
||||
defer cancel()
|
||||
|
||||
// Use a separate background context for relay connections to avoid
|
||||
// cancelling the server when the benchmark timeout expires
|
||||
connCtx := context.Background()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
if cfg.NetWorkers <= 0 {
|
||||
cfg.NetWorkers = 1
|
||||
@@ -146,8 +154,8 @@ func runNetworkLoad(cfg *BenchmarkConfig) {
|
||||
wg.Add(1)
|
||||
go func(workerID int) {
|
||||
defer wg.Done()
|
||||
// Connect to relay
|
||||
rl, err := ws.RelayConnect(ctx, cfg.RelayURL)
|
||||
// Connect to relay using non-cancellable context
|
||||
rl, err := ws.RelayConnect(connCtx, cfg.RelayURL)
|
||||
if err != nil {
|
||||
fmt.Printf(
|
||||
"worker %d: failed to connect to %s: %v\n", workerID,
|
||||
@@ -174,17 +182,28 @@ func runNetworkLoad(cfg *BenchmarkConfig) {
|
||||
f.Authors = tag.NewWithCap(1)
|
||||
f.Authors.T = append(f.Authors.T, keys.Pub())
|
||||
f.Since = timestamp.FromUnix(since)
|
||||
sub, err := rl.Subscribe(ctx, filter.NewS(f))
|
||||
sub, err := rl.Subscribe(connCtx, filter.NewS(f))
|
||||
if err != nil {
|
||||
fmt.Printf("worker %d: subscribe error: %v\n", workerID, err)
|
||||
fmt.Printf(
|
||||
"worker %d: subscribe error: %v\n", workerID, err,
|
||||
)
|
||||
return
|
||||
}
|
||||
defer sub.Unsub()
|
||||
recv := 0
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
fmt.Printf("worker %d: subscriber exiting after %d events\n", workerID, recv)
|
||||
case <-timeoutCtx.Done():
|
||||
fmt.Printf(
|
||||
"worker %d: subscriber exiting after %d events (benchmark timeout: %v)\n",
|
||||
workerID, recv, timeoutCtx.Err(),
|
||||
)
|
||||
return
|
||||
case <-rl.Context().Done():
|
||||
fmt.Printf(
|
||||
"worker %d: relay connection closed; cause=%v lastErr=%v\n",
|
||||
workerID, rl.ConnectionCause(), rl.LastError(),
|
||||
)
|
||||
return
|
||||
case <-sub.EndOfStoredEvents:
|
||||
// continue streaming live events
|
||||
@@ -194,7 +213,10 @@ func runNetworkLoad(cfg *BenchmarkConfig) {
|
||||
}
|
||||
recv++
|
||||
if recv%100 == 0 {
|
||||
fmt.Printf("worker %d: received %d matching events\n", workerID, recv)
|
||||
fmt.Printf(
|
||||
"worker %d: received %d matching events\n",
|
||||
workerID, recv,
|
||||
)
|
||||
}
|
||||
ev.Free()
|
||||
}
|
||||
@@ -207,7 +229,7 @@ func runNetworkLoad(cfg *BenchmarkConfig) {
|
||||
count := 0
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-timeoutCtx.Done():
|
||||
fmt.Printf(
|
||||
"worker %d: stopping after %d publishes\n", workerID,
|
||||
count,
|
||||
@@ -233,12 +255,16 @@ func runNetworkLoad(cfg *BenchmarkConfig) {
|
||||
select {
|
||||
case err := <-ch:
|
||||
if err != nil {
|
||||
fmt.Printf("worker %d: write error: %v\n", workerID, err)
|
||||
fmt.Printf(
|
||||
"worker %d: write error: %v\n", workerID, err,
|
||||
)
|
||||
}
|
||||
default:
|
||||
}
|
||||
if count%100 == 0 {
|
||||
fmt.Printf("worker %d: sent %d events\n", workerID, count)
|
||||
fmt.Printf(
|
||||
"worker %d: sent %d events\n", workerID, count,
|
||||
)
|
||||
}
|
||||
ev.Free()
|
||||
count++
|
||||
@@ -284,15 +310,25 @@ func (b *Benchmark) Close() {
|
||||
func (b *Benchmark) RunSuite() {
|
||||
for round := 1; round <= 2; round++ {
|
||||
fmt.Printf("\n=== Starting test round %d/2 ===\n", round)
|
||||
fmt.Printf("RunPeakThroughputTest..\n")
|
||||
b.RunPeakThroughputTest()
|
||||
time.Sleep(10 * time.Second)
|
||||
fmt.Printf("RunBurstPatternTest..\n")
|
||||
b.RunBurstPatternTest()
|
||||
time.Sleep(10 * time.Second)
|
||||
fmt.Printf("RunMixedReadWriteTest..\n")
|
||||
b.RunMixedReadWriteTest()
|
||||
time.Sleep(10 * time.Second)
|
||||
fmt.Printf("RunQueryTest..\n")
|
||||
b.RunQueryTest()
|
||||
time.Sleep(10 * time.Second)
|
||||
fmt.Printf("RunConcurrentQueryStoreTest..\n")
|
||||
b.RunConcurrentQueryStoreTest()
|
||||
if round < 2 {
|
||||
fmt.Println("\nPausing 10s before next round...")
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
fmt.Println("\n=== Test round completed ===\n")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -595,6 +631,330 @@ func (b *Benchmark) RunMixedReadWriteTest() {
|
||||
fmt.Printf("Combined ops/sec: %.2f\n", result.EventsPerSecond)
|
||||
}
|
||||
|
||||
// RunQueryTest specifically benchmarks the QueryEvents function performance
|
||||
func (b *Benchmark) RunQueryTest() {
|
||||
fmt.Println("\n=== Query Test ===")
|
||||
|
||||
start := time.Now()
|
||||
var totalQueries int64
|
||||
var queryLatencies []time.Duration
|
||||
var errors []error
|
||||
var mu sync.Mutex
|
||||
|
||||
// Pre-populate with events for querying
|
||||
numSeedEvents := 10000
|
||||
seedEvents := b.generateEvents(numSeedEvents)
|
||||
ctx := context.Background()
|
||||
|
||||
fmt.Printf(
|
||||
"Pre-populating database with %d events for query tests...\n",
|
||||
numSeedEvents,
|
||||
)
|
||||
for _, ev := range seedEvents {
|
||||
b.db.SaveEvent(ctx, ev)
|
||||
}
|
||||
|
||||
// Create different types of filters for querying
|
||||
filters := []*filter.F{
|
||||
func() *filter.F { // Kind filter
|
||||
f := filter.New()
|
||||
f.Kinds = kind.NewS(kind.TextNote)
|
||||
limit := uint(100)
|
||||
f.Limit = &limit
|
||||
return f
|
||||
}(),
|
||||
func() *filter.F { // Tag filter
|
||||
f := filter.New()
|
||||
f.Tags = tag.NewS(
|
||||
tag.NewFromBytesSlice(
|
||||
[]byte("t"), []byte("benchmark"),
|
||||
),
|
||||
)
|
||||
limit := uint(100)
|
||||
f.Limit = &limit
|
||||
return f
|
||||
}(),
|
||||
func() *filter.F { // Mixed filter
|
||||
f := filter.New()
|
||||
f.Kinds = kind.NewS(kind.TextNote)
|
||||
f.Tags = tag.NewS(
|
||||
tag.NewFromBytesSlice(
|
||||
[]byte("t"), []byte("benchmark"),
|
||||
),
|
||||
)
|
||||
limit := uint(50)
|
||||
f.Limit = &limit
|
||||
return f
|
||||
}(),
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
// Start query workers
|
||||
for i := 0; i < b.config.ConcurrentWorkers; i++ {
|
||||
wg.Add(1)
|
||||
go func(workerID int) {
|
||||
defer wg.Done()
|
||||
|
||||
filterIndex := workerID % len(filters)
|
||||
queryCount := 0
|
||||
|
||||
for time.Since(start) < b.config.TestDuration {
|
||||
// Rotate through different filters
|
||||
f := filters[filterIndex]
|
||||
filterIndex = (filterIndex + 1) % len(filters)
|
||||
|
||||
// Execute query
|
||||
queryStart := time.Now()
|
||||
events, err := b.db.QueryEvents(ctx, f)
|
||||
queryLatency := time.Since(queryStart)
|
||||
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
} else {
|
||||
totalQueries++
|
||||
queryLatencies = append(queryLatencies, queryLatency)
|
||||
|
||||
// Free event memory
|
||||
for _, ev := range events {
|
||||
ev.Free()
|
||||
}
|
||||
}
|
||||
mu.Unlock()
|
||||
|
||||
queryCount++
|
||||
if queryCount%10 == 0 {
|
||||
time.Sleep(10 * time.Millisecond) // Small delay every 10 queries
|
||||
}
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
duration := time.Since(start)
|
||||
|
||||
// Calculate metrics
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Query Performance",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalQueries),
|
||||
EventsPerSecond: float64(totalQueries) / duration.Seconds(),
|
||||
ConcurrentWorkers: b.config.ConcurrentWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
}
|
||||
|
||||
if len(queryLatencies) > 0 {
|
||||
result.AvgLatency = calculateAvgLatency(queryLatencies)
|
||||
result.P90Latency = calculatePercentileLatency(queryLatencies, 0.90)
|
||||
result.P95Latency = calculatePercentileLatency(queryLatencies, 0.95)
|
||||
result.P99Latency = calculatePercentileLatency(queryLatencies, 0.99)
|
||||
result.Bottom10Avg = calculateBottom10Avg(queryLatencies)
|
||||
}
|
||||
|
||||
result.SuccessRate = 100.0 // No specific target count for queries
|
||||
|
||||
for _, err := range errors {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
b.results = append(b.results, result)
|
||||
b.mu.Unlock()
|
||||
|
||||
fmt.Printf(
|
||||
"Query test completed: %d queries in %v\n", totalQueries, duration,
|
||||
)
|
||||
fmt.Printf("Queries/sec: %.2f\n", result.EventsPerSecond)
|
||||
fmt.Printf("Avg query latency: %v\n", result.AvgLatency)
|
||||
fmt.Printf("P95 query latency: %v\n", result.P95Latency)
|
||||
fmt.Printf("P99 query latency: %v\n", result.P99Latency)
|
||||
}
|
||||
|
||||
// RunConcurrentQueryStoreTest benchmarks the performance of concurrent query and store operations
|
||||
func (b *Benchmark) RunConcurrentQueryStoreTest() {
|
||||
fmt.Println("\n=== Concurrent Query/Store Test ===")
|
||||
|
||||
start := time.Now()
|
||||
var totalQueries, totalWrites int64
|
||||
var queryLatencies, writeLatencies []time.Duration
|
||||
var errors []error
|
||||
var mu sync.Mutex
|
||||
|
||||
// Pre-populate with some events
|
||||
numSeedEvents := 5000
|
||||
seedEvents := b.generateEvents(numSeedEvents)
|
||||
ctx := context.Background()
|
||||
|
||||
fmt.Printf(
|
||||
"Pre-populating database with %d events for concurrent query/store test...\n",
|
||||
numSeedEvents,
|
||||
)
|
||||
for _, ev := range seedEvents {
|
||||
b.db.SaveEvent(ctx, ev)
|
||||
}
|
||||
|
||||
// Generate events for writing during the test
|
||||
writeEvents := b.generateEvents(b.config.NumEvents)
|
||||
|
||||
// Create filters for querying
|
||||
filters := []*filter.F{
|
||||
func() *filter.F { // Recent events filter
|
||||
f := filter.New()
|
||||
f.Since = timestamp.FromUnix(time.Now().Add(-10 * time.Minute).Unix())
|
||||
limit := uint(100)
|
||||
f.Limit = &limit
|
||||
return f
|
||||
}(),
|
||||
func() *filter.F { // Kind and tag filter
|
||||
f := filter.New()
|
||||
f.Kinds = kind.NewS(kind.TextNote)
|
||||
f.Tags = tag.NewS(
|
||||
tag.NewFromBytesSlice(
|
||||
[]byte("t"), []byte("benchmark"),
|
||||
),
|
||||
)
|
||||
limit := uint(50)
|
||||
f.Limit = &limit
|
||||
return f
|
||||
}(),
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Half of the workers will be readers, half will be writers
|
||||
numReaders := b.config.ConcurrentWorkers / 2
|
||||
numWriters := b.config.ConcurrentWorkers - numReaders
|
||||
|
||||
// Start query workers (readers)
|
||||
for i := 0; i < numReaders; i++ {
|
||||
wg.Add(1)
|
||||
go func(workerID int) {
|
||||
defer wg.Done()
|
||||
|
||||
filterIndex := workerID % len(filters)
|
||||
queryCount := 0
|
||||
|
||||
for time.Since(start) < b.config.TestDuration {
|
||||
// Select a filter
|
||||
f := filters[filterIndex]
|
||||
filterIndex = (filterIndex + 1) % len(filters)
|
||||
|
||||
// Execute query
|
||||
queryStart := time.Now()
|
||||
events, err := b.db.QueryEvents(ctx, f)
|
||||
queryLatency := time.Since(queryStart)
|
||||
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
} else {
|
||||
totalQueries++
|
||||
queryLatencies = append(queryLatencies, queryLatency)
|
||||
|
||||
// Free event memory
|
||||
for _, ev := range events {
|
||||
ev.Free()
|
||||
}
|
||||
}
|
||||
mu.Unlock()
|
||||
|
||||
queryCount++
|
||||
if queryCount%5 == 0 {
|
||||
time.Sleep(5 * time.Millisecond) // Small delay
|
||||
}
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Start write workers
|
||||
for i := 0; i < numWriters; i++ {
|
||||
wg.Add(1)
|
||||
go func(workerID int) {
|
||||
defer wg.Done()
|
||||
|
||||
eventIndex := workerID
|
||||
writeCount := 0
|
||||
|
||||
for time.Since(start) < b.config.TestDuration && eventIndex < len(writeEvents) {
|
||||
// Write operation
|
||||
writeStart := time.Now()
|
||||
_, _, err := b.db.SaveEvent(ctx, writeEvents[eventIndex])
|
||||
writeLatency := time.Since(writeStart)
|
||||
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
} else {
|
||||
totalWrites++
|
||||
writeLatencies = append(writeLatencies, writeLatency)
|
||||
}
|
||||
mu.Unlock()
|
||||
|
||||
eventIndex += numWriters
|
||||
writeCount++
|
||||
|
||||
if writeCount%10 == 0 {
|
||||
time.Sleep(10 * time.Millisecond) // Small delay every 10 writes
|
||||
}
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
duration := time.Since(start)
|
||||
|
||||
// Calculate metrics
|
||||
totalOps := totalQueries + totalWrites
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Concurrent Query/Store",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalOps),
|
||||
EventsPerSecond: float64(totalOps) / duration.Seconds(),
|
||||
ConcurrentWorkers: b.config.ConcurrentWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
}
|
||||
|
||||
// Calculate combined latencies for overall metrics
|
||||
allLatencies := append(queryLatencies, writeLatencies...)
|
||||
if len(allLatencies) > 0 {
|
||||
result.AvgLatency = calculateAvgLatency(allLatencies)
|
||||
result.P90Latency = calculatePercentileLatency(allLatencies, 0.90)
|
||||
result.P95Latency = calculatePercentileLatency(allLatencies, 0.95)
|
||||
result.P99Latency = calculatePercentileLatency(allLatencies, 0.99)
|
||||
result.Bottom10Avg = calculateBottom10Avg(allLatencies)
|
||||
}
|
||||
|
||||
result.SuccessRate = 100.0 // No specific target
|
||||
|
||||
for _, err := range errors {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
b.results = append(b.results, result)
|
||||
b.mu.Unlock()
|
||||
|
||||
// Calculate separate metrics for queries and writes
|
||||
var queryAvg, writeAvg time.Duration
|
||||
if len(queryLatencies) > 0 {
|
||||
queryAvg = calculateAvgLatency(queryLatencies)
|
||||
}
|
||||
if len(writeLatencies) > 0 {
|
||||
writeAvg = calculateAvgLatency(writeLatencies)
|
||||
}
|
||||
|
||||
fmt.Printf(
|
||||
"Concurrent test completed: %d operations (%d queries, %d writes) in %v\n",
|
||||
totalOps, totalQueries, totalWrites, duration,
|
||||
)
|
||||
fmt.Printf("Operations/sec: %.2f\n", result.EventsPerSecond)
|
||||
fmt.Printf("Avg latency: %v\n", result.AvgLatency)
|
||||
fmt.Printf("Avg query latency: %v\n", queryAvg)
|
||||
fmt.Printf("Avg write latency: %v\n", writeAvg)
|
||||
fmt.Printf("P95 latency: %v\n", result.P95Latency)
|
||||
fmt.Printf("P99 latency: %v\n", result.P99Latency)
|
||||
}
|
||||
|
||||
func (b *Benchmark) generateEvents(count int) []*event.E {
|
||||
events := make([]*event.E, count)
|
||||
now := timestamp.Now()
|
||||
|
||||
@@ -1,140 +0,0 @@
|
||||
================================================================
|
||||
NOSTR RELAY BENCHMARK AGGREGATE REPORT
|
||||
================================================================
|
||||
Generated: 2025-09-12T20:02:26+00:00
|
||||
Benchmark Configuration:
|
||||
Events per test: 10000
|
||||
Concurrent workers: 8
|
||||
Test duration: 60s
|
||||
|
||||
Relays tested: 6
|
||||
|
||||
================================================================
|
||||
SUMMARY BY RELAY
|
||||
================================================================
|
||||
|
||||
Relay: next-orly
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 17901.30
|
||||
Events/sec: 1504.52
|
||||
Events/sec: 17901.30
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 433.058µs
|
||||
Avg Latency: 182.813µs
|
||||
Avg Latency: 9.086952ms
|
||||
P95 Latency: 456.738µs
|
||||
P95 Latency: 152.86µs
|
||||
P95 Latency: 18.156339ms
|
||||
|
||||
Relay: khatru-sqlite
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 14291.70
|
||||
Events/sec: 1530.29
|
||||
Events/sec: 14291.70
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 545.724µs
|
||||
Avg Latency: 205.962µs
|
||||
Avg Latency: 9.092604ms
|
||||
P95 Latency: 473.43µs
|
||||
P95 Latency: 165.525µs
|
||||
P95 Latency: 19.302571ms
|
||||
|
||||
Relay: khatru-badger
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 16351.11
|
||||
Events/sec: 1539.25
|
||||
Events/sec: 16351.11
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 474.016µs
|
||||
Avg Latency: 226.602µs
|
||||
Avg Latency: 9.930935ms
|
||||
P95 Latency: 479.03µs
|
||||
P95 Latency: 239.525µs
|
||||
P95 Latency: 17.75358ms
|
||||
|
||||
Relay: relayer-basic
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 16522.60
|
||||
Events/sec: 1537.71
|
||||
Events/sec: 16522.60
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 466.066µs
|
||||
Avg Latency: 215.609µs
|
||||
Avg Latency: 9.851217ms
|
||||
P95 Latency: 514.849µs
|
||||
P95 Latency: 141.91µs
|
||||
P95 Latency: 23.101412ms
|
||||
|
||||
Relay: strfry
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 15346.12
|
||||
Events/sec: 1534.88
|
||||
Events/sec: 15346.12
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 506.51µs
|
||||
Avg Latency: 216.564µs
|
||||
Avg Latency: 9.938991ms
|
||||
P95 Latency: 590.442µs
|
||||
P95 Latency: 267.91µs
|
||||
P95 Latency: 19.784708ms
|
||||
|
||||
Relay: nostr-rs-relay
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 15199.95
|
||||
Events/sec: 1533.87
|
||||
Events/sec: 15199.95
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 508.699µs
|
||||
Avg Latency: 217.187µs
|
||||
Avg Latency: 9.38757ms
|
||||
P95 Latency: 1.011413ms
|
||||
P95 Latency: 130.018µs
|
||||
P95 Latency: 19.250416ms
|
||||
|
||||
|
||||
================================================================
|
||||
DETAILED RESULTS
|
||||
================================================================
|
||||
|
||||
Individual relay reports are available in:
|
||||
- /reports/run_20250912_195729/khatru-badger_results.txt
|
||||
- /reports/run_20250912_195729/khatru-sqlite_results.txt
|
||||
- /reports/run_20250912_195729/next-orly_results.txt
|
||||
- /reports/run_20250912_195729/nostr-rs-relay_results.txt
|
||||
- /reports/run_20250912_195729/relayer-basic_results.txt
|
||||
- /reports/run_20250912_195729/strfry_results.txt
|
||||
|
||||
================================================================
|
||||
BENCHMARK COMPARISON TABLE
|
||||
================================================================
|
||||
|
||||
Relay Status Peak Tput/s Avg Latency Success Rate
|
||||
---- ------ ----------- ----------- ------------
|
||||
next-orly OK 17901.30 433.058µs 100.0%
|
||||
khatru-sqlite OK 14291.70 545.724µs 100.0%
|
||||
khatru-badger OK 16351.11 474.016µs 100.0%
|
||||
relayer-basic OK 16522.60 466.066µs 100.0%
|
||||
strfry OK 15346.12 506.51µs 100.0%
|
||||
nostr-rs-relay OK 15199.95 508.699µs 100.0%
|
||||
|
||||
================================================================
|
||||
End of Report
|
||||
================================================================
|
||||
@@ -1,104 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_khatru-badger_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912195906053114 INF /tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912195906053741 INF /tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912195906053768 INF /tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912195906054020 INF (*types.Uint32)(0xc00570406c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912195906054071 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 611.579176ms
|
||||
Events/sec: 16351.11
|
||||
Avg latency: 474.016µs
|
||||
P95 latency: 479.03µs
|
||||
P99 latency: 594.73µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 160.976517ms
|
||||
Burst completed: 1000 events in 153.010415ms
|
||||
Burst completed: 1000 events in 146.10015ms
|
||||
Burst completed: 1000 events in 148.403729ms
|
||||
Burst completed: 1000 events in 141.681801ms
|
||||
Burst completed: 1000 events in 154.663067ms
|
||||
Burst completed: 1000 events in 135.960988ms
|
||||
Burst completed: 1000 events in 136.240589ms
|
||||
Burst completed: 1000 events in 141.75454ms
|
||||
Burst completed: 1000 events in 152.485379ms
|
||||
Burst test completed: 10000 events in 6.496690038s
|
||||
Events/sec: 1539.25
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 37.695370694s
|
||||
Combined ops/sec: 265.28
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 611.579176ms
|
||||
Total Events: 10000
|
||||
Events/sec: 16351.11
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 474.016µs
|
||||
P95 Latency: 479.03µs
|
||||
P99 Latency: 594.73µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.496690038s
|
||||
Total Events: 10000
|
||||
Events/sec: 1539.25
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 207 MB
|
||||
Avg Latency: 226.602µs
|
||||
P95 Latency: 239.525µs
|
||||
P99 Latency: 168.561µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 37.695370694s
|
||||
Total Events: 10000
|
||||
Events/sec: 265.28
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 132 MB
|
||||
Avg Latency: 9.930935ms
|
||||
P95 Latency: 17.75358ms
|
||||
P99 Latency: 24.256293ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
|
||||
20250912195950858706 INF /tmp/benchmark_khatru-badger_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912195951643646 INF /tmp/benchmark_khatru-badger_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912195951645255 INF /tmp/benchmark_khatru-badger_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: khatru-badger
|
||||
RELAY_URL: ws://khatru-badger:3334
|
||||
TEST_TIMESTAMP: 2025-09-12T19:59:51+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -1,104 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_khatru-sqlite_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912195817361580 INF /tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912195817362030 INF /tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912195817362064 INF /tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912195817362711 INF (*types.Uint32)(0xc00000005c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912195817362777 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 699.706889ms
|
||||
Events/sec: 14291.70
|
||||
Avg latency: 545.724µs
|
||||
P95 latency: 473.43µs
|
||||
P99 latency: 478.349µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 138.253122ms
|
||||
Burst completed: 1000 events in 153.709429ms
|
||||
Burst completed: 1000 events in 158.711026ms
|
||||
Burst completed: 1000 events in 152.54677ms
|
||||
Burst completed: 1000 events in 144.735244ms
|
||||
Burst completed: 1000 events in 153.236893ms
|
||||
Burst completed: 1000 events in 150.180515ms
|
||||
Burst completed: 1000 events in 154.733588ms
|
||||
Burst completed: 1000 events in 151.252182ms
|
||||
Burst completed: 1000 events in 150.610613ms
|
||||
Burst test completed: 10000 events in 6.534724469s
|
||||
Events/sec: 1530.29
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 35.563312501s
|
||||
Combined ops/sec: 281.19
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 699.706889ms
|
||||
Total Events: 10000
|
||||
Events/sec: 14291.70
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 545.724µs
|
||||
P95 Latency: 473.43µs
|
||||
P99 Latency: 478.349µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.534724469s
|
||||
Total Events: 10000
|
||||
Events/sec: 1530.29
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 208 MB
|
||||
Avg Latency: 205.962µs
|
||||
P95 Latency: 165.525µs
|
||||
P99 Latency: 253.411µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 35.563312501s
|
||||
Total Events: 10000
|
||||
Events/sec: 281.19
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 146 MB
|
||||
Avg Latency: 9.092604ms
|
||||
P95 Latency: 19.302571ms
|
||||
P99 Latency: 16.944829ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
|
||||
20250912195900161526 INF /tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912195900909573 INF /tmp/benchmark_khatru-sqlite_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912195900911092 INF /tmp/benchmark_khatru-sqlite_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: khatru-sqlite
|
||||
RELAY_URL: ws://khatru-sqlite:3334
|
||||
TEST_TIMESTAMP: 2025-09-12T19:59:01+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -1,104 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_next-orly_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912195729240522 INF /tmp/benchmark_next-orly_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912195729241087 INF /tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912195729241168 INF /tmp/benchmark_next-orly_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912195729241759 INF (*types.Uint32)(0xc0001de49c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912195729241847 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 558.618706ms
|
||||
Events/sec: 17901.30
|
||||
Avg latency: 433.058µs
|
||||
P95 latency: 456.738µs
|
||||
P99 latency: 337.231µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 172.949275ms
|
||||
Burst completed: 1000 events in 175.209401ms
|
||||
Burst completed: 1000 events in 156.532197ms
|
||||
Burst completed: 1000 events in 157.913421ms
|
||||
Burst completed: 1000 events in 151.37659ms
|
||||
Burst completed: 1000 events in 161.938783ms
|
||||
Burst completed: 1000 events in 168.47761ms
|
||||
Burst completed: 1000 events in 159.951768ms
|
||||
Burst completed: 1000 events in 170.308111ms
|
||||
Burst completed: 1000 events in 146.767432ms
|
||||
Burst test completed: 10000 events in 6.646634323s
|
||||
Events/sec: 1504.52
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 35.548232107s
|
||||
Combined ops/sec: 281.31
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 558.618706ms
|
||||
Total Events: 10000
|
||||
Events/sec: 17901.30
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 433.058µs
|
||||
P95 Latency: 456.738µs
|
||||
P99 Latency: 337.231µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.646634323s
|
||||
Total Events: 10000
|
||||
Events/sec: 1504.52
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 207 MB
|
||||
Avg Latency: 182.813µs
|
||||
P95 Latency: 152.86µs
|
||||
P99 Latency: 204.198µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 35.548232107s
|
||||
Total Events: 10000
|
||||
Events/sec: 281.31
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 215 MB
|
||||
Avg Latency: 9.086952ms
|
||||
P95 Latency: 18.156339ms
|
||||
P99 Latency: 24.346573ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly_8/benchmark_report.txt
|
||||
20250912195811996353 INF /tmp/benchmark_next-orly_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912195812308400 INF /tmp/benchmark_next-orly_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912195812310341 INF /tmp/benchmark_next-orly_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: next-orly
|
||||
RELAY_URL: ws://next-orly:8080
|
||||
TEST_TIMESTAMP: 2025-09-12T19:58:12+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -1,104 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_nostr-rs-relay_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912200137539643 INF /tmp/benchmark_nostr-rs-relay_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912200137540391 INF /tmp/benchmark_nostr-rs-relay_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912200137540449 INF /tmp/benchmark_nostr-rs-relay_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912200137540903 INF (*types.Uint32)(0xc0001c24cc)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912200137540961 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 657.896815ms
|
||||
Events/sec: 15199.95
|
||||
Avg latency: 508.699µs
|
||||
P95 latency: 1.011413ms
|
||||
P99 latency: 710.782µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 149.389787ms
|
||||
Burst completed: 1000 events in 138.154354ms
|
||||
Burst completed: 1000 events in 139.952633ms
|
||||
Burst completed: 1000 events in 148.684306ms
|
||||
Burst completed: 1000 events in 154.779586ms
|
||||
Burst completed: 1000 events in 163.72717ms
|
||||
Burst completed: 1000 events in 142.665132ms
|
||||
Burst completed: 1000 events in 151.637082ms
|
||||
Burst completed: 1000 events in 143.018896ms
|
||||
Burst completed: 1000 events in 157.963802ms
|
||||
Burst test completed: 10000 events in 6.519459944s
|
||||
Events/sec: 1533.87
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 36.26569002s
|
||||
Combined ops/sec: 275.74
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 657.896815ms
|
||||
Total Events: 10000
|
||||
Events/sec: 15199.95
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 153 MB
|
||||
Avg Latency: 508.699µs
|
||||
P95 Latency: 1.011413ms
|
||||
P99 Latency: 710.782µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.519459944s
|
||||
Total Events: 10000
|
||||
Events/sec: 1533.87
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 206 MB
|
||||
Avg Latency: 217.187µs
|
||||
P95 Latency: 130.018µs
|
||||
P99 Latency: 261.728µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 36.26569002s
|
||||
Total Events: 10000
|
||||
Events/sec: 275.74
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 225 MB
|
||||
Avg Latency: 9.38757ms
|
||||
P95 Latency: 19.250416ms
|
||||
P99 Latency: 20.049957ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
|
||||
20250912200220985006 INF /tmp/benchmark_nostr-rs-relay_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912200221295381 INF /tmp/benchmark_nostr-rs-relay_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912200221297677 INF /tmp/benchmark_nostr-rs-relay_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: nostr-rs-relay
|
||||
RELAY_URL: ws://nostr-rs-relay:8080
|
||||
TEST_TIMESTAMP: 2025-09-12T20:02:21+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -1,104 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_relayer-basic_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912195956808180 INF /tmp/benchmark_relayer-basic_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912195956808720 INF /tmp/benchmark_relayer-basic_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912195956808755 INF /tmp/benchmark_relayer-basic_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912195956809102 INF (*types.Uint32)(0xc0001bc04c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912195956809190 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 605.231707ms
|
||||
Events/sec: 16522.60
|
||||
Avg latency: 466.066µs
|
||||
P95 latency: 514.849µs
|
||||
P99 latency: 451.358µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 149.715312ms
|
||||
Burst completed: 1000 events in 146.385191ms
|
||||
Burst completed: 1000 events in 147.010481ms
|
||||
Burst completed: 1000 events in 151.671062ms
|
||||
Burst completed: 1000 events in 143.215087ms
|
||||
Burst completed: 1000 events in 137.331431ms
|
||||
Burst completed: 1000 events in 155.735079ms
|
||||
Burst completed: 1000 events in 161.246126ms
|
||||
Burst completed: 1000 events in 140.174417ms
|
||||
Burst completed: 1000 events in 144.819799ms
|
||||
Burst test completed: 10000 events in 6.503155987s
|
||||
Events/sec: 1537.71
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 37.45410417s
|
||||
Combined ops/sec: 266.99
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 605.231707ms
|
||||
Total Events: 10000
|
||||
Events/sec: 16522.60
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 152 MB
|
||||
Avg Latency: 466.066µs
|
||||
P95 Latency: 514.849µs
|
||||
P99 Latency: 451.358µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.503155987s
|
||||
Total Events: 10000
|
||||
Events/sec: 1537.71
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 203 MB
|
||||
Avg Latency: 215.609µs
|
||||
P95 Latency: 141.91µs
|
||||
P99 Latency: 204.819µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 37.45410417s
|
||||
Total Events: 10000
|
||||
Events/sec: 266.99
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 148 MB
|
||||
Avg Latency: 9.851217ms
|
||||
P95 Latency: 23.101412ms
|
||||
P99 Latency: 17.889412ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
|
||||
20250912200041372670 INF /tmp/benchmark_relayer-basic_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912200041686782 INF /tmp/benchmark_relayer-basic_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912200041689009 INF /tmp/benchmark_relayer-basic_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: relayer-basic
|
||||
RELAY_URL: ws://relayer-basic:7447
|
||||
TEST_TIMESTAMP: 2025-09-12T20:00:41+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -1,35 +0,0 @@
|
||||
= NOSTR Relay Benchmark Results
|
||||
|
||||
Generated from: aggregate_report.txt
|
||||
|
||||
[cols="1,^1,^1,^1,^1,^1,^1",options="header"]
|
||||
|===
|
||||
| Metric | next-orly | khatru-sqlite | khatru-badger | relayer-basic | strfry | nostr-rs-relay
|
||||
|
||||
| Store Events/sec
|
||||
| 17901.30 | 14291.70 | 16351.11 | 16522.60 | 15346.12 | 15199.95
|
||||
|
||||
| Store Avg Latency #1
|
||||
| 433.058µs | 545.724µs | 474.016µs | 466.066µs | 506.51µs | 508.699µs
|
||||
|
||||
| Store P95 Latency #1
|
||||
| 456.738µs | 473.43µs | 479.03µs | 514.849µs | 590.442µs | 1.011413ms
|
||||
|
||||
| Query Events/sec #2
|
||||
| 1504.52 | 1530.29 | 1539.25 | 1537.71 | 1534.88 | 1533.87
|
||||
|
||||
| Query Avg Latency #2
|
||||
| 182.813µs | 205.962µs | 226.602µs | 215.609µs | 216.564µs | 217.187µs
|
||||
|
||||
| Query P95 Latency #2
|
||||
| 152.86µs | 165.525µs | 239.525µs | 141.91µs | 267.91µs | 130.018µs
|
||||
|
||||
| Concurrent Store/Query Events/sec #3
|
||||
| 17901.30 | 14291.70 | 16351.11 | 16522.60 | 15346.12 | 15199.95
|
||||
|
||||
| Concurrent Store/Query Avg Latency #3
|
||||
| 9.086952ms | 9.092604ms | 9.930935ms | 9.851217ms | 9.938991ms | 9.38757ms
|
||||
|
||||
| Concurrent Store/Query P95 Latency #3
|
||||
| 18.156339ms | 19.302571ms | 17.75358ms | 23.101412ms | 19.784708ms | 19.250416ms
|
||||
|===
|
||||
@@ -1,104 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_strfry_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912200046745432 INF /tmp/benchmark_strfry_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912200046746116 INF /tmp/benchmark_strfry_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912200046746193 INF /tmp/benchmark_strfry_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912200046746576 INF (*types.Uint32)(0xc0002a9c4c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912200046746636 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 651.630667ms
|
||||
Events/sec: 15346.12
|
||||
Avg latency: 506.51µs
|
||||
P95 latency: 590.442µs
|
||||
P99 latency: 278.399µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 148.701372ms
|
||||
Burst completed: 1000 events in 161.333951ms
|
||||
Burst completed: 1000 events in 146.993646ms
|
||||
Burst completed: 1000 events in 155.768019ms
|
||||
Burst completed: 1000 events in 143.83944ms
|
||||
Burst completed: 1000 events in 156.208347ms
|
||||
Burst completed: 1000 events in 150.769887ms
|
||||
Burst completed: 1000 events in 140.217044ms
|
||||
Burst completed: 1000 events in 150.831164ms
|
||||
Burst completed: 1000 events in 135.759058ms
|
||||
Burst test completed: 10000 events in 6.515183689s
|
||||
Events/sec: 1534.88
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 37.667054484s
|
||||
Combined ops/sec: 265.48
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 651.630667ms
|
||||
Total Events: 10000
|
||||
Events/sec: 15346.12
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 152 MB
|
||||
Avg Latency: 506.51µs
|
||||
P95 Latency: 590.442µs
|
||||
P99 Latency: 278.399µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.515183689s
|
||||
Total Events: 10000
|
||||
Events/sec: 1534.88
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 203 MB
|
||||
Avg Latency: 216.564µs
|
||||
P95 Latency: 267.91µs
|
||||
P99 Latency: 310.46µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 37.667054484s
|
||||
Total Events: 10000
|
||||
Events/sec: 265.48
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 136 MB
|
||||
Avg Latency: 9.938991ms
|
||||
P95 Latency: 19.784708ms
|
||||
P99 Latency: 18.788985ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
|
||||
20250912200131581470 INF /tmp/benchmark_strfry_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912200132372653 INF /tmp/benchmark_strfry_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912200132384548 INF /tmp/benchmark_strfry_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: strfry
|
||||
RELAY_URL: ws://strfry:8080
|
||||
TEST_TIMESTAMP: 2025-09-12T20:01:32+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -1,140 +0,0 @@
|
||||
================================================================
|
||||
NOSTR RELAY BENCHMARK AGGREGATE REPORT
|
||||
================================================================
|
||||
Generated: 2025-09-12T22:43:29+00:00
|
||||
Benchmark Configuration:
|
||||
Events per test: 10000
|
||||
Concurrent workers: 8
|
||||
Test duration: 60s
|
||||
|
||||
Relays tested: 6
|
||||
|
||||
================================================================
|
||||
SUMMARY BY RELAY
|
||||
================================================================
|
||||
|
||||
Relay: next-orly
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 18056.94
|
||||
Events/sec: 1492.32
|
||||
Events/sec: 16750.82
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 428.869µs
|
||||
Bottom 10% Avg Latency: 643.51µs
|
||||
Avg Latency: 178.04µs
|
||||
P95 Latency: 607.997µs
|
||||
P95 Latency: 243.954µs
|
||||
P95 Latency: 21.665387ms
|
||||
|
||||
Relay: khatru-sqlite
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 17635.76
|
||||
Events/sec: 1510.39
|
||||
Events/sec: 16509.10
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 437.941µs
|
||||
Bottom 10% Avg Latency: 659.71µs
|
||||
Avg Latency: 203.563µs
|
||||
P95 Latency: 621.964µs
|
||||
P95 Latency: 330.729µs
|
||||
P95 Latency: 21.838576ms
|
||||
|
||||
Relay: khatru-badger
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 17312.60
|
||||
Events/sec: 1508.54
|
||||
Events/sec: 15933.99
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 448.778µs
|
||||
Bottom 10% Avg Latency: 664.268µs
|
||||
Avg Latency: 196.38µs
|
||||
P95 Latency: 633.085µs
|
||||
P95 Latency: 293.579µs
|
||||
P95 Latency: 22.727378ms
|
||||
|
||||
Relay: relayer-basic
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 15155.00
|
||||
Events/sec: 1545.44
|
||||
Events/sec: 14255.58
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 513.243µs
|
||||
Bottom 10% Avg Latency: 864.746µs
|
||||
Avg Latency: 273.645µs
|
||||
P95 Latency: 792.685µs
|
||||
P95 Latency: 498.989µs
|
||||
P95 Latency: 22.924497ms
|
||||
|
||||
Relay: strfry
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 15245.05
|
||||
Events/sec: 1533.59
|
||||
Events/sec: 15507.07
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 510.383µs
|
||||
Bottom 10% Avg Latency: 831.211µs
|
||||
Avg Latency: 223.359µs
|
||||
P95 Latency: 769.085µs
|
||||
P95 Latency: 378.145µs
|
||||
P95 Latency: 22.152884ms
|
||||
|
||||
Relay: nostr-rs-relay
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 16312.24
|
||||
Events/sec: 1502.05
|
||||
Events/sec: 14131.23
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 476.418µs
|
||||
Bottom 10% Avg Latency: 722.179µs
|
||||
Avg Latency: 182.765µs
|
||||
P95 Latency: 686.836µs
|
||||
P95 Latency: 257.082µs
|
||||
P95 Latency: 20.680962ms
|
||||
|
||||
|
||||
================================================================
|
||||
DETAILED RESULTS
|
||||
================================================================
|
||||
|
||||
Individual relay reports are available in:
|
||||
- /reports/run_20250912_222649/khatru-badger_results.txt
|
||||
- /reports/run_20250912_222649/khatru-sqlite_results.txt
|
||||
- /reports/run_20250912_222649/next-orly_results.txt
|
||||
- /reports/run_20250912_222649/nostr-rs-relay_results.txt
|
||||
- /reports/run_20250912_222649/relayer-basic_results.txt
|
||||
- /reports/run_20250912_222649/strfry_results.txt
|
||||
|
||||
================================================================
|
||||
BENCHMARK COMPARISON TABLE
|
||||
================================================================
|
||||
|
||||
Relay Status Peak Tput/s Avg Latency Success Rate
|
||||
---- ------ ----------- ----------- ------------
|
||||
next-orly OK 18056.94 428.869µs 100.0%
|
||||
khatru-sqlite OK 17635.76 437.941µs 100.0%
|
||||
khatru-badger OK 17312.60 448.778µs 100.0%
|
||||
relayer-basic OK 15155.00 513.243µs 100.0%
|
||||
strfry OK 15245.05 510.383µs 100.0%
|
||||
nostr-rs-relay OK 16312.24 476.418µs 100.0%
|
||||
|
||||
================================================================
|
||||
End of Report
|
||||
================================================================
|
||||
@@ -1,190 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_khatru-badger_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912223222496620 INF /tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912223222497154 INF /tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912223222497184 INF /tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912223222497402 INF (*types.Uint32)(0xc0000100fc)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912223222497454 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 577.614152ms
|
||||
Events/sec: 17312.60
|
||||
Avg latency: 448.778µs
|
||||
P90 latency: 584.783µs
|
||||
P95 latency: 633.085µs
|
||||
P99 latency: 749.537µs
|
||||
Bottom 10% Avg latency: 664.268µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 161.62554ms
|
||||
Burst completed: 1000 events in 154.666063ms
|
||||
Burst completed: 1000 events in 149.999903ms
|
||||
Burst completed: 1000 events in 169.141205ms
|
||||
Burst completed: 1000 events in 153.987041ms
|
||||
Burst completed: 1000 events in 141.227756ms
|
||||
Burst completed: 1000 events in 168.989116ms
|
||||
Burst completed: 1000 events in 161.032171ms
|
||||
Burst completed: 1000 events in 182.128996ms
|
||||
Burst completed: 1000 events in 161.86147ms
|
||||
Burst test completed: 10000 events in 6.628942674s
|
||||
Events/sec: 1508.54
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 36.466065909s
|
||||
Combined ops/sec: 274.23
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 627.589155ms
|
||||
Events/sec: 15933.99
|
||||
Avg latency: 489.881µs
|
||||
P90 latency: 628.857µs
|
||||
P95 latency: 679.363µs
|
||||
P99 latency: 828.307µs
|
||||
Bottom 10% Avg latency: 716.862µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 150.262543ms
|
||||
Burst completed: 1000 events in 148.027109ms
|
||||
Burst completed: 1000 events in 139.184066ms
|
||||
Burst completed: 1000 events in 147.196277ms
|
||||
Burst completed: 1000 events in 141.143557ms
|
||||
Burst completed: 1000 events in 138.727197ms
|
||||
Burst completed: 1000 events in 143.014207ms
|
||||
Burst completed: 1000 events in 143.355055ms
|
||||
Burst completed: 1000 events in 162.573956ms
|
||||
Burst completed: 1000 events in 142.875393ms
|
||||
Burst test completed: 10000 events in 6.475822519s
|
||||
Events/sec: 1544.21
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4742 reads in 1m0.036644794s
|
||||
Combined ops/sec: 162.27
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 577.614152ms
|
||||
Total Events: 10000
|
||||
Events/sec: 17312.60
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 152 MB
|
||||
Avg Latency: 448.778µs
|
||||
P90 Latency: 584.783µs
|
||||
P95 Latency: 633.085µs
|
||||
P99 Latency: 749.537µs
|
||||
Bottom 10% Avg Latency: 664.268µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.628942674s
|
||||
Total Events: 10000
|
||||
Events/sec: 1508.54
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 204 MB
|
||||
Avg Latency: 196.38µs
|
||||
P90 Latency: 260.706µs
|
||||
P95 Latency: 293.579µs
|
||||
P99 Latency: 385.694µs
|
||||
Bottom 10% Avg Latency: 317.532µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 36.466065909s
|
||||
Total Events: 10000
|
||||
Events/sec: 274.23
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 128 MB
|
||||
Avg Latency: 9.448363ms
|
||||
P90 Latency: 20.988228ms
|
||||
P95 Latency: 22.727378ms
|
||||
P99 Latency: 25.094784ms
|
||||
Bottom 10% Avg Latency: 23.01277ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 627.589155ms
|
||||
Total Events: 10000
|
||||
Events/sec: 15933.99
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 124 MB
|
||||
Avg Latency: 489.881µs
|
||||
P90 Latency: 628.857µs
|
||||
P95 Latency: 679.363µs
|
||||
P99 Latency: 828.307µs
|
||||
Bottom 10% Avg Latency: 716.862µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.475822519s
|
||||
Total Events: 10000
|
||||
Events/sec: 1544.21
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 170 MB
|
||||
Avg Latency: 215.418µs
|
||||
P90 Latency: 287.237µs
|
||||
P95 Latency: 339.025µs
|
||||
P99 Latency: 510.682µs
|
||||
Bottom 10% Avg Latency: 378.172µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.036644794s
|
||||
Total Events: 9742
|
||||
Events/sec: 162.27
|
||||
Success Rate: 97.4%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 181 MB
|
||||
Avg Latency: 19.714686ms
|
||||
P90 Latency: 44.573506ms
|
||||
P95 Latency: 46.895555ms
|
||||
P99 Latency: 50.425027ms
|
||||
Bottom 10% Avg Latency: 47.384489ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.adoc
|
||||
20250912223503335481 INF /tmp/benchmark_khatru-badger_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912223504473151 INF /tmp/benchmark_khatru-badger_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912223504475627 INF /tmp/benchmark_khatru-badger_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: khatru-badger
|
||||
RELAY_URL: ws://khatru-badger:3334
|
||||
TEST_TIMESTAMP: 2025-09-12T22:35:04+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -1,190 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_khatru-sqlite_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912222936300616 INF /tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912222936301606 INF /tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912222936301647 INF /tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912222936301987 INF (*types.Uint32)(0xc0001c23f0)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912222936302060 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 567.02963ms
|
||||
Events/sec: 17635.76
|
||||
Avg latency: 437.941µs
|
||||
P90 latency: 574.133µs
|
||||
P95 latency: 621.964µs
|
||||
P99 latency: 768.473µs
|
||||
Bottom 10% Avg latency: 659.71µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 172.012448ms
|
||||
Burst completed: 1000 events in 145.502701ms
|
||||
Burst completed: 1000 events in 153.928098ms
|
||||
Burst completed: 1000 events in 169.995269ms
|
||||
Burst completed: 1000 events in 147.617375ms
|
||||
Burst completed: 1000 events in 157.211387ms
|
||||
Burst completed: 1000 events in 153.332744ms
|
||||
Burst completed: 1000 events in 172.374938ms
|
||||
Burst completed: 1000 events in 167.518935ms
|
||||
Burst completed: 1000 events in 155.211871ms
|
||||
Burst test completed: 10000 events in 6.620785215s
|
||||
Events/sec: 1510.39
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 35.700582016s
|
||||
Combined ops/sec: 280.11
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 605.726547ms
|
||||
Events/sec: 16509.10
|
||||
Avg latency: 470.577µs
|
||||
P90 latency: 609.791µs
|
||||
P95 latency: 660.256µs
|
||||
P99 latency: 788.641µs
|
||||
Bottom 10% Avg latency: 687.847µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 135.310723ms
|
||||
Burst completed: 1000 events in 166.604305ms
|
||||
Burst completed: 1000 events in 141.453184ms
|
||||
Burst completed: 1000 events in 146.579351ms
|
||||
Burst completed: 1000 events in 154.453638ms
|
||||
Burst completed: 1000 events in 156.212516ms
|
||||
Burst completed: 1000 events in 142.309354ms
|
||||
Burst completed: 1000 events in 152.268188ms
|
||||
Burst completed: 1000 events in 144.187829ms
|
||||
Burst completed: 1000 events in 147.609002ms
|
||||
Burst test completed: 10000 events in 6.508461808s
|
||||
Events/sec: 1536.46
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4662 reads in 1m0.040595326s
|
||||
Combined ops/sec: 160.92
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 567.02963ms
|
||||
Total Events: 10000
|
||||
Events/sec: 17635.76
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 437.941µs
|
||||
P90 Latency: 574.133µs
|
||||
P95 Latency: 621.964µs
|
||||
P99 Latency: 768.473µs
|
||||
Bottom 10% Avg Latency: 659.71µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.620785215s
|
||||
Total Events: 10000
|
||||
Events/sec: 1510.39
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 208 MB
|
||||
Avg Latency: 203.563µs
|
||||
P90 Latency: 274.152µs
|
||||
P95 Latency: 330.729µs
|
||||
P99 Latency: 521.483µs
|
||||
Bottom 10% Avg Latency: 378.237µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 35.700582016s
|
||||
Total Events: 10000
|
||||
Events/sec: 280.11
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 232 MB
|
||||
Avg Latency: 9.150925ms
|
||||
P90 Latency: 20.1434ms
|
||||
P95 Latency: 21.838576ms
|
||||
P99 Latency: 24.0106ms
|
||||
Bottom 10% Avg Latency: 22.04901ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 605.726547ms
|
||||
Total Events: 10000
|
||||
Events/sec: 16509.10
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 139 MB
|
||||
Avg Latency: 470.577µs
|
||||
P90 Latency: 609.791µs
|
||||
P95 Latency: 660.256µs
|
||||
P99 Latency: 788.641µs
|
||||
Bottom 10% Avg Latency: 687.847µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.508461808s
|
||||
Total Events: 10000
|
||||
Events/sec: 1536.46
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 182 MB
|
||||
Avg Latency: 199.49µs
|
||||
P90 Latency: 261.427µs
|
||||
P95 Latency: 294.771µs
|
||||
P99 Latency: 406.814µs
|
||||
Bottom 10% Avg Latency: 332.083µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.040595326s
|
||||
Total Events: 9662
|
||||
Events/sec: 160.92
|
||||
Success Rate: 96.6%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 204 MB
|
||||
Avg Latency: 19.935937ms
|
||||
P90 Latency: 44.802034ms
|
||||
P95 Latency: 48.282589ms
|
||||
P99 Latency: 52.169026ms
|
||||
Bottom 10% Avg Latency: 48.641697ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc
|
||||
20250912223216370778 INF /tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912223217349356 INF /tmp/benchmark_khatru-sqlite_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912223217352393 INF /tmp/benchmark_khatru-sqlite_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: khatru-sqlite
|
||||
RELAY_URL: ws://khatru-sqlite:3334
|
||||
TEST_TIMESTAMP: 2025-09-12T22:32:17+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -1,190 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_next-orly_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912222650025765 INF /tmp/benchmark_next-orly_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912222650026455 INF /tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912222650026497 INF /tmp/benchmark_next-orly_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912222650026747 INF (*types.Uint32)(0xc0001f63cc)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912222650026778 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 553.803776ms
|
||||
Events/sec: 18056.94
|
||||
Avg latency: 428.869µs
|
||||
P90 latency: 558.663µs
|
||||
P95 latency: 607.997µs
|
||||
P99 latency: 749.787µs
|
||||
Bottom 10% Avg latency: 643.51µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 190.801687ms
|
||||
Burst completed: 1000 events in 168.170564ms
|
||||
Burst completed: 1000 events in 161.16591ms
|
||||
Burst completed: 1000 events in 161.43364ms
|
||||
Burst completed: 1000 events in 148.293941ms
|
||||
Burst completed: 1000 events in 172.875177ms
|
||||
Burst completed: 1000 events in 178.930553ms
|
||||
Burst completed: 1000 events in 161.052715ms
|
||||
Burst completed: 1000 events in 162.071335ms
|
||||
Burst completed: 1000 events in 171.849756ms
|
||||
Burst test completed: 10000 events in 6.70096222s
|
||||
Events/sec: 1492.32
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 35.645619485s
|
||||
Combined ops/sec: 280.54
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 596.985601ms
|
||||
Events/sec: 16750.82
|
||||
Avg latency: 465.438µs
|
||||
P90 latency: 594.151µs
|
||||
P95 latency: 636.592µs
|
||||
P99 latency: 757.953µs
|
||||
Bottom 10% Avg latency: 672.673µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 152.121077ms
|
||||
Burst completed: 1000 events in 160.774367ms
|
||||
Burst completed: 1000 events in 137.913676ms
|
||||
Burst completed: 1000 events in 142.916647ms
|
||||
Burst completed: 1000 events in 166.771131ms
|
||||
Burst completed: 1000 events in 160.016244ms
|
||||
Burst completed: 1000 events in 156.369302ms
|
||||
Burst completed: 1000 events in 158.850666ms
|
||||
Burst completed: 1000 events in 154.842287ms
|
||||
Burst completed: 1000 events in 146.828122ms
|
||||
Burst test completed: 10000 events in 6.557799732s
|
||||
Events/sec: 1524.90
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4782 reads in 1m0.043775785s
|
||||
Combined ops/sec: 162.91
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 553.803776ms
|
||||
Total Events: 10000
|
||||
Events/sec: 18056.94
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 153 MB
|
||||
Avg Latency: 428.869µs
|
||||
P90 Latency: 558.663µs
|
||||
P95 Latency: 607.997µs
|
||||
P99 Latency: 749.787µs
|
||||
Bottom 10% Avg Latency: 643.51µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.70096222s
|
||||
Total Events: 10000
|
||||
Events/sec: 1492.32
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 204 MB
|
||||
Avg Latency: 178.04µs
|
||||
P90 Latency: 224.367µs
|
||||
P95 Latency: 243.954µs
|
||||
P99 Latency: 318.225µs
|
||||
Bottom 10% Avg Latency: 264.418µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 35.645619485s
|
||||
Total Events: 10000
|
||||
Events/sec: 280.54
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 120 MB
|
||||
Avg Latency: 9.118653ms
|
||||
P90 Latency: 19.852346ms
|
||||
P95 Latency: 21.665387ms
|
||||
P99 Latency: 23.946919ms
|
||||
Bottom 10% Avg Latency: 21.867062ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 596.985601ms
|
||||
Total Events: 10000
|
||||
Events/sec: 16750.82
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 121 MB
|
||||
Avg Latency: 465.438µs
|
||||
P90 Latency: 594.151µs
|
||||
P95 Latency: 636.592µs
|
||||
P99 Latency: 757.953µs
|
||||
Bottom 10% Avg Latency: 672.673µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.557799732s
|
||||
Total Events: 10000
|
||||
Events/sec: 1524.90
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 167 MB
|
||||
Avg Latency: 189.538µs
|
||||
P90 Latency: 247.511µs
|
||||
P95 Latency: 274.011µs
|
||||
P99 Latency: 360.977µs
|
||||
Bottom 10% Avg Latency: 296.967µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.043775785s
|
||||
Total Events: 9782
|
||||
Events/sec: 162.91
|
||||
Success Rate: 97.8%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 193 MB
|
||||
Avg Latency: 19.562536ms
|
||||
P90 Latency: 43.431835ms
|
||||
P95 Latency: 46.326204ms
|
||||
P99 Latency: 50.533302ms
|
||||
Bottom 10% Avg Latency: 46.979603ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_next-orly_8/benchmark_report.adoc
|
||||
20250912222930150767 INF /tmp/benchmark_next-orly_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912222931147258 INF /tmp/benchmark_next-orly_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912222931149928 INF /tmp/benchmark_next-orly_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: next-orly
|
||||
RELAY_URL: ws://next-orly:8080
|
||||
TEST_TIMESTAMP: 2025-09-12T22:29:31+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -1,190 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_nostr-rs-relay_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912224044213613 INF /tmp/benchmark_nostr-rs-relay_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912224044214094 INF /tmp/benchmark_nostr-rs-relay_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912224044214130 INF /tmp/benchmark_nostr-rs-relay_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912224044214381 INF (*types.Uint32)(0xc000233c3c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912224044214413 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 613.036589ms
|
||||
Events/sec: 16312.24
|
||||
Avg latency: 476.418µs
|
||||
P90 latency: 627.852µs
|
||||
P95 latency: 686.836µs
|
||||
P99 latency: 841.471µs
|
||||
Bottom 10% Avg latency: 722.179µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 156.218882ms
|
||||
Burst completed: 1000 events in 170.25756ms
|
||||
Burst completed: 1000 events in 164.944293ms
|
||||
Burst completed: 1000 events in 162.767866ms
|
||||
Burst completed: 1000 events in 148.744622ms
|
||||
Burst completed: 1000 events in 163.556351ms
|
||||
Burst completed: 1000 events in 172.007512ms
|
||||
Burst completed: 1000 events in 159.806858ms
|
||||
Burst completed: 1000 events in 168.086258ms
|
||||
Burst completed: 1000 events in 164.931889ms
|
||||
Burst test completed: 10000 events in 6.657581804s
|
||||
Events/sec: 1502.05
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 34.850355805s
|
||||
Combined ops/sec: 286.94
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 707.652249ms
|
||||
Events/sec: 14131.23
|
||||
Avg latency: 551.706µs
|
||||
P90 latency: 724.937µs
|
||||
P95 latency: 790.563µs
|
||||
P99 latency: 980.677µs
|
||||
Bottom 10% Avg latency: 836.659µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 164.62419ms
|
||||
Burst completed: 1000 events in 155.938167ms
|
||||
Burst completed: 1000 events in 132.903056ms
|
||||
Burst completed: 1000 events in 142.377596ms
|
||||
Burst completed: 1000 events in 155.024184ms
|
||||
Burst completed: 1000 events in 147.095521ms
|
||||
Burst completed: 1000 events in 150.027389ms
|
||||
Burst completed: 1000 events in 152.873043ms
|
||||
Burst completed: 1000 events in 150.635479ms
|
||||
Burst completed: 1000 events in 146.45553ms
|
||||
Burst test completed: 10000 events in 6.519122877s
|
||||
Events/sec: 1533.95
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4806 reads in 1m0.03930731s
|
||||
Combined ops/sec: 163.33
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 613.036589ms
|
||||
Total Events: 10000
|
||||
Events/sec: 16312.24
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 476.418µs
|
||||
P90 Latency: 627.852µs
|
||||
P95 Latency: 686.836µs
|
||||
P99 Latency: 841.471µs
|
||||
Bottom 10% Avg Latency: 722.179µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.657581804s
|
||||
Total Events: 10000
|
||||
Events/sec: 1502.05
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 209 MB
|
||||
Avg Latency: 182.765µs
|
||||
P90 Latency: 234.409µs
|
||||
P95 Latency: 257.082µs
|
||||
P99 Latency: 330.764µs
|
||||
Bottom 10% Avg Latency: 277.843µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 34.850355805s
|
||||
Total Events: 10000
|
||||
Events/sec: 286.94
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 221 MB
|
||||
Avg Latency: 8.802188ms
|
||||
P90 Latency: 19.075904ms
|
||||
P95 Latency: 20.680962ms
|
||||
P99 Latency: 22.78326ms
|
||||
Bottom 10% Avg Latency: 20.897398ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 707.652249ms
|
||||
Total Events: 10000
|
||||
Events/sec: 14131.23
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 120 MB
|
||||
Avg Latency: 551.706µs
|
||||
P90 Latency: 724.937µs
|
||||
P95 Latency: 790.563µs
|
||||
P99 Latency: 980.677µs
|
||||
Bottom 10% Avg Latency: 836.659µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.519122877s
|
||||
Total Events: 10000
|
||||
Events/sec: 1533.95
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 168 MB
|
||||
Avg Latency: 204.873µs
|
||||
P90 Latency: 271.569µs
|
||||
P95 Latency: 329.28µs
|
||||
P99 Latency: 558.829µs
|
||||
Bottom 10% Avg Latency: 380.136µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.03930731s
|
||||
Total Events: 9806
|
||||
Events/sec: 163.33
|
||||
Success Rate: 98.1%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 164 MB
|
||||
Avg Latency: 19.506135ms
|
||||
P90 Latency: 43.206775ms
|
||||
P95 Latency: 45.944446ms
|
||||
P99 Latency: 49.910436ms
|
||||
Bottom 10% Avg Latency: 46.417943ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.adoc
|
||||
20250912224323628137 INF /tmp/benchmark_nostr-rs-relay_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912224324180883 INF /tmp/benchmark_nostr-rs-relay_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912224324184069 INF /tmp/benchmark_nostr-rs-relay_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: nostr-rs-relay
|
||||
RELAY_URL: ws://nostr-rs-relay:8080
|
||||
TEST_TIMESTAMP: 2025-09-12T22:43:24+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -1,190 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_relayer-basic_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912223509638362 INF /tmp/benchmark_relayer-basic_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912223509638864 INF /tmp/benchmark_relayer-basic_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912223509638903 INF /tmp/benchmark_relayer-basic_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912223509639558 INF (*types.Uint32)(0xc00570005c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912223509639620 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 659.848301ms
|
||||
Events/sec: 15155.00
|
||||
Avg latency: 513.243µs
|
||||
P90 latency: 706.89µs
|
||||
P95 latency: 792.685µs
|
||||
P99 latency: 1.089215ms
|
||||
Bottom 10% Avg latency: 864.746µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 142.551144ms
|
||||
Burst completed: 1000 events in 137.426595ms
|
||||
Burst completed: 1000 events in 139.51501ms
|
||||
Burst completed: 1000 events in 143.683041ms
|
||||
Burst completed: 1000 events in 136.500167ms
|
||||
Burst completed: 1000 events in 139.573844ms
|
||||
Burst completed: 1000 events in 145.873173ms
|
||||
Burst completed: 1000 events in 144.256594ms
|
||||
Burst completed: 1000 events in 157.89329ms
|
||||
Burst completed: 1000 events in 153.882313ms
|
||||
Burst test completed: 10000 events in 6.47066659s
|
||||
Events/sec: 1545.44
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 37.483034098s
|
||||
Combined ops/sec: 266.79
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 701.479526ms
|
||||
Events/sec: 14255.58
|
||||
Avg latency: 544.692µs
|
||||
P90 latency: 742.997µs
|
||||
P95 latency: 845.975µs
|
||||
P99 latency: 1.147624ms
|
||||
Bottom 10% Avg latency: 913.45µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 143.063212ms
|
||||
Burst completed: 1000 events in 139.422008ms
|
||||
Burst completed: 1000 events in 138.184516ms
|
||||
Burst completed: 1000 events in 148.207616ms
|
||||
Burst completed: 1000 events in 137.663883ms
|
||||
Burst completed: 1000 events in 141.607643ms
|
||||
Burst completed: 1000 events in 143.668551ms
|
||||
Burst completed: 1000 events in 140.467359ms
|
||||
Burst completed: 1000 events in 139.860509ms
|
||||
Burst completed: 1000 events in 138.328306ms
|
||||
Burst test completed: 10000 events in 6.43971118s
|
||||
Events/sec: 1552.86
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4870 reads in 1m0.034216467s
|
||||
Combined ops/sec: 164.41
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 659.848301ms
|
||||
Total Events: 10000
|
||||
Events/sec: 15155.00
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 153 MB
|
||||
Avg Latency: 513.243µs
|
||||
P90 Latency: 706.89µs
|
||||
P95 Latency: 792.685µs
|
||||
P99 Latency: 1.089215ms
|
||||
Bottom 10% Avg Latency: 864.746µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.47066659s
|
||||
Total Events: 10000
|
||||
Events/sec: 1545.44
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 206 MB
|
||||
Avg Latency: 273.645µs
|
||||
P90 Latency: 407.483µs
|
||||
P95 Latency: 498.989µs
|
||||
P99 Latency: 772.406µs
|
||||
Bottom 10% Avg Latency: 574.801µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 37.483034098s
|
||||
Total Events: 10000
|
||||
Events/sec: 266.79
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 163 MB
|
||||
Avg Latency: 9.873363ms
|
||||
P90 Latency: 21.643466ms
|
||||
P95 Latency: 22.924497ms
|
||||
P99 Latency: 24.961324ms
|
||||
Bottom 10% Avg Latency: 23.201171ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 701.479526ms
|
||||
Total Events: 10000
|
||||
Events/sec: 14255.58
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 153 MB
|
||||
Avg Latency: 544.692µs
|
||||
P90 Latency: 742.997µs
|
||||
P95 Latency: 845.975µs
|
||||
P99 Latency: 1.147624ms
|
||||
Bottom 10% Avg Latency: 913.45µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.43971118s
|
||||
Total Events: 10000
|
||||
Events/sec: 1552.86
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 204 MB
|
||||
Avg Latency: 266.006µs
|
||||
P90 Latency: 402.683µs
|
||||
P95 Latency: 491.253µs
|
||||
P99 Latency: 715.735µs
|
||||
Bottom 10% Avg Latency: 553.762µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.034216467s
|
||||
Total Events: 9870
|
||||
Events/sec: 164.41
|
||||
Success Rate: 98.7%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 184 MB
|
||||
Avg Latency: 19.308183ms
|
||||
P90 Latency: 42.766459ms
|
||||
P95 Latency: 45.372157ms
|
||||
P99 Latency: 49.993951ms
|
||||
Bottom 10% Avg Latency: 46.189525ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.adoc
|
||||
20250912223751453794 INF /tmp/benchmark_relayer-basic_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912223752488197 INF /tmp/benchmark_relayer-basic_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912223752491495 INF /tmp/benchmark_relayer-basic_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: relayer-basic
|
||||
RELAY_URL: ws://relayer-basic:7447
|
||||
TEST_TIMESTAMP: 2025-09-12T22:37:52+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
@@ -1,190 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_strfry_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
20250912223757656112 INF /tmp/benchmark_strfry_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250912223757657685 INF /tmp/benchmark_strfry_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250912223757657767 INF /tmp/benchmark_strfry_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250912223757658314 INF (*types.Uint32)(0xc0055c63ac)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250912223757658385 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 655.950723ms
|
||||
Events/sec: 15245.05
|
||||
Avg latency: 510.383µs
|
||||
P90 latency: 690.815µs
|
||||
P95 latency: 769.085µs
|
||||
P99 latency: 1.000349ms
|
||||
Bottom 10% Avg latency: 831.211µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 168.844089ms
|
||||
Burst completed: 1000 events in 138.644286ms
|
||||
Burst completed: 1000 events in 167.717113ms
|
||||
Burst completed: 1000 events in 141.566337ms
|
||||
Burst completed: 1000 events in 141.186447ms
|
||||
Burst completed: 1000 events in 145.845582ms
|
||||
Burst completed: 1000 events in 142.834263ms
|
||||
Burst completed: 1000 events in 144.707595ms
|
||||
Burst completed: 1000 events in 144.096361ms
|
||||
Burst completed: 1000 events in 158.524931ms
|
||||
Burst test completed: 10000 events in 6.520630606s
|
||||
Events/sec: 1533.59
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 36.04854491s
|
||||
Combined ops/sec: 277.40
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 644.867085ms
|
||||
Events/sec: 15507.07
|
||||
Avg latency: 501.972µs
|
||||
P90 latency: 650.197µs
|
||||
P95 latency: 709.37µs
|
||||
P99 latency: 914.673µs
|
||||
Bottom 10% Avg latency: 754.969µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 133.763626ms
|
||||
Burst completed: 1000 events in 135.289448ms
|
||||
Burst completed: 1000 events in 136.874215ms
|
||||
Burst completed: 1000 events in 135.118277ms
|
||||
Burst completed: 1000 events in 139.247778ms
|
||||
Burst completed: 1000 events in 142.262475ms
|
||||
Burst completed: 1000 events in 141.21783ms
|
||||
Burst completed: 1000 events in 143.089554ms
|
||||
Burst completed: 1000 events in 148.027057ms
|
||||
Burst completed: 1000 events in 150.006497ms
|
||||
Burst test completed: 10000 events in 6.429121967s
|
||||
Events/sec: 1555.42
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4857 reads in 1m0.047885362s
|
||||
Combined ops/sec: 164.15
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 655.950723ms
|
||||
Total Events: 10000
|
||||
Events/sec: 15245.05
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 510.383µs
|
||||
P90 Latency: 690.815µs
|
||||
P95 Latency: 769.085µs
|
||||
P99 Latency: 1.000349ms
|
||||
Bottom 10% Avg Latency: 831.211µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.520630606s
|
||||
Total Events: 10000
|
||||
Events/sec: 1533.59
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 208 MB
|
||||
Avg Latency: 223.359µs
|
||||
P90 Latency: 321.256µs
|
||||
P95 Latency: 378.145µs
|
||||
P99 Latency: 530.597µs
|
||||
Bottom 10% Avg Latency: 412.953µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 36.04854491s
|
||||
Total Events: 10000
|
||||
Events/sec: 277.40
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 222 MB
|
||||
Avg Latency: 9.309397ms
|
||||
P90 Latency: 20.403594ms
|
||||
P95 Latency: 22.152884ms
|
||||
P99 Latency: 24.513304ms
|
||||
Bottom 10% Avg Latency: 22.447709ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 644.867085ms
|
||||
Total Events: 10000
|
||||
Events/sec: 15507.07
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 125 MB
|
||||
Avg Latency: 501.972µs
|
||||
P90 Latency: 650.197µs
|
||||
P95 Latency: 709.37µs
|
||||
P99 Latency: 914.673µs
|
||||
Bottom 10% Avg Latency: 754.969µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 6.429121967s
|
||||
Total Events: 10000
|
||||
Events/sec: 1555.42
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 170 MB
|
||||
Avg Latency: 239.454µs
|
||||
P90 Latency: 335.133µs
|
||||
P95 Latency: 408.012µs
|
||||
P99 Latency: 593.458µs
|
||||
Bottom 10% Avg Latency: 446.804µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.047885362s
|
||||
Total Events: 9857
|
||||
Events/sec: 164.15
|
||||
Success Rate: 98.6%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 189 MB
|
||||
Avg Latency: 19.373297ms
|
||||
P90 Latency: 42.953055ms
|
||||
P95 Latency: 45.636867ms
|
||||
P99 Latency: 49.71977ms
|
||||
Bottom 10% Avg Latency: 46.144029ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_strfry_8/benchmark_report.adoc
|
||||
20250912224038033173 INF /tmp/benchmark_strfry_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250912224039055498 INF /tmp/benchmark_strfry_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250912224039058214 INF /tmp/benchmark_strfry_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: strfry
|
||||
RELAY_URL: ws://strfry:8080
|
||||
TEST_TIMESTAMP: 2025-09-12T22:40:39+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
2
go.mod
2
go.mod
@@ -51,3 +51,5 @@ require (
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
replace lol.mleku.dev => ../github.com/mleku/lol
|
||||
166
main.go
166
main.go
@@ -4,7 +4,9 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
pp "net/http/pprof"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"time"
|
||||
@@ -19,6 +21,31 @@ import (
|
||||
"next.orly.dev/pkg/version"
|
||||
)
|
||||
|
||||
// openBrowser attempts to open the specified URL in the default browser.
|
||||
// It supports multiple platforms including Linux, macOS, and Windows.
|
||||
func openBrowser(url string) {
|
||||
var err error
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
err = exec.Command("xdg-open", url).Start()
|
||||
case "windows":
|
||||
err = exec.Command(
|
||||
"rundll32", "url.dll,FileProtocolHandler", url,
|
||||
).Start()
|
||||
case "darwin":
|
||||
err = exec.Command("open", url).Start()
|
||||
default:
|
||||
log.W.F("unsupported platform for opening browser: %s", runtime.GOOS)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.E.F("failed to open browser: %v", err)
|
||||
} else {
|
||||
log.I.F("opened browser to %s", url)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU() * 4)
|
||||
var err error
|
||||
@@ -26,10 +53,18 @@ func main() {
|
||||
if cfg, err = config.New(); chk.T(err) {
|
||||
}
|
||||
log.I.F("starting %s %s", cfg.AppName, version.V)
|
||||
|
||||
// If OpenPprofWeb is true and profiling is enabled, we need to ensure HTTP profiling is also enabled
|
||||
if cfg.OpenPprofWeb && cfg.Pprof != "" && !cfg.PprofHTTP {
|
||||
log.I.F("enabling HTTP pprof server to support web viewer")
|
||||
cfg.PprofHTTP = true
|
||||
}
|
||||
switch cfg.Pprof {
|
||||
case "cpu":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(profile.CPUProfile, profile.ProfilePath(cfg.PprofPath))
|
||||
prof := profile.Start(
|
||||
profile.CPUProfile, profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.CPUProfile)
|
||||
@@ -37,7 +72,10 @@ func main() {
|
||||
}
|
||||
case "memory":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(profile.MemProfile, profile.ProfilePath(cfg.PprofPath))
|
||||
prof := profile.Start(
|
||||
profile.MemProfile, profile.MemProfileRate(32),
|
||||
profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.MemProfile)
|
||||
@@ -45,12 +83,67 @@ func main() {
|
||||
}
|
||||
case "allocation":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(profile.MemProfileAllocs, profile.ProfilePath(cfg.PprofPath))
|
||||
prof := profile.Start(
|
||||
profile.MemProfileAllocs, profile.MemProfileRate(32),
|
||||
profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.MemProfileAllocs)
|
||||
defer prof.Stop()
|
||||
}
|
||||
case "heap":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.MemProfileHeap, profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.MemProfileHeap)
|
||||
defer prof.Stop()
|
||||
}
|
||||
case "mutex":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.MutexProfile, profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.MutexProfile)
|
||||
defer prof.Stop()
|
||||
}
|
||||
case "threadcreate":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.ThreadcreationProfile,
|
||||
profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.ThreadcreationProfile)
|
||||
defer prof.Stop()
|
||||
}
|
||||
case "goroutine":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.GoroutineProfile, profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.GoroutineProfile)
|
||||
defer prof.Stop()
|
||||
}
|
||||
case "block":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.BlockProfile, profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.BlockProfile)
|
||||
defer prof.Stop()
|
||||
}
|
||||
|
||||
}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
var db *database.D
|
||||
@@ -65,10 +158,51 @@ func main() {
|
||||
}
|
||||
acl.Registry.Syncer()
|
||||
|
||||
// Start HTTP pprof server if enabled
|
||||
if cfg.PprofHTTP {
|
||||
pprofAddr := fmt.Sprintf("%s:%d", cfg.Listen, 6060)
|
||||
pprofMux := http.NewServeMux()
|
||||
pprofMux.HandleFunc("/debug/pprof/", pp.Index)
|
||||
pprofMux.HandleFunc("/debug/pprof/cmdline", pp.Cmdline)
|
||||
pprofMux.HandleFunc("/debug/pprof/profile", pp.Profile)
|
||||
pprofMux.HandleFunc("/debug/pprof/symbol", pp.Symbol)
|
||||
pprofMux.HandleFunc("/debug/pprof/trace", pp.Trace)
|
||||
for _, p := range []string{
|
||||
"allocs", "block", "goroutine", "heap", "mutex", "threadcreate",
|
||||
} {
|
||||
pprofMux.Handle("/debug/pprof/"+p, pp.Handler(p))
|
||||
}
|
||||
ppSrv := &http.Server{Addr: pprofAddr, Handler: pprofMux}
|
||||
go func() {
|
||||
log.I.F("pprof server listening on %s", pprofAddr)
|
||||
if err := ppSrv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
log.E.F("pprof server error: %v", err)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
shutdownCtx, cancelShutdown := context.WithTimeout(
|
||||
context.Background(), 2*time.Second,
|
||||
)
|
||||
defer cancelShutdown()
|
||||
_ = ppSrv.Shutdown(shutdownCtx)
|
||||
}()
|
||||
|
||||
// Open the pprof web viewer if enabled
|
||||
if cfg.OpenPprofWeb && cfg.Pprof != "" {
|
||||
pprofURL := fmt.Sprintf("http://localhost:6060/debug/pprof/")
|
||||
go func() {
|
||||
// Wait a moment for the server to start
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
openBrowser(pprofURL)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// Start health check HTTP server if configured
|
||||
var healthSrv *http.Server
|
||||
if cfg.HealthPort > 0 {
|
||||
mux := http.NewServeMux()
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc(
|
||||
"/healthz", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
@@ -78,15 +212,17 @@ func main() {
|
||||
)
|
||||
// Optional shutdown endpoint to gracefully stop the process so profiling defers run
|
||||
if cfg.EnableShutdown {
|
||||
mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte("shutting down"))
|
||||
log.I.F("shutdown requested via /shutdown; sending SIGINT to self")
|
||||
go func() {
|
||||
p, _ := os.FindProcess(os.Getpid())
|
||||
_ = p.Signal(os.Interrupt)
|
||||
}()
|
||||
})
|
||||
mux.HandleFunc(
|
||||
"/shutdown", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte("shutting down"))
|
||||
log.I.F("shutdown requested via /shutdown; sending SIGINT to self")
|
||||
go func() {
|
||||
p, _ := os.FindProcess(os.Getpid())
|
||||
_ = p.Signal(os.Interrupt)
|
||||
}()
|
||||
},
|
||||
)
|
||||
}
|
||||
healthSrv = &http.Server{
|
||||
Addr: fmt.Sprintf(
|
||||
@@ -118,12 +254,10 @@ func main() {
|
||||
fmt.Printf("\r")
|
||||
cancel()
|
||||
chk.E(db.Close())
|
||||
return
|
||||
case <-quit:
|
||||
cancel()
|
||||
chk.E(db.Close())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
log.I.F("exiting")
|
||||
}
|
||||
|
||||
87
pkg/database/fetch-events-by-serials.go
Normal file
87
pkg/database/fetch-events-by-serials.go
Normal file
@@ -0,0 +1,87 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/database/indexes"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
)
|
||||
|
||||
// FetchEventsBySerials processes multiple serials in ascending order and retrieves
|
||||
// the corresponding events from the database. It optimizes database access by
|
||||
// sorting the serials and seeking to each one sequentially.
|
||||
func (d *D) FetchEventsBySerials(serials []*types.Uint40) (evMap map[string]*event.E, err error) {
|
||||
log.T.F("FetchEventsBySerials: processing %d serials", len(serials))
|
||||
|
||||
// Initialize the result map
|
||||
evMap = make(map[string]*event.E)
|
||||
|
||||
// Return early if no serials are provided
|
||||
if len(serials) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Sort serials in ascending order for more efficient database access
|
||||
sortedSerials := make([]*types.Uint40, len(serials))
|
||||
copy(sortedSerials, serials)
|
||||
sort.Slice(sortedSerials, func(i, j int) bool {
|
||||
return sortedSerials[i].Get() < sortedSerials[j].Get()
|
||||
})
|
||||
|
||||
// Process all serials in a single transaction
|
||||
if err = d.View(
|
||||
func(txn *badger.Txn) (err error) {
|
||||
// Create an iterator with default options
|
||||
it := txn.NewIterator(badger.DefaultIteratorOptions)
|
||||
defer it.Close()
|
||||
|
||||
// Process each serial sequentially
|
||||
for _, ser := range sortedSerials {
|
||||
// Create the key for this serial
|
||||
buf := new(bytes.Buffer)
|
||||
if err = indexes.EventEnc(ser).MarshalWrite(buf); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
key := buf.Bytes()
|
||||
|
||||
// Seek to this key in the database
|
||||
it.Seek(key)
|
||||
if it.Valid() {
|
||||
item := it.Item()
|
||||
|
||||
// Verify the key matches exactly (should always be true after a Seek)
|
||||
if !bytes.Equal(item.Key(), key) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the item value
|
||||
var v []byte
|
||||
if v, err = item.ValueCopy(nil); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Unmarshal the event
|
||||
ev := new(event.E)
|
||||
if err = ev.UnmarshalBinary(bytes.NewBuffer(v)); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Store the event in the result map using the serial value as string key
|
||||
evMap[strconv.FormatUint(ser.Get(), 10)] = ev
|
||||
}
|
||||
}
|
||||
return
|
||||
},
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
log.T.F("FetchEventsBySerials: found %d events out of %d requested serials", len(evMap), len(serials))
|
||||
return
|
||||
}
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"lol.mleku.dev/errorf"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/filter"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/encoders/tag"
|
||||
@@ -64,6 +65,99 @@ func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// GetSerialsByIds takes a tag.T containing multiple IDs and returns a map of IDs to their
|
||||
// corresponding serial numbers. It directly queries the IdPrefix index for matching IDs,
|
||||
// which is more efficient than using GetIndexesFromFilter.
|
||||
func (d *D) GetSerialsByIds(ids *tag.T) (
|
||||
serials map[string]*types.Uint40, err error,
|
||||
) {
|
||||
return d.GetSerialsByIdsWithFilter(ids, nil)
|
||||
}
|
||||
|
||||
// GetSerialsByIdsWithFilter takes a tag.T containing multiple IDs and returns a
|
||||
// map of IDs to their corresponding serial numbers, applying a filter function
|
||||
// to each event. The function directly creates ID index prefixes for efficient querying.
|
||||
func (d *D) GetSerialsByIdsWithFilter(
|
||||
ids *tag.T, fn func(ev *event.E, ser *types.Uint40) bool,
|
||||
) (serials map[string]*types.Uint40, err error) {
|
||||
log.T.F("GetSerialsByIdsWithFilter: input ids count=%d", ids.Len())
|
||||
|
||||
// Initialize the result map
|
||||
serials = make(map[string]*types.Uint40)
|
||||
|
||||
// Return early if no IDs are provided
|
||||
if ids.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Process all IDs in a single transaction
|
||||
if err = d.View(
|
||||
func(txn *badger.Txn) (err error) {
|
||||
it := txn.NewIterator(badger.DefaultIteratorOptions)
|
||||
defer it.Close()
|
||||
|
||||
// Process each ID sequentially
|
||||
for _, id := range ids.T {
|
||||
// idHex := hex.Enc(id)
|
||||
|
||||
// Get the index prefix for this ID
|
||||
var idxs []Range
|
||||
if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.NewFromBytesSlice(id)}); chk.E(err) {
|
||||
// Skip this ID if we can't create its index
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip if no index was created
|
||||
if len(idxs) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Seek to the start of this ID's range in the database
|
||||
it.Seek(idxs[0].Start)
|
||||
if it.ValidForPrefix(idxs[0].Start) {
|
||||
// Found an entry for this ID
|
||||
item := it.Item()
|
||||
key := item.Key()
|
||||
|
||||
// Extract the serial number from the key
|
||||
ser := new(types.Uint40)
|
||||
buf := bytes.NewBuffer(key[len(key)-5:])
|
||||
if err = ser.UnmarshalRead(buf); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
// If a filter function is provided, fetch the event and apply the filter
|
||||
if fn != nil {
|
||||
var ev *event.E
|
||||
if ev, err = d.FetchEventBySerial(ser); err != nil {
|
||||
// Skip this event if we can't fetch it
|
||||
continue
|
||||
}
|
||||
|
||||
// Apply the filter
|
||||
if !fn(ev, ser) {
|
||||
// Skip this event if it doesn't pass the filter
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Store the serial in the result map using the hex-encoded ID as the key
|
||||
serials[string(id)] = ser
|
||||
}
|
||||
}
|
||||
return
|
||||
},
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
log.T.F(
|
||||
"GetSerialsByIdsWithFilter: found %d serials out of %d requested ids",
|
||||
len(serials), ids.Len(),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
// func (d *D) GetSerialBytesById(id []byte) (ser []byte, err error) {
|
||||
// var idxs []Range
|
||||
// if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.New(id)}); chk.E(err) {
|
||||
|
||||
@@ -48,9 +48,11 @@ func TestGetSerialById(t *testing.T) {
|
||||
|
||||
// Unmarshal the event
|
||||
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
ev.Free()
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ev.Free()
|
||||
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
|
||||
@@ -55,8 +55,10 @@ func TestGetSerialsByRange(t *testing.T) {
|
||||
|
||||
// Unmarshal the event
|
||||
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
ev.Free()
|
||||
t.Fatal(err)
|
||||
}
|
||||
ev.Free()
|
||||
|
||||
events = append(events, ev)
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"context"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"lol.mleku.dev/chk"
|
||||
@@ -43,73 +42,49 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
|
||||
var expDeletes types.Uint40s
|
||||
var expEvs event.S
|
||||
if f.Ids != nil && f.Ids.Len() > 0 {
|
||||
// for _, id := range f.Ids.T {
|
||||
// log.T.F("QueryEvents: looking for ID=%s", hex.Enc(id))
|
||||
// }
|
||||
// log.T.F("QueryEvents: ids path, count=%d", f.Ids.Len())
|
||||
for _, idx := range f.Ids.T {
|
||||
// log.T.F("QueryEvents: lookup id=%s", hex.Enc(idx))
|
||||
// we know there is only Ids in this, so run the ID query and fetch.
|
||||
var ser *types.Uint40
|
||||
var idErr error
|
||||
if ser, idErr = d.GetSerialById(idx); idErr != nil {
|
||||
// Check if this is a "not found" error which is expected for IDs we don't have
|
||||
if strings.Contains(idErr.Error(), "id not found in database") {
|
||||
// log.T.F(
|
||||
// "QueryEvents: ID not found in database: %s",
|
||||
// hex.Enc(idx),
|
||||
// )
|
||||
} else {
|
||||
// Log unexpected errors but continue processing other IDs
|
||||
// log.E.F(
|
||||
// "QueryEvents: error looking up id=%s err=%v",
|
||||
// hex.Enc(idx), idErr,
|
||||
// )
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Check if the serial is nil, which indicates the ID wasn't found
|
||||
if ser == nil {
|
||||
// log.T.F("QueryEvents: Serial is nil for ID: %s", hex.Enc(idx))
|
||||
continue
|
||||
}
|
||||
// fetch the events
|
||||
// Get all serials for the requested IDs in a single batch operation
|
||||
log.T.F("QueryEvents: ids path, count=%d", f.Ids.Len())
|
||||
|
||||
// Use GetSerialsByIds to batch process all IDs at once
|
||||
serials, idErr := d.GetSerialsByIds(f.Ids)
|
||||
if idErr != nil {
|
||||
log.E.F("QueryEvents: error looking up ids: %v", idErr)
|
||||
// Continue with whatever IDs we found
|
||||
}
|
||||
|
||||
// Process each found serial, fetch the event, and apply filters
|
||||
for idHex, ser := range serials {
|
||||
// fetch the event
|
||||
var ev *event.E
|
||||
if ev, err = d.FetchEventBySerial(ser); err != nil {
|
||||
// log.T.F(
|
||||
// "QueryEvents: fetch by serial failed for id=%s ser=%v err=%v",
|
||||
// hex.Enc(idx), ser, err,
|
||||
// )
|
||||
log.T.F(
|
||||
"QueryEvents: fetch by serial failed for id=%s ser=%v err=%v",
|
||||
idHex, ser, err,
|
||||
)
|
||||
continue
|
||||
}
|
||||
// log.T.F(
|
||||
// "QueryEvents: found id=%s kind=%d created_at=%d",
|
||||
// hex.Enc(ev.ID), ev.Kind, ev.CreatedAt,
|
||||
// )
|
||||
|
||||
// check for an expiration tag and delete after returning the result
|
||||
if CheckExpiration(ev) {
|
||||
log.T.F(
|
||||
"QueryEvents: id=%s filtered out due to expiration",
|
||||
hex.Enc(ev.ID),
|
||||
"QueryEvents: id=%s filtered out due to expiration", idHex,
|
||||
)
|
||||
expDeletes = append(expDeletes, ser)
|
||||
expEvs = append(expEvs, ev)
|
||||
continue
|
||||
}
|
||||
|
||||
// skip events that have been deleted by a proper deletion event
|
||||
if derr := d.CheckForDeleted(ev, nil); derr != nil {
|
||||
// log.T.F(
|
||||
// "QueryEvents: id=%s filtered out due to deletion: %v",
|
||||
// hex.Enc(ev.ID), derr,
|
||||
// )
|
||||
// log.T.F("QueryEvents: id=%s filtered out due to deletion: %v", idHex, derr)
|
||||
continue
|
||||
}
|
||||
// log.T.F(
|
||||
// "QueryEvents: id=%s SUCCESSFULLY FOUND, adding to results",
|
||||
// hex.Enc(ev.ID),
|
||||
// )
|
||||
|
||||
// Add the event to the results
|
||||
evs = append(evs, ev)
|
||||
// log.T.F("QueryEvents: id=%s SUCCESSFULLY FOUND, adding to results", idHex)
|
||||
}
|
||||
|
||||
// sort the events by timestamp
|
||||
sort.Slice(
|
||||
evs, func(i, j int) bool {
|
||||
|
||||
@@ -56,8 +56,10 @@ func setupTestDB(t *testing.T) (
|
||||
|
||||
// Unmarshal the event
|
||||
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
ev.Free()
|
||||
t.Fatal(err)
|
||||
}
|
||||
ev.Free()
|
||||
|
||||
events = append(events, ev)
|
||||
|
||||
|
||||
@@ -17,11 +17,12 @@ func (d *D) QueryForSerials(c context.Context, f *filter.F) (
|
||||
var founds []*types.Uint40
|
||||
var idPkTs []*store.IdPkTs
|
||||
if f.Ids != nil && f.Ids.Len() > 0 {
|
||||
for _, id := range f.Ids.T {
|
||||
var ser *types.Uint40
|
||||
if ser, err = d.GetSerialById(id); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// Use batch lookup to minimize transactions when resolving IDs to serials
|
||||
var serialMap map[string]*types.Uint40
|
||||
if serialMap, err = d.GetSerialsByIds(f.Ids); chk.E(err) {
|
||||
return
|
||||
}
|
||||
for _, ser := range serialMap {
|
||||
founds = append(founds, ser)
|
||||
}
|
||||
var tmp []*store.IdPkTs
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/errorf"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/encoders/envelopes"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/text"
|
||||
@@ -161,6 +162,7 @@ func (en *Result) Unmarshal(b []byte) (r []byte, err error) {
|
||||
return
|
||||
}
|
||||
en.Event = event.New()
|
||||
log.I.F("unmarshal: '%s'", b)
|
||||
if r, err = en.Event.Unmarshal(r); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -173,6 +173,7 @@ func (ev *E) Serialize() (b []byte) {
|
||||
//
|
||||
// Call ev.Free() to return the provided buffer to the bufpool afterwards.
|
||||
func (ev *E) Unmarshal(b []byte) (rem []byte, err error) {
|
||||
log.I.F("Unmarshal\n%s\n", string(b))
|
||||
key := make([]byte, 0, 9)
|
||||
for ; len(b) > 0; b = b[1:] {
|
||||
// Skip whitespace
|
||||
@@ -344,8 +345,8 @@ BetweenKV:
|
||||
goto InKey
|
||||
}
|
||||
}
|
||||
log.I.F("between kv")
|
||||
goto eof
|
||||
// If we reach here, the buffer ended unexpectedly. Treat as end-of-object
|
||||
goto AfterClose
|
||||
AfterClose:
|
||||
rem = b
|
||||
return
|
||||
@@ -364,6 +365,7 @@ eof:
|
||||
//
|
||||
// Call ev.Free() to return the provided buffer to the bufpool afterwards.
|
||||
func (ev *E) UnmarshalJSON(b []byte) (err error) {
|
||||
log.I.F("UnmarshalJSON: '%s'", b)
|
||||
_, err = ev.Unmarshal(b)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -140,6 +140,12 @@ func (r *Client) Context() context.Context { return r.connectionContext }
|
||||
// IsConnected returns true if the connection to this relay seems to be active.
|
||||
func (r *Client) IsConnected() bool { return r.connectionContext.Err() == nil }
|
||||
|
||||
// ConnectionCause returns the cancel cause for the relay connection context.
|
||||
func (r *Client) ConnectionCause() error { return context.Cause(r.connectionContext) }
|
||||
|
||||
// LastError returns the last connection error observed by the reader loop.
|
||||
func (r *Client) LastError() error { return r.ConnectionError }
|
||||
|
||||
// Connect tries to establish a websocket connection to r.URL.
|
||||
// If the context expires before the connection is complete, an error is returned.
|
||||
// Once successfully connected, context expiration has no effect: call r.Close
|
||||
@@ -218,6 +224,11 @@ func (r *Client) ConnectWithTLS(
|
||||
for {
|
||||
select {
|
||||
case <-r.connectionContext.Done():
|
||||
log.T.F(
|
||||
"WS.Client: connection context done for %s: cause=%v lastErr=%v",
|
||||
r.URL, context.Cause(r.connectionContext),
|
||||
r.ConnectionError,
|
||||
)
|
||||
ticker.Stop()
|
||||
r.Connection = nil
|
||||
|
||||
@@ -241,13 +252,17 @@ func (r *Client) ConnectWithTLS(
|
||||
"{%s} error writing ping: %v; closing websocket", r.URL,
|
||||
err,
|
||||
)
|
||||
r.Close() // this should trigger a context cancelation
|
||||
r.CloseWithReason(
|
||||
fmt.Errorf(
|
||||
"ping failed: %w", err,
|
||||
),
|
||||
) // this should trigger a context cancelation
|
||||
return
|
||||
}
|
||||
|
||||
case wr := <-r.writeQueue:
|
||||
// all write requests will go through this to prevent races
|
||||
log.D.F("{%s} sending %v\n", r.URL, string(wr.msg))
|
||||
// log.D.F("{%s} sending %v\n", r.URL, string(wr.msg))
|
||||
if err = r.Connection.WriteMessage(
|
||||
r.connectionContext, wr.msg,
|
||||
); err != nil {
|
||||
@@ -269,7 +284,11 @@ func (r *Client) ConnectWithTLS(
|
||||
r.connectionContext, buf,
|
||||
); err != nil {
|
||||
r.ConnectionError = err
|
||||
r.Close()
|
||||
log.T.F(
|
||||
"WS.Client: reader loop error on %s: %v; closing connection",
|
||||
r.URL, err,
|
||||
)
|
||||
r.CloseWithReason(fmt.Errorf("reader loop error: %w", err))
|
||||
return
|
||||
}
|
||||
message := buf.Bytes()
|
||||
@@ -358,11 +377,11 @@ func (r *Client) ConnectWithTLS(
|
||||
if okCallback, exist := r.okCallbacks.Load(string(env.EventID)); exist {
|
||||
okCallback(env.OK, env.ReasonString())
|
||||
} else {
|
||||
log.I.F(
|
||||
"{%s} got an unexpected OK message for event %0x",
|
||||
r.URL,
|
||||
env.EventID,
|
||||
)
|
||||
// log.I.F(
|
||||
// "{%s} got an unexpected OK message for event %0x",
|
||||
// r.URL,
|
||||
// env.EventID,
|
||||
// )
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -479,14 +498,27 @@ func (r *Client) Subscribe(
|
||||
sub := r.PrepareSubscription(ctx, ff, opts...)
|
||||
|
||||
if r.Connection == nil {
|
||||
log.T.F(
|
||||
"WS.Subscribe: not connected to %s; aborting sub id=%s", r.URL,
|
||||
sub.GetID(),
|
||||
)
|
||||
return nil, fmt.Errorf("not connected to %s", r.URL)
|
||||
}
|
||||
|
||||
log.T.F(
|
||||
"WS.Subscribe: firing subscription id=%s to %s with %d filters",
|
||||
sub.GetID(), r.URL, len(*ff),
|
||||
)
|
||||
if err := sub.Fire(); err != nil {
|
||||
log.T.F(
|
||||
"WS.Subscribe: Fire failed id=%s to %s: %v", sub.GetID(), r.URL,
|
||||
err,
|
||||
)
|
||||
return nil, fmt.Errorf(
|
||||
"couldn't subscribe to %v at %s: %w", ff, r.URL, err,
|
||||
)
|
||||
}
|
||||
log.T.F("WS.Subscribe: Fire succeeded id=%s to %s", sub.GetID(), r.URL)
|
||||
|
||||
return sub, nil
|
||||
}
|
||||
@@ -598,9 +630,10 @@ func (r *Client) QuerySync(
|
||||
}
|
||||
|
||||
// Close closes the relay connection.
|
||||
func (r *Client) Close() error {
|
||||
return r.close(errors.New("Close() called"))
|
||||
}
|
||||
func (r *Client) Close() error { return r.CloseWithReason(errors.New("Close() called")) }
|
||||
|
||||
// CloseWithReason closes the relay connection with a specific reason that will be stored as the context cancel cause.
|
||||
func (r *Client) CloseWithReason(reason error) error { return r.close(reason) }
|
||||
|
||||
func (r *Client) close(reason error) error {
|
||||
r.closeMutex.Lock()
|
||||
@@ -609,6 +642,10 @@ func (r *Client) close(reason error) error {
|
||||
if r.connectionContextCancel == nil {
|
||||
return fmt.Errorf("relay already closed")
|
||||
}
|
||||
log.T.F(
|
||||
"WS.Client: closing connection to %s: reason=%v lastErr=%v", r.URL,
|
||||
reason, r.ConnectionError,
|
||||
)
|
||||
r.connectionContextCancel(reason)
|
||||
r.connectionContextCancel = nil
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"sync/atomic"
|
||||
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/encoders/envelopes/closeenvelope"
|
||||
"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
@@ -88,8 +89,14 @@ var (
|
||||
)
|
||||
|
||||
func (sub *Subscription) start() {
|
||||
// Wait for the context to be done instead of blocking immediately
|
||||
// This allows the subscription to receive events before terminating
|
||||
sub.live.Store(true)
|
||||
// debug: log start of subscription goroutine
|
||||
log.T.F("WS.Subscription.start: started id=%s", sub.GetID())
|
||||
<-sub.Context.Done()
|
||||
// the subscription ends once the context is canceled (if not already)
|
||||
log.T.F("WS.Subscription.start: context done for id=%s", sub.GetID())
|
||||
sub.unsub(errors.New("context done on start()")) // this will set sub.live to false
|
||||
// do this so we don't have the possibility of closing the Events channel and then trying to send to it
|
||||
sub.mu.Lock()
|
||||
@@ -180,10 +187,18 @@ func (sub *Subscription) Fire() (err error) {
|
||||
var reqb []byte
|
||||
reqb = reqenvelope.NewFrom(sub.id, sub.Filters).Marshal(nil)
|
||||
sub.live.Store(true)
|
||||
log.T.F(
|
||||
"WS.Subscription.Fire: sending REQ id=%s filters=%d bytes=%d",
|
||||
sub.GetID(), len(*sub.Filters), len(reqb),
|
||||
)
|
||||
if err = <-sub.Client.Write(reqb); err != nil {
|
||||
err = fmt.Errorf("failed to write: %w", err)
|
||||
log.T.F(
|
||||
"WS.Subscription.Fire: write failed id=%s: %v", sub.GetID(), err,
|
||||
)
|
||||
sub.cancel(err)
|
||||
return
|
||||
}
|
||||
log.T.F("WS.Subscription.Fire: write ok id=%s", sub.GetID())
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1,82 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
OUT_DIR="/work/reports"
|
||||
BIN="/build/relay"
|
||||
CPU_DIR="/profiles/cpu"
|
||||
MEM_DIR="/profiles/mem"
|
||||
ALLOC_DIR="/profiles/alloc"
|
||||
REPORT="$OUT_DIR/profile-analysis.txt"
|
||||
|
||||
mkdir -p "$OUT_DIR"
|
||||
|
||||
# Helper: wait for any file matching a glob in a directory to exist and be non-empty
|
||||
wait_for_glob() {
|
||||
local dir="$1"; local pattern="$2"; local timeout=${3:-180}; local waited=0
|
||||
echo "[analyze] Waiting for profiles in ${dir}/${pattern} (timeout ${timeout}s)..." >&2
|
||||
while [[ $waited -lt $timeout ]]; do
|
||||
# shellcheck disable=SC2086
|
||||
local f
|
||||
f=$(ls -1 ${dir}/${pattern} 2>/dev/null | head -n1 || true)
|
||||
if [[ -n "${f}" && -s "${f}" ]]; then
|
||||
echo "[analyze] Found: ${f}" >&2
|
||||
echo -n "${f}"
|
||||
return 0
|
||||
fi
|
||||
sleep 3; waited=$((waited+3))
|
||||
done
|
||||
echo "" # return empty string
|
||||
return 1
|
||||
}
|
||||
|
||||
CPU_FILE=$(wait_for_glob "$CPU_DIR" "cpu*.pprof" 180 || true)
|
||||
MEM_FILE=$(wait_for_glob "$MEM_DIR" "*.pprof" 180 || true)
|
||||
ALLOC_FILE=$(wait_for_glob "$ALLOC_DIR" "*.pprof" 180 || true)
|
||||
|
||||
if [[ -z "$CPU_FILE" ]]; then echo "[analyze] WARNING: CPU profile not found at $CPU_DIR" >&2; fi
|
||||
if [[ -z "$MEM_FILE" ]]; then echo "[analyze] WARNING: Mem profile not found at $MEM_DIR" >&2; fi
|
||||
if [[ -z "$ALLOC_FILE" ]]; then echo "[analyze] WARNING: Alloc profile not found at $ALLOC_DIR" >&2; fi
|
||||
|
||||
{
|
||||
echo "==== next.orly.dev Profiling Analysis ===="
|
||||
date
|
||||
echo
|
||||
|
||||
if [[ -n "$CPU_FILE" && -s "$CPU_FILE" ]]; then
|
||||
echo "-- CPU Functions (full list by flat CPU) --"
|
||||
go tool pprof -top -nodecount=0 "$BIN" "$CPU_FILE" 2>/dev/null | sed '1,2d'
|
||||
echo
|
||||
else
|
||||
echo "CPU profile: not available"
|
||||
echo
|
||||
fi
|
||||
|
||||
if [[ -n "$MEM_FILE" && -s "$MEM_FILE" ]]; then
|
||||
echo "-- Memory (In-Use Space) Functions (full list) --"
|
||||
go tool pprof -top -sample_index=inuse_space -nodecount=0 "$BIN" "$MEM_FILE" 2>/dev/null | sed '1,2d'
|
||||
echo
|
||||
else
|
||||
echo "Memory (in-use) profile: not available"
|
||||
echo
|
||||
fi
|
||||
|
||||
if [[ -n "$ALLOC_FILE" && -s "$ALLOC_FILE" ]]; then
|
||||
echo "-- Allocations (Total Alloc Space) Functions (full list) --"
|
||||
go tool pprof -top -sample_index=alloc_space -nodecount=0 "$BIN" "$ALLOC_FILE" 2>/dev/null | sed '1,2d'
|
||||
echo
|
||||
echo "-- Allocation Frequency (Alloc Objects) Functions (full list) --"
|
||||
go tool pprof -top -sample_index=alloc_objects -nodecount=0 "$BIN" "$ALLOC_FILE" 2>/dev/null | sed '1,2d'
|
||||
echo
|
||||
else
|
||||
echo "Allocation profile: not available"
|
||||
echo
|
||||
fi
|
||||
|
||||
echo "Notes:"
|
||||
echo "- CPU section identifies functions using the most CPU time."
|
||||
echo "- Memory section identifies which functions retain the most memory (in-use)."
|
||||
echo "- Allocations sections identify functions responsible for the most allocation volume and count, which correlates with GC pressure."
|
||||
echo "- Profiles are created by github.com/pkg/profile and may only be flushed when the relay process receives a shutdown; CPU profile often requires process exit."
|
||||
} > "$REPORT"
|
||||
|
||||
echo "[analyze] Wrote report to $REPORT" >&2
|
||||
@@ -1,158 +0,0 @@
|
||||
services:
|
||||
# Build image for next.orly.dev relay (same as benchmark build)
|
||||
next-orly-image:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: cmd/benchmark/Dockerfile.next-orly
|
||||
image: next-orly:profiler
|
||||
|
||||
# Run relay with CPU profiling
|
||||
next-orly-cpu:
|
||||
image: next-orly:profiler
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: cmd/benchmark/Dockerfile.next-orly
|
||||
container_name: profiler-next-orly-cpu
|
||||
environment:
|
||||
- ORLY_DATA_DIR=/data
|
||||
- ORLY_LISTEN=0.0.0.0
|
||||
- ORLY_PORT=8080
|
||||
- ORLY_LOG_LEVEL=info
|
||||
- ORLY_PPROF=cpu
|
||||
- ORLY_PPROF_PATH=/profiles
|
||||
- ORLY_HEALTH_PORT=18080
|
||||
- ORLY_ENABLE_SHUTDOWN=true
|
||||
volumes:
|
||||
- ./data/cpu:/data
|
||||
- ./profiles/cpu:/profiles
|
||||
ports:
|
||||
- "8101:8080"
|
||||
- "18081:18080"
|
||||
networks:
|
||||
- profiler-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null"]
|
||||
interval: 15s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
start_period: 20s
|
||||
|
||||
# Run relay with in-use memory profiling
|
||||
next-orly-mem:
|
||||
image: next-orly:profiler
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: cmd/benchmark/Dockerfile.next-orly
|
||||
container_name: profiler-next-orly-mem
|
||||
environment:
|
||||
- ORLY_DATA_DIR=/data
|
||||
- ORLY_LISTEN=0.0.0.0
|
||||
- ORLY_PORT=8080
|
||||
- ORLY_LOG_LEVEL=info
|
||||
- ORLY_PPROF=memory
|
||||
- ORLY_PPROF_PATH=/profiles
|
||||
- ORLY_HEALTH_PORT=18080
|
||||
- ORLY_ENABLE_SHUTDOWN=true
|
||||
volumes:
|
||||
- ./data/mem:/data
|
||||
- ./profiles/mem:/profiles
|
||||
ports:
|
||||
- "8102:8080"
|
||||
- "18082:18080"
|
||||
networks:
|
||||
- profiler-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null"]
|
||||
interval: 15s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
start_period: 20s
|
||||
|
||||
# Run relay with allocation profiling (alloc_space/alloc_objects)
|
||||
next-orly-alloc:
|
||||
image: next-orly:profiler
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: cmd/benchmark/Dockerfile.next-orly
|
||||
container_name: profiler-next-orly-alloc
|
||||
environment:
|
||||
- ORLY_DATA_DIR=/data
|
||||
- ORLY_LISTEN=0.0.0.0
|
||||
- ORLY_PORT=8080
|
||||
- ORLY_LOG_LEVEL=info
|
||||
- ORLY_PPROF=allocation
|
||||
- ORLY_PPROF_PATH=/profiles
|
||||
- ORLY_HEALTH_PORT=18080
|
||||
- ORLY_ENABLE_SHUTDOWN=true
|
||||
volumes:
|
||||
- ./data/alloc:/data
|
||||
- ./profiles/alloc:/profiles
|
||||
ports:
|
||||
- "8103:8080"
|
||||
- "18083:18080"
|
||||
networks:
|
||||
- profiler-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null"]
|
||||
interval: 15s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
start_period: 20s
|
||||
|
||||
# Load generator using existing benchmark runner
|
||||
loadgen:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: cmd/benchmark/Dockerfile.benchmark
|
||||
container_name: profiler-loadgen
|
||||
depends_on:
|
||||
next-orly-cpu:
|
||||
condition: service_healthy
|
||||
next-orly-mem:
|
||||
condition: service_healthy
|
||||
next-orly-alloc:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- BENCHMARK_TARGETS=next-orly-cpu:8080,next-orly-mem:8080,next-orly-alloc:8080
|
||||
- BENCHMARK_EVENTS=5000
|
||||
- BENCHMARK_WORKERS=4
|
||||
- BENCHMARK_DURATION=30s
|
||||
volumes:
|
||||
- ./reports:/reports
|
||||
networks:
|
||||
- profiler-net
|
||||
command: >
|
||||
sh -c "
|
||||
echo 'Waiting a bit before starting load...' && \
|
||||
sleep 10 && \
|
||||
/app/benchmark-runner --output-dir=/reports && \
|
||||
echo 'Triggering relay shutdowns to flush profiles...' && \
|
||||
curl -sS -X POST http://next-orly-cpu:18080/shutdown || true && \
|
||||
curl -sS -X POST http://next-orly-mem:18080/shutdown || true && \
|
||||
curl -sS -X POST http://next-orly-alloc:18080/shutdown || true && \
|
||||
echo 'Load generation complete. Waiting for relays to exit...' && \
|
||||
sleep 5
|
||||
"
|
||||
|
||||
# Analyzer service: use the builder stage so Go toolchain is available
|
||||
analyzer:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: cmd/benchmark/Dockerfile.next-orly
|
||||
target: builder
|
||||
container_name: profiler-analyzer
|
||||
depends_on:
|
||||
loadgen:
|
||||
condition: service_completed_successfully
|
||||
working_dir: /work
|
||||
volumes:
|
||||
- ./profiles:/profiles:ro
|
||||
- ./reports:/work/reports
|
||||
- ./analyze.sh:/work/analyze.sh:ro
|
||||
networks:
|
||||
- profiler-net
|
||||
command: ["bash", "-lc", "bash /work/analyze.sh"]
|
||||
|
||||
networks:
|
||||
profiler-net:
|
||||
driver: bridge
|
||||
@@ -1,191 +0,0 @@
|
||||
==== next.orly.dev Profiling Analysis ====
|
||||
Sat Sep 13 15:15:09 UTC 2025
|
||||
|
||||
-- CPU Functions (full list by flat CPU) --
|
||||
Type: cpu
|
||||
Time: Sep 13, 2025 at 3:08pm (UTC)
|
||||
Duration: 401.93s, Total samples = 2.67s ( 0.66%)
|
||||
Showing nodes accounting for 2.48s, 92.88% of 2.67s total
|
||||
Dropped 51 nodes (cum <= 0.01s)
|
||||
flat flat% sum% cum cum%
|
||||
0.78s 29.21% 29.21% 0.78s 29.21% internal/runtime/syscall.Syscall6
|
||||
0.27s 10.11% 39.33% 0.27s 10.11% runtime.futex
|
||||
0.12s 4.49% 43.82% 0.12s 4.49% runtime.write1
|
||||
0.11s 4.12% 47.94% 0.34s 12.73% runtime.stealWork
|
||||
0.09s 3.37% 51.31% 0.09s 3.37% runtime.read
|
||||
0.08s 3.00% 54.31% 1.94s 72.66% runtime.findRunnable
|
||||
0.08s 3.00% 57.30% 0.08s 3.00% runtime.pMask.read (inline)
|
||||
0.05s 1.87% 59.18% 0.06s 2.25% runtime.sellock
|
||||
0.05s 1.87% 61.05% 0.05s 1.87% runtime.unlock2
|
||||
0.04s 1.50% 62.55% 0.43s 16.10% github.com/dgraph-io/badger/v4.(*levelsController).runCompactor
|
||||
0.04s 1.50% 64.04% 0.89s 33.33% runtime.netpoll
|
||||
0.04s 1.50% 65.54% 0.16s 5.99% runtime.selectgo
|
||||
0.03s 1.12% 66.67% 0.04s 1.50% github.com/dgraph-io/ristretto/v2/z.(*Closer).HasBeenClosed (inline)
|
||||
0.03s 1.12% 67.79% 0.03s 1.12% internal/runtime/atomic.(*Uint32).CompareAndSwap (inline)
|
||||
0.03s 1.12% 68.91% 0.04s 1.50% runtime.mallocgcSmallNoscan
|
||||
0.03s 1.12% 70.04% 0.03s 1.12% runtime.pMask.set (inline)
|
||||
0.03s 1.12% 71.16% 0.03s 1.12% runtime.releaseSudog
|
||||
0.03s 1.12% 72.28% 0.03s 1.12% sync/atomic.(*Int32).Add (inline)
|
||||
0.02s 0.75% 73.03% 0.02s 0.75% github.com/dgraph-io/badger/v4.(*compactStatus).delSize
|
||||
0.02s 0.75% 73.78% 0.08s 3.00% github.com/dgraph-io/badger/v4.(*levelsController).levelTargets
|
||||
0.02s 0.75% 74.53% 0.20s 7.49% github.com/dgraph-io/badger/v4.(*levelsController).pickCompactLevels
|
||||
0.02s 0.75% 75.28% 0.02s 0.75% internal/reflectlite.unpackEface (inline)
|
||||
0.02s 0.75% 76.03% 0.02s 0.75% internal/runtime/atomic.(*Uint32).Load (inline)
|
||||
0.02s 0.75% 76.78% 0.04s 1.50% runtime.(*mcache).prepareForSweep
|
||||
0.02s 0.75% 77.53% 0.02s 0.75% runtime.(*randomEnum).next (inline)
|
||||
0.02s 0.75% 78.28% 0.03s 1.12% runtime.casgstatus
|
||||
0.02s 0.75% 79.03% 0.03s 1.12% runtime.checkRunqsNoP
|
||||
0.02s 0.75% 79.78% 0.02s 0.75% runtime.execute
|
||||
0.02s 0.75% 80.52% 0.19s 7.12% runtime.futexsleep
|
||||
0.02s 0.75% 81.27% 0.02s 0.75% runtime.lock2
|
||||
0.02s 0.75% 82.02% 0.02s 0.75% runtime.nanotime1
|
||||
0.02s 0.75% 82.77% 0.02s 0.75% runtime.nextFreeFast (inline)
|
||||
0.02s 0.75% 83.52% 0.02s 0.75% runtime.pidleput
|
||||
0.02s 0.75% 84.27% 0.02s 0.75% runtime.runqget (inline)
|
||||
0.02s 0.75% 85.02% 2.10s 78.65% runtime.schedule
|
||||
0.02s 0.75% 85.77% 0.02s 0.75% runtime.traceEnabled (inline)
|
||||
0.02s 0.75% 86.52% 0.03s 1.12% time.Now
|
||||
0.02s 0.75% 87.27% 0.03s 1.12% time.Time.Add
|
||||
0.01s 0.37% 87.64% 0.04s 1.50% github.com/dgraph-io/badger/v4.(*levelHandler).getTotalSize
|
||||
0.01s 0.37% 88.01% 0.76s 28.46% internal/runtime/syscall.EpollWait
|
||||
0.01s 0.37% 88.39% 0.14s 5.24% runtime.(*timer).unlockAndRun
|
||||
0.01s 0.37% 88.76% 0.03s 1.12% runtime.(*waitq).dequeue (inline)
|
||||
0.01s 0.37% 89.14% 0.07s 2.62% runtime.acquirep
|
||||
0.01s 0.37% 89.51% 0.04s 1.50% runtime.checkTimersNoP
|
||||
0.01s 0.37% 89.89% 0.02s 0.75% runtime.mallocgcSmallScanNoHeader
|
||||
0.01s 0.37% 90.26% 0.02s 0.75% runtime.markroot
|
||||
0.01s 0.37% 90.64% 2.12s 79.40% runtime.mcall
|
||||
0.01s 0.37% 91.01% 2.11s 79.03% runtime.park_m
|
||||
0.01s 0.37% 91.39% 0.02s 0.75% runtime.traceAcquire (inline)
|
||||
0.01s 0.37% 91.76% 0.12s 4.49% runtime.wakep
|
||||
0.01s 0.37% 92.13% 0.13s 4.87% runtime.write
|
||||
0.01s 0.37% 92.51% 0.03s 1.12% sort.Slice
|
||||
0.01s 0.37% 92.88% 0.13s 4.87% time.sendTime
|
||||
0 0% 92.88% 0.02s 0.75% github.com/dgraph-io/badger/v4.(*DB).openMemTable
|
||||
0 0% 92.88% 0.02s 0.75% github.com/dgraph-io/badger/v4.(*DB).openMemTables
|
||||
0 0% 92.88% 0.20s 7.49% github.com/dgraph-io/badger/v4.(*levelsController).runCompactor.func3
|
||||
0 0% 92.88% 0.02s 0.75% github.com/dgraph-io/badger/v4.(*logFile).Truncate
|
||||
0 0% 92.88% 0.03s 1.12% github.com/dgraph-io/badger/v4.Open
|
||||
0 0% 92.88% 0.02s 0.75% github.com/dgraph-io/ristretto/v2/z.(*MmapFile).Truncate
|
||||
0 0% 92.88% 0.02s 0.75% internal/poll.(*FD).Ftruncate
|
||||
0 0% 92.88% 0.02s 0.75% internal/poll.(*FD).Ftruncate.func1 (inline)
|
||||
0 0% 92.88% 0.02s 0.75% internal/poll.ignoringEINTR (inline)
|
||||
0 0% 92.88% 0.02s 0.75% internal/reflectlite.ValueOf (inline)
|
||||
0 0% 92.88% 0.03s 1.12% main.main
|
||||
0 0% 92.88% 0.03s 1.12% next.orly.dev/pkg/database.New
|
||||
0 0% 92.88% 0.02s 0.75% os.(*File).Truncate
|
||||
0 0% 92.88% 0.17s 6.37% runtime.(*timers).check
|
||||
0 0% 92.88% 0.14s 5.24% runtime.(*timers).run
|
||||
0 0% 92.88% 0.06s 2.25% runtime.chansend
|
||||
0 0% 92.88% 0.02s 0.75% runtime.convTslice
|
||||
0 0% 92.88% 0.10s 3.75% runtime.futexwakeup
|
||||
0 0% 92.88% 0.02s 0.75% runtime.gcBgMarkWorker
|
||||
0 0% 92.88% 0.02s 0.75% runtime.gcBgMarkWorker.func2
|
||||
0 0% 92.88% 0.02s 0.75% runtime.gcDrain
|
||||
0 0% 92.88% 0.02s 0.75% runtime.gcDrainMarkWorkerDedicated (inline)
|
||||
0 0% 92.88% 0.03s 1.12% runtime.goready (inline)
|
||||
0 0% 92.88% 0.02s 0.75% runtime.lock (inline)
|
||||
0 0% 92.88% 0.02s 0.75% runtime.lockWithRank (inline)
|
||||
0 0% 92.88% 0.19s 7.12% runtime.mPark (inline)
|
||||
0 0% 92.88% 0.03s 1.12% runtime.main
|
||||
0 0% 92.88% 0.05s 1.87% runtime.makeslice
|
||||
0 0% 92.88% 0.07s 2.62% runtime.mallocgc
|
||||
0 0% 92.88% 0.13s 4.87% runtime.netpollBreak
|
||||
0 0% 92.88% 0.19s 7.12% runtime.notesleep
|
||||
0 0% 92.88% 0.10s 3.75% runtime.notewakeup
|
||||
0 0% 92.88% 0.04s 1.50% runtime.pidleget
|
||||
0 0% 92.88% 0.03s 1.12% runtime.ready
|
||||
0 0% 92.88% 0.12s 4.49% runtime.resetspinning
|
||||
0 0% 92.88% 0.06s 2.25% runtime.selectnbsend
|
||||
0 0% 92.88% 0.03s 1.12% runtime.send
|
||||
0 0% 92.88% 0.03s 1.12% runtime.send.goready.func1
|
||||
0 0% 92.88% 0.10s 3.75% runtime.startm
|
||||
0 0% 92.88% 0.21s 7.87% runtime.stopm
|
||||
0 0% 92.88% 0.03s 1.12% runtime.systemstack
|
||||
0 0% 92.88% 0.05s 1.87% runtime.unlock (inline)
|
||||
0 0% 92.88% 0.05s 1.87% runtime.unlockWithRank (inline)
|
||||
0 0% 92.88% 0.02s 0.75% sync.(*RWMutex).RLock (inline)
|
||||
0 0% 92.88% 0.02s 0.75% syscall.Ftruncate
|
||||
0 0% 92.88% 0.03s 1.12% syscall.RawSyscall6
|
||||
0 0% 92.88% 0.03s 1.12% syscall.Syscall
|
||||
|
||||
-- Memory (In-Use Space) Functions (full list) --
|
||||
Type: inuse_space
|
||||
Time: Sep 13, 2025 at 3:15pm (UTC)
|
||||
Showing nodes accounting for 85209.25kB, 99.24% of 85860.37kB total
|
||||
Dropped 121 nodes (cum <= 429.30kB)
|
||||
flat flat% sum% cum cum%
|
||||
85200kB 99.23% 99.23% 85200kB 99.23% github.com/dgraph-io/badger/v4/skl.newArena (inline)
|
||||
9.25kB 0.011% 99.24% 85632.26kB 99.73% github.com/dgraph-io/badger/v4.Open
|
||||
0 0% 99.24% 85200kB 99.23% github.com/dgraph-io/badger/v4.(*DB).newMemTable
|
||||
0 0% 99.24% 85200kB 99.23% github.com/dgraph-io/badger/v4.(*DB).openMemTable
|
||||
0 0% 99.24% 85200kB 99.23% github.com/dgraph-io/badger/v4/skl.NewSkiplist
|
||||
0 0% 99.24% 85632.26kB 99.73% main.main
|
||||
0 0% 99.24% 85632.26kB 99.73% next.orly.dev/pkg/database.New
|
||||
0 0% 99.24% 85632.26kB 99.73% runtime.main
|
||||
|
||||
-- Allocations (Total Alloc Space) Functions (full list) --
|
||||
Type: alloc_space
|
||||
Time: Sep 13, 2025 at 3:15pm (UTC)
|
||||
Showing nodes accounting for 87.20MB, 100% of 87.21MB total
|
||||
Dropped 15 nodes (cum <= 0.44MB)
|
||||
flat flat% sum% cum cum%
|
||||
83.20MB 95.40% 95.40% 83.20MB 95.40% github.com/dgraph-io/badger/v4/skl.newArena (inline)
|
||||
4MB 4.59% 100% 4MB 4.59% github.com/dgraph-io/ristretto/v2/z.Calloc (inline)
|
||||
0 0% 100% 4MB 4.59% github.com/dgraph-io/badger/v4.(*DB).flushMemtable
|
||||
0 0% 100% 4MB 4.59% github.com/dgraph-io/badger/v4.(*DB).handleMemTableFlush
|
||||
0 0% 100% 83.20MB 95.40% github.com/dgraph-io/badger/v4.(*DB).newMemTable
|
||||
0 0% 100% 83.20MB 95.40% github.com/dgraph-io/badger/v4.(*DB).openMemTable
|
||||
0 0% 100% 83.20MB 95.40% github.com/dgraph-io/badger/v4.Open
|
||||
0 0% 100% 4MB 4.59% github.com/dgraph-io/badger/v4.Open.func5
|
||||
0 0% 100% 4MB 4.59% github.com/dgraph-io/badger/v4.buildL0Table
|
||||
0 0% 100% 83.20MB 95.40% github.com/dgraph-io/badger/v4/skl.NewSkiplist
|
||||
0 0% 100% 4MB 4.59% github.com/dgraph-io/badger/v4/table.NewTableBuilder
|
||||
0 0% 100% 4MB 4.59% github.com/dgraph-io/ristretto/v2/z.(*AllocatorPool).Get
|
||||
0 0% 100% 4MB 4.59% github.com/dgraph-io/ristretto/v2/z.NewAllocator
|
||||
0 0% 100% 83.20MB 95.40% main.main
|
||||
0 0% 100% 83.20MB 95.40% next.orly.dev/pkg/database.New
|
||||
0 0% 100% 83.20MB 95.40% runtime.main
|
||||
|
||||
-- Allocation Frequency (Alloc Objects) Functions (full list) --
|
||||
Type: alloc_objects
|
||||
Time: Sep 13, 2025 at 3:15pm (UTC)
|
||||
Showing nodes accounting for 6, 100% of 6 total
|
||||
flat flat% sum% cum cum%
|
||||
1 16.67% 16.67% 1 16.67% github.com/dgraph-io/badger/v4/skl.newArena (inline)
|
||||
1 16.67% 33.33% 1 16.67% github.com/dgraph-io/ristretto/v2/z.Calloc (inline)
|
||||
1 16.67% 50.00% 1 16.67% github.com/pkg/profile.Start
|
||||
1 16.67% 66.67% 1 16.67% github.com/pkg/profile.Start.func11
|
||||
1 16.67% 83.33% 1 16.67% runtime.allocm
|
||||
1 16.67% 100% 1 16.67% runtime.procresize
|
||||
0 0% 100% 1 16.67% github.com/dgraph-io/badger/v4.(*DB).flushMemtable
|
||||
0 0% 100% 1 16.67% github.com/dgraph-io/badger/v4.(*DB).handleMemTableFlush
|
||||
0 0% 100% 1 16.67% github.com/dgraph-io/badger/v4.(*DB).newMemTable
|
||||
0 0% 100% 1 16.67% github.com/dgraph-io/badger/v4.(*DB).openMemTable
|
||||
0 0% 100% 1 16.67% github.com/dgraph-io/badger/v4.Open
|
||||
0 0% 100% 1 16.67% github.com/dgraph-io/badger/v4.Open.func5
|
||||
0 0% 100% 1 16.67% github.com/dgraph-io/badger/v4.buildL0Table
|
||||
0 0% 100% 1 16.67% github.com/dgraph-io/badger/v4/skl.NewSkiplist
|
||||
0 0% 100% 1 16.67% github.com/dgraph-io/badger/v4/table.NewTableBuilder
|
||||
0 0% 100% 1 16.67% github.com/dgraph-io/ristretto/v2/z.(*AllocatorPool).Get
|
||||
0 0% 100% 1 16.67% github.com/dgraph-io/ristretto/v2/z.NewAllocator
|
||||
0 0% 100% 2 33.33% main.main
|
||||
0 0% 100% 1 16.67% next.orly.dev/pkg/database.New
|
||||
0 0% 100% 2 33.33% runtime.main
|
||||
0 0% 100% 1 16.67% runtime.mstart
|
||||
0 0% 100% 1 16.67% runtime.mstart0
|
||||
0 0% 100% 1 16.67% runtime.mstart1
|
||||
0 0% 100% 1 16.67% runtime.newm
|
||||
0 0% 100% 1 16.67% runtime.resetspinning
|
||||
0 0% 100% 1 16.67% runtime.schedule
|
||||
0 0% 100% 1 16.67% runtime.startTheWorld.func1
|
||||
0 0% 100% 1 16.67% runtime.startTheWorldWithSema
|
||||
0 0% 100% 1 16.67% runtime.startm
|
||||
0 0% 100% 1 16.67% runtime.systemstack
|
||||
0 0% 100% 1 16.67% runtime.wakep
|
||||
|
||||
Notes:
|
||||
- CPU section identifies functions using the most CPU time.
|
||||
- Memory section identifies which functions retain the most memory (in-use).
|
||||
- Allocations sections identify functions responsible for the most allocation volume and count, which correlates with GC pressure.
|
||||
- Profiles are created by github.com/pkg/profile and may only be flushed when the relay process receives a shutdown; CPU profile often requires process exit.
|
||||
@@ -1,86 +0,0 @@
|
||||
================================================================
|
||||
NOSTR RELAY BENCHMARK AGGREGATE REPORT
|
||||
================================================================
|
||||
Generated: 2025-09-13T15:15:03+00:00
|
||||
Benchmark Configuration:
|
||||
Events per test: 5000
|
||||
Concurrent workers: 4
|
||||
Test duration: 30s
|
||||
|
||||
Relays tested: 3
|
||||
|
||||
================================================================
|
||||
SUMMARY BY RELAY
|
||||
================================================================
|
||||
|
||||
Relay: next-orly-cpu
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 14586.66
|
||||
Events/sec: 523.16
|
||||
Events/sec: 13427.76
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 266.404µs
|
||||
Bottom 10% Avg Latency: 404.279µs
|
||||
Avg Latency: 181.633µs
|
||||
P95 Latency: 386.835µs
|
||||
P95 Latency: 258.311µs
|
||||
P95 Latency: 9.420264ms
|
||||
|
||||
Relay: next-orly-mem
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 14400.62
|
||||
Events/sec: 517.16
|
||||
Events/sec: 13744.06
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 269.084µs
|
||||
Bottom 10% Avg Latency: 409.33µs
|
||||
Avg Latency: 178.033µs
|
||||
P95 Latency: 390.313µs
|
||||
P95 Latency: 255.165µs
|
||||
P95 Latency: 9.733877ms
|
||||
|
||||
Relay: next-orly-alloc
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 14727.35
|
||||
Events/sec: 534.10
|
||||
Events/sec: 13931.93
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 264.477µs
|
||||
Bottom 10% Avg Latency: 400.393µs
|
||||
Avg Latency: 183.763µs
|
||||
P95 Latency: 382.538µs
|
||||
P95 Latency: 269.453µs
|
||||
P95 Latency: 10.855199ms
|
||||
|
||||
|
||||
================================================================
|
||||
DETAILED RESULTS
|
||||
================================================================
|
||||
|
||||
Individual relay reports are available in:
|
||||
- /reports/run_20250913_150837/next-orly-alloc_results.txt
|
||||
- /reports/run_20250913_150837/next-orly-cpu_results.txt
|
||||
- /reports/run_20250913_150837/next-orly-mem_results.txt
|
||||
|
||||
================================================================
|
||||
BENCHMARK COMPARISON TABLE
|
||||
================================================================
|
||||
|
||||
Relay Status Peak Tput/s Avg Latency Success Rate
|
||||
---- ------ ----------- ----------- ------------
|
||||
next-orly-cpu OK 14586.66 266.404µs 100.0%
|
||||
next-orly-mem OK 14400.62 269.084µs 100.0%
|
||||
next-orly-alloc OK 14727.35 264.477µs 100.0%
|
||||
|
||||
================================================================
|
||||
End of Report
|
||||
================================================================
|
||||
@@ -1,190 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_next-orly-alloc_8
|
||||
Events: 5000, Workers: 4, Duration: 30s
|
||||
20250913151255068694 INF /tmp/benchmark_next-orly-alloc_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250913151255069225 INF /tmp/benchmark_next-orly-alloc_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250913151255069261 INF /tmp/benchmark_next-orly-alloc_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250913151255069538 INF (*types.Uint32)(0xc00001010c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250913151255069681 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 5000/5000 (100.0%)
|
||||
Duration: 339.504298ms
|
||||
Events/sec: 14727.35
|
||||
Avg latency: 264.477µs
|
||||
P90 latency: 350.647µs
|
||||
P95 latency: 382.538µs
|
||||
P99 latency: 448.954µs
|
||||
Bottom 10% Avg latency: 400.393µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 500 events in 479.359678ms
|
||||
Burst completed: 500 events in 425.596145ms
|
||||
Burst completed: 500 events in 446.109496ms
|
||||
Burst completed: 500 events in 378.033475ms
|
||||
Burst completed: 500 events in 391.065418ms
|
||||
Burst completed: 500 events in 409.089993ms
|
||||
Burst completed: 500 events in 495.220448ms
|
||||
Burst completed: 500 events in 493.489266ms
|
||||
Burst completed: 500 events in 410.468883ms
|
||||
Burst completed: 500 events in 418.819105ms
|
||||
Burst test completed: 5000 events in 9.361610197s
|
||||
Events/sec: 534.10
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 2500 writes, 2500 reads in 24.078024733s
|
||||
Combined ops/sec: 207.66
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 5000/5000 (100.0%)
|
||||
Duration: 358.887842ms
|
||||
Events/sec: 13931.93
|
||||
Avg latency: 280.856µs
|
||||
P90 latency: 366.297µs
|
||||
P95 latency: 394.952µs
|
||||
P99 latency: 451.52µs
|
||||
Bottom 10% Avg latency: 409.325µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 500 events in 362.96011ms
|
||||
Burst completed: 500 events in 416.140454ms
|
||||
Burst completed: 500 events in 448.460564ms
|
||||
Burst completed: 500 events in 474.76808ms
|
||||
Burst completed: 500 events in 447.434655ms
|
||||
Burst completed: 500 events in 427.462963ms
|
||||
Burst completed: 500 events in 413.117609ms
|
||||
Burst completed: 500 events in 442.349391ms
|
||||
Burst completed: 500 events in 464.157823ms
|
||||
Burst completed: 500 events in 472.898161ms
|
||||
Burst test completed: 5000 events in 9.381865696s
|
||||
Events/sec: 532.94
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 2500 writes, 2122 reads in 30.021046357s
|
||||
Combined ops/sec: 153.96
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 339.504298ms
|
||||
Total Events: 5000
|
||||
Events/sec: 14727.35
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 142 MB
|
||||
Avg Latency: 264.477µs
|
||||
P90 Latency: 350.647µs
|
||||
P95 Latency: 382.538µs
|
||||
P99 Latency: 448.954µs
|
||||
Bottom 10% Avg Latency: 400.393µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.361610197s
|
||||
Total Events: 5000
|
||||
Events/sec: 534.10
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 192 MB
|
||||
Avg Latency: 183.763µs
|
||||
P90 Latency: 236.69µs
|
||||
P95 Latency: 269.453µs
|
||||
P99 Latency: 328.425µs
|
||||
Bottom 10% Avg Latency: 284.22µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.078024733s
|
||||
Total Events: 5000
|
||||
Events/sec: 207.66
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 105 MB
|
||||
Avg Latency: 4.442571ms
|
||||
P90 Latency: 8.682425ms
|
||||
P95 Latency: 10.855199ms
|
||||
P99 Latency: 12.949977ms
|
||||
Bottom 10% Avg Latency: 10.754668ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 358.887842ms
|
||||
Total Events: 5000
|
||||
Events/sec: 13931.93
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 122 MB
|
||||
Avg Latency: 280.856µs
|
||||
P90 Latency: 366.297µs
|
||||
P95 Latency: 394.952µs
|
||||
P99 Latency: 451.52µs
|
||||
Bottom 10% Avg Latency: 409.325µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.381865696s
|
||||
Total Events: 5000
|
||||
Events/sec: 532.94
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 175 MB
|
||||
Avg Latency: 186.501µs
|
||||
P90 Latency: 237.853µs
|
||||
P95 Latency: 269.192µs
|
||||
P99 Latency: 336.811µs
|
||||
Bottom 10% Avg Latency: 287.769µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 30.021046357s
|
||||
Total Events: 4622
|
||||
Events/sec: 153.96
|
||||
Success Rate: 92.4%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 129 MB
|
||||
Avg Latency: 8.281607ms
|
||||
P90 Latency: 17.758024ms
|
||||
P95 Latency: 19.206379ms
|
||||
P99 Latency: 24.13486ms
|
||||
Bottom 10% Avg Latency: 20.330644ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly-alloc_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_next-orly-alloc_8/benchmark_report.adoc
|
||||
20250913151458624980 INF /tmp/benchmark_next-orly-alloc_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250913151458946409 INF /tmp/benchmark_next-orly-alloc_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 22 MiB of 22 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250913151458948689 INF /tmp/benchmark_next-orly-alloc_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: next-orly-alloc
|
||||
RELAY_URL: ws://next-orly-alloc:8080
|
||||
TEST_TIMESTAMP: 2025-09-13T15:14:58+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 5000
|
||||
Workers: 4
|
||||
Duration: 30s
|
||||
@@ -1,190 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_next-orly-cpu_8
|
||||
Events: 5000, Workers: 4, Duration: 30s
|
||||
20250913150837785506 INF /tmp/benchmark_next-orly-cpu_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250913150837785985 INF /tmp/benchmark_next-orly-cpu_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250913150837786066 INF /tmp/benchmark_next-orly-cpu_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250913150837786329 INF (*types.Uint32)(0xc0002b1cac)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250913150837786374 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 5000/5000 (100.0%)
|
||||
Duration: 342.779064ms
|
||||
Events/sec: 14586.66
|
||||
Avg latency: 266.404µs
|
||||
P90 latency: 354.213µs
|
||||
P95 latency: 386.835µs
|
||||
P99 latency: 459.934µs
|
||||
Bottom 10% Avg latency: 404.279µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 500 events in 423.591594ms
|
||||
Burst completed: 500 events in 454.585716ms
|
||||
Burst completed: 500 events in 486.007191ms
|
||||
Burst completed: 500 events in 503.830553ms
|
||||
Burst completed: 500 events in 433.97967ms
|
||||
Burst completed: 500 events in 427.591978ms
|
||||
Burst completed: 500 events in 500.38276ms
|
||||
Burst completed: 500 events in 489.161258ms
|
||||
Burst completed: 500 events in 474.350023ms
|
||||
Burst completed: 500 events in 350.911595ms
|
||||
Burst test completed: 5000 events in 9.557316818s
|
||||
Events/sec: 523.16
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 2500 writes, 2500 reads in 24.103605311s
|
||||
Combined ops/sec: 207.44
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 5000/5000 (100.0%)
|
||||
Duration: 372.362841ms
|
||||
Events/sec: 13427.76
|
||||
Avg latency: 291.441µs
|
||||
P90 latency: 380.663µs
|
||||
P95 latency: 417.233µs
|
||||
P99 latency: 494.64µs
|
||||
Bottom 10% Avg latency: 434.725µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 500 events in 417.269185ms
|
||||
Burst completed: 500 events in 468.354112ms
|
||||
Burst completed: 500 events in 443.231977ms
|
||||
Burst completed: 500 events in 435.358047ms
|
||||
Burst completed: 500 events in 380.799007ms
|
||||
Burst completed: 500 events in 322.904743ms
|
||||
Burst completed: 500 events in 354.234969ms
|
||||
Burst completed: 500 events in 390.690665ms
|
||||
Burst completed: 500 events in 392.317202ms
|
||||
Burst completed: 500 events in 467.071067ms
|
||||
Burst test completed: 5000 events in 9.085471121s
|
||||
Events/sec: 550.33
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 2500 writes, 2067 reads in 30.027760725s
|
||||
Combined ops/sec: 152.09
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 342.779064ms
|
||||
Total Events: 5000
|
||||
Events/sec: 14586.66
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 142 MB
|
||||
Avg Latency: 266.404µs
|
||||
P90 Latency: 354.213µs
|
||||
P95 Latency: 386.835µs
|
||||
P99 Latency: 459.934µs
|
||||
Bottom 10% Avg Latency: 404.279µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.557316818s
|
||||
Total Events: 5000
|
||||
Events/sec: 523.16
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 192 MB
|
||||
Avg Latency: 181.633µs
|
||||
P90 Latency: 225.639µs
|
||||
P95 Latency: 258.311µs
|
||||
P99 Latency: 332.341µs
|
||||
Bottom 10% Avg Latency: 277.625µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.103605311s
|
||||
Total Events: 5000
|
||||
Events/sec: 207.44
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 108 MB
|
||||
Avg Latency: 4.472533ms
|
||||
P90 Latency: 8.862063ms
|
||||
P95 Latency: 9.420264ms
|
||||
P99 Latency: 12.991173ms
|
||||
Bottom 10% Avg Latency: 10.293802ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 372.362841ms
|
||||
Total Events: 5000
|
||||
Events/sec: 13427.76
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 125 MB
|
||||
Avg Latency: 291.441µs
|
||||
P90 Latency: 380.663µs
|
||||
P95 Latency: 417.233µs
|
||||
P99 Latency: 494.64µs
|
||||
Bottom 10% Avg Latency: 434.725µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.085471121s
|
||||
Total Events: 5000
|
||||
Events/sec: 550.33
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 177 MB
|
||||
Avg Latency: 191.41µs
|
||||
P90 Latency: 247.551µs
|
||||
P95 Latency: 282.246µs
|
||||
P99 Latency: 336.099µs
|
||||
Bottom 10% Avg Latency: 290.757µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 30.027760725s
|
||||
Total Events: 4567
|
||||
Events/sec: 152.09
|
||||
Success Rate: 91.3%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 173 MB
|
||||
Avg Latency: 8.526682ms
|
||||
P90 Latency: 18.686774ms
|
||||
P95 Latency: 21.052665ms
|
||||
P99 Latency: 24.581304ms
|
||||
Bottom 10% Avg Latency: 21.498913ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly-cpu_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_next-orly-cpu_8/benchmark_report.adoc
|
||||
20250913151041289202 INF /tmp/benchmark_next-orly-cpu_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250913151041613609 INF /tmp/benchmark_next-orly-cpu_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 22 MiB of 22 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250913151041615724 INF /tmp/benchmark_next-orly-cpu_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: next-orly-cpu
|
||||
RELAY_URL: ws://next-orly-cpu:8080
|
||||
TEST_TIMESTAMP: 2025-09-13T15:10:41+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 5000
|
||||
Workers: 4
|
||||
Duration: 30s
|
||||
@@ -1,190 +0,0 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_next-orly-mem_8
|
||||
Events: 5000, Workers: 4, Duration: 30s
|
||||
20250913151046664505 INF /tmp/benchmark_next-orly-mem_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
20250913151046665322 INF /tmp/benchmark_next-orly-mem_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
20250913151046665358 INF /tmp/benchmark_next-orly-mem_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
20250913151046665590 INF (*types.Uint32)(0xc00013fc1c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
20250913151046665622 INF migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 5000/5000 (100.0%)
|
||||
Duration: 347.207269ms
|
||||
Events/sec: 14400.62
|
||||
Avg latency: 269.084µs
|
||||
P90 latency: 358.252µs
|
||||
P95 latency: 390.313µs
|
||||
P99 latency: 459.745µs
|
||||
Bottom 10% Avg latency: 409.33µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 500 events in 372.815911ms
|
||||
Burst completed: 500 events in 385.472929ms
|
||||
Burst completed: 500 events in 493.886155ms
|
||||
Burst completed: 500 events in 499.001539ms
|
||||
Burst completed: 500 events in 495.383911ms
|
||||
Burst completed: 500 events in 463.681787ms
|
||||
Burst completed: 500 events in 474.409848ms
|
||||
Burst completed: 500 events in 507.35297ms
|
||||
Burst completed: 500 events in 487.725706ms
|
||||
Burst completed: 500 events in 473.3812ms
|
||||
Burst test completed: 5000 events in 9.668130847s
|
||||
Events/sec: 517.16
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 2500 writes, 2500 reads in 23.457777521s
|
||||
Combined ops/sec: 213.15
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 5000/5000 (100.0%)
|
||||
Duration: 363.793593ms
|
||||
Events/sec: 13744.06
|
||||
Avg latency: 284.695µs
|
||||
P90 latency: 372.509µs
|
||||
P95 latency: 405.031µs
|
||||
P99 latency: 461.949µs
|
||||
Bottom 10% Avg latency: 418.636µs
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 500 events in 468.131728ms
|
||||
Burst completed: 500 events in 466.05048ms
|
||||
Burst completed: 500 events in 348.112373ms
|
||||
Burst completed: 500 events in 399.633064ms
|
||||
Burst completed: 500 events in 442.046507ms
|
||||
Burst completed: 500 events in 381.168926ms
|
||||
Burst completed: 500 events in 362.016406ms
|
||||
Burst completed: 500 events in 379.297871ms
|
||||
Burst completed: 500 events in 457.958442ms
|
||||
Burst completed: 500 events in 455.557676ms
|
||||
Burst test completed: 5000 events in 9.174541855s
|
||||
Events/sec: 544.99
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 2500 writes, 2146 reads in 30.008115864s
|
||||
Combined ops/sec: 154.82
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 347.207269ms
|
||||
Total Events: 5000
|
||||
Events/sec: 14400.62
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 143 MB
|
||||
Avg Latency: 269.084µs
|
||||
P90 Latency: 358.252µs
|
||||
P95 Latency: 390.313µs
|
||||
P99 Latency: 459.745µs
|
||||
Bottom 10% Avg Latency: 409.33µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.668130847s
|
||||
Total Events: 5000
|
||||
Events/sec: 517.16
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 192 MB
|
||||
Avg Latency: 178.033µs
|
||||
P90 Latency: 225.098µs
|
||||
P95 Latency: 255.165µs
|
||||
P99 Latency: 318.015µs
|
||||
Bottom 10% Avg Latency: 267.252µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 23.457777521s
|
||||
Total Events: 5000
|
||||
Events/sec: 213.15
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 103 MB
|
||||
Avg Latency: 4.235413ms
|
||||
P90 Latency: 8.39945ms
|
||||
P95 Latency: 9.733877ms
|
||||
P99 Latency: 12.359344ms
|
||||
Bottom 10% Avg Latency: 10.173821ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 363.793593ms
|
||||
Total Events: 5000
|
||||
Events/sec: 13744.06
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 121 MB
|
||||
Avg Latency: 284.695µs
|
||||
P90 Latency: 372.509µs
|
||||
P95 Latency: 405.031µs
|
||||
P99 Latency: 461.949µs
|
||||
Bottom 10% Avg Latency: 418.636µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.174541855s
|
||||
Total Events: 5000
|
||||
Events/sec: 544.99
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 174 MB
|
||||
Avg Latency: 188.886µs
|
||||
P90 Latency: 242.201µs
|
||||
P95 Latency: 270.475µs
|
||||
P99 Latency: 331.521µs
|
||||
Bottom 10% Avg Latency: 287.008µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 30.008115864s
|
||||
Total Events: 4646
|
||||
Events/sec: 154.82
|
||||
Success Rate: 92.9%
|
||||
Concurrent Workers: 4
|
||||
Memory Used: 184 MB
|
||||
Avg Latency: 8.147241ms
|
||||
P90 Latency: 17.076493ms
|
||||
P95 Latency: 17.970006ms
|
||||
P99 Latency: 24.424806ms
|
||||
Bottom 10% Avg Latency: 19.866973ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly-mem_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_next-orly-mem_8/benchmark_report.adoc
|
||||
20250913151249700098 INF /tmp/benchmark_next-orly-mem_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
20250913151250019024 INF /tmp/benchmark_next-orly-mem_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 01. Size: 22 MiB of 22 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
20250913151250021227 INF /tmp/benchmark_next-orly-mem_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: next-orly-mem
|
||||
RELAY_URL: ws://next-orly-mem:8080
|
||||
TEST_TIMESTAMP: 2025-09-13T15:12:50+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 5000
|
||||
Workers: 4
|
||||
Duration: 30s
|
||||
169
scripts/benchmark.sh
Executable file
169
scripts/benchmark.sh
Executable file
@@ -0,0 +1,169 @@
|
||||
#!/bin/bash
set -euo pipefail

# scripts/benchmark.sh - Run full benchmark suite on a relay at a configurable address
#
# Usage:
#   ./scripts/benchmark.sh [relay_address] [relay_port]
#
# Example:
#   ./scripts/benchmark.sh localhost 3334
#   ./scripts/benchmark.sh nostr.example.com 8080
#
# If relay_address and relay_port are not provided, defaults to localhost:3334

# Resolve repository root relative to this script and work from there.
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd -- "${SCRIPT_DIR}/.." && pwd)"
cd "$REPO_ROOT"

# Target relay (positional args override the defaults).
RELAY_ADDRESS="${1:-localhost}"
RELAY_PORT="${2:-3334}"
RELAY_URL="ws://${RELAY_ADDRESS}:${RELAY_PORT}"

# Benchmark knobs, each overridable from the environment.
BENCHMARK_EVENTS="${BENCHMARK_EVENTS:-10000}"
BENCHMARK_WORKERS="${BENCHMARK_WORKERS:-8}"
BENCHMARK_DURATION="${BENCHMARK_DURATION:-60s}"

# Per-run output directory, timestamped so runs never collide.
REPORTS_DIR="${REPORTS_DIR:-$REPO_ROOT/cmd/benchmark/reports}"
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
RUN_DIR="${REPORTS_DIR}/run_${TIMESTAMP}"

# Build the benchmark binary on demand.
BENCHMARK_BIN="${REPO_ROOT}/cmd/benchmark/benchmark"
if [[ ! -x "$BENCHMARK_BIN" ]]; then
    echo "Building benchmark binary..."
    go build -o "$BENCHMARK_BIN" "$REPO_ROOT/cmd/benchmark"
fi

mkdir -p "${RUN_DIR}"

# Banner describing this run's configuration.
echo "=================================================="
echo "Nostr Relay Benchmark"
echo "=================================================="
echo "Timestamp: $(date)"
echo "Target Relay: ${RELAY_URL}"
echo "Events per test: ${BENCHMARK_EVENTS}"
echo "Concurrent workers: ${BENCHMARK_WORKERS}"
echo "Test duration: ${BENCHMARK_DURATION}"
echo "Output directory: ${RUN_DIR}"
echo "=================================================="
# wait_for_relay URL
# Polls the relay's HTTP endpoint until it answers with a recognizable status
# code, or gives up after max_attempts. Returns 0 when ready, 1 on timeout.
# NOTE(review): the probe uses the RELAY_ADDRESS/RELAY_PORT globals, not the
# url argument (which is display-only) — confirm callers always pass the same
# host:port before relying on the parameter.
wait_for_relay() {
    local url="$1"
    local max_attempts=30
    local attempt=0
    local status

    echo "Waiting for relay to be ready at ${url}..."

    while [ $attempt -lt $max_attempts ]; do
        # Try to get HTTP status code with curl; "000" signals a connection
        # failure. Declared above and assigned here so the assignment's exit
        # status is not masked by `local` (ShellCheck SC2155).
        status=$(curl -s -o /dev/null -w "%{http_code}" --connect-timeout 5 --max-time 5 "http://${RELAY_ADDRESS}:${RELAY_PORT}" || echo 000)

        # Any of these codes proves a server is listening: 101/426 are
        # websocket upgrade responses, the rest plain HTTP answers.
        case "$status" in
            101|200|400|404|426)
                echo "Relay is ready! (HTTP ${status})"
                return 0
                ;;
        esac

        attempt=$((attempt + 1))
        echo " Attempt ${attempt}/${max_attempts}: Relay not ready yet (HTTP ${status})..."
        sleep 2
    done

    echo "ERROR: Relay failed to become ready after ${max_attempts} attempts"
    return 1
}
# run_benchmark
# Runs the benchmark binary against ${RELAY_URL} and writes the captured
# output plus run metadata to ${RUN_DIR}/benchmark_results.txt. Always
# returns 0 once the relay was reachable (failure is recorded in the report);
# returns 1 only when the relay never came up.
run_benchmark() {
    local output_file="${RUN_DIR}/benchmark_results.txt"

    echo ""
    echo "=================================================="
    echo "Testing relay at ${RELAY_URL}"
    echo "=================================================="

    # Wait for relay to be ready; on timeout, record the failure and bail.
    if ! wait_for_relay "${RELAY_ADDRESS}:${RELAY_PORT}"; then
        echo "ERROR: Relay is not responding, aborting..."
        echo "RELAY_URL: ${RELAY_URL}" > "${output_file}"
        echo "STATUS: FAILED - Relay not responding" >> "${output_file}"
        echo "ERROR: Connection failed" >> "${output_file}"
        return 1
    fi

    # Run the benchmark
    echo "Running benchmark against ${RELAY_URL}..."

    # Create temporary directory for benchmark data
    TEMP_DATA_DIR="/tmp/benchmark_${TIMESTAMP}"
    mkdir -p "${TEMP_DATA_DIR}"

    # Run benchmark and capture both stdout and stderr into the report file.
    # FIX: the redirect was previously commented out mid line-continuation,
    # which left a dangling `2>&1; then` testing a null command (always
    # true) and never created ${output_file} for the summary greps below.
    if "${BENCHMARK_BIN}" \
        -relay-url="${RELAY_URL}" \
        -datadir="${TEMP_DATA_DIR}" \
        -events="${BENCHMARK_EVENTS}" \
        -workers="${BENCHMARK_WORKERS}" \
        -duration="${BENCHMARK_DURATION}" \
        > "${output_file}" 2>&1; then
        echo "✓ Benchmark completed successfully"
        # Add relay identification to the report
        echo "" >> "${output_file}"
        echo "RELAY_URL: ${RELAY_URL}" >> "${output_file}"
        echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
        echo "BENCHMARK_CONFIG:" >> "${output_file}"
        echo " Events: ${BENCHMARK_EVENTS}" >> "${output_file}"
        echo " Workers: ${BENCHMARK_WORKERS}" >> "${output_file}"
        echo " Duration: ${BENCHMARK_DURATION}" >> "${output_file}"
    else
        # FIX: "else" was glued onto the end of the previous echo line, so it
        # was consumed as an echo argument and this branch ran unconditionally.
        echo "✗ Benchmark failed"
        echo "" >> "${output_file}"
        echo "RELAY_URL: ${RELAY_URL}" >> "${output_file}"
        echo "STATUS: FAILED" >> "${output_file}"
        echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
    fi

    # Clean up temporary data
    rm -rf "${TEMP_DATA_DIR}"

    return 0
}
# Main execution: run the suite, then print a short summary from the report.
echo "Starting relay benchmark..."
run_benchmark

RESULTS_FILE="${RUN_DIR}/benchmark_results.txt"
if [ -f "${RESULTS_FILE}" ]; then
    echo ""
    echo "=================================================="
    echo "Benchmark Results Summary"
    echo "=================================================="
    # Distinguish a recorded failure from a completed run, then surface the
    # headline metrics from the report.
    if grep -q "STATUS: FAILED" "${RESULTS_FILE}"; then
        echo "Status: FAILED"
        grep "ERROR:" "${RESULTS_FILE}" | head -1 || echo "Error: Unknown failure"
    else
        echo "Status: COMPLETED"

        # Pull each key metric (first three occurrences) from the report;
        # `|| true` keeps a missing metric from aborting under `set -e`.
        for metric in "Events/sec:" "Success Rate:" "Avg Latency:" "P95 Latency:" "Memory:"; do
            grep "${metric}" "${RESULTS_FILE}" | head -3 || true
        done
    fi

    echo ""
    echo "Full results available in: ${RESULTS_FILE}"
fi

echo ""
echo "=================================================="
echo "Benchmark Completed!"
echo "=================================================="
echo "Results directory: ${RUN_DIR}"
echo "Benchmark finished at: $(date)"
Reference in New Issue
Block a user