add dgraph backend to benchmark suite with safe type assertions for multi-backend support
@@ -2,7 +2,7 @@
 
 A comprehensive benchmarking system for testing and comparing the performance of multiple Nostr relay implementations, including:
 
-- **next.orly.dev** (this repository) - BadgerDB-based relay
+- **next.orly.dev** (this repository) - Badger and DGraph backend variants
 - **Khatru** - SQLite and Badger variants
 - **Relayer** - Basic example implementation
 - **Strfry** - C++ LMDB-based relay
@@ -91,13 +91,16 @@ ls reports/run_YYYYMMDD_HHMMSS/
 
 ### Docker Compose Services
 
-| Service          | Port | Description                                |
-| ---------------- | ---- | ------------------------------------------ |
-| next-orly        | 8001 | This repository's BadgerDB relay           |
-| khatru-sqlite    | 8002 | Khatru with SQLite backend                 |
-| khatru-badger    | 8003 | Khatru with Badger backend                 |
-| relayer-basic    | 8004 | Basic relayer example                      |
-| strfry           | 8005 | Strfry C++ LMDB relay                      |
+| Service            | Port | Description                                |
+| ------------------ | ---- | ------------------------------------------ |
+| next-orly-badger   | 8001 | This repository's Badger relay             |
+| next-orly-dgraph   | 8007 | This repository's DGraph relay             |
+| dgraph-zero        | 5080 | DGraph cluster coordinator                 |
+| dgraph-alpha       | 9080 | DGraph data node                           |
+| khatru-sqlite      | 8002 | Khatru with SQLite backend                 |
+| khatru-badger      | 8003 | Khatru with Badger backend                 |
+| relayer-basic      | 8004 | Basic relayer example                      |
+| strfry             | 8005 | Strfry C++ LMDB relay                      |
 | nostr-rs-relay     | 8006 | Rust SQLite relay                          |
 | benchmark-runner   | -    | Orchestrates tests and aggregates results  |
 
@@ -173,6 +176,39 @@ go build -o benchmark main.go
   -duration=30s
 ```
 
+## Database Backend Comparison
+
+The benchmark suite includes **next.orly.dev** with two different database backends to compare architectural approaches:
+
+### Badger Backend (next-orly-badger)
+- **Type**: Embedded key-value store
+- **Architecture**: Single-process, no network overhead
+- **Best for**: Personal relays, single-instance deployments
+- **Characteristics**:
+  - Lower latency for single-instance operations
+  - No network round-trips
+  - Simpler deployment
+  - Limited to single-node scaling
+
+### DGraph Backend (next-orly-dgraph)
+- **Type**: Distributed graph database
+- **Architecture**: Client-server with dgraph-zero (coordinator) and dgraph-alpha (data node)
+- **Best for**: Distributed deployments, horizontal scaling
+- **Characteristics**:
+  - Network overhead from gRPC communication
+  - Supports multi-node clustering
+  - Built-in replication and sharding
+  - More complex deployment
+
+### Comparing the Backends
+
+The benchmark results will show:
+- **Latency differences**: Embedded vs. distributed overhead
+- **Throughput trade-offs**: Single-process optimization vs. distributed scalability
+- **Resource usage**: Memory and CPU patterns for different architectures
+
+This comparison helps determine which backend is appropriate for different deployment scenarios.
+
 ## Benchmark Results Interpretation
 
 ### Peak Throughput Test
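The latency trade-off described in the README section above can be sketched in a few lines of Go. Everything below is illustrative and not part of this commit: an in-memory map stands in for the embedded Badger path, and a fixed 1 ms sleep stands in for a gRPC round-trip to dgraph-alpha; real overheads depend entirely on the network and cluster.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// toyStore stands in for an embedded backend: a write is just a map insert.
type toyStore struct {
	mu   sync.Mutex
	data map[int][]byte
}

func (s *toyStore) save(k int, v []byte) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.data[k] = v
}

func main() {
	s := &toyStore{data: make(map[int][]byte)}
	const n = 1000
	payload := []byte("benchmark event payload")

	// Embedded: no network hop per operation.
	start := time.Now()
	for i := 0; i < n; i++ {
		s.save(i, payload)
	}
	embedded := time.Since(start)

	// "Distributed": the same writes, each paying a simulated 1 ms round-trip
	// (a stand-in for the gRPC hop; the figure is an assumption, not a measurement).
	start = time.Now()
	for i := 0; i < n; i++ {
		time.Sleep(time.Millisecond) // simulated network RTT
		s.save(i, payload)
	}
	networked := time.Since(start)

	fmt.Printf("embedded: %v, networked: %v for %d writes\n", embedded, networked, n)
}
```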
574 cmd/benchmark/benchmark_adapter.go Normal file
@@ -0,0 +1,574 @@
package main

import (
	"context"
	"fmt"
	"sort"
	"sync"
	"time"

	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/encoders/event"
	"next.orly.dev/pkg/encoders/filter"
	"next.orly.dev/pkg/encoders/kind"
	"next.orly.dev/pkg/encoders/tag"
	"next.orly.dev/pkg/encoders/timestamp"
	"next.orly.dev/pkg/interfaces/signer/p8k"
)

// BenchmarkAdapter adapts a database.Database interface to work with benchmark tests
type BenchmarkAdapter struct {
	config  *BenchmarkConfig
	db      database.Database
	results []*BenchmarkResult
	mu      sync.RWMutex
}

// NewBenchmarkAdapter creates a new benchmark adapter
func NewBenchmarkAdapter(config *BenchmarkConfig, db database.Database) *BenchmarkAdapter {
	return &BenchmarkAdapter{
		config:  config,
		db:      db,
		results: make([]*BenchmarkResult, 0),
	}
}

// RunPeakThroughputTest runs the peak throughput benchmark
func (ba *BenchmarkAdapter) RunPeakThroughputTest() {
	fmt.Println("\n=== Peak Throughput Test ===")

	start := time.Now()
	var wg sync.WaitGroup
	var totalEvents int64
	var errors []error
	var latencies []time.Duration
	var mu sync.Mutex

	events := ba.generateEvents(ba.config.NumEvents)
	eventChan := make(chan *event.E, len(events))

	// Fill event channel
	for _, ev := range events {
		eventChan <- ev
	}
	close(eventChan)

	// Start workers
	for i := 0; i < ba.config.ConcurrentWorkers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()

			ctx := context.Background()
			for ev := range eventChan {
				eventStart := time.Now()

				_, err := ba.db.SaveEvent(ctx, ev)
				latency := time.Since(eventStart)

				mu.Lock()
				if err != nil {
					errors = append(errors, err)
				} else {
					totalEvents++
					latencies = append(latencies, latency)
				}
				mu.Unlock()
			}
		}(i)
	}

	wg.Wait()
	duration := time.Since(start)

	// Calculate metrics
	result := &BenchmarkResult{
		TestName:          "Peak Throughput",
		Duration:          duration,
		TotalEvents:       int(totalEvents),
		EventsPerSecond:   float64(totalEvents) / duration.Seconds(),
		ConcurrentWorkers: ba.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
	}

	if len(latencies) > 0 {
		sort.Slice(latencies, func(i, j int) bool {
			return latencies[i] < latencies[j]
		})
		result.AvgLatency = calculateAverage(latencies)
		result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
		result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
		result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

		// "Bottom 10%" is the slowest decile: latencies are sorted ascending,
		// so it starts at the 90th-percentile index.
		bottom10 := latencies[int(float64(len(latencies))*0.90):]
		result.Bottom10Avg = calculateAverage(bottom10)
	}

	result.SuccessRate = float64(totalEvents) / float64(ba.config.NumEvents) * 100
	if len(errors) > 0 {
		result.Errors = make([]string, 0, len(errors))
		for _, err := range errors {
			result.Errors = append(result.Errors, err.Error())
		}
	}

	ba.mu.Lock()
	ba.results = append(ba.results, result)
	ba.mu.Unlock()

	ba.printResult(result)
}

// RunBurstPatternTest runs burst pattern test
func (ba *BenchmarkAdapter) RunBurstPatternTest() {
	fmt.Println("\n=== Burst Pattern Test ===")

	start := time.Now()
	var totalEvents int64
	var latencies []time.Duration
	var mu sync.Mutex

	ctx := context.Background()
	burstSize := 100
	bursts := ba.config.NumEvents / burstSize

	for i := 0; i < bursts; i++ {
		// Generate a burst of events
		events := ba.generateEvents(burstSize)

		var wg sync.WaitGroup
		for _, ev := range events {
			wg.Add(1)
			go func(e *event.E) {
				defer wg.Done()

				eventStart := time.Now()
				_, err := ba.db.SaveEvent(ctx, e)
				latency := time.Since(eventStart)

				mu.Lock()
				if err == nil {
					totalEvents++
					latencies = append(latencies, latency)
				}
				mu.Unlock()
			}(ev)
		}

		wg.Wait()

		// Short pause between bursts
		time.Sleep(10 * time.Millisecond)
	}

	duration := time.Since(start)

	result := &BenchmarkResult{
		TestName:          "Burst Pattern",
		Duration:          duration,
		TotalEvents:       int(totalEvents),
		EventsPerSecond:   float64(totalEvents) / duration.Seconds(),
		ConcurrentWorkers: burstSize,
		MemoryUsed:        getMemUsage(),
		SuccessRate:       float64(totalEvents) / float64(ba.config.NumEvents) * 100,
	}

	if len(latencies) > 0 {
		sort.Slice(latencies, func(i, j int) bool {
			return latencies[i] < latencies[j]
		})
		result.AvgLatency = calculateAverage(latencies)
		result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
		result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
		result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

		// Slowest 10% of operations (latencies are sorted ascending).
		bottom10 := latencies[int(float64(len(latencies))*0.90):]
		result.Bottom10Avg = calculateAverage(bottom10)
	}

	ba.mu.Lock()
	ba.results = append(ba.results, result)
	ba.mu.Unlock()

	ba.printResult(result)
}

// RunMixedReadWriteTest runs mixed read/write test
func (ba *BenchmarkAdapter) RunMixedReadWriteTest() {
	fmt.Println("\n=== Mixed Read/Write Test ===")

	// First, populate some events
	fmt.Println("Populating database with initial events...")
	populateEvents := ba.generateEvents(1000)
	ctx := context.Background()

	for _, ev := range populateEvents {
		ba.db.SaveEvent(ctx, ev)
	}

	start := time.Now()
	var writeCount, readCount int64
	var latencies []time.Duration
	var mu sync.Mutex
	var wg sync.WaitGroup

	// Start workers doing mixed read/write
	for i := 0; i < ba.config.ConcurrentWorkers; i++ {
		wg.Add(1)
		go func(workerID int) {
			defer wg.Done()

			events := ba.generateEvents(ba.config.NumEvents / ba.config.ConcurrentWorkers)

			for idx, ev := range events {
				eventStart := time.Now()

				if idx%3 == 0 {
					// Read operation
					f := filter.New()
					f.Kinds = kind.NewS(kind.TextNote)
					limit := uint(10)
					f.Limit = &limit
					_, _ = ba.db.QueryEvents(ctx, f)

					mu.Lock()
					readCount++
					mu.Unlock()
				} else {
					// Write operation
					_, _ = ba.db.SaveEvent(ctx, ev)

					mu.Lock()
					writeCount++
					mu.Unlock()
				}

				latency := time.Since(eventStart)
				mu.Lock()
				latencies = append(latencies, latency)
				mu.Unlock()
			}
		}(i)
	}

	wg.Wait()
	duration := time.Since(start)

	result := &BenchmarkResult{
		TestName:          fmt.Sprintf("Mixed R/W (R:%d W:%d)", readCount, writeCount),
		Duration:          duration,
		TotalEvents:       int(writeCount + readCount),
		EventsPerSecond:   float64(writeCount+readCount) / duration.Seconds(),
		ConcurrentWorkers: ba.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
		SuccessRate:       100.0,
	}

	if len(latencies) > 0 {
		sort.Slice(latencies, func(i, j int) bool {
			return latencies[i] < latencies[j]
		})
		result.AvgLatency = calculateAverage(latencies)
		result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
		result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
		result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

		// Slowest 10% of operations (latencies are sorted ascending).
		bottom10 := latencies[int(float64(len(latencies))*0.90):]
		result.Bottom10Avg = calculateAverage(bottom10)
	}

	ba.mu.Lock()
	ba.results = append(ba.results, result)
	ba.mu.Unlock()

	ba.printResult(result)
}

// RunQueryTest runs query performance test
func (ba *BenchmarkAdapter) RunQueryTest() {
	fmt.Println("\n=== Query Performance Test ===")

	// Populate with test data
	fmt.Println("Populating database for query tests...")
	events := ba.generateEvents(5000)
	ctx := context.Background()

	for _, ev := range events {
		ba.db.SaveEvent(ctx, ev)
	}

	start := time.Now()
	var queryCount int64
	var latencies []time.Duration
	var mu sync.Mutex
	var wg sync.WaitGroup

	queryTypes := []func() *filter.F{
		func() *filter.F {
			f := filter.New()
			f.Kinds = kind.NewS(kind.TextNote)
			limit := uint(100)
			f.Limit = &limit
			return f
		},
		func() *filter.F {
			f := filter.New()
			f.Kinds = kind.NewS(kind.TextNote, kind.Repost)
			limit := uint(50)
			f.Limit = &limit
			return f
		},
		func() *filter.F {
			f := filter.New()
			limit := uint(10)
			f.Limit = &limit
			since := time.Now().Add(-1 * time.Hour).Unix()
			f.Since = timestamp.FromUnix(since)
			return f
		},
	}

	// Run concurrent queries
	iterations := 1000
	for i := 0; i < ba.config.ConcurrentWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			for j := 0; j < iterations/ba.config.ConcurrentWorkers; j++ {
				f := queryTypes[j%len(queryTypes)]()

				queryStart := time.Now()
				_, _ = ba.db.QueryEvents(ctx, f)
				latency := time.Since(queryStart)

				mu.Lock()
				queryCount++
				latencies = append(latencies, latency)
				mu.Unlock()
			}
		}()
	}

	wg.Wait()
	duration := time.Since(start)

	result := &BenchmarkResult{
		TestName:          fmt.Sprintf("Query Performance (%d queries)", queryCount),
		Duration:          duration,
		TotalEvents:       int(queryCount),
		EventsPerSecond:   float64(queryCount) / duration.Seconds(),
		ConcurrentWorkers: ba.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
		SuccessRate:       100.0,
	}

	if len(latencies) > 0 {
		sort.Slice(latencies, func(i, j int) bool {
			return latencies[i] < latencies[j]
		})
		result.AvgLatency = calculateAverage(latencies)
		result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
		result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
		result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

		// Slowest 10% of operations (latencies are sorted ascending).
		bottom10 := latencies[int(float64(len(latencies))*0.90):]
		result.Bottom10Avg = calculateAverage(bottom10)
	}

	ba.mu.Lock()
	ba.results = append(ba.results, result)
	ba.mu.Unlock()

	ba.printResult(result)
}

// RunConcurrentQueryStoreTest runs concurrent query and store test
func (ba *BenchmarkAdapter) RunConcurrentQueryStoreTest() {
	fmt.Println("\n=== Concurrent Query+Store Test ===")

	start := time.Now()
	var storeCount, queryCount int64
	var latencies []time.Duration
	var mu sync.Mutex
	var wg sync.WaitGroup

	ctx := context.Background()

	// Half workers write, half query
	halfWorkers := ba.config.ConcurrentWorkers / 2
	if halfWorkers < 1 {
		halfWorkers = 1
	}

	// Writers
	for i := 0; i < halfWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			events := ba.generateEvents(ba.config.NumEvents / halfWorkers)
			for _, ev := range events {
				eventStart := time.Now()
				ba.db.SaveEvent(ctx, ev)
				latency := time.Since(eventStart)

				mu.Lock()
				storeCount++
				latencies = append(latencies, latency)
				mu.Unlock()
			}
		}()
	}

	// Readers
	for i := 0; i < halfWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()

			for j := 0; j < ba.config.NumEvents/halfWorkers; j++ {
				f := filter.New()
				f.Kinds = kind.NewS(kind.TextNote)
				limit := uint(10)
				f.Limit = &limit

				queryStart := time.Now()
				ba.db.QueryEvents(ctx, f)
				latency := time.Since(queryStart)

				mu.Lock()
				queryCount++
				latencies = append(latencies, latency)
				mu.Unlock()

				time.Sleep(1 * time.Millisecond)
			}
		}()
	}

	wg.Wait()
	duration := time.Since(start)

	result := &BenchmarkResult{
		TestName:          fmt.Sprintf("Concurrent Q+S (Q:%d S:%d)", queryCount, storeCount),
		Duration:          duration,
		TotalEvents:       int(storeCount + queryCount),
		EventsPerSecond:   float64(storeCount+queryCount) / duration.Seconds(),
		ConcurrentWorkers: ba.config.ConcurrentWorkers,
		MemoryUsed:        getMemUsage(),
		SuccessRate:       100.0,
	}

	if len(latencies) > 0 {
		sort.Slice(latencies, func(i, j int) bool {
			return latencies[i] < latencies[j]
		})
		result.AvgLatency = calculateAverage(latencies)
		result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
		result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
		result.P99Latency = latencies[int(float64(len(latencies))*0.99)]

		// Slowest 10% of operations (latencies are sorted ascending).
		bottom10 := latencies[int(float64(len(latencies))*0.90):]
		result.Bottom10Avg = calculateAverage(bottom10)
	}

	ba.mu.Lock()
	ba.results = append(ba.results, result)
	ba.mu.Unlock()

	ba.printResult(result)
}

// generateEvents generates test events with proper signatures
func (ba *BenchmarkAdapter) generateEvents(count int) []*event.E {
	events := make([]*event.E, count)

	// Create a test signer
	signer := p8k.MustNew()
	if err := signer.Generate(); err != nil {
		panic(fmt.Sprintf("failed to generate test key: %v", err))
	}

	for i := 0; i < count; i++ {
		ev := event.New()
		ev.Kind = kind.TextNote.ToU16()
		ev.CreatedAt = time.Now().Unix()
		ev.Content = []byte(fmt.Sprintf("Benchmark event #%d - Testing Nostr relay performance with automated load generation", i))
		ev.Tags = tag.NewS()

		// Add some tags for variety
		if i%10 == 0 {
			benchmarkTag := tag.NewFromBytesSlice([]byte("t"), []byte("benchmark"))
			ev.Tags.Append(benchmarkTag)
		}

		// Sign the event (sets Pubkey, ID, and Sig)
		if err := ev.Sign(signer); err != nil {
			panic(fmt.Sprintf("failed to sign event: %v", err))
		}

		events[i] = ev
	}

	return events
}

func (ba *BenchmarkAdapter) printResult(r *BenchmarkResult) {
	fmt.Printf("\nResults for %s:\n", r.TestName)
	fmt.Printf("  Duration: %v\n", r.Duration)
	fmt.Printf("  Total Events: %d\n", r.TotalEvents)
	fmt.Printf("  Events/sec: %.2f\n", r.EventsPerSecond)
	fmt.Printf("  Success Rate: %.2f%%\n", r.SuccessRate)
	fmt.Printf("  Workers: %d\n", r.ConcurrentWorkers)
	fmt.Printf("  Memory Used: %.2f MB\n", float64(r.MemoryUsed)/1024/1024)

	if r.AvgLatency > 0 {
		fmt.Printf("  Avg Latency: %v\n", r.AvgLatency)
		fmt.Printf("  P90 Latency: %v\n", r.P90Latency)
		fmt.Printf("  P95 Latency: %v\n", r.P95Latency)
		fmt.Printf("  P99 Latency: %v\n", r.P99Latency)
		fmt.Printf("  Bottom 10%% Avg: %v\n", r.Bottom10Avg)
	}

	if len(r.Errors) > 0 {
		fmt.Printf("  Errors: %d\n", len(r.Errors))
		// Print first few errors as samples
		sampleCount := 3
		if len(r.Errors) < sampleCount {
			sampleCount = len(r.Errors)
		}
		for i := 0; i < sampleCount; i++ {
			fmt.Printf("    Sample %d: %s\n", i+1, r.Errors[i])
		}
	}
}

func (ba *BenchmarkAdapter) GenerateReport() {
	// Print a summary of every collected result; writing report files is
	// delegated to the main benchmark's report generator.
	fmt.Println("\n=== Benchmark Results Summary ===")
	ba.mu.RLock()
	defer ba.mu.RUnlock()

	for _, result := range ba.results {
		ba.printResult(result)
	}
}

func (ba *BenchmarkAdapter) GenerateAsciidocReport() {
	// TODO: Implement asciidoc report generation
	fmt.Println("Asciidoc report generation not yet implemented for adapter")
}

func calculateAverage(durations []time.Duration) time.Duration {
	if len(durations) == 0 {
		return 0
	}

	var total time.Duration
	for _, d := range durations {
		total += d
	}
	return total / time.Duration(len(durations))
}
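Each test above repeats the same sort-then-index percentile block. A clamped helper along these lines could factor it out and guard small samples; this is a sketch, not code from this commit, and the `percentile` name is hypothetical:

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// percentile returns the p-quantile (0 <= p < 1) of an ascending-sorted
// slice, clamping the index so tiny samples never read out of range.
func percentile(sorted []time.Duration, p float64) time.Duration {
	if len(sorted) == 0 {
		return 0
	}
	idx := int(float64(len(sorted)) * p)
	if idx >= len(sorted) {
		idx = len(sorted) - 1
	}
	return sorted[idx]
}

func main() {
	lat := []time.Duration{3 * time.Millisecond, time.Millisecond, 2 * time.Millisecond}
	sort.Slice(lat, func(i, j int) bool { return lat[i] < lat[j] })
	fmt.Println(percentile(lat, 0.90), percentile(lat, 0.99))
}
```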
122 cmd/benchmark/dgraph_benchmark.go Normal file
@@ -0,0 +1,122 @@
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"time"

	"next.orly.dev/pkg/database"
	_ "next.orly.dev/pkg/dgraph" // Import to register dgraph factory
)

// DgraphBenchmark wraps a Benchmark with dgraph-specific setup
type DgraphBenchmark struct {
	config   *BenchmarkConfig
	docker   *DgraphDocker
	database database.Database
	bench    *BenchmarkAdapter
}

// NewDgraphBenchmark creates a new dgraph benchmark instance
func NewDgraphBenchmark(config *BenchmarkConfig) (*DgraphBenchmark, error) {
	// Create Docker manager
	docker := NewDgraphDocker()

	// Start dgraph containers
	ctx := context.Background()
	if err := docker.Start(ctx); err != nil {
		return nil, fmt.Errorf("failed to start dgraph: %w", err)
	}

	// Set environment variable for dgraph connection
	os.Setenv("ORLY_DGRAPH_URL", docker.GetGRPCEndpoint())

	// Create database instance using dgraph backend
	cancel := func() {}
	db, err := database.NewDatabase(ctx, cancel, "dgraph", config.DataDir, "warn")
	if err != nil {
		docker.Stop()
		return nil, fmt.Errorf("failed to create dgraph database: %w", err)
	}

	// Wait for database to be ready
	fmt.Println("Waiting for dgraph database to be ready...")
	select {
	case <-db.Ready():
		fmt.Println("Dgraph database is ready")
	case <-time.After(30 * time.Second):
		db.Close()
		docker.Stop()
		return nil, fmt.Errorf("dgraph database failed to become ready")
	}

	// Create adapter to use Database interface with Benchmark
	adapter := NewBenchmarkAdapter(config, db)

	dgraphBench := &DgraphBenchmark{
		config:   config,
		docker:   docker,
		database: db,
		bench:    adapter,
	}

	return dgraphBench, nil
}

// Close closes the dgraph benchmark and stops Docker containers
func (dgb *DgraphBenchmark) Close() {
	fmt.Println("Closing dgraph benchmark...")

	if dgb.database != nil {
		dgb.database.Close()
	}

	if dgb.docker != nil {
		if err := dgb.docker.Stop(); err != nil {
			log.Printf("Error stopping dgraph Docker: %v", err)
		}
	}
}

// RunSuite runs the benchmark suite on dgraph
func (dgb *DgraphBenchmark) RunSuite() {
	fmt.Println("\n╔════════════════════════════════════════════════════════╗")
	fmt.Println("║            DGRAPH BACKEND BENCHMARK SUITE              ║")
	fmt.Println("╚════════════════════════════════════════════════════════╝")

	// Run only one round for dgraph to keep benchmark time reasonable
	fmt.Printf("\n=== Starting dgraph benchmark ===\n")

	fmt.Printf("RunPeakThroughputTest (dgraph)..\n")
	dgb.bench.RunPeakThroughputTest()
	time.Sleep(10 * time.Second)

	fmt.Printf("RunBurstPatternTest (dgraph)..\n")
	dgb.bench.RunBurstPatternTest()
	time.Sleep(10 * time.Second)

	fmt.Printf("RunMixedReadWriteTest (dgraph)..\n")
	dgb.bench.RunMixedReadWriteTest()
	time.Sleep(10 * time.Second)

	fmt.Printf("RunQueryTest (dgraph)..\n")
	dgb.bench.RunQueryTest()
	time.Sleep(10 * time.Second)

	fmt.Printf("RunConcurrentQueryStoreTest (dgraph)..\n")
	dgb.bench.RunConcurrentQueryStoreTest()

	fmt.Printf("\n=== Dgraph benchmark completed ===\n\n")
}

// GenerateReport generates the benchmark report
func (dgb *DgraphBenchmark) GenerateReport() {
	dgb.bench.GenerateReport()
}

// GenerateAsciidocReport generates asciidoc format report
func (dgb *DgraphBenchmark) GenerateAsciidocReport() {
	dgb.bench.GenerateAsciidocReport()
}
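NewDgraphBenchmark gates on db.Ready() with a 30-second cap. That select-on-channel-or-timeout shape is standard Go and works with any readiness signal; in this self-contained sketch the readiness goroutine is a stand-in, not relay code:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	ready := make(chan struct{})

	// Stand-in for a backend that signals readiness asynchronously.
	go func() {
		time.Sleep(500 * time.Millisecond)
		close(ready)
	}()

	select {
	case <-ready:
		fmt.Println("backend is ready")
	case <-time.After(30 * time.Second):
		fmt.Println("gave up waiting; clean up and bail out")
	}
}
```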
160 cmd/benchmark/dgraph_docker.go Normal file
@@ -0,0 +1,160 @@
package main

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"time"
)

// DgraphDocker manages a dgraph instance via Docker Compose
type DgraphDocker struct {
	composeFile string
	projectName string
	running     bool
}

// NewDgraphDocker creates a new dgraph Docker manager
func NewDgraphDocker() *DgraphDocker {
	// Try to find the docker-compose file in the current directory first
	composeFile := "docker-compose-dgraph.yml"

	// If not found, try the cmd/benchmark directory (for running from project root)
	if _, err := os.Stat(composeFile); os.IsNotExist(err) {
		composeFile = filepath.Join("cmd", "benchmark", "docker-compose-dgraph.yml")
	}

	return &DgraphDocker{
		composeFile: composeFile,
		projectName: "orly-benchmark-dgraph",
		running:     false,
	}
}

// Start starts the dgraph Docker containers
func (d *DgraphDocker) Start(ctx context.Context) error {
	fmt.Println("Starting dgraph Docker containers...")

	// Stop any existing containers first
	d.Stop()

	// Start containers
	cmd := exec.CommandContext(
		ctx,
		"docker-compose",
		"-f", d.composeFile,
		"-p", d.projectName,
		"up", "-d",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to start dgraph containers: %w", err)
	}

	fmt.Println("Waiting for dgraph to be healthy...")

	// Wait for health checks to pass
	if err := d.waitForHealthy(ctx, 60*time.Second); err != nil {
		d.Stop() // Clean up on failure
		return err
	}

	d.running = true
	fmt.Println("Dgraph is ready!")
	return nil
}

// waitForHealthy waits for dgraph to become healthy
func (d *DgraphDocker) waitForHealthy(ctx context.Context, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)

	for time.Now().Before(deadline) {
		// Check if alpha is healthy by checking docker health status
		cmd := exec.CommandContext(
			ctx,
			"docker",
			"inspect",
			"--format={{.State.Health.Status}}",
			"orly-benchmark-dgraph-alpha",
		)

		output, err := cmd.Output()
		if err == nil && string(output) == "healthy\n" {
			// Additional short wait to ensure full readiness
			time.Sleep(2 * time.Second)
			return nil
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
			// Continue waiting
		}
	}

	return fmt.Errorf("dgraph failed to become healthy within %v", timeout)
}

// Stop stops and removes the dgraph Docker containers
func (d *DgraphDocker) Stop() error {
	if !d.running {
		// Try to stop anyway in case of untracked state
		cmd := exec.Command(
			"docker-compose",
			"-f", d.composeFile,
			"-p", d.projectName,
			"down", "-v",
		)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		_ = cmd.Run() // Ignore errors
		return nil
	}

	fmt.Println("Stopping dgraph Docker containers...")

	cmd := exec.Command(
		"docker-compose",
		"-f", d.composeFile,
		"-p", d.projectName,
		"down", "-v",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to stop dgraph containers: %w", err)
	}

	d.running = false
	fmt.Println("Dgraph containers stopped")
	return nil
}

// GetGRPCEndpoint returns the dgraph gRPC endpoint
func (d *DgraphDocker) GetGRPCEndpoint() string {
	return "localhost:9080"
}

// IsRunning returns whether dgraph is running
func (d *DgraphDocker) IsRunning() bool {
	return d.running
}

// Logs returns the logs from dgraph containers
func (d *DgraphDocker) Logs() error {
	cmd := exec.Command(
		"docker-compose",
		"-f", d.composeFile,
		"-p", d.projectName,
		"logs",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
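waitForHealthy above is a poll-with-deadline loop around docker inspect. Stripped of Docker, the pattern generalizes to any probe; the check function in this self-contained sketch is a placeholder assumption, not a real health probe:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pollUntil re-runs check every interval until it returns true, the context
// is cancelled, or the deadline passes.
func pollUntil(ctx context.Context, timeout, interval time.Duration, check func() bool) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		if check() {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(interval):
			// keep polling
		}
	}
	return errors.New("condition not met before deadline")
}

func main() {
	start := time.Now()
	err := pollUntil(context.Background(), 5*time.Second, 100*time.Millisecond, func() bool {
		return time.Since(start) > time.Second // placeholder for a real probe
	})
	fmt.Println(err) // <nil> once the condition flips within the deadline
}
```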
44 cmd/benchmark/docker-compose-dgraph.yml Normal file
@@ -0,0 +1,44 @@
version: "3.9"

services:
  dgraph-zero:
    image: dgraph/dgraph:v23.1.0
    container_name: orly-benchmark-dgraph-zero
    working_dir: /data/zero
    ports:
      - "5080:5080"
      - "6080:6080"
    command: dgraph zero --my=dgraph-zero:5080
    networks:
      - orly-benchmark
    healthcheck:
      test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
      interval: 5s
      timeout: 3s
      retries: 3
      start_period: 5s

  dgraph-alpha:
    image: dgraph/dgraph:v23.1.0
    container_name: orly-benchmark-dgraph-alpha
    working_dir: /data/alpha
    ports:
      - "8080:8080"
      - "9080:9080"
    command: dgraph alpha --my=dgraph-alpha:7080 --zero=dgraph-zero:5080 --security whitelist=0.0.0.0/0
    networks:
      - orly-benchmark
    depends_on:
      dgraph-zero:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
      interval: 5s
      timeout: 3s
      retries: 6
      start_period: 10s

networks:
  orly-benchmark:
    name: orly-benchmark-network
    driver: bridge
@@ -1,19 +1,20 @@
 version: "3.8"
 
 services:
-  # Next.orly.dev relay (this repository)
-  next-orly:
+  # Next.orly.dev relay with Badger (this repository)
+  next-orly-badger:
     build:
       context: ../..
       dockerfile: cmd/benchmark/Dockerfile.next-orly
-    container_name: benchmark-next-orly
+    container_name: benchmark-next-orly-badger
     environment:
       - ORLY_DATA_DIR=/data
       - ORLY_LISTEN=0.0.0.0
       - ORLY_PORT=8080
       - ORLY_LOG_LEVEL=off
+      - ORLY_DB_TYPE=badger
     volumes:
-      - ./data/next-orly:/data
+      - ./data/next-orly-badger:/data
     ports:
       - "8001:8080"
     networks:
@@ -25,6 +26,78 @@ services:
       retries: 3
       start_period: 40s
 
+  # Next.orly.dev relay with DGraph (this repository)
+  next-orly-dgraph:
+    build:
+      context: ../..
+      dockerfile: cmd/benchmark/Dockerfile.next-orly
+    container_name: benchmark-next-orly-dgraph
+    environment:
+      - ORLY_DATA_DIR=/data
+      - ORLY_LISTEN=0.0.0.0
+      - ORLY_PORT=8080
+      - ORLY_LOG_LEVEL=off
+      - ORLY_DB_TYPE=dgraph
+      - ORLY_DGRAPH_URL=dgraph-alpha:9080
+    volumes:
+      - ./data/next-orly-dgraph:/data
+    ports:
+      - "8007:8080"
+    networks:
+      - benchmark-net
+    depends_on:
+      dgraph-alpha:
+        condition: service_healthy
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8080/"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 60s
+
+  # DGraph Zero - cluster coordinator
+  dgraph-zero:
+    image: dgraph/dgraph:v23.1.0
+    container_name: benchmark-dgraph-zero
+    working_dir: /data/zero
+    ports:
+      - "5080:5080"
+      - "6080:6080"
+    volumes:
+      - ./data/dgraph-zero:/data
+    command: dgraph zero --my=dgraph-zero:5080
+    networks:
+      - benchmark-net
+    healthcheck:
+      test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
+      interval: 5s
+      timeout: 3s
+      retries: 3
+      start_period: 5s
+
+  # DGraph Alpha - data node
+  dgraph-alpha:
+    image: dgraph/dgraph:v23.1.0
+    container_name: benchmark-dgraph-alpha
+    working_dir: /data/alpha
+    ports:
+      - "8088:8080"
+      - "9080:9080"
+    volumes:
+      - ./data/dgraph-alpha:/data
+    command: dgraph alpha --my=dgraph-alpha:7080 --zero=dgraph-zero:5080 --security whitelist=0.0.0.0/0
+    networks:
+      - benchmark-net
+    depends_on:
+      dgraph-zero:
+        condition: service_healthy
+    healthcheck:
+      test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
+      interval: 5s
+      timeout: 3s
+      retries: 6
+      start_period: 10s
+
   # Khatru with SQLite
   khatru-sqlite:
     build:
@@ -145,7 +218,9 @@ services:
       dockerfile: cmd/benchmark/Dockerfile.benchmark
     container_name: benchmark-runner
     depends_on:
-      next-orly:
+      next-orly-badger:
+        condition: service_healthy
+      next-orly-dgraph:
         condition: service_healthy
       khatru-sqlite:
         condition: service_healthy
@@ -158,7 +233,7 @@ services:
       nostr-rs-relay:
         condition: service_healthy
     environment:
-      - BENCHMARK_TARGETS=next-orly:8080,khatru-sqlite:3334,khatru-badger:3334,relayer-basic:7447,strfry:8080,nostr-rs-relay:8080
+      - BENCHMARK_TARGETS=next-orly-badger:8080,next-orly-dgraph:8080,khatru-sqlite:3334,khatru-badger:3334,relayer-basic:7447,strfry:8080,nostr-rs-relay:8080
       - BENCHMARK_EVENTS=50000
       - BENCHMARK_WORKERS=24
       - BENCHMARK_DURATION=60s
@@ -36,6 +36,9 @@ type BenchmarkConfig struct {
 	RelayURL   string
 	NetWorkers int
 	NetRate    int // events/sec per worker
+
+	// Backend selection
+	UseDgraph bool
 }
 
 type BenchmarkResult struct {
@@ -71,7 +74,14 @@ func main() {
 		return
 	}
 
-	fmt.Printf("Starting Nostr Relay Benchmark\n")
+	if config.UseDgraph {
+		// Run dgraph benchmark
+		runDgraphBenchmark(config)
+		return
+	}
+
+	// Run standard Badger benchmark
+	fmt.Printf("Starting Nostr Relay Benchmark (Badger Backend)\n")
 	fmt.Printf("Data Directory: %s\n", config.DataDir)
 	fmt.Printf(
 		"Events: %d, Workers: %d, Duration: %v\n",
@@ -89,6 +99,28 @@ func main() {
 	benchmark.GenerateAsciidocReport()
 }
 
+func runDgraphBenchmark(config *BenchmarkConfig) {
+	fmt.Printf("Starting Nostr Relay Benchmark (Dgraph Backend)\n")
+	fmt.Printf("Data Directory: %s\n", config.DataDir)
+	fmt.Printf(
+		"Events: %d, Workers: %d\n",
+		config.NumEvents, config.ConcurrentWorkers,
+	)
+
+	dgraphBench, err := NewDgraphBenchmark(config)
+	if err != nil {
+		log.Fatalf("Failed to create dgraph benchmark: %v", err)
+	}
+	defer dgraphBench.Close()
+
+	// Run dgraph benchmark suite
+	dgraphBench.RunSuite()
+
+	// Generate reports
+	dgraphBench.GenerateReport()
+	dgraphBench.GenerateAsciidocReport()
+}
+
 func parseFlags() *BenchmarkConfig {
 	config := &BenchmarkConfig{}
 
@@ -124,6 +156,12 @@ func parseFlags() *BenchmarkConfig {
 	)
 	flag.IntVar(&config.NetRate, "net-rate", 20, "Events per second per worker")
 
+	// Backend selection
+	flag.BoolVar(
+		&config.UseDgraph, "dgraph", false,
+		"Use dgraph backend (requires Docker)",
+	)
+
 	flag.Parse()
 	return config
 }
@@ -286,7 +324,7 @@ func NewBenchmark(config *BenchmarkConfig) *Benchmark {
 	ctx := context.Background()
 	cancel := func() {}
 
-	db, err := database.New(ctx, cancel, config.DataDir, "info")
+	db, err := database.New(ctx, cancel, config.DataDir, "warn")
 	if err != nil {
 		log.Fatalf("Failed to create database: %v", err)
 	}
@@ -974,7 +1012,7 @@ func (b *Benchmark) generateEvents(count int) []*event.E {
 		log.Fatalf("Failed to generate keys for benchmark events: %v", err)
 	}
 
-	// Define size distribution - from minimal to 500MB
+	// Define size distribution - from minimal to 500KB
 	// We'll create a logarithmic distribution to test various sizes
 	sizeBuckets := []int{
 		0, // Minimal: empty content, no tags
@@ -984,13 +1022,8 @@ func (b *Benchmark) generateEvents(count int) []*event.E {
 		10 * 1024,  // 10 KB
 		50 * 1024,  // 50 KB
 		100 * 1024, // 100 KB
-		500 * 1024,        // 500 KB
-		1024 * 1024,       // 1 MB
-		5 * 1024 * 1024,   // 5 MB
-		10 * 1024 * 1024,  // 10 MB
-		50 * 1024 * 1024,  // 50 MB
-		100 * 1024 * 1024, // 100 MB
-		500000000,         // 500 MB (500,000,000 bytes)
+		250 * 1024, // 250 KB
+		500 * 1024, // 500 KB (max realistic size for Nostr)
 	}
 
 	for i := 0; i < count; i++ {
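The revised bucket list implies log-spaced payload sizes, but the rule for drawing a size per event sits outside this hunk. One hypothetical way to cycle events through such buckets — an illustration only, not the commit's actual selection logic:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Log-spaced content sizes, mirroring the revised sizeBuckets.
	sizeBuckets := []int{0, 1024, 10 * 1024, 50 * 1024, 100 * 1024, 250 * 1024, 500 * 1024}

	for i := 0; i < 10; i++ {
		// Round-robin through the buckets so every size class appears
		// with equal frequency across the generated events.
		size := sizeBuckets[i%len(sizeBuckets)]
		content := strings.Repeat("x", size)
		fmt.Printf("event %d: %d-byte content\n", i, len(content))
	}
}
```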
134 cmd/benchmark/reports/run_20251117_154730/next-orly_results.txt Normal file
@@ -0,0 +1,134 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly_8
Events: 50000, Workers: 24, Duration: 1m0s
1763394450181444ℹ️ /tmp/benchmark_next-orly_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56
1763394450184981ℹ️ /tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56
1763394450185044ℹ️ /tmp/benchmark_next-orly_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56
1763394450185315ℹ️ migrating to version 1... /build/pkg/database/migrations.go:66
1763394450185349ℹ️ migrating to version 2... /build/pkg/database/migrations.go:73
1763394450185369ℹ️ migrating to version 3... /build/pkg/database/migrations.go:80
1763394450185374ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287
1763394450185381ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332
1763394450185396ℹ️ migrating to version 4... /build/pkg/database/migrations.go:87
1763394450185400ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340
1763394450185410ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429
1763394450185415ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538

=== Starting test round 1/2 ===
RunPeakThroughputTest..

=== Peak Throughput Test ===
2025/11/17 15:47:30 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2
1763394452185466ℹ️ /tmp/benchmark_next-orly_8: database warmup complete, ready to serve requests
/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56
Events saved: 50000/50000 (100.0%)
Duration: 4.816237891s
Events/sec: 10381.55
Avg latency: 1.655686ms
P90 latency: 2.061483ms
P95 latency: 2.348178ms
P99 latency: 3.856522ms
Bottom 10% Avg latency: 2.985064ms
RunBurstPatternTest..

=== Burst Pattern Test ===
Burst completed: 5000 events in 308.793395ms
Burst completed: 5000 events in 320.69366ms
Burst completed: 5000 events in 324.127721ms
Burst completed: 5000 events in 342.594802ms
Burst completed: 5000 events in 302.350819ms
Burst completed: 5000 events in 309.16143ms
Burst completed: 5000 events in 306.739193ms
Burst completed: 5000 events in 329.275972ms
Burst completed: 5000 events in 329.234395ms
Burst completed: 5000 events in 348.105403ms
Burst test completed: 50000 events in 9.543815189s
Events/sec: 5238.99
RunMixedReadWriteTest..

=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 25000 writes, 25000 reads in 24.491349518s
Combined ops/sec: 2041.54
1763394510174043ℹ️ /tmp/benchmark_next-orly_8: Block cache metrics: hit: 248593 miss: 322620 keys-added: 236208 keys-updated: 73483 keys-evicted: 236188 cost-added: 12658387393408 cost-evicted: 12657366958988 sets-dropped: 0 sets-rejected: 12869 gets-dropped: 64 gets-kept: 570624 gets-total: 571213 hit-ratio: 0.44
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
RunQueryTest..

=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 258436 queries in 1m0.014042961s
Queries/sec: 4306.26
Avg query latency: 4.008354ms
P95 query latency: 12.985167ms
P99 query latency: 23.424372ms
RunConcurrentQueryStoreTest..

=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 252445 operations (202445 queries, 50000 writes) in 1m0.005913119s
Operations/sec: 4207.00
Avg latency: 2.121776ms
Avg query latency: 2.374689ms
Avg write latency: 1.097756ms
P95 latency: 3.545393ms
P99 latency: 4.795537ms

Pausing 10s before next round...

=== Test round completed ===


=== Starting test round 2/2 ===
RunPeakThroughputTest..

=== Peak Throughput Test ===
Events saved: 50000/50000 (100.0%)
Duration: 5.086723437s
Events/sec: 9829.51
Avg latency: 1.777699ms
P90 latency: 2.219786ms
P95 latency: 2.443201ms
P99 latency: 3.504646ms
Bottom 10% Avg latency: 3.103013ms
RunBurstPatternTest..

=== Burst Pattern Test ===
Burst completed: 5000 events in 324.341799ms
Burst completed: 5000 events in 319.047042ms
Burst completed: 5000 events in 324.104589ms
Burst completed: 5000 events in 342.464953ms
Burst completed: 5000 events in 342.679451ms
Burst completed: 5000 events in 359.150337ms
Burst completed: 5000 events in 367.952516ms
Burst completed: 5000 events in 338.4073ms
Burst completed: 5000 events in 326.796197ms
Burst completed: 5000 events in 357.71787ms
Burst test completed: 50000 events in 9.769325434s
Events/sec: 5118.06
1763394684274617ℹ️ /tmp/benchmark_next-orly_8: [4] [E] LOG Compact 0->6 (8, 0 -> 4 tables with 1 splits). [00001 00002 00003 00004 00005 00006 00007 00008 . .] -> [00009 00010 00011 00012 .], took 2.954s
, deleted 1904950 bytes
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:1479 /build/pkg/database/logger.go:56
RunMixedReadWriteTest..

=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 25000 writes, 25000 reads in 24.464062793s
Combined ops/sec: 2043.81
RunQueryTest..

=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 293040 queries in 1m0.010621036s
Queries/sec: 4883.14
Avg query latency: 3.419764ms
P95 query latency: 11.042876ms
P99 query latency: 19.984912ms
RunConcurrentQueryStoreTest..

=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
1763394810173629ℹ️ /tmp/benchmark_next-orly_8: Block cache metrics: hit: 517421289 miss: 4606293 keys-added: 1664534 keys-updated: 2530425 keys-evicted: 1664512 cost-added: 85045328540032 cost-evicted: 85044318079141 sets-dropped: 0 sets-rejected: 349798 gets-dropped: 404194112 gets-kept: 117717888 gets-total: 522027608 hit-ratio: 0.99
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
@@ -0,0 +1,53 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-badger_8
Events: 50000, Workers: 24, Duration: 1m0s
1763397432159815ℹ️ /tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56
1763397432162963ℹ️ /tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56
1763397432163005ℹ️ /tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56
1763397432163282ℹ️ migrating to version 1... /build/pkg/database/migrations.go:66
1763397432163367ℹ️ migrating to version 2... /build/pkg/database/migrations.go:73
1763397432163401ℹ️ migrating to version 3... /build/pkg/database/migrations.go:80
1763397432163409ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287
1763397432163473ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332
1763397432163564ℹ️ migrating to version 4... /build/pkg/database/migrations.go:87
1763397432163574ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340
1763397432163594ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429
1763397432163600ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538

=== Starting test round 1/2 ===
RunPeakThroughputTest..

=== Peak Throughput Test ===
2025/11/17 16:37:12 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2
1763397434164165ℹ️ /tmp/benchmark_khatru-badger_8: database warmup complete, ready to serve requests
/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56
Events saved: 50000/50000 (100.0%)
Duration: 4.924203666s
Events/sec: 10153.93
Avg latency: 1.696974ms
P90 latency: 2.11483ms
P95 latency: 2.344067ms
P99 latency: 3.241477ms
Bottom 10% Avg latency: 2.7865ms
RunBurstPatternTest..

=== Burst Pattern Test ===
Burst completed: 5000 events in 312.680497ms
Burst completed: 5000 events in 320.868898ms
Burst completed: 5000 events in 317.096109ms
Burst completed: 5000 events in 356.971689ms
Burst completed: 5000 events in 301.615682ms
Burst completed: 5000 events in 306.525096ms
Burst completed: 5000 events in 320.037813ms
Burst completed: 5000 events in 318.017102ms
Burst completed: 5000 events in 320.394281ms
Burst completed: 5000 events in 333.619741ms
Burst test completed: 50000 events in 9.552105607s
Events/sec: 5234.45
RunMixedReadWriteTest..

=== Mixed Read/Write Test ===
Pre-populating database for read tests...
@@ -0,0 +1,323 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_khatru-sqlite_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1763397017138391ℹ️ /tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56
|
||||
1763397017141550ℹ️ /tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56
|
||||
1763397017141593ℹ️ /tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56
|
||||
1763397017141951ℹ️ migrating to version 1... /build/pkg/database/migrations.go:66
|
||||
1763397017142013ℹ️ migrating to version 2... /build/pkg/database/migrations.go:73
|
||||
1763397017142036ℹ️ migrating to version 3... /build/pkg/database/migrations.go:80
|
||||
1763397017142042ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287
|
||||
1763397017142055ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332
|
||||
1763397017142080ℹ️ migrating to version 4... /build/pkg/database/migrations.go:87
|
||||
1763397017142086ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340
|
||||
1763397017142103ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429
|
||||
1763397017142109ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/11/17 16:30:17 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2
|
||||
1763397019142156ℹ️ /tmp/benchmark_khatru-sqlite_8: database warmup complete, ready to serve requests
|
||||
/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56
|
||||
Events saved: 50000/50000 (100.0%)
|
||||
Duration: 4.697220167s
|
||||
Events/sec: 10644.59
|
||||
Avg latency: 1.589521ms
|
||||
P90 latency: 1.927686ms
|
||||
P95 latency: 2.072081ms
|
||||
P99 latency: 2.794007ms
|
||||
Bottom 10% Avg latency: 2.449508ms
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 331.053594ms
|
||||
Burst completed: 5000 events in 339.97436ms
|
||||
Burst completed: 5000 events in 352.328844ms
|
||||
Burst completed: 5000 events in 376.613834ms
|
||||
Burst completed: 5000 events in 321.307729ms
|
||||
Burst completed: 5000 events in 314.265411ms
|
||||
Burst completed: 5000 events in 321.656622ms
|
||||
Burst completed: 5000 events in 325.689539ms
|
||||
Burst completed: 5000 events in 367.767832ms
|
||||
Burst completed: 5000 events in 367.275402ms
|
||||
Burst test completed: 50000 events in 9.780316233s
|
||||
Events/sec: 5112.31
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.45356557s
|
||||
Combined ops/sec: 2044.69
|
||||
1763397077132611⚠️ /tmp/benchmark_khatru-sqlite_8: Block cache might be too small. Metrics: hit: 164850 miss: 294509 keys-added: 226622 keys-updated: 54881 keys-evicted: 226603 cost-added: 12429978548485 cost-evicted: 12428976154843 sets-dropped: 0 sets-rejected: 12954 gets-dropped: 192 gets-kept: 458368 gets-total: 459359 hit-ratio: 0.36
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:450 /build/pkg/database/logger.go:46
|
||||
1763397077132680⚠️ /tmp/benchmark_khatru-sqlite_8: Cache life expectancy (in seconds):
|
||||
-- Histogram:
|
||||
Min value: 0
|
||||
Max value: 11
|
||||
Count: 226603
|
||||
50p: 2.00
|
||||
75p: 2.00
|
||||
90p: 2.00
|
||||
[0, 2) 226567 99.98% 99.98%
|
||||
[8, 16) 36 0.02% 100.00%
|
||||
--
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:451 /build/pkg/database/logger.go:46
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 253442 queries in 1m0.011742602s
|
||||
Queries/sec: 4223.21
|
||||
Avg query latency: 4.105842ms
|
||||
P95 query latency: 13.288591ms
|
||||
P99 query latency: 23.937862ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 237910 operations (187910 queries, 50000 writes) in 1m0.007412985s
|
||||
Operations/sec: 3964.68
|
||||
Avg latency: 2.360698ms
|
||||
Avg query latency: 2.630397ms
|
||||
Avg write latency: 1.347113ms
|
||||
P95 latency: 4.390739ms
|
||||
P99 latency: 6.940329ms
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 50000/50000 (100.0%)
|
||||
Duration: 4.792392684s
|
||||
Events/sec: 10433.20
|
||||
Avg latency: 1.649743ms
|
||||
P90 latency: 1.991666ms
|
||||
P95 latency: 2.145348ms
|
||||
P99 latency: 2.77034ms
|
||||
Bottom 10% Avg latency: 2.781523ms
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 330.357755ms
|
||||
Burst completed: 5000 events in 334.984623ms
|
||||
Burst completed: 5000 events in 345.478382ms
|
||||
Burst completed: 5000 events in 340.589233ms
|
||||
Burst completed: 5000 events in 348.792025ms
|
||||
Burst completed: 5000 events in 354.019658ms
|
||||
Burst completed: 5000 events in 356.823662ms
|
||||
Burst completed: 5000 events in 347.496865ms
|
||||
Burst completed: 5000 events in 342.618798ms
|
||||
Burst completed: 5000 events in 337.759666ms
|
||||
Burst test completed: 50000 events in 9.775603327s
|
||||
Events/sec: 5114.77
|
||||
1763397250998218ℹ️ /tmp/benchmark_khatru-sqlite_8: [6] [E] LOG Compact 0->6 (8, 0 -> 4 tables with 1 splits). [00001 00002 00003 00004 00005 00006 00007 00008 . .] -> [00009 00010 00011 00012 .], took 2.922s
|
||||
, deleted 1932516 bytes
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:1479 /build/pkg/database/logger.go:56
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.35620806s
|
||||
Combined ops/sec: 2052.86
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 334922 queries in 1m0.011826287s
|
||||
Queries/sec: 5580.93
|
||||
Avg query latency: 2.871941ms
|
||||
P95 query latency: 8.86787ms
|
||||
P99 query latency: 16.075646ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
1763397377131811ℹ️ /tmp/benchmark_khatru-sqlite_8: Block cache metrics: hit: 485497199 miss: 4802603 keys-added: 1628313 keys-updated: 2776240 keys-evicted: 1628292 cost-added: 85662348259200 cost-evicted: 85661362474446 sets-dropped: 0 sets-rejected: 336231 gets-dropped: 382997632 gets-kept: 107185536 gets-total: 490299843 hit-ratio: 0.99
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
|
||||
Concurrent test completed: 266462 operations (216462 queries, 50000 writes) in 1m0.004503525s
|
||||
Operations/sec: 4440.70
|
||||
Avg latency: 1.968296ms
|
||||
Avg query latency: 2.154689ms
|
||||
Avg write latency: 1.161355ms
|
||||
P95 latency: 3.329033ms
|
||||
P99 latency: 4.878236ms
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 4.697220167s
Total Events: 50000
Events/sec: 10644.59
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 432 MB
Avg Latency: 1.589521ms
P90 Latency: 1.927686ms
P95 Latency: 2.072081ms
P99 Latency: 2.794007ms
Bottom 10% Avg Latency: 2.449508ms
----------------------------------------

Test: Burst Pattern
Duration: 9.780316233s
Total Events: 50000
Events/sec: 5112.31
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 147 MB
Avg Latency: 3.589724ms
P90 Latency: 7.397294ms
P95 Latency: 9.015658ms
P99 Latency: 12.848707ms
Bottom 10% Avg Latency: 10.286462ms
----------------------------------------

Test: Mixed Read/Write
Duration: 24.45356557s
Total Events: 50000
Events/sec: 2044.69
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 189 MB
Avg Latency: 439.984µs
P90 Latency: 878.495µs
P95 Latency: 980.94µs
P99 Latency: 1.17514ms
Bottom 10% Avg Latency: 1.261937ms
----------------------------------------

Test: Query Performance
Duration: 1m0.011742602s
Total Events: 253442
Events/sec: 4223.21
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 165 MB
Avg Latency: 4.105842ms
P90 Latency: 8.468483ms
P95 Latency: 13.288591ms
P99 Latency: 23.937862ms
Bottom 10% Avg Latency: 15.251447ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.007412985s
Total Events: 237910
Events/sec: 3964.68
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 149 MB
Avg Latency: 2.360698ms
P90 Latency: 3.517024ms
P95 Latency: 4.390739ms
P99 Latency: 6.940329ms
Bottom 10% Avg Latency: 5.015416ms
----------------------------------------

Test: Peak Throughput
Duration: 4.792392684s
Total Events: 50000
Events/sec: 10433.20
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 523 MB
Avg Latency: 1.649743ms
P90 Latency: 1.991666ms
P95 Latency: 2.145348ms
P99 Latency: 2.77034ms
Bottom 10% Avg Latency: 2.781523ms
----------------------------------------

Test: Burst Pattern
Duration: 9.775603327s
Total Events: 50000
Events/sec: 5114.77
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 541 MB
Avg Latency: 2.925486ms
P90 Latency: 5.542703ms
P95 Latency: 7.775478ms
P99 Latency: 11.125804ms
Bottom 10% Avg Latency: 8.91184ms
----------------------------------------

Test: Mixed Read/Write
Duration: 24.35620806s
Total Events: 50000
Events/sec: 2052.86
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 200 MB
Avg Latency: 424.333µs
P90 Latency: 865.429µs
P95 Latency: 968.085µs
P99 Latency: 1.174568ms
Bottom 10% Avg Latency: 1.224002ms
----------------------------------------

Test: Query Performance
Duration: 1m0.011826287s
Total Events: 334922
Events/sec: 5580.93
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 129 MB
Avg Latency: 2.871941ms
P90 Latency: 5.60422ms
P95 Latency: 8.86787ms
P99 Latency: 16.075646ms
Bottom 10% Avg Latency: 10.23636ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.004503525s
Total Events: 266462
Events/sec: 4440.70
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 155 MB
Avg Latency: 1.968296ms
P90 Latency: 2.729181ms
P95 Latency: 3.329033ms
P99 Latency: 4.878236ms
Bottom 10% Avg Latency: 3.768185ms
----------------------------------------

Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc
1763397425682348ℹ️ /tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:56
1763397426982581ℹ️ /tmp/benchmark_khatru-sqlite_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 4 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 5 [ ]: NumTables: 01. Size: 94 MiB of 23 MiB. Score: 4.08->4.08 StaleData: 0 B Target FileSize: 122 MiB
Level 6 [ ]: NumTables: 04. Size: 230 MiB of 230 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 244 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:56

RELAY_NAME: khatru-sqlite
RELAY_URL: ws://khatru-sqlite:3334
TEST_TIMESTAMP: 2025-11-17T16:37:07+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
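Note on the latency figures in these reports: P90/P95/P99 are order statistics over the per-operation latencies each test records. The benchmark's own implementation is not part of this diff; the following is only a minimal Go sketch of a nearest-rank percentile over recorded durations, with the function name and sample values invented for illustration:

    package main

    import (
        "fmt"
        "sort"
        "time"
    )

    // percentile returns the p-th percentile (0-100) of the recorded
    // latencies, using a rounded-rank index on a sorted copy.
    func percentile(latencies []time.Duration, p float64) time.Duration {
        if len(latencies) == 0 {
            return 0
        }
        sorted := append([]time.Duration(nil), latencies...)
        sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
        idx := int(float64(len(sorted))*p/100.0+0.5) - 1 // rank = round(p/100 * N)
        if idx < 0 {
            idx = 0
        }
        if idx >= len(sorted) {
            idx = len(sorted) - 1
        }
        return sorted[idx]
    }

    func main() {
        samples := []time.Duration{
            2 * time.Millisecond, 3 * time.Millisecond, 1 * time.Millisecond,
            5 * time.Millisecond, 2 * time.Millisecond,
        }
        fmt.Println("P95:", percentile(samples, 95)) // the largest sample for N=5
    }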
@@ -0,0 +1,311 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly-badger_8
Events: 50000, Workers: 24, Duration: 1m0s
1763396182850462ℹ️ /tmp/benchmark_next-orly-badger_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56
1763396182853668ℹ️ /tmp/benchmark_next-orly-badger_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56
1763396182853712ℹ️ /tmp/benchmark_next-orly-badger_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56
1763396182854009ℹ️ migrating to version 1... /build/pkg/database/migrations.go:66
1763396182854056ℹ️ migrating to version 2... /build/pkg/database/migrations.go:73
1763396182854078ℹ️ migrating to version 3... /build/pkg/database/migrations.go:80
1763396182854082ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287
1763396182854129ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332
1763396182854260ℹ️ migrating to version 4... /build/pkg/database/migrations.go:87
1763396182854271ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340
1763396182854295ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429
1763396182854302ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538

=== Starting test round 1/2 ===
RunPeakThroughputTest..

=== Peak Throughput Test ===
2025/11/17 16:16:22 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2
1763396184854370ℹ️ /tmp/benchmark_next-orly-badger_8: database warmup complete, ready to serve requests
/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56
Events saved: 50000/50000 (100.0%)
Duration: 5.666497805s
Events/sec: 8823.79
Avg latency: 2.020722ms
P90 latency: 2.645436ms
P95 latency: 2.995948ms
P99 latency: 4.460502ms
Bottom 10% Avg latency: 3.520179ms
RunBurstPatternTest..

=== Burst Pattern Test ===
Burst completed: 5000 events in 352.025605ms
Burst completed: 5000 events in 363.623929ms
Burst completed: 5000 events in 367.475139ms
Burst completed: 5000 events in 396.276199ms
Burst completed: 5000 events in 334.007635ms
Burst completed: 5000 events in 342.086817ms
Burst completed: 5000 events in 360.687805ms
Burst completed: 5000 events in 392.627451ms
Burst completed: 5000 events in 397.635203ms
Burst completed: 5000 events in 376.061572ms
Burst test completed: 50000 events in 10.132858185s
Events/sec: 4934.44
RunMixedReadWriteTest..

=== Mixed Read/Write Test ===
Pre-populating database for read tests...
1763396242843490ℹ️ /tmp/benchmark_next-orly-badger_8: Block cache metrics: hit: 232171 miss: 337826 keys-added: 235144 keys-updated: 89642 keys-evicted: 235124 cost-added: 12615246695866 cost-evicted: 12614243474391 sets-dropped: 0 sets-rejected: 12961 gets-dropped: 1280 gets-kept: 568192 gets-total: 569997 hit-ratio: 0.41
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
Mixed test completed: 25000 writes, 25000 reads in 24.625333257s
Combined ops/sec: 2030.43
RunQueryTest..

=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 197562 queries in 1m0.011972513s
Queries/sec: 3292.04
Avg query latency: 5.52205ms
P95 query latency: 18.40165ms
P99 query latency: 32.139723ms
RunConcurrentQueryStoreTest..

=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 224870 operations (174870 queries, 50000 writes) in 1m0.006047854s
Operations/sec: 3747.46
Avg latency: 2.665369ms
Avg query latency: 2.866192ms
Avg write latency: 1.963009ms
P95 latency: 5.204253ms
P99 latency: 8.129537ms

Pausing 10s before next round...

=== Test round completed ===


=== Starting test round 2/2 ===
RunPeakThroughputTest..

=== Peak Throughput Test ===
Events saved: 50000/50000 (100.0%)
Duration: 5.145620568s
Events/sec: 9717.00
Avg latency: 1.788996ms
P90 latency: 2.241725ms
P95 latency: 2.442669ms
P99 latency: 3.110506ms
Bottom 10% Avg latency: 3.016821ms
RunBurstPatternTest..

=== Burst Pattern Test ===
Burst completed: 5000 events in 362.292309ms
Burst completed: 5000 events in 446.105376ms
Burst completed: 5000 events in 414.443306ms
Burst completed: 5000 events in 378.792051ms
Burst completed: 5000 events in 381.274883ms
Burst completed: 5000 events in 397.941224ms
Burst completed: 5000 events in 449.109795ms
Burst completed: 5000 events in 410.566974ms
Burst completed: 5000 events in 385.220958ms
Burst completed: 5000 events in 383.149443ms
1763396419122547ℹ️ /tmp/benchmark_next-orly-badger_8: [0] [E] LOG Compact 0->6 (8, 0 -> 4 tables with 1 splits). [00001 00002 00003 00004 00005 00006 00007 00008 . .] -> [00009 00010 00011 00012 .], took 3.061s, deleted 1899050 bytes
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:1479 /build/pkg/database/logger.go:56
Burst test completed: 50000 events in 10.438224172s
Events/sec: 4790.09
RunMixedReadWriteTest..

=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 25000 writes, 25000 reads in 24.485622359s
Combined ops/sec: 2042.01
RunQueryTest..

=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 293294 queries in 1m0.013023948s
Queries/sec: 4887.17
Avg query latency: 3.408294ms
P95 query latency: 10.965419ms
P99 query latency: 19.184675ms
RunConcurrentQueryStoreTest..

=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
1763396542843038ℹ️ /tmp/benchmark_next-orly-badger_8: Block cache metrics: hit: 411640922 miss: 5406705 keys-added: 1627143 keys-updated: 3422501 keys-evicted: 1627125 cost-added: 84304242021549 cost-evicted: 84303233712402 sets-dropped: 0 sets-rejected: 295382 gets-dropped: 325582080 gets-kept: 91360192 gets-total: 417047650 hit-ratio: 0.99
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
Concurrent test completed: 254899 operations (204899 queries, 50000 writes) in 1m0.006656731s
Operations/sec: 4247.85
Avg latency: 2.125728ms
Avg query latency: 2.314927ms
Avg write latency: 1.350394ms
P95 latency: 3.778776ms
P99 latency: 5.393909ms

=== Test round completed ===


================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 5.666497805s
Total Events: 50000
Events/sec: 8823.79
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 257 MB
Avg Latency: 2.020722ms
P90 Latency: 2.645436ms
P95 Latency: 2.995948ms
P99 Latency: 4.460502ms
Bottom 10% Avg Latency: 3.520179ms
----------------------------------------

Test: Burst Pattern
Duration: 10.132858185s
Total Events: 50000
Events/sec: 4934.44
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 122 MB
Avg Latency: 7.197024ms
P90 Latency: 12.546513ms
P95 Latency: 15.216454ms
P99 Latency: 23.682573ms
Bottom 10% Avg Latency: 18.172083ms
----------------------------------------

Test: Mixed Read/Write
Duration: 24.625333257s
Total Events: 50000
Events/sec: 2030.43
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 120 MB
Avg Latency: 467.389µs
P90 Latency: 914.891µs
P95 Latency: 1.0349ms
P99 Latency: 1.268268ms
Bottom 10% Avg Latency: 1.393626ms
----------------------------------------

Test: Query Performance
Duration: 1m0.011972513s
Total Events: 197562
Events/sec: 3292.04
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 152 MB
Avg Latency: 5.52205ms
P90 Latency: 12.226879ms
P95 Latency: 18.40165ms
P99 Latency: 32.139723ms
Bottom 10% Avg Latency: 20.985445ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.006047854s
Total Events: 224870
Events/sec: 3747.46
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 127 MB
Avg Latency: 2.665369ms
P90 Latency: 4.194993ms
P95 Latency: 5.204253ms
P99 Latency: 8.129537ms
Bottom 10% Avg Latency: 5.884586ms
----------------------------------------

Test: Peak Throughput
Duration: 5.145620568s
Total Events: 50000
Events/sec: 9717.00
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 522 MB
Avg Latency: 1.788996ms
P90 Latency: 2.241725ms
P95 Latency: 2.442669ms
P99 Latency: 3.110506ms
Bottom 10% Avg Latency: 3.016821ms
----------------------------------------

Test: Burst Pattern
Duration: 10.438224172s
Total Events: 50000
Events/sec: 4790.09
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 623 MB
Avg Latency: 9.406859ms
P90 Latency: 21.810715ms
P95 Latency: 35.119382ms
P99 Latency: 66.001509ms
Bottom 10% Avg Latency: 39.782175ms
----------------------------------------

Test: Mixed Read/Write
Duration: 24.485622359s
Total Events: 50000
Events/sec: 2042.01
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 165 MB
Avg Latency: 445.318µs
P90 Latency: 907.915µs
P95 Latency: 1.021172ms
P99 Latency: 1.227095ms
Bottom 10% Avg Latency: 1.265835ms
----------------------------------------

Test: Query Performance
Duration: 1m0.013023948s
Total Events: 293294
Events/sec: 4887.17
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 136 MB
Avg Latency: 3.408294ms
P90 Latency: 7.156129ms
P95 Latency: 10.965419ms
P99 Latency: 19.184675ms
Bottom 10% Avg Latency: 12.469832ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.006656731s
Total Events: 254899
Events/sec: 4247.85
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 147 MB
Avg Latency: 2.125728ms
P90 Latency: 3.131901ms
P95 Latency: 3.778776ms
P99 Latency: 5.393909ms
Bottom 10% Avg Latency: 4.22837ms
----------------------------------------

Report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.adoc
1763396593981772ℹ️ /tmp/benchmark_next-orly-badger_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:56
1763396595378747ℹ️ /tmp/benchmark_next-orly-badger_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 4 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 5 [ ]: NumTables: 01. Size: 94 MiB of 23 MiB. Score: 4.08->4.08 StaleData: 0 B Target FileSize: 122 MiB
Level 6 [ ]: NumTables: 04. Size: 230 MiB of 230 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 244 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:56

RELAY_NAME: next-orly-badger
RELAY_URL: ws://next-orly-badger:8080
TEST_TIMESTAMP: 2025-11-17T16:23:15+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
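For orientation, each "Burst completed" line above corresponds to one batch of 5,000 events pushed through the worker pool back to back, ten batches per test. A rough Go sketch of that load shape, assuming a hypothetical sendEvent publish helper in place of the benchmark's real relay client:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // sendEvent stands in for publishing one event to the relay under test.
    func sendEvent(i int) { time.Sleep(50 * time.Microsecond) }

    func main() {
        const bursts, perBurst, workers = 10, 5000, 24
        start := time.Now()
        for b := 0; b < bursts; b++ {
            burstStart := time.Now()
            jobs := make(chan int)
            var wg sync.WaitGroup
            // Drain each burst through a fixed pool of workers.
            for w := 0; w < workers; w++ {
                wg.Add(1)
                go func() {
                    defer wg.Done()
                    for i := range jobs {
                        sendEvent(i)
                    }
                }()
            }
            for i := 0; i < perBurst; i++ {
                jobs <- i
            }
            close(jobs)
            wg.Wait()
            fmt.Printf("Burst completed: %d events in %s\n", perBurst, time.Since(burstStart))
        }
        fmt.Printf("Burst test completed: %d events in %s\n", bursts*perBurst, time.Since(start))
    }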
@@ -0,0 +1,323 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly-dgraph_8
Events: 50000, Workers: 24, Duration: 1m0s
1763396600574205ℹ️ /tmp/benchmark_next-orly-dgraph_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56
1763396600577795ℹ️ /tmp/benchmark_next-orly-dgraph_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56
1763396600577852ℹ️ /tmp/benchmark_next-orly-dgraph_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56
1763396600578216ℹ️ migrating to version 1... /build/pkg/database/migrations.go:66
1763396600578287ℹ️ migrating to version 2... /build/pkg/database/migrations.go:73
1763396600578319ℹ️ migrating to version 3... /build/pkg/database/migrations.go:80
1763396600578325ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287
1763396600578334ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332
1763396600578350ℹ️ migrating to version 4... /build/pkg/database/migrations.go:87
1763396600578355ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340
1763396600578372ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429
1763396600578378ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538

=== Starting test round 1/2 ===
RunPeakThroughputTest..

=== Peak Throughput Test ===
2025/11/17 16:23:20 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2
1763396602578437ℹ️ /tmp/benchmark_next-orly-dgraph_8: database warmup complete, ready to serve requests
/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56
Events saved: 50000/50000 (100.0%)
Duration: 4.932431923s
Events/sec: 10136.99
Avg latency: 1.667317ms
P90 latency: 2.069461ms
P95 latency: 2.249895ms
P99 latency: 2.861303ms
Bottom 10% Avg latency: 2.592597ms
RunBurstPatternTest..

=== Burst Pattern Test ===
Burst completed: 5000 events in 335.655402ms
Burst completed: 5000 events in 330.360552ms
Burst completed: 5000 events in 350.90491ms
Burst completed: 5000 events in 373.041958ms
Burst completed: 5000 events in 347.11564ms
Burst completed: 5000 events in 315.949199ms
Burst completed: 5000 events in 331.42993ms
Burst completed: 5000 events in 352.164361ms
Burst completed: 5000 events in 359.115619ms
Burst completed: 5000 events in 360.397544ms
Burst test completed: 50000 events in 9.808342155s
Events/sec: 5097.70
RunMixedReadWriteTest..

=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 25000 writes, 25000 reads in 24.59623701s
Combined ops/sec: 2032.83
1763396660567060⚠️ /tmp/benchmark_next-orly-dgraph_8: Block cache might be too small. Metrics: hit: 153935 miss: 305257 keys-added: 227607 keys-updated: 64636 keys-evicted: 227588 cost-added: 12452581576986 cost-evicted: 12451583862757 sets-dropped: 0 sets-rejected: 12954 gets-dropped: 256 gets-kept: 458496 gets-total: 459192 hit-ratio: 0.34
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:450 /build/pkg/database/logger.go:46
1763396660567121⚠️ /tmp/benchmark_next-orly-dgraph_8: Cache life expectancy (in seconds):
 -- Histogram:
Min value: 0
Max value: 11
Count: 227588
50p: 2.00
75p: 2.00
90p: 2.00
[0, 2) 227552 99.98% 99.98%
[8, 16) 36 0.02% 100.00%
 --
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:451 /build/pkg/database/logger.go:46
RunQueryTest..

=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 221626 queries in 1m0.014161671s
Queries/sec: 3692.90
Avg query latency: 4.849059ms
P95 query latency: 15.966874ms
P99 query latency: 27.859712ms
RunConcurrentQueryStoreTest..

=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 235023 operations (185023 queries, 50000 writes) in 1m0.005568823s
Operations/sec: 3916.69
Avg latency: 2.401379ms
Avg query latency: 2.672573ms
Avg write latency: 1.397837ms
P95 latency: 4.398002ms
P99 latency: 6.207183ms

Pausing 10s before next round...

=== Test round completed ===


=== Starting test round 2/2 ===
RunPeakThroughputTest..

=== Peak Throughput Test ===
Events saved: 50000/50000 (100.0%)
Duration: 5.127096799s
Events/sec: 9752.11
Avg latency: 1.795821ms
P90 latency: 2.25461ms
P95 latency: 2.466785ms
P99 latency: 3.159176ms
Bottom 10% Avg latency: 3.072242ms
RunBurstPatternTest..

=== Burst Pattern Test ===
Burst completed: 5000 events in 358.012209ms
Burst completed: 5000 events in 336.300441ms
Burst completed: 5000 events in 363.657063ms
Burst completed: 5000 events in 356.771817ms
Burst completed: 5000 events in 368.000986ms
Burst completed: 5000 events in 441.821658ms
Burst completed: 5000 events in 451.146122ms
Burst completed: 5000 events in 455.159014ms
Burst completed: 5000 events in 359.826504ms
Burst completed: 5000 events in 358.602207ms
1763396835570723ℹ️ /tmp/benchmark_next-orly-dgraph_8: [6] [E] LOG Compact 0->6 (8, 0 -> 4 tables with 1 splits). [00001 00002 00003 00004 00005 00006 00007 00008 . .] -> [00009 00010 00011 00012 .], took 3.055s, deleted 1901003 bytes
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:1479 /build/pkg/database/logger.go:56
Burst test completed: 50000 events in 10.25458455s
Events/sec: 4875.87
RunMixedReadWriteTest..

=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 25000 writes, 25000 reads in 24.474786024s
Combined ops/sec: 2042.92
RunQueryTest..

=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 287727 queries in 1m0.012156857s
Queries/sec: 4794.48
Avg query latency: 3.504598ms
P95 query latency: 11.416502ms
P99 query latency: 19.871886ms
RunConcurrentQueryStoreTest..

=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
1763396960566384ℹ️ /tmp/benchmark_next-orly-dgraph_8: Block cache metrics: hit: 436764091 miss: 4871096 keys-added: 1584381 keys-updated: 2919606 keys-evicted: 1584361 cost-added: 83226283032882 cost-evicted: 83225259887553 sets-dropped: 0 sets-rejected: 305847 gets-dropped: 344794880 gets-kept: 96734656 gets-total: 441635219 hit-ratio: 0.99
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
Concurrent test completed: 252209 operations (202209 queries, 50000 writes) in 1m0.008028818s
Operations/sec: 4202.92
Avg latency: 2.189461ms
Avg query latency: 2.337704ms
Avg write latency: 1.58994ms
P95 latency: 3.919323ms
P99 latency: 5.959314ms

=== Test round completed ===


================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 4.932431923s
Total Events: 50000
Events/sec: 10136.99
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 432 MB
Avg Latency: 1.667317ms
P90 Latency: 2.069461ms
P95 Latency: 2.249895ms
P99 Latency: 2.861303ms
Bottom 10% Avg Latency: 2.592597ms
----------------------------------------

Test: Burst Pattern
Duration: 9.808342155s
Total Events: 50000
Events/sec: 5097.70
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 149 MB
Avg Latency: 3.805495ms
P90 Latency: 6.632151ms
P95 Latency: 8.069195ms
P99 Latency: 13.244195ms
Bottom 10% Avg Latency: 9.922762ms
----------------------------------------

Test: Mixed Read/Write
Duration: 24.59623701s
Total Events: 50000
Events/sec: 2032.83
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 121 MB
Avg Latency: 467.746µs
P90 Latency: 911.189µs
P95 Latency: 1.018554ms
P99 Latency: 1.250848ms
Bottom 10% Avg Latency: 1.345857ms
----------------------------------------

Test: Query Performance
Duration: 1m0.014161671s
Total Events: 221626
Events/sec: 3692.90
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 125 MB
Avg Latency: 4.849059ms
P90 Latency: 10.564822ms
P95 Latency: 15.966874ms
P99 Latency: 27.859712ms
Bottom 10% Avg Latency: 18.180391ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.005568823s
Total Events: 235023
Events/sec: 3916.69
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 177 MB
Avg Latency: 2.401379ms
P90 Latency: 3.659643ms
P95 Latency: 4.398002ms
P99 Latency: 6.207183ms
Bottom 10% Avg Latency: 4.857955ms
----------------------------------------

Test: Peak Throughput
Duration: 5.127096799s
Total Events: 50000
Events/sec: 9752.11
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 480 MB
Avg Latency: 1.795821ms
P90 Latency: 2.25461ms
P95 Latency: 2.466785ms
P99 Latency: 3.159176ms
Bottom 10% Avg Latency: 3.072242ms
----------------------------------------

Test: Burst Pattern
Duration: 10.25458455s
Total Events: 50000
Events/sec: 4875.87
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 621 MB
Avg Latency: 9.266976ms
P90 Latency: 24.12544ms
P95 Latency: 34.465042ms
P99 Latency: 55.446215ms
Bottom 10% Avg Latency: 37.317916ms
----------------------------------------

Test: Mixed Read/Write
Duration: 24.474786024s
Total Events: 50000
Events/sec: 2042.92
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 182 MB
Avg Latency: 452.46µs
P90 Latency: 909.806µs
P95 Latency: 1.014516ms
P99 Latency: 1.214797ms
Bottom 10% Avg Latency: 1.304994ms
----------------------------------------

Test: Query Performance
Duration: 1m0.012156857s
Total Events: 287727
Events/sec: 4794.48
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 150 MB
Avg Latency: 3.504598ms
P90 Latency: 7.480817ms
P95 Latency: 11.416502ms
P99 Latency: 19.871886ms
Bottom 10% Avg Latency: 12.934864ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.008028818s
Total Events: 252209
Events/sec: 4202.92
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 98 MB
Avg Latency: 2.189461ms
P90 Latency: 3.213337ms
P95 Latency: 3.919323ms
P99 Latency: 5.959314ms
Bottom 10% Avg Latency: 4.521426ms
----------------------------------------

Report saved to: /tmp/benchmark_next-orly-dgraph_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly-dgraph_8/benchmark_report.adoc
1763397010410098ℹ️ /tmp/benchmark_next-orly-dgraph_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:56
1763397011943178ℹ️ /tmp/benchmark_next-orly-dgraph_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 4 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 5 [ ]: NumTables: 01. Size: 94 MiB of 23 MiB. Score: 4.08->4.08 StaleData: 0 B Target FileSize: 122 MiB
Level 6 [ ]: NumTables: 04. Size: 230 MiB of 230 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 244 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:56

RELAY_NAME: next-orly-dgraph
RELAY_URL: ws://next-orly-dgraph:8080
TEST_TIMESTAMP: 2025-11-17T16:30:12+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
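The "Operations/sec" figures in these logs are simply completed operations divided by elapsed wall time; for the final dgraph round, 252209 operations / 60.008 s ≈ 4202.92. A minimal Go sketch of the concurrent query/store mix (24 workers interleaving roughly four queries per write until a fixed deadline), with runQuery and storeEvent as hypothetical stand-ins for the real relay calls:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
        "time"
    )

    func runQuery()   { time.Sleep(200 * time.Microsecond) } // stand-in for a relay query
    func storeEvent() { time.Sleep(100 * time.Microsecond) } // stand-in for a relay write

    func main() {
        const workers = 24
        deadline := time.Now().Add(60 * time.Second)
        var queries, writes atomic.Int64
        var wg sync.WaitGroup
        for w := 0; w < workers; w++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                for i := 0; time.Now().Before(deadline); i++ {
                    if i%5 == 0 { // roughly one write per four queries
                        storeEvent()
                        writes.Add(1)
                    } else {
                        runQuery()
                        queries.Add(1)
                    }
                }
            }()
        }
        wg.Wait()
        total := queries.Load() + writes.Load()
        fmt.Printf("Concurrent test completed: %d operations (%d queries, %d writes)\n",
            total, queries.Load(), writes.Load())
    }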
19
cmd/benchmark/run-badger-benchmark.sh
Executable file
@@ -0,0 +1,19 @@
#!/bin/bash
# Run Badger benchmark with reduced cache sizes to avoid OOM

# Set reasonable cache sizes for benchmark
export ORLY_DB_BLOCK_CACHE_MB=256 # Reduced from 1024MB
export ORLY_DB_INDEX_CACHE_MB=128 # Reduced from 512MB
export ORLY_QUERY_CACHE_SIZE_MB=128 # Reduced from 512MB

# Clean up old data
rm -rf /tmp/benchmark_db_badger

echo "Running Badger benchmark with reduced cache sizes:"
echo " Block Cache: ${ORLY_DB_BLOCK_CACHE_MB}MB"
echo " Index Cache: ${ORLY_DB_INDEX_CACHE_MB}MB"
echo " Query Cache: ${ORLY_QUERY_CACHE_SIZE_MB}MB"
echo ""

# Run benchmark
./benchmark -events "${1:-1000}" -workers "${2:-4}" -datadir /tmp/benchmark_db_badger
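Assuming the script sits next to the built benchmark binary, a typical invocation would be ./run-badger-benchmark.sh 50000 24, where the two positional arguments override the default event count (1000) and worker count (4) while keeping the reduced cache sizes.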
@@ -31,8 +31,8 @@ fi

# Create fresh data directories with correct permissions
echo "Preparing data directories..."
mkdir -p data/{next-orly,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,postgres}
chmod 777 data/{next-orly,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,postgres}
mkdir -p data/{next-orly-badger,next-orly-dgraph,dgraph-zero,dgraph-alpha,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,postgres}
chmod 777 data/{next-orly-badger,next-orly-dgraph,dgraph-zero,dgraph-alpha,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,postgres}

echo "Starting benchmark suite..."
echo "This will automatically shut down all containers when the benchmark completes."