From 038d1959ed0eeb91ad82c858e409a77e88ec5a18 Mon Sep 17 00:00:00 2001 From: mleku Date: Mon, 17 Nov 2025 16:52:38 +0000 Subject: [PATCH] add dgraph backend to benchmark suite with safe type assertions for multi-backend support --- .claude/settings.local.json | 11 +- app/main.go | 127 ++-- cmd/benchmark/README.md | 52 +- cmd/benchmark/benchmark_adapter.go | 574 ++++++++++++++++++ cmd/benchmark/dgraph_benchmark.go | 122 ++++ cmd/benchmark/dgraph_docker.go | 160 +++++ cmd/benchmark/docker-compose-dgraph.yml | 44 ++ cmd/benchmark/docker-compose.yml | 87 ++- cmd/benchmark/main.go | 53 +- .../run_20251117_154730/next-orly_results.txt | 134 ++++ .../khatru-badger_results.txt | 53 ++ .../khatru-sqlite_results.txt | 323 ++++++++++ .../next-orly-badger_results.txt | 311 ++++++++++ .../next-orly-dgraph_results.txt | 323 ++++++++++ cmd/benchmark/run-badger-benchmark.sh | 19 + cmd/benchmark/run-benchmark.sh | 4 +- pkg/database/get-serial-by-id.go | 8 +- pkg/database/get-serials-by-range.go | 12 +- pkg/database/query-for-deleted.go | 19 +- pkg/database/save-event.go | 28 +- pkg/dgraph/delete.go | 84 ++- pkg/dgraph/dgraph.go | 42 +- pkg/dgraph/fetch-event.go | 170 +++++- pkg/dgraph/import-export.go | 82 ++- pkg/dgraph/query-events.go | 55 +- pkg/dgraph/save-event.go | 6 +- 26 files changed, 2717 insertions(+), 186 deletions(-) create mode 100644 cmd/benchmark/benchmark_adapter.go create mode 100644 cmd/benchmark/dgraph_benchmark.go create mode 100644 cmd/benchmark/dgraph_docker.go create mode 100644 cmd/benchmark/docker-compose-dgraph.yml create mode 100644 cmd/benchmark/reports/run_20251117_154730/next-orly_results.txt create mode 100644 cmd/benchmark/reports/run_20251117_161622/khatru-badger_results.txt create mode 100644 cmd/benchmark/reports/run_20251117_161622/khatru-sqlite_results.txt create mode 100644 cmd/benchmark/reports/run_20251117_161622/next-orly-badger_results.txt create mode 100644 cmd/benchmark/reports/run_20251117_161622/next-orly-dgraph_results.txt create mode 100755 cmd/benchmark/run-badger-benchmark.sh diff --git a/.claude/settings.local.json b/.claude/settings.local.json index 5c27edd..9b3f767 100644 --- a/.claude/settings.local.json +++ b/.claude/settings.local.json @@ -83,7 +83,16 @@ "Bash(ORLY_LOG_LEVEL=debug timeout 60 ./orly:*)", "Bash(ORLY_LOG_LEVEL=debug timeout 30 ./orly:*)", "Bash(killall:*)", - "Bash(kill:*)" + "Bash(kill:*)", + "Bash(gh repo list:*)", + "Bash(gh auth:*)", + "Bash(/tmp/backup-github-repos.sh)", + "Bash(./benchmark:*)", + "Bash(env)", + "Bash(./run-badger-benchmark.sh:*)", + "Bash(./update-github-vpn.sh:*)", + "Bash(dmesg:*)", + "Bash(export:*)" ], "deny": [], "ask": [] diff --git a/app/main.go b/app/main.go index a03b249..fd62b94 100644 --- a/app/main.go +++ b/app/main.go @@ -85,9 +85,9 @@ func Run( // Initialize policy manager l.policyManager = policy.NewWithManager(ctx, cfg.AppName, cfg.PolicyEnabled) - // Initialize spider manager based on mode - if cfg.SpiderMode != "none" { - if l.spiderManager, err = spider.New(ctx, db.(*database.D), l.publishers, cfg.SpiderMode); chk.E(err) { + // Initialize spider manager based on mode (only for Badger backend) + if badgerDB, ok := db.(*database.D); ok && cfg.SpiderMode != "none" { + if l.spiderManager, err = spider.New(ctx, badgerDB, l.publishers, cfg.SpiderMode); chk.E(err) { log.E.F("failed to create spider manager: %v", err) } else { // Set up callbacks for follows mode @@ -141,67 +141,79 @@ func Run( } } - // Initialize relay group manager - l.relayGroupMgr = dsync.NewRelayGroupManager(db.(*database.D), 
cfg.RelayGroupAdmins) - - // Initialize sync manager if relay peers are configured - var peers []string - if len(cfg.RelayPeers) > 0 { - peers = cfg.RelayPeers - } else { - // Try to get peers from relay group configuration - if config, err := l.relayGroupMgr.FindAuthoritativeConfig(ctx); err == nil && config != nil { - peers = config.Relays - log.I.F("using relay group configuration with %d peers", len(peers)) - } + // Initialize relay group manager (only for Badger backend) + if badgerDB, ok := db.(*database.D); ok { + l.relayGroupMgr = dsync.NewRelayGroupManager(badgerDB, cfg.RelayGroupAdmins) + } else if cfg.SpiderMode != "none" || len(cfg.RelayPeers) > 0 || len(cfg.ClusterAdmins) > 0 { + log.I.Ln("spider, sync, and cluster features require Badger backend (currently using alternative backend)") } - if len(peers) > 0 { - // Get relay identity for node ID - sk, err := db.GetOrCreateRelayIdentitySecret() - if err != nil { - log.E.F("failed to get relay identity for sync: %v", err) + // Initialize sync manager if relay peers are configured (only for Badger backend) + if badgerDB, ok := db.(*database.D); ok { + var peers []string + if len(cfg.RelayPeers) > 0 { + peers = cfg.RelayPeers } else { - nodeID, err := keys.SecretBytesToPubKeyHex(sk) - if err != nil { - log.E.F("failed to derive pubkey for sync node ID: %v", err) - } else { - relayURL := cfg.RelayURL - if relayURL == "" { - relayURL = fmt.Sprintf("http://localhost:%d", cfg.Port) + // Try to get peers from relay group configuration + if l.relayGroupMgr != nil { + if config, err := l.relayGroupMgr.FindAuthoritativeConfig(ctx); err == nil && config != nil { + peers = config.Relays + log.I.F("using relay group configuration with %d peers", len(peers)) + } + } + } + + if len(peers) > 0 { + // Get relay identity for node ID + sk, err := db.GetOrCreateRelayIdentitySecret() + if err != nil { + log.E.F("failed to get relay identity for sync: %v", err) + } else { + nodeID, err := keys.SecretBytesToPubKeyHex(sk) + if err != nil { + log.E.F("failed to derive pubkey for sync node ID: %v", err) + } else { + relayURL := cfg.RelayURL + if relayURL == "" { + relayURL = fmt.Sprintf("http://localhost:%d", cfg.Port) + } + l.syncManager = dsync.NewManager(ctx, badgerDB, nodeID, relayURL, peers, l.relayGroupMgr, l.policyManager) + log.I.F("distributed sync manager initialized with %d peers", len(peers)) } - l.syncManager = dsync.NewManager(ctx, db.(*database.D), nodeID, relayURL, peers, l.relayGroupMgr, l.policyManager) - log.I.F("distributed sync manager initialized with %d peers", len(peers)) } } } - // Initialize cluster manager for cluster replication - var clusterAdminNpubs []string - if len(cfg.ClusterAdmins) > 0 { - clusterAdminNpubs = cfg.ClusterAdmins - } else { - // Default to regular admins if no cluster admins specified - for _, admin := range cfg.Admins { - clusterAdminNpubs = append(clusterAdminNpubs, admin) + // Initialize cluster manager for cluster replication (only for Badger backend) + if badgerDB, ok := db.(*database.D); ok { + var clusterAdminNpubs []string + if len(cfg.ClusterAdmins) > 0 { + clusterAdminNpubs = cfg.ClusterAdmins + } else { + // Default to regular admins if no cluster admins specified + for _, admin := range cfg.Admins { + clusterAdminNpubs = append(clusterAdminNpubs, admin) + } } - } - if len(clusterAdminNpubs) > 0 { - l.clusterManager = dsync.NewClusterManager(ctx, db.(*database.D), clusterAdminNpubs, cfg.ClusterPropagatePrivilegedEvents, l.publishers) - l.clusterManager.Start() - log.I.F("cluster replication 
manager initialized with %d admin npubs", len(clusterAdminNpubs)) + if len(clusterAdminNpubs) > 0 { + l.clusterManager = dsync.NewClusterManager(ctx, badgerDB, clusterAdminNpubs, cfg.ClusterPropagatePrivilegedEvents, l.publishers) + l.clusterManager.Start() + log.I.F("cluster replication manager initialized with %d admin npubs", len(clusterAdminNpubs)) + } } // Initialize the user interface l.UserInterface() - // Initialize Blossom blob storage server - if l.blossomServer, err = initializeBlossomServer(ctx, cfg, db.(*database.D)); err != nil { - log.E.F("failed to initialize blossom server: %v", err) - // Continue without blossom server - } else if l.blossomServer != nil { - log.I.F("blossom blob storage server initialized") + // Initialize Blossom blob storage server (only for Badger backend) + if badgerDB, ok := db.(*database.D); ok { + if l.blossomServer, err = initializeBlossomServer(ctx, cfg, badgerDB); err != nil { + log.E.F("failed to initialize blossom server: %v", err) + // Continue without blossom server + } else if l.blossomServer != nil { + log.I.F("blossom blob storage server initialized") + } } // Ensure a relay identity secret key exists when subscriptions and NWC are enabled @@ -237,14 +249,17 @@ func Run( } } - if l.paymentProcessor, err = NewPaymentProcessor(ctx, cfg, db.(*database.D)); err != nil { - // log.E.F("failed to create payment processor: %v", err) - // Continue without payment processor - } else { - if err = l.paymentProcessor.Start(); err != nil { - log.E.F("failed to start payment processor: %v", err) + // Initialize payment processor (only for Badger backend) + if badgerDB, ok := db.(*database.D); ok { + if l.paymentProcessor, err = NewPaymentProcessor(ctx, cfg, badgerDB); err != nil { + // log.E.F("failed to create payment processor: %v", err) + // Continue without payment processor } else { - log.I.F("payment processor started successfully") + if err = l.paymentProcessor.Start(); err != nil { + log.E.F("failed to start payment processor: %v", err) + } else { + log.I.F("payment processor started successfully") + } } } diff --git a/cmd/benchmark/README.md b/cmd/benchmark/README.md index 73b334e..0907401 100644 --- a/cmd/benchmark/README.md +++ b/cmd/benchmark/README.md @@ -2,7 +2,7 @@ A comprehensive benchmarking system for testing and comparing the performance of multiple Nostr relay implementations, including: -- **next.orly.dev** (this repository) - BadgerDB-based relay +- **next.orly.dev** (this repository) - Badger and DGraph backend variants - **Khatru** - SQLite and Badger variants - **Relayer** - Basic example implementation - **Strfry** - C++ LMDB-based relay @@ -91,13 +91,16 @@ ls reports/run_YYYYMMDD_HHMMSS/ ### Docker Compose Services -| Service | Port | Description | -| ---------------- | ---- | ----------------------------------------- | -| next-orly | 8001 | This repository's BadgerDB relay | -| khatru-sqlite | 8002 | Khatru with SQLite backend | -| khatru-badger | 8003 | Khatru with Badger backend | -| relayer-basic | 8004 | Basic relayer example | -| strfry | 8005 | Strfry C++ LMDB relay | +| Service | Port | Description | +| ------------------ | ---- | ----------------------------------------- | +| next-orly-badger | 8001 | This repository's Badger relay | +| next-orly-dgraph | 8007 | This repository's DGraph relay | +| dgraph-zero | 5080 | DGraph cluster coordinator | +| dgraph-alpha | 9080 | DGraph data node | +| khatru-sqlite | 8002 | Khatru with SQLite backend | +| khatru-badger | 8003 | Khatru with Badger backend | +| relayer-basic | 
8004 | Basic relayer example | +| strfry | 8005 | Strfry C++ LMDB relay | | nostr-rs-relay | 8006 | Rust SQLite relay | | benchmark-runner | - | Orchestrates tests and aggregates results | @@ -173,6 +176,39 @@ go build -o benchmark main.go -duration=30s ``` +## Database Backend Comparison + +The benchmark suite includes **next.orly.dev** with two different database backends to compare architectural approaches: + +### Badger Backend (next-orly-badger) +- **Type**: Embedded key-value store +- **Architecture**: Single-process, no network overhead +- **Best for**: Personal relays, single-instance deployments +- **Characteristics**: + - Lower latency for single-instance operations + - No network round-trips + - Simpler deployment + - Limited to single-node scaling + +### DGraph Backend (next-orly-dgraph) +- **Type**: Distributed graph database +- **Architecture**: Client-server with dgraph-zero (coordinator) and dgraph-alpha (data node) +- **Best for**: Distributed deployments, horizontal scaling +- **Characteristics**: + - Network overhead from gRPC communication + - Supports multi-node clustering + - Built-in replication and sharding + - More complex deployment + +### Comparing the Backends + +The benchmark results will show: +- **Latency differences**: Embedded vs. distributed overhead +- **Throughput trade-offs**: Single-process optimization vs. distributed scalability +- **Resource usage**: Memory and CPU patterns for different architectures + +This comparison helps determine which backend is appropriate for different deployment scenarios. + ## Benchmark Results Interpretation ### Peak Throughput Test diff --git a/cmd/benchmark/benchmark_adapter.go b/cmd/benchmark/benchmark_adapter.go new file mode 100644 index 0000000..dcd7e47 --- /dev/null +++ b/cmd/benchmark/benchmark_adapter.go @@ -0,0 +1,574 @@ +package main + +import ( + "context" + "fmt" + "sort" + "sync" + "time" + + "next.orly.dev/pkg/database" + "next.orly.dev/pkg/encoders/event" + "next.orly.dev/pkg/encoders/filter" + "next.orly.dev/pkg/encoders/kind" + "next.orly.dev/pkg/encoders/tag" + "next.orly.dev/pkg/encoders/timestamp" + "next.orly.dev/pkg/interfaces/signer/p8k" +) + +// BenchmarkAdapter adapts a database.Database interface to work with benchmark tests +type BenchmarkAdapter struct { + config *BenchmarkConfig + db database.Database + results []*BenchmarkResult + mu sync.RWMutex +} + +// NewBenchmarkAdapter creates a new benchmark adapter +func NewBenchmarkAdapter(config *BenchmarkConfig, db database.Database) *BenchmarkAdapter { + return &BenchmarkAdapter{ + config: config, + db: db, + results: make([]*BenchmarkResult, 0), + } +} + +// RunPeakThroughputTest runs the peak throughput benchmark +func (ba *BenchmarkAdapter) RunPeakThroughputTest() { + fmt.Println("\n=== Peak Throughput Test ===") + + start := time.Now() + var wg sync.WaitGroup + var totalEvents int64 + var errors []error + var latencies []time.Duration + var mu sync.Mutex + + events := ba.generateEvents(ba.config.NumEvents) + eventChan := make(chan *event.E, len(events)) + + // Fill event channel + for _, ev := range events { + eventChan <- ev + } + close(eventChan) + + // Start workers + for i := 0; i < ba.config.ConcurrentWorkers; i++ { + wg.Add(1) + go func(workerID int) { + defer wg.Done() + + ctx := context.Background() + for ev := range eventChan { + eventStart := time.Now() + + _, err := ba.db.SaveEvent(ctx, ev) + latency := time.Since(eventStart) + + mu.Lock() + if err != nil { + errors = append(errors, err) + } else { + totalEvents++ + latencies = 
append(latencies, latency) + } + mu.Unlock() + } + }(i) + } + + wg.Wait() + duration := time.Since(start) + + // Calculate metrics + result := &BenchmarkResult{ + TestName: "Peak Throughput", + Duration: duration, + TotalEvents: int(totalEvents), + EventsPerSecond: float64(totalEvents) / duration.Seconds(), + ConcurrentWorkers: ba.config.ConcurrentWorkers, + MemoryUsed: getMemUsage(), + } + + if len(latencies) > 0 { + sort.Slice(latencies, func(i, j int) bool { + return latencies[i] < latencies[j] + }) + result.AvgLatency = calculateAverage(latencies) + result.P90Latency = latencies[int(float64(len(latencies))*0.90)] + result.P95Latency = latencies[int(float64(len(latencies))*0.95)] + result.P99Latency = latencies[int(float64(len(latencies))*0.99)] + + bottom10 := latencies[:int(float64(len(latencies))*0.10)] + result.Bottom10Avg = calculateAverage(bottom10) + } + + result.SuccessRate = float64(totalEvents) / float64(ba.config.NumEvents) * 100 + if len(errors) > 0 { + result.Errors = make([]string, 0, len(errors)) + for _, err := range errors { + result.Errors = append(result.Errors, err.Error()) + } + } + + ba.mu.Lock() + ba.results = append(ba.results, result) + ba.mu.Unlock() + + ba.printResult(result) +} + +// RunBurstPatternTest runs burst pattern test +func (ba *BenchmarkAdapter) RunBurstPatternTest() { + fmt.Println("\n=== Burst Pattern Test ===") + + start := time.Now() + var totalEvents int64 + var latencies []time.Duration + var mu sync.Mutex + + ctx := context.Background() + burstSize := 100 + bursts := ba.config.NumEvents / burstSize + + for i := 0; i < bursts; i++ { + // Generate a burst of events + events := ba.generateEvents(burstSize) + + var wg sync.WaitGroup + for _, ev := range events { + wg.Add(1) + go func(e *event.E) { + defer wg.Done() + + eventStart := time.Now() + _, err := ba.db.SaveEvent(ctx, e) + latency := time.Since(eventStart) + + mu.Lock() + if err == nil { + totalEvents++ + latencies = append(latencies, latency) + } + mu.Unlock() + }(ev) + } + + wg.Wait() + + // Short pause between bursts + time.Sleep(10 * time.Millisecond) + } + + duration := time.Since(start) + + result := &BenchmarkResult{ + TestName: "Burst Pattern", + Duration: duration, + TotalEvents: int(totalEvents), + EventsPerSecond: float64(totalEvents) / duration.Seconds(), + ConcurrentWorkers: burstSize, + MemoryUsed: getMemUsage(), + SuccessRate: float64(totalEvents) / float64(ba.config.NumEvents) * 100, + } + + if len(latencies) > 0 { + sort.Slice(latencies, func(i, j int) bool { + return latencies[i] < latencies[j] + }) + result.AvgLatency = calculateAverage(latencies) + result.P90Latency = latencies[int(float64(len(latencies))*0.90)] + result.P95Latency = latencies[int(float64(len(latencies))*0.95)] + result.P99Latency = latencies[int(float64(len(latencies))*0.99)] + + bottom10 := latencies[:int(float64(len(latencies))*0.10)] + result.Bottom10Avg = calculateAverage(bottom10) + } + + ba.mu.Lock() + ba.results = append(ba.results, result) + ba.mu.Unlock() + + ba.printResult(result) +} + +// RunMixedReadWriteTest runs mixed read/write test +func (ba *BenchmarkAdapter) RunMixedReadWriteTest() { + fmt.Println("\n=== Mixed Read/Write Test ===") + + // First, populate some events + fmt.Println("Populating database with initial events...") + populateEvents := ba.generateEvents(1000) + ctx := context.Background() + + for _, ev := range populateEvents { + ba.db.SaveEvent(ctx, ev) + } + + start := time.Now() + var writeCount, readCount int64 + var latencies []time.Duration + var mu sync.Mutex + var wg 
sync.WaitGroup + + // Start workers doing mixed read/write + for i := 0; i < ba.config.ConcurrentWorkers; i++ { + wg.Add(1) + go func(workerID int) { + defer wg.Done() + + events := ba.generateEvents(ba.config.NumEvents / ba.config.ConcurrentWorkers) + + for idx, ev := range events { + eventStart := time.Now() + + if idx%3 == 0 { + // Read operation + f := filter.New() + f.Kinds = kind.NewS(kind.TextNote) + limit := uint(10) + f.Limit = &limit + _, _ = ba.db.QueryEvents(ctx, f) + + mu.Lock() + readCount++ + mu.Unlock() + } else { + // Write operation + _, _ = ba.db.SaveEvent(ctx, ev) + + mu.Lock() + writeCount++ + mu.Unlock() + } + + latency := time.Since(eventStart) + mu.Lock() + latencies = append(latencies, latency) + mu.Unlock() + } + }(i) + } + + wg.Wait() + duration := time.Since(start) + + result := &BenchmarkResult{ + TestName: fmt.Sprintf("Mixed R/W (R:%d W:%d)", readCount, writeCount), + Duration: duration, + TotalEvents: int(writeCount + readCount), + EventsPerSecond: float64(writeCount+readCount) / duration.Seconds(), + ConcurrentWorkers: ba.config.ConcurrentWorkers, + MemoryUsed: getMemUsage(), + SuccessRate: 100.0, + } + + if len(latencies) > 0 { + sort.Slice(latencies, func(i, j int) bool { + return latencies[i] < latencies[j] + }) + result.AvgLatency = calculateAverage(latencies) + result.P90Latency = latencies[int(float64(len(latencies))*0.90)] + result.P95Latency = latencies[int(float64(len(latencies))*0.95)] + result.P99Latency = latencies[int(float64(len(latencies))*0.99)] + + bottom10 := latencies[:int(float64(len(latencies))*0.10)] + result.Bottom10Avg = calculateAverage(bottom10) + } + + ba.mu.Lock() + ba.results = append(ba.results, result) + ba.mu.Unlock() + + ba.printResult(result) +} + +// RunQueryTest runs query performance test +func (ba *BenchmarkAdapter) RunQueryTest() { + fmt.Println("\n=== Query Performance Test ===") + + // Populate with test data + fmt.Println("Populating database for query tests...") + events := ba.generateEvents(5000) + ctx := context.Background() + + for _, ev := range events { + ba.db.SaveEvent(ctx, ev) + } + + start := time.Now() + var queryCount int64 + var latencies []time.Duration + var mu sync.Mutex + var wg sync.WaitGroup + + queryTypes := []func() *filter.F{ + func() *filter.F { + f := filter.New() + f.Kinds = kind.NewS(kind.TextNote) + limit := uint(100) + f.Limit = &limit + return f + }, + func() *filter.F { + f := filter.New() + f.Kinds = kind.NewS(kind.TextNote, kind.Repost) + limit := uint(50) + f.Limit = &limit + return f + }, + func() *filter.F { + f := filter.New() + limit := uint(10) + f.Limit = &limit + since := time.Now().Add(-1 * time.Hour).Unix() + f.Since = timestamp.FromUnix(since) + return f + }, + } + + // Run concurrent queries + iterations := 1000 + for i := 0; i < ba.config.ConcurrentWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + for j := 0; j < iterations/ba.config.ConcurrentWorkers; j++ { + f := queryTypes[j%len(queryTypes)]() + + queryStart := time.Now() + _, _ = ba.db.QueryEvents(ctx, f) + latency := time.Since(queryStart) + + mu.Lock() + queryCount++ + latencies = append(latencies, latency) + mu.Unlock() + } + }() + } + + wg.Wait() + duration := time.Since(start) + + result := &BenchmarkResult{ + TestName: fmt.Sprintf("Query Performance (%d queries)", queryCount), + Duration: duration, + TotalEvents: int(queryCount), + EventsPerSecond: float64(queryCount) / duration.Seconds(), + ConcurrentWorkers: ba.config.ConcurrentWorkers, + MemoryUsed: getMemUsage(), + SuccessRate: 100.0, + } + + if 
len(latencies) > 0 { + sort.Slice(latencies, func(i, j int) bool { + return latencies[i] < latencies[j] + }) + result.AvgLatency = calculateAverage(latencies) + result.P90Latency = latencies[int(float64(len(latencies))*0.90)] + result.P95Latency = latencies[int(float64(len(latencies))*0.95)] + result.P99Latency = latencies[int(float64(len(latencies))*0.99)] + + bottom10 := latencies[:int(float64(len(latencies))*0.10)] + result.Bottom10Avg = calculateAverage(bottom10) + } + + ba.mu.Lock() + ba.results = append(ba.results, result) + ba.mu.Unlock() + + ba.printResult(result) +} + +// RunConcurrentQueryStoreTest runs concurrent query and store test +func (ba *BenchmarkAdapter) RunConcurrentQueryStoreTest() { + fmt.Println("\n=== Concurrent Query+Store Test ===") + + start := time.Now() + var storeCount, queryCount int64 + var latencies []time.Duration + var mu sync.Mutex + var wg sync.WaitGroup + + ctx := context.Background() + + // Half workers write, half query + halfWorkers := ba.config.ConcurrentWorkers / 2 + if halfWorkers < 1 { + halfWorkers = 1 + } + + // Writers + for i := 0; i < halfWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + events := ba.generateEvents(ba.config.NumEvents / halfWorkers) + for _, ev := range events { + eventStart := time.Now() + ba.db.SaveEvent(ctx, ev) + latency := time.Since(eventStart) + + mu.Lock() + storeCount++ + latencies = append(latencies, latency) + mu.Unlock() + } + }() + } + + // Readers + for i := 0; i < halfWorkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + for j := 0; j < ba.config.NumEvents/halfWorkers; j++ { + f := filter.New() + f.Kinds = kind.NewS(kind.TextNote) + limit := uint(10) + f.Limit = &limit + + queryStart := time.Now() + ba.db.QueryEvents(ctx, f) + latency := time.Since(queryStart) + + mu.Lock() + queryCount++ + latencies = append(latencies, latency) + mu.Unlock() + + time.Sleep(1 * time.Millisecond) + } + }() + } + + wg.Wait() + duration := time.Since(start) + + result := &BenchmarkResult{ + TestName: fmt.Sprintf("Concurrent Q+S (Q:%d S:%d)", queryCount, storeCount), + Duration: duration, + TotalEvents: int(storeCount + queryCount), + EventsPerSecond: float64(storeCount+queryCount) / duration.Seconds(), + ConcurrentWorkers: ba.config.ConcurrentWorkers, + MemoryUsed: getMemUsage(), + SuccessRate: 100.0, + } + + if len(latencies) > 0 { + sort.Slice(latencies, func(i, j int) bool { + return latencies[i] < latencies[j] + }) + result.AvgLatency = calculateAverage(latencies) + result.P90Latency = latencies[int(float64(len(latencies))*0.90)] + result.P95Latency = latencies[int(float64(len(latencies))*0.95)] + result.P99Latency = latencies[int(float64(len(latencies))*0.99)] + + bottom10 := latencies[:int(float64(len(latencies))*0.10)] + result.Bottom10Avg = calculateAverage(bottom10) + } + + ba.mu.Lock() + ba.results = append(ba.results, result) + ba.mu.Unlock() + + ba.printResult(result) +} + +// generateEvents generates test events with proper signatures +func (ba *BenchmarkAdapter) generateEvents(count int) []*event.E { + events := make([]*event.E, count) + + // Create a test signer + signer := p8k.MustNew() + if err := signer.Generate(); err != nil { + panic(fmt.Sprintf("failed to generate test key: %v", err)) + } + + for i := 0; i < count; i++ { + ev := event.New() + ev.Kind = kind.TextNote.ToU16() + ev.CreatedAt = time.Now().Unix() + ev.Content = []byte(fmt.Sprintf("Benchmark event #%d - Testing Nostr relay performance with automated load generation", i)) + ev.Tags = tag.NewS() + + // Add some tags for variety 
+ if i%10 == 0 { + benchmarkTag := tag.NewFromBytesSlice([]byte("t"), []byte("benchmark")) + ev.Tags.Append(benchmarkTag) + } + + // Sign the event (sets Pubkey, ID, and Sig) + if err := ev.Sign(signer); err != nil { + panic(fmt.Sprintf("failed to sign event: %v", err)) + } + + events[i] = ev + } + + return events +} + +func (ba *BenchmarkAdapter) printResult(r *BenchmarkResult) { + fmt.Printf("\nResults for %s:\n", r.TestName) + fmt.Printf(" Duration: %v\n", r.Duration) + fmt.Printf(" Total Events: %d\n", r.TotalEvents) + fmt.Printf(" Events/sec: %.2f\n", r.EventsPerSecond) + fmt.Printf(" Success Rate: %.2f%%\n", r.SuccessRate) + fmt.Printf(" Workers: %d\n", r.ConcurrentWorkers) + fmt.Printf(" Memory Used: %.2f MB\n", float64(r.MemoryUsed)/1024/1024) + + if r.AvgLatency > 0 { + fmt.Printf(" Avg Latency: %v\n", r.AvgLatency) + fmt.Printf(" P90 Latency: %v\n", r.P90Latency) + fmt.Printf(" P95 Latency: %v\n", r.P95Latency) + fmt.Printf(" P99 Latency: %v\n", r.P99Latency) + fmt.Printf(" Bottom 10%% Avg: %v\n", r.Bottom10Avg) + } + + if len(r.Errors) > 0 { + fmt.Printf(" Errors: %d\n", len(r.Errors)) + // Print first few errors as samples + sampleCount := 3 + if len(r.Errors) < sampleCount { + sampleCount = len(r.Errors) + } + for i := 0; i < sampleCount; i++ { + fmt.Printf(" Sample %d: %s\n", i+1, r.Errors[i]) + } + } +} + +func (ba *BenchmarkAdapter) GenerateReport() { + // Delegate to main benchmark report generator + // We'll add the results to a file + fmt.Println("\n=== Benchmark Results Summary ===") + ba.mu.RLock() + defer ba.mu.RUnlock() + + for _, result := range ba.results { + ba.printResult(result) + } +} + +func (ba *BenchmarkAdapter) GenerateAsciidocReport() { + // TODO: Implement asciidoc report generation + fmt.Println("Asciidoc report generation not yet implemented for adapter") +} + +func calculateAverage(durations []time.Duration) time.Duration { + if len(durations) == 0 { + return 0 + } + + var total time.Duration + for _, d := range durations { + total += d + } + return total / time.Duration(len(durations)) +} diff --git a/cmd/benchmark/dgraph_benchmark.go b/cmd/benchmark/dgraph_benchmark.go new file mode 100644 index 0000000..935162e --- /dev/null +++ b/cmd/benchmark/dgraph_benchmark.go @@ -0,0 +1,122 @@ +package main + +import ( + "context" + "fmt" + "log" + "os" + "time" + + "next.orly.dev/pkg/database" + _ "next.orly.dev/pkg/dgraph" // Import to register dgraph factory +) + +// DgraphBenchmark wraps a Benchmark with dgraph-specific setup +type DgraphBenchmark struct { + config *BenchmarkConfig + docker *DgraphDocker + database database.Database + bench *BenchmarkAdapter +} + +// NewDgraphBenchmark creates a new dgraph benchmark instance +func NewDgraphBenchmark(config *BenchmarkConfig) (*DgraphBenchmark, error) { + // Create Docker manager + docker := NewDgraphDocker() + + // Start dgraph containers + ctx := context.Background() + if err := docker.Start(ctx); err != nil { + return nil, fmt.Errorf("failed to start dgraph: %w", err) + } + + // Set environment variable for dgraph connection + os.Setenv("ORLY_DGRAPH_URL", docker.GetGRPCEndpoint()) + + // Create database instance using dgraph backend + cancel := func() {} + db, err := database.NewDatabase(ctx, cancel, "dgraph", config.DataDir, "warn") + if err != nil { + docker.Stop() + return nil, fmt.Errorf("failed to create dgraph database: %w", err) + } + + // Wait for database to be ready + fmt.Println("Waiting for dgraph database to be ready...") + select { + case <-db.Ready(): + fmt.Println("Dgraph database is 
ready") + case <-time.After(30 * time.Second): + db.Close() + docker.Stop() + return nil, fmt.Errorf("dgraph database failed to become ready") + } + + // Create adapter to use Database interface with Benchmark + adapter := NewBenchmarkAdapter(config, db) + + dgraphBench := &DgraphBenchmark{ + config: config, + docker: docker, + database: db, + bench: adapter, + } + + return dgraphBench, nil +} + +// Close closes the dgraph benchmark and stops Docker containers +func (dgb *DgraphBenchmark) Close() { + fmt.Println("Closing dgraph benchmark...") + + if dgb.database != nil { + dgb.database.Close() + } + + if dgb.docker != nil { + if err := dgb.docker.Stop(); err != nil { + log.Printf("Error stopping dgraph Docker: %v", err) + } + } +} + +// RunSuite runs the benchmark suite on dgraph +func (dgb *DgraphBenchmark) RunSuite() { + fmt.Println("\n╔════════════════════════════════════════════════════════╗") + fmt.Println("║ DGRAPH BACKEND BENCHMARK SUITE ║") + fmt.Println("╚════════════════════════════════════════════════════════╝") + + // Run only one round for dgraph to keep benchmark time reasonable + fmt.Printf("\n=== Starting dgraph benchmark ===\n") + + fmt.Printf("RunPeakThroughputTest (dgraph)..\n") + dgb.bench.RunPeakThroughputTest() + time.Sleep(10 * time.Second) + + fmt.Printf("RunBurstPatternTest (dgraph)..\n") + dgb.bench.RunBurstPatternTest() + time.Sleep(10 * time.Second) + + fmt.Printf("RunMixedReadWriteTest (dgraph)..\n") + dgb.bench.RunMixedReadWriteTest() + time.Sleep(10 * time.Second) + + fmt.Printf("RunQueryTest (dgraph)..\n") + dgb.bench.RunQueryTest() + time.Sleep(10 * time.Second) + + fmt.Printf("RunConcurrentQueryStoreTest (dgraph)..\n") + dgb.bench.RunConcurrentQueryStoreTest() + + fmt.Printf("\n=== Dgraph benchmark completed ===\n\n") +} + +// GenerateReport generates the benchmark report +func (dgb *DgraphBenchmark) GenerateReport() { + dgb.bench.GenerateReport() +} + +// GenerateAsciidocReport generates asciidoc format report +func (dgb *DgraphBenchmark) GenerateAsciidocReport() { + dgb.bench.GenerateAsciidocReport() +} diff --git a/cmd/benchmark/dgraph_docker.go b/cmd/benchmark/dgraph_docker.go new file mode 100644 index 0000000..bf293fc --- /dev/null +++ b/cmd/benchmark/dgraph_docker.go @@ -0,0 +1,160 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" +) + +// DgraphDocker manages a dgraph instance via Docker Compose +type DgraphDocker struct { + composeFile string + projectName string + running bool +} + +// NewDgraphDocker creates a new dgraph Docker manager +func NewDgraphDocker() *DgraphDocker { + // Try to find the docker-compose file in the current directory first + composeFile := "docker-compose-dgraph.yml" + + // If not found, try the cmd/benchmark directory (for running from project root) + if _, err := os.Stat(composeFile); os.IsNotExist(err) { + composeFile = filepath.Join("cmd", "benchmark", "docker-compose-dgraph.yml") + } + + return &DgraphDocker{ + composeFile: composeFile, + projectName: "orly-benchmark-dgraph", + running: false, + } +} + +// Start starts the dgraph Docker containers +func (d *DgraphDocker) Start(ctx context.Context) error { + fmt.Println("Starting dgraph Docker containers...") + + // Stop any existing containers first + d.Stop() + + // Start containers + cmd := exec.CommandContext( + ctx, + "docker-compose", + "-f", d.composeFile, + "-p", d.projectName, + "up", "-d", + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to 
start dgraph containers: %w", err) + } + + fmt.Println("Waiting for dgraph to be healthy...") + + // Wait for health checks to pass + if err := d.waitForHealthy(ctx, 60*time.Second); err != nil { + d.Stop() // Clean up on failure + return err + } + + d.running = true + fmt.Println("Dgraph is ready!") + return nil +} + +// waitForHealthy waits for dgraph to become healthy +func (d *DgraphDocker) waitForHealthy(ctx context.Context, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + + for time.Now().Before(deadline) { + // Check if alpha is healthy by checking docker health status + cmd := exec.CommandContext( + ctx, + "docker", + "inspect", + "--format={{.State.Health.Status}}", + "orly-benchmark-dgraph-alpha", + ) + + output, err := cmd.Output() + if err == nil && string(output) == "healthy\n" { + // Additional short wait to ensure full readiness + time.Sleep(2 * time.Second) + return nil + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(2 * time.Second): + // Continue waiting + } + } + + return fmt.Errorf("dgraph failed to become healthy within %v", timeout) +} + +// Stop stops and removes the dgraph Docker containers +func (d *DgraphDocker) Stop() error { + if !d.running { + // Try to stop anyway in case of untracked state + cmd := exec.Command( + "docker-compose", + "-f", d.composeFile, + "-p", d.projectName, + "down", "-v", + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + _ = cmd.Run() // Ignore errors + return nil + } + + fmt.Println("Stopping dgraph Docker containers...") + + cmd := exec.Command( + "docker-compose", + "-f", d.composeFile, + "-p", d.projectName, + "down", "-v", + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to stop dgraph containers: %w", err) + } + + d.running = false + fmt.Println("Dgraph containers stopped") + return nil +} + +// GetGRPCEndpoint returns the dgraph gRPC endpoint +func (d *DgraphDocker) GetGRPCEndpoint() string { + return "localhost:9080" +} + +// IsRunning returns whether dgraph is running +func (d *DgraphDocker) IsRunning() bool { + return d.running +} + +// Logs returns the logs from dgraph containers +func (d *DgraphDocker) Logs() error { + cmd := exec.Command( + "docker-compose", + "-f", d.composeFile, + "-p", d.projectName, + "logs", + ) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} diff --git a/cmd/benchmark/docker-compose-dgraph.yml b/cmd/benchmark/docker-compose-dgraph.yml new file mode 100644 index 0000000..adf48ed --- /dev/null +++ b/cmd/benchmark/docker-compose-dgraph.yml @@ -0,0 +1,44 @@ +version: "3.9" + +services: + dgraph-zero: + image: dgraph/dgraph:v23.1.0 + container_name: orly-benchmark-dgraph-zero + working_dir: /data/zero + ports: + - "5080:5080" + - "6080:6080" + command: dgraph zero --my=dgraph-zero:5080 + networks: + - orly-benchmark + healthcheck: + test: ["CMD", "sh", "-c", "dgraph version || exit 1"] + interval: 5s + timeout: 3s + retries: 3 + start_period: 5s + + dgraph-alpha: + image: dgraph/dgraph:v23.1.0 + container_name: orly-benchmark-dgraph-alpha + working_dir: /data/alpha + ports: + - "8080:8080" + - "9080:9080" + command: dgraph alpha --my=dgraph-alpha:7080 --zero=dgraph-zero:5080 --security whitelist=0.0.0.0/0 + networks: + - orly-benchmark + depends_on: + dgraph-zero: + condition: service_healthy + healthcheck: + test: ["CMD", "sh", "-c", "dgraph version || exit 1"] + interval: 5s + timeout: 3s + retries: 6 + start_period: 10s + +networks: + orly-benchmark: + 
name: orly-benchmark-network + driver: bridge diff --git a/cmd/benchmark/docker-compose.yml b/cmd/benchmark/docker-compose.yml index c6639ab..976840b 100644 --- a/cmd/benchmark/docker-compose.yml +++ b/cmd/benchmark/docker-compose.yml @@ -1,19 +1,20 @@ version: "3.8" services: - # Next.orly.dev relay (this repository) - next-orly: + # Next.orly.dev relay with Badger (this repository) + next-orly-badger: build: context: ../.. dockerfile: cmd/benchmark/Dockerfile.next-orly - container_name: benchmark-next-orly + container_name: benchmark-next-orly-badger environment: - ORLY_DATA_DIR=/data - ORLY_LISTEN=0.0.0.0 - ORLY_PORT=8080 - ORLY_LOG_LEVEL=off + - ORLY_DB_TYPE=badger volumes: - - ./data/next-orly:/data + - ./data/next-orly-badger:/data ports: - "8001:8080" networks: @@ -25,6 +26,78 @@ services: retries: 3 start_period: 40s + # Next.orly.dev relay with DGraph (this repository) + next-orly-dgraph: + build: + context: ../.. + dockerfile: cmd/benchmark/Dockerfile.next-orly + container_name: benchmark-next-orly-dgraph + environment: + - ORLY_DATA_DIR=/data + - ORLY_LISTEN=0.0.0.0 + - ORLY_PORT=8080 + - ORLY_LOG_LEVEL=off + - ORLY_DB_TYPE=dgraph + - ORLY_DGRAPH_URL=dgraph-alpha:9080 + volumes: + - ./data/next-orly-dgraph:/data + ports: + - "8007:8080" + networks: + - benchmark-net + depends_on: + dgraph-alpha: + condition: service_healthy + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 60s + + # DGraph Zero - cluster coordinator + dgraph-zero: + image: dgraph/dgraph:v23.1.0 + container_name: benchmark-dgraph-zero + working_dir: /data/zero + ports: + - "5080:5080" + - "6080:6080" + volumes: + - ./data/dgraph-zero:/data + command: dgraph zero --my=dgraph-zero:5080 + networks: + - benchmark-net + healthcheck: + test: ["CMD", "sh", "-c", "dgraph version || exit 1"] + interval: 5s + timeout: 3s + retries: 3 + start_period: 5s + + # DGraph Alpha - data node + dgraph-alpha: + image: dgraph/dgraph:v23.1.0 + container_name: benchmark-dgraph-alpha + working_dir: /data/alpha + ports: + - "8088:8080" + - "9080:9080" + volumes: + - ./data/dgraph-alpha:/data + command: dgraph alpha --my=dgraph-alpha:7080 --zero=dgraph-zero:5080 --security whitelist=0.0.0.0/0 + networks: + - benchmark-net + depends_on: + dgraph-zero: + condition: service_healthy + healthcheck: + test: ["CMD", "sh", "-c", "dgraph version || exit 1"] + interval: 5s + timeout: 3s + retries: 6 + start_period: 10s + # Khatru with SQLite khatru-sqlite: build: @@ -145,7 +218,9 @@ services: dockerfile: cmd/benchmark/Dockerfile.benchmark container_name: benchmark-runner depends_on: - next-orly: + next-orly-badger: + condition: service_healthy + next-orly-dgraph: condition: service_healthy khatru-sqlite: condition: service_healthy @@ -158,7 +233,7 @@ services: nostr-rs-relay: condition: service_healthy environment: - - BENCHMARK_TARGETS=next-orly:8080,khatru-sqlite:3334,khatru-badger:3334,relayer-basic:7447,strfry:8080,nostr-rs-relay:8080 + - BENCHMARK_TARGETS=next-orly-badger:8080,next-orly-dgraph:8080,khatru-sqlite:3334,khatru-badger:3334,relayer-basic:7447,strfry:8080,nostr-rs-relay:8080 - BENCHMARK_EVENTS=50000 - BENCHMARK_WORKERS=24 - BENCHMARK_DURATION=60s diff --git a/cmd/benchmark/main.go b/cmd/benchmark/main.go index 37aa06e..c81ad80 100644 --- a/cmd/benchmark/main.go +++ b/cmd/benchmark/main.go @@ -36,6 +36,9 @@ type BenchmarkConfig struct { RelayURL string NetWorkers int NetRate int // events/sec per worker + + // Backend selection + UseDgraph bool } type 
BenchmarkResult struct { @@ -71,7 +74,14 @@ func main() { return } - fmt.Printf("Starting Nostr Relay Benchmark\n") + if config.UseDgraph { + // Run dgraph benchmark + runDgraphBenchmark(config) + return + } + + // Run standard Badger benchmark + fmt.Printf("Starting Nostr Relay Benchmark (Badger Backend)\n") fmt.Printf("Data Directory: %s\n", config.DataDir) fmt.Printf( "Events: %d, Workers: %d, Duration: %v\n", @@ -89,6 +99,28 @@ func main() { benchmark.GenerateAsciidocReport() } +func runDgraphBenchmark(config *BenchmarkConfig) { + fmt.Printf("Starting Nostr Relay Benchmark (Dgraph Backend)\n") + fmt.Printf("Data Directory: %s\n", config.DataDir) + fmt.Printf( + "Events: %d, Workers: %d\n", + config.NumEvents, config.ConcurrentWorkers, + ) + + dgraphBench, err := NewDgraphBenchmark(config) + if err != nil { + log.Fatalf("Failed to create dgraph benchmark: %v", err) + } + defer dgraphBench.Close() + + // Run dgraph benchmark suite + dgraphBench.RunSuite() + + // Generate reports + dgraphBench.GenerateReport() + dgraphBench.GenerateAsciidocReport() +} + func parseFlags() *BenchmarkConfig { config := &BenchmarkConfig{} @@ -124,6 +156,12 @@ func parseFlags() *BenchmarkConfig { ) flag.IntVar(&config.NetRate, "net-rate", 20, "Events per second per worker") + // Backend selection + flag.BoolVar( + &config.UseDgraph, "dgraph", false, + "Use dgraph backend (requires Docker)", + ) + flag.Parse() return config } @@ -286,7 +324,7 @@ func NewBenchmark(config *BenchmarkConfig) *Benchmark { ctx := context.Background() cancel := func() {} - db, err := database.New(ctx, cancel, config.DataDir, "info") + db, err := database.New(ctx, cancel, config.DataDir, "warn") if err != nil { log.Fatalf("Failed to create database: %v", err) } @@ -974,7 +1012,7 @@ func (b *Benchmark) generateEvents(count int) []*event.E { log.Fatalf("Failed to generate keys for benchmark events: %v", err) } - // Define size distribution - from minimal to 500MB + // Define size distribution - from minimal to 500KB // We'll create a logarithmic distribution to test various sizes sizeBuckets := []int{ 0, // Minimal: empty content, no tags @@ -984,13 +1022,8 @@ func (b *Benchmark) generateEvents(count int) []*event.E { 10 * 1024, // 10 KB 50 * 1024, // 50 KB 100 * 1024, // 100 KB - 500 * 1024, // 500 KB - 1024 * 1024, // 1 MB - 5 * 1024 * 1024, // 5 MB - 10 * 1024 * 1024, // 10 MB - 50 * 1024 * 1024, // 50 MB - 100 * 1024 * 1024, // 100 MB - 500000000, // 500 MB (500,000,000 bytes) + 250 * 1024, // 250 KB + 500 * 1024, // 500 KB (max realistic size for Nostr) } for i := 0; i < count; i++ { diff --git a/cmd/benchmark/reports/run_20251117_154730/next-orly_results.txt b/cmd/benchmark/reports/run_20251117_154730/next-orly_results.txt new file mode 100644 index 0000000..b9bf961 --- /dev/null +++ b/cmd/benchmark/reports/run_20251117_154730/next-orly_results.txt @@ -0,0 +1,134 @@ +Starting Nostr Relay Benchmark +Data Directory: /tmp/benchmark_next-orly_8 +Events: 50000, Workers: 24, Duration: 1m0s +1763394450181444ℹ️ /tmp/benchmark_next-orly_8: All 0 tables opened in 0s +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56 +1763394450184981ℹ️ /tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0 +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56 +1763394450185044ℹ️ /tmp/benchmark_next-orly_8: Set nextTxnTs to 0 +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56 +1763394450185315ℹ️ migrating to version 1... 
/build/pkg/database/migrations.go:66 +1763394450185349ℹ️ migrating to version 2... /build/pkg/database/migrations.go:73 +1763394450185369ℹ️ migrating to version 3... /build/pkg/database/migrations.go:80 +1763394450185374ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287 +1763394450185381ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332 +1763394450185396ℹ️ migrating to version 4... /build/pkg/database/migrations.go:87 +1763394450185400ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340 +1763394450185410ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429 +1763394450185415ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538 + +=== Starting test round 1/2 === +RunPeakThroughputTest.. + +=== Peak Throughput Test === +2025/11/17 15:47:30 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2 +1763394452185466ℹ️ /tmp/benchmark_next-orly_8: database warmup complete, ready to serve requests +/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56 +Events saved: 50000/50000 (100.0%) +Duration: 4.816237891s +Events/sec: 10381.55 +Avg latency: 1.655686ms +P90 latency: 2.061483ms +P95 latency: 2.348178ms +P99 latency: 3.856522ms +Bottom 10% Avg latency: 2.985064ms +RunBurstPatternTest.. + +=== Burst Pattern Test === +Burst completed: 5000 events in 308.793395ms +Burst completed: 5000 events in 320.69366ms +Burst completed: 5000 events in 324.127721ms +Burst completed: 5000 events in 342.594802ms +Burst completed: 5000 events in 302.350819ms +Burst completed: 5000 events in 309.16143ms +Burst completed: 5000 events in 306.739193ms +Burst completed: 5000 events in 329.275972ms +Burst completed: 5000 events in 329.234395ms +Burst completed: 5000 events in 348.105403ms +Burst test completed: 50000 events in 9.543815189s +Events/sec: 5238.99 +RunMixedReadWriteTest.. + +=== Mixed Read/Write Test === +Pre-populating database for read tests... +Mixed test completed: 25000 writes, 25000 reads in 24.491349518s +Combined ops/sec: 2041.54 +1763394510174043ℹ️ /tmp/benchmark_next-orly_8: Block cache metrics: hit: 248593 miss: 322620 keys-added: 236208 keys-updated: 73483 keys-evicted: 236188 cost-added: 12658387393408 cost-evicted: 12657366958988 sets-dropped: 0 sets-rejected: 12869 gets-dropped: 64 gets-kept: 570624 gets-total: 571213 hit-ratio: 0.44 +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56 +RunQueryTest.. + +=== Query Test === +Pre-populating database with 10000 events for query tests... +Query test completed: 258436 queries in 1m0.014042961s +Queries/sec: 4306.26 +Avg query latency: 4.008354ms +P95 query latency: 12.985167ms +P99 query latency: 23.424372ms +RunConcurrentQueryStoreTest.. + +=== Concurrent Query/Store Test === +Pre-populating database with 5000 events for concurrent query/store test... +Concurrent test completed: 252445 operations (202445 queries, 50000 writes) in 1m0.005913119s +Operations/sec: 4207.00 +Avg latency: 2.121776ms +Avg query latency: 2.374689ms +Avg write latency: 1.097756ms +P95 latency: 3.545393ms +P99 latency: 4.795537ms + +Pausing 10s before next round... + +=== Test round completed === + + +=== Starting test round 2/2 === +RunPeakThroughputTest.. 
+ +=== Peak Throughput Test === +Events saved: 50000/50000 (100.0%) +Duration: 5.086723437s +Events/sec: 9829.51 +Avg latency: 1.777699ms +P90 latency: 2.219786ms +P95 latency: 2.443201ms +P99 latency: 3.504646ms +Bottom 10% Avg latency: 3.103013ms +RunBurstPatternTest.. + +=== Burst Pattern Test === +Burst completed: 5000 events in 324.341799ms +Burst completed: 5000 events in 319.047042ms +Burst completed: 5000 events in 324.104589ms +Burst completed: 5000 events in 342.464953ms +Burst completed: 5000 events in 342.679451ms +Burst completed: 5000 events in 359.150337ms +Burst completed: 5000 events in 367.952516ms +Burst completed: 5000 events in 338.4073ms +Burst completed: 5000 events in 326.796197ms +Burst completed: 5000 events in 357.71787ms +Burst test completed: 50000 events in 9.769325434s +Events/sec: 5118.06 +1763394684274617ℹ️ /tmp/benchmark_next-orly_8: [4] [E] LOG Compact 0->6 (8, 0 -> 4 tables with 1 splits). [00001 00002 00003 00004 00005 00006 00007 00008 . .] -> [00009 00010 00011 00012 .], took 2.954s +, deleted 1904950 bytes +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:1479 /build/pkg/database/logger.go:56 +RunMixedReadWriteTest.. + +=== Mixed Read/Write Test === +Pre-populating database for read tests... +Mixed test completed: 25000 writes, 25000 reads in 24.464062793s +Combined ops/sec: 2043.81 +RunQueryTest.. + +=== Query Test === +Pre-populating database with 10000 events for query tests... +Query test completed: 293040 queries in 1m0.010621036s +Queries/sec: 4883.14 +Avg query latency: 3.419764ms +P95 query latency: 11.042876ms +P99 query latency: 19.984912ms +RunConcurrentQueryStoreTest.. + +=== Concurrent Query/Store Test === +Pre-populating database with 5000 events for concurrent query/store test... +1763394810173629ℹ️ /tmp/benchmark_next-orly_8: Block cache metrics: hit: 517421289 miss: 4606293 keys-added: 1664534 keys-updated: 2530425 keys-evicted: 1664512 cost-added: 85045328540032 cost-evicted: 85044318079141 sets-dropped: 0 sets-rejected: 349798 gets-dropped: 404194112 gets-kept: 117717888 gets-total: 522027608 hit-ratio: 0.99 +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56 diff --git a/cmd/benchmark/reports/run_20251117_161622/khatru-badger_results.txt b/cmd/benchmark/reports/run_20251117_161622/khatru-badger_results.txt new file mode 100644 index 0000000..47e80f2 --- /dev/null +++ b/cmd/benchmark/reports/run_20251117_161622/khatru-badger_results.txt @@ -0,0 +1,53 @@ +Starting Nostr Relay Benchmark +Data Directory: /tmp/benchmark_khatru-badger_8 +Events: 50000, Workers: 24, Duration: 1m0s +1763397432159815ℹ️ /tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56 +1763397432162963ℹ️ /tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0 +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56 +1763397432163005ℹ️ /tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0 +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56 +1763397432163282ℹ️ migrating to version 1... /build/pkg/database/migrations.go:66 +1763397432163367ℹ️ migrating to version 2... /build/pkg/database/migrations.go:73 +1763397432163401ℹ️ migrating to version 3... /build/pkg/database/migrations.go:80 +1763397432163409ℹ️ cleaning up ephemeral events (kinds 20000-29999)... 
/build/pkg/database/migrations.go:287 +1763397432163473ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332 +1763397432163564ℹ️ migrating to version 4... /build/pkg/database/migrations.go:87 +1763397432163574ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340 +1763397432163594ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429 +1763397432163600ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538 + +=== Starting test round 1/2 === +RunPeakThroughputTest.. + +=== Peak Throughput Test === +2025/11/17 16:37:12 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2 +1763397434164165ℹ️ /tmp/benchmark_khatru-badger_8: database warmup complete, ready to serve requests +/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56 +Events saved: 50000/50000 (100.0%) +Duration: 4.924203666s +Events/sec: 10153.93 +Avg latency: 1.696974ms +P90 latency: 2.11483ms +P95 latency: 2.344067ms +P99 latency: 3.241477ms +Bottom 10% Avg latency: 2.7865ms +RunBurstPatternTest.. + +=== Burst Pattern Test === +Burst completed: 5000 events in 312.680497ms +Burst completed: 5000 events in 320.868898ms +Burst completed: 5000 events in 317.096109ms +Burst completed: 5000 events in 356.971689ms +Burst completed: 5000 events in 301.615682ms +Burst completed: 5000 events in 306.525096ms +Burst completed: 5000 events in 320.037813ms +Burst completed: 5000 events in 318.017102ms +Burst completed: 5000 events in 320.394281ms +Burst completed: 5000 events in 333.619741ms +Burst test completed: 50000 events in 9.552105607s +Events/sec: 5234.45 +RunMixedReadWriteTest.. + +=== Mixed Read/Write Test === +Pre-populating database for read tests... diff --git a/cmd/benchmark/reports/run_20251117_161622/khatru-sqlite_results.txt b/cmd/benchmark/reports/run_20251117_161622/khatru-sqlite_results.txt new file mode 100644 index 0000000..4db4f67 --- /dev/null +++ b/cmd/benchmark/reports/run_20251117_161622/khatru-sqlite_results.txt @@ -0,0 +1,323 @@ +Starting Nostr Relay Benchmark +Data Directory: /tmp/benchmark_khatru-sqlite_8 +Events: 50000, Workers: 24, Duration: 1m0s +1763397017138391ℹ️ /tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56 +1763397017141550ℹ️ /tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0 +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56 +1763397017141593ℹ️ /tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0 +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56 +1763397017141951ℹ️ migrating to version 1... /build/pkg/database/migrations.go:66 +1763397017142013ℹ️ migrating to version 2... /build/pkg/database/migrations.go:73 +1763397017142036ℹ️ migrating to version 3... /build/pkg/database/migrations.go:80 +1763397017142042ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287 +1763397017142055ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332 +1763397017142080ℹ️ migrating to version 4... /build/pkg/database/migrations.go:87 +1763397017142086ℹ️ converting events to optimized inline storage (Reiser4 optimization)... 
/build/pkg/database/migrations.go:340 +1763397017142103ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429 +1763397017142109ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538 + +=== Starting test round 1/2 === +RunPeakThroughputTest.. + +=== Peak Throughput Test === +2025/11/17 16:30:17 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2 +1763397019142156ℹ️ /tmp/benchmark_khatru-sqlite_8: database warmup complete, ready to serve requests +/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56 +Events saved: 50000/50000 (100.0%) +Duration: 4.697220167s +Events/sec: 10644.59 +Avg latency: 1.589521ms +P90 latency: 1.927686ms +P95 latency: 2.072081ms +P99 latency: 2.794007ms +Bottom 10% Avg latency: 2.449508ms +RunBurstPatternTest.. + +=== Burst Pattern Test === +Burst completed: 5000 events in 331.053594ms +Burst completed: 5000 events in 339.97436ms +Burst completed: 5000 events in 352.328844ms +Burst completed: 5000 events in 376.613834ms +Burst completed: 5000 events in 321.307729ms +Burst completed: 5000 events in 314.265411ms +Burst completed: 5000 events in 321.656622ms +Burst completed: 5000 events in 325.689539ms +Burst completed: 5000 events in 367.767832ms +Burst completed: 5000 events in 367.275402ms +Burst test completed: 50000 events in 9.780316233s +Events/sec: 5112.31 +RunMixedReadWriteTest.. + +=== Mixed Read/Write Test === +Pre-populating database for read tests... +Mixed test completed: 25000 writes, 25000 reads in 24.45356557s +Combined ops/sec: 2044.69 +1763397077132611⚠️ /tmp/benchmark_khatru-sqlite_8: Block cache might be too small. Metrics: hit: 164850 miss: 294509 keys-added: 226622 keys-updated: 54881 keys-evicted: 226603 cost-added: 12429978548485 cost-evicted: 12428976154843 sets-dropped: 0 sets-rejected: 12954 gets-dropped: 192 gets-kept: 458368 gets-total: 459359 hit-ratio: 0.36 +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:450 /build/pkg/database/logger.go:46 +1763397077132680⚠️ /tmp/benchmark_khatru-sqlite_8: Cache life expectancy (in seconds): + -- Histogram: +Min value: 0 +Max value: 11 +Count: 226603 +50p: 2.00 +75p: 2.00 +90p: 2.00 +[0, 2) 226567 99.98% 99.98% +[8, 16) 36 0.02% 100.00% + -- +/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:451 /build/pkg/database/logger.go:46 +RunQueryTest.. + +=== Query Test === +Pre-populating database with 10000 events for query tests... +Query test completed: 253442 queries in 1m0.011742602s +Queries/sec: 4223.21 +Avg query latency: 4.105842ms +P95 query latency: 13.288591ms +P99 query latency: 23.937862ms +RunConcurrentQueryStoreTest.. + +=== Concurrent Query/Store Test === +Pre-populating database with 5000 events for concurrent query/store test... +Concurrent test completed: 237910 operations (187910 queries, 50000 writes) in 1m0.007412985s +Operations/sec: 3964.68 +Avg latency: 2.360698ms +Avg query latency: 2.630397ms +Avg write latency: 1.347113ms +P95 latency: 4.390739ms +P99 latency: 6.940329ms + +Pausing 10s before next round... + +=== Test round completed === + + +=== Starting test round 2/2 === +RunPeakThroughputTest.. + +=== Peak Throughput Test === +Events saved: 50000/50000 (100.0%) +Duration: 4.792392684s +Events/sec: 10433.20 +Avg latency: 1.649743ms +P90 latency: 1.991666ms +P95 latency: 2.145348ms +P99 latency: 2.77034ms +Bottom 10% Avg latency: 2.781523ms +RunBurstPatternTest.. 
+
+=== Burst Pattern Test ===
+Burst completed: 5000 events in 330.357755ms
+Burst completed: 5000 events in 334.984623ms
+Burst completed: 5000 events in 345.478382ms
+Burst completed: 5000 events in 340.589233ms
+Burst completed: 5000 events in 348.792025ms
+Burst completed: 5000 events in 354.019658ms
+Burst completed: 5000 events in 356.823662ms
+Burst completed: 5000 events in 347.496865ms
+Burst completed: 5000 events in 342.618798ms
+Burst completed: 5000 events in 337.759666ms
+Burst test completed: 50000 events in 9.775603327s
+Events/sec: 5114.77
+1763397250998218ℹ️ /tmp/benchmark_khatru-sqlite_8: [6] [E] LOG Compact 0->6 (8, 0 -> 4 tables with 1 splits). [00001 00002 00003 00004 00005 00006 00007 00008 . .] -> [00009 00010 00011 00012 .], took 2.922s
+, deleted 1932516 bytes
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:1479 /build/pkg/database/logger.go:56
+RunMixedReadWriteTest..
+
+=== Mixed Read/Write Test ===
+Pre-populating database for read tests...
+Mixed test completed: 25000 writes, 25000 reads in 24.35620806s
+Combined ops/sec: 2052.86
+RunQueryTest..
+
+=== Query Test ===
+Pre-populating database with 10000 events for query tests...
+Query test completed: 334922 queries in 1m0.011826287s
+Queries/sec: 5580.93
+Avg query latency: 2.871941ms
+P95 query latency: 8.86787ms
+P99 query latency: 16.075646ms
+RunConcurrentQueryStoreTest..
+
+=== Concurrent Query/Store Test ===
+Pre-populating database with 5000 events for concurrent query/store test...
+1763397377131811ℹ️ /tmp/benchmark_khatru-sqlite_8: Block cache metrics: hit: 485497199 miss: 4802603 keys-added: 1628313 keys-updated: 2776240 keys-evicted: 1628292 cost-added: 85662348259200 cost-evicted: 85661362474446 sets-dropped: 0 sets-rejected: 336231 gets-dropped: 382997632 gets-kept: 107185536 gets-total: 490299843 hit-ratio: 0.99
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
+Concurrent test completed: 266462 operations (216462 queries, 50000 writes) in 1m0.004503525s
+Operations/sec: 4440.70
+Avg latency: 1.968296ms
+Avg query latency: 2.154689ms
+Avg write latency: 1.161355ms
+P95 latency: 3.329033ms
+P99 latency: 4.878236ms
+
+=== Test round completed ===
+
+
+================================================================================
+BENCHMARK REPORT
+================================================================================
+
+Test: Peak Throughput
+Duration: 4.697220167s
+Total Events: 50000
+Events/sec: 10644.59
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 432 MB
+Avg Latency: 1.589521ms
+P90 Latency: 1.927686ms
+P95 Latency: 2.072081ms
+P99 Latency: 2.794007ms
+Bottom 10% Avg Latency: 2.449508ms
+----------------------------------------
+
+Test: Burst Pattern
+Duration: 9.780316233s
+Total Events: 50000
+Events/sec: 5112.31
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 147 MB
+Avg Latency: 3.589724ms
+P90 Latency: 7.397294ms
+P95 Latency: 9.015658ms
+P99 Latency: 12.848707ms
+Bottom 10% Avg Latency: 10.286462ms
+----------------------------------------
+
+Test: Mixed Read/Write
+Duration: 24.45356557s
+Total Events: 50000
+Events/sec: 2044.69
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 189 MB
+Avg Latency: 439.984µs
+P90 Latency: 878.495µs
+P95 Latency: 980.94µs
+P99 Latency: 1.17514ms
+Bottom 10% Avg Latency: 1.261937ms
+----------------------------------------
+
+Test: Query Performance
+Duration: 1m0.011742602s
+Total Events: 253442
+Events/sec: 4223.21
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 165 MB
+Avg Latency: 4.105842ms
+P90 Latency: 8.468483ms
+P95 Latency: 13.288591ms
+P99 Latency: 23.937862ms
+Bottom 10% Avg Latency: 15.251447ms
+----------------------------------------
+
+Test: Concurrent Query/Store
+Duration: 1m0.007412985s
+Total Events: 237910
+Events/sec: 3964.68
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 149 MB
+Avg Latency: 2.360698ms
+P90 Latency: 3.517024ms
+P95 Latency: 4.390739ms
+P99 Latency: 6.940329ms
+Bottom 10% Avg Latency: 5.015416ms
+----------------------------------------
+
+Test: Peak Throughput
+Duration: 4.792392684s
+Total Events: 50000
+Events/sec: 10433.20
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 523 MB
+Avg Latency: 1.649743ms
+P90 Latency: 1.991666ms
+P95 Latency: 2.145348ms
+P99 Latency: 2.77034ms
+Bottom 10% Avg Latency: 2.781523ms
+----------------------------------------
+
+Test: Burst Pattern
+Duration: 9.775603327s
+Total Events: 50000
+Events/sec: 5114.77
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 541 MB
+Avg Latency: 2.925486ms
+P90 Latency: 5.542703ms
+P95 Latency: 7.775478ms
+P99 Latency: 11.125804ms
+Bottom 10% Avg Latency: 8.91184ms
+----------------------------------------
+
+Test: Mixed Read/Write
+Duration: 24.35620806s
+Total Events: 50000
+Events/sec: 2052.86
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 200 MB
+Avg Latency: 424.333µs
+P90 Latency: 865.429µs
+P95 Latency: 968.085µs
+P99 Latency: 1.174568ms
+Bottom 10% Avg Latency: 1.224002ms
+----------------------------------------
+
+Test: Query Performance
+Duration: 1m0.011826287s
+Total Events: 334922
+Events/sec: 5580.93
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 129 MB
+Avg Latency: 2.871941ms
+P90 Latency: 5.60422ms
+P95 Latency: 8.86787ms
+P99 Latency: 16.075646ms
+Bottom 10% Avg Latency: 10.23636ms
+----------------------------------------
+
+Test: Concurrent Query/Store
+Duration: 1m0.004503525s
+Total Events: 266462
+Events/sec: 4440.70
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 155 MB
+Avg Latency: 1.968296ms
+P90 Latency: 2.729181ms
+P95 Latency: 3.329033ms
+P99 Latency: 4.878236ms
+Bottom 10% Avg Latency: 3.768185ms
+----------------------------------------
+
+Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
+AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc
+1763397425682348ℹ️ /tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:56
+1763397426982581ℹ️ /tmp/benchmark_khatru-sqlite_8:
+Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 4 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 5 [ ]: NumTables: 01. Size: 94 MiB of 23 MiB. Score: 4.08->4.08 StaleData: 0 B Target FileSize: 122 MiB
+Level 6 [ ]: NumTables: 04. Size: 230 MiB of 230 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 244 MiB
+Level Done
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:56
+
+RELAY_NAME: khatru-sqlite
+RELAY_URL: ws://khatru-sqlite:3334
+TEST_TIMESTAMP: 2025-11-17T16:37:07+00:00
+BENCHMARK_CONFIG:
+  Events: 50000
+  Workers: 24
+  Duration: 60s
diff --git a/cmd/benchmark/reports/run_20251117_161622/next-orly-badger_results.txt b/cmd/benchmark/reports/run_20251117_161622/next-orly-badger_results.txt
new file mode 100644
index 0000000..08aa22d
--- /dev/null
+++ b/cmd/benchmark/reports/run_20251117_161622/next-orly-badger_results.txt
@@ -0,0 +1,311 @@
+Starting Nostr Relay Benchmark
+Data Directory: /tmp/benchmark_next-orly-badger_8
+Events: 50000, Workers: 24, Duration: 1m0s
+1763396182850462ℹ️ /tmp/benchmark_next-orly-badger_8: All 0 tables opened in 0s
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56
+1763396182853668ℹ️ /tmp/benchmark_next-orly-badger_8: Discard stats nextEmptySlot: 0
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56
+1763396182853712ℹ️ /tmp/benchmark_next-orly-badger_8: Set nextTxnTs to 0
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56
+1763396182854009ℹ️ migrating to version 1... /build/pkg/database/migrations.go:66
+1763396182854056ℹ️ migrating to version 2... /build/pkg/database/migrations.go:73
+1763396182854078ℹ️ migrating to version 3... /build/pkg/database/migrations.go:80
+1763396182854082ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287
+1763396182854129ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332
+1763396182854260ℹ️ migrating to version 4... /build/pkg/database/migrations.go:87
+1763396182854271ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340
+1763396182854295ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429
+1763396182854302ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538
+
+=== Starting test round 1/2 ===
+RunPeakThroughputTest..
+
+=== Peak Throughput Test ===
+2025/11/17 16:16:22 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2
+1763396184854370ℹ️ /tmp/benchmark_next-orly-badger_8: database warmup complete, ready to serve requests
+/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56
+Events saved: 50000/50000 (100.0%)
+Duration: 5.666497805s
+Events/sec: 8823.79
+Avg latency: 2.020722ms
+P90 latency: 2.645436ms
+P95 latency: 2.995948ms
+P99 latency: 4.460502ms
+Bottom 10% Avg latency: 3.520179ms
+RunBurstPatternTest..
+
+=== Burst Pattern Test ===
+Burst completed: 5000 events in 352.025605ms
+Burst completed: 5000 events in 363.623929ms
+Burst completed: 5000 events in 367.475139ms
+Burst completed: 5000 events in 396.276199ms
+Burst completed: 5000 events in 334.007635ms
+Burst completed: 5000 events in 342.086817ms
+Burst completed: 5000 events in 360.687805ms
+Burst completed: 5000 events in 392.627451ms
+Burst completed: 5000 events in 397.635203ms
+Burst completed: 5000 events in 376.061572ms
+Burst test completed: 50000 events in 10.132858185s
+Events/sec: 4934.44
+RunMixedReadWriteTest..
+
+=== Mixed Read/Write Test ===
+Pre-populating database for read tests...
+1763396242843490ℹ️ /tmp/benchmark_next-orly-badger_8: Block cache metrics: hit: 232171 miss: 337826 keys-added: 235144 keys-updated: 89642 keys-evicted: 235124 cost-added: 12615246695866 cost-evicted: 12614243474391 sets-dropped: 0 sets-rejected: 12961 gets-dropped: 1280 gets-kept: 568192 gets-total: 569997 hit-ratio: 0.41
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
+Mixed test completed: 25000 writes, 25000 reads in 24.625333257s
+Combined ops/sec: 2030.43
+RunQueryTest..
+
+=== Query Test ===
+Pre-populating database with 10000 events for query tests...
+Query test completed: 197562 queries in 1m0.011972513s
+Queries/sec: 3292.04
+Avg query latency: 5.52205ms
+P95 query latency: 18.40165ms
+P99 query latency: 32.139723ms
+RunConcurrentQueryStoreTest..
+
+=== Concurrent Query/Store Test ===
+Pre-populating database with 5000 events for concurrent query/store test...
+Concurrent test completed: 224870 operations (174870 queries, 50000 writes) in 1m0.006047854s
+Operations/sec: 3747.46
+Avg latency: 2.665369ms
+Avg query latency: 2.866192ms
+Avg write latency: 1.963009ms
+P95 latency: 5.204253ms
+P99 latency: 8.129537ms
+
+Pausing 10s before next round...
+
+=== Test round completed ===
+
+
+=== Starting test round 2/2 ===
+RunPeakThroughputTest..
+
+=== Peak Throughput Test ===
+Events saved: 50000/50000 (100.0%)
+Duration: 5.145620568s
+Events/sec: 9717.00
+Avg latency: 1.788996ms
+P90 latency: 2.241725ms
+P95 latency: 2.442669ms
+P99 latency: 3.110506ms
+Bottom 10% Avg latency: 3.016821ms
+RunBurstPatternTest..
+
+=== Burst Pattern Test ===
+Burst completed: 5000 events in 362.292309ms
+Burst completed: 5000 events in 446.105376ms
+Burst completed: 5000 events in 414.443306ms
+Burst completed: 5000 events in 378.792051ms
+Burst completed: 5000 events in 381.274883ms
+Burst completed: 5000 events in 397.941224ms
+Burst completed: 5000 events in 449.109795ms
+Burst completed: 5000 events in 410.566974ms
+Burst completed: 5000 events in 385.220958ms
+Burst completed: 5000 events in 383.149443ms
+1763396419122547ℹ️ /tmp/benchmark_next-orly-badger_8: [0] [E] LOG Compact 0->6 (8, 0 -> 4 tables with 1 splits). [00001 00002 00003 00004 00005 00006 00007 00008 . .] -> [00009 00010 00011 00012 .], took 3.061s
+, deleted 1899050 bytes
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:1479 /build/pkg/database/logger.go:56
+Burst test completed: 50000 events in 10.438224172s
+Events/sec: 4790.09
+RunMixedReadWriteTest..
+
+=== Mixed Read/Write Test ===
+Pre-populating database for read tests...
+Mixed test completed: 25000 writes, 25000 reads in 24.485622359s
+Combined ops/sec: 2042.01
+RunQueryTest..
+
+=== Query Test ===
+Pre-populating database with 10000 events for query tests...
+Query test completed: 293294 queries in 1m0.013023948s
+Queries/sec: 4887.17
+Avg query latency: 3.408294ms
+P95 query latency: 10.965419ms
+P99 query latency: 19.184675ms
+RunConcurrentQueryStoreTest..
+
+=== Concurrent Query/Store Test ===
+Pre-populating database with 5000 events for concurrent query/store test...
+1763396542843038ℹ️ /tmp/benchmark_next-orly-badger_8: Block cache metrics: hit: 411640922 miss: 5406705 keys-added: 1627143 keys-updated: 3422501 keys-evicted: 1627125 cost-added: 84304242021549 cost-evicted: 84303233712402 sets-dropped: 0 sets-rejected: 295382 gets-dropped: 325582080 gets-kept: 91360192 gets-total: 417047650 hit-ratio: 0.99
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
+Concurrent test completed: 254899 operations (204899 queries, 50000 writes) in 1m0.006656731s
+Operations/sec: 4247.85
+Avg latency: 2.125728ms
+Avg query latency: 2.314927ms
+Avg write latency: 1.350394ms
+P95 latency: 3.778776ms
+P99 latency: 5.393909ms
+
+=== Test round completed ===
+
+
+================================================================================
+BENCHMARK REPORT
+================================================================================
+
+Test: Peak Throughput
+Duration: 5.666497805s
+Total Events: 50000
+Events/sec: 8823.79
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 257 MB
+Avg Latency: 2.020722ms
+P90 Latency: 2.645436ms
+P95 Latency: 2.995948ms
+P99 Latency: 4.460502ms
+Bottom 10% Avg Latency: 3.520179ms
+----------------------------------------
+
+Test: Burst Pattern
+Duration: 10.132858185s
+Total Events: 50000
+Events/sec: 4934.44
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 122 MB
+Avg Latency: 7.197024ms
+P90 Latency: 12.546513ms
+P95 Latency: 15.216454ms
+P99 Latency: 23.682573ms
+Bottom 10% Avg Latency: 18.172083ms
+----------------------------------------
+
+Test: Mixed Read/Write
+Duration: 24.625333257s
+Total Events: 50000
+Events/sec: 2030.43
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 120 MB
+Avg Latency: 467.389µs
+P90 Latency: 914.891µs
+P95 Latency: 1.0349ms
+P99 Latency: 1.268268ms
+Bottom 10% Avg Latency: 1.393626ms
+----------------------------------------
+
+Test: Query Performance
+Duration: 1m0.011972513s
+Total Events: 197562
+Events/sec: 3292.04
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 152 MB
+Avg Latency: 5.52205ms
+P90 Latency: 12.226879ms
+P95 Latency: 18.40165ms
+P99 Latency: 32.139723ms
+Bottom 10% Avg Latency: 20.985445ms
+----------------------------------------
+
+Test: Concurrent Query/Store
+Duration: 1m0.006047854s
+Total Events: 224870
+Events/sec: 3747.46
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 127 MB
+Avg Latency: 2.665369ms
+P90 Latency: 4.194993ms
+P95 Latency: 5.204253ms
+P99 Latency: 8.129537ms
+Bottom 10% Avg Latency: 5.884586ms
+----------------------------------------
+
+Test: Peak Throughput
+Duration: 5.145620568s
+Total Events: 50000
+Events/sec: 9717.00
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 522 MB
+Avg Latency: 1.788996ms
+P90 Latency: 2.241725ms
+P95 Latency: 2.442669ms
+P99 Latency: 3.110506ms
+Bottom 10% Avg Latency: 3.016821ms
+----------------------------------------
+
+Test: Burst Pattern
+Duration: 10.438224172s
+Total Events: 50000
+Events/sec: 4790.09
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 623 MB
+Avg Latency: 9.406859ms
+P90 Latency: 21.810715ms
+P95 Latency: 35.119382ms
+P99 Latency: 66.001509ms
+Bottom 10% Avg Latency: 39.782175ms
+----------------------------------------
+
+Test: Mixed Read/Write
+Duration: 24.485622359s
+Total Events: 50000
+Events/sec: 2042.01
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 165 MB
+Avg Latency: 445.318µs
+P90 Latency: 907.915µs
+P95 Latency: 1.021172ms
+P99 Latency: 1.227095ms
+Bottom 10% Avg Latency: 1.265835ms
+----------------------------------------
+
+Test: Query Performance
+Duration: 1m0.013023948s
+Total Events: 293294
+Events/sec: 4887.17
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 136 MB
+Avg Latency: 3.408294ms
+P90 Latency: 7.156129ms
+P95 Latency: 10.965419ms
+P99 Latency: 19.184675ms
+Bottom 10% Avg Latency: 12.469832ms
+----------------------------------------
+
+Test: Concurrent Query/Store
+Duration: 1m0.006656731s
+Total Events: 254899
+Events/sec: 4247.85
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 147 MB
+Avg Latency: 2.125728ms
+P90 Latency: 3.131901ms
+P95 Latency: 3.778776ms
+P99 Latency: 5.393909ms
+Bottom 10% Avg Latency: 4.22837ms
+----------------------------------------
+
+Report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.txt
+AsciiDoc report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.adoc
+1763396593981772ℹ️ /tmp/benchmark_next-orly-badger_8: Lifetime L0 stalled for: 0s
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:56
+1763396595378747ℹ️ /tmp/benchmark_next-orly-badger_8:
+Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 4 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 5 [ ]: NumTables: 01. Size: 94 MiB of 23 MiB. Score: 4.08->4.08 StaleData: 0 B Target FileSize: 122 MiB
+Level 6 [ ]: NumTables: 04. Size: 230 MiB of 230 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 244 MiB
+Level Done
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:56
+
+RELAY_NAME: next-orly-badger
+RELAY_URL: ws://next-orly-badger:8080
+TEST_TIMESTAMP: 2025-11-17T16:23:15+00:00
+BENCHMARK_CONFIG:
+  Events: 50000
+  Workers: 24
+  Duration: 60s
diff --git a/cmd/benchmark/reports/run_20251117_161622/next-orly-dgraph_results.txt b/cmd/benchmark/reports/run_20251117_161622/next-orly-dgraph_results.txt
new file mode 100644
index 0000000..7cb411a
--- /dev/null
+++ b/cmd/benchmark/reports/run_20251117_161622/next-orly-dgraph_results.txt
@@ -0,0 +1,323 @@
+Starting Nostr Relay Benchmark
+Data Directory: /tmp/benchmark_next-orly-dgraph_8
+Events: 50000, Workers: 24, Duration: 1m0s
+1763396600574205ℹ️ /tmp/benchmark_next-orly-dgraph_8: All 0 tables opened in 0s
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56
+1763396600577795ℹ️ /tmp/benchmark_next-orly-dgraph_8: Discard stats nextEmptySlot: 0
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56
+1763396600577852ℹ️ /tmp/benchmark_next-orly-dgraph_8: Set nextTxnTs to 0
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56
+1763396600578216ℹ️ migrating to version 1... /build/pkg/database/migrations.go:66
+1763396600578287ℹ️ migrating to version 2... /build/pkg/database/migrations.go:73
+1763396600578319ℹ️ migrating to version 3... /build/pkg/database/migrations.go:80
+1763396600578325ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287
+1763396600578334ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332
+1763396600578350ℹ️ migrating to version 4... /build/pkg/database/migrations.go:87
+1763396600578355ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340
+1763396600578372ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429
+1763396600578378ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538
+
+=== Starting test round 1/2 ===
+RunPeakThroughputTest..
+
+=== Peak Throughput Test ===
+2025/11/17 16:23:20 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2
+1763396602578437ℹ️ /tmp/benchmark_next-orly-dgraph_8: database warmup complete, ready to serve requests
+/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56
+Events saved: 50000/50000 (100.0%)
+Duration: 4.932431923s
+Events/sec: 10136.99
+Avg latency: 1.667317ms
+P90 latency: 2.069461ms
+P95 latency: 2.249895ms
+P99 latency: 2.861303ms
+Bottom 10% Avg latency: 2.592597ms
+RunBurstPatternTest..
+
+=== Burst Pattern Test ===
+Burst completed: 5000 events in 335.655402ms
+Burst completed: 5000 events in 330.360552ms
+Burst completed: 5000 events in 350.90491ms
+Burst completed: 5000 events in 373.041958ms
+Burst completed: 5000 events in 347.11564ms
+Burst completed: 5000 events in 315.949199ms
+Burst completed: 5000 events in 331.42993ms
+Burst completed: 5000 events in 352.164361ms
+Burst completed: 5000 events in 359.115619ms
+Burst completed: 5000 events in 360.397544ms
+Burst test completed: 50000 events in 9.808342155s
+Events/sec: 5097.70
+RunMixedReadWriteTest..
+
+=== Mixed Read/Write Test ===
+Pre-populating database for read tests...
+Mixed test completed: 25000 writes, 25000 reads in 24.59623701s
+Combined ops/sec: 2032.83
+1763396660567060⚠️ /tmp/benchmark_next-orly-dgraph_8: Block cache might be too small. Metrics: hit: 153935 miss: 305257 keys-added: 227607 keys-updated: 64636 keys-evicted: 227588 cost-added: 12452581576986 cost-evicted: 12451583862757 sets-dropped: 0 sets-rejected: 12954 gets-dropped: 256 gets-kept: 458496 gets-total: 459192 hit-ratio: 0.34
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:450 /build/pkg/database/logger.go:46
+1763396660567121⚠️ /tmp/benchmark_next-orly-dgraph_8: Cache life expectancy (in seconds):
+ -- Histogram:
+Min value: 0
+Max value: 11
+Count: 227588
+50p: 2.00
+75p: 2.00
+90p: 2.00
+[0, 2) 227552 99.98% 99.98%
+[8, 16) 36 0.02% 100.00%
+ --
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:451 /build/pkg/database/logger.go:46
+RunQueryTest..
+
+=== Query Test ===
+Pre-populating database with 10000 events for query tests...
+Query test completed: 221626 queries in 1m0.014161671s
+Queries/sec: 3692.90
+Avg query latency: 4.849059ms
+P95 query latency: 15.966874ms
+P99 query latency: 27.859712ms
+RunConcurrentQueryStoreTest..
+
+=== Concurrent Query/Store Test ===
+Pre-populating database with 5000 events for concurrent query/store test...
+Concurrent test completed: 235023 operations (185023 queries, 50000 writes) in 1m0.005568823s
+Operations/sec: 3916.69
+Avg latency: 2.401379ms
+Avg query latency: 2.672573ms
+Avg write latency: 1.397837ms
+P95 latency: 4.398002ms
+P99 latency: 6.207183ms
+
+Pausing 10s before next round...
+
+=== Test round completed ===
+
+
+=== Starting test round 2/2 ===
+RunPeakThroughputTest..
+
+=== Peak Throughput Test ===
+Events saved: 50000/50000 (100.0%)
+Duration: 5.127096799s
+Events/sec: 9752.11
+Avg latency: 1.795821ms
+P90 latency: 2.25461ms
+P95 latency: 2.466785ms
+P99 latency: 3.159176ms
+Bottom 10% Avg latency: 3.072242ms
+RunBurstPatternTest..
+
+=== Burst Pattern Test ===
+Burst completed: 5000 events in 358.012209ms
+Burst completed: 5000 events in 336.300441ms
+Burst completed: 5000 events in 363.657063ms
+Burst completed: 5000 events in 356.771817ms
+Burst completed: 5000 events in 368.000986ms
+Burst completed: 5000 events in 441.821658ms
+Burst completed: 5000 events in 451.146122ms
+Burst completed: 5000 events in 455.159014ms
+Burst completed: 5000 events in 359.826504ms
+Burst completed: 5000 events in 358.602207ms
+1763396835570723ℹ️ /tmp/benchmark_next-orly-dgraph_8: [6] [E] LOG Compact 0->6 (8, 0 -> 4 tables with 1 splits). [00001 00002 00003 00004 00005 00006 00007 00008 . .] -> [00009 00010 00011 00012 .], took 3.055s
+, deleted 1901003 bytes
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:1479 /build/pkg/database/logger.go:56
+Burst test completed: 50000 events in 10.25458455s
+Events/sec: 4875.87
+RunMixedReadWriteTest..
+
+=== Mixed Read/Write Test ===
+Pre-populating database for read tests...
+Mixed test completed: 25000 writes, 25000 reads in 24.474786024s
+Combined ops/sec: 2042.92
+RunQueryTest..
+
+=== Query Test ===
+Pre-populating database with 10000 events for query tests...
+Query test completed: 287727 queries in 1m0.012156857s
+Queries/sec: 4794.48
+Avg query latency: 3.504598ms
+P95 query latency: 11.416502ms
+P99 query latency: 19.871886ms
+RunConcurrentQueryStoreTest..
+
+=== Concurrent Query/Store Test ===
+Pre-populating database with 5000 events for concurrent query/store test...
+1763396960566384ℹ️ /tmp/benchmark_next-orly-dgraph_8: Block cache metrics: hit: 436764091 miss: 4871096 keys-added: 1584381 keys-updated: 2919606 keys-evicted: 1584361 cost-added: 83226283032882 cost-evicted: 83225259887553 sets-dropped: 0 sets-rejected: 305847 gets-dropped: 344794880 gets-kept: 96734656 gets-total: 441635219 hit-ratio: 0.99
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
+Concurrent test completed: 252209 operations (202209 queries, 50000 writes) in 1m0.008028818s
+Operations/sec: 4202.92
+Avg latency: 2.189461ms
+Avg query latency: 2.337704ms
+Avg write latency: 1.58994ms
+P95 latency: 3.919323ms
+P99 latency: 5.959314ms
+
+=== Test round completed ===
+
+
+================================================================================
+BENCHMARK REPORT
+================================================================================
+
+Test: Peak Throughput
+Duration: 4.932431923s
+Total Events: 50000
+Events/sec: 10136.99
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 432 MB
+Avg Latency: 1.667317ms
+P90 Latency: 2.069461ms
+P95 Latency: 2.249895ms
+P99 Latency: 2.861303ms
+Bottom 10% Avg Latency: 2.592597ms
+----------------------------------------
+
+Test: Burst Pattern
+Duration: 9.808342155s
+Total Events: 50000
+Events/sec: 5097.70
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 149 MB
+Avg Latency: 3.805495ms
+P90 Latency: 6.632151ms
+P95 Latency: 8.069195ms
+P99 Latency: 13.244195ms
+Bottom 10% Avg Latency: 9.922762ms
+----------------------------------------
+
+Test: Mixed Read/Write
+Duration: 24.59623701s
+Total Events: 50000
+Events/sec: 2032.83
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 121 MB
+Avg Latency: 467.746µs
+P90 Latency: 911.189µs
+P95 Latency: 1.018554ms
+P99 Latency: 1.250848ms
+Bottom 10% Avg Latency: 1.345857ms
+----------------------------------------
+
+Test: Query Performance
+Duration: 1m0.014161671s
+Total Events: 221626
+Events/sec: 3692.90
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 125 MB
+Avg Latency: 4.849059ms
+P90 Latency: 10.564822ms
+P95 Latency: 15.966874ms
+P99 Latency: 27.859712ms
+Bottom 10% Avg Latency: 18.180391ms
+----------------------------------------
+
+Test: Concurrent Query/Store
+Duration: 1m0.005568823s
+Total Events: 235023
+Events/sec: 3916.69
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 177 MB
+Avg Latency: 2.401379ms
+P90 Latency: 3.659643ms
+P95 Latency: 4.398002ms
+P99 Latency: 6.207183ms
+Bottom 10% Avg Latency: 4.857955ms
+----------------------------------------
+
+Test: Peak Throughput
+Duration: 5.127096799s
+Total Events: 50000
+Events/sec: 9752.11
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 480 MB
+Avg Latency: 1.795821ms
+P90 Latency: 2.25461ms
+P95 Latency: 2.466785ms
+P99 Latency: 3.159176ms
+Bottom 10% Avg Latency: 3.072242ms
+----------------------------------------
+
+Test: Burst Pattern
+Duration: 10.25458455s
+Total Events: 50000
+Events/sec: 4875.87
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 621 MB
+Avg Latency: 9.266976ms
+P90 Latency: 24.12544ms
+P95 Latency: 34.465042ms
+P99 Latency: 55.446215ms
+Bottom 10% Avg Latency: 37.317916ms
+----------------------------------------
+
+Test: Mixed Read/Write
+Duration: 24.474786024s
+Total Events: 50000
+Events/sec: 2042.92
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 182 MB
+Avg Latency: 452.46µs
+P90 Latency: 909.806µs
+P95 Latency: 1.014516ms
+P99 Latency: 1.214797ms
+Bottom 10% Avg Latency: 1.304994ms
+----------------------------------------
+
+Test: Query Performance
+Duration: 1m0.012156857s
+Total Events: 287727
+Events/sec: 4794.48
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 150 MB
+Avg Latency: 3.504598ms
+P90 Latency: 7.480817ms
+P95 Latency: 11.416502ms
+P99 Latency: 19.871886ms
+Bottom 10% Avg Latency: 12.934864ms
+----------------------------------------
+
+Test: Concurrent Query/Store
+Duration: 1m0.008028818s
+Total Events: 252209
+Events/sec: 4202.92
+Success Rate: 100.0%
+Concurrent Workers: 24
+Memory Used: 98 MB
+Avg Latency: 2.189461ms
+P90 Latency: 3.213337ms
+P95 Latency: 3.919323ms
+P99 Latency: 5.959314ms
+Bottom 10% Avg Latency: 4.521426ms
+----------------------------------------
+
+Report saved to: /tmp/benchmark_next-orly-dgraph_8/benchmark_report.txt
+AsciiDoc report saved to: /tmp/benchmark_next-orly-dgraph_8/benchmark_report.adoc
+1763397010410098ℹ️ /tmp/benchmark_next-orly-dgraph_8: Lifetime L0 stalled for: 0s
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:56
+1763397011943178ℹ️ /tmp/benchmark_next-orly-dgraph_8:
+Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 4 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
+Level 5 [ ]: NumTables: 01. Size: 94 MiB of 23 MiB. Score: 4.08->4.08 StaleData: 0 B Target FileSize: 122 MiB
+Level 6 [ ]: NumTables: 04. Size: 230 MiB of 230 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 244 MiB
+Level Done
+/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:56
+
+RELAY_NAME: next-orly-dgraph
+RELAY_URL: ws://next-orly-dgraph:8080
+TEST_TIMESTAMP: 2025-11-17T16:30:12+00:00
+BENCHMARK_CONFIG:
+  Events: 50000
+  Workers: 24
+  Duration: 60s
diff --git a/cmd/benchmark/run-badger-benchmark.sh b/cmd/benchmark/run-badger-benchmark.sh
new file mode 100755
index 0000000..e12bacc
--- /dev/null
+++ b/cmd/benchmark/run-badger-benchmark.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Run Badger benchmark with reduced cache sizes to avoid OOM
+
+# Set reasonable cache sizes for benchmark
+export ORLY_DB_BLOCK_CACHE_MB=256 # Reduced from 1024MB
+export ORLY_DB_INDEX_CACHE_MB=128 # Reduced from 512MB
+export ORLY_QUERY_CACHE_SIZE_MB=128 # Reduced from 512MB
+
+# Clean up old data
+rm -rf /tmp/benchmark_db_badger
+
+echo "Running Badger benchmark with reduced cache sizes:"
+echo " Block Cache: ${ORLY_DB_BLOCK_CACHE_MB}MB"
+echo " Index Cache: ${ORLY_DB_INDEX_CACHE_MB}MB"
+echo " Query Cache: ${ORLY_QUERY_CACHE_SIZE_MB}MB"
+echo ""
+
+# Run benchmark
+./benchmark -events "${1:-1000}" -workers "${2:-4}" -datadir /tmp/benchmark_db_badger
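The script above takes the event count and worker count as positional arguments, so "./run-badger-benchmark.sh 50000 24" reproduces the suite's configuration. As a rough sketch of how such MB-denominated environment variables can be applied to Badger v4 options -- the helper name and defaults here are illustrative assumptions, not code from this patch:

    package main

    import (
        "fmt"
        "os"
        "strconv"

        badger "github.com/dgraph-io/badger/v4"
    )

    // cacheMB reads an MB-denominated value from the environment,
    // falling back to a default when unset or invalid. (Hypothetical
    // helper for illustration.)
    func cacheMB(key string, def int64) int64 {
        if v, err := strconv.ParseInt(os.Getenv(key), 10, 64); err == nil && v > 0 {
            return v
        }
        return def
    }

    func main() {
        // Shift by 20 to convert MB to bytes for the Badger options.
        opts := badger.DefaultOptions("/tmp/benchmark_db_badger").
            WithBlockCacheSize(cacheMB("ORLY_DB_BLOCK_CACHE_MB", 256) << 20).
            WithIndexCacheSize(cacheMB("ORLY_DB_INDEX_CACHE_MB", 128) << 20)
        fmt.Printf("block cache: %d bytes, index cache: %d bytes\n",
            opts.BlockCacheSize, opts.IndexCacheSize)
    }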
diff --git a/cmd/benchmark/run-benchmark.sh b/cmd/benchmark/run-benchmark.sh
index 77d4a44..001707c 100755
--- a/cmd/benchmark/run-benchmark.sh
+++ b/cmd/benchmark/run-benchmark.sh
@@ -31,8 +31,8 @@ fi

 # Create fresh data directories with correct permissions
 echo "Preparing data directories..."
-mkdir -p data/{next-orly,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,postgres}
-chmod 777 data/{next-orly,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,postgres}
+mkdir -p data/{next-orly-badger,next-orly-dgraph,dgraph-zero,dgraph-alpha,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,postgres}
+chmod 777 data/{next-orly-badger,next-orly-dgraph,dgraph-zero,dgraph-alpha,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,postgres}

 echo "Starting benchmark suite..."
 echo "This will automatically shut down all containers when the benchmark completes."
diff --git a/pkg/database/get-serial-by-id.go b/pkg/database/get-serial-by-id.go
index 431dec7..8c47ee4 100644
--- a/pkg/database/get-serial-by-id.go
+++ b/pkg/database/get-serial-by-id.go
@@ -10,12 +10,12 @@ import (
 	"next.orly.dev/pkg/database/indexes/types"
 	"next.orly.dev/pkg/encoders/event"
 	"next.orly.dev/pkg/encoders/filter"
-	"next.orly.dev/pkg/encoders/hex"
+	// "next.orly.dev/pkg/encoders/hex"
 	"next.orly.dev/pkg/encoders/tag"
 )

 func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
-	log.T.F("GetSerialById: input id=%s", hex.Enc(id))
+	// log.T.F("GetSerialById: input id=%s", hex.Enc(id))
 	var idxs []Range
 	if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.NewFromBytesSlice(id)}); chk.E(err) {
 		return
@@ -58,7 +58,7 @@ func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
 		return
 	}
 	if !idFound {
-		err = errorf.T("id not found in database: %s", hex.Enc(id))
+		// err = errorf.T("id not found in database: %s", hex.Enc(id))
 		return
 	}

@@ -80,7 +80,7 @@ func (d *D) GetSerialsByIds(ids *tag.T) (
 func (d *D) GetSerialsByIdsWithFilter(
 	ids *tag.T, fn func(ev *event.E, ser *types.Uint40) bool,
 ) (serials map[string]*types.Uint40, err error) {
-	log.T.F("GetSerialsByIdsWithFilter: input ids count=%d", ids.Len())
+	// log.T.F("GetSerialsByIdsWithFilter: input ids count=%d", ids.Len())
 	// Initialize the result map with estimated capacity to reduce reallocations
 	serials = make(map[string]*types.Uint40, ids.Len())

diff --git a/pkg/database/get-serials-by-range.go b/pkg/database/get-serials-by-range.go
index f56daee..421be82 100644
--- a/pkg/database/get-serials-by-range.go
+++ b/pkg/database/get-serials-by-range.go
@@ -33,7 +33,7 @@
 		}
 		iterCount := 0
 		it.Seek(endBoundary)
-		log.T.F("GetSerialsByRange: iterator valid=%v, sought to endBoundary", it.Valid())
+		// log.T.F("GetSerialsByRange: iterator valid=%v, sought to endBoundary", it.Valid())
 		for it.Valid() {
 			iterCount++
 			if iterCount > 100 {
@@ -46,12 +46,12 @@
 			key = item.Key()
 			keyWithoutSerial := key[:len(key)-5]
 			cmp := bytes.Compare(keyWithoutSerial, idx.Start)
-			log.T.F("GetSerialsByRange: iter %d, key prefix matches=%v, cmp=%d", iterCount, bytes.HasPrefix(key, idx.Start[:len(idx.Start)-8]), cmp)
+			// log.T.F("GetSerialsByRange: iter %d, key prefix matches=%v, cmp=%d", iterCount, bytes.HasPrefix(key, idx.Start[:len(idx.Start)-8]), cmp)
 			if cmp < 0 {
 				// didn't find it within the timestamp range
-				log.T.F("GetSerialsByRange: key out of range (cmp=%d), stopping iteration", cmp)
-				log.T.F("  keyWithoutSerial len=%d: %x", len(keyWithoutSerial), keyWithoutSerial)
-				log.T.F("  idx.Start len=%d: %x", len(idx.Start), idx.Start)
+				// log.T.F("GetSerialsByRange: key out of range (cmp=%d), stopping iteration", cmp)
+				// log.T.F("  keyWithoutSerial len=%d: %x", len(keyWithoutSerial), keyWithoutSerial)
+				// log.T.F("  idx.Start len=%d: %x", len(idx.Start), idx.Start)
 				return
 			}
 			ser := new(types.Uint40)
@@ -62,7 +62,7 @@
 				sers = append(sers, ser)
 				it.Next()
 			}
-			log.T.F("GetSerialsByRange: iteration complete, found %d serials", len(sers))
+			// log.T.F("GetSerialsByRange: iteration complete, found %d serials", len(sers))
 			return
 		},
 	); chk.E(err) {
diff --git a/pkg/database/query-for-deleted.go b/pkg/database/query-for-deleted.go
index f501629..10482a1 100644
--- a/pkg/database/query-for-deleted.go
+++ b/pkg/database/query-for-deleted.go
@@ -5,7 +5,6 @@ import (

 	"lol.mleku.dev/chk"
 	"lol.mleku.dev/errorf"
-	"lol.mleku.dev/log"
 	"next.orly.dev/pkg/database/indexes/types"
 	"next.orly.dev/pkg/encoders/event"
 	"next.orly.dev/pkg/encoders/filter"
@@ -21,7 +20,7 @@ import (
 // pubkeys that also may delete the event, normally only the author is allowed
 // to delete an event.
 func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
-	log.T.F("CheckForDeleted: checking event %x", ev.ID)
+	// log.T.F("CheckForDeleted: checking event %x", ev.ID)
 	keys := append([][]byte{ev.Pubkey}, admins...)
 	authors := tag.NewFromBytesSlice(keys...)
 	// if the event is addressable, check for a deletion event with the same
@@ -186,9 +185,9 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
 		return
 	}
 	// otherwise we check for a delete by event id
-	log.T.F("CheckForDeleted: checking for e-tag deletion of event %x", ev.ID)
-	log.T.F("CheckForDeleted: authors filter: %v", authors)
-	log.T.F("CheckForDeleted: looking for tag e with value: %s", hex.Enc(ev.ID))
+	// log.T.F("CheckForDeleted: checking for e-tag deletion of event %x", ev.ID)
+	// log.T.F("CheckForDeleted: authors filter: %v", authors)
+	// log.T.F("CheckForDeleted: looking for tag e with value: %s", hex.Enc(ev.ID))
 	var idxs []Range
 	if idxs, err = GetIndexesFromFilter(
 		&filter.F{
@@ -201,18 +200,18 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
 	); chk.E(err) {
 		return
 	}
-	log.T.F("CheckForDeleted: found %d indexes", len(idxs))
+	// log.T.F("CheckForDeleted: found %d indexes", len(idxs))
 	var sers types.Uint40s
-	for i, idx := range idxs {
-		log.T.F("CheckForDeleted: checking index %d: %v", i, idx)
+	for _, idx := range idxs {
+		// log.T.F("CheckForDeleted: checking index %d: %v", i, idx)
 		var s types.Uint40s
 		if s, err = d.GetSerialsByRange(idx); chk.E(err) {
 			return
 		}
-		log.T.F("CheckForDeleted: index %d returned %d serials", i, len(s))
+		// log.T.F("CheckForDeleted: index %d returned %d serials", i, len(s))
 		if len(s) > 0 {
 			// Any e-tag deletion found means the exact event was deleted and cannot be resubmitted
-			log.T.F("CheckForDeleted: found e-tag deletion for event %x", ev.ID)
+			// log.T.F("CheckForDeleted: found e-tag deletion for event %x", ev.ID)
 			err = errorf.E("blocked: %0x has been deleted", ev.ID)
 			return
 		}
diff --git a/pkg/database/save-event.go b/pkg/database/save-event.go
index c44a6ec..030e93b 100644
--- a/pkg/database/save-event.go
+++ b/pkg/database/save-event.go
@@ -180,10 +180,10 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
 	if idxs, err = GetIndexesForEvent(ev, serial); chk.E(err) {
 		return
 	}
-	log.T.F(
-		"SaveEvent: generated %d indexes for event %x (kind %d)", len(idxs),
-		ev.ID, ev.Kind,
-	)
+	// log.T.F(
+	// 	"SaveEvent: generated %d indexes for event %x (kind %d)", len(idxs),
+	// 	ev.ID, ev.Kind,
+	// )

 	// Serialize event once to check size
 	eventDataBuf := new(bytes.Buffer)
@@ -247,10 +247,10 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
 			if err = txn.Set(keyBuf.Bytes(), nil); chk.E(err) {
 				return
 			}
-			log.T.F(
-				"SaveEvent: stored small event inline (%d bytes)",
-				len(eventData),
-			)
+			// log.T.F(
+			// 	"SaveEvent: stored small event inline (%d bytes)",
+			// 	len(eventData),
+			// )
 		} else {
 			// Large event: store separately with evt prefix
 			keyBuf := new(bytes.Buffer)
@@ -260,10 +260,10 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
 			if err = txn.Set(keyBuf.Bytes(), eventData); chk.E(err) {
 				return
 			}
-			log.T.F(
-				"SaveEvent: stored large event separately (%d bytes)",
-				len(eventData),
-			)
+			// log.T.F(
+			// 	"SaveEvent: stored large event separately (%d bytes)",
+			// 	len(eventData),
+			// )
 		}

 		// Additionally, store replaceable/addressable events with specialized keys for direct access
@@ -293,7 +293,7 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
 			if err = txn.Set(keyBuf.Bytes(), nil); chk.E(err) {
 				return
 			}
-			log.T.F("SaveEvent: also stored addressable event with specialized key")
+			// log.T.F("SaveEvent: also stored addressable event with specialized key")
 		} else if isReplaceableEvent && isSmallEvent {
 			// Replaceable event: also store with rev|pubkey_hash|kind|size|data
 			pubHash := new(types.PubHash)
@@ -340,7 +340,7 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
 	// This ensures subsequent queries will see the new event
 	if d.queryCache != nil {
 		d.queryCache.Invalidate()
-		log.T.F("SaveEvent: invalidated query cache")
+		// log.T.F("SaveEvent: invalidated query cache")
 	}

 	return
diff --git a/pkg/dgraph/delete.go b/pkg/dgraph/delete.go
index b7dcb86..406300a 100644
--- a/pkg/dgraph/delete.go
+++ b/pkg/dgraph/delete.go
@@ -2,7 +2,9 @@ package dgraph

 import (
 	"context"
+	"encoding/json"
 	"fmt"
+	"time"

 	"github.com/dgraph-io/dgo/v230/protos/api"
 	"next.orly.dev/pkg/database/indexes/types"
@@ -98,13 +100,83 @@ func (d *D) DeleteEventBySerial(c context.Context, ser *types.Uint40, ev *event.
 	return nil
 }

-// DeleteExpired removes events that have passed their expiration time
+// DeleteExpired removes events that have passed their expiration time (NIP-40)
 func (d *D) DeleteExpired() {
-	// Query for events with expiration tags
-	// This is a stub - full implementation would:
-	// 1. Find events with "expiration" tag
-	// 2. Check if current time > expiration time
-	// 3. Delete those events
+	// Query for events that have an "expiration" tag
+	// NIP-40: events should have a tag ["expiration", "<unix timestamp>"]
+	query := `{
+		events(func: has(event.tags)) {
+			uid
+			event.id
+			event.tags
+			event.created_at
+		}
+	}`
+
+	resp, err := d.Query(context.Background(), query)
+	if err != nil {
+		d.Logger.Errorf("failed to query events for expiration: %v", err)
+		return
+	}
+
+	var result struct {
+		Events []struct {
+			UID       string `json:"uid"`
+			ID        string `json:"event.id"`
+			Tags      string `json:"event.tags"`
+			CreatedAt int64  `json:"event.created_at"`
+		} `json:"events"`
+	}
+
+	if err = unmarshalJSON(resp.Json, &result); err != nil {
+		d.Logger.Errorf("failed to parse events for expiration: %v", err)
+		return
+	}
+
+	now := time.Now().Unix()
+	deletedCount := 0
+
+	for _, ev := range result.Events {
+		// Parse tags
+		if ev.Tags == "" {
+			continue
+		}
+
+		var tags [][]string
+		if err := json.Unmarshal([]byte(ev.Tags), &tags); err != nil {
+			continue
+		}
+
+		// Look for expiration tag
+		var expirationTime int64
+		for _, tag := range tags {
+			if len(tag) >= 2 && tag[0] == "expiration" {
+				// Parse expiration timestamp
+				if _, err := fmt.Sscanf(tag[1], "%d", &expirationTime); err != nil {
+					continue
+				}
+				break
+			}
+		}
+
+		// If expiration time found and passed, delete the event
+		if expirationTime > 0 && now > expirationTime {
+			mutation := &api.Mutation{
+				DelNquads: []byte(fmt.Sprintf("<%s> * * .", ev.UID)),
+				CommitNow: true,
+			}
+
+			if _, err := d.Mutate(context.Background(), mutation); err != nil {
+				d.Logger.Warningf("failed to delete expired event %s: %v", ev.ID, err)
+			} else {
+				deletedCount++
+			}
+		}
+	}
+
+	if deletedCount > 0 {
+		d.Logger.Infof("deleted %d expired events", deletedCount)
+	}
 }

 // ProcessDelete processes a kind 5 deletion event
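For reference, the expiration check DeleteExpired performs can be exercised in isolation. A minimal self-contained sketch of the same NIP-40 rule -- an event is expired when its ["expiration", "<unix seconds>"] tag holds a timestamp in the past -- using strconv.ParseInt in place of the patch's fmt.Sscanf (the helper name is illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
        "strconv"
        "time"
    )

    // expiredAt returns the event's expiration timestamp, or 0 when
    // no well-formed "expiration" tag is present.
    func expiredAt(tagsJSON string) int64 {
        var tags [][]string
        if err := json.Unmarshal([]byte(tagsJSON), &tags); err != nil {
            return 0
        }
        for _, t := range tags {
            if len(t) >= 2 && t[0] == "expiration" {
                if ts, err := strconv.ParseInt(t[1], 10, 64); err == nil {
                    return ts
                }
            }
        }
        return 0
    }

    func main() {
        tags := `[["p","abcd"],["expiration","1700000000"]]`
        if ts := expiredAt(tags); ts > 0 && time.Now().Unix() > ts {
            fmt.Println("event expired at", ts)
        }
    }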
diff --git a/pkg/dgraph/dgraph.go b/pkg/dgraph/dgraph.go
index 8eaa93e..b805a99 100644
--- a/pkg/dgraph/dgraph.go
+++ b/pkg/dgraph/dgraph.go
@@ -4,6 +4,7 @@ package dgraph

 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -12,11 +13,11 @@ import (
 	"github.com/dgraph-io/dgo/v230"
 	"github.com/dgraph-io/dgo/v230/protos/api"
 	"google.golang.org/grpc"
-	"next.orly.dev/pkg/encoders/filter"
 	"google.golang.org/grpc/credentials/insecure"
 	"lol.mleku.dev"
 	"lol.mleku.dev/chk"
 	"next.orly.dev/pkg/database"
+	"next.orly.dev/pkg/encoders/filter"
 	"next.orly.dev/pkg/utils/apputil"
 )

@@ -198,8 +199,11 @@ func (d *D) Mutate(ctx context.Context, mutation *api.Mutation) (*api.Response,
 		return nil, fmt.Errorf("dgraph mutation failed: %w", err)
 	}

-	if err := txn.Commit(ctx); err != nil {
-		return nil, fmt.Errorf("dgraph commit failed: %w", err)
+	// Only commit if CommitNow is false (mutation didn't auto-commit)
+	if !mutation.CommitNow {
+		if err := txn.Commit(ctx); err != nil {
+			return nil, fmt.Errorf("dgraph commit failed: %w", err)
+		}
 	}

 	return resp, nil
@@ -256,12 +260,38 @@ func (d *D) SetLogLevel(level string) {
 	// d.Logger.SetLevel(lol.GetLogLevel(level))
 }

-// EventIdsBySerial retrieves event IDs by serial range (stub)
+// EventIdsBySerial retrieves event IDs by serial range
 func (d *D) EventIdsBySerial(start uint64, count int) (
 	evs []uint64, err error,
 ) {
-	err = fmt.Errorf("not implemented")
-	return
+	// Query for events in the specified serial range
+	query := fmt.Sprintf(`{
+		events(func: ge(event.serial, %d), orderdesc: event.serial, first: %d) {
+			event.serial
+		}
+	}`, start, count)
+
+	resp, err := d.Query(context.Background(), query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to query event IDs by serial: %w", err)
+	}
+
+	var result struct {
+		Events []struct {
+			Serial int64 `json:"event.serial"`
+		} `json:"events"`
+	}
+
+	if err = json.Unmarshal(resp.Json, &result); err != nil {
+		return nil, err
+	}
+
+	evs = make([]uint64, 0, len(result.Events))
+	for _, ev := range result.Events {
+		evs = append(evs, uint64(ev.Serial))
+	}
+
+	return evs, nil
 }

 // RunMigrations runs database migrations (no-op for dgraph)
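The Mutate change above guards against double-committing: in dgo, a mutation submitted with CommitNow=true finalizes the transaction itself, so a second explicit Commit on the same transaction fails. A minimal sketch of the pattern, assuming a connected *dgo.Dgraph client (connection setup elided; this is an illustration, not the patch's full method):

    package dgraphsketch

    import (
        "context"
        "fmt"

        "github.com/dgraph-io/dgo/v230"
        "github.com/dgraph-io/dgo/v230/protos/api"
    )

    func mutateOnce(ctx context.Context, dg *dgo.Dgraph, mu *api.Mutation) (*api.Response, error) {
        txn := dg.NewTxn()
        defer txn.Discard(ctx) // harmless after a successful commit

        resp, err := txn.Mutate(ctx, mu)
        if err != nil {
            return nil, fmt.Errorf("dgraph mutation failed: %w", err)
        }
        // With CommitNow=true the transaction is already finished, and a
        // second Commit would error out -- hence the guard.
        if !mu.CommitNow {
            if err := txn.Commit(ctx); err != nil {
                return nil, fmt.Errorf("dgraph commit failed: %w", err)
            }
        }
        return resp, nil
    }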
diff --git a/pkg/dgraph/fetch-event.go b/pkg/dgraph/fetch-event.go
index cfba701..29e010f 100644
--- a/pkg/dgraph/fetch-event.go
+++ b/pkg/dgraph/fetch-event.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"strings"

 	"next.orly.dev/pkg/database"
 	"next.orly.dev/pkg/database/indexes/types"
@@ -54,15 +55,16 @@ func (d *D) FetchEventsBySerials(serials []*types.Uint40) (
 		return make(map[uint64]*event.E), nil
 	}

-	// Build query for multiple serials
-	serialStrs := make([]string, len(serials))
+	// Build a filter for multiple serials using OR conditions
+	serialConditions := make([]string, len(serials))
 	for i, ser := range serials {
-		serialStrs[i] = fmt.Sprintf("%d", ser.Get())
+		serialConditions[i] = fmt.Sprintf("eq(event.serial, %d)", ser.Get())
 	}
+	serialFilter := strings.Join(serialConditions, " OR ")

-	// Use uid() function for efficient multi-get
+	// Query with proper batch filtering
 	query := fmt.Sprintf(`{
-		events(func: uid(%s)) {
+		events(func: has(event.serial)) @filter(%s) {
 			event.id
 			event.kind
 			event.created_at
@@ -72,24 +74,70 @@ func (d *D) FetchEventsBySerials(serials []*types.Uint40) (
 			event.tags
 			event.serial
 		}
-	}`, serialStrs[0]) // Simplified - in production you'd handle multiple UIDs properly
+	}`, serialFilter)

 	resp, err := d.Query(context.Background(), query)
 	if err != nil {
 		return nil, fmt.Errorf("failed to fetch events by serials: %w", err)
 	}

-	evs, err := d.parseEventsFromResponse(resp.Json)
-	if err != nil {
+	// Parse the response including serial numbers
+	var result struct {
+		Events []struct {
+			ID        string `json:"event.id"`
+			Kind      int    `json:"event.kind"`
+			CreatedAt int64  `json:"event.created_at"`
+			Content   string `json:"event.content"`
+			Sig       string `json:"event.sig"`
+			Pubkey    string `json:"event.pubkey"`
+			Tags      string `json:"event.tags"`
+			Serial    int64  `json:"event.serial"`
+		} `json:"events"`
+	}
+
+	if err = json.Unmarshal(resp.Json, &result); err != nil {
 		return nil, err
 	}

-	// Map events by serial
+	// Map events by their serial numbers
 	events = make(map[uint64]*event.E)
-	for i, ser := range serials {
-		if i < len(evs) {
-			events[ser.Get()] = evs[i]
+	for _, ev := range result.Events {
+		// Decode hex strings
+		id, err := hex.Dec(ev.ID)
+		if err != nil {
+			continue
 		}
+		sig, err := hex.Dec(ev.Sig)
+		if err != nil {
+			continue
+		}
+		pubkey, err := hex.Dec(ev.Pubkey)
+		if err != nil {
+			continue
+		}
+
+		// Parse tags from JSON
+		var tags tag.S
+		if ev.Tags != "" {
+			if err := json.Unmarshal([]byte(ev.Tags), &tags); err != nil {
+				continue
+			}
+		}
+
+		// Create event
+		e := &event.E{
+			Kind:      uint16(ev.Kind),
+			CreatedAt: ev.CreatedAt,
+			Content:   []byte(ev.Content),
+			Tags:      &tags,
+		}
+
+		// Copy fixed-size arrays
+		copy(e.ID[:], id)
+		copy(e.Sig[:], sig)
+		copy(e.Pubkey[:], pubkey)
+
+		events[uint64(ev.Serial)] = e
 	}

 	return events, nil
@@ -140,17 +188,54 @@ func (d *D) GetSerialsByIds(ids *tag.T) (
 		return serials, nil
 	}

-	// Query each ID individually (simplified implementation)
-	for _, id := range ids.T {
-		if len(id) >= 2 {
-			idStr := string(id[1])
-			serial, err := d.GetSerialById([]byte(idStr))
-			if err == nil {
-				serials[idStr] = serial
-			}
+	// Build batch query for all IDs at once
+	idConditions := make([]string, 0, len(ids.T))
+	idMap := make(map[string][]byte) // Map hex ID to original bytes
+
+	for _, idBytes := range ids.T {
+		if len(idBytes) > 0 {
+			idStr := hex.Enc(idBytes)
+			idConditions = append(idConditions, fmt.Sprintf("eq(event.id, %q)", idStr))
+			idMap[idStr] = idBytes
 		}
 	}

+	if len(idConditions) == 0 {
+		return serials, nil
+	}
+
+	// Create single query with OR conditions
+	idFilter := strings.Join(idConditions, " OR ")
+	query := fmt.Sprintf(`{
+		events(func: has(event.id)) @filter(%s) {
+			event.id
+			event.serial
+		}
+	}`, idFilter)
+
+	resp, err := d.Query(context.Background(), query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to batch query serials by IDs: %w", err)
+	}
+
+	var result struct {
+		Events []struct {
+			ID     string `json:"event.id"`
+			Serial int64  `json:"event.serial"`
+		} `json:"events"`
+	}
+
+	if err = json.Unmarshal(resp.Json, &result); err != nil {
+		return nil, err
+	}
+
+	// Map results back
+	for _, ev := range result.Events {
+		serial := types.Uint40{}
+		serial.Set(uint64(ev.Serial))
+		serials[ev.ID] = &serial
+	}
+
 	return serials, nil
 }

@@ -191,10 +276,47 @@ func (d *D) GetSerialsByIdsWithFilter(
 func (d *D) GetSerialsByRange(idx database.Range) (
 	serials types.Uint40s, err error,
 ) {
-	// This would need to be implemented based on how ranges are defined
-	// For now, returning not implemented
-	err = fmt.Errorf("not implemented")
-	return
+	// Range represents a byte-prefix range for index scanning
+	// For dgraph, we need to convert this to a query on indexed fields
+	// The range is typically used for scanning event IDs or other hex-encoded keys
+
+	if len(idx.Start) == 0 && len(idx.End) == 0 {
+		return nil, fmt.Errorf("empty range provided")
+	}
+
+	startStr := hex.Enc(idx.Start)
+	endStr := hex.Enc(idx.End)
+
+	// Query for events with IDs in the specified range
+	query := fmt.Sprintf(`{
+		events(func: ge(event.id, %q)) @filter(le(event.id, %q)) {
+			event.serial
+		}
+	}`, startStr, endStr)
+
+	resp, err := d.Query(context.Background(), query)
+	if err != nil {
+		return nil, fmt.Errorf("failed to query serials by range: %w", err)
+	}
+
+	var result struct {
+		Events []struct {
+			Serial int64 `json:"event.serial"`
+		} `json:"events"`
+	}
+
+	if err = json.Unmarshal(resp.Json, &result); err != nil {
+		return nil, err
+	}
+
+	serials = make([]*types.Uint40, 0, len(result.Events))
+	for _, ev := range result.Events {
+		serial := types.Uint40{}
+		serial.Set(uint64(ev.Serial))
+		serials = append(serials, &serial)
+	}
+
+	return serials, nil
 }

 // GetFullIdPubkeyBySerial retrieves ID and pubkey for a serial number
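The batch fetch above replaces the single-UID simplification with one eq() term per serial, joined with OR under a has(event.serial) root function. A self-contained sketch of the filter shape it produces (the serial values here are made up for illustration):

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        serials := []uint64{7, 11, 13}
        conds := make([]string, len(serials))
        for i, s := range serials {
            conds[i] = fmt.Sprintf("eq(event.serial, %d)", s)
        }
        // Produces: eq(event.serial, 7) OR eq(event.serial, 11) OR eq(event.serial, 13)
        query := fmt.Sprintf(`{
        events(func: has(event.serial)) @filter(%s) {
            event.serial
        }
    }`, strings.Join(conds, " OR "))
        fmt.Println(query)
    }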
diff --git a/pkg/dgraph/import-export.go b/pkg/dgraph/import-export.go
index 660722d..70ceefa 100644
--- a/pkg/dgraph/import-export.go
+++ b/pkg/dgraph/import-export.go
@@ -6,8 +6,10 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"strings"

 	"next.orly.dev/pkg/encoders/event"
+	"next.orly.dev/pkg/encoders/hex"
 )

 // Import imports events from a reader (JSONL format)
@@ -17,11 +19,83 @@ func (d *D) Import(rr io.Reader) {

 // Export exports events to a writer (JSONL format)
 func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
-	// Query all events or events for specific pubkeys
-	// Write as JSONL
+	// Build query based on whether pubkeys are specified
+	var query string

-	// Stub implementation
-	fmt.Fprintf(w, "# Export not yet implemented for dgraph\n")
+	if len(pubkeys) > 0 {
+		// Build pubkey filter
+		pubkeyStrs := make([]string, len(pubkeys))
+		for i, pk := range pubkeys {
+			pubkeyStrs[i] = fmt.Sprintf("eq(event.pubkey, %q)", hex.Enc(pk))
+		}
+		pubkeyFilter := strings.Join(pubkeyStrs, " OR ")
+
+		query = fmt.Sprintf(`{
+			events(func: has(event.id)) @filter(%s) {
+				event.id
+				event.kind
+				event.created_at
+				event.content
+				event.sig
+				event.pubkey
+				event.tags
+			}
+		}`, pubkeyFilter)
+	} else {
+		// Export all events
+		query = `{
+			events(func: has(event.id)) {
+				event.id
+				event.kind
+				event.created_at
+				event.content
+				event.sig
+				event.pubkey
+				event.tags
+			}
+		}`
+	}
+
+	// Execute query
+	resp, err := d.Query(c, query)
+	if err != nil {
+		d.Logger.Errorf("failed to query events for export: %v", err)
+		fmt.Fprintf(w, "# Error: failed to query events: %v\n", err)
+		return
+	}
+
+	// Parse events
+	evs, err := d.parseEventsFromResponse(resp.Json)
+	if err != nil {
+		d.Logger.Errorf("failed to parse events for export: %v", err)
+		fmt.Fprintf(w, "# Error: failed to parse events: %v\n", err)
+		return
+	}
+
+	// Write header comment
+	fmt.Fprintf(w, "# Exported %d events from dgraph\n", len(evs))
+
+	// Write each event as JSONL
+	count := 0
+	for _, ev := range evs {
+		jsonData, err := json.Marshal(ev)
+		if err != nil {
+			d.Logger.Warningf("failed to marshal event: %v", err)
+			continue
+		}
+
+		if _, err := fmt.Fprintf(w, "%s\n", jsonData); err != nil {
+			d.Logger.Errorf("failed to write event: %v", err)
+			return
+		}
+
+		count++
+		if count%1000 == 0 {
+			d.Logger.Infof("exported %d events", count)
+		}
+	}
+
+	d.Logger.Infof("export complete: %d events written", count)
 }

 // ImportEventsFromReader imports events from a reader
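Export now emits one JSON event per line, with "#"-prefixed lines reserved for comments such as the header and error markers. A sketch of a line-oriented consumer for that stream, decoding into a generic map purely for illustration:

    package main

    import (
        "bufio"
        "encoding/json"
        "fmt"
        "strings"
    )

    func main() {
        input := "# Exported 1 events from dgraph\n{\"kind\":1,\"content\":\"hello\"}\n"
        sc := bufio.NewScanner(strings.NewReader(input))
        for sc.Scan() {
            line := sc.Text()
            if line == "" || strings.HasPrefix(line, "#") {
                continue // skip comments and blank lines
            }
            var ev map[string]any
            if err := json.Unmarshal([]byte(line), &ev); err != nil {
                fmt.Println("skipping malformed line:", err)
                continue
            }
            fmt.Println("kind:", ev["kind"])
        }
    }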
diff --git a/pkg/dgraph/query-events.go b/pkg/dgraph/query-events.go
index 45905b2..7b8a7ee 100644
--- a/pkg/dgraph/query-events.go
+++ b/pkg/dgraph/query-events.go
@@ -48,6 +48,20 @@ func (d *D) QueryEventsWithOptions(

 // buildDQLQuery constructs a DQL query from a Nostr filter
 func (d *D) buildDQLQuery(f *filter.F, includeDeleteEvents bool) string {
+	return d.buildDQLQueryWithFields(f, includeDeleteEvents, []string{
+		"uid",
+		"event.id",
+		"event.kind",
+		"event.created_at",
+		"event.content",
+		"event.sig",
+		"event.pubkey",
+		"event.tags",
+	})
+}
+
+// buildDQLQueryWithFields constructs a DQL query with custom field selection
+func (d *D) buildDQLQueryWithFields(f *filter.F, includeDeleteEvents bool, fields []string) string {
 	var conditions []string
 	var funcQuery string

@@ -139,18 +153,14 @@ func (d *D) buildDQLQuery(f *filter.F, includeDeleteEvents bool) string {
 		limitStr = fmt.Sprintf(", first: %d", f.Limit)
 	}

+	// Build field list
+	fieldStr := strings.Join(fields, "\n\t\t\t")
+
 	query := fmt.Sprintf(`{
 		events(func: %s%s%s%s) {
-			uid
-			event.id
-			event.kind
-			event.created_at
-			event.content
-			event.sig
-			event.pubkey
-			event.tags
+			%s
 		}
-	}`, funcQuery, filterStr, orderBy, limitStr)
+	}`, funcQuery, filterStr, orderBy, limitStr, fieldStr)

 	return query
 }
@@ -257,12 +267,8 @@ func (d *D) QueryDeleteEventsByTargetId(c context.Context, targetEventId []byte)
 func (d *D) QueryForSerials(c context.Context, f *filter.F) (
 	serials types.Uint40s, err error,
 ) {
-	// Build query
-	query := d.buildDQLQuery(f, false)
-
-	// Modify query to only return serial numbers
-	query = strings.Replace(query, "event.id\n\t\t\tevent.kind", "event.serial", 1)
-	query = strings.Replace(query, "\t\t\tevent.created_at\n\t\t\tevent.content\n\t\t\tevent.sig\n\t\t\tevent.pubkey\n\t\t\tevent.tags", "", 1)
+	// Build query requesting only serial numbers
+	query := d.buildDQLQueryWithFields(f, false, []string{"event.serial"})

 	resp, err := d.Query(c, query)
 	if err != nil {
@@ -293,11 +299,13 @@ func (d *D) QueryForSerials(c context.Context, f *filter.F) (
 func (d *D) QueryForIds(c context.Context, f *filter.F) (
 	idPkTs []*store.IdPkTs, err error,
 ) {
-	// Build query
-	query := d.buildDQLQuery(f, false)
-
-	// Modify query to only return ID, pubkey, created_at, serial
-	query = strings.Replace(query, "event.kind\n\t\t\tevent.created_at\n\t\t\tevent.content\n\t\t\tevent.sig\n\t\t\tevent.pubkey\n\t\t\tevent.tags", "event.id\n\t\t\tevent.pubkey\n\t\t\tevent.created_at\n\t\t\tevent.serial", 1)
+	// Build query requesting only ID, pubkey, created_at, serial
+	query := d.buildDQLQueryWithFields(f, false, []string{
+		"event.id",
+		"event.pubkey",
+		"event.created_at",
+		"event.serial",
+	})

 	resp, err := d.Query(c, query)
 	if err != nil {
@@ -342,11 +350,8 @@ func (d *D) QueryForIds(c context.Context, f *filter.F) (
 func (d *D) CountEvents(c context.Context, f *filter.F) (
 	count int, approximate bool, err error,
 ) {
-	// Build query with count
-	query := d.buildDQLQuery(f, false)
-
-	// Modify to count instead of returning full data
-	query = strings.Replace(query, "uid\n\t\t\tevent.id\n\t\t\tevent.kind\n\t\t\tevent.created_at\n\t\t\tevent.content\n\t\t\tevent.sig\n\t\t\tevent.pubkey\n\t\t\tevent.tags", "count(uid)", 1)
+	// Build query requesting only count
+	query := d.buildDQLQueryWithFields(f, false, []string{"count(uid)"})

 	resp, err := d.Query(c, query)
 	if err != nil {
diff --git a/pkg/dgraph/save-event.go b/pkg/dgraph/save-event.go
index 5d845e2..1f8436d 100644
--- a/pkg/dgraph/save-event.go
+++ b/pkg/dgraph/save-event.go
@@ -127,10 +127,8 @@ func (d *D) buildEventNQuads(ev *event.E, serial uint64) string {

 // GetSerialsFromFilter returns event serials matching a filter
 func (d *D) GetSerialsFromFilter(f *filter.F) (serials types.Uint40s, err error) {
-	// For dgraph, we'll use the event.serial field
-	// This is a stub implementation
-	err = fmt.Errorf("not implemented")
-	return
+	// Use QueryForSerials which already implements the proper filter logic
+	return d.QueryForSerials(context.Background(), f)
 }

 // WouldReplaceEvent checks if an event would replace existing events
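Taken together, the query-builder refactor means each caller declares its projection instead of patching the generated query text with strings.Replace, which was brittle against whitespace changes. A standalone sketch of the projection mechanics only (the helper name is illustrative; the real method also assembles filters, ordering, and limits):

    package main

    import (
        "fmt"
        "strings"
    )

    // dqlWithFields renders the shared query skeleton with a caller-chosen
    // field list, mirroring how buildDQLQueryWithFields swaps projections.
    func dqlWithFields(funcQuery string, fields []string) string {
        return fmt.Sprintf(`{
        events(func: %s) {
            %s
        }
    }`, funcQuery, strings.Join(fields, "\n\t\t"))
    }

    func main() {
        // QueryForSerials-style projection: only event.serial is requested.
        fmt.Println(dqlWithFields("has(event.serial)", []string{"event.serial"}))
        // CountEvents-style projection: a count instead of full event data.
        fmt.Println(dqlWithFields("has(event.id)", []string{"count(uid)"}))
    }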