add first draft graph query implementation
Some checks failed: Go / build-and-release (push) has been cancelled
@@ -1,7 +1,17 @@
{
"permissions": {
"allow": [
"Bash(ls:*)"
"Bash(ls:*)",
"Bash(go build:*)",
"Bash(export LD_LIBRARY_PATH:*)",
"Bash(/tmp/benchmark_test:*)",
"Bash(grep:*)",
"Bash(go doc:*)",
"Bash(CGO_ENABLED=0 go build:*)",
"Bash(docker compose:*)",
"Bash(./run-benchmark.sh:*)",
"Bash(tee:*)",
"Bash(sudo rm:*)"
],
"deny": [],
"ask": []

@@ -1,5 +1,6 @@
# Dockerfile for benchmark runner
# Uses pure Go build with purego for dynamic libsecp256k1 loading
# Fetches latest tag from git repository for stable builds

# Use Debian-based Go image to match runtime stage (avoids musl/glibc linker mismatch)
FROM golang:1.25-bookworm AS builder
@@ -10,12 +11,15 @@ RUN apt-get update && apt-get install -y --no-install-recommends git ca-certific
# Set working directory
WORKDIR /build

# Copy go modules
COPY go.mod go.sum ./
RUN go mod download
# Clone the repository and checkout the latest tag
# Using git.nostrdev.com (primary repo, most up-to-date)
RUN git clone https://git.nostrdev.com/mleku/next.orly.dev.git . && \
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "main") && \
echo "Building benchmark from ORLY version: ${LATEST_TAG}" && \
git checkout "${LATEST_TAG}"

# Copy source code
COPY . .
# Download dependencies
RUN go mod download

# Build the benchmark tool with CGO disabled (uses purego for crypto)
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o benchmark ./cmd/benchmark
@@ -36,8 +40,8 @@ WORKDIR /app
# Copy benchmark binary (libsecp256k1.so.1 is already installed via apt)
COPY --from=builder /build/benchmark /app/benchmark

# Copy benchmark runner script
COPY cmd/benchmark/benchmark-runner.sh /app/benchmark-runner
# Copy benchmark runner script from the local code
COPY --from=builder /build/cmd/benchmark/benchmark-runner.sh /app/benchmark-runner

# Make scripts executable
RUN chmod +x /app/benchmark-runner

@@ -1,5 +1,6 @@
# Dockerfile for next.orly.dev relay (benchmark version)
# Uses pure Go build with purego for dynamic libsecp256k1 loading
# Fetches latest tag from git repository instead of local code

# Stage 1: Build stage
# Use Debian-based Go image to match runtime stage (avoids musl/glibc linker mismatch)
@@ -11,12 +12,15 @@ RUN apt-get update && apt-get install -y --no-install-recommends git make && rm
# Set working directory
WORKDIR /build

# Copy go mod files first for better layer caching
COPY go.mod go.sum ./
RUN go mod download
# Clone the repository and checkout the latest tag
# Using git.nostrdev.com (primary repo, most up-to-date)
RUN git clone https://git.nostrdev.com/mleku/next.orly.dev.git . && \
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "main") && \
echo "Building ORLY version: ${LATEST_TAG}" && \
git checkout "${LATEST_TAG}"

# Copy source code
COPY . .
# Download dependencies
RUN go mod download

# Build the relay with CGO disabled (uses purego for crypto)
# Include debug symbols for profiling

@@ -27,6 +27,7 @@ echo "Timestamp: $(date)"
|
||||
echo "Events per test: ${BENCHMARK_EVENTS}"
|
||||
echo "Concurrent workers: ${BENCHMARK_WORKERS}"
|
||||
echo "Test duration: ${BENCHMARK_DURATION}"
|
||||
echo "Graph traversal: ${BENCHMARK_GRAPH_TRAVERSAL:-false}"
|
||||
echo "Output directory: ${RUN_DIR}"
|
||||
echo "=================================================="
|
||||
|
||||
@@ -70,12 +71,12 @@ run_benchmark() {
|
||||
local relay_name="$1"
|
||||
local relay_url="$2"
|
||||
local output_file="$3"
|
||||
|
||||
|
||||
echo ""
|
||||
echo "=================================================="
|
||||
echo "Testing ${relay_name} at ws://${relay_url}"
|
||||
echo "=================================================="
|
||||
|
||||
|
||||
# Wait for relay to be ready
|
||||
if ! wait_for_relay "${relay_name}" "${relay_url}"; then
|
||||
echo "ERROR: ${relay_name} is not responding, skipping..."
|
||||
@@ -84,14 +85,14 @@ run_benchmark() {
|
||||
echo "ERROR: Connection failed" >> "${output_file}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
# Run the benchmark
|
||||
echo "Running benchmark against ${relay_name}..."
|
||||
|
||||
|
||||
# Create temporary directory for this relay's data
|
||||
TEMP_DATA_DIR="/tmp/benchmark_${relay_name}_$$"
|
||||
mkdir -p "${TEMP_DATA_DIR}"
|
||||
|
||||
|
||||
# Run benchmark and capture both stdout and stderr
|
||||
if /app/benchmark \
|
||||
-datadir="${TEMP_DATA_DIR}" \
|
||||
@@ -99,9 +100,9 @@ run_benchmark() {
|
||||
-workers="${BENCHMARK_WORKERS}" \
|
||||
-duration="${BENCHMARK_DURATION}" \
|
||||
> "${output_file}" 2>&1; then
|
||||
|
||||
|
||||
echo "✓ Benchmark completed successfully for ${relay_name}"
|
||||
|
||||
|
||||
# Add relay identification to the report
|
||||
echo "" >> "${output_file}"
|
||||
echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
|
||||
@@ -111,7 +112,7 @@ run_benchmark() {
|
||||
echo " Events: ${BENCHMARK_EVENTS}" >> "${output_file}"
|
||||
echo " Workers: ${BENCHMARK_WORKERS}" >> "${output_file}"
|
||||
echo " Duration: ${BENCHMARK_DURATION}" >> "${output_file}"
|
||||
|
||||
|
||||
else
|
||||
echo "✗ Benchmark failed for ${relay_name}"
|
||||
echo "" >> "${output_file}"
|
||||
@@ -120,7 +121,67 @@ run_benchmark() {
|
||||
echo "STATUS: FAILED" >> "${output_file}"
|
||||
echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
|
||||
fi
|
||||
|
||||
|
||||
# Clean up temporary data
|
||||
rm -rf "${TEMP_DATA_DIR}"
|
||||
}
|
||||
|
||||
# Function to run network graph traversal benchmark against a specific relay
|
||||
run_graph_traversal_benchmark() {
|
||||
local relay_name="$1"
|
||||
local relay_url="$2"
|
||||
local output_file="$3"
|
||||
|
||||
echo ""
|
||||
echo "=================================================="
|
||||
echo "Graph Traversal Benchmark: ${relay_name} at ws://${relay_url}"
|
||||
echo "=================================================="
|
||||
|
||||
# Wait for relay to be ready
|
||||
if ! wait_for_relay "${relay_name}" "${relay_url}"; then
|
||||
echo "ERROR: ${relay_name} is not responding, skipping graph traversal..."
|
||||
echo "RELAY: ${relay_name}" > "${output_file}"
|
||||
echo "STATUS: FAILED - Relay not responding" >> "${output_file}"
|
||||
echo "ERROR: Connection failed" >> "${output_file}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Run the network graph traversal benchmark
|
||||
echo "Running network graph traversal benchmark against ${relay_name}..."
|
||||
|
||||
# Create temporary directory for this relay's data
|
||||
TEMP_DATA_DIR="/tmp/graph_benchmark_${relay_name}_$$"
|
||||
mkdir -p "${TEMP_DATA_DIR}"
|
||||
|
||||
# Run graph traversal benchmark via WebSocket
|
||||
if /app/benchmark \
|
||||
-graph-network \
|
||||
-relay-url="ws://${relay_url}" \
|
||||
-datadir="${TEMP_DATA_DIR}" \
|
||||
-workers="${BENCHMARK_WORKERS}" \
|
||||
> "${output_file}" 2>&1; then
|
||||
|
||||
echo "✓ Graph traversal benchmark completed successfully for ${relay_name}"
|
||||
|
||||
# Add relay identification to the report
|
||||
echo "" >> "${output_file}"
|
||||
echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
|
||||
echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
|
||||
echo "TEST_TYPE: Graph Traversal (100k pubkeys, 3-degree follows)" >> "${output_file}"
|
||||
echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
|
||||
echo "BENCHMARK_CONFIG:" >> "${output_file}"
|
||||
echo " Workers: ${BENCHMARK_WORKERS}" >> "${output_file}"
|
||||
|
||||
else
|
||||
echo "✗ Graph traversal benchmark failed for ${relay_name}"
|
||||
echo "" >> "${output_file}"
|
||||
echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
|
||||
echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
|
||||
echo "TEST_TYPE: Graph Traversal" >> "${output_file}"
|
||||
echo "STATUS: FAILED" >> "${output_file}"
|
||||
echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
|
||||
fi
|
||||
|
||||
# Clean up temporary data
|
||||
rm -rf "${TEMP_DATA_DIR}"
|
||||
}
|
||||
@@ -234,22 +295,50 @@ EOF
|
||||
# Main execution
|
||||
echo "Starting relay benchmark suite..."
|
||||
|
||||
# Check if graph traversal mode is enabled
|
||||
BENCHMARK_GRAPH_TRAVERSAL="${BENCHMARK_GRAPH_TRAVERSAL:-false}"
|
||||
|
||||
# Parse targets and run benchmarks
|
||||
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
|
||||
if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
|
||||
echo "WARNING: Skipping invalid target: ${relay_name}:${relay_port}"
|
||||
continue
|
||||
fi
|
||||
|
||||
|
||||
relay_url="${relay_name}:${relay_port}"
|
||||
output_file="${RUN_DIR}/${relay_name}_results.txt"
|
||||
|
||||
|
||||
run_benchmark "${relay_name}" "${relay_url}" "${output_file}"
|
||||
|
||||
|
||||
# Small delay between tests
|
||||
sleep 5
|
||||
done
|
||||
|
||||
# Run graph traversal benchmarks if enabled
|
||||
if [ "${BENCHMARK_GRAPH_TRAVERSAL}" = "true" ]; then
|
||||
echo ""
|
||||
echo "=================================================="
|
||||
echo "Starting Graph Traversal Benchmark Suite"
|
||||
echo "=================================================="
|
||||
echo "This tests 100k pubkeys with 1-1000 follows each"
|
||||
echo "and performs 3-degree traversal queries"
|
||||
echo "=================================================="
|
||||
|
||||
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
|
||||
if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
relay_url="${relay_name}:${relay_port}"
|
||||
output_file="${RUN_DIR}/${relay_name}_graph_traversal_results.txt"
|
||||
|
||||
run_graph_traversal_benchmark "${relay_name}" "${relay_url}" "${output_file}"
|
||||
|
||||
# Longer delay between graph traversal tests (they're more intensive)
|
||||
sleep 10
|
||||
done
|
||||
fi
|
||||
|
||||
# Generate aggregate report
|
||||
generate_aggregate_report
|
||||
|
||||
|
||||
@@ -1,11 +1,11 @@
version: "3.8"

services:
# Next.orly.dev relay with Badger (this repository)
# Next.orly.dev relay with Badger (fetches latest tag from git)
next-orly-badger:
build:
context: ../..
dockerfile: cmd/benchmark/Dockerfile.next-orly
context: .
dockerfile: Dockerfile.next-orly
container_name: benchmark-next-orly-badger
environment:
- ORLY_DATA_DIR=/data
@@ -26,11 +26,11 @@ services:
retries: 3
start_period: 40s

# Next.orly.dev relay with Neo4j (this repository)
# Next.orly.dev relay with Neo4j (fetches latest tag from git)
next-orly-neo4j:
build:
context: ../..
dockerfile: cmd/benchmark/Dockerfile.next-orly
context: .
dockerfile: Dockerfile.next-orly
container_name: benchmark-next-orly-neo4j
environment:
- ORLY_DATA_DIR=/data
@@ -219,11 +219,11 @@ services:
retries: 10
start_period: 30s

# Benchmark runner
# Benchmark runner (fetches latest tag from git)
benchmark-runner:
build:
context: ../..
dockerfile: cmd/benchmark/Dockerfile.benchmark
context: .
dockerfile: Dockerfile.benchmark
container_name: benchmark-runner
depends_on:
next-orly-badger:
@@ -247,6 +247,7 @@ services:
- BENCHMARK_EVENTS=50000
- BENCHMARK_WORKERS=24
- BENCHMARK_DURATION=60s
- BENCHMARK_GRAPH_TRAVERSAL=${BENCHMARK_GRAPH_TRAVERSAL:-false}
volumes:
- ./reports:/reports
networks:

cmd/benchmark/graph_traversal_benchmark.go (new file, 520 lines)
@@ -0,0 +1,520 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/filter"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
"git.mleku.dev/mleku/nostr/encoders/kind"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
"lukechampine.com/frand"
|
||||
"next.orly.dev/pkg/database"
|
||||
)
|
||||
|
||||
const (
|
||||
// GraphBenchNumPubkeys is the number of pubkeys to generate for graph benchmark
|
||||
GraphBenchNumPubkeys = 100000
|
||||
// GraphBenchMinFollows is the minimum number of follows per pubkey
|
||||
GraphBenchMinFollows = 1
|
||||
// GraphBenchMaxFollows is the maximum number of follows per pubkey
|
||||
GraphBenchMaxFollows = 1000
|
||||
// GraphBenchSeed is the deterministic seed for frand PRNG (fits in uint64)
|
||||
GraphBenchSeed uint64 = 0x4E6F737472 // "Nostr" in hex
|
||||
// GraphBenchTraversalDepth is the depth of graph traversal (3 = third degree)
|
||||
GraphBenchTraversalDepth = 3
|
||||
)
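// Back-of-the-envelope scale of the generated graph: follow counts are drawn
// uniformly from 1..1000, so the mean is roughly 500 follows per pubkey and
// the full data set carries about 100,000 * 500 = 50 million "p" tags. With
// ~500 follows per hop, a 3-degree expansion from a single pubkey would
// nominally reach 500^3 = 125 million keys, far more than the 100k that
// exist, so each traversal is expected to touch most of the graph and the
// reported "avg pubkeys found per traversal" should approach
// GraphBenchNumPubkeys.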
|
||||
|
||||
// GraphTraversalBenchmark benchmarks graph traversal using NIP-01 style queries
|
||||
type GraphTraversalBenchmark struct {
|
||||
config *BenchmarkConfig
|
||||
db *database.D
|
||||
results []*BenchmarkResult
|
||||
mu sync.RWMutex
|
||||
|
||||
// Cached data for the benchmark
|
||||
pubkeys [][]byte // 100k pubkeys as 32-byte arrays
|
||||
signers []*p8k.Signer // signers for each pubkey
|
||||
follows [][]int // follows[i] = list of indices that pubkey[i] follows
|
||||
rng *frand.RNG // deterministic PRNG
|
||||
}
|
||||
|
||||
// NewGraphTraversalBenchmark creates a new graph traversal benchmark
|
||||
func NewGraphTraversalBenchmark(config *BenchmarkConfig, db *database.D) *GraphTraversalBenchmark {
|
||||
return &GraphTraversalBenchmark{
|
||||
config: config,
|
||||
db: db,
|
||||
results: make([]*BenchmarkResult, 0),
|
||||
rng: frand.NewCustom(make([]byte, 32), 1024, 12), // ChaCha12 with seed buffer
|
||||
}
|
||||
}
|
||||
|
||||
// initializeDeterministicRNG initializes the PRNG with deterministic seed
|
||||
func (g *GraphTraversalBenchmark) initializeDeterministicRNG() {
|
||||
// Create seed buffer from GraphBenchSeed (uint64 spread across 8 bytes)
|
||||
seedBuf := make([]byte, 32)
|
||||
seed := GraphBenchSeed
|
||||
seedBuf[0] = byte(seed >> 56)
|
||||
seedBuf[1] = byte(seed >> 48)
|
||||
seedBuf[2] = byte(seed >> 40)
|
||||
seedBuf[3] = byte(seed >> 32)
|
||||
seedBuf[4] = byte(seed >> 24)
|
||||
seedBuf[5] = byte(seed >> 16)
|
||||
seedBuf[6] = byte(seed >> 8)
|
||||
seedBuf[7] = byte(seed)
|
||||
g.rng = frand.NewCustom(seedBuf, 1024, 12)
|
||||
}
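// The eight shifts above place the 64-bit GraphBenchSeed into the first 8
// bytes of the 32-byte seed buffer in big-endian order and leave the rest
// zero. A minimal equivalent sketch using the standard library, assuming that
// exact layout is what is intended:
//
//	import "encoding/binary"
//
//	seedBuf := make([]byte, 32)
//	binary.BigEndian.PutUint64(seedBuf[:8], GraphBenchSeed)
//	g.rng = frand.NewCustom(seedBuf, 1024, 12)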
|
||||
|
||||
// generatePubkeys generates deterministic pubkeys using frand
|
||||
func (g *GraphTraversalBenchmark) generatePubkeys() {
|
||||
fmt.Printf("Generating %d deterministic pubkeys...\n", GraphBenchNumPubkeys)
|
||||
start := time.Now()
|
||||
|
||||
g.initializeDeterministicRNG()
|
||||
g.pubkeys = make([][]byte, GraphBenchNumPubkeys)
|
||||
g.signers = make([]*p8k.Signer, GraphBenchNumPubkeys)
|
||||
|
||||
for i := 0; i < GraphBenchNumPubkeys; i++ {
|
||||
// Generate deterministic 32-byte secret key from PRNG
|
||||
secretKey := make([]byte, 32)
|
||||
g.rng.Read(secretKey)
|
||||
|
||||
// Create signer from secret key
|
||||
signer := p8k.MustNew()
|
||||
if err := signer.InitSec(secretKey); err != nil {
|
||||
panic(fmt.Sprintf("failed to init signer %d: %v", i, err))
|
||||
}
|
||||
|
||||
g.signers[i] = signer
|
||||
g.pubkeys[i] = make([]byte, 32)
|
||||
copy(g.pubkeys[i], signer.Pub())
|
||||
|
||||
if (i+1)%10000 == 0 {
|
||||
fmt.Printf(" Generated %d/%d pubkeys...\n", i+1, GraphBenchNumPubkeys)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Generated %d pubkeys in %v\n", GraphBenchNumPubkeys, time.Since(start))
|
||||
}
|
||||
|
||||
// generateFollowGraph generates the random follow graph with deterministic PRNG
|
||||
func (g *GraphTraversalBenchmark) generateFollowGraph() {
|
||||
fmt.Printf("Generating follow graph (1-%d follows per pubkey)...\n", GraphBenchMaxFollows)
|
||||
start := time.Now()
|
||||
|
||||
// Reset RNG to ensure deterministic follow graph
|
||||
g.initializeDeterministicRNG()
|
||||
// Skip the bytes used for pubkey generation
|
||||
skipBuf := make([]byte, 32*GraphBenchNumPubkeys)
|
||||
g.rng.Read(skipBuf)
|
||||
|
||||
g.follows = make([][]int, GraphBenchNumPubkeys)
|
||||
|
||||
totalFollows := 0
|
||||
for i := 0; i < GraphBenchNumPubkeys; i++ {
|
||||
// Determine number of follows for this pubkey (1 to 1000)
|
||||
numFollows := int(g.rng.Uint64n(uint64(GraphBenchMaxFollows-GraphBenchMinFollows+1))) + GraphBenchMinFollows
|
||||
|
||||
// Generate random follow indices (excluding self)
|
||||
followSet := make(map[int]struct{})
|
||||
for len(followSet) < numFollows {
|
||||
followIdx := int(g.rng.Uint64n(uint64(GraphBenchNumPubkeys)))
|
||||
if followIdx != i {
|
||||
followSet[followIdx] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to slice
|
||||
g.follows[i] = make([]int, 0, numFollows)
|
||||
for idx := range followSet {
|
||||
g.follows[i] = append(g.follows[i], idx)
|
||||
}
|
||||
totalFollows += numFollows
|
||||
|
||||
if (i+1)%10000 == 0 {
|
||||
fmt.Printf(" Generated follow lists for %d/%d pubkeys...\n", i+1, GraphBenchNumPubkeys)
|
||||
}
|
||||
}
|
||||
|
||||
avgFollows := float64(totalFollows) / float64(GraphBenchNumPubkeys)
|
||||
fmt.Printf("Generated follow graph in %v (avg %.1f follows/pubkey, total %d follows)\n",
|
||||
time.Since(start), avgFollows, totalFollows)
|
||||
}
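// Note on the 3.2 MB skip buffer above: generatePubkeys consumes exactly 32
// bytes of PRNG output per pubkey, so discarding 32*GraphBenchNumPubkeys bytes
// after the reset realigns the stream to where pubkey generation stopped. A
// sketch that skips without the large allocation, assuming *frand.RNG
// satisfies io.Reader (its Read method signature suggests it does):
//
//	import "io"
//
//	if _, err := io.CopyN(io.Discard, g.rng, 32*GraphBenchNumPubkeys); err != nil {
//		panic(err)
//	}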
|
||||
|
||||
// createFollowListEvents creates kind 3 follow list events in the database
|
||||
func (g *GraphTraversalBenchmark) createFollowListEvents() {
|
||||
fmt.Println("Creating follow list events in database...")
|
||||
start := time.Now()
|
||||
|
||||
ctx := context.Background()
|
||||
baseTime := time.Now().Unix()
|
||||
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
var successCount, errorCount int64
|
||||
latencies := make([]time.Duration, 0, GraphBenchNumPubkeys)
|
||||
|
||||
// Use worker pool for parallel event creation
|
||||
numWorkers := g.config.ConcurrentWorkers
|
||||
if numWorkers < 1 {
|
||||
numWorkers = 4
|
||||
}
|
||||
|
||||
workChan := make(chan int, numWorkers*2)
|
||||
|
||||
// Rate limiter: cap at 20,000 events/second
|
||||
perWorkerRate := 20000.0 / float64(numWorkers)
|
||||
|
||||
for w := 0; w < numWorkers; w++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
workerLimiter := NewRateLimiter(perWorkerRate)
|
||||
|
||||
for i := range workChan {
|
||||
workerLimiter.Wait()
|
||||
|
||||
ev := event.New()
|
||||
ev.Kind = kind.FollowList.K
|
||||
ev.CreatedAt = baseTime + int64(i)
|
||||
ev.Content = []byte("")
|
||||
ev.Tags = tag.NewS()
|
||||
|
||||
// Add p tags for all follows
|
||||
for _, followIdx := range g.follows[i] {
|
||||
pubkeyHex := hex.Enc(g.pubkeys[followIdx])
|
||||
ev.Tags.Append(tag.NewFromAny("p", pubkeyHex))
|
||||
}
|
||||
|
||||
// Sign the event
|
||||
if err := ev.Sign(g.signers[i]); err != nil {
|
||||
mu.Lock()
|
||||
errorCount++
|
||||
mu.Unlock()
|
||||
ev.Free()
|
||||
continue
|
||||
}
|
||||
|
||||
// Save to database
|
||||
eventStart := time.Now()
|
||||
_, err := g.db.SaveEvent(ctx, ev)
|
||||
latency := time.Since(eventStart)
|
||||
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
errorCount++
|
||||
} else {
|
||||
successCount++
|
||||
latencies = append(latencies, latency)
|
||||
}
|
||||
mu.Unlock()
|
||||
|
||||
ev.Free()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Send work
|
||||
for i := 0; i < GraphBenchNumPubkeys; i++ {
|
||||
workChan <- i
|
||||
if (i+1)%10000 == 0 {
|
||||
fmt.Printf(" Queued %d/%d follow list events...\n", i+1, GraphBenchNumPubkeys)
|
||||
}
|
||||
}
|
||||
close(workChan)
|
||||
wg.Wait()
|
||||
|
||||
duration := time.Since(start)
|
||||
eventsPerSec := float64(successCount) / duration.Seconds()
|
||||
|
||||
// Calculate latency stats
|
||||
var avgLatency, p95Latency, p99Latency time.Duration
|
||||
if len(latencies) > 0 {
|
||||
sort.Slice(latencies, func(i, j int) bool { return latencies[i] < latencies[j] })
|
||||
avgLatency = calculateAvgLatency(latencies)
|
||||
p95Latency = calculatePercentileLatency(latencies, 0.95)
|
||||
p99Latency = calculatePercentileLatency(latencies, 0.99)
|
||||
}
|
||||
|
||||
fmt.Printf("Created %d follow list events in %v (%.2f events/sec, errors: %d)\n",
|
||||
successCount, duration, eventsPerSec, errorCount)
|
||||
fmt.Printf(" Avg latency: %v, P95: %v, P99: %v\n", avgLatency, p95Latency, p99Latency)
|
||||
|
||||
// Record result for event creation phase
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Graph Setup (Follow Lists)",
|
||||
Duration: duration,
|
||||
TotalEvents: int(successCount),
|
||||
EventsPerSecond: eventsPerSec,
|
||||
AvgLatency: avgLatency,
|
||||
P95Latency: p95Latency,
|
||||
P99Latency: p99Latency,
|
||||
ConcurrentWorkers: numWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
SuccessRate: float64(successCount) / float64(GraphBenchNumPubkeys) * 100,
|
||||
}
|
||||
|
||||
g.mu.Lock()
|
||||
g.results = append(g.results, result)
|
||||
g.mu.Unlock()
|
||||
}
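// For reference, each event written above is a plain kind-3 follow list; its
// JSON form is roughly the following (values illustrative, id and sig elided):
//
//	{
//	  "kind": 3,
//	  "pubkey": "<hex pubkey of signer i>",
//	  "created_at": <baseTime + i>,
//	  "content": "",
//	  "tags": [["p", "<hex pubkey of followed key>"], ...],
//	  "id": "...",
//	  "sig": "..."
//	}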
|
||||
|
||||
// runThirdDegreeTraversal runs the third-degree graph traversal benchmark
|
||||
func (g *GraphTraversalBenchmark) runThirdDegreeTraversal() {
|
||||
fmt.Printf("\n=== Third-Degree Graph Traversal Benchmark ===\n")
|
||||
fmt.Printf("Traversing 3 degrees of follows for each of %d pubkeys...\n", GraphBenchNumPubkeys)
|
||||
|
||||
start := time.Now()
|
||||
ctx := context.Background()
|
||||
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
var totalQueries int64
|
||||
var totalPubkeysFound int64
|
||||
queryLatencies := make([]time.Duration, 0, GraphBenchNumPubkeys*3)
|
||||
traversalLatencies := make([]time.Duration, 0, GraphBenchNumPubkeys)
|
||||
|
||||
// Sample a subset for detailed traversal (full 100k would take too long)
|
||||
sampleSize := 1000
|
||||
if sampleSize > GraphBenchNumPubkeys {
|
||||
sampleSize = GraphBenchNumPubkeys
|
||||
}
|
||||
|
||||
// Deterministic sampling
|
||||
g.initializeDeterministicRNG()
|
||||
sampleIndices := make([]int, sampleSize)
|
||||
for i := 0; i < sampleSize; i++ {
|
||||
sampleIndices[i] = int(g.rng.Uint64n(uint64(GraphBenchNumPubkeys)))
|
||||
}
|
||||
|
||||
fmt.Printf("Sampling %d pubkeys for traversal...\n", sampleSize)
|
||||
|
||||
numWorkers := g.config.ConcurrentWorkers
|
||||
if numWorkers < 1 {
|
||||
numWorkers = 4
|
||||
}
|
||||
|
||||
workChan := make(chan int, numWorkers*2)
|
||||
|
||||
for w := 0; w < numWorkers; w++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
for startIdx := range workChan {
|
||||
traversalStart := time.Now()
|
||||
foundPubkeys := make(map[string]struct{})
|
||||
|
||||
// Start with the initial pubkey
|
||||
currentLevel := [][]byte{g.pubkeys[startIdx]}
|
||||
startPubkeyHex := hex.Enc(g.pubkeys[startIdx])
|
||||
foundPubkeys[startPubkeyHex] = struct{}{}
|
||||
|
||||
// Traverse 3 degrees
|
||||
for depth := 0; depth < GraphBenchTraversalDepth; depth++ {
|
||||
if len(currentLevel) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
nextLevel := make([][]byte, 0)
|
||||
|
||||
// Query follow lists for all pubkeys at current level
|
||||
// Batch queries for efficiency
|
||||
batchSize := 100
|
||||
for batchStart := 0; batchStart < len(currentLevel); batchStart += batchSize {
|
||||
batchEnd := batchStart + batchSize
|
||||
if batchEnd > len(currentLevel) {
|
||||
batchEnd = len(currentLevel)
|
||||
}
|
||||
|
||||
batch := currentLevel[batchStart:batchEnd]
|
||||
|
||||
// Build filter for kind 3 events from these pubkeys
|
||||
f := filter.New()
|
||||
f.Kinds = kind.NewS(kind.FollowList)
|
||||
f.Authors = tag.NewWithCap(len(batch))
|
||||
for _, pk := range batch {
|
||||
// Authors.T expects raw byte slices (pubkeys)
|
||||
f.Authors.T = append(f.Authors.T, pk)
|
||||
}
|
||||
|
||||
queryStart := time.Now()
|
||||
events, err := g.db.QueryEvents(ctx, f)
|
||||
queryLatency := time.Since(queryStart)
|
||||
|
||||
mu.Lock()
|
||||
totalQueries++
|
||||
queryLatencies = append(queryLatencies, queryLatency)
|
||||
mu.Unlock()
|
||||
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract followed pubkeys from p tags
|
||||
for _, ev := range events {
|
||||
for _, t := range *ev.Tags {
|
||||
if len(t.T) >= 2 && string(t.T[0]) == "p" {
|
||||
pubkeyHex := string(t.ValueHex())
|
||||
if _, exists := foundPubkeys[pubkeyHex]; !exists {
|
||||
foundPubkeys[pubkeyHex] = struct{}{}
|
||||
// Decode hex to bytes for next level
|
||||
if pkBytes, err := hex.Dec(pubkeyHex); err == nil {
|
||||
nextLevel = append(nextLevel, pkBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
ev.Free()
|
||||
}
|
||||
}
|
||||
|
||||
currentLevel = nextLevel
|
||||
}
|
||||
|
||||
traversalLatency := time.Since(traversalStart)
|
||||
|
||||
mu.Lock()
|
||||
totalPubkeysFound += int64(len(foundPubkeys))
|
||||
traversalLatencies = append(traversalLatencies, traversalLatency)
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Send work
|
||||
for _, idx := range sampleIndices {
|
||||
workChan <- idx
|
||||
}
|
||||
close(workChan)
|
||||
wg.Wait()
|
||||
|
||||
duration := time.Since(start)
|
||||
|
||||
// Calculate statistics
|
||||
var avgQueryLatency, p95QueryLatency, p99QueryLatency time.Duration
|
||||
if len(queryLatencies) > 0 {
|
||||
sort.Slice(queryLatencies, func(i, j int) bool { return queryLatencies[i] < queryLatencies[j] })
|
||||
avgQueryLatency = calculateAvgLatency(queryLatencies)
|
||||
p95QueryLatency = calculatePercentileLatency(queryLatencies, 0.95)
|
||||
p99QueryLatency = calculatePercentileLatency(queryLatencies, 0.99)
|
||||
}
|
||||
|
||||
var avgTraversalLatency, p95TraversalLatency, p99TraversalLatency time.Duration
|
||||
if len(traversalLatencies) > 0 {
|
||||
sort.Slice(traversalLatencies, func(i, j int) bool { return traversalLatencies[i] < traversalLatencies[j] })
|
||||
avgTraversalLatency = calculateAvgLatency(traversalLatencies)
|
||||
p95TraversalLatency = calculatePercentileLatency(traversalLatencies, 0.95)
|
||||
p99TraversalLatency = calculatePercentileLatency(traversalLatencies, 0.99)
|
||||
}
|
||||
|
||||
avgPubkeysPerTraversal := float64(totalPubkeysFound) / float64(sampleSize)
|
||||
traversalsPerSec := float64(sampleSize) / duration.Seconds()
|
||||
queriesPerSec := float64(totalQueries) / duration.Seconds()
|
||||
|
||||
fmt.Printf("\n=== Graph Traversal Results ===\n")
|
||||
fmt.Printf("Traversals completed: %d\n", sampleSize)
|
||||
fmt.Printf("Total queries: %d (%.2f queries/sec)\n", totalQueries, queriesPerSec)
|
||||
fmt.Printf("Avg pubkeys found per traversal: %.1f\n", avgPubkeysPerTraversal)
|
||||
fmt.Printf("Total duration: %v\n", duration)
|
||||
fmt.Printf("\nQuery Latencies:\n")
|
||||
fmt.Printf(" Avg: %v, P95: %v, P99: %v\n", avgQueryLatency, p95QueryLatency, p99QueryLatency)
|
||||
fmt.Printf("\nFull Traversal Latencies (3 degrees):\n")
|
||||
fmt.Printf(" Avg: %v, P95: %v, P99: %v\n", avgTraversalLatency, p95TraversalLatency, p99TraversalLatency)
|
||||
fmt.Printf("Traversals/sec: %.2f\n", traversalsPerSec)
|
||||
|
||||
// Record result for traversal phase
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Graph Traversal (3 Degrees)",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalQueries),
|
||||
EventsPerSecond: traversalsPerSec,
|
||||
AvgLatency: avgTraversalLatency,
|
||||
P90Latency: calculatePercentileLatency(traversalLatencies, 0.90),
|
||||
P95Latency: p95TraversalLatency,
|
||||
P99Latency: p99TraversalLatency,
|
||||
Bottom10Avg: calculateBottom10Avg(traversalLatencies),
|
||||
ConcurrentWorkers: numWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
SuccessRate: 100.0,
|
||||
}
|
||||
|
||||
g.mu.Lock()
|
||||
g.results = append(g.results, result)
|
||||
g.mu.Unlock()
|
||||
|
||||
// Also record query performance separately
|
||||
queryResult := &BenchmarkResult{
|
||||
TestName: "Graph Queries (Follow Lists)",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalQueries),
|
||||
EventsPerSecond: queriesPerSec,
|
||||
AvgLatency: avgQueryLatency,
|
||||
P90Latency: calculatePercentileLatency(queryLatencies, 0.90),
|
||||
P95Latency: p95QueryLatency,
|
||||
P99Latency: p99QueryLatency,
|
||||
Bottom10Avg: calculateBottom10Avg(queryLatencies),
|
||||
ConcurrentWorkers: numWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
SuccessRate: 100.0,
|
||||
}
|
||||
|
||||
g.mu.Lock()
|
||||
g.results = append(g.results, queryResult)
|
||||
g.mu.Unlock()
|
||||
}
|
||||
|
||||
// RunSuite runs the complete graph traversal benchmark suite
|
||||
func (g *GraphTraversalBenchmark) RunSuite() {
|
||||
fmt.Println("\n╔════════════════════════════════════════════════════════╗")
|
||||
fmt.Println("║ GRAPH TRAVERSAL BENCHMARK (100k Pubkeys) ║")
|
||||
fmt.Println("╚════════════════════════════════════════════════════════╝")
|
||||
|
||||
// Step 1: Generate pubkeys
|
||||
g.generatePubkeys()
|
||||
|
||||
// Step 2: Generate follow graph
|
||||
g.generateFollowGraph()
|
||||
|
||||
// Step 3: Create follow list events in database
|
||||
g.createFollowListEvents()
|
||||
|
||||
// Step 4: Run third-degree traversal benchmark
|
||||
g.runThirdDegreeTraversal()
|
||||
|
||||
fmt.Printf("\n=== Graph Traversal Benchmark Complete ===\n\n")
|
||||
}
|
||||
|
||||
// GetResults returns the benchmark results
|
||||
func (g *GraphTraversalBenchmark) GetResults() []*BenchmarkResult {
|
||||
g.mu.RLock()
|
||||
defer g.mu.RUnlock()
|
||||
return g.results
|
||||
}
|
||||
|
||||
// PrintResults prints the benchmark results
|
||||
func (g *GraphTraversalBenchmark) PrintResults() {
|
||||
g.mu.RLock()
|
||||
defer g.mu.RUnlock()
|
||||
|
||||
for _, result := range g.results {
|
||||
fmt.Printf("\nTest: %s\n", result.TestName)
|
||||
fmt.Printf("Duration: %v\n", result.Duration)
|
||||
fmt.Printf("Total Events/Queries: %d\n", result.TotalEvents)
|
||||
fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
|
||||
fmt.Printf("Success Rate: %.1f%%\n", result.SuccessRate)
|
||||
fmt.Printf("Concurrent Workers: %d\n", result.ConcurrentWorkers)
|
||||
fmt.Printf("Memory Used: %d MB\n", result.MemoryUsed/(1024*1024))
|
||||
fmt.Printf("Avg Latency: %v\n", result.AvgLatency)
|
||||
fmt.Printf("P90 Latency: %v\n", result.P90Latency)
|
||||
fmt.Printf("P95 Latency: %v\n", result.P95Latency)
|
||||
fmt.Printf("P99 Latency: %v\n", result.P99Latency)
|
||||
fmt.Printf("Bottom 10%% Avg Latency: %v\n", result.Bottom10Avg)
|
||||
}
|
||||
}
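// Minimal driving sketch for this suite, mirroring the
// runGraphTraversalBenchmark wiring added in cmd/benchmark/main.go below
// (error handling elided):
//
//	db, err := database.New(ctx, cancel, config.DataDir, "warn")
//	if err != nil {
//		log.Fatalf("Failed to create database: %v", err)
//	}
//	defer db.Close()
//
//	bench := NewGraphTraversalBenchmark(config, db)
//	bench.RunSuite()
//	bench.PrintResults()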
|
||||
cmd/benchmark/graph_traversal_network.go (new file, 572 lines)
@@ -0,0 +1,572 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/envelopes/eventenvelope"
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/filter"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
"git.mleku.dev/mleku/nostr/encoders/kind"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
"git.mleku.dev/mleku/nostr/ws"
|
||||
"lukechampine.com/frand"
|
||||
)
|
||||
|
||||
// NetworkGraphTraversalBenchmark benchmarks graph traversal using NIP-01 queries over WebSocket
|
||||
type NetworkGraphTraversalBenchmark struct {
|
||||
relayURL string
|
||||
relay *ws.Client
|
||||
results []*BenchmarkResult
|
||||
mu sync.RWMutex
|
||||
workers int
|
||||
|
||||
// Cached data for the benchmark
|
||||
pubkeys [][]byte // 100k pubkeys as 32-byte arrays
|
||||
signers []*p8k.Signer // signers for each pubkey
|
||||
follows [][]int // follows[i] = list of indices that pubkey[i] follows
|
||||
rng *frand.RNG // deterministic PRNG
|
||||
}
|
||||
|
||||
// NewNetworkGraphTraversalBenchmark creates a new network graph traversal benchmark
|
||||
func NewNetworkGraphTraversalBenchmark(relayURL string, workers int) *NetworkGraphTraversalBenchmark {
|
||||
return &NetworkGraphTraversalBenchmark{
|
||||
relayURL: relayURL,
|
||||
workers: workers,
|
||||
results: make([]*BenchmarkResult, 0),
|
||||
rng: frand.NewCustom(make([]byte, 32), 1024, 12), // ChaCha12 with seed buffer
|
||||
}
|
||||
}
|
||||
|
||||
// Connect establishes WebSocket connection to the relay
|
||||
func (n *NetworkGraphTraversalBenchmark) Connect(ctx context.Context) error {
|
||||
var err error
|
||||
n.relay, err = ws.RelayConnect(ctx, n.relayURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect to relay %s: %w", n.relayURL, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the relay connection
|
||||
func (n *NetworkGraphTraversalBenchmark) Close() {
|
||||
if n.relay != nil {
|
||||
n.relay.Close()
|
||||
}
|
||||
}
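// Connect/Close usage sketch with a bounded connection attempt; whether
// ws.RelayConnect honours the context deadline is assumed here, and the relay
// address is a placeholder:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	b := NewNetworkGraphTraversalBenchmark("ws://<relay-host:port>", 24)
//	if err := b.Connect(ctx); err != nil {
//		log.Fatalf("relay unreachable: %v", err)
//	}
//	defer b.Close()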
|
||||
|
||||
// initializeDeterministicRNG initializes the PRNG with deterministic seed
|
||||
func (n *NetworkGraphTraversalBenchmark) initializeDeterministicRNG() {
|
||||
// Create seed buffer from GraphBenchSeed (uint64 spread across 8 bytes)
|
||||
seedBuf := make([]byte, 32)
|
||||
seed := GraphBenchSeed
|
||||
seedBuf[0] = byte(seed >> 56)
|
||||
seedBuf[1] = byte(seed >> 48)
|
||||
seedBuf[2] = byte(seed >> 40)
|
||||
seedBuf[3] = byte(seed >> 32)
|
||||
seedBuf[4] = byte(seed >> 24)
|
||||
seedBuf[5] = byte(seed >> 16)
|
||||
seedBuf[6] = byte(seed >> 8)
|
||||
seedBuf[7] = byte(seed)
|
||||
n.rng = frand.NewCustom(seedBuf, 1024, 12)
|
||||
}
|
||||
|
||||
// generatePubkeys generates deterministic pubkeys using frand
|
||||
func (n *NetworkGraphTraversalBenchmark) generatePubkeys() {
|
||||
fmt.Printf("Generating %d deterministic pubkeys...\n", GraphBenchNumPubkeys)
|
||||
start := time.Now()
|
||||
|
||||
n.initializeDeterministicRNG()
|
||||
n.pubkeys = make([][]byte, GraphBenchNumPubkeys)
|
||||
n.signers = make([]*p8k.Signer, GraphBenchNumPubkeys)
|
||||
|
||||
for i := 0; i < GraphBenchNumPubkeys; i++ {
|
||||
// Generate deterministic 32-byte secret key from PRNG
|
||||
secretKey := make([]byte, 32)
|
||||
n.rng.Read(secretKey)
|
||||
|
||||
// Create signer from secret key
|
||||
signer := p8k.MustNew()
|
||||
if err := signer.InitSec(secretKey); err != nil {
|
||||
panic(fmt.Sprintf("failed to init signer %d: %v", i, err))
|
||||
}
|
||||
|
||||
n.signers[i] = signer
|
||||
n.pubkeys[i] = make([]byte, 32)
|
||||
copy(n.pubkeys[i], signer.Pub())
|
||||
|
||||
if (i+1)%10000 == 0 {
|
||||
fmt.Printf(" Generated %d/%d pubkeys...\n", i+1, GraphBenchNumPubkeys)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Generated %d pubkeys in %v\n", GraphBenchNumPubkeys, time.Since(start))
|
||||
}
|
||||
|
||||
// generateFollowGraph generates the random follow graph with deterministic PRNG
|
||||
func (n *NetworkGraphTraversalBenchmark) generateFollowGraph() {
|
||||
fmt.Printf("Generating follow graph (1-%d follows per pubkey)...\n", GraphBenchMaxFollows)
|
||||
start := time.Now()
|
||||
|
||||
// Reset RNG to ensure deterministic follow graph
|
||||
n.initializeDeterministicRNG()
|
||||
// Skip the bytes used for pubkey generation
|
||||
skipBuf := make([]byte, 32*GraphBenchNumPubkeys)
|
||||
n.rng.Read(skipBuf)
|
||||
|
||||
n.follows = make([][]int, GraphBenchNumPubkeys)
|
||||
|
||||
totalFollows := 0
|
||||
for i := 0; i < GraphBenchNumPubkeys; i++ {
|
||||
// Determine number of follows for this pubkey (1 to 1000)
|
||||
numFollows := int(n.rng.Uint64n(uint64(GraphBenchMaxFollows-GraphBenchMinFollows+1))) + GraphBenchMinFollows
|
||||
|
||||
// Generate random follow indices (excluding self)
|
||||
followSet := make(map[int]struct{})
|
||||
for len(followSet) < numFollows {
|
||||
followIdx := int(n.rng.Uint64n(uint64(GraphBenchNumPubkeys)))
|
||||
if followIdx != i {
|
||||
followSet[followIdx] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to slice
|
||||
n.follows[i] = make([]int, 0, numFollows)
|
||||
for idx := range followSet {
|
||||
n.follows[i] = append(n.follows[i], idx)
|
||||
}
|
||||
totalFollows += numFollows
|
||||
|
||||
if (i+1)%10000 == 0 {
|
||||
fmt.Printf(" Generated follow lists for %d/%d pubkeys...\n", i+1, GraphBenchNumPubkeys)
|
||||
}
|
||||
}
|
||||
|
||||
avgFollows := float64(totalFollows) / float64(GraphBenchNumPubkeys)
|
||||
fmt.Printf("Generated follow graph in %v (avg %.1f follows/pubkey, total %d follows)\n",
|
||||
time.Since(start), avgFollows, totalFollows)
|
||||
}
|
||||
|
||||
// createFollowListEvents creates kind 3 follow list events via WebSocket
|
||||
func (n *NetworkGraphTraversalBenchmark) createFollowListEvents(ctx context.Context) {
|
||||
fmt.Println("Creating follow list events via WebSocket...")
|
||||
start := time.Now()
|
||||
|
||||
baseTime := time.Now().Unix()
|
||||
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
var successCount, errorCount int64
|
||||
latencies := make([]time.Duration, 0, GraphBenchNumPubkeys)
|
||||
|
||||
// Use worker pool for parallel event creation
|
||||
numWorkers := n.workers
|
||||
if numWorkers < 1 {
|
||||
numWorkers = 4
|
||||
}
|
||||
|
||||
workChan := make(chan int, numWorkers*2)
|
||||
|
||||
// Rate limiter: cap at 1000 events/second per relay (to avoid overwhelming)
|
||||
perWorkerRate := 1000.0 / float64(numWorkers)
|
||||
|
||||
for w := 0; w < numWorkers; w++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
workerLimiter := NewRateLimiter(perWorkerRate)
|
||||
|
||||
for i := range workChan {
|
||||
workerLimiter.Wait()
|
||||
|
||||
ev := event.New()
|
||||
ev.Kind = kind.FollowList.K
|
||||
ev.CreatedAt = baseTime + int64(i)
|
||||
ev.Content = []byte("")
|
||||
ev.Tags = tag.NewS()
|
||||
|
||||
// Add p tags for all follows
|
||||
for _, followIdx := range n.follows[i] {
|
||||
pubkeyHex := hex.Enc(n.pubkeys[followIdx])
|
||||
ev.Tags.Append(tag.NewFromAny("p", pubkeyHex))
|
||||
}
|
||||
|
||||
// Sign the event
|
||||
if err := ev.Sign(n.signers[i]); err != nil {
|
||||
mu.Lock()
|
||||
errorCount++
|
||||
mu.Unlock()
|
||||
ev.Free()
|
||||
continue
|
||||
}
|
||||
|
||||
// Publish via WebSocket
|
||||
eventStart := time.Now()
|
||||
errCh := n.relay.Write(eventenvelope.NewSubmissionWith(ev).Marshal(nil))
|
||||
|
||||
// Wait for write to complete
|
||||
select {
|
||||
case err := <-errCh:
|
||||
latency := time.Since(eventStart)
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
errorCount++
|
||||
} else {
|
||||
successCount++
|
||||
latencies = append(latencies, latency)
|
||||
}
|
||||
mu.Unlock()
|
||||
case <-ctx.Done():
|
||||
mu.Lock()
|
||||
errorCount++
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
ev.Free()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Send work
|
||||
for i := 0; i < GraphBenchNumPubkeys; i++ {
|
||||
workChan <- i
|
||||
if (i+1)%10000 == 0 {
|
||||
fmt.Printf(" Queued %d/%d follow list events...\n", i+1, GraphBenchNumPubkeys)
|
||||
}
|
||||
}
|
||||
close(workChan)
|
||||
wg.Wait()
|
||||
|
||||
duration := time.Since(start)
|
||||
eventsPerSec := float64(successCount) / duration.Seconds()
|
||||
|
||||
// Calculate latency stats
|
||||
var avgLatency, p90Latency, p95Latency, p99Latency time.Duration
|
||||
if len(latencies) > 0 {
|
||||
sort.Slice(latencies, func(i, j int) bool { return latencies[i] < latencies[j] })
|
||||
avgLatency = calculateAvgLatency(latencies)
|
||||
p90Latency = calculatePercentileLatency(latencies, 0.90)
|
||||
p95Latency = calculatePercentileLatency(latencies, 0.95)
|
||||
p99Latency = calculatePercentileLatency(latencies, 0.99)
|
||||
}
|
||||
|
||||
fmt.Printf("Created %d follow list events in %v (%.2f events/sec, errors: %d)\n",
|
||||
successCount, duration, eventsPerSec, errorCount)
|
||||
fmt.Printf(" Avg latency: %v, P95: %v, P99: %v\n", avgLatency, p95Latency, p99Latency)
|
||||
|
||||
// Record result for event creation phase
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Graph Setup (Follow Lists)",
|
||||
Duration: duration,
|
||||
TotalEvents: int(successCount),
|
||||
EventsPerSecond: eventsPerSec,
|
||||
AvgLatency: avgLatency,
|
||||
P90Latency: p90Latency,
|
||||
P95Latency: p95Latency,
|
||||
P99Latency: p99Latency,
|
||||
Bottom10Avg: calculateBottom10Avg(latencies),
|
||||
ConcurrentWorkers: numWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
SuccessRate: float64(successCount) / float64(GraphBenchNumPubkeys) * 100,
|
||||
}
|
||||
|
||||
n.mu.Lock()
|
||||
n.results = append(n.results, result)
|
||||
n.mu.Unlock()
|
||||
}
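// What goes over the wire above: eventenvelope.NewSubmissionWith(ev).Marshal(nil)
// produces the standard NIP-01 client submission, roughly
//
//	["EVENT", {"kind":3, "pubkey":"...", "created_at":..., "content":"", "tags":[["p","..."], ...], "id":"...", "sig":"..."}]
//
// Note that the latency recorded here is the WebSocket write completing (per
// the "Wait for write to complete" comment), not the relay's ["OK", ...]
// acknowledgement, so it measures send latency rather than end-to-end accept
// latency.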
|
||||
|
||||
// runThirdDegreeTraversal runs the third-degree graph traversal benchmark via WebSocket
|
||||
func (n *NetworkGraphTraversalBenchmark) runThirdDegreeTraversal(ctx context.Context) {
|
||||
fmt.Printf("\n=== Third-Degree Graph Traversal Benchmark (Network) ===\n")
|
||||
fmt.Printf("Traversing 3 degrees of follows via WebSocket...\n")
|
||||
|
||||
start := time.Now()
|
||||
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
var totalQueries int64
|
||||
var totalPubkeysFound int64
|
||||
queryLatencies := make([]time.Duration, 0, 10000)
|
||||
traversalLatencies := make([]time.Duration, 0, 1000)
|
||||
|
||||
// Sample a subset for detailed traversal
|
||||
sampleSize := 1000
|
||||
if sampleSize > GraphBenchNumPubkeys {
|
||||
sampleSize = GraphBenchNumPubkeys
|
||||
}
|
||||
|
||||
// Deterministic sampling
|
||||
n.initializeDeterministicRNG()
|
||||
sampleIndices := make([]int, sampleSize)
|
||||
for i := 0; i < sampleSize; i++ {
|
||||
sampleIndices[i] = int(n.rng.Uint64n(uint64(GraphBenchNumPubkeys)))
|
||||
}
|
||||
|
||||
fmt.Printf("Sampling %d pubkeys for traversal...\n", sampleSize)
|
||||
|
||||
numWorkers := n.workers
|
||||
if numWorkers < 1 {
|
||||
numWorkers = 4
|
||||
}
|
||||
|
||||
workChan := make(chan int, numWorkers*2)
|
||||
|
||||
for w := 0; w < numWorkers; w++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
for startIdx := range workChan {
|
||||
traversalStart := time.Now()
|
||||
foundPubkeys := make(map[string]struct{})
|
||||
|
||||
// Start with the initial pubkey
|
||||
currentLevel := [][]byte{n.pubkeys[startIdx]}
|
||||
startPubkeyHex := hex.Enc(n.pubkeys[startIdx])
|
||||
foundPubkeys[startPubkeyHex] = struct{}{}
|
||||
|
||||
// Traverse 3 degrees
|
||||
for depth := 0; depth < GraphBenchTraversalDepth; depth++ {
|
||||
if len(currentLevel) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
nextLevel := make([][]byte, 0)
|
||||
|
||||
// Query follow lists for all pubkeys at current level
|
||||
// Batch queries for efficiency
|
||||
batchSize := 50
|
||||
for batchStart := 0; batchStart < len(currentLevel); batchStart += batchSize {
|
||||
batchEnd := batchStart + batchSize
|
||||
if batchEnd > len(currentLevel) {
|
||||
batchEnd = len(currentLevel)
|
||||
}
|
||||
|
||||
batch := currentLevel[batchStart:batchEnd]
|
||||
|
||||
// Build filter for kind 3 events from these pubkeys
|
||||
f := filter.New()
|
||||
f.Kinds = kind.NewS(kind.FollowList)
|
||||
f.Authors = tag.NewWithCap(len(batch))
|
||||
for _, pk := range batch {
|
||||
f.Authors.T = append(f.Authors.T, pk)
|
||||
}
|
||||
|
||||
queryStart := time.Now()
|
||||
|
||||
// Subscribe and collect results
|
||||
sub, err := n.relay.Subscribe(ctx, filter.NewS(f))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Collect events with timeout
|
||||
timeout := time.After(5 * time.Second)
|
||||
events := make([]*event.E, 0)
|
||||
collectLoop:
|
||||
for {
|
||||
select {
|
||||
case ev := <-sub.Events:
|
||||
if ev != nil {
|
||||
events = append(events, ev)
|
||||
}
|
||||
case <-sub.EndOfStoredEvents:
|
||||
break collectLoop
|
||||
case <-timeout:
|
||||
break collectLoop
|
||||
case <-ctx.Done():
|
||||
break collectLoop
|
||||
}
|
||||
}
|
||||
sub.Unsub()
|
||||
|
||||
queryLatency := time.Since(queryStart)
|
||||
|
||||
mu.Lock()
|
||||
totalQueries++
|
||||
queryLatencies = append(queryLatencies, queryLatency)
|
||||
mu.Unlock()
|
||||
|
||||
// Extract followed pubkeys from p tags
|
||||
for _, ev := range events {
|
||||
for _, t := range *ev.Tags {
|
||||
if len(t.T) >= 2 && string(t.T[0]) == "p" {
|
||||
pubkeyHex := string(t.ValueHex())
|
||||
if _, exists := foundPubkeys[pubkeyHex]; !exists {
|
||||
foundPubkeys[pubkeyHex] = struct{}{}
|
||||
// Decode hex to bytes for next level
|
||||
if pkBytes, err := hex.Dec(pubkeyHex); err == nil {
|
||||
nextLevel = append(nextLevel, pkBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
ev.Free()
|
||||
}
|
||||
}
|
||||
|
||||
currentLevel = nextLevel
|
||||
}
|
||||
|
||||
traversalLatency := time.Since(traversalStart)
|
||||
|
||||
mu.Lock()
|
||||
totalPubkeysFound += int64(len(foundPubkeys))
|
||||
traversalLatencies = append(traversalLatencies, traversalLatency)
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Send work
|
||||
for _, idx := range sampleIndices {
|
||||
workChan <- idx
|
||||
}
|
||||
close(workChan)
|
||||
wg.Wait()
|
||||
|
||||
duration := time.Since(start)
|
||||
|
||||
// Calculate statistics
|
||||
var avgQueryLatency, p90QueryLatency, p95QueryLatency, p99QueryLatency time.Duration
|
||||
if len(queryLatencies) > 0 {
|
||||
sort.Slice(queryLatencies, func(i, j int) bool { return queryLatencies[i] < queryLatencies[j] })
|
||||
avgQueryLatency = calculateAvgLatency(queryLatencies)
|
||||
p90QueryLatency = calculatePercentileLatency(queryLatencies, 0.90)
|
||||
p95QueryLatency = calculatePercentileLatency(queryLatencies, 0.95)
|
||||
p99QueryLatency = calculatePercentileLatency(queryLatencies, 0.99)
|
||||
}
|
||||
|
||||
var avgTraversalLatency, p90TraversalLatency, p95TraversalLatency, p99TraversalLatency time.Duration
|
||||
if len(traversalLatencies) > 0 {
|
||||
sort.Slice(traversalLatencies, func(i, j int) bool { return traversalLatencies[i] < traversalLatencies[j] })
|
||||
avgTraversalLatency = calculateAvgLatency(traversalLatencies)
|
||||
p90TraversalLatency = calculatePercentileLatency(traversalLatencies, 0.90)
|
||||
p95TraversalLatency = calculatePercentileLatency(traversalLatencies, 0.95)
|
||||
p99TraversalLatency = calculatePercentileLatency(traversalLatencies, 0.99)
|
||||
}
|
||||
|
||||
avgPubkeysPerTraversal := float64(totalPubkeysFound) / float64(sampleSize)
|
||||
traversalsPerSec := float64(sampleSize) / duration.Seconds()
|
||||
queriesPerSec := float64(totalQueries) / duration.Seconds()
|
||||
|
||||
fmt.Printf("\n=== Graph Traversal Results (Network) ===\n")
|
||||
fmt.Printf("Traversals completed: %d\n", sampleSize)
|
||||
fmt.Printf("Total queries: %d (%.2f queries/sec)\n", totalQueries, queriesPerSec)
|
||||
fmt.Printf("Avg pubkeys found per traversal: %.1f\n", avgPubkeysPerTraversal)
|
||||
fmt.Printf("Total duration: %v\n", duration)
|
||||
fmt.Printf("\nQuery Latencies:\n")
|
||||
fmt.Printf(" Avg: %v, P95: %v, P99: %v\n", avgQueryLatency, p95QueryLatency, p99QueryLatency)
|
||||
fmt.Printf("\nFull Traversal Latencies (3 degrees):\n")
|
||||
fmt.Printf(" Avg: %v, P95: %v, P99: %v\n", avgTraversalLatency, p95TraversalLatency, p99TraversalLatency)
|
||||
fmt.Printf("Traversals/sec: %.2f\n", traversalsPerSec)
|
||||
|
||||
// Record result for traversal phase
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Graph Traversal (3 Degrees)",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalQueries),
|
||||
EventsPerSecond: traversalsPerSec,
|
||||
AvgLatency: avgTraversalLatency,
|
||||
P90Latency: p90TraversalLatency,
|
||||
P95Latency: p95TraversalLatency,
|
||||
P99Latency: p99TraversalLatency,
|
||||
Bottom10Avg: calculateBottom10Avg(traversalLatencies),
|
||||
ConcurrentWorkers: numWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
SuccessRate: 100.0,
|
||||
}
|
||||
|
||||
n.mu.Lock()
|
||||
n.results = append(n.results, result)
|
||||
n.mu.Unlock()
|
||||
|
||||
// Also record query performance separately
|
||||
queryResult := &BenchmarkResult{
|
||||
TestName: "Graph Queries (Follow Lists)",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalQueries),
|
||||
EventsPerSecond: queriesPerSec,
|
||||
AvgLatency: avgQueryLatency,
|
||||
P90Latency: p90QueryLatency,
|
||||
P95Latency: p95QueryLatency,
|
||||
P99Latency: p99QueryLatency,
|
||||
Bottom10Avg: calculateBottom10Avg(queryLatencies),
|
||||
ConcurrentWorkers: numWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
SuccessRate: 100.0,
|
||||
}
|
||||
|
||||
n.mu.Lock()
|
||||
n.results = append(n.results, queryResult)
|
||||
n.mu.Unlock()
|
||||
}
|
||||
|
||||
// RunSuite runs the complete network graph traversal benchmark suite
|
||||
func (n *NetworkGraphTraversalBenchmark) RunSuite(ctx context.Context) error {
|
||||
fmt.Println("\n╔════════════════════════════════════════════════════════╗")
|
||||
fmt.Println("║ NETWORK GRAPH TRAVERSAL BENCHMARK (100k Pubkeys) ║")
|
||||
fmt.Printf("║ Relay: %-46s ║\n", n.relayURL)
|
||||
fmt.Println("╚════════════════════════════════════════════════════════╝")
|
||||
|
||||
// Step 1: Generate pubkeys
|
||||
n.generatePubkeys()
|
||||
|
||||
// Step 2: Generate follow graph
|
||||
n.generateFollowGraph()
|
||||
|
||||
// Step 3: Connect to relay
|
||||
fmt.Printf("\nConnecting to relay: %s\n", n.relayURL)
|
||||
if err := n.Connect(ctx); err != nil {
|
||||
return fmt.Errorf("failed to connect: %w", err)
|
||||
}
|
||||
defer n.Close()
|
||||
fmt.Println("Connected successfully!")
|
||||
|
||||
// Step 4: Create follow list events via WebSocket
|
||||
n.createFollowListEvents(ctx)
|
||||
|
||||
// Small delay to ensure events are processed
|
||||
fmt.Println("\nWaiting for events to be processed...")
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Step 5: Run third-degree traversal benchmark
|
||||
n.runThirdDegreeTraversal(ctx)
|
||||
|
||||
fmt.Printf("\n=== Network Graph Traversal Benchmark Complete ===\n\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetResults returns the benchmark results
|
||||
func (n *NetworkGraphTraversalBenchmark) GetResults() []*BenchmarkResult {
|
||||
n.mu.RLock()
|
||||
defer n.mu.RUnlock()
|
||||
return n.results
|
||||
}
|
||||
|
||||
// PrintResults prints the benchmark results
|
||||
func (n *NetworkGraphTraversalBenchmark) PrintResults() {
|
||||
n.mu.RLock()
|
||||
defer n.mu.RUnlock()
|
||||
|
||||
for _, result := range n.results {
|
||||
fmt.Printf("\nTest: %s\n", result.TestName)
|
||||
fmt.Printf("Duration: %v\n", result.Duration)
|
||||
fmt.Printf("Total Events/Queries: %d\n", result.TotalEvents)
|
||||
fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
|
||||
fmt.Printf("Success Rate: %.1f%%\n", result.SuccessRate)
|
||||
fmt.Printf("Concurrent Workers: %d\n", result.ConcurrentWorkers)
|
||||
fmt.Printf("Memory Used: %d MB\n", result.MemoryUsed/(1024*1024))
|
||||
fmt.Printf("Avg Latency: %v\n", result.AvgLatency)
|
||||
fmt.Printf("P90 Latency: %v\n", result.P90Latency)
|
||||
fmt.Printf("P95 Latency: %v\n", result.P95Latency)
|
||||
fmt.Printf("P99 Latency: %v\n", result.P99Latency)
|
||||
fmt.Printf("Bottom 10%% Avg Latency: %v\n", result.Bottom10Avg)
|
||||
}
|
||||
}
|
||||
@@ -44,6 +44,10 @@ type BenchmarkConfig struct {
|
||||
// Backend selection
|
||||
UseNeo4j bool
|
||||
UseRelySQLite bool
|
||||
|
||||
// Graph traversal benchmark
|
||||
UseGraphTraversal bool
|
||||
UseNetworkGraphTraversal bool // Network-mode graph traversal (for multi-relay testing)
|
||||
}
|
||||
|
||||
type BenchmarkResult struct {
|
||||
@@ -108,6 +112,15 @@ func main() {
|
||||
// lol.SetLogLevel("trace")
|
||||
config := parseFlags()
|
||||
|
||||
if config.UseNetworkGraphTraversal {
|
||||
// Network graph traversal mode: requires relay URL
|
||||
if config.RelayURL == "" {
|
||||
log.Fatal("Network graph traversal benchmark requires -relay-url flag")
|
||||
}
|
||||
runNetworkGraphTraversalBenchmark(config)
|
||||
return
|
||||
}
|
||||
|
||||
if config.RelayURL != "" {
|
||||
// Network mode: connect to relay and generate traffic
|
||||
runNetworkLoad(config)
|
||||
@@ -126,6 +139,12 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
if config.UseGraphTraversal {
|
||||
// Run graph traversal benchmark
|
||||
runGraphTraversalBenchmark(config)
|
||||
return
|
||||
}
|
||||
|
||||
// Run standard Badger benchmark
|
||||
fmt.Printf("Starting Nostr Relay Benchmark (Badger Backend)\n")
|
||||
fmt.Printf("Data Directory: %s\n", config.DataDir)
|
||||
@@ -189,6 +208,130 @@ func runRelySQLiteBenchmark(config *BenchmarkConfig) {
|
||||
relysqliteBench.GenerateAsciidocReport()
|
||||
}
|
||||
|
||||
func runGraphTraversalBenchmark(config *BenchmarkConfig) {
|
||||
fmt.Printf("Starting Graph Traversal Benchmark (Badger Backend)\n")
|
||||
fmt.Printf("Data Directory: %s\n", config.DataDir)
|
||||
fmt.Printf("Workers: %d\n", config.ConcurrentWorkers)
|
||||
fmt.Printf("Pubkeys: %d, Follows per pubkey: %d-%d\n",
|
||||
GraphBenchNumPubkeys, GraphBenchMinFollows, GraphBenchMaxFollows)
|
||||
|
||||
// Clean up existing data directory
|
||||
os.RemoveAll(config.DataDir)
|
||||
|
||||
ctx := context.Background()
|
||||
cancel := func() {}
|
||||
|
||||
db, err := database.New(ctx, cancel, config.DataDir, "warn")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create and run graph traversal benchmark
|
||||
graphBench := NewGraphTraversalBenchmark(config, db)
|
||||
graphBench.RunSuite()
|
||||
|
||||
// Generate reports
|
||||
graphBench.PrintResults()
|
||||
generateGraphTraversalAsciidocReport(config, graphBench.GetResults())
|
||||
}
|
||||
|
||||
func generateGraphTraversalAsciidocReport(config *BenchmarkConfig, results []*BenchmarkResult) {
|
||||
path := filepath.Join(config.DataDir, "graph_traversal_report.adoc")
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
log.Printf("Failed to create report: %v", err)
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
file.WriteString("= Graph Traversal Benchmark Results\n\n")
|
||||
file.WriteString(
|
||||
fmt.Sprintf(
|
||||
"Generated: %s\n\n", time.Now().Format(time.RFC3339),
|
||||
),
|
||||
)
|
||||
file.WriteString(fmt.Sprintf("Pubkeys: %d\n", GraphBenchNumPubkeys))
|
||||
file.WriteString(fmt.Sprintf("Follows per pubkey: %d-%d\n", GraphBenchMinFollows, GraphBenchMaxFollows))
|
||||
file.WriteString(fmt.Sprintf("Traversal depth: %d degrees\n\n", GraphBenchTraversalDepth))
|
||||
|
||||
file.WriteString("[cols=\"1,^1,^1,^1,^1,^1,^1\",options=\"header\"]\n")
|
||||
file.WriteString("|===\n")
|
||||
file.WriteString("| Test | Events/sec | Avg Latency | P90 | P95 | P99 | Bottom 10% Avg\n")
|
||||
|
||||
for _, r := range results {
|
||||
file.WriteString(fmt.Sprintf("| %s\n", r.TestName))
|
||||
file.WriteString(fmt.Sprintf("| %.2f\n", r.EventsPerSecond))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.AvgLatency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.P90Latency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.P95Latency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.P99Latency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.Bottom10Avg))
|
||||
}
|
||||
file.WriteString("|===\n")
|
||||
|
||||
fmt.Printf("AsciiDoc report saved to: %s\n", path)
|
||||
}
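// The emitted AsciiDoc source looks roughly like this (numbers illustrative;
// AsciiDoc accepts one cell per line, as written above):
//
//	[cols="1,^1,^1,^1,^1,^1,^1",options="header"]
//	|===
//	| Test | Events/sec | Avg Latency | P90 | P95 | P99 | Bottom 10% Avg
//	| Graph Setup (Follow Lists)
//	| 1234.56
//	| 12ms
//	| 20ms
//	| 25ms
//	| 40ms
//	| 35ms
//	|===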
|
||||
|
||||
func runNetworkGraphTraversalBenchmark(config *BenchmarkConfig) {
|
||||
fmt.Printf("Starting Network Graph Traversal Benchmark\n")
|
||||
fmt.Printf("Relay URL: %s\n", config.RelayURL)
|
||||
fmt.Printf("Workers: %d\n", config.ConcurrentWorkers)
|
||||
fmt.Printf("Pubkeys: %d, Follows per pubkey: %d-%d\n",
|
||||
GraphBenchNumPubkeys, GraphBenchMinFollows, GraphBenchMaxFollows)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create and run network graph traversal benchmark
|
||||
netGraphBench := NewNetworkGraphTraversalBenchmark(config.RelayURL, config.ConcurrentWorkers)
|
||||
|
||||
if err := netGraphBench.RunSuite(ctx); err != nil {
|
||||
log.Fatalf("Network graph traversal benchmark failed: %v", err)
|
||||
}
|
||||
|
||||
// Generate reports
|
||||
netGraphBench.PrintResults()
|
||||
generateNetworkGraphTraversalAsciidocReport(config, netGraphBench.GetResults())
|
||||
}
|
||||
|
||||
func generateNetworkGraphTraversalAsciidocReport(config *BenchmarkConfig, results []*BenchmarkResult) {
|
||||
path := filepath.Join(config.DataDir, "network_graph_traversal_report.adoc")
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
log.Printf("Failed to create report: %v", err)
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
file.WriteString("= Network Graph Traversal Benchmark Results\n\n")
|
||||
file.WriteString(
|
||||
fmt.Sprintf(
|
||||
"Generated: %s\n\n", time.Now().Format(time.RFC3339),
|
||||
),
|
||||
)
|
||||
file.WriteString(fmt.Sprintf("Relay URL: %s\n", config.RelayURL))
|
||||
file.WriteString(fmt.Sprintf("Pubkeys: %d\n", GraphBenchNumPubkeys))
|
||||
file.WriteString(fmt.Sprintf("Follows per pubkey: %d-%d\n", GraphBenchMinFollows, GraphBenchMaxFollows))
|
||||
file.WriteString(fmt.Sprintf("Traversal depth: %d degrees\n\n", GraphBenchTraversalDepth))
|
||||
|
||||
file.WriteString("[cols=\"1,^1,^1,^1,^1,^1,^1\",options=\"header\"]\n")
|
||||
file.WriteString("|===\n")
|
||||
file.WriteString("| Test | Events/sec | Avg Latency | P90 | P95 | P99 | Bottom 10% Avg\n")
|
||||
|
||||
for _, r := range results {
|
||||
file.WriteString(fmt.Sprintf("| %s\n", r.TestName))
|
||||
file.WriteString(fmt.Sprintf("| %.2f\n", r.EventsPerSecond))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.AvgLatency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.P90Latency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.P95Latency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.P99Latency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.Bottom10Avg))
|
||||
}
|
||||
file.WriteString("|===\n")
|
||||
|
||||
fmt.Printf("AsciiDoc report saved to: %s\n", path)
|
||||
}
|
||||
|
||||
func parseFlags() *BenchmarkConfig {
|
||||
config := &BenchmarkConfig{}
|
||||
|
||||
@@ -234,6 +377,16 @@ func parseFlags() *BenchmarkConfig {
|
||||
"Use rely-sqlite backend",
|
||||
)
|
||||
|
||||
// Graph traversal benchmark
|
||||
flag.BoolVar(
|
||||
&config.UseGraphTraversal, "graph", false,
|
||||
"Run graph traversal benchmark (100k pubkeys, 3-degree follows)",
|
||||
)
|
||||
flag.BoolVar(
|
||||
&config.UseNetworkGraphTraversal, "graph-network", false,
|
||||
"Run network graph traversal benchmark against relay specified by -relay-url",
|
||||
)
|
||||
|
||||
flag.Parse()
|
||||
return config
|
||||
}
|
||||
|
||||
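For reference, a minimal usage sketch of the new flags defined above. The binary name follows the cmd/benchmark build target; the relay URL shown is only an illustrative value, not taken from this commit:

    # local graph traversal benchmark against the embedded Badger database
    ./benchmark -graph

    # network variant, querying a running relay via the -relay-url flag
    ./benchmark -graph-network -relay-url ws://localhost:8080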
@@ -0,0 +1,201 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_next-orly-badger_8
Events: 50000, Workers: 24, Duration: 1m0s
1764840226432341ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764840226432976ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764840226433077ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764840226433085ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764840226433100ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764840226433117ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764840226433122ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764840226433129ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764840226433135ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764840226433150ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764840226433155ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764840226433164ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764840226433169ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764840226433182ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764840226433186ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764840226433202ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764840226433206ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║           BADGER BACKEND BENCHMARK SUITE                ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 09:23:46 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 2.949326718s
Events/sec: 16953.02
Avg latency: 1.296368ms
P90 latency: 1.675853ms
P95 latency: 1.934996ms
P99 latency: 3.691782ms
Bottom 10% Avg latency: 738.489µs
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 292.737912ms
Burst completed: 5000 events in 295.756562ms
Burst completed: 5000 events in 280.376675ms
Burst completed: 5000 events in 283.027074ms
Burst completed: 5000 events in 292.213914ms
Burst completed: 5000 events in 292.804158ms
Burst completed: 5000 events in 265.332637ms
Burst completed: 5000 events in 262.359574ms
Burst completed: 5000 events in 271.801669ms
Burst completed: 5000 events in 270.594731ms
Burst test completed: 50000 events in 7.813073176s, errors: 0
Events/sec: 6399.53
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.337354042s
Combined ops/sec: 2054.46
Wiping database between tests...
RunQueryTest (Badger)..

=== Query Test ===
Generating 10000 unique synthetic events (minimum 300 bytes each)...
Generated 10000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 10000 events for query tests...
Query test completed: 418759 queries in 1m0.009274332s
Queries/sec: 6978.24
Avg query latency: 2.156012ms
P95 query latency: 8.060424ms
P99 query latency: 12.213045ms
Wiping database between tests...
RunConcurrentQueryStoreTest (Badger)..

=== Concurrent Query/Store Test ===
Generating 5000 unique synthetic events (minimum 300 bytes each)...
Generated 5000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 5000 events for concurrent query/store test...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Concurrent test completed: 313770 operations (263770 queries, 50000 writes) in 1m0.003742319s
Operations/sec: 5229.17
Avg latency: 1.452263ms
Avg query latency: 1.541956ms
Avg write latency: 979.094µs
P95 latency: 3.734524ms
P99 latency: 9.585308ms

=== Badger benchmark completed ===


================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 2.949326718s
Total Events: 50000
Events/sec: 16953.02
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 286 MB
Avg Latency: 1.296368ms
P90 Latency: 1.675853ms
P95 Latency: 1.934996ms
P99 Latency: 3.691782ms
Bottom 10% Avg Latency: 738.489µs
----------------------------------------

Test: Burst Pattern
Duration: 7.813073176s
Total Events: 50000
Events/sec: 6399.53
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 275 MB
Avg Latency: 1.179921ms
P90 Latency: 1.527861ms
P95 Latency: 1.722912ms
P99 Latency: 3.6275ms
Bottom 10% Avg Latency: 587.766µs
----------------------------------------

Test: Mixed Read/Write
Duration: 24.337354042s
Total Events: 50000
Events/sec: 2054.46
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 185 MB
Avg Latency: 387.847µs
P90 Latency: 809.663µs
P95 Latency: 905.205µs
P99 Latency: 1.133569ms
Bottom 10% Avg Latency: 1.057923ms
----------------------------------------

Test: Query Performance
Duration: 1m0.009274332s
Total Events: 418759
Events/sec: 6978.24
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 199 MB
Avg Latency: 2.156012ms
P90 Latency: 6.536561ms
P95 Latency: 8.060424ms
P99 Latency: 12.213045ms
Bottom 10% Avg Latency: 8.880182ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.003742319s
Total Events: 313770
Events/sec: 5229.17
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 177 MB
Avg Latency: 1.452263ms
P90 Latency: 3.028419ms
P95 Latency: 3.734524ms
P99 Latency: 9.585308ms
Bottom 10% Avg Latency: 5.204062ms
----------------------------------------

Report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.adoc

RELAY_NAME: next-orly-badger
RELAY_URL: ws://next-orly-badger:8080
TEST_TIMESTAMP: 2025-12-04T09:27:02+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
@@ -0,0 +1,78 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_next-orly-neo4j_8
Events: 50000, Workers: 24, Duration: 1m0s
1764840427673892ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764840427674007ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764840427674031ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764840427674036ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764840427674056ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764840427674081ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764840427674087ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764840427674097ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764840427674102ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764840427674116ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764840427674121ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764840427674128ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764840427674132ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764840427674146ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764840427674151ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764840427674168ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764840427674172ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║           BADGER BACKEND BENCHMARK SUITE                ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 09:27:07 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 3.004845722s
Events/sec: 16639.79
Avg latency: 1.323689ms
P90 latency: 1.758038ms
P95 latency: 2.077948ms
P99 latency: 3.856256ms
Bottom 10% Avg latency: 730.568µs
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 283.966934ms
Burst completed: 5000 events in 294.692625ms
Burst completed: 5000 events in 363.280618ms
Burst completed: 5000 events in 340.745621ms
Burst completed: 5000 events in 304.674199ms
Burst completed: 5000 events in 280.09038ms
Burst completed: 5000 events in 266.781378ms
Burst completed: 5000 events in 277.70181ms
Burst completed: 5000 events in 271.658408ms
Burst completed: 5000 events in 309.272288ms
Burst test completed: 50000 events in 8.000384614s, errors: 0
Events/sec: 6249.70
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.409054146s
Combined ops/sec: 2048.42
Wiping database between tests...
RunQueryTest (Badger)..

=== Query Test ===
Generating 10000 unique synthetic events (minimum 300 bytes each)...
@@ -0,0 +1,202 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_rely-sqlite_8
Events: 50000, Workers: 24, Duration: 1m0s
1764840025108837ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764840025108932ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764840025108958ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764840025108965ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764840025108976ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764840025108998ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764840025109005ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764840025109017ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764840025109023ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764840025109041ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764840025109047ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764840025109059ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764840025109087ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764840025109131ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764840025109138ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764840025109161ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764840025109166ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║           BADGER BACKEND BENCHMARK SUITE                ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 09:20:25 INFO: Extracted embedded libsecp256k1 to /tmp/orly-libsecp256k1/libsecp256k1.so
2025/12/04 09:20:25 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 2.916633222s
Events/sec: 17143.05
Avg latency: 1.278819ms
P90 latency: 1.645294ms
P95 latency: 1.861406ms
P99 latency: 3.124622ms
Bottom 10% Avg latency: 729.231µs
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 299.940949ms
Burst completed: 5000 events in 320.651151ms
Burst completed: 5000 events in 285.455745ms
Burst completed: 5000 events in 309.502203ms
Burst completed: 5000 events in 298.703461ms
Burst completed: 5000 events in 298.785067ms
Burst completed: 5000 events in 272.364406ms
Burst completed: 5000 events in 264.606838ms
Burst completed: 5000 events in 315.333631ms
Burst completed: 5000 events in 290.913401ms
Burst test completed: 50000 events in 7.960160876s, errors: 0
Events/sec: 6281.28
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.295679369s
Combined ops/sec: 2057.98
Wiping database between tests...
RunQueryTest (Badger)..

=== Query Test ===
Generating 10000 unique synthetic events (minimum 300 bytes each)...
Generated 10000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 10000 events for query tests...
Query test completed: 409908 queries in 1m0.005235789s
Queries/sec: 6831.20
Avg query latency: 2.219665ms
P95 query latency: 8.253853ms
P99 query latency: 12.450497ms
Wiping database between tests...
RunConcurrentQueryStoreTest (Badger)..

=== Concurrent Query/Store Test ===
Generating 5000 unique synthetic events (minimum 300 bytes each)...
Generated 5000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 5000 events for concurrent query/store test...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Concurrent test completed: 312694 operations (262694 queries, 50000 writes) in 1m0.003601943s
Operations/sec: 5211.25
Avg latency: 1.479337ms
Avg query latency: 1.552934ms
Avg write latency: 1.092669ms
P95 latency: 3.715568ms
P99 latency: 9.865884ms

=== Badger benchmark completed ===


================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 2.916633222s
Total Events: 50000
Events/sec: 17143.05
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 199 MB
Avg Latency: 1.278819ms
P90 Latency: 1.645294ms
P95 Latency: 1.861406ms
P99 Latency: 3.124622ms
Bottom 10% Avg Latency: 729.231µs
----------------------------------------

Test: Burst Pattern
Duration: 7.960160876s
Total Events: 50000
Events/sec: 6281.28
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 172 MB
Avg Latency: 1.284949ms
P90 Latency: 1.745856ms
P95 Latency: 2.012483ms
P99 Latency: 3.414064ms
Bottom 10% Avg Latency: 603.349µs
----------------------------------------

Test: Mixed Read/Write
Duration: 24.295679369s
Total Events: 50000
Events/sec: 2057.98
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 188 MB
Avg Latency: 386.608µs
P90 Latency: 813.46µs
P95 Latency: 908.982µs
P99 Latency: 1.125173ms
Bottom 10% Avg Latency: 1.033435ms
----------------------------------------

Test: Query Performance
Duration: 1m0.005235789s
Total Events: 409908
Events/sec: 6831.20
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 203 MB
Avg Latency: 2.219665ms
P90 Latency: 6.727054ms
P95 Latency: 8.253853ms
P99 Latency: 12.450497ms
Bottom 10% Avg Latency: 9.092639ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.003601943s
Total Events: 312694
Events/sec: 5211.25
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 134 MB
Avg Latency: 1.479337ms
P90 Latency: 2.996278ms
P95 Latency: 3.715568ms
P99 Latency: 9.865884ms
Bottom 10% Avg Latency: 5.322579ms
----------------------------------------

Report saved to: /tmp/benchmark_rely-sqlite_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_rely-sqlite_8/benchmark_report.adoc

RELAY_NAME: rely-sqlite
RELAY_URL: ws://rely-sqlite:3334
TEST_TIMESTAMP: 2025-12-04T09:23:41+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
@@ -3,19 +3,25 @@
# Wrapper script to run the benchmark suite and automatically shut down when complete
#
# Usage:
#   ./run-benchmark.sh              # Use disk-based storage (default)
#   ./run-benchmark.sh --ramdisk    # Use /dev/shm ramdisk for maximum performance
#   ./run-benchmark.sh              # Use disk-based storage (default)
#   ./run-benchmark.sh --ramdisk    # Use /dev/shm ramdisk for maximum performance
#   ./run-benchmark.sh --graph      # Also run graph traversal benchmarks

set -e

# Parse command line arguments
USE_RAMDISK=false
USE_GRAPH_TRAVERSAL=false
for arg in "$@"; do
    case $arg in
        --ramdisk)
            USE_RAMDISK=true
            shift
            ;;
        --graph)
            USE_GRAPH_TRAVERSAL=true
            shift
            ;;
        --help|-h)
            echo "Usage: $0 [OPTIONS]"
            echo ""
@@ -23,6 +29,8 @@ for arg in "$@"; do
            echo "  --ramdisk    Use /dev/shm ramdisk storage instead of disk"
            echo "               This eliminates disk I/O bottlenecks for accurate"
            echo "               relay performance measurement."
            echo "  --graph      Run graph traversal benchmarks (100k pubkeys,"
            echo "               1-1000 follows each, 3-degree traversal)"
            echo "  --help, -h   Show this help message"
            echo ""
            echo "Requirements for --ramdisk:"
@@ -39,6 +47,9 @@ for arg in "$@"; do
    esac
done

# Export graph traversal setting for docker-compose
export BENCHMARK_GRAPH_TRAVERSAL="${USE_GRAPH_TRAVERSAL}"

# Determine docker-compose command
if docker compose version &> /dev/null 2>&1; then
    DOCKER_COMPOSE="docker compose"
@@ -97,6 +108,17 @@ else
    echo ""
fi

# Show graph traversal status
if [ "$USE_GRAPH_TRAVERSAL" = true ]; then
    echo "======================================================"
    echo "  GRAPH TRAVERSAL BENCHMARK ENABLED"
    echo "======================================================"
    echo "  Will test 100k pubkeys with 1-1000 follows each"
    echo "  performing 3-degree graph traversal queries"
    echo "======================================================"
    echo ""
fi

# Clean old data directories (may be owned by root from Docker)
if [ -d "${DATA_BASE}" ]; then
    echo "Cleaning old data directories at ${DATA_BASE}..."