Compare commits
2 Commits: 6b98c23606...83c27a52b0

| Author | SHA1 | Date |
|---|---|---|
| | 83c27a52b0 | |
| | 1e9c447fe6 | |
@@ -25,7 +25,15 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
 	log.I.F("HandleDelete: processing delete event %0x from pubkey %0x", env.E.ID, env.E.Pubkey)
 	log.I.F("HandleDelete: delete event tags: %d tags", len(*env.E.Tags))
 	for i, t := range *env.E.Tags {
-		log.I.F("HandleDelete: tag %d: %s = %s", i, string(t.Key()), string(t.Value()))
+		// Use ValueHex() for e/p tags to properly display binary-encoded values
+		key := string(t.Key())
+		var val string
+		if key == "e" || key == "p" {
+			val = string(t.ValueHex()) // Properly converts binary to hex
+		} else {
+			val = string(t.Value())
+		}
+		log.I.F("HandleDelete: tag %d: %s = %s", i, key, val)
 	}
 
 	// Debug: log admin and owner lists
@@ -142,27 +150,21 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
 			// if e tags are found, delete them if the author is signer, or one of
 			// the owners is signer
 			if utils.FastEqual(t.Key(), []byte("e")) {
-				// First try binary format (optimized storage for e-tags)
-				var dst []byte
-				if binVal := t.ValueBinary(); binVal != nil {
-					dst = binVal
-					log.I.F("HandleDelete: processing binary e-tag event ID: %0x", dst)
-				} else {
-					// Fall back to hex decoding for non-binary values
-					val := t.Value()
-					if len(val) == 0 {
-						log.W.F("HandleDelete: empty e-tag value")
-						continue
-					}
-					log.I.F("HandleDelete: processing e-tag with value: %s", string(val))
-					if b, e := hex.Dec(string(val)); chk.E(e) {
-						log.E.F("HandleDelete: failed to decode hex event ID %s: %v", string(val), e)
-						continue
-					} else {
-						dst = b
-						log.I.F("HandleDelete: decoded event ID: %0x", dst)
-					}
+				// Use ValueHex() which properly handles both binary-encoded and hex string formats
+				hexVal := t.ValueHex()
+				if len(hexVal) == 0 {
+					log.W.F("HandleDelete: empty e-tag value")
+					continue
+				}
+				log.I.F("HandleDelete: processing e-tag event ID: %s", string(hexVal))
+
+				// Decode hex to binary for filter
+				dst, e := hex.Dec(string(hexVal))
+				if chk.E(e) {
+					log.E.F("HandleDelete: failed to decode event ID %s: %v", string(hexVal), e)
+					continue
+				}
 
 				f := &filter.F{
 					Ids: tag.NewFromBytesSlice(dst),
 				}
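The substance of both hunks is the same fix: e and p tag values may be stored as raw 32-byte binary rather than hex strings, so printing them or filtering on them needs a hex normalization step first. Below is a minimal standalone sketch of that idea using only the Go standard library; the `tagValueHex` helper and its length heuristic are illustrative assumptions, not the repository's `ValueHex()` implementation.

```go
package main

import (
	"encoding/hex"
	"fmt"
)

// tagValueHex normalizes a tag value for display and filtering: if the
// value is raw 32-byte binary (as an optimized e/p tag encoding would
// store an event ID or pubkey), it is hex-encoded; otherwise it is
// assumed to already be a hex string and passed through unchanged.
func tagValueHex(val []byte) string {
	if len(val) == 32 { // raw binary event ID or pubkey
		return hex.EncodeToString(val)
	}
	return string(val)
}

func main() {
	// A binary-encoded 32-byte value prints as garbage via string(),
	// but normalizes to readable hex.
	raw, _ := hex.DecodeString("6b98c2360683c27a52b01e9c447fe60000000000000000000000000000000000")
	fmt.Println(tagValueHex(raw))

	// An already-hex value passes through unchanged.
	fmt.Println(tagValueHex([]byte("6b98c236")))
}
```

With that normalization in place, the handler can log a stable hex form and then decode it back to binary for the filter, which is the flow the added lines above take.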
@@ -11,7 +11,12 @@
       "Bash(docker compose:*)",
       "Bash(./run-benchmark.sh:*)",
       "Bash(tee:*)",
-      "Bash(sudo rm:*)"
+      "Bash(sudo rm:*)",
+      "Bash(go mod tidy:*)",
+      "Bash(docker run --rm -v \"/home/mleku/src/next.orly.dev/cmd/benchmark/data:/data\" --user root alpine sh -c \"rm -rf /data/* /data/.[!.]*\")",
+      "Bash(head:*)",
+      "Bash(cat:*)",
+      "Bash(chmod:*)"
     ],
     "deny": [],
     "ask": []
@@ -18,8 +18,11 @@ RUN git clone https://git.nostrdev.com/mleku/next.orly.dev.git . && \
     echo "Building benchmark from ORLY version: ${LATEST_TAG}" && \
     git checkout "${LATEST_TAG}"
 
-# Download dependencies
-RUN go mod download
+# Remove local replace directives and update to released version, then download dependencies
+RUN sed -i '/^replace .* => \/home/d' go.mod && \
+    sed -i 's/git.mleku.dev\/mleku\/nostr v1.0.7/git.mleku.dev\/mleku\/nostr v1.0.8/' go.mod && \
+    go mod tidy && \
+    go mod download
 
 # Build the benchmark tool with CGO disabled (uses purego for crypto)
 RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o benchmark ./cmd/benchmark
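For context, the intended effect of the two sed expressions on go.mod looks roughly like the following; the exact module layout and the local replace path are assumptions inferred from the sed patterns, since go.mod itself is not part of this diff:

```diff
-require git.mleku.dev/mleku/nostr v1.0.7
-
-replace git.mleku.dev/mleku/nostr => /home/mleku/src/nostr
+require git.mleku.dev/mleku/nostr v1.0.8
```

Dropping the replace directive matters inside the container build because the /home path from the developer's machine does not exist there; `go mod tidy` then reconciles go.sum before `go mod download` fetches the released module.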
@@ -19,8 +19,11 @@ RUN git clone https://git.nostrdev.com/mleku/next.orly.dev.git . && \
     echo "Building ORLY version: ${LATEST_TAG}" && \
     git checkout "${LATEST_TAG}"
 
-# Download dependencies
-RUN go mod download
+# Remove local replace directives and update to released version, then download dependencies
+RUN sed -i '/^replace .* => \/home/d' go.mod && \
+    sed -i 's/git.mleku.dev\/mleku\/nostr v1.0.7/git.mleku.dev\/mleku\/nostr v1.0.8/' go.mod && \
+    go mod tidy && \
+    go mod download
 
 # Build the relay with CGO disabled (uses purego for crypto)
 # Include debug symbols for profiling
@@ -0,0 +1,74 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_khatru-badger_8
Events: 50000, Workers: 24, Duration: 1m0s
1764840830987179ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764840830987255ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764840830987278ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764840830987283ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764840830987292ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764840830987305ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764840830987310ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764840830987336ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764840830987364ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764840830987412ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764840830987419ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764840830987429ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764840830987435ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764840830987452ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764840830987458ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764840830987473ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764840830987479ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║ BADGER BACKEND BENCHMARK SUITE ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 09:33:50 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 3.213866224s
Events/sec: 15557.59
Avg latency: 1.456848ms
P90 latency: 1.953553ms
P95 latency: 2.322455ms
P99 latency: 4.316566ms
Bottom 10% Avg latency: 793.956µs
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 336.223018ms
Burst completed: 5000 events in 314.023603ms
Burst completed: 5000 events in 296.961158ms
Burst completed: 5000 events in 313.470891ms
Burst completed: 5000 events in 312.977339ms
Burst completed: 5000 events in 304.290846ms
Burst completed: 5000 events in 279.718158ms
Burst completed: 5000 events in 351.360773ms
Burst completed: 5000 events in 413.446584ms
Burst completed: 5000 events in 412.074279ms
Burst test completed: 50000 events in 8.341599033s, errors: 0
Events/sec: 5994.05
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.442820936s
Combined ops/sec: 2045.59
Wiping database between tests...
@@ -0,0 +1,8 @@

RELAY_NAME: khatru-sqlite
RELAY_URL: ws://khatru-sqlite:3334
TEST_TIMESTAMP: 2025-12-04T09:33:45+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
@@ -1,78 +0,0 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_next-orly-neo4j_8
Events: 50000, Workers: 24, Duration: 1m0s
1764840427673892ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764840427674007ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764840427674031ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764840427674036ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764840427674056ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764840427674081ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764840427674087ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764840427674097ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764840427674102ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764840427674116ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764840427674121ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764840427674128ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764840427674132ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764840427674146ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764840427674151ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764840427674168ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764840427674172ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║ BADGER BACKEND BENCHMARK SUITE ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 09:27:07 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 3.004845722s
Events/sec: 16639.79
Avg latency: 1.323689ms
P90 latency: 1.758038ms
P95 latency: 2.077948ms
P99 latency: 3.856256ms
Bottom 10% Avg latency: 730.568µs
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 283.966934ms
Burst completed: 5000 events in 294.692625ms
Burst completed: 5000 events in 363.280618ms
Burst completed: 5000 events in 340.745621ms
Burst completed: 5000 events in 304.674199ms
Burst completed: 5000 events in 280.09038ms
Burst completed: 5000 events in 266.781378ms
Burst completed: 5000 events in 277.70181ms
Burst completed: 5000 events in 271.658408ms
Burst completed: 5000 events in 309.272288ms
Burst test completed: 50000 events in 8.000384614s, errors: 0
Events/sec: 6249.70
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.409054146s
Combined ops/sec: 2048.42
Wiping database between tests...
RunQueryTest (Badger)..

=== Query Test ===
Generating 10000 unique synthetic events (minimum 300 bytes each)...
@@ -0,0 +1,201 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_khatru-badger_8
Events: 50000, Workers: 24, Duration: 1m0s
1764845904475025ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764845904475112ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764845904475134ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764845904475139ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764845904475152ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764845904475166ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764845904475171ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764845904475182ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764845904475187ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764845904475202ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764845904475207ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764845904475213ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764845904475218ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764845904475233ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764845904475238ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764845904475247ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764845904475252ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║ BADGER BACKEND BENCHMARK SUITE ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 10:58:24 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 4.536980771s
Events/sec: 11020.54
Avg latency: 2.141467ms
P90 latency: 3.415814ms
P95 latency: 4.218151ms
P99 latency: 6.573395ms
Bottom 10% Avg latency: 965.163µs
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 562.534206ms
Burst completed: 5000 events in 495.672511ms
Burst completed: 5000 events in 403.9333ms
Burst completed: 5000 events in 406.633831ms
Burst completed: 5000 events in 497.747932ms
Burst completed: 5000 events in 375.06022ms
Burst completed: 5000 events in 357.935146ms
Burst completed: 5000 events in 354.7018ms
Burst completed: 5000 events in 363.034284ms
Burst completed: 5000 events in 369.648798ms
Burst test completed: 50000 events in 9.192909424s, errors: 0
Events/sec: 5438.97
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.759007602s
Combined ops/sec: 2019.47
Wiping database between tests...
RunQueryTest (Badger)..

=== Query Test ===
Generating 10000 unique synthetic events (minimum 300 bytes each)...
Generated 10000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 10000 events for query tests...
Query test completed: 279947 queries in 1m0.0101769s
Queries/sec: 4664.99
Avg query latency: 3.577317ms
P95 query latency: 13.542975ms
P99 query latency: 20.687227ms
Wiping database between tests...
RunConcurrentQueryStoreTest (Badger)..

=== Concurrent Query/Store Test ===
Generating 5000 unique synthetic events (minimum 300 bytes each)...
Generated 5000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 5000 events for concurrent query/store test...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Concurrent test completed: 236582 operations (186582 queries, 50000 writes) in 1m0.004658961s
Operations/sec: 3942.73
Avg latency: 2.272206ms
Avg query latency: 2.486915ms
Avg write latency: 1.470991ms
P95 latency: 6.629071ms
P99 latency: 17.102632ms

=== Badger benchmark completed ===


================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 4.536980771s
Total Events: 50000
Events/sec: 11020.54
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 233 MB
Avg Latency: 2.141467ms
P90 Latency: 3.415814ms
P95 Latency: 4.218151ms
P99 Latency: 6.573395ms
Bottom 10% Avg Latency: 965.163µs
----------------------------------------

Test: Burst Pattern
Duration: 9.192909424s
Total Events: 50000
Events/sec: 5438.97
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 235 MB
Avg Latency: 1.990208ms
P90 Latency: 3.107457ms
P95 Latency: 3.856432ms
P99 Latency: 6.336835ms
Bottom 10% Avg Latency: 900.221µs
----------------------------------------

Test: Mixed Read/Write
Duration: 24.759007602s
Total Events: 50000
Events/sec: 2019.47
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 174 MB
Avg Latency: 450.921µs
P90 Latency: 937.184µs
P95 Latency: 1.10841ms
P99 Latency: 1.666212ms
Bottom 10% Avg Latency: 1.296193ms
----------------------------------------

Test: Query Performance
Duration: 1m0.0101769s
Total Events: 279947
Events/sec: 4664.99
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 142 MB
Avg Latency: 3.577317ms
P90 Latency: 10.560196ms
P95 Latency: 13.542975ms
P99 Latency: 20.687227ms
Bottom 10% Avg Latency: 14.957911ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.004658961s
Total Events: 236582
Events/sec: 3942.73
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 161 MB
Avg Latency: 2.272206ms
P90 Latency: 4.975152ms
P95 Latency: 6.629071ms
P99 Latency: 17.102632ms
Bottom 10% Avg Latency: 8.89611ms
----------------------------------------

Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.adoc

RELAY_NAME: khatru-badger
RELAY_URL: ws://khatru-badger:3334
TEST_TIMESTAMP: 2025-12-04T11:01:44+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
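The same latency fields repeat in every report below, so it is worth pinning down how such figures are conventionally derived from the recorded per-operation durations. The benchmark's own aggregation code is not part of this diff; the sketch below uses the standard nearest-rank percentile, and the decile-average definition (fastest 10%) is an assumption, since the reports do not say whether "Bottom 10%" means the fastest or the slowest tenth.

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// percentile returns the sample below which pct percent of the sorted
// latencies fall (nearest-rank method on a sorted slice).
func percentile(sorted []time.Duration, pct float64) time.Duration {
	idx := int(float64(len(sorted)) * pct / 100)
	if idx >= len(sorted) {
		idx = len(sorted) - 1
	}
	return sorted[idx]
}

// decileAvg averages the fastest tenth of the sorted samples; whether the
// report's "Bottom 10% Avg Latency" uses this decile is an assumption.
func decileAvg(sorted []time.Duration) time.Duration {
	n := len(sorted) / 10
	if n == 0 {
		n = 1
	}
	var sum time.Duration
	for _, d := range sorted[:n] {
		sum += d
	}
	return sum / time.Duration(n)
}

func main() {
	// Stand-in per-event latencies; the real benchmark collects one
	// sample per saved event across all workers.
	samples := []time.Duration{
		900 * time.Microsecond, 1200 * time.Microsecond,
		1500 * time.Microsecond, 2100 * time.Microsecond,
		4300 * time.Microsecond,
	}
	sort.Slice(samples, func(i, j int) bool { return samples[i] < samples[j] })
	fmt.Println("P90:", percentile(samples, 90))
	fmt.Println("P99:", percentile(samples, 99))
	fmt.Println("Bottom 10% avg:", decileAvg(samples))
}
```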
@@ -0,0 +1,201 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_khatru-sqlite_8
Events: 50000, Workers: 24, Duration: 1m0s
1764845699509026ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764845699509106ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764845699509128ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764845699509133ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764845699509146ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764845699509159ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764845699509164ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764845699509172ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764845699509178ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764845699509192ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764845699509197ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764845699509206ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764845699509211ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764845699509224ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764845699509228ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764845699509238ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764845699509242ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║ BADGER BACKEND BENCHMARK SUITE ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 10:54:59 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 4.109596583s
Events/sec: 12166.64
Avg latency: 1.93573ms
P90 latency: 2.871977ms
P95 latency: 3.44036ms
P99 latency: 5.475515ms
Bottom 10% Avg latency: 961.636µs
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 515.356224ms
Burst completed: 5000 events in 399.9581ms
Burst completed: 5000 events in 459.416277ms
Burst completed: 5000 events in 428.20652ms
Burst completed: 5000 events in 747.547021ms
Burst completed: 5000 events in 647.984214ms
Burst completed: 5000 events in 488.90592ms
Burst completed: 5000 events in 377.505306ms
Burst completed: 5000 events in 465.109125ms
Burst completed: 5000 events in 429.364917ms
Burst test completed: 50000 events in 9.965909051s, errors: 0
Events/sec: 5017.10
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.612452482s
Combined ops/sec: 2031.49
Wiping database between tests...
RunQueryTest (Badger)..

=== Query Test ===
Generating 10000 unique synthetic events (minimum 300 bytes each)...
Generated 10000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 10000 events for query tests...
Query test completed: 302291 queries in 1m0.005394665s
Queries/sec: 5037.73
Avg query latency: 3.277291ms
P95 query latency: 12.307232ms
P99 query latency: 18.488169ms
Wiping database between tests...
RunConcurrentQueryStoreTest (Badger)..

=== Concurrent Query/Store Test ===
Generating 5000 unique synthetic events (minimum 300 bytes each)...
Generated 5000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 5000 events for concurrent query/store test...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Concurrent test completed: 243436 operations (193436 queries, 50000 writes) in 1m0.00468811s
Operations/sec: 4056.95
Avg latency: 2.220391ms
Avg query latency: 2.393422ms
Avg write latency: 1.550983ms
P95 latency: 6.295105ms
P99 latency: 16.788623ms

=== Badger benchmark completed ===


================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 4.109596583s
Total Events: 50000
Events/sec: 12166.64
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 243 MB
Avg Latency: 1.93573ms
P90 Latency: 2.871977ms
P95 Latency: 3.44036ms
P99 Latency: 5.475515ms
Bottom 10% Avg Latency: 961.636µs
----------------------------------------

Test: Burst Pattern
Duration: 9.965909051s
Total Events: 50000
Events/sec: 5017.10
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 257 MB
Avg Latency: 2.375602ms
P90 Latency: 3.854368ms
P95 Latency: 5.019226ms
P99 Latency: 8.287248ms
Bottom 10% Avg Latency: 1.013228ms
----------------------------------------

Test: Mixed Read/Write
Duration: 24.612452482s
Total Events: 50000
Events/sec: 2031.49
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 190 MB
Avg Latency: 432.265µs
P90 Latency: 913.499µs
P95 Latency: 1.051763ms
P99 Latency: 1.395767ms
Bottom 10% Avg Latency: 1.160261ms
----------------------------------------

Test: Query Performance
Duration: 1m0.005394665s
Total Events: 302291
Events/sec: 5037.73
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 234 MB
Avg Latency: 3.277291ms
P90 Latency: 9.787032ms
P95 Latency: 12.307232ms
P99 Latency: 18.488169ms
Bottom 10% Avg Latency: 13.509646ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.00468811s
Total Events: 243436
Events/sec: 4056.95
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 148 MB
Avg Latency: 2.220391ms
P90 Latency: 4.746928ms
P95 Latency: 6.295105ms
P99 Latency: 16.788623ms
Bottom 10% Avg Latency: 8.681502ms
----------------------------------------

Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc

RELAY_NAME: khatru-sqlite
RELAY_URL: ws://khatru-sqlite:3334
TEST_TIMESTAMP: 2025-12-04T10:58:19+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
@@ -0,0 +1,43 @@
Starting Network Graph Traversal Benchmark
Relay URL: ws://next-orly-badger:8080
Workers: 24
Pubkeys: 100000, Follows per pubkey: 1-1000

╔════════════════════════════════════════════════════════╗
║ NETWORK GRAPH TRAVERSAL BENCHMARK (100k Pubkeys) ║
║ Relay: ws://next-orly-badger:8080 ║
╚════════════════════════════════════════════════════════╝
Generating 100000 deterministic pubkeys...
2025/12/04 13:19:05 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Generated 10000/100000 pubkeys...
Generated 20000/100000 pubkeys...
Generated 30000/100000 pubkeys...
Generated 40000/100000 pubkeys...
Generated 50000/100000 pubkeys...
Generated 60000/100000 pubkeys...
Generated 70000/100000 pubkeys...
Generated 80000/100000 pubkeys...
Generated 90000/100000 pubkeys...
Generated 100000/100000 pubkeys...
Generated 100000 pubkeys in 2.473794335s
Generating follow graph (1-1000 follows per pubkey)...
Generated follow lists for 10000/100000 pubkeys...
Generated follow lists for 20000/100000 pubkeys...
Generated follow lists for 30000/100000 pubkeys...
Generated follow lists for 40000/100000 pubkeys...
Generated follow lists for 50000/100000 pubkeys...
Generated follow lists for 60000/100000 pubkeys...
Generated follow lists for 70000/100000 pubkeys...
Generated follow lists for 80000/100000 pubkeys...
Generated follow lists for 90000/100000 pubkeys...
Generated follow lists for 100000/100000 pubkeys...
Generated follow graph in 4.361425602s (avg 500.5 follows/pubkey, total 50048088 follows)

Connecting to relay: ws://next-orly-badger:8080
Connected successfully!
Creating follow list events via WebSocket...
Queued 10000/100000 follow list events...
Queued 20000/100000 follow list events...
Queued 30000/100000 follow list events...
1764854401568817🚨 NOTICE from ws://next-orly-badger:8080: 'EVENT processing failed' /go/pkg/mod/git.mleku.dev/mleku/nostr@v1.0.8/ws/client.go:326
1764854402773843🚨 failed to write message: %!w(*net.OpError=&{write tcp 0xc0001b0f30 0xc0001b0f60 {}}) /go/pkg/mod/git.mleku.dev/mleku/nostr@v1.0.8/ws/connection.go:63
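The traversal benchmark's headline numbers are easy to sanity-check: with follow counts drawn uniformly from 1 to 1000, the expected mean is (1+1000)/2 = 500.5, which matches the logged 500.5 follows/pubkey across roughly 50 million edges. The sketch below reproduces that generation shape; the fixed seed and the helper structure are illustrative assumptions, not the benchmark's actual generator.

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const pubkeys = 100000
	// A fixed seed stands in for the benchmark's "deterministic pubkeys":
	// the same seed always yields the same follow graph.
	rng := rand.New(rand.NewSource(8))

	total := 0
	for i := 0; i < pubkeys; i++ {
		follows := 1 + rng.Intn(1000) // uniform in [1, 1000]
		total += follows
	}
	// Expect roughly 500.5 follows/pubkey and ~50M total edges.
	fmt.Printf("avg %.1f follows/pubkey, total %d follows\n",
		float64(total)/pubkeys, total)
}
```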
@@ -0,0 +1,201 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_next-orly-badger_8
Events: 50000, Workers: 24, Duration: 1m0s
1764845290757888ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764845290758084ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764845290758119ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764845290758124ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764845290758135ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764845290758150ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764845290758155ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764845290758167ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764845290758173ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764845290758190ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764845290758195ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764845290758204ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764845290758210ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764845290758224ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764845290758229ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764845290758241ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764845290758247ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║ BADGER BACKEND BENCHMARK SUITE ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 10:48:10 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 4.113585513s
Events/sec: 12154.85
Avg latency: 1.935424ms
P90 latency: 2.908617ms
P95 latency: 3.52541ms
P99 latency: 5.586614ms
Bottom 10% Avg latency: 943.568µs
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 384.404827ms
Burst completed: 5000 events in 366.066982ms
Burst completed: 5000 events in 413.972961ms
Burst completed: 5000 events in 540.992935ms
Burst completed: 5000 events in 444.488278ms
Burst completed: 5000 events in 342.979185ms
Burst completed: 5000 events in 393.451489ms
Burst completed: 5000 events in 530.328367ms
Burst completed: 5000 events in 483.78923ms
Burst completed: 5000 events in 356.248835ms
Burst test completed: 50000 events in 9.263453685s, errors: 0
Events/sec: 5397.55
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.809227197s
Combined ops/sec: 2015.38
Wiping database between tests...
RunQueryTest (Badger)..

=== Query Test ===
Generating 10000 unique synthetic events (minimum 300 bytes each)...
Generated 10000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 10000 events for query tests...
Query test completed: 256384 queries in 1m0.005966351s
Queries/sec: 4272.64
Avg query latency: 3.92418ms
P95 query latency: 14.841512ms
P99 query latency: 22.768552ms
Wiping database between tests...
RunConcurrentQueryStoreTest (Badger)..

=== Concurrent Query/Store Test ===
Generating 5000 unique synthetic events (minimum 300 bytes each)...
Generated 5000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 5000 events for concurrent query/store test...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Concurrent test completed: 220975 operations (170975 queries, 50000 writes) in 1m0.003529193s
Operations/sec: 3682.70
Avg latency: 2.572587ms
Avg query latency: 2.803798ms
Avg write latency: 1.781959ms
P95 latency: 7.618974ms
P99 latency: 19.690393ms

=== Badger benchmark completed ===


================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 4.113585513s
Total Events: 50000
Events/sec: 12154.85
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 127 MB
Avg Latency: 1.935424ms
P90 Latency: 2.908617ms
P95 Latency: 3.52541ms
P99 Latency: 5.586614ms
Bottom 10% Avg Latency: 943.568µs
----------------------------------------

Test: Burst Pattern
Duration: 9.263453685s
Total Events: 50000
Events/sec: 5397.55
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 231 MB
Avg Latency: 2.034536ms
P90 Latency: 3.126682ms
P95 Latency: 3.863975ms
P99 Latency: 6.098539ms
Bottom 10% Avg Latency: 935.662µs
----------------------------------------

Test: Mixed Read/Write
Duration: 24.809227197s
Total Events: 50000
Events/sec: 2015.38
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 184 MB
Avg Latency: 438.529µs
P90 Latency: 917.747µs
P95 Latency: 1.086949ms
P99 Latency: 1.523991ms
Bottom 10% Avg Latency: 1.218802ms
----------------------------------------

Test: Query Performance
Duration: 1m0.005966351s
Total Events: 256384
Events/sec: 4272.64
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 151 MB
Avg Latency: 3.92418ms
P90 Latency: 11.560176ms
P95 Latency: 14.841512ms
P99 Latency: 22.768552ms
Bottom 10% Avg Latency: 16.422096ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.003529193s
Total Events: 220975
Events/sec: 3682.70
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 207 MB
Avg Latency: 2.572587ms
P90 Latency: 5.5629ms
P95 Latency: 7.618974ms
P99 Latency: 19.690393ms
Bottom 10% Avg Latency: 10.306482ms
----------------------------------------

Report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.adoc

RELAY_NAME: next-orly-badger
RELAY_URL: ws://next-orly-badger:8080
TEST_TIMESTAMP: 2025-12-04T10:51:30+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
@@ -0,0 +1,201 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_next-orly-neo4j_8
Events: 50000, Workers: 24, Duration: 1m0s
1764845495230040ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764845495230118ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764845495230154ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764845495230159ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764845495230168ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764845495230182ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764845495230187ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764845495230198ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764845495230204ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764845495230219ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764845495230224ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764845495230232ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764845495230237ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764845495230250ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764845495230255ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764845495230265ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764845495230269ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║ BADGER BACKEND BENCHMARK SUITE ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 10:51:35 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 3.737037757s
Events/sec: 13379.58
Avg latency: 1.744659ms
P90 latency: 2.47401ms
P95 latency: 2.895953ms
P99 latency: 4.909556ms
Bottom 10% Avg latency: 897.762µs
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 421.882059ms
Burst completed: 5000 events in 412.531799ms
Burst completed: 5000 events in 429.098267ms
Burst completed: 5000 events in 390.670143ms
Burst completed: 5000 events in 438.603112ms
Burst completed: 5000 events in 366.944086ms
Burst completed: 5000 events in 534.455064ms
Burst completed: 5000 events in 559.621403ms
Burst completed: 5000 events in 393.427363ms
Burst completed: 5000 events in 371.875354ms
Burst test completed: 50000 events in 9.324705477s, errors: 0
Events/sec: 5362.10
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.924958418s
Combined ops/sec: 2006.02
Wiping database between tests...
RunQueryTest (Badger)..

=== Query Test ===
Generating 10000 unique synthetic events (minimum 300 bytes each)...
Generated 10000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 10000 events for query tests...
Query test completed: 244167 queries in 1m0.008740456s
Queries/sec: 4068.86
Avg query latency: 4.157543ms
P95 query latency: 15.724716ms
P99 query latency: 24.284362ms
Wiping database between tests...
RunConcurrentQueryStoreTest (Badger)..

=== Concurrent Query/Store Test ===
Generating 5000 unique synthetic events (minimum 300 bytes each)...
Generated 5000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 5000 events for concurrent query/store test...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Concurrent test completed: 227664 operations (177664 queries, 50000 writes) in 1m0.005538199s
Operations/sec: 3794.05
Avg latency: 2.523997ms
Avg query latency: 2.668863ms
Avg write latency: 2.009247ms
P95 latency: 7.235855ms
P99 latency: 20.657306ms

=== Badger benchmark completed ===


================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 3.737037757s
Total Events: 50000
Events/sec: 13379.58
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 217 MB
Avg Latency: 1.744659ms
P90 Latency: 2.47401ms
P95 Latency: 2.895953ms
P99 Latency: 4.909556ms
Bottom 10% Avg Latency: 897.762µs
----------------------------------------

Test: Burst Pattern
Duration: 9.324705477s
Total Events: 50000
Events/sec: 5362.10
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 304 MB
Avg Latency: 2.063122ms
P90 Latency: 3.130188ms
P95 Latency: 3.8975ms
P99 Latency: 6.378352ms
Bottom 10% Avg Latency: 954.959µs
----------------------------------------

Test: Mixed Read/Write
Duration: 24.924958418s
Total Events: 50000
Events/sec: 2006.02
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 272 MB
Avg Latency: 475.177µs
P90 Latency: 996.497µs
P95 Latency: 1.205595ms
P99 Latency: 1.873106ms
Bottom 10% Avg Latency: 1.414397ms
----------------------------------------

Test: Query Performance
Duration: 1m0.008740456s
Total Events: 244167
Events/sec: 4068.86
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 148 MB
Avg Latency: 4.157543ms
P90 Latency: 12.228439ms
P95 Latency: 15.724716ms
P99 Latency: 24.284362ms
Bottom 10% Avg Latency: 17.427943ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.005538199s
Total Events: 227664
Events/sec: 3794.05
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 211 MB
Avg Latency: 2.523997ms
P90 Latency: 5.269722ms
P95 Latency: 7.235855ms
P99 Latency: 20.657306ms
Bottom 10% Avg Latency: 10.288906ms
----------------------------------------

Report saved to: /tmp/benchmark_next-orly-neo4j_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly-neo4j_8/benchmark_report.adoc

RELAY_NAME: next-orly-neo4j
RELAY_URL: ws://next-orly-neo4j:8080
TEST_TIMESTAMP: 2025-12-04T10:54:54+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
@@ -0,0 +1,201 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_nostr-rs-relay_8
Events: 50000, Workers: 24, Duration: 1m0s
1764846517510492ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764846517510692ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764846517511210ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764846517511251ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764846517511274ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764846517511304ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764846517511317ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764846517511329ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764846517511340ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764846517511366ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764846517511373ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764846517511388ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764846517511394ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764846517511443ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764846517511452ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764846517511466ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764846517511472ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║ BADGER BACKEND BENCHMARK SUITE ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 11:08:37 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 4.118969633s
Events/sec: 12138.96
Avg latency: 1.937994ms
P90 latency: 2.852802ms
P95 latency: 3.444328ms
P99 latency: 5.727836ms
Bottom 10% Avg latency: 946.456µs
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 403.020917ms
Burst completed: 5000 events in 372.371612ms
Burst completed: 5000 events in 424.238707ms
Burst completed: 5000 events in 385.317421ms
Burst completed: 5000 events in 516.841571ms
Burst completed: 5000 events in 591.703187ms
Burst completed: 5000 events in 445.314485ms
Burst completed: 5000 events in 374.011153ms
Burst completed: 5000 events in 398.6942ms
Burst completed: 5000 events in 365.965806ms
Burst test completed: 50000 events in 9.28457886s, errors: 0
Events/sec: 5385.27
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.684808581s
Combined ops/sec: 2025.54
Wiping database between tests...
RunQueryTest (Badger)..

=== Query Test ===
Generating 10000 unique synthetic events (minimum 300 bytes each)...
Generated 10000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 10000 events for query tests...
Query test completed: 251672 queries in 1m0.006178379s
Queries/sec: 4194.10
Avg query latency: 4.01666ms
P95 query latency: 15.051188ms
P99 query latency: 22.451758ms
Wiping database between tests...
RunConcurrentQueryStoreTest (Badger)..

=== Concurrent Query/Store Test ===
Generating 5000 unique synthetic events (minimum 300 bytes each)...
Generated 5000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 5000 events for concurrent query/store test...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Concurrent test completed: 219001 operations (169001 queries, 50000 writes) in 1m0.004144652s
Operations/sec: 3649.76
Avg latency: 2.620549ms
Avg query latency: 2.844617ms
Avg write latency: 1.863195ms
P95 latency: 7.482377ms
P99 latency: 20.396275ms

=== Badger benchmark completed ===


================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 4.118969633s
Total Events: 50000
Events/sec: 12138.96
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 150 MB
Avg Latency: 1.937994ms
P90 Latency: 2.852802ms
P95 Latency: 3.444328ms
P99 Latency: 5.727836ms
Bottom 10% Avg Latency: 946.456µs
----------------------------------------

Test: Burst Pattern
Duration: 9.28457886s
Total Events: 50000
Events/sec: 5385.27
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 259 MB
Avg Latency: 2.040218ms
P90 Latency: 3.113648ms
P95 Latency: 3.901749ms
P99 Latency: 6.623842ms
Bottom 10% Avg Latency: 930.455µs
----------------------------------------

Test: Mixed Read/Write
Duration: 24.684808581s
Total Events: 50000
Events/sec: 2025.54
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 170 MB
Avg Latency: 435.806µs
P90 Latency: 909.692µs
P95 Latency: 1.063135ms
P99 Latency: 1.414473ms
Bottom 10% Avg Latency: 1.173081ms
----------------------------------------

Test: Query Performance
Duration: 1m0.006178379s
Total Events: 251672
Events/sec: 4194.10
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 159 MB
Avg Latency: 4.01666ms
P90 Latency: 11.874709ms
P95 Latency: 15.051188ms
P99 Latency: 22.451758ms
Bottom 10% Avg Latency: 16.47537ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.004144652s
Total Events: 219001
Events/sec: 3649.76
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 154 MB
Avg Latency: 2.620549ms
P90 Latency: 5.591506ms
P95 Latency: 7.482377ms
P99 Latency: 20.396275ms
Bottom 10% Avg Latency: 10.345145ms
----------------------------------------

Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.adoc

RELAY_NAME: nostr-rs-relay
RELAY_URL: ws://nostr-rs-relay:8080
TEST_TIMESTAMP: 2025-12-04T11:11:56+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
@@ -0,0 +1,201 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_relayer-basic_8
Events: 50000, Workers: 24, Duration: 1m0s
1764846109277147ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764846109277265ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764846109277319ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764846109277325ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764846109277335ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764846109277350ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764846109277355ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764846109277363ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764846109277369ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764846109277389ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764846109277396ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764846109277405ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764846109277410ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764846109277424ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764846109277429ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764846109277439ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764846109277443ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║ BADGER BACKEND BENCHMARK SUITE ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 11:01:49 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 3.829064715s
Events/sec: 13058.02
Avg latency: 1.792879ms
P90 latency: 2.621872ms
P95 latency: 3.153103ms
P99 latency: 4.914106ms
Bottom 10% Avg latency: 919.64µs
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 406.089196ms
Burst completed: 5000 events in 571.162214ms
Burst completed: 5000 events in 417.21044ms
Burst completed: 5000 events in 388.695149ms
Burst completed: 5000 events in 448.68702ms
Burst completed: 5000 events in 349.680067ms
Burst completed: 5000 events in 352.379547ms
Burst completed: 5000 events in 348.007743ms
Burst completed: 5000 events in 396.819076ms
Burst completed: 5000 events in 388.190088ms
Burst test completed: 50000 events in 9.077665116s, errors: 0
Events/sec: 5508.02
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.750507885s
Combined ops/sec: 2020.16
Wiping database between tests...
RunQueryTest (Badger)..

=== Query Test ===
Generating 10000 unique synthetic events (minimum 300 bytes each)...
Generated 10000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 10000 events for query tests...
Query test completed: 272535 queries in 1m0.006407297s
Queries/sec: 4541.76
Avg query latency: 3.702484ms
P95 query latency: 14.064278ms
P99 query latency: 21.546984ms
Wiping database between tests...
RunConcurrentQueryStoreTest (Badger)..

=== Concurrent Query/Store Test ===
Generating 5000 unique synthetic events (minimum 300 bytes each)...
Generated 5000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 5000 events for concurrent query/store test...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Concurrent test completed: 236255 operations (186255 queries, 50000 writes) in 1m0.005350378s
Operations/sec: 3937.23
Avg latency: 2.284443ms
Avg query latency: 2.471631ms
Avg write latency: 1.58715ms
P95 latency: 6.469447ms
P99 latency: 17.551758ms

=== Badger benchmark completed ===


================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 3.829064715s
Total Events: 50000
Events/sec: 13058.02
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 226 MB
Avg Latency: 1.792879ms
P90 Latency: 2.621872ms
P95 Latency: 3.153103ms
P99 Latency: 4.914106ms
Bottom 10% Avg Latency: 919.64µs
----------------------------------------

Test: Burst Pattern
Duration: 9.077665116s
Total Events: 50000
Events/sec: 5508.02
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 263 MB
Avg Latency: 1.938961ms
P90 Latency: 2.872088ms
P95 Latency: 3.585166ms
P99 Latency: 6.443979ms
Bottom 10% Avg Latency: 919.151µs
----------------------------------------

Test: Mixed Read/Write
Duration: 24.750507885s
Total Events: 50000
Events/sec: 2020.16
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 173 MB
Avg Latency: 448.262µs
P90 Latency: 942.865µs
P95 Latency: 1.09768ms
P99 Latency: 1.554199ms
Bottom 10% Avg Latency: 1.241163ms
----------------------------------------

Test: Query Performance
Duration: 1m0.006407297s
Total Events: 272535
Events/sec: 4541.76
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 134 MB
Avg Latency: 3.702484ms
P90 Latency: 10.940029ms
P95 Latency: 14.064278ms
P99 Latency: 21.546984ms
Bottom 10% Avg Latency: 15.564533ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.005350378s
Total Events: 236255
Events/sec: 3937.23
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 200 MB
Avg Latency: 2.284443ms
P90 Latency: 4.876796ms
P95 Latency: 6.469447ms
P99 Latency: 17.551758ms
Bottom 10% Avg Latency: 8.957464ms
----------------------------------------

Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.adoc

RELAY_NAME: relayer-basic
RELAY_URL: ws://relayer-basic:7447
TEST_TIMESTAMP: 2025-12-04T11:05:08+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
@@ -0,0 +1,63 @@
Starting Network Graph Traversal Benchmark
Relay URL: ws://rely-sqlite:3334
Workers: 24
Pubkeys: 100000, Follows per pubkey: 1-1000

╔════════════════════════════════════════════════════════╗
║ NETWORK GRAPH TRAVERSAL BENCHMARK (100k Pubkeys) ║
║ Relay: ws://rely-sqlite:3334 ║
╚════════════════════════════════════════════════════════╝
Generating 100000 deterministic pubkeys...
2025/12/04 11:12:01 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Generated 10000/100000 pubkeys...
Generated 20000/100000 pubkeys...
Generated 30000/100000 pubkeys...
Generated 40000/100000 pubkeys...
Generated 50000/100000 pubkeys...
Generated 60000/100000 pubkeys...
Generated 70000/100000 pubkeys...
Generated 80000/100000 pubkeys...
Generated 90000/100000 pubkeys...
Generated 100000/100000 pubkeys...
Generated 100000 pubkeys in 2.699112464s
Generating follow graph (1-1000 follows per pubkey)...
Generated follow lists for 10000/100000 pubkeys...
Generated follow lists for 20000/100000 pubkeys...
Generated follow lists for 30000/100000 pubkeys...
Generated follow lists for 40000/100000 pubkeys...
Generated follow lists for 50000/100000 pubkeys...
Generated follow lists for 60000/100000 pubkeys...
Generated follow lists for 70000/100000 pubkeys...
Generated follow lists for 80000/100000 pubkeys...
Generated follow lists for 90000/100000 pubkeys...
Generated follow lists for 100000/100000 pubkeys...
Generated follow graph in 5.172393834s (avg 500.5 follows/pubkey, total 50048088 follows)

Connecting to relay: ws://rely-sqlite:3334
Connected successfully!
Creating follow list events via WebSocket...
Queued 10000/100000 follow list events...
Queued 20000/100000 follow list events...
Queued 30000/100000 follow list events...
Queued 40000/100000 follow list events...
Queued 50000/100000 follow list events...
Queued 60000/100000 follow list events...
Queued 70000/100000 follow list events...
Queued 80000/100000 follow list events...
Queued 90000/100000 follow list events...
Queued 100000/100000 follow list events...
Created 100000 follow list events in 1m47.750797847s (928.07 events/sec, errors: 0)
Avg latency: 5.218453ms, P95: 30.619168ms, P99: 66.455368ms

Waiting for events to be processed...

=== Third-Degree Graph Traversal Benchmark (Network) ===
Traversing 3 degrees of follows via WebSocket...
Sampling 1000 pubkeys for traversal...
Killed

RELAY_NAME: rely-sqlite
RELAY_URL: ws://rely-sqlite:3334
TEST_TYPE: Graph Traversal
STATUS: FAILED
TEST_TIMESTAMP: 2025-12-04T13:18:55+00:00
@@ -0,0 +1,202 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_rely-sqlite_8
Events: 50000, Workers: 24, Duration: 1m0s
1764845084601162ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764845084601278ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764845084601338ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764845084601353ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764845084601368ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764845084601398ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764845084601404ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764845084601425ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764845084601432ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764845084601453ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764845084601459ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764845084601470ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764845084601476ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764845084601492ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764845084601498ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764845084601512ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764845084601518ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║ BADGER BACKEND BENCHMARK SUITE ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 10:44:44 INFO: Extracted embedded libsecp256k1 to /tmp/orly-libsecp256k1/libsecp256k1.so
2025/12/04 10:44:44 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 4.863868097s
Events/sec: 10279.88
Avg latency: 2.303586ms
P90 latency: 3.506294ms
P95 latency: 4.26606ms
P99 latency: 6.589692ms
Bottom 10% Avg latency: 1.039748ms
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 490.290781ms
Burst completed: 5000 events in 660.13017ms
Burst completed: 5000 events in 395.417016ms
Burst completed: 5000 events in 386.572933ms
Burst completed: 5000 events in 453.417446ms
Burst completed: 5000 events in 431.074552ms
Burst completed: 5000 events in 425.56635ms
Burst completed: 5000 events in 480.609672ms
Burst completed: 5000 events in 491.483839ms
Burst completed: 5000 events in 855.851556ms
Burst test completed: 50000 events in 10.076554319s, errors: 0
Events/sec: 4962.01
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.99725206s
Combined ops/sec: 2000.22
Wiping database between tests...
RunQueryTest (Badger)..

=== Query Test ===
Generating 10000 unique synthetic events (minimum 300 bytes each)...
Generated 10000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 10000 events for query tests...
Query test completed: 248134 queries in 1m0.010897965s
Queries/sec: 4134.82
Avg query latency: 4.008215ms
P95 query latency: 15.241611ms
P99 query latency: 23.364071ms
Wiping database between tests...
RunConcurrentQueryStoreTest (Badger)..

=== Concurrent Query/Store Test ===
Generating 5000 unique synthetic events (minimum 300 bytes each)...
Generated 5000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 5000 events for concurrent query/store test...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Concurrent test completed: 223423 operations (173423 queries, 50000 writes) in 1m0.003723611s
Operations/sec: 3723.49
Avg latency: 2.490436ms
Avg query latency: 2.752076ms
Avg write latency: 1.582945ms
P95 latency: 7.431916ms
P99 latency: 18.31948ms

=== Badger benchmark completed ===


================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 4.863868097s
Total Events: 50000
Events/sec: 10279.88
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 210 MB
Avg Latency: 2.303586ms
P90 Latency: 3.506294ms
P95 Latency: 4.26606ms
P99 Latency: 6.589692ms
Bottom 10% Avg Latency: 1.039748ms
----------------------------------------

Test: Burst Pattern
Duration: 10.076554319s
Total Events: 50000
Events/sec: 4962.01
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 306 MB
Avg Latency: 2.440058ms
P90 Latency: 3.974234ms
P95 Latency: 5.200288ms
P99 Latency: 9.335708ms
Bottom 10% Avg Latency: 1.00845ms
----------------------------------------

Test: Mixed Read/Write
Duration: 24.99725206s
Total Events: 50000
Events/sec: 2000.22
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 270 MB
Avg Latency: 457.992µs
P90 Latency: 957.983µs
P95 Latency: 1.136012ms
P99 Latency: 1.617368ms
Bottom 10% Avg Latency: 1.292479ms
----------------------------------------

Test: Query Performance
Duration: 1m0.010897965s
Total Events: 248134
Events/sec: 4134.82
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 138 MB
Avg Latency: 4.008215ms
P90 Latency: 11.8477ms
P95 Latency: 15.241611ms
P99 Latency: 23.364071ms
Bottom 10% Avg Latency: 16.87008ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.003723611s
Total Events: 223423
Events/sec: 3723.49
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 195 MB
Avg Latency: 2.490436ms
P90 Latency: 5.497334ms
P95 Latency: 7.431916ms
P99 Latency: 18.31948ms
Bottom 10% Avg Latency: 9.827857ms
----------------------------------------

Report saved to: /tmp/benchmark_rely-sqlite_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_rely-sqlite_8/benchmark_report.adoc

RELAY_NAME: rely-sqlite
RELAY_URL: ws://rely-sqlite:3334
TEST_TIMESTAMP: 2025-12-04T10:48:05+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
201 cmd/benchmark/reports/run_20251204_104444/strfry_results.txt Normal file
@@ -0,0 +1,201 @@
Starting Nostr Relay Benchmark (Badger Backend)
Data Directory: /tmp/benchmark_strfry_8
Events: 50000, Workers: 24, Duration: 1m0s
1764846313173994ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
1764846313174100ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
1764846313174135ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
1764846313174143ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
1764846313174154ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
1764846313174172ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
1764846313174177ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
1764846313174193ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
1764846313174199ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
1764846313174215ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
1764846313174222ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
1764846313174232ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
1764846313174238ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
1764846313174259ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
1764846313174264ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
1764846313174274ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
1764846313174282ℹ️ no events need conversion /build/pkg/database/migrations.go:849

╔════════════════════════════════════════════════════════╗
║ BADGER BACKEND BENCHMARK SUITE ║
╚════════════════════════════════════════════════════════╝

=== Starting Badger benchmark ===
RunPeakThroughputTest (Badger)..

=== Peak Throughput Test ===
2025/12/04 11:05:13 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
Events saved: 50000/50000 (100.0%), errors: 0
Duration: 3.876849434s
Events/sec: 12897.07
Avg latency: 1.815658ms
P90 latency: 2.61564ms
P95 latency: 3.107597ms
P99 latency: 5.258081ms
Bottom 10% Avg latency: 919.54µs
Wiping database between tests...
RunBurstPatternTest (Badger)..

=== Burst Pattern Test ===
Burst completed: 5000 events in 399.187129ms
Burst completed: 5000 events in 388.99822ms
Burst completed: 5000 events in 402.825697ms
Burst completed: 5000 events in 402.426226ms
Burst completed: 5000 events in 509.746009ms
Burst completed: 5000 events in 360.327121ms
Burst completed: 5000 events in 354.620576ms
Burst completed: 5000 events in 340.233233ms
Burst completed: 5000 events in 484.991889ms
Burst completed: 5000 events in 450.540384ms
Burst test completed: 50000 events in 9.101582141s, errors: 0
Events/sec: 5493.55
Wiping database between tests...
RunMixedReadWriteTest (Badger)..

=== Mixed Read/Write Test ===
Generating 1000 unique synthetic events (minimum 300 bytes each)...
Generated 1000 events:
Average content size: 312 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database for read tests...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Mixed test completed: 25000 writes, 25000 reads in 24.968859674s
Combined ops/sec: 2002.49
Wiping database between tests...
RunQueryTest (Badger)..

=== Query Test ===
Generating 10000 unique synthetic events (minimum 300 bytes each)...
Generated 10000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 10000 events for query tests...
Query test completed: 261904 queries in 1m0.006069229s
Queries/sec: 4364.63
Avg query latency: 3.860709ms
P95 query latency: 14.612102ms
P99 query latency: 22.708667ms
Wiping database between tests...
RunConcurrentQueryStoreTest (Badger)..

=== Concurrent Query/Store Test ===
Generating 5000 unique synthetic events (minimum 300 bytes each)...
Generated 5000 events:
Average content size: 313 bytes
All events are unique (incremental timestamps)
All events are properly signed

Pre-populating database with 5000 events for concurrent query/store test...
Generating 50000 unique synthetic events (minimum 300 bytes each)...
Generated 50000 events:
Average content size: 314 bytes
All events are unique (incremental timestamps)
All events are properly signed

Concurrent test completed: 230898 operations (180898 queries, 50000 writes) in 1m0.007085265s
Operations/sec: 3847.85
Avg latency: 2.400221ms
Avg query latency: 2.609803ms
Avg write latency: 1.641962ms
P95 latency: 6.834352ms
P99 latency: 18.125521ms

=== Badger benchmark completed ===


================================================================================
BENCHMARK REPORT
================================================================================

Test: Peak Throughput
Duration: 3.876849434s
Total Events: 50000
Events/sec: 12897.07
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 194 MB
Avg Latency: 1.815658ms
P90 Latency: 2.61564ms
P95 Latency: 3.107597ms
P99 Latency: 5.258081ms
Bottom 10% Avg Latency: 919.54µs
----------------------------------------

Test: Burst Pattern
Duration: 9.101582141s
Total Events: 50000
Events/sec: 5493.55
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 189 MB
Avg Latency: 1.954573ms
P90 Latency: 2.922786ms
P95 Latency: 3.66591ms
P99 Latency: 6.353176ms
Bottom 10% Avg Latency: 904.101µs
----------------------------------------

Test: Mixed Read/Write
Duration: 24.968859674s
Total Events: 50000
Events/sec: 2002.49
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 188 MB
Avg Latency: 443.895µs
P90 Latency: 930.312µs
P95 Latency: 1.08191ms
P99 Latency: 1.476191ms
Bottom 10% Avg Latency: 1.222569ms
----------------------------------------

Test: Query Performance
Duration: 1m0.006069229s
Total Events: 261904
Events/sec: 4364.63
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 198 MB
Avg Latency: 3.860709ms
P90 Latency: 11.381821ms
P95 Latency: 14.612102ms
P99 Latency: 22.708667ms
Bottom 10% Avg Latency: 16.28305ms
----------------------------------------

Test: Concurrent Query/Store
Duration: 1m0.007085265s
Total Events: 230898
Events/sec: 3847.85
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 178 MB
Avg Latency: 2.400221ms
P90 Latency: 5.16819ms
P95 Latency: 6.834352ms
P99 Latency: 18.125521ms
Bottom 10% Avg Latency: 9.340478ms
----------------------------------------

Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_strfry_8/benchmark_report.adoc

RELAY_NAME: strfry
RELAY_URL: ws://strfry:8080
TEST_TIMESTAMP: 2025-12-04T11:08:32+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s
4 go.mod
@@ -3,7 +3,7 @@ module next.orly.dev
go 1.25.3

require (
git.mleku.dev/mleku/nostr v1.0.7
git.mleku.dev/mleku/nostr v1.0.8
github.com/adrg/xdg v0.5.3
github.com/aperturerobotics/go-indexeddb v0.2.3
github.com/dgraph-io/badger/v4 v4.8.0
@@ -82,5 +82,3 @@ require (
)

retract v1.0.3

replace git.mleku.dev/mleku/nostr => /home/mleku/src/git.mleku.dev/mleku/nostr

2 go.sum
@@ -1,3 +1,5 @@
git.mleku.dev/mleku/nostr v1.0.8 h1:YYREdIxobEqYkzxQ7/5ALACPzLkiHW+CTira+VvSQZk=
git.mleku.dev/mleku/nostr v1.0.8/go.mod h1:iYTlg2WKJXJ0kcsM6QBGOJ0UDiJidMgL/i64cHyPjZc=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNNZqGeYq4PnYOlwlOVIvSyNaIy0ykg=

24 pkg/neo4j/docker-compose.yaml Normal file
@@ -0,0 +1,24 @@
# Docker Compose file for Neo4j test database
# Usage: docker compose up -d && go test ./pkg/neo4j/... && docker compose down
services:
  neo4j-test:
    image: neo4j:5.15.0-community
    container_name: neo4j-test
    ports:
      - "7687:7687" # Bolt protocol
      - "7474:7474" # HTTP (browser interface)
    environment:
      - NEO4J_AUTH=neo4j/testpassword
      - NEO4J_PLUGINS=["apoc"]
      - NEO4J_dbms_security_procedures_unrestricted=apoc.*
      - NEO4J_dbms_memory_heap_initial__size=512m
      - NEO4J_dbms_memory_heap_max__size=1g
      - NEO4J_dbms_memory_pagecache_size=512m
    healthcheck:
      test: ["CMD", "cypher-shell", "-u", "neo4j", "-p", "testpassword", "RETURN 1"]
      interval: 5s
      timeout: 10s
      retries: 10
      start_period: 30s
    tmpfs:
      - /data # Use tmpfs for faster tests
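
For orientation, the tests in this package reach the container above over Bolt. A minimal connection sketch, assuming the neo4j-go-driver/v5 client and the credentials from the compose file (dialTestNeo4j is a hypothetical name, not part of this commit):

package neo4j

import (
	"context"

	"github.com/neo4j/neo4j-go-driver/v5/neo4j"
)

// dialTestNeo4j connects to the neo4j-test service and verifies the
// connection; test setup can skip all tests when this fails.
func dialTestNeo4j(ctx context.Context) (neo4j.DriverWithContext, error) {
	driver, err := neo4j.NewDriverWithContext(
		"bolt://localhost:7687", // Bolt port published by the compose service
		neo4j.BasicAuth("neo4j", "testpassword", ""),
	)
	if err != nil {
		return nil, err
	}
	if err = driver.VerifyConnectivity(ctx); err != nil {
		return nil, err
	}
	return driver, nil
}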
277 pkg/neo4j/hex_utils_test.go Normal file
@@ -0,0 +1,277 @@
package neo4j

import (
	"testing"

	"git.mleku.dev/mleku/nostr/encoders/tag"
)

// TestIsBinaryEncoded tests the IsBinaryEncoded function
func TestIsBinaryEncoded(t *testing.T) {
	tests := []struct {
		name     string
		input    []byte
		expected bool
	}{
		{
			name:     "Valid binary encoded (33 bytes with null terminator)",
			input:    append(make([]byte, 32), 0),
			expected: true,
		},
		{
			name:     "Invalid - 32 bytes without terminator",
			input:    make([]byte, 32),
			expected: false,
		},
		{
			name:     "Invalid - 33 bytes without null terminator",
			input:    append(make([]byte, 32), 1),
			expected: false,
		},
		{
			name:     "Invalid - 64 bytes (hex string)",
			input:    []byte("0000000000000000000000000000000000000000000000000000000000000001"),
			expected: false,
		},
		{
			name:     "Invalid - empty",
			input:    []byte{},
			expected: false,
		},
		{
			name:     "Invalid - too short",
			input:    []byte{0, 1, 2, 3},
			expected: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := IsBinaryEncoded(tt.input)
			if result != tt.expected {
				t.Errorf("IsBinaryEncoded(%v) = %v, want %v", tt.input, result, tt.expected)
			}
		})
	}
}
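
// The implementation under test is not shown in this diff; a sketch
// consistent with the cases above would be (editor's sketch, not part of
// this commit — the committed hex_utils.go may differ):
//
//	func IsBinaryEncoded(b []byte) bool {
//		// Binary tag values are exactly 32 payload bytes plus a null terminator.
//		return len(b) == 33 && b[32] == 0
//	}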

// TestNormalizePubkeyHex tests the NormalizePubkeyHex function
func TestNormalizePubkeyHex(t *testing.T) {
	// Create a 32-byte test value
	testBytes := make([]byte, 32)
	testBytes[31] = 0x01 // Set last byte to 1

	// Create binary-encoded version (33 bytes with null terminator)
	binaryEncoded := append(testBytes, 0)

	tests := []struct {
		name     string
		input    []byte
		expected string
	}{
		{
			name:     "Binary encoded to hex",
			input:    binaryEncoded,
			expected: "0000000000000000000000000000000000000000000000000000000000000001",
		},
		{
			name:     "Lowercase hex passthrough",
			input:    []byte("0000000000000000000000000000000000000000000000000000000000000001"),
			expected: "0000000000000000000000000000000000000000000000000000000000000001",
		},
		{
			name:     "Uppercase hex to lowercase",
			input:    []byte("ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789"),
			expected: "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
		},
		{
			name:     "Mixed case hex to lowercase",
			input:    []byte("AbCdEf0123456789AbCdEf0123456789AbCdEf0123456789AbCdEf0123456789"),
			expected: "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
		},
		{
			name:     "Prefix hex (shorter than 64)",
			input:    []byte("ABCD"),
			expected: "abcd",
		},
		{
			name:     "Empty input",
			input:    []byte{},
			expected: "",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := NormalizePubkeyHex(tt.input)
			if result != tt.expected {
				t.Errorf("NormalizePubkeyHex(%v) = %q, want %q", tt.input, result, tt.expected)
			}
		})
	}
}
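
// Editor's sketch of the behavior pinned down above (not part of this
// commit; stdlib encoding/hex and strings assumed):
//
//	func NormalizePubkeyHex(v []byte) string {
//		if IsBinaryEncoded(v) {
//			return hex.EncodeToString(v[:32]) // binary form: hex-encode the 32 payload bytes
//		}
//		return strings.ToLower(string(v)) // already hex (or a prefix): just lowercase it
//	}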

// TestExtractPTagValue tests the ExtractPTagValue function
func TestExtractPTagValue(t *testing.T) {
	// Create a valid pubkey hex string
	validHex := "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"

	tests := []struct {
		name     string
		tag      *tag.T
		expected string
	}{
		{
			name:     "Nil tag",
			tag:      nil,
			expected: "",
		},
		{
			name:     "Empty tag",
			tag:      &tag.T{T: [][]byte{}},
			expected: "",
		},
		{
			name:     "Tag with only key",
			tag:      &tag.T{T: [][]byte{[]byte("p")}},
			expected: "",
		},
		{
			name: "Valid p-tag with hex value",
			tag: &tag.T{T: [][]byte{
				[]byte("p"),
				[]byte(validHex),
			}},
			expected: validHex,
		},
		{
			name: "P-tag with uppercase hex",
			tag: &tag.T{T: [][]byte{
				[]byte("p"),
				[]byte("ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789"),
			}},
			expected: "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := ExtractPTagValue(tt.tag)
			if result != tt.expected {
				t.Errorf("ExtractPTagValue() = %q, want %q", result, tt.expected)
			}
		})
	}
}

// TestExtractETagValue tests the ExtractETagValue function
func TestExtractETagValue(t *testing.T) {
	// Create a valid event ID hex string
	validHex := "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"

	tests := []struct {
		name     string
		tag      *tag.T
		expected string
	}{
		{
			name:     "Nil tag",
			tag:      nil,
			expected: "",
		},
		{
			name:     "Empty tag",
			tag:      &tag.T{T: [][]byte{}},
			expected: "",
		},
		{
			name:     "Tag with only key",
			tag:      &tag.T{T: [][]byte{[]byte("e")}},
			expected: "",
		},
		{
			name: "Valid e-tag with hex value",
			tag: &tag.T{T: [][]byte{
				[]byte("e"),
				[]byte(validHex),
			}},
			expected: validHex,
		},
		{
			name: "E-tag with uppercase hex",
			tag: &tag.T{T: [][]byte{
				[]byte("e"),
				[]byte("1234567890ABCDEF1234567890ABCDEF1234567890ABCDEF1234567890ABCDEF"),
			}},
			expected: "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := ExtractETagValue(tt.tag)
			if result != tt.expected {
				t.Errorf("ExtractETagValue() = %q, want %q", result, tt.expected)
			}
		})
	}
}
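
// Editor's sketch of the accessors exercised above (not part of this
// commit): both return "" for a nil tag or a tag without a value, and
// normalize the second element otherwise; ExtractETagValue would be the
// identical shape for e-tags.
//
//	func ExtractPTagValue(t *tag.T) string {
//		if t == nil || len(t.T) < 2 {
//			return "" // no value to extract
//		}
//		return NormalizePubkeyHex(t.T[1])
//	}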

// TestIsValidHexPubkey tests the IsValidHexPubkey function
func TestIsValidHexPubkey(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected bool
	}{
		{
			name:     "Valid lowercase hex",
			input:    "0000000000000000000000000000000000000000000000000000000000000001",
			expected: true,
		},
		{
			name:     "Valid uppercase hex",
			input:    "ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789",
			expected: true,
		},
		{
			name:     "Valid mixed case hex",
			input:    "AbCdEf0123456789AbCdEf0123456789AbCdEf0123456789AbCdEf0123456789",
			expected: true,
		},
		{
			name:     "Too short",
			input:    "0000000000000000000000000000000000000000000000000000000000000",
			expected: false,
		},
		{
			name:     "Too long",
			input:    "00000000000000000000000000000000000000000000000000000000000000001",
			expected: false,
		},
		{
			name:     "Contains non-hex character",
			input:    "000000000000000000000000000000000000000000000000000000000000000g",
			expected: false,
		},
		{
			name:     "Empty string",
			input:    "",
			expected: false,
		},
		{
			name:     "Contains space",
			input:    "0000000000000000000000000000000000000000000000000000000000000 01",
			expected: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := IsValidHexPubkey(tt.input)
			if result != tt.expected {
				t.Errorf("IsValidHexPubkey(%q) = %v, want %v", tt.input, result, tt.expected)
			}
		})
	}
}
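
These cases pin IsValidHexPubkey to "exactly 64 hex digits, any case". A sketch consistent with them (editor's sketch, not the committed implementation; the migration below checks the stricter lowercase form ^[0-9a-f]{64}$ because stored values are normalized to lowercase first):

func IsValidHexPubkey(s string) bool {
	if len(s) != 64 {
		return false
	}
	for _, c := range s {
		isHex := (c >= '0' && c <= '9') ||
			(c >= 'a' && c <= 'f') ||
			(c >= 'A' && c <= 'F')
		if !isHex {
			return false
		}
	}
	return true
}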
@@ -20,6 +20,11 @@ var migrations = []Migration{
		Description: "Merge Author nodes into NostrUser nodes",
		Migrate:     migrateAuthorToNostrUser,
	},
	{
		Version:     "v2",
		Description: "Clean up binary-encoded pubkeys and event IDs to lowercase hex",
		Migrate:     migrateBinaryToHex,
	},
}

// RunMigrations executes all pending migrations
@@ -195,3 +200,146 @@ func migrateAuthorToNostrUser(ctx context.Context, n *N) error {
	n.Logger.Infof("completed Author to NostrUser migration")
	return nil
}

// migrateBinaryToHex cleans up any binary-encoded pubkeys and event IDs.
// The nostr library stores e/p tag values in binary format (33 bytes with null terminator),
// but Neo4j should store them as lowercase hex strings for consistent querying.
// This migration:
// 1. Finds NostrUser nodes with invalid (non-hex) pubkeys and deletes them
// 2. Finds Event nodes with invalid pubkeys/IDs and deletes them
// 3. Finds Tag nodes (type 'e' or 'p') with invalid values and deletes them
// 4. Cleans up MENTIONS relationships pointing to invalid NostrUser nodes
func migrateBinaryToHex(ctx context.Context, n *N) error {
	// Step 1: Count problematic nodes before cleanup
	n.Logger.Infof("scanning for binary-encoded values in Neo4j...")

	// Check for NostrUser nodes with invalid pubkeys (not 64 char hex)
	// A valid hex pubkey is exactly 64 lowercase hex characters
	countInvalidUsersCypher := `
		MATCH (u:NostrUser)
		WHERE size(u.pubkey) <> 64
		   OR NOT u.pubkey =~ '^[0-9a-f]{64}$'
		RETURN count(u) AS count
	`
	result, err := n.ExecuteRead(ctx, countInvalidUsersCypher, nil)
	if err != nil {
		return fmt.Errorf("failed to count invalid NostrUser nodes: %w", err)
	}

	var invalidUserCount int64
	if result.Next(ctx) {
		if count, ok := result.Record().Values[0].(int64); ok {
			invalidUserCount = count
		}
	}
	n.Logger.Infof("found %d NostrUser nodes with invalid pubkeys", invalidUserCount)

	// Check for Event nodes with invalid pubkeys or IDs
	countInvalidEventsCypher := `
		MATCH (e:Event)
		WHERE (size(e.pubkey) <> 64 OR NOT e.pubkey =~ '^[0-9a-f]{64}$')
		   OR (size(e.id) <> 64 OR NOT e.id =~ '^[0-9a-f]{64}$')
		RETURN count(e) AS count
	`
	result, err = n.ExecuteRead(ctx, countInvalidEventsCypher, nil)
	if err != nil {
		return fmt.Errorf("failed to count invalid Event nodes: %w", err)
	}

	var invalidEventCount int64
	if result.Next(ctx) {
		if count, ok := result.Record().Values[0].(int64); ok {
			invalidEventCount = count
		}
	}
	n.Logger.Infof("found %d Event nodes with invalid pubkeys or IDs", invalidEventCount)

	// Check for Tag nodes (e/p type) with invalid values
	countInvalidTagsCypher := `
		MATCH (t:Tag)
		WHERE t.type IN ['e', 'p']
		  AND (size(t.value) <> 64 OR NOT t.value =~ '^[0-9a-f]{64}$')
		RETURN count(t) AS count
	`
	result, err = n.ExecuteRead(ctx, countInvalidTagsCypher, nil)
	if err != nil {
		return fmt.Errorf("failed to count invalid Tag nodes: %w", err)
	}

	var invalidTagCount int64
	if result.Next(ctx) {
		if count, ok := result.Record().Values[0].(int64); ok {
			invalidTagCount = count
		}
	}
	n.Logger.Infof("found %d Tag nodes (e/p type) with invalid values", invalidTagCount)

	// If nothing to clean up, we're done
	if invalidUserCount == 0 && invalidEventCount == 0 && invalidTagCount == 0 {
		n.Logger.Infof("no binary-encoded values found, migration complete")
		return nil
	}

	// Step 2: Delete invalid NostrUser nodes and their relationships
	if invalidUserCount > 0 {
		n.Logger.Infof("deleting %d invalid NostrUser nodes...", invalidUserCount)
		deleteInvalidUsersCypher := `
			MATCH (u:NostrUser)
			WHERE size(u.pubkey) <> 64
			   OR NOT u.pubkey =~ '^[0-9a-f]{64}$'
			DETACH DELETE u
		`
		_, err = n.ExecuteWrite(ctx, deleteInvalidUsersCypher, nil)
		if err != nil {
			return fmt.Errorf("failed to delete invalid NostrUser nodes: %w", err)
		}
		n.Logger.Infof("deleted %d invalid NostrUser nodes", invalidUserCount)
	}

	// Step 3: Delete invalid Event nodes and their relationships
	if invalidEventCount > 0 {
		n.Logger.Infof("deleting %d invalid Event nodes...", invalidEventCount)
		deleteInvalidEventsCypher := `
			MATCH (e:Event)
			WHERE (size(e.pubkey) <> 64 OR NOT e.pubkey =~ '^[0-9a-f]{64}$')
			   OR (size(e.id) <> 64 OR NOT e.id =~ '^[0-9a-f]{64}$')
			DETACH DELETE e
		`
		_, err = n.ExecuteWrite(ctx, deleteInvalidEventsCypher, nil)
		if err != nil {
			return fmt.Errorf("failed to delete invalid Event nodes: %w", err)
		}
		n.Logger.Infof("deleted %d invalid Event nodes", invalidEventCount)
	}

	// Step 4: Delete invalid Tag nodes (e/p type) and their relationships
	if invalidTagCount > 0 {
		n.Logger.Infof("deleting %d invalid Tag nodes...", invalidTagCount)
		deleteInvalidTagsCypher := `
			MATCH (t:Tag)
			WHERE t.type IN ['e', 'p']
			  AND (size(t.value) <> 64 OR NOT t.value =~ '^[0-9a-f]{64}$')
			DETACH DELETE t
		`
		_, err = n.ExecuteWrite(ctx, deleteInvalidTagsCypher, nil)
		if err != nil {
			return fmt.Errorf("failed to delete invalid Tag nodes: %w", err)
		}
		n.Logger.Infof("deleted %d invalid Tag nodes", invalidTagCount)
	}

	// Step 5: Clean up any orphaned MENTIONS/REFERENCES relationships
	// These would be relationships pointing to nodes we just deleted
	cleanupOrphanedCypher := `
		// Clean up any ProcessedSocialEvent nodes with invalid pubkeys
		MATCH (p:ProcessedSocialEvent)
		WHERE size(p.pubkey) <> 64
		   OR NOT p.pubkey =~ '^[0-9a-f]{64}$'
		DETACH DELETE p
	`
	_, _ = n.ExecuteWrite(ctx, cleanupOrphanedCypher, nil)
	// Ignore errors - best effort cleanup

	n.Logger.Infof("binary-to-hex migration completed successfully")
	return nil
}
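
The migration above only runs when RunMigrations walks the registered list at startup. A minimal call-site sketch, assuming RunMigrations has the signature func (n *N) RunMigrations(ctx context.Context) error (the signature itself is not shown in this diff):

// After connecting to Neo4j, apply any pending schema/data migrations.
if err := n.RunMigrations(ctx); err != nil {
	return fmt.Errorf("neo4j migrations failed: %w", err)
}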

302 pkg/neo4j/migrations_test.go Normal file
@@ -0,0 +1,302 @@
package neo4j

import (
	"context"
	"testing"
)

// TestMigrationV2_CleanupBinaryEncodedValues tests that migration v2 properly
// cleans up binary-encoded pubkeys and event IDs
func TestMigrationV2_CleanupBinaryEncodedValues(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	// Clean up before test
	cleanTestDatabase()

	ctx := context.Background()

	// Create some valid NostrUser nodes (should NOT be deleted)
	validPubkeys := []string{
		"0000000000000000000000000000000000000000000000000000000000000001",
		"abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
	}
	for _, pk := range validPubkeys {
		setupInvalidNostrUser(t, pk) // Using setupInvalidNostrUser to create directly
	}

	// Create some invalid NostrUser nodes (should be deleted)
	invalidPubkeys := []string{
		"binary\x00garbage\x00data", // Binary garbage
		"ABCDEF",                    // Too short
		"GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG", // Non-hex chars
		string(append(make([]byte, 32), 0)),                                // 33-byte binary format
	}
	for _, pk := range invalidPubkeys {
		setupInvalidNostrUser(t, pk)
	}

	// Verify invalid nodes exist before migration
	invalidCountBefore := countInvalidNostrUsers(t)
	if invalidCountBefore != 4 {
		t.Errorf("Expected 4 invalid NostrUsers before migration, got %d", invalidCountBefore)
	}

	totalBefore := countNodes(t, "NostrUser")
	if totalBefore != 6 {
		t.Errorf("Expected 6 total NostrUsers before migration, got %d", totalBefore)
	}

	// Run the migration
	err := migrateBinaryToHex(ctx, testDB)
	if err != nil {
		t.Fatalf("Migration failed: %v", err)
	}

	// Verify invalid nodes were deleted
	invalidCountAfter := countInvalidNostrUsers(t)
	if invalidCountAfter != 0 {
		t.Errorf("Expected 0 invalid NostrUsers after migration, got %d", invalidCountAfter)
	}

	// Verify valid nodes were NOT deleted
	totalAfter := countNodes(t, "NostrUser")
	if totalAfter != 2 {
		t.Errorf("Expected 2 valid NostrUsers after migration, got %d", totalAfter)
	}
}
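
// The setup/count helpers used here (cleanTestDatabase, setupInvalidNostrUser,
// countNodes, ...) are defined elsewhere in the package; an editor's sketch of
// two of them, consistent with how the tests call them (not part of this
// commit):
//
//	func setupInvalidNostrUser(t *testing.T, pubkey string) {
//		t.Helper()
//		_, err := testDB.ExecuteWrite(context.Background(),
//			`CREATE (u:NostrUser {pubkey: $pubkey})`,
//			map[string]any{"pubkey": pubkey})
//		if err != nil {
//			t.Fatalf("failed to create NostrUser: %v", err)
//		}
//	}
//
//	func countNodes(t *testing.T, label string) int64 {
//		t.Helper()
//		ctx := context.Background()
//		result, err := testDB.ExecuteRead(ctx,
//			"MATCH (n:"+label+") RETURN count(n) AS count", nil)
//		if err != nil {
//			t.Fatalf("failed to count %s nodes: %v", label, err)
//		}
//		var n int64
//		if result.Next(ctx) {
//			n, _ = result.Record().Values[0].(int64)
//		}
//		return n
//	}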
|
||||
// TestMigrationV2_CleanupInvalidEvents tests that migration v2 properly
|
||||
// cleans up Event nodes with invalid pubkeys or IDs
|
||||
func TestMigrationV2_CleanupInvalidEvents(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create valid events
|
||||
validEventID := "1111111111111111111111111111111111111111111111111111111111111111"
|
||||
validPubkey := "0000000000000000000000000000000000000000000000000000000000000001"
|
||||
setupTestEvent(t, validEventID, validPubkey, 1, "[]")
|
||||
|
||||
// Create invalid events directly
|
||||
setupInvalidEvent(t, "invalid_id", validPubkey) // Invalid ID
|
||||
setupInvalidEvent(t, validEventID+"2", "invalid_pubkey") // Invalid pubkey (different ID to avoid duplicate)
|
||||
setupInvalidEvent(t, "TOOSHORT", "binary\x00garbage") // Both invalid
|
||||
|
||||
// Count events before migration
|
||||
eventsBefore := countNodes(t, "Event")
|
||||
if eventsBefore != 4 {
|
||||
t.Errorf("Expected 4 Events before migration, got %d", eventsBefore)
|
||||
}
|
||||
|
||||
// Run the migration
|
||||
err := migrateBinaryToHex(ctx, testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("Migration failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify only valid event remains
|
||||
eventsAfter := countNodes(t, "Event")
|
||||
if eventsAfter != 1 {
|
||||
t.Errorf("Expected 1 valid Event after migration, got %d", eventsAfter)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMigrationV2_CleanupInvalidTags tests that migration v2 properly
|
||||
// cleans up Tag nodes (e/p type) with invalid values
|
||||
func TestMigrationV2_CleanupInvalidTags(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create valid tags
|
||||
validHex := "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"
|
||||
setupInvalidTag(t, "e", validHex) // Valid e-tag
|
||||
setupInvalidTag(t, "p", validHex) // Valid p-tag
|
||||
setupInvalidTag(t, "t", "topic") // Non e/p tag (should not be affected)
|
||||
|
||||
// Create invalid e/p tags
|
||||
setupInvalidTag(t, "e", "binary\x00garbage") // Invalid e-tag
|
||||
setupInvalidTag(t, "p", "TOOSHORT") // Invalid p-tag (too short)
|
||||
setupInvalidTag(t, "e", string(append(make([]byte, 32), 0))) // Binary encoded
|
||||
|
||||
// Count tags before migration
|
||||
tagsBefore := countNodes(t, "Tag")
|
||||
if tagsBefore != 6 {
|
||||
t.Errorf("Expected 6 Tags before migration, got %d", tagsBefore)
|
||||
}
|
||||
|
||||
invalidBefore := countInvalidTags(t)
|
||||
if invalidBefore != 3 {
|
||||
t.Errorf("Expected 3 invalid e/p Tags before migration, got %d", invalidBefore)
|
||||
}
|
||||
|
||||
// Run the migration
|
||||
err := migrateBinaryToHex(ctx, testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("Migration failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify invalid tags were deleted
|
||||
invalidAfter := countInvalidTags(t)
|
||||
if invalidAfter != 0 {
|
||||
t.Errorf("Expected 0 invalid e/p Tags after migration, got %d", invalidAfter)
|
||||
}
|
||||
|
||||
// Verify valid tags remain (2 e/p valid + 1 t-tag)
|
||||
tagsAfter := countNodes(t, "Tag")
|
||||
if tagsAfter != 3 {
|
||||
t.Errorf("Expected 3 Tags after migration, got %d", tagsAfter)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMigrationV2_Idempotent tests that migration v2 can be run multiple times safely
|
||||
func TestMigrationV2_Idempotent(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create only valid data
|
||||
validPubkey := "0000000000000000000000000000000000000000000000000000000000000001"
|
||||
validEventID := "1111111111111111111111111111111111111111111111111111111111111111"
|
||||
setupTestEvent(t, validEventID, validPubkey, 1, "[]")
|
||||
|
||||
countBefore := countNodes(t, "Event")
|
||||
|
||||
// Run migration first time
|
||||
err := migrateBinaryToHex(ctx, testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("First migration run failed: %v", err)
|
||||
}
|
||||
|
||||
countAfterFirst := countNodes(t, "Event")
|
||||
if countAfterFirst != countBefore {
|
||||
t.Errorf("First migration changed valid event count: before=%d, after=%d", countBefore, countAfterFirst)
|
||||
}
|
||||
|
||||
// Run migration second time
|
||||
err = migrateBinaryToHex(ctx, testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("Second migration run failed: %v", err)
|
||||
}
|
||||
|
||||
countAfterSecond := countNodes(t, "Event")
|
||||
if countAfterSecond != countBefore {
|
||||
t.Errorf("Second migration changed valid event count: before=%d, after=%d", countBefore, countAfterSecond)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMigrationV2_NoDataDoesNotFail tests that migration v2 succeeds with empty database
|
||||
func TestMigrationV2_NoDataDoesNotFail(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up completely
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Run migration on empty database - should not fail
|
||||
err := migrateBinaryToHex(ctx, testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("Migration on empty database failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMigrationMarking tests that migrations are properly tracked
|
||||
func TestMigrationMarking(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Verify migration v2 has not been applied
|
||||
if testDB.migrationApplied(ctx, "v2") {
|
||||
t.Error("Migration v2 should not be applied before test")
|
||||
}
|
||||
|
||||
// Mark migration as complete
|
||||
err := testDB.markMigrationComplete(ctx, "v2", "Test migration")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to mark migration complete: %v", err)
|
||||
}
|
||||
|
||||
// Verify migration is now marked as applied
|
||||
if !testDB.migrationApplied(ctx, "v2") {
|
||||
t.Error("Migration v2 should be applied after marking")
|
||||
}
|
||||
|
||||
// Clean up
|
||||
cleanTestDatabase()
|
||||
}

// TestMigrationV1_AuthorToNostrUserMerge tests the author migration
func TestMigrationV1_AuthorToNostrUserMerge(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	// Clean up before test
	cleanTestDatabase()

	ctx := context.Background()

	// Create some Author nodes (legacy format)
	authorPubkeys := []string{
		"0000000000000000000000000000000000000000000000000000000000000001",
		"0000000000000000000000000000000000000000000000000000000000000002",
	}

	for _, pk := range authorPubkeys {
		cypher := `CREATE (a:Author {pubkey: $pubkey})`
		_, err := testDB.ExecuteWrite(ctx, cypher, map[string]any{"pubkey": pk})
		if err != nil {
			t.Fatalf("Failed to create Author node: %v", err)
		}
	}

	// Verify Author nodes exist
	authorCount := countNodes(t, "Author")
	if authorCount != 2 {
		t.Errorf("Expected 2 Author nodes, got %d", authorCount)
	}

	// Run migration
	err := migrateAuthorToNostrUser(ctx, testDB)
	if err != nil {
		t.Fatalf("Migration failed: %v", err)
	}

	// Verify NostrUser nodes were created
	nostrUserCount := countNodes(t, "NostrUser")
	if nostrUserCount != 2 {
		t.Errorf("Expected 2 NostrUser nodes after migration, got %d", nostrUserCount)
	}

	// Verify Author nodes were deleted (they should have no relationships after migration)
	authorCountAfter := countNodes(t, "Author")
	if authorCountAfter != 0 {
		t.Errorf("Expected 0 Author nodes after migration, got %d", authorCountAfter)
	}
}
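
The migration itself is outside this hunk. One way migrateAuthorToNostrUser could satisfy the assertions above, sketched under assumptions rather than taken from the actual implementation: merge a NostrUser per legacy Author, re-point AUTHORED_BY edges, then drop the legacy nodes.

// Editor's sketch: a migration consistent with the assertions in
// TestMigrationV1_AuthorToNostrUserMerge. The real migration's Cypher
// may differ.
func migrateAuthorToNostrUserSketch(ctx context.Context, db *N) error {
	// Merge a NostrUser for every legacy Author and re-point relationships.
	_, err := db.ExecuteWrite(ctx, `
	MATCH (a:Author)
	MERGE (u:NostrUser {pubkey: a.pubkey})
	WITH a, u
	OPTIONAL MATCH (e:Event)-[r:AUTHORED_BY]->(a)
	FOREACH (ignoreMe IN CASE WHEN r IS NOT NULL THEN [1] ELSE [] END |
		CREATE (e)-[:AUTHORED_BY]->(u)
		DELETE r
	)
	`, nil)
	if err != nil {
		return err
	}
	// Remove the now-unreferenced Author nodes.
	_, err = db.ExecuteWrite(ctx, `MATCH (a:Author) DETACH DELETE a`, nil)
	return err
}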

@@ -55,43 +55,71 @@ func (n *N) buildCypherQuery(f *filter.F, includeDeleteEvents bool) (string, map
	matchClause := "MATCH (e:Event)"

	// IDs filter - uses exact match or prefix matching
	if len(f.Ids.T) > 0 {
		idConditions := make([]string, len(f.Ids.T))
	// Note: IDs can be either binary (32 bytes) or hex strings (64 chars)
	// We need to normalize to lowercase hex for consistent Neo4j matching
	if f.Ids != nil && len(f.Ids.T) > 0 {
		idConditions := make([]string, 0, len(f.Ids.T))
		for i, id := range f.Ids.T {
			if len(id) == 0 {
				continue // Skip empty IDs
			}
			paramName := fmt.Sprintf("id_%d", i)
			hexID := hex.Enc(id)

			// Normalize to lowercase hex using our utility function
			// This handles both binary-encoded IDs and hex string IDs (including uppercase)
			hexID := NormalizePubkeyHex(id)
			if hexID == "" {
				continue
			}

			// Handle prefix matching for partial IDs
			if len(id) < 32 { // Full event ID is 32 bytes (64 hex chars)
				idConditions[i] = fmt.Sprintf("e.id STARTS WITH $%s", paramName)
			// After normalization, check hex length (should be 64 for full ID)
			if len(hexID) < 64 {
				idConditions = append(idConditions, fmt.Sprintf("e.id STARTS WITH $%s", paramName))
			} else {
				idConditions[i] = fmt.Sprintf("e.id = $%s", paramName)
				idConditions = append(idConditions, fmt.Sprintf("e.id = $%s", paramName))
			}
			params[paramName] = hexID
		}
		whereClauses = append(whereClauses, "("+strings.Join(idConditions, " OR ")+")")
		if len(idConditions) > 0 {
			whereClauses = append(whereClauses, "("+strings.Join(idConditions, " OR ")+")")
		}
	}

	// Authors filter - supports prefix matching for partial pubkeys
	if len(f.Authors.T) > 0 {
		authorConditions := make([]string, len(f.Authors.T))
	// Note: Authors can be either binary (32 bytes) or hex strings (64 chars)
	// We need to normalize to lowercase hex for consistent Neo4j matching
	if f.Authors != nil && len(f.Authors.T) > 0 {
		authorConditions := make([]string, 0, len(f.Authors.T))
		for i, author := range f.Authors.T {
			if len(author) == 0 {
				continue // Skip empty authors
			}
			paramName := fmt.Sprintf("author_%d", i)
			hexAuthor := hex.Enc(author)

			// Normalize to lowercase hex using our utility function
			// This handles both binary-encoded pubkeys and hex string pubkeys (including uppercase)
			hexAuthor := NormalizePubkeyHex(author)
			if hexAuthor == "" {
				continue
			}

			// Handle prefix matching for partial pubkeys
			if len(author) < 32 { // Full pubkey is 32 bytes (64 hex chars)
				authorConditions[i] = fmt.Sprintf("e.pubkey STARTS WITH $%s", paramName)
			// After normalization, check hex length (should be 64 for full pubkey)
			if len(hexAuthor) < 64 {
				authorConditions = append(authorConditions, fmt.Sprintf("e.pubkey STARTS WITH $%s", paramName))
			} else {
				authorConditions[i] = fmt.Sprintf("e.pubkey = $%s", paramName)
				authorConditions = append(authorConditions, fmt.Sprintf("e.pubkey = $%s", paramName))
			}
			params[paramName] = hexAuthor
		}
		whereClauses = append(whereClauses, "("+strings.Join(authorConditions, " OR ")+")")
		if len(authorConditions) > 0 {
			whereClauses = append(whereClauses, "("+strings.Join(authorConditions, " OR ")+")")
		}
	}

	// Kinds filter - matches event types
	if len(f.Kinds.K) > 0 {
	if f.Kinds != nil && len(f.Kinds.K) > 0 {
		kinds := make([]int64, len(f.Kinds.K))
		for i, k := range f.Kinds.K {
			kinds[i] = int64(k.K)
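
NormalizePubkeyHex is called throughout this hunk but defined elsewhere in the package; only its call sites are shown. A minimal sketch of the contract the surrounding code relies on, with the caveat that the real function may handle more cases:

// Editor's sketch of the normalization this hunk depends on: accept either a
// raw 32-byte binary value or a hex string in any case, return lowercase hex,
// and return "" for anything that is neither. Not the actual implementation.
func normalizePubkeyHexSketch(v []byte) string {
	if len(v) == 32 {
		return hex.Enc(v) // binary-encoded value: re-encode as lowercase hex
	}
	s := strings.ToLower(string(v))
	for _, c := range s {
		if (c < '0' || c > '9') && (c < 'a' || c > 'f') {
			return "" // not a hex string
		}
	}
	return s
}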
pkg/neo4j/query_events_test.go (new file, 314 lines)
@@ -0,0 +1,314 @@
package neo4j

import (
	"context"
	"testing"

	"git.mleku.dev/mleku/nostr/encoders/filter"
	"git.mleku.dev/mleku/nostr/encoders/kind"
	"git.mleku.dev/mleku/nostr/encoders/tag"
	"git.mleku.dev/mleku/nostr/encoders/timestamp"
)

// Valid test pubkeys and event IDs (64-character lowercase hex)
const (
	validPubkey1  = "0000000000000000000000000000000000000000000000000000000000000001"
	validPubkey2  = "0000000000000000000000000000000000000000000000000000000000000002"
	validPubkey3  = "0000000000000000000000000000000000000000000000000000000000000003"
	validEventID1 = "1111111111111111111111111111111111111111111111111111111111111111"
	validEventID2 = "2222222222222222222222222222222222222222222222222222222222222222"
	validEventID3 = "3333333333333333333333333333333333333333333333333333333333333333"
)

// TestQueryEventsWithNilFilter tests that QueryEvents handles nil filter fields gracefully.
// This test covers the nil pointer fix in query-events.go.
func TestQueryEventsWithNilFilter(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	// Clean up before test
	cleanTestDatabase()

	// Setup some test events
	setupTestEvent(t, validEventID1, validPubkey1, 1, "[]")
	setupTestEvent(t, validEventID2, validPubkey2, 1, "[]")

	ctx := context.Background()

	// Test 1: Completely empty filter (all nil fields)
	t.Run("EmptyFilter", func(t *testing.T) {
		f := &filter.F{}
		events, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents with empty filter should not panic: %v", err)
		}
		if len(events) == 0 {
			t.Error("Expected to find events with empty filter")
		}
	})

	// Test 2: Filter with nil Ids
	t.Run("NilIds", func(t *testing.T) {
		f := &filter.F{
			Ids: nil, // Explicitly nil
		}
		_, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents with nil Ids should not panic: %v", err)
		}
	})

	// Test 3: Filter with nil Authors
	t.Run("NilAuthors", func(t *testing.T) {
		f := &filter.F{
			Authors: nil, // Explicitly nil
		}
		_, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents with nil Authors should not panic: %v", err)
		}
	})

	// Test 4: Filter with nil Kinds
	t.Run("NilKinds", func(t *testing.T) {
		f := &filter.F{
			Kinds: nil, // Explicitly nil
		}
		_, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents with nil Kinds should not panic: %v", err)
		}
	})

	// Test 5: Filter with empty Ids slice
	t.Run("EmptyIds", func(t *testing.T) {
		f := &filter.F{
			Ids: &tag.S{T: [][]byte{}},
		}
		_, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents with empty Ids should not panic: %v", err)
		}
	})

	// Test 6: Filter with empty Authors slice
	t.Run("EmptyAuthors", func(t *testing.T) {
		f := &filter.F{
			Authors: &tag.S{T: [][]byte{}},
		}
		_, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents with empty Authors should not panic: %v", err)
		}
	})

	// Test 7: Filter with empty Kinds slice
	t.Run("EmptyKinds", func(t *testing.T) {
		f := &filter.F{
			Kinds: &kind.S{K: []*kind.T{}},
		}
		_, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents with empty Kinds should not panic: %v", err)
		}
	})
}
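
Every subtest pins the same invariant: a nil filter field and an empty slice must both mean "no constraint", never a dereference. Restated as a tiny standalone helper (editor's sketch, assuming fmt and strings imports; the real builder is the buildCypherQuery hunk above):

// The defensive idiom these subtests lock in: check the pointer before its
// length, skip empty values, and emit no clause when nothing survives.
func idWhereClause(ids *tag.S) string {
	if ids == nil || len(ids.T) == 0 {
		return "" // nil and empty are equivalent: no constraint
	}
	conds := make([]string, 0, len(ids.T))
	for i := range ids.T {
		if len(ids.T[i]) == 0 {
			continue
		}
		conds = append(conds, fmt.Sprintf("e.id = $id_%d", i))
	}
	if len(conds) == 0 {
		return ""
	}
	return "(" + strings.Join(conds, " OR ") + ")"
}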

// TestQueryEventsWithValidFilters tests that QueryEvents works correctly with valid filters
func TestQueryEventsWithValidFilters(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	// Clean up before test
	cleanTestDatabase()

	// Setup test events
	setupTestEvent(t, validEventID1, validPubkey1, 1, "[]")
	setupTestEvent(t, validEventID2, validPubkey2, 3, "[]")
	setupTestEvent(t, validEventID3, validPubkey1, 1, "[]")

	ctx := context.Background()

	// Test 1: Filter by ID
	t.Run("FilterByID", func(t *testing.T) {
		f := &filter.F{
			Ids: tag.NewFromBytesSlice([]byte(validEventID1)),
		}
		events, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents failed: %v", err)
		}
		if len(events) != 1 {
			t.Errorf("Expected 1 event, got %d", len(events))
		}
	})

	// Test 2: Filter by Author
	t.Run("FilterByAuthor", func(t *testing.T) {
		f := &filter.F{
			Authors: tag.NewFromBytesSlice([]byte(validPubkey1)),
		}
		events, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents failed: %v", err)
		}
		if len(events) != 2 {
			t.Errorf("Expected 2 events from pubkey1, got %d", len(events))
		}
	})

	// Test 3: Filter by Kind
	t.Run("FilterByKind", func(t *testing.T) {
		f := &filter.F{
			Kinds: kind.NewS(kind.New(1)),
		}
		events, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents failed: %v", err)
		}
		if len(events) != 2 {
			t.Errorf("Expected 2 kind-1 events, got %d", len(events))
		}
	})

	// Test 4: Combined filters (kind + author)
	t.Run("FilterByKindAndAuthor", func(t *testing.T) {
		f := &filter.F{
			Kinds:   kind.NewS(kind.New(1)),
			Authors: tag.NewFromBytesSlice([]byte(validPubkey1)),
		}
		events, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents failed: %v", err)
		}
		if len(events) != 2 {
			t.Errorf("Expected 2 kind-1 events from pubkey1, got %d", len(events))
		}
	})

	// Test 5: Filter with limit
	t.Run("FilterWithLimit", func(t *testing.T) {
		limit := 1
		f := &filter.F{
			Kinds: kind.NewS(kind.New(1)),
			Limit: &limit,
		}
		events, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents failed: %v", err)
		}
		if len(events) != 1 {
			t.Errorf("Expected 1 event due to limit, got %d", len(events))
		}
	})
}
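
The Limit subtest implies the builder translates *f.Limit into a Cypher LIMIT when the query text is finalized. A plausible shape of that step, as a self-contained helper (assumed names, since that part of the builder is not in this hunk):

// Editor's sketch of how the limit subtest's behavior could be produced;
// the actual builder may assemble this differently.
func appendLimit(cypher string, params map[string]any, limit *int) string {
	if limit != nil && *limit > 0 {
		params["limit"] = int64(*limit) // the Neo4j driver expects int64
		return cypher + " LIMIT $limit"
	}
	return cypher
}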

// TestBuildCypherQueryWithNilFields tests the buildCypherQuery function with nil fields
func TestBuildCypherQueryWithNilFields(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	// Test that buildCypherQuery doesn't panic with nil fields
	t.Run("AllNilFields", func(t *testing.T) {
		f := &filter.F{
			Ids:     nil,
			Authors: nil,
			Kinds:   nil,
			Since:   nil,
			Until:   nil,
			Tags:    nil,
			Limit:   nil,
		}
		cypher, params := testDB.buildCypherQuery(f, false)
		if cypher == "" {
			t.Error("Expected non-empty Cypher query")
		}
		if params == nil {
			t.Error("Expected non-nil params map")
		}
	})

	// Test with empty slices
	t.Run("EmptySlices", func(t *testing.T) {
		f := &filter.F{
			Ids:     &tag.S{T: [][]byte{}},
			Authors: &tag.S{T: [][]byte{}},
			Kinds:   &kind.S{K: []*kind.T{}},
		}
		cypher, params := testDB.buildCypherQuery(f, false)
		if cypher == "" {
			t.Error("Expected non-empty Cypher query")
		}
		if params == nil {
			t.Error("Expected non-nil params map")
		}
	})

	// Test with time filters
	t.Run("TimeFilters", func(t *testing.T) {
		since := timestamp.Now()
		until := timestamp.Now()
		f := &filter.F{
			Since: &since,
			Until: &until,
		}
		cypher, params := testDB.buildCypherQuery(f, false)
		if _, ok := params["since"]; !ok {
			t.Error("Expected 'since' param")
		}
		if _, ok := params["until"]; !ok {
			t.Error("Expected 'until' param")
		}
		_ = cypher
	})
}

// TestQueryEventsUppercaseHexNormalization tests that uppercase hex in filters is normalized
func TestQueryEventsUppercaseHexNormalization(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}

	// Clean up before test
	cleanTestDatabase()

	// Setup test event with lowercase pubkey (as Neo4j stores)
	lowercasePubkey := "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"
	lowercaseEventID := "fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210"
	setupTestEvent(t, lowercaseEventID, lowercasePubkey, 1, "[]")

	ctx := context.Background()

	// Test query with uppercase pubkey - should be normalized and still match
	t.Run("UppercaseAuthor", func(t *testing.T) {
		uppercasePubkey := "ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789"
		f := &filter.F{
			Authors: tag.NewFromBytesSlice([]byte(uppercasePubkey)),
		}
		events, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents failed: %v", err)
		}
		if len(events) != 1 {
			t.Errorf("Expected to find 1 event with uppercase pubkey filter, got %d", len(events))
		}
	})

	// Test query with uppercase event ID - should be normalized and still match
	t.Run("UppercaseEventID", func(t *testing.T) {
		uppercaseEventID := "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210"
		f := &filter.F{
			Ids: tag.NewFromBytesSlice([]byte(uppercaseEventID)),
		}
		events, err := testDB.QueryEvents(ctx, f)
		if err != nil {
			t.Fatalf("QueryEvents failed: %v", err)
		}
		if len(events) != 1 {
			t.Errorf("Expected to find 1 event with uppercase ID filter, got %d", len(events))
		}
	})
}

pkg/neo4j/run-tests.sh (new executable file, 50 lines)
@@ -0,0 +1,50 @@
#!/bin/bash
# Run Neo4j integration tests with Docker
# Usage: ./run-tests.sh

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

echo "Starting Neo4j test database..."
docker compose up -d

echo "Waiting for Neo4j to be ready..."
for i in {1..30}; do
    if docker compose exec -T neo4j-test cypher-shell -u neo4j -p testpassword "RETURN 1" > /dev/null 2>&1; then
        echo "Neo4j is ready!"
        break
    fi
    if [ $i -eq 30 ]; then
        echo "Timeout waiting for Neo4j"
        docker compose logs
        docker compose down
        exit 1
    fi
    echo "Waiting... ($i/30)"
    sleep 2
done

echo ""
echo "Running tests..."
echo "================="

# Set environment variables for tests
export NEO4J_TEST_URI="bolt://localhost:7687"
export NEO4J_TEST_USER="neo4j"
export NEO4J_TEST_PASSWORD="testpassword"

# Run tests with verbose output. Capture the exit code explicitly: under
# `set -e` a bare failing `go test` would abort the script before the
# database is torn down, so disarm -e for this one command.
cd ../..
TEST_EXIT_CODE=0
CGO_ENABLED=0 go test -v ./pkg/neo4j/... -count=1 || TEST_EXIT_CODE=$?

cd "$SCRIPT_DIR"

echo ""
echo "================="
echo "Stopping Neo4j test database..."
docker compose down

exit $TEST_EXIT_CODE

@@ -158,6 +158,14 @@ CREATE (e)-[:AUTHORED_BY]->(a)
	// This is required because Cypher doesn't allow MATCH after CREATE without WITH
	needsWithClause := true

	// Collect all e-tags, p-tags, and other tags first so we can generate proper Cypher
	// Neo4j requires WITH clauses between certain clause types (FOREACH -> MATCH/MERGE)
	type tagInfo struct {
		tagType string
		value   string
	}
	var eTags, pTags, otherTags []tagInfo

	// Only process tags if they exist
	if ev.Tags != nil {
		for _, tagItem := range *ev.Tags {
@@ -168,30 +176,39 @@ CREATE (e)-[:AUTHORED_BY]->(a)
			tagType := string(tagItem.T[0])

			switch tagType {
			case "e": // Event reference - creates REFERENCES relationship
				// Use ExtractETagValue to handle binary encoding and normalize to lowercase hex
			case "e": // Event reference
				tagValue := ExtractETagValue(tagItem)
				if tagValue == "" {
					continue // Skip invalid e-tags
				if tagValue != "" {
					eTags = append(eTags, tagInfo{"e", tagValue})
				}
			case "p": // Pubkey mention
				tagValue := ExtractPTagValue(tagItem)
				if tagValue != "" {
					pTags = append(pTags, tagInfo{"p", tagValue})
				}
			default: // Other tags
				tagValue := string(tagItem.T[1])
				otherTags = append(otherTags, tagInfo{tagType, tagValue})
			}
		}
	}

			// Create reference to another event (if it exists)
			paramName := fmt.Sprintf("eTag_%d", eTagIndex)
			params[paramName] = tagValue
	// Generate Cypher for e-tags (OPTIONAL MATCH + FOREACH pattern)
	// These need WITH clause before first one, and WITH after all FOREACHes
	for i, tag := range eTags {
		paramName := fmt.Sprintf("eTag_%d", eTagIndex)
		params[paramName] = tag.value

			// Add WITH clause before first OPTIONAL MATCH only
			// This is required because Cypher doesn't allow MATCH after CREATE without WITH.
			// However, you CAN chain multiple OPTIONAL MATCH + FOREACH pairs without
			// additional WITH clauses between them - Cypher allows OPTIONAL MATCH after FOREACH.
			if needsWithClause {
				cypher += `
		// Add WITH clause before first OPTIONAL MATCH only
		if needsWithClause {
			cypher += `
// Carry forward event and author nodes for tag processing
WITH e, a
`
				needsWithClause = false
			}
			needsWithClause = false
		}

			cypher += fmt.Sprintf(`
		cypher += fmt.Sprintf(`
// Reference to event (e-tag)
OPTIONAL MATCH (ref%d:Event {id: $%s})
FOREACH (ignoreMe IN CASE WHEN ref%d IS NOT NULL THEN [1] ELSE [] END |
@@ -199,47 +216,64 @@ FOREACH (ignoreMe IN CASE WHEN ref%d IS NOT NULL THEN [1] ELSE [] END |
)
`, eTagIndex, paramName, eTagIndex, eTagIndex)

			eTagIndex++
		eTagIndex++

		case "p": // Pubkey mention - creates MENTIONS relationship
			// Use ExtractPTagValue to handle binary encoding and normalize to lowercase hex
			tagValue := ExtractPTagValue(tagItem)
			if tagValue == "" {
				continue // Skip invalid p-tags
			}
		// After the last e-tag FOREACH, add WITH clause if there are p-tags or other tags
		if i == len(eTags)-1 && (len(pTags) > 0 || len(otherTags) > 0) {
			cypher += `
// Required WITH after FOREACH before MERGE/MATCH
WITH e, a
`
		}
	}

			// Create mention to another NostrUser
			paramName := fmt.Sprintf("pTag_%d", pTagIndex)
			params[paramName] = tagValue
	// Generate Cypher for p-tags (MERGE pattern)
	for _, tag := range pTags {
		paramName := fmt.Sprintf("pTag_%d", pTagIndex)
		params[paramName] = tag.value

			cypher += fmt.Sprintf(`
		// If no e-tags were processed, we still need the initial WITH
		if needsWithClause {
			cypher += `
// Carry forward event and author nodes for tag processing
WITH e, a
`
			needsWithClause = false
		}

		cypher += fmt.Sprintf(`
// Mention of NostrUser (p-tag)
MERGE (mentioned%d:NostrUser {pubkey: $%s})
ON CREATE SET mentioned%d.created_at = timestamp()
CREATE (e)-[:MENTIONS]->(mentioned%d)
`, pTagIndex, paramName, pTagIndex, pTagIndex)

			pTagIndex++
		pTagIndex++
	}

		default: // Other tags - creates Tag nodes and TAGGED_WITH relationships
			// For non-e/p tags, use direct string conversion (no binary encoding)
			tagValue := string(tagItem.T[1])
	// Generate Cypher for other tags (MERGE pattern)
	for _, tag := range otherTags {
		typeParam := fmt.Sprintf("tagType_%d", tagNodeIndex)
		valueParam := fmt.Sprintf("tagValue_%d", tagNodeIndex)
		params[typeParam] = tag.tagType
		params[valueParam] = tag.value

			// Create tag node and relationship
			typeParam := fmt.Sprintf("tagType_%d", tagNodeIndex)
			valueParam := fmt.Sprintf("tagValue_%d", tagNodeIndex)
			params[typeParam] = tagType
			params[valueParam] = tagValue
		// If no e-tags or p-tags were processed, we still need the initial WITH
		if needsWithClause {
			cypher += `
// Carry forward event and author nodes for tag processing
WITH e, a
`
			needsWithClause = false
		}

			cypher += fmt.Sprintf(`
		cypher += fmt.Sprintf(`
// Generic tag relationship
MERGE (tag%d:Tag {type: $%s, value: $%s})
CREATE (e)-[:TAGGED_WITH]->(tag%d)
`, tagNodeIndex, typeParam, valueParam, tagNodeIndex)

			tagNodeIndex++
		}
	}
		tagNodeIndex++
	}

	// Return the created event
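
The WITH bookkeeping above exists because of a Cypher grammar rule: after an updating clause such as CREATE or FOREACH, a following MATCH, OPTIONAL MATCH, or MERGE needs an intervening WITH. Stripped of the string assembly, the emitted query follows roughly this shape for one e-tag and one p-tag (editor's illustration; the real output is built piecewise above, and the FOREACH body is elided by the hunk):

// Illustrative shape of the generated Cypher (sketch, not captured output).
const generatedShapeSketch = `
CREATE (e:Event { ... })-[:AUTHORED_BY]->(a)
WITH e, a                      // required: MATCH after CREATE needs WITH
OPTIONAL MATCH (ref0:Event {id: $eTag_0})
FOREACH (ignoreMe IN CASE WHEN ref0 IS NOT NULL THEN [1] ELSE [] END |
  CREATE (e)-[:REFERENCES]->(ref0)   // per the e-tag comment above
)
WITH e, a                      // required: MERGE after FOREACH needs WITH
MERGE (mentioned0:NostrUser {pubkey: $pTag_0})
CREATE (e)-[:MENTIONS]->(mentioned0)
`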

@@ -1,15 +1,246 @@
package neo4j

import (
	"context"
	"os"
	"testing"
	"time"

	"next.orly.dev/pkg/database"
)

// skipIfNeo4jNotAvailable skips the test if Neo4j is not available
func skipIfNeo4jNotAvailable(t *testing.T) {
	// Check if Neo4j connection details are provided
	uri := os.Getenv("ORLY_NEO4J_URI")
	if uri == "" {
		t.Skip("Neo4j not available (set ORLY_NEO4J_URI to enable tests)")
// testDB is the shared database instance for tests
var testDB *N

// TestMain sets up and tears down the test database
func TestMain(m *testing.M) {
	// Default connection details; tests are skipped later if the connection fails
	neo4jURI := os.Getenv("NEO4J_TEST_URI")
	if neo4jURI == "" {
		neo4jURI = "bolt://localhost:7687"
	}
	neo4jUser := os.Getenv("NEO4J_TEST_USER")
	if neo4jUser == "" {
		neo4jUser = "neo4j"
	}
	neo4jPassword := os.Getenv("NEO4J_TEST_PASSWORD")
	if neo4jPassword == "" {
		neo4jPassword = "testpassword"
	}

	// Try to connect to Neo4j
	ctx, cancel := context.WithCancel(context.Background())
	cfg := &database.DatabaseConfig{
		DataDir:       os.TempDir(),
		Neo4jURI:      neo4jURI,
		Neo4jUser:     neo4jUser,
		Neo4jPassword: neo4jPassword,
	}

	var err error
	testDB, err = NewWithConfig(ctx, cancel, cfg)
	if err != nil {
		// If Neo4j is not available, skip integration tests
		os.Stderr.WriteString("Neo4j not available, skipping integration tests: " + err.Error() + "\n")
		os.Stderr.WriteString("Start Neo4j with: docker compose -f pkg/neo4j/docker-compose.yaml up -d\n")
		os.Exit(0)
	}

	// Wait for database to be ready
	select {
	case <-testDB.Ready():
		// Database is ready
	case <-time.After(30 * time.Second):
		os.Stderr.WriteString("Timeout waiting for Neo4j to be ready\n")
		os.Exit(1)
	}

	// Clean database before running tests
	cleanTestDatabase()

	// Run tests
	code := m.Run()

	// Clean up
	cleanTestDatabase()
	testDB.Close()
	cancel()

	os.Exit(code)
}

// cleanTestDatabase removes all nodes and relationships
func cleanTestDatabase() {
	ctx := context.Background()
	// Delete all nodes and relationships
	_, _ = testDB.ExecuteWrite(ctx, "MATCH (n) DETACH DELETE n", nil)
	// Clear migration markers so migrations can run fresh
	_, _ = testDB.ExecuteWrite(ctx, "MATCH (m:Migration) DELETE m", nil)
}

// setupTestEvent creates a test event directly in Neo4j for testing queries
func setupTestEvent(t *testing.T, eventID, pubkey string, kind int64, tags string) {
	t.Helper()
	ctx := context.Background()

	cypher := `
	MERGE (a:NostrUser {pubkey: $pubkey})
	CREATE (e:Event {
		id: $eventId,
		serial: $serial,
		kind: $kind,
		created_at: $createdAt,
		content: $content,
		sig: $sig,
		pubkey: $pubkey,
		tags: $tags,
		expiration: 0
	})
	CREATE (e)-[:AUTHORED_BY]->(a)
	`

	params := map[string]any{
		"eventId":   eventID,
		"serial":    time.Now().UnixNano(),
		"kind":      kind,
		"createdAt": time.Now().Unix(),
		"content":   "test content",
		"sig": "0000000000000000000000000000000000000000000000000000000000000000" +
			"0000000000000000000000000000000000000000000000000000000000000000",
		"pubkey": pubkey,
		"tags":   tags,
	}

	_, err := testDB.ExecuteWrite(ctx, cypher, params)
	if err != nil {
		t.Fatalf("Failed to setup test event: %v", err)
	}
}

// setupInvalidNostrUser creates a NostrUser with an invalid (binary) pubkey for testing migrations
func setupInvalidNostrUser(t *testing.T, invalidPubkey string) {
	t.Helper()
	ctx := context.Background()

	cypher := `CREATE (u:NostrUser {pubkey: $pubkey, created_at: timestamp()})`
	params := map[string]any{"pubkey": invalidPubkey}

	_, err := testDB.ExecuteWrite(ctx, cypher, params)
	if err != nil {
		t.Fatalf("Failed to setup invalid NostrUser: %v", err)
	}
}

// setupInvalidEvent creates an Event with an invalid pubkey/ID for testing migrations
func setupInvalidEvent(t *testing.T, invalidID, invalidPubkey string) {
	t.Helper()
	ctx := context.Background()

	cypher := `
	CREATE (e:Event {
		id: $id,
		pubkey: $pubkey,
		kind: 1,
		created_at: timestamp(),
		content: 'test',
		sig: 'invalid',
		tags: '[]',
		serial: $serial,
		expiration: 0
	})
	`
	params := map[string]any{
		"id":     invalidID,
		"pubkey": invalidPubkey,
		"serial": time.Now().UnixNano(),
	}

	_, err := testDB.ExecuteWrite(ctx, cypher, params)
	if err != nil {
		t.Fatalf("Failed to setup invalid Event: %v", err)
	}
}

// setupInvalidTag creates a Tag node with an invalid value for testing migrations
func setupInvalidTag(t *testing.T, tagType string, invalidValue string) {
	t.Helper()
	ctx := context.Background()

	cypher := `CREATE (tag:Tag {type: $type, value: $value})`
	params := map[string]any{
		"type":  tagType,
		"value": invalidValue,
	}

	_, err := testDB.ExecuteWrite(ctx, cypher, params)
	if err != nil {
		t.Fatalf("Failed to setup invalid Tag: %v", err)
	}
}

// countNodes counts nodes with a given label
func countNodes(t *testing.T, label string) int64 {
	t.Helper()
	ctx := context.Background()

	cypher := "MATCH (n:" + label + ") RETURN count(n) AS count"
	result, err := testDB.ExecuteRead(ctx, cypher, nil)
	if err != nil {
		t.Fatalf("Failed to count nodes: %v", err)
	}

	if result.Next(ctx) {
		if count, ok := result.Record().Values[0].(int64); ok {
			return count
		}
	}
	return 0
}

// countInvalidNostrUsers counts NostrUser nodes with invalid pubkeys
func countInvalidNostrUsers(t *testing.T) int64 {
	t.Helper()
	ctx := context.Background()

	cypher := `
	MATCH (u:NostrUser)
	WHERE size(u.pubkey) <> 64
	   OR NOT u.pubkey =~ '^[0-9a-f]{64}$'
	RETURN count(u) AS count
	`
	result, err := testDB.ExecuteRead(ctx, cypher, nil)
	if err != nil {
		t.Fatalf("Failed to count invalid NostrUsers: %v", err)
	}

	if result.Next(ctx) {
		if count, ok := result.Record().Values[0].(int64); ok {
			return count
		}
	}
	return 0
}

// countInvalidTags counts Tag nodes (e/p type) with invalid values
func countInvalidTags(t *testing.T) int64 {
	t.Helper()
	ctx := context.Background()

	cypher := `
	MATCH (t:Tag)
	WHERE t.type IN ['e', 'p']
	  AND (size(t.value) <> 64 OR NOT t.value =~ '^[0-9a-f]{64}$')
	RETURN count(t) AS count
	`
	result, err := testDB.ExecuteRead(ctx, cypher, nil)
	if err != nil {
		t.Fatalf("Failed to count invalid Tags: %v", err)
	}

	if result.Next(ctx) {
		if count, ok := result.Record().Values[0].(int64); ok {
			return count
		}
	}
	return 0
}
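
The setupInvalid* and countInvalid* helpers above pair up naturally in a migration test. A hypothetical usage sketch (editor's example, not a test present in this changeset), mirroring the v2 tests earlier in the diff:

// Hypothetical: seed bad data, run the v2 migration, assert nothing
// invalid survives. Helper and migration names are taken from this diff.
func TestMigrationV2_CleansInvalidData_Sketch(t *testing.T) {
	if testDB == nil {
		t.Skip("Neo4j not available")
	}
	cleanTestDatabase()
	ctx := context.Background()

	setupInvalidNostrUser(t, "not-a-hex-pubkey")
	setupInvalidTag(t, "e", "too-short")

	if err := migrateBinaryToHex(ctx, testDB); err != nil {
		t.Fatalf("Migration failed: %v", err)
	}

	if n := countInvalidNostrUsers(t); n != 0 {
		t.Errorf("Expected 0 invalid NostrUsers after migration, got %d", n)
	}
	if n := countInvalidTags(t); n != 0 {
		t.Errorf("Expected 0 invalid Tags after migration, got %d", n)
	}
}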
@@ -1 +1 @@
v0.34.0
v0.34.1