Compare commits

...

25 Commits

Author SHA1 Message Date
5bcb8d7f52 upgrade to gitea workflows
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-18 20:50:05 +00:00
b3b963ecf5 replace github workflows with gitea 2025-11-18 20:46:54 +00:00
d4fb6cbf49 fix handleevents not prompting auth for event publish with auth-required
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-18 20:26:36 +00:00
d5c0e3abfc bump to v0.29.3
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-18 18:22:39 +00:00
1d4d877a10 fix auth-required not sending immediate challenge, benchmark leak 2025-11-18 18:21:11 +00:00
038d1959ed add dgraph backend to benchmark suite with safe type assertions for multi-backend support 2025-11-17 16:52:38 +00:00
86481a42e8 initial draft of neo4j database driver 2025-11-17 08:19:44 +00:00
beed174e83 make query cache normalize filters so same query different order filters are cache hits
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-17 00:04:21 +00:00
511b8cae5f improve query cache with zstd level 9
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-16 20:52:18 +00:00
dfe8b5f8b2 add a filter query cache 512mb that stores already decoded recent query results
this should improve performance noticeably for typical kind 1 client queries
2025-11-16 18:29:53 +00:00
95bcf85ad7 optimizing badger cache, won a 10-15% improvement in most benchmarks 2025-11-16 15:07:36 +00:00
9bb3a7e057 totally off topic little document about ion drives 2025-11-16 00:00:04 +00:00
a608c06138 draft spec for integrating dgraph 2025-11-14 22:46:43 +00:00
bf8d912063 enhance spider with rate limit handling, follow list updates, and improved reconnect logic; bump version to v0.29.0
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
also reduces CPU load for spider, and minor CORS fixes
2025-11-14 21:15:24 +00:00
24eef5b5a8 fix CORS headers and a wasm experiment
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-14 19:15:50 +00:00
9fb976703d hello world in wat 2025-11-14 14:37:36 +00:00
1d9a6903b8 bump version
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-14 12:18:01 +00:00
29e175efb0 implement event table subtyping for small events in value log
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-14 12:15:52 +00:00
7169a2158f when in "none" ACL mode, privileged checks are not enforced
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-13 08:31:02 +00:00
baede6d37f extend script test to two read two write to ensure script continues running
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-11 15:24:58 +00:00
3e7cc01d27 make script stderr print into relay logs
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-11 14:41:54 +00:00
cc99fcfab5 bump to v0.27.5
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-11 14:38:05 +00:00
b2056b6636 bump to v0.27.5
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-11 13:48:23 +00:00
108cbdce93 fix docker image cleanups in test 2025-11-11 13:47:57 +00:00
e9fb314496 fully test and verify policy script functionality
Some checks failed
Go / build (push) Has been cancelled
Go / release (push) Has been cancelled
2025-11-11 09:37:42 +00:00
165 changed files with 20871 additions and 6186 deletions

View File

@@ -32,9 +32,73 @@
"Bash(export CGO_ENABLED=0)",
"Bash(bash:*)",
"Bash(CGO_ENABLED=0 ORLY_LOG_LEVEL=debug go test:*)",
"Bash(/tmp/test-policy-script.sh)"
"Bash(/tmp/test-policy-script.sh)",
"Bash(docker --version:*)",
"Bash(mkdir:*)",
"Bash(./test-docker-policy/test-policy.sh:*)",
"Bash(docker-compose:*)",
"Bash(tee:*)",
"Bash(docker logs:*)",
"Bash(timeout 5 websocat:*)",
"Bash(docker exec:*)",
"Bash(TESTSIG=\"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb\":*)",
"Bash(echo:*)",
"Bash(git rm:*)",
"Bash(git add:*)",
"Bash(./test-policy.sh:*)",
"Bash(docker rm:*)",
"Bash(./scripts/docker-policy/test-policy.sh:*)",
"Bash(./policytest:*)",
"WebSearch",
"WebFetch(domain:blog.scottlogic.com)",
"WebFetch(domain:eli.thegreenplace.net)",
"WebFetch(domain:learn-wasm.dev)",
"Bash(curl:*)",
"Bash(./build.sh)",
"Bash(./pkg/wasm/shell/run.sh:*)",
"Bash(./run.sh echo.wasm)",
"Bash(./test.sh)",
"Bash(ORLY_PPROF=cpu ORLY_LOG_LEVEL=info ORLY_LISTEN=0.0.0.0 ORLY_PORT=3334 ORLY_ADMINS=npub1fjqqy4a93z5zsjwsfxqhc2764kvykfdyttvldkkkdera8dr78vhsmmleku ORLY_OWNERS=npub1fjqqy4a93z5zsjwsfxqhc2764kvykfdyttvldkkkdera8dr78vhsmmleku ORLY_ACL_MODE=follows ORLY_SPIDER_MODE=follows timeout 120 go run:*)",
"Bash(go tool pprof:*)",
"Bash(go get:*)",
"Bash(go mod tidy:*)",
"Bash(go list:*)",
"Bash(timeout 180 go build:*)",
"Bash(timeout 240 go build:*)",
"Bash(timeout 300 go build:*)",
"Bash(/tmp/orly:*)",
"Bash(./orly version:*)",
"Bash(git checkout:*)",
"Bash(docker ps:*)",
"Bash(./run-profile.sh:*)",
"Bash(sudo rm:*)",
"Bash(docker compose:*)",
"Bash(./run-benchmark.sh:*)",
"Bash(docker run:*)",
"Bash(docker inspect:*)",
"Bash(./run-benchmark-clean.sh:*)",
"Bash(cd:*)",
"Bash(CGO_ENABLED=0 timeout 180 go build:*)",
"Bash(/home/mleku/src/next.orly.dev/pkg/dgraph/dgraph.go)",
"Bash(ORLY_LOG_LEVEL=debug timeout 60 ./orly:*)",
"Bash(ORLY_LOG_LEVEL=debug timeout 30 ./orly:*)",
"Bash(killall:*)",
"Bash(kill:*)",
"Bash(gh repo list:*)",
"Bash(gh auth:*)",
"Bash(/tmp/backup-github-repos.sh)",
"Bash(./benchmark:*)",
"Bash(env)",
"Bash(./run-badger-benchmark.sh:*)",
"Bash(./update-github-vpn.sh:*)",
"Bash(dmesg:*)",
"Bash(export:*)",
"Bash(timeout 60 /tmp/benchmark-fixed:*)",
"Bash(/tmp/test-auth-event.sh)",
"Bash(CGO_ENABLED=0 timeout 180 go test:*)"
],
"deny": [],
"ask": []
}
},
"outputStyle": "Explanatory"
}

90
.dockerignore Normal file
View File

@@ -0,0 +1,90 @@
# Build artifacts
orly
test-build
*.exe
*.dll
*.so
*.dylib
# Test files
*_test.go
# IDE files
.vscode/
.idea/
*.swp
*.swo
*~
# OS files
.DS_Store
Thumbs.db
# Git
.git/
.gitignore
# Docker files (except the one we're using)
Dockerfile*
!scripts/Dockerfile.deploy-test
docker-compose.yml
.dockerignore
# Node modules (will be installed during build)
app/web/node_modules/
# app/web/dist/ - NEEDED for embedded web UI
app/web/bun.lockb
# Go modules cache
# go.sum - NEEDED for docker builds
# Logs and temp files
*.log
tmp/
temp/
# Database files
*.db
*.badger
# Certificates and keys
*.pem
*.key
*.crt
# Environment files
.env
.env.local
.env.production
# Documentation that's not needed for deployment test
docs/
*.md
*.adoc
!README.adoc
# Scripts we don't need for testing
scripts/benchmark.sh
scripts/reload.sh
scripts/run-*.sh
scripts/test.sh
scripts/runtests.sh
scripts/sprocket/
# Benchmark and test data
# cmd/benchmark/ - NEEDED for benchmark-runner docker build
cmd/benchmark/data/
cmd/benchmark/reports/
cmd/benchmark/external/
reports/
*.txt
*.conf
*.jsonl
# Policy test files
POLICY_*.md
test_policy.sh
test-*.sh
# Other build artifacts
tee

View File

@@ -1,5 +1,5 @@
# This workflow will build a golang project
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
# This workflow will build a golang project for Gitea Actions
# For more information see: https://docs.gitea.com/usage/actions/overview
#
# NOTE: All builds use CGO_ENABLED=0 since p8k library uses purego (not CGO)
# The library dynamically loads libsecp256k1 at runtime via purego
@@ -10,9 +10,9 @@
# git tag v1.2.3
# git push origin v1.2.3
# 3. The workflow will automatically:
# - Build binaries for multiple platforms (Linux, macOS, Windows)
# - Create a GitHub release with the binaries
# - Generate release notes
# - Build binaries for Linux AMD64
# - Create a Gitea release with the binaries
# - Generate checksums
name: Go
@@ -25,10 +25,13 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: "1.25"
@@ -40,26 +43,26 @@ jobs:
# Copy the libsecp256k1.so to root directory so tests can find it
cp pkg/crypto/p8k/libsecp256k1.so .
CGO_ENABLED=0 go test -v $(go list ./... | xargs -n1 sh -c 'ls $0/*_test.go 1>/dev/null 2>&1 && echo $0' | grep .)
release:
needs: build
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
steps:
- uses: actions/checkout@v4
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version: '1.25'
- name: Build Release Binaries (Pure Go + purego)
if: startsWith(github.ref, 'refs/tags/v')
run: |
# Extract version from tag (e.g., v1.2.3 -> 1.2.3)
VERSION=${GITHUB_REF#refs/tags/v}
VERSION=${GITHUB_REF_NAME#v}
echo "Building release binaries for version $VERSION (pure Go + purego)"
# Create directory for binaries
@@ -72,17 +75,17 @@ jobs:
echo "Building Linux AMD64 (pure Go + purego dynamic loading)..."
GOEXPERIMENT=greenteagc,jsonv2 GOOS=linux GOARCH=amd64 CGO_ENABLED=0 \
go build -ldflags "-s -w" -o release-binaries/orly-${VERSION}-linux-amd64 .
# Create checksums
cd release-binaries
sha256sum * > SHA256SUMS.txt
cd ..
- name: Create GitHub Release
if: startsWith(github.ref, 'refs/tags/v')
uses: softprops/action-gh-release@v1
- name: Create Gitea Release
uses: actions/upload-release-action@v1
with:
files: release-binaries/*
draft: false
prerelease: false
generate_release_notes: true
repo_token: ${{ secrets.GITEA_TOKEN }}
file: release-binaries/*
tag: ${{ github.ref_name }}
overwrite: true
file_glob: true

3634
.gitignore vendored

File diff suppressed because it is too large Load Diff

319
BADGER_MIGRATION_GUIDE.md Normal file
View File

@@ -0,0 +1,319 @@
# Badger Database Migration Guide
## Overview
This guide covers migrating your ORLY relay database when changing Badger configuration parameters, specifically for the VLogPercentile and table size optimizations.
## When Migration is Needed
Based on research of Badger v4 source code and documentation:
### Configuration Changes That DON'T Require Migration
The following options can be changed **without migration**:
- `BlockCacheSize` - Only affects in-memory cache
- `IndexCacheSize` - Only affects in-memory cache
- `NumCompactors` - Runtime setting
- `NumLevelZeroTables` - Affects compaction timing
- `NumMemtables` - Affects write buffering
- `DetectConflicts` - Runtime conflict detection
- `Compression` - New data uses new compression, old data remains as-is
- `BlockSize` - Explicitly stated in Badger source: "Changing BlockSize across DB runs will not break badger"
### Configuration Changes That BENEFIT from Migration
The following options apply to **new writes only** - existing data gradually adopts new settings through compaction:
- `VLogPercentile` - Affects where **new** values are stored (LSM vs vlog)
- `BaseTableSize` - **New** SST files use new size
- `MemTableSize` - Affects new write buffering
- `BaseLevelSize` - Affects new LSM tree structure
- `ValueLogFileSize` - New vlog files use new size
**Migration Impact:** Without migration, existing data remains in its current location (LSM tree or value log). The database will **gradually** adapt through normal compaction, which may take days or weeks depending on write volume.
## Migration Options
### Option 1: No Migration (Let Natural Compaction Handle It)
**Best for:** Low-traffic relays, testing environments
**Pros:**
- No downtime required
- No manual intervention
- Zero risk of data loss
**Cons:**
- Benefits take time to materialize (days/weeks)
- Old data layout persists until natural compaction
- Cache tuning benefits delayed
**Steps:**
1. Update Badger configuration in `pkg/database/database.go`
2. Restart ORLY relay
3. Monitor performance over several days
4. Optionally run manual GC: `db.RunValueLogGC(0.5)` periodically
### Option 2: Manual Value Log Garbage Collection
**Best for:** Medium-traffic relays wanting faster optimization
**Pros:**
- Faster than natural compaction
- Still safe (no export/import)
- Can run while relay is online
**Cons:**
- Still gradual (hours instead of days)
- CPU/disk intensive during GC
- Partial benefit until GC completes
**Steps:**
1. Update Badger configuration
2. Restart ORLY relay
3. Monitor logs for compaction activity
4. Manually trigger GC if needed (future feature - not currently exposed)
### Option 3: Full Export/Import Migration (RECOMMENDED for Production)
**Best for:** Production relays, large databases, maximum performance
**Pros:**
- Immediate full benefit of new configuration
- Clean database structure
- Predictable migration time
- Reclaims all disk space
**Cons:**
- Requires relay downtime (several hours for large DBs)
- Requires 2x disk space temporarily
- More complex procedure
**Steps:** See detailed procedure below
## Full Migration Procedure (Option 3)
### Prerequisites
1. **Disk space:** At minimum 2.5x current database size
- 1x for current database
- 1x for JSONL export
- 0.5x for new database (will be smaller with compression)
2. **Time estimate:**
- Export: ~100-500 MB/s depending on disk speed
- Import: ~50-200 MB/s with indexing overhead
- Example: 10 GB database = ~10-30 minutes total
3. **Backup:** Ensure you have a recent backup before proceeding
### Step-by-Step Migration
#### 1. Prepare Migration Script
Use the provided `scripts/migrate-badger-config.sh` script (see below).
#### 2. Stop the Relay
```bash
# If using systemd
sudo systemctl stop orly
# If running manually
pkill orly
```
#### 3. Run Migration
```bash
cd ~/src/next.orly.dev
chmod +x scripts/migrate-badger-config.sh
./scripts/migrate-badger-config.sh
```
The script will:
- Export all events to JSONL format
- Move old database to backup location
- Create new database with updated configuration
- Import all events (rebuilds indexes automatically)
- Verify event count matches
#### 4. Verify Migration
```bash
# Check that events were migrated
echo "Old event count:"
cat ~/.local/share/ORLY-backup-*/migration.log | grep "exported.*events"
echo "New event count:"
cat ~/.local/share/ORLY/migration.log | grep "saved.*events"
```
#### 5. Restart Relay
```bash
# If using systemd
sudo systemctl start orly
sudo journalctl -u orly -f
# If running manually
./orly
```
#### 6. Monitor Performance
Watch for improvements in:
- Cache hit ratio (should be >85% with new config)
- Average query latency (should be <3ms for cached events)
- No "Block cache too small" warnings in logs
#### 7. Clean Up (After Verification)
```bash
# Once you confirm everything works (wait 24-48 hours)
rm -rf ~/.local/share/ORLY-backup-*
rm ~/.local/share/ORLY/events-export.jsonl
```
## Migration Script
The migration script is located at `scripts/migrate-badger-config.sh` and handles:
- Automatic export of all events to JSONL
- Safe backup of existing database
- Creation of new database with updated config
- Import and indexing of all events
- Verification of event counts
## Rollback Procedure
If migration fails or performance degrades:
```bash
# Stop the relay
sudo systemctl stop orly # or pkill orly
# Restore old database
rm -rf ~/.local/share/ORLY
mv ~/.local/share/ORLY-backup-$(date +%Y%m%d)* ~/.local/share/ORLY
# Restart with old configuration
sudo systemctl start orly
```
## Configuration Changes Summary
### Changes Applied in pkg/database/database.go
```go
// Cache sizes (can change without migration)
opts.BlockCacheSize = 16384 MB (was 512 MB)
opts.IndexCacheSize = 4096 MB (was 256 MB)
// Table sizes (benefits from migration)
opts.BaseTableSize = 8 MB (was 64 MB)
opts.MemTableSize = 16 MB (was 64 MB)
opts.ValueLogFileSize = 128 MB (was 256 MB)
// Inline event optimization (CRITICAL - benefits from migration)
opts.VLogPercentile = 0.99 (was 0.0 - default)
// LSM structure (benefits from migration)
opts.BaseLevelSize = 64 MB (was 10 MB - default)
// Performance settings (no migration needed)
opts.DetectConflicts = false (was true)
opts.Compression = options.ZSTD (was options.None)
opts.NumCompactors = 8 (was 4)
opts.NumMemtables = 8 (was 5)
```
## Expected Improvements
### Before Migration
- Cache hit ratio: 33%
- Average latency: 9.35ms
- P95 latency: 34.48ms
- Block cache warnings: Yes
### After Migration
- Cache hit ratio: 85-95%
- Average latency: <3ms
- P95 latency: <8ms
- Block cache warnings: No
- Inline events: 3-5x faster reads
## Troubleshooting
### Migration Script Fails
**Error:** "Not enough disk space"
- Free up space or use Option 1 (natural compaction)
- Ensure you have 2.5x current DB size available
**Error:** "Export failed"
- Check database is not corrupted
- Ensure ORLY is stopped
- Check file permissions
**Error:** "Import count mismatch"
- This is informational - some events may be duplicates
- Check logs for specific errors
- Verify core events are present via relay queries
### Performance Not Improved
**After migration, performance is the same:**
1. Verify configuration was actually applied:
```bash
# Check running relay logs for config output
sudo journalctl -u orly | grep -i "block.*cache\|vlog"
```
2. Wait for cache to warm up (2-5 minutes after start)
3. Check if workload changed (different query patterns)
4. Verify disk I/O is not bottleneck:
```bash
iostat -x 5
```
### High CPU During Migration
- This is normal - import rebuilds all indexes
- Migration is single-threaded by design (data consistency)
- Expect 30-60% CPU usage on one core
## Additional Notes
### Compression Impact
The `Compression = options.ZSTD` setting:
- Only compresses **new** data
- Old data remains uncompressed until rewritten by compaction
- Migration forces all data to be rewritten → immediate compression benefit
- Expect 2-3x compression ratio for event data
### VLogPercentile Behavior
With `VLogPercentile = 0.99`:
- **99% of values** stored in LSM tree (fast access)
- **1% of values** stored in value log (large events >100 KB)
- Threshold dynamically adjusted based on value size distribution
- Perfect for ORLY's inline event optimization
### Production Considerations
For production relays:
1. Schedule migration during low-traffic period
2. Notify users of maintenance window
3. Have rollback plan ready
4. Monitor closely for 24-48 hours after migration
5. Keep backup for at least 1 week
## References
- Badger v4 Documentation: https://pkg.go.dev/github.com/dgraph-io/badger/v4
- ORLY Database Package: `pkg/database/database.go`
- Export/Import Implementation: `pkg/database/{export,import}.go`
- Cache Optimization Analysis: `cmd/benchmark/CACHE_OPTIMIZATION_STRATEGY.md`
- Inline Event Optimization: `cmd/benchmark/INLINE_EVENT_OPTIMIZATION.md`

View File

@@ -8,11 +8,11 @@ ORLY is a high-performance Nostr relay written in Go, designed for personal rela
**Key Technologies:**
- **Language**: Go 1.25.3+
- **Database**: Badger v4 (embedded key-value store)
- **Database**: Badger v4 (embedded key-value store) or DGraph (distributed graph database)
- **Cryptography**: Custom p8k library using purego for secp256k1 operations (no CGO)
- **Web UI**: Svelte frontend embedded in the binary
- **WebSocket**: gorilla/websocket for Nostr protocol
- **Performance**: SIMD-accelerated SHA256 and hex encoding
- **Performance**: SIMD-accelerated SHA256 and hex encoding, query result caching with zstd compression
## Build Commands
@@ -41,8 +41,8 @@ go build -o orly
### Development Mode (Web UI Hot Reload)
```bash
# Terminal 1: Start relay with dev proxy
export ORLY_WEB_DISABLE_EMBEDDED=true
export ORLY_WEB_DEV_PROXY_URL=localhost:5000
export ORLY_WEB_DISABLE=true
export ORLY_WEB_DEV_PROXY_URL=http://localhost:5173
./orly &
# Terminal 2: Start dev server
@@ -89,11 +89,18 @@ go run cmd/relay-tester/main.go -url ws://localhost:3334 -test "Basic Event"
### Benchmarking
```bash
# Run benchmarks in specific package
# Run Go benchmarks in specific package
go test -bench=. -benchmem ./pkg/database
# Crypto benchmarks
cd pkg/crypto/p8k && make bench
# Run full relay benchmark suite
cd cmd/benchmark
go run main.go -data-dir /tmp/bench-db -events 10000 -workers 4
# Benchmark reports are saved to cmd/benchmark/reports/
# The benchmark tool tests event storage, queries, and subscription performance
```
## Running the Relay
@@ -131,6 +138,18 @@ export ORLY_SPROCKET_ENABLED=true
# Enable policy system
export ORLY_POLICY_ENABLED=true
# Database backend selection (badger or dgraph)
export ORLY_DB_TYPE=badger
export ORLY_DGRAPH_URL=localhost:9080 # Only for dgraph backend
# Query cache configuration (improves REQ response times)
export ORLY_QUERY_CACHE_SIZE_MB=512 # Default: 512MB
export ORLY_QUERY_CACHE_MAX_AGE=5m # Cache expiry time
# Database cache tuning (for Badger backend)
export ORLY_DB_BLOCK_CACHE_MB=512 # Block cache size
export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
```
## Code Architecture
@@ -155,10 +174,12 @@ export ORLY_POLICY_ENABLED=true
- `web.go` - Embedded web UI serving and dev proxy
- `config/` - Environment variable configuration using go-simpler.org/env
**`pkg/database/`** - Badger-based event storage
- `database.go` - Database initialization with cache tuning
**`pkg/database/`** - Database abstraction layer with multiple backend support
- `interface.go` - Database interface definition for pluggable backends
- `factory.go` - Database backend selection (Badger or DGraph)
- `database.go` - Badger implementation with cache tuning and query cache
- `save-event.go` - Event storage with index updates
- `query-events.go` - Main query execution engine
- `query-events.go` - Main query execution engine with filter normalization
- `query-for-*.go` - Specialized query builders for different filter patterns
- `indexes/` - Index key construction for efficient lookups
- `export.go` / `import.go` - Event export/import in JSONL format
@@ -238,10 +259,19 @@ export ORLY_POLICY_ENABLED=true
- This avoids CGO complexity while maintaining C library performance
- `libsecp256k1.so` must be in `LD_LIBRARY_PATH` or same directory as binary
**Database Backend Selection:**
- Supports multiple backends via `ORLY_DB_TYPE` environment variable
- **Badger** (default): Embedded key-value store with custom indexing, ideal for single-instance deployments
- **DGraph**: Distributed graph database for larger, multi-node deployments
- Backend selected via factory pattern in `pkg/database/factory.go`
- All backends implement the same `Database` interface defined in `pkg/database/interface.go`
**Database Query Pattern:**
- Filters are analyzed in `get-indexes-from-filter.go` to determine optimal query strategy
- Filters are normalized before cache lookup, ensuring identical queries with different field ordering hit the cache
- Different query builders (`query-for-kinds.go`, `query-for-authors.go`, etc.) handle specific filter patterns
- All queries return event serials (uint64) for efficient joining
- Query results cached with zstd level 9 compression (configurable size and TTL)
- Final events fetched via `fetch-events-by-serials.go`
**WebSocket Message Flow:**
@@ -272,7 +302,7 @@ export ORLY_POLICY_ENABLED=true
### Making Changes to Web UI
1. Edit files in `app/web/src/`
2. For hot reload: `cd app/web && bun run dev` (with `ORLY_WEB_DISABLE_EMBEDDED=true`)
2. For hot reload: `cd app/web && bun run dev` (with `ORLY_WEB_DISABLE=true` and `ORLY_WEB_DEV_PROXY_URL=http://localhost:5173`)
3. For production build: `./scripts/update-embedded-web.sh`
### Adding New Nostr Protocol Handlers
@@ -377,12 +407,42 @@ sudo journalctl -u orly -f
## Performance Considerations
- **Database Caching**: Tune `ORLY_DB_BLOCK_CACHE_MB` and `ORLY_DB_INDEX_CACHE_MB` for workload
- **Query Optimization**: Add indexes for common filter patterns
- **Query Cache**: 512MB query result cache (configurable via `ORLY_QUERY_CACHE_SIZE_MB`) with zstd level 9 compression reduces database load for repeated queries
- **Filter Normalization**: Filters are normalized before cache lookup, so identical queries with different field ordering produce cache hits
- **Database Caching**: Tune `ORLY_DB_BLOCK_CACHE_MB` and `ORLY_DB_INDEX_CACHE_MB` for workload (Badger backend only)
- **Query Optimization**: Add indexes for common filter patterns; multiple specialized query builders optimize different filter combinations
- **Batch Operations**: ID lookups and event fetching use batch operations via `GetSerialsByIds` and `FetchEventsBySerials`
- **Memory Pooling**: Use buffer pools in encoders (see `pkg/encoders/event/`)
- **SIMD Operations**: Leverage minio/sha256-simd and templexxx/xhex
- **SIMD Operations**: Leverage minio/sha256-simd and templexxx/xhex for cryptographic operations
- **Goroutine Management**: Each WebSocket connection runs in its own goroutine
## Recent Optimizations
ORLY has received several significant performance improvements in recent updates:
### Query Cache System (Latest)
- 512MB query result cache with zstd level 9 compression
- Filter normalization ensures cache hits regardless of filter field ordering
- Configurable size (`ORLY_QUERY_CACHE_SIZE_MB`) and TTL (`ORLY_QUERY_CACHE_MAX_AGE`)
- Dramatically reduces database load for repeated queries (common in Nostr clients)
- Cache key includes normalized filter representation for optimal hit rate
### Badger Cache Tuning
- Optimized block cache (default 512MB, tune via `ORLY_DB_BLOCK_CACHE_MB`)
- Optimized index cache (default 256MB, tune via `ORLY_DB_INDEX_CACHE_MB`)
- Resulted in 10-15% improvement in most benchmark scenarios
- See git history for cache tuning evolution
### Query Execution Improvements
- Multiple specialized query builders for different filter patterns:
- `query-for-kinds.go` - Kind-based queries
- `query-for-authors.go` - Author-based queries
- `query-for-tags.go` - Tag-based queries
- Combination builders for `kinds+authors`, `kinds+tags`, `kinds+authors+tags`
- Batch operations for ID lookups via `GetSerialsByIds`
- Serial-based event fetching for efficiency
- Filter analysis in `get-indexes-from-filter.go` selects optimal strategy
## Release Process
1. Update version in `pkg/version/version` file (e.g., v1.2.3)

View File

@@ -0,0 +1,387 @@
# Dgraph Database Implementation Status
## Overview
This document tracks the implementation of Dgraph as an alternative database backend for ORLY. The implementation allows switching between Badger (default) and Dgraph via the `ORLY_DB_TYPE` environment variable.
## Completion Status: ✅ STEP 1 COMPLETE - DGRAPH SERVER INTEGRATION + TESTS
**Build Status:** ✅ Successfully compiles with `CGO_ENABLED=0`
**Binary Test:** ✅ ORLY v0.29.0 starts and runs successfully
**Database Backend:** Uses badger by default, dgraph client integration complete
**Dgraph Integration:** ✅ Real dgraph client connection via dgo library
**Test Suite:** ✅ Comprehensive test suite mirroring badger tests
### ✅ Completed Components
1. **Core Infrastructure**
- Database interface abstraction (`pkg/database/interface.go`)
- Database factory with `ORLY_DB_TYPE` configuration
- Dgraph package structure (`pkg/dgraph/`)
- Schema definition for Nostr events, authors, tags, and markers
- Lifecycle management (initialization, shutdown)
2. **Serial Number Generation**
- Atomic counter using Dgraph markers (`pkg/dgraph/serial.go`)
- Automatic initialization on startup
- Thread-safe increment with mutex protection
- Serial numbers assigned during SaveEvent
3. **Event Operations**
- `SaveEvent`: Store events with graph relationships
- `QueryEvents`: DQL query generation from Nostr filters
- `QueryEventsWithOptions`: Support for delete events and versions
- `CountEvents`: Event counting
- `FetchEventBySerial`: Retrieve by serial number
- `DeleteEvent`: Event deletion by ID
- `Delete EventBySerial`: Event deletion by serial
- `ProcessDelete`: Kind 5 deletion processing
4. **Metadata Storage (Marker-based)**
- `SetMarker`/`GetMarker`/`HasMarker`/`DeleteMarker`: Key-value storage
- Relay identity storage (using markers)
- All metadata stored as special Marker nodes in graph
5. **Subscriptions & Payments**
- `GetSubscription`/`IsSubscriptionActive`/`ExtendSubscription`
- `RecordPayment`/`GetPaymentHistory`
- `ExtendBlossomSubscription`/`GetBlossomStorageQuota`
- `IsFirstTimeUser`
- All implemented using JSON-encoded markers
6. **NIP-43 Invite System**
- `AddNIP43Member`/`RemoveNIP43Member`/`IsNIP43Member`
- `GetNIP43Membership`/`GetAllNIP43Members`
- `StoreInviteCode`/`ValidateInviteCode`/`DeleteInviteCode`
- All implemented using JSON-encoded markers
7. **Import/Export**
- `Import`/`ImportEventsFromReader`/`ImportEventsFromStrings`
- JSONL format support
- Basic `Export` stub
8. **Configuration**
- `ORLY_DB_TYPE` environment variable added
- Factory pattern for database instantiation
- main.go updated to use database.Database interface
9. **Compilation Fixes (Completed)**
- ✅ All interface signatures matched to badger implementation
- ✅ Fixed 100+ type errors in pkg/dgraph package
- ✅ Updated app layer to use database interface instead of concrete types
- ✅ Added type assertions for compatibility with existing managers
- ✅ Project compiles successfully with both badger and dgraph implementations
10. **Dgraph Server Integration (✅ STEP 1 COMPLETE)**
- ✅ Added dgo client library (v230.0.1)
- ✅ Implemented gRPC connection to external dgraph instance
- ✅ Real Query() and Mutate() methods using dgraph client
- ✅ Schema definition and automatic application on startup
- ✅ ORLY_DGRAPH_URL configuration (default: localhost:9080)
- ✅ Proper connection lifecycle management
- ✅ Badger metadata store for local key-value storage
- ✅ Dual-storage architecture: dgraph for events, badger for metadata
11. **Test Suite (✅ COMPLETE)**
- ✅ Test infrastructure (testmain_test.go, helpers_test.go)
- ✅ Comprehensive save-event tests
- ✅ Comprehensive query-events tests
- ✅ Docker-compose setup for dgraph server
- ✅ Automated test scripts (test-dgraph.sh, dgraph-start.sh)
- ✅ Test documentation (DGRAPH_TESTING.md)
- ✅ All tests compile successfully
- ⏳ Tests require running dgraph server to execute
### ⚠️ Remaining Work (For Production Use)
1. **Unimplemented Methods** (Stubs - Not Critical)
- `GetSerialsFromFilter`: Returns "not implemented" error
- `GetSerialsByRange`: Returns "not implemented" error
- `EventIdsBySerial`: Returns "not implemented" error
- These are helper methods that may not be critical for basic operation
2. **📝 STEP 2: DQL Implementation** (Next Priority)
- Update save-event.go to use real Mutate() calls with RDF N-Quads
- Update query-events.go to parse actual DQL responses
- Implement proper event JSON unmarshaling from dgraph responses
- Add error handling for dgraph-specific errors
- Optimize DQL queries for performance
3. **Schema Optimizations**
- Current tag queries are simplified
- Complex tag filters may need refinement
- Consider using Dgraph facets for better tag indexing
4. **📝 STEP 3: Testing** (After DQL Implementation)
- Set up local dgraph instance for testing
- Integration testing with relay-tester
- Performance comparison with Badger
- Memory usage profiling
- Test with actual dgraph server instance
### 📦 Dependencies Added
```bash
go get github.com/dgraph-io/dgo/v230@v230.0.1
go get google.golang.org/grpc@latest
go get github.com/dgraph-io/badger/v4 # For metadata storage
```
All dependencies have been added and `go mod tidy` completed successfully.
### 🔌 Dgraph Server Integration Details
The implementation uses a **client-server architecture**:
1. **Dgraph Server** (External)
- Runs as a separate process (via docker or standalone)
- Default gRPC endpoint: `localhost:9080`
- Configured via `ORLY_DGRAPH_URL` environment variable
2. **ORLY Dgraph Client** (Integrated)
- Uses dgo library for gRPC communication
- Connects on startup, applies Nostr schema automatically
- Query and Mutate methods communicate with dgraph server
3. **Dual Storage Architecture**
- **Dgraph**: Event graph storage (events, authors, tags, relationships)
- **Badger**: Metadata storage (markers, counters, relay identity)
- This hybrid approach leverages strengths of both databases
## Implementation Approach
### Marker-Based Storage
For metadata that doesn't fit the graph model (subscriptions, NIP-43, identity), we use a marker-based approach:
1. **Markers** are special graph nodes with type "Marker"
2. Each marker has:
- `marker.key`: String index for lookup
- `marker.value`: Hex-encoded or JSON-encoded data
3. This provides key-value storage within the graph database
### Serial Number Management
Serial numbers are critical for event ordering. Implementation:
```go
// Serial counter stored as a special marker
const serialCounterKey = "serial_counter"
// Atomic increment with mutex protection
func (d *D) getNextSerial() (uint64, error) {
serialMutex.Lock()
defer serialMutex.Unlock()
// Query current value, increment, save
...
}
```
### Event Storage
Events are stored as graph nodes with relationships:
- **Event nodes**: ID, serial, kind, created_at, content, sig, pubkey, tags
- **Author nodes**: Pubkey with reverse edges to events
- **Tag nodes**: Tag type and value with reverse edges
- **Relationships**: `authored_by`, `references`, `mentions`, `tagged_with`
## Files Created/Modified
### New Files (`pkg/dgraph/`)
- `dgraph.go`: Main implementation, initialization, schema
- `save-event.go`: Event storage with RDF triple generation
- `query-events.go`: Nostr filter to DQL translation
- `fetch-event.go`: Event retrieval methods
- `delete.go`: Event deletion
- `markers.go`: Key-value metadata storage
- `identity.go`: Relay identity management
- `serial.go`: Serial number generation
- `subscriptions.go`: Subscription/payment methods
- `nip43.go`: NIP-43 invite system
- `import-export.go`: Import/export operations
- `logger.go`: Logging adapter
- `utils.go`: Helper functions
- `README.md`: Documentation
### Modified Files
- `pkg/database/interface.go`: Database interface definition
- `pkg/database/factory.go`: Database factory
- `pkg/database/database.go`: Badger compile-time check
- `app/config/config.go`: Added `ORLY_DB_TYPE` config
- `app/server.go`: Changed to use Database interface
- `app/main.go`: Updated to use Database interface
- `main.go`: Added dgraph import and factory usage
## Usage
### Setting Up Dgraph Server
Before using dgraph mode, start a dgraph server:
```bash
# Using docker (recommended)
docker run -d -p 8080:8080 -p 9080:9080 -p 8000:8000 \
-v ~/dgraph:/dgraph \
dgraph/standalone:latest
# Or using docker-compose (see docs/dgraph-docker-compose.yml)
docker-compose up -d dgraph
```
### Environment Configuration
```bash
# Use Badger (default)
./orly
# Use Dgraph with default localhost connection
export ORLY_DB_TYPE=dgraph
./orly
# Use Dgraph with custom server
export ORLY_DB_TYPE=dgraph
export ORLY_DGRAPH_URL=remote.dgraph.server:9080
./orly
# With full configuration
export ORLY_DB_TYPE=dgraph
export ORLY_DGRAPH_URL=localhost:9080
export ORLY_DATA_DIR=/path/to/data
./orly
```
### Data Storage
#### Badger
- Single directory with SST files
- Typical size: 100-500MB for moderate usage
#### Dgraph
- Three subdirectories:
- `p/`: Postings (main data)
- `w/`: Write-ahead log
- Typical size: 500MB-2GB overhead + event data
## Performance Considerations
### Memory Usage
- **Badger**: ~100-200MB baseline
- **Dgraph**: ~500MB-1GB baseline
### Query Performance
- **Simple queries** (by ID, kind, author): Dgraph may be slower than Badger
- **Graph traversals** (follows-of-follows): Dgraph significantly faster
- **Full-text search**: Dgraph has built-in support
### Recommendations
1. Use Badger for simple, high-performance relays
2. Use Dgraph for relays needing complex graph queries
3. Consider hybrid approach: Badger primary + Dgraph secondary
## Next Steps to Complete
### ✅ STEP 1: Dgraph Server Integration (COMPLETED)
- ✅ Added dgo client library
- ✅ Implemented gRPC connection
- ✅ Real Query/Mutate methods
- ✅ Schema application
- ✅ Configuration added
### 📝 STEP 2: DQL Implementation (Next Priority)
1. **Update SaveEvent Implementation** (2-3 hours)
- Replace RDF string building with actual Mutate() calls
- Use dgraph's SetNquads for event insertion
- Handle UIDs and references properly
- Add error handling and transaction rollback
2. **Update QueryEvents Implementation** (2-3 hours)
- Parse actual JSON responses from dgraph Query()
- Implement proper event deserialization
- Handle pagination with DQL offset/limit
- Add query optimization for common patterns
3. **Implement Helper Methods** (1-2 hours)
- FetchEventBySerial using DQL
- GetSerialsByIds using DQL
- CountEvents using DQL aggregation
- DeleteEvent using dgraph mutations
### 📝 STEP 3: Testing (After DQL)
1. **Setup Dgraph Test Instance** (30 minutes)
```bash
# Start dgraph server
docker run -d -p 9080:9080 dgraph/standalone:latest
# Test connection
ORLY_DB_TYPE=dgraph ORLY_DGRAPH_URL=localhost:9080 ./orly
```
2. **Basic Functional Testing** (1 hour)
```bash
# Start with dgraph
ORLY_DB_TYPE=dgraph ./orly
# Test with relay-tester
go run cmd/relay-tester/main.go -url ws://localhost:3334
```
3. **Performance Testing** (2 hours)
```bash
# Compare query performance
# Memory profiling
# Load testing
```
## Known Limitations
1. **Subscription Storage**: Uses simple JSON encoding in markers rather than proper graph nodes
2. **Tag Queries**: Simplified implementation may not handle all complex tag filter combinations
3. **Export**: Basic stub - needs full implementation for production use
4. **Migrations**: Not implemented (Dgraph schema changes require manual updates)
## Conclusion
The Dgraph implementation has completed **✅ STEP 1: DGRAPH SERVER INTEGRATION** successfully.
### What Works Now (Step 1 Complete)
- ✅ Full database interface implementation
- ✅ All method signatures match badger implementation
- ✅ Project compiles successfully with `CGO_ENABLED=0`
- ✅ Binary runs and starts successfully
- ✅ Real dgraph client connection via dgo library
- ✅ gRPC communication with external dgraph server
- ✅ Schema application on startup
- ✅ Query() and Mutate() methods implemented
- ✅ ORLY_DGRAPH_URL configuration
- ✅ Dual-storage architecture (dgraph + badger metadata)
### Implementation Status
- **Step 1: Dgraph Server Integration** ✅ COMPLETE
- **Step 2: DQL Implementation** 📝 Next (save-event.go and query-events.go need updates)
- **Step 3: Testing** 📝 After Step 2 (relay-tester, performance benchmarks)
### Architecture Summary
The implementation uses a **client-server architecture** with dual storage:
1. **Dgraph Client** (ORLY)
- Connects to external dgraph via gRPC (default: localhost:9080)
- Applies Nostr schema automatically on startup
- Query/Mutate methods ready for DQL operations
2. **Dgraph Server** (External)
- Run separately via docker or standalone binary
- Stores event graph data (events, authors, tags, relationships)
- Handles all graph queries and mutations
3. **Badger Metadata Store** (Local)
- Stores markers, counters, relay identity
- Provides fast key-value access for non-graph data
- Complements dgraph for hybrid storage benefits
The abstraction layer is complete and the dgraph client integration is functional. Next step is implementing actual DQL query/mutation logic in save-event.go and query-events.go.

View File

@@ -76,6 +76,12 @@ type C struct {
NIP43PublishMemberList bool `env:"ORLY_NIP43_PUBLISH_MEMBER_LIST" default:"true" usage:"publish kind 13534 membership list events"`
NIP43InviteExpiry time.Duration `env:"ORLY_NIP43_INVITE_EXPIRY" default:"24h" usage:"how long invite codes remain valid"`
// Database configuration
DBType string `env:"ORLY_DB_TYPE" default:"badger" usage:"database backend to use: badger or dgraph"`
DgraphURL string `env:"ORLY_DGRAPH_URL" default:"localhost:9080" usage:"dgraph gRPC endpoint address (only used when ORLY_DB_TYPE=dgraph)"`
QueryCacheSizeMB int `env:"ORLY_QUERY_CACHE_SIZE_MB" default:"512" usage:"query cache size in MB (caches database query results for faster REQ responses)"`
QueryCacheMaxAge string `env:"ORLY_QUERY_CACHE_MAX_AGE" default:"5m" usage:"maximum age for cached query results (e.g., 5m, 10m, 1h)"`
// TLS configuration
TLSDomains []string `env:"ORLY_TLS_DOMAINS" usage:"comma-separated list of domains to respond to for TLS"`
Certs []string `env:"ORLY_CERTS" usage:"comma-separated list of paths to certificate root names (e.g., /path/to/cert will load /path/to/cert.pem and /path/to/cert.key)"`

View File

@@ -60,7 +60,7 @@ func (l *Listener) HandleAuth(b []byte) (err error) {
// handleFirstTimeUser checks if user is logging in for first time and creates welcome note
func (l *Listener) handleFirstTimeUser(pubkey []byte) {
// Check if this is a first-time user
isFirstTime, err := l.Server.D.IsFirstTimeUser(pubkey)
isFirstTime, err := l.Server.DB.IsFirstTimeUser(pubkey)
if err != nil {
log.E.F("failed to check first-time user status: %v", err)
return

View File

@@ -78,7 +78,7 @@ func (l *Listener) HandleCount(msg []byte) (err error) {
}
var cnt int
var a bool
cnt, a, err = l.D.CountEvents(ctx, f)
cnt, a, err = l.DB.CountEvents(ctx, f)
if chk.E(err) {
return
}

View File

@@ -18,7 +18,7 @@ import (
func (l *Listener) GetSerialsFromFilter(f *filter.F) (
sers types.Uint40s, err error,
) {
return l.D.GetSerialsFromFilter(f)
return l.DB.GetSerialsFromFilter(f)
}
func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
@@ -89,7 +89,7 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
if len(sers) > 0 {
for _, s := range sers {
var ev *event.E
if ev, err = l.FetchEventBySerial(s); chk.E(err) {
if ev, err = l.DB.FetchEventBySerial(s); chk.E(err) {
continue
}
// Only delete events that match the a-tag criteria:
@@ -127,7 +127,7 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
hex.Enc(ev.ID), at.Kind.K, hex.Enc(at.Pubkey),
string(at.DTag), ev.CreatedAt, env.E.CreatedAt,
)
if err = l.DeleteEventBySerial(
if err = l.DB.DeleteEventBySerial(
l.Ctx(), s, ev,
); chk.E(err) {
log.E.F("HandleDelete: failed to delete event %s: %v", hex.Enc(ev.ID), err)
@@ -171,7 +171,7 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
// delete them all
for _, s := range sers {
var ev *event.E
if ev, err = l.FetchEventBySerial(s); chk.E(err) {
if ev, err = l.DB.FetchEventBySerial(s); chk.E(err) {
continue
}
// Debug: log the comparison details
@@ -199,7 +199,7 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
"HandleDelete: deleting event %s by authorized user %s",
hex.Enc(ev.ID), hex.Enc(env.E.Pubkey),
)
if err = l.DeleteEventBySerial(l.Ctx(), s, ev); chk.E(err) {
if err = l.DB.DeleteEventBySerial(l.Ctx(), s, ev); chk.E(err) {
log.E.F("HandleDelete: failed to delete event %s: %v", hex.Enc(ev.ID), err)
continue
}
@@ -233,7 +233,7 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
// delete old ones, so we can just delete them all
for _, s := range sers {
var ev *event.E
if ev, err = l.FetchEventBySerial(s); chk.E(err) {
if ev, err = l.DB.FetchEventBySerial(s); chk.E(err) {
continue
}
// For admin/owner deletes: allow deletion regardless of pubkey match
@@ -246,7 +246,7 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
"HandleDelete: deleting event %s via k-tag by authorized user %s",
hex.Enc(ev.ID), hex.Enc(env.E.Pubkey),
)
if err = l.DeleteEventBySerial(l.Ctx(), s, ev); chk.E(err) {
if err = l.DB.DeleteEventBySerial(l.Ctx(), s, ev); chk.E(err) {
log.E.F("HandleDelete: failed to delete event %s: %v", hex.Enc(ev.ID), err)
continue
}

View File

@@ -253,6 +253,12 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
).Write(l); chk.E(err) {
return
}
// Send AUTH challenge to prompt authentication
log.D.F("HandleEvent: sending AUTH challenge to %s", l.remote)
if err = authenvelope.NewChallengeWith(l.challenge.Load()).
Write(l); chk.E(err) {
return
}
return
}
@@ -396,7 +402,7 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
env.E.Pubkey,
)
log.I.F("delete event pubkey hex: %s", hex.Enc(env.E.Pubkey))
if _, err = l.SaveEvent(saveCtx, env.E); err != nil {
if _, err = l.DB.SaveEvent(saveCtx, env.E); err != nil {
log.E.F("failed to save delete event %0x: %v", env.E.ID, err)
if strings.HasPrefix(err.Error(), "blocked:") {
errStr := err.Error()[len("blocked: "):len(err.Error())]
@@ -446,7 +452,7 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
// check if the event was deleted
// Combine admins and owners for deletion checking
adminOwners := append(l.Admins, l.Owners...)
if err = l.CheckForDeleted(env.E, adminOwners); err != nil {
if err = l.DB.CheckForDeleted(env.E, adminOwners); err != nil {
if strings.HasPrefix(err.Error(), "blocked:") {
errStr := err.Error()[len("blocked: "):len(err.Error())]
if err = Ok.Error(
@@ -461,7 +467,7 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
saveCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
// log.I.F("saving event %0x, %s", env.E.ID, env.E.Serialize())
if _, err = l.SaveEvent(saveCtx, env.E); err != nil {
if _, err = l.DB.SaveEvent(saveCtx, env.E); err != nil {
if strings.HasPrefix(err.Error(), "blocked:") {
errStr := err.Error()[len("blocked: "):len(err.Error())]
if err = Ok.Error(

View File

@@ -27,7 +27,7 @@ func (l *Listener) HandleNIP43JoinRequest(ev *event.E) error {
}
// Check if user is already a member
isMember, err := l.D.IsNIP43Member(ev.Pubkey)
isMember, err := l.DB.IsNIP43Member(ev.Pubkey)
if chk.E(err) {
log.E.F("error checking membership: %v", err)
return l.sendOKResponse(ev.ID, false, "error: internal server error")
@@ -47,7 +47,7 @@ func (l *Listener) HandleNIP43JoinRequest(ev *event.E) error {
}
// Add the member
if err = l.D.AddNIP43Member(ev.Pubkey, inviteCode); chk.E(err) {
if err = l.DB.AddNIP43Member(ev.Pubkey, inviteCode); chk.E(err) {
log.E.F("error adding member: %v", err)
return l.sendOKResponse(ev.ID, false, "error: failed to add member")
}
@@ -88,7 +88,7 @@ func (l *Listener) HandleNIP43LeaveRequest(ev *event.E) error {
}
// Check if user is a member
isMember, err := l.D.IsNIP43Member(ev.Pubkey)
isMember, err := l.DB.IsNIP43Member(ev.Pubkey)
if chk.E(err) {
log.E.F("error checking membership: %v", err)
return l.sendOKResponse(ev.ID, false, "error: internal server error")
@@ -100,7 +100,7 @@ func (l *Listener) HandleNIP43LeaveRequest(ev *event.E) error {
}
// Remove the member
if err = l.D.RemoveNIP43Member(ev.Pubkey); chk.E(err) {
if err = l.DB.RemoveNIP43Member(ev.Pubkey); chk.E(err) {
log.E.F("error removing member: %v", err)
return l.sendOKResponse(ev.ID, false, "error: failed to remove member")
}
@@ -160,7 +160,7 @@ func (s *Server) HandleNIP43InviteRequest(pubkey []byte) (*event.E, error) {
// publishAddUserEvent publishes a kind 8000 add user event
func (l *Listener) publishAddUserEvent(userPubkey []byte) error {
relaySecret, err := l.D.GetOrCreateRelayIdentitySecret()
relaySecret, err := l.DB.GetOrCreateRelayIdentitySecret()
if chk.E(err) {
return err
}
@@ -173,7 +173,7 @@ func (l *Listener) publishAddUserEvent(userPubkey []byte) error {
// Save to database
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
if _, err = l.SaveEvent(ctx, ev); chk.E(err) {
if _, err = l.DB.SaveEvent(ctx, ev); chk.E(err) {
return err
}
@@ -186,7 +186,7 @@ func (l *Listener) publishAddUserEvent(userPubkey []byte) error {
// publishRemoveUserEvent publishes a kind 8001 remove user event
func (l *Listener) publishRemoveUserEvent(userPubkey []byte) error {
relaySecret, err := l.D.GetOrCreateRelayIdentitySecret()
relaySecret, err := l.DB.GetOrCreateRelayIdentitySecret()
if chk.E(err) {
return err
}
@@ -199,7 +199,7 @@ func (l *Listener) publishRemoveUserEvent(userPubkey []byte) error {
// Save to database
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
if _, err = l.SaveEvent(ctx, ev); chk.E(err) {
if _, err = l.DB.SaveEvent(ctx, ev); chk.E(err) {
return err
}
@@ -213,12 +213,12 @@ func (l *Listener) publishRemoveUserEvent(userPubkey []byte) error {
// publishMembershipList publishes a kind 13534 membership list event
func (l *Listener) publishMembershipList() error {
// Get all members
members, err := l.D.GetAllNIP43Members()
members, err := l.DB.GetAllNIP43Members()
if chk.E(err) {
return err
}
relaySecret, err := l.D.GetOrCreateRelayIdentitySecret()
relaySecret, err := l.DB.GetOrCreateRelayIdentitySecret()
if chk.E(err) {
return err
}
@@ -231,7 +231,7 @@ func (l *Listener) publishMembershipList() error {
// Save to database
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
if _, err = l.SaveEvent(ctx, ev); chk.E(err) {
if _, err = l.DB.SaveEvent(ctx, ev); chk.E(err) {
return err
}

View File

@@ -7,9 +7,11 @@ import (
"time"
"next.orly.dev/app/config"
"next.orly.dev/pkg/acl"
"next.orly.dev/pkg/crypto/keys"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/interfaces/signer/p8k"
"next.orly.dev/pkg/protocol/nip43"
@@ -38,24 +40,47 @@ func setupTestListener(t *testing.T) (*Listener, *database.D, func()) {
RelayURL: "wss://test.relay",
Listen: "localhost",
Port: 3334,
ACLMode: "none",
}
server := &Server{
Ctx: ctx,
Config: cfg,
D: db,
DB: db,
publishers: publish.New(NewPublisher(ctx)),
InviteManager: nip43.NewInviteManager(cfg.NIP43InviteExpiry),
cfg: cfg,
db: db,
}
listener := &Listener{
Server: server,
ctx: ctx,
// Configure ACL registry
acl.Registry.Active.Store(cfg.ACLMode)
if err = acl.Registry.Configure(cfg, db, ctx); err != nil {
db.Close()
os.RemoveAll(tempDir)
t.Fatalf("failed to configure ACL: %v", err)
}
listener := &Listener{
Server: server,
ctx: ctx,
writeChan: make(chan publish.WriteRequest, 100),
writeDone: make(chan struct{}),
messageQueue: make(chan messageRequest, 100),
processingDone: make(chan struct{}),
subscriptions: make(map[string]context.CancelFunc),
}
// Start write worker and message processor
go listener.writeWorker()
go listener.messageProcessor()
cleanup := func() {
// Close listener channels
close(listener.writeChan)
<-listener.writeDone
close(listener.messageQueue)
<-listener.processingDone
db.Close()
os.RemoveAll(tempDir)
}
@@ -350,8 +375,13 @@ func TestHandleNIP43InviteRequest_ValidRequest(t *testing.T) {
}
adminPubkey := adminSigner.Pub()
// Add admin to server (simulating admin config)
listener.Server.Admins = [][]byte{adminPubkey}
// Add admin to config and reconfigure ACL
adminHex := hex.Enc(adminPubkey)
listener.Server.Config.Admins = []string{adminHex}
acl.Registry.Active.Store("none")
if err = acl.Registry.Configure(listener.Server.Config, listener.Server.DB, listener.ctx); err != nil {
t.Fatalf("failed to reconfigure ACL: %v", err)
}
// Handle invite request
inviteEvent, err := listener.Server.HandleNIP43InviteRequest(adminPubkey)

View File

@@ -35,7 +35,7 @@ func TestHandleNIP86Management_Basic(t *testing.T) {
// Setup server
server := &Server{
Config: cfg,
D: db,
DB: db,
Admins: [][]byte{[]byte("admin1")},
Owners: [][]byte{[]byte("owner1")},
}

View File

@@ -83,7 +83,7 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
log.I.Ln("supported NIPs", supportedNIPs)
// Get relay identity pubkey as hex
var relayPubkey string
if skb, err := s.D.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
if skb, err := s.DB.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
var sign *p8k.Signer
var sigErr error
if sign, sigErr = p8k.New(); sigErr == nil {

View File

@@ -150,6 +150,34 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
)
defer queryCancel()
// Check cache first for single-filter queries (most common case)
// Multi-filter queries are not cached as they're more complex
if len(*env.Filters) == 1 && env.Filters != nil {
f := (*env.Filters)[0]
if cachedJSON, found := l.DB.GetCachedJSON(f); found {
log.D.F("REQ %s: cache HIT, sending %d cached events", env.Subscription, len(cachedJSON))
// Send cached JSON directly
for _, jsonEnvelope := range cachedJSON {
if _, err = l.Write(jsonEnvelope); err != nil {
if !strings.Contains(err.Error(), "context canceled") {
chk.E(err)
}
return
}
}
// Send EOSE
if err = eoseenvelope.NewFrom(env.Subscription).Write(l); chk.E(err) {
return
}
// Don't create subscription for cached results with satisfied limits
if f.Limit != nil && len(cachedJSON) >= int(*f.Limit) {
log.D.F("REQ %s: limit satisfied by cache, not creating subscription", env.Subscription)
return
}
// Fall through to create subscription for ongoing updates
}
}
// Collect all events from all filters
var allEvents event.S
for _, f := range *env.Filters {
@@ -558,6 +586,10 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
events = privateFilteredEvents
seen := make(map[string]struct{})
// Collect marshaled JSON for caching (only for single-filter queries)
var marshaledForCache [][]byte
shouldCache := len(*env.Filters) == 1 && len(events) > 0
for _, ev := range events {
log.T.C(
func() string {
@@ -578,6 +610,18 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
); chk.E(err) {
return
}
// Get serialized envelope for caching
if shouldCache {
serialized := res.Marshal(nil)
if len(serialized) > 0 {
// Make a copy for the cache
cacheCopy := make([]byte, len(serialized))
copy(cacheCopy, serialized)
marshaledForCache = append(marshaledForCache, cacheCopy)
}
}
if err = res.Write(l); err != nil {
// Don't log context canceled errors as they're expected during shutdown
if !strings.Contains(err.Error(), "context canceled") {
@@ -588,6 +632,13 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
// track the IDs we've sent (use hex encoding for stable key)
seen[hexenc.Enc(ev.ID)] = struct{}{}
}
// Populate cache after successfully sending all events
if shouldCache && len(marshaledForCache) > 0 {
f := (*env.Filters)[0]
l.DB.CacheMarshaledJSON(f, marshaledForCache)
log.D.F("REQ %s: cached %d marshaled events", env.Subscription, len(marshaledForCache))
}
// write the EOSE to signal to the client that all events found have been
// sent.
log.T.F("sending EOSE to %s", l.remote)
@@ -661,6 +712,8 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
l.subscriptionsMu.Unlock()
// Register subscription with publisher
// Set AuthRequired based on ACL mode - when ACL is "none", don't require auth for privileged events
authRequired := acl.Registry.Active.Load() != "none"
l.publishers.Receive(
&W{
Conn: l.conn,
@@ -669,6 +722,7 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
Receiver: receiver,
Filters: &subbedFilters,
AuthedPubkey: l.authedPubkey.Load(),
AuthRequired: authRequired,
},
)

View File

@@ -118,7 +118,8 @@ whitelist:
chal := make([]byte, 32)
rand.Read(chal)
listener.challenge.Store([]byte(hex.Enc(chal)))
if s.Config.ACLMode != "none" {
// Send AUTH challenge if ACL mode requires it, or if auth is required/required for writes
if s.Config.ACLMode != "none" || s.Config.AuthRequired || s.Config.AuthToWrite {
log.D.F("sending AUTH challenge to %s", remote)
if err = authenvelope.NewChallengeWith(listener.challenge.Load()).
Write(listener); chk.E(err) {

View File

@@ -161,6 +161,12 @@ func (l *Listener) writeWorker() {
return
}
// Skip writes if no connection (unit tests)
if l.conn == nil {
log.T.F("ws->%s skipping write (no connection)", l.remote)
continue
}
// Handle the write request
var err error
if req.IsPing {
@@ -239,12 +245,12 @@ func (l *Listener) getManagedACL() *database.ManagedACL {
// QueryEvents queries events using the database QueryEvents method
func (l *Listener) QueryEvents(ctx context.Context, f *filter.F) (event.S, error) {
return l.D.QueryEvents(ctx, f)
return l.DB.QueryEvents(ctx, f)
}
// QueryAllVersions queries events using the database QueryAllVersions method
func (l *Listener) QueryAllVersions(ctx context.Context, f *filter.F) (event.S, error) {
return l.D.QueryAllVersions(ctx, f)
return l.DB.QueryAllVersions(ctx, f)
}
// canSeePrivateEvent checks if the authenticated user can see an event with a private tag

View File

@@ -25,7 +25,7 @@ import (
)
func Run(
ctx context.Context, cfg *config.C, db *database.D,
ctx context.Context, cfg *config.C, db database.Database,
) (quit chan struct{}) {
quit = make(chan struct{})
var once sync.Once
@@ -65,7 +65,7 @@ func Run(
l := &Server{
Ctx: ctx,
Config: cfg,
D: db,
DB: db,
publishers: publish.New(NewPublisher(ctx)),
Admins: adminKeys,
Owners: ownerKeys,
@@ -85,9 +85,9 @@ func Run(
// Initialize policy manager
l.policyManager = policy.NewWithManager(ctx, cfg.AppName, cfg.PolicyEnabled)
// Initialize spider manager based on mode
if cfg.SpiderMode != "none" {
if l.spiderManager, err = spider.New(ctx, db, l.publishers, cfg.SpiderMode); chk.E(err) {
// Initialize spider manager based on mode (only for Badger backend)
if badgerDB, ok := db.(*database.D); ok && cfg.SpiderMode != "none" {
if l.spiderManager, err = spider.New(ctx, badgerDB, l.publishers, cfg.SpiderMode); chk.E(err) {
log.E.F("failed to create spider manager: %v", err)
} else {
// Set up callbacks for follows mode
@@ -122,71 +122,98 @@ func Run(
log.E.F("failed to start spider manager: %v", err)
} else {
log.I.F("spider manager started successfully in '%s' mode", cfg.SpiderMode)
}
}
}
// Initialize relay group manager
l.relayGroupMgr = dsync.NewRelayGroupManager(db, cfg.RelayGroupAdmins)
// Initialize sync manager if relay peers are configured
var peers []string
if len(cfg.RelayPeers) > 0 {
peers = cfg.RelayPeers
} else {
// Try to get peers from relay group configuration
if config, err := l.relayGroupMgr.FindAuthoritativeConfig(ctx); err == nil && config != nil {
peers = config.Relays
log.I.F("using relay group configuration with %d peers", len(peers))
}
}
if len(peers) > 0 {
// Get relay identity for node ID
sk, err := db.GetOrCreateRelayIdentitySecret()
if err != nil {
log.E.F("failed to get relay identity for sync: %v", err)
} else {
nodeID, err := keys.SecretBytesToPubKeyHex(sk)
if err != nil {
log.E.F("failed to derive pubkey for sync node ID: %v", err)
} else {
relayURL := cfg.RelayURL
if relayURL == "" {
relayURL = fmt.Sprintf("http://localhost:%d", cfg.Port)
// Hook up follow list update notifications from ACL to spider
if cfg.SpiderMode == "follows" {
for _, aclInstance := range acl.Registry.ACL {
if aclInstance.Type() == "follows" {
if follows, ok := aclInstance.(*acl.Follows); ok {
follows.SetFollowListUpdateCallback(func() {
log.I.F("follow list updated, notifying spider")
l.spiderManager.NotifyFollowListUpdate()
})
log.I.F("spider: follow list update notifications configured")
}
}
}
}
l.syncManager = dsync.NewManager(ctx, db, nodeID, relayURL, peers, l.relayGroupMgr, l.policyManager)
log.I.F("distributed sync manager initialized with %d peers", len(peers))
}
}
}
// Initialize cluster manager for cluster replication
var clusterAdminNpubs []string
if len(cfg.ClusterAdmins) > 0 {
clusterAdminNpubs = cfg.ClusterAdmins
} else {
// Default to regular admins if no cluster admins specified
for _, admin := range cfg.Admins {
clusterAdminNpubs = append(clusterAdminNpubs, admin)
// Initialize relay group manager (only for Badger backend)
if badgerDB, ok := db.(*database.D); ok {
l.relayGroupMgr = dsync.NewRelayGroupManager(badgerDB, cfg.RelayGroupAdmins)
} else if cfg.SpiderMode != "none" || len(cfg.RelayPeers) > 0 || len(cfg.ClusterAdmins) > 0 {
log.I.Ln("spider, sync, and cluster features require Badger backend (currently using alternative backend)")
}
// Initialize sync manager if relay peers are configured (only for Badger backend)
if badgerDB, ok := db.(*database.D); ok {
var peers []string
if len(cfg.RelayPeers) > 0 {
peers = cfg.RelayPeers
} else {
// Try to get peers from relay group configuration
if l.relayGroupMgr != nil {
if config, err := l.relayGroupMgr.FindAuthoritativeConfig(ctx); err == nil && config != nil {
peers = config.Relays
log.I.F("using relay group configuration with %d peers", len(peers))
}
}
}
if len(peers) > 0 {
// Get relay identity for node ID
sk, err := db.GetOrCreateRelayIdentitySecret()
if err != nil {
log.E.F("failed to get relay identity for sync: %v", err)
} else {
nodeID, err := keys.SecretBytesToPubKeyHex(sk)
if err != nil {
log.E.F("failed to derive pubkey for sync node ID: %v", err)
} else {
relayURL := cfg.RelayURL
if relayURL == "" {
relayURL = fmt.Sprintf("http://localhost:%d", cfg.Port)
}
l.syncManager = dsync.NewManager(ctx, badgerDB, nodeID, relayURL, peers, l.relayGroupMgr, l.policyManager)
log.I.F("distributed sync manager initialized with %d peers", len(peers))
}
}
}
}
if len(clusterAdminNpubs) > 0 {
l.clusterManager = dsync.NewClusterManager(ctx, db, clusterAdminNpubs, cfg.ClusterPropagatePrivilegedEvents, l.publishers)
l.clusterManager.Start()
log.I.F("cluster replication manager initialized with %d admin npubs", len(clusterAdminNpubs))
// Initialize cluster manager for cluster replication (only for Badger backend)
if badgerDB, ok := db.(*database.D); ok {
var clusterAdminNpubs []string
if len(cfg.ClusterAdmins) > 0 {
clusterAdminNpubs = cfg.ClusterAdmins
} else {
// Default to regular admins if no cluster admins specified
for _, admin := range cfg.Admins {
clusterAdminNpubs = append(clusterAdminNpubs, admin)
}
}
if len(clusterAdminNpubs) > 0 {
l.clusterManager = dsync.NewClusterManager(ctx, badgerDB, clusterAdminNpubs, cfg.ClusterPropagatePrivilegedEvents, l.publishers)
l.clusterManager.Start()
log.I.F("cluster replication manager initialized with %d admin npubs", len(clusterAdminNpubs))
}
}
// Initialize the user interface
l.UserInterface()
// Initialize Blossom blob storage server
if l.blossomServer, err = initializeBlossomServer(ctx, cfg, db); err != nil {
log.E.F("failed to initialize blossom server: %v", err)
// Continue without blossom server
} else if l.blossomServer != nil {
log.I.F("blossom blob storage server initialized")
// Initialize Blossom blob storage server (only for Badger backend)
if badgerDB, ok := db.(*database.D); ok {
if l.blossomServer, err = initializeBlossomServer(ctx, cfg, badgerDB); err != nil {
log.E.F("failed to initialize blossom server: %v", err)
// Continue without blossom server
} else if l.blossomServer != nil {
log.I.F("blossom blob storage server initialized")
}
}
// Ensure a relay identity secret key exists when subscriptions and NWC are enabled
@@ -222,17 +249,25 @@ func Run(
}
}
if l.paymentProcessor, err = NewPaymentProcessor(ctx, cfg, db); err != nil {
// log.E.F("failed to create payment processor: %v", err)
// Continue without payment processor
} else {
if err = l.paymentProcessor.Start(); err != nil {
log.E.F("failed to start payment processor: %v", err)
// Initialize payment processor (only for Badger backend)
if badgerDB, ok := db.(*database.D); ok {
if l.paymentProcessor, err = NewPaymentProcessor(ctx, cfg, badgerDB); err != nil {
// log.E.F("failed to create payment processor: %v", err)
// Continue without payment processor
} else {
log.I.F("payment processor started successfully")
if err = l.paymentProcessor.Start(); err != nil {
log.E.F("failed to start payment processor: %v", err)
} else {
log.I.F("payment processor started successfully")
}
}
}
// Wait for database to be ready before accepting requests
log.I.F("waiting for database warmup to complete...")
<-db.Ready()
log.I.F("database ready, starting HTTP servers")
// Check if TLS is enabled
var tlsEnabled bool
var tlsServer *http.Server

View File

@@ -1,26 +1,54 @@
package app
import (
"next.orly.dev/pkg/interfaces/signer/p8k"
"context"
"encoding/json"
"net/http"
"net/http/httptest"
"next.orly.dev/pkg/interfaces/signer/p8k"
"os"
"path/filepath"
"testing"
"time"
"next.orly.dev/app/config"
"next.orly.dev/pkg/acl"
"next.orly.dev/pkg/crypto/keys"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/protocol/nip43"
"next.orly.dev/pkg/protocol/publish"
"next.orly.dev/pkg/protocol/relayinfo"
)
// newTestListener creates a properly initialized Listener for testing
func newTestListener(server *Server, ctx context.Context) *Listener {
listener := &Listener{
Server: server,
ctx: ctx,
writeChan: make(chan publish.WriteRequest, 100),
writeDone: make(chan struct{}),
messageQueue: make(chan messageRequest, 100),
processingDone: make(chan struct{}),
subscriptions: make(map[string]context.CancelFunc),
}
// Start write worker and message processor
go listener.writeWorker()
go listener.messageProcessor()
return listener
}
// closeTestListener properly closes a test listener
func closeTestListener(listener *Listener) {
close(listener.writeChan)
<-listener.writeDone
close(listener.messageQueue)
<-listener.processingDone
}
// setupE2ETest creates a full test server for end-to-end testing
func setupE2ETest(t *testing.T) (*Server, *httptest.Server, func()) {
tempDir, err := os.MkdirTemp("", "nip43_e2e_test_*")
@@ -62,26 +90,40 @@ func setupE2ETest(t *testing.T) (*Server, *httptest.Server, func()) {
}
adminPubkey := adminSigner.Pub()
// Add admin to config for ACL
cfg.Admins = []string{hex.Enc(adminPubkey)}
server := &Server{
Ctx: ctx,
Config: cfg,
D: db,
DB: db,
publishers: publish.New(NewPublisher(ctx)),
Admins: [][]byte{adminPubkey},
InviteManager: nip43.NewInviteManager(cfg.NIP43InviteExpiry),
cfg: cfg,
db: db,
}
// Configure ACL registry
acl.Registry.Active.Store(cfg.ACLMode)
if err = acl.Registry.Configure(cfg, db, ctx); err != nil {
db.Close()
os.RemoveAll(tempDir)
t.Fatalf("failed to configure ACL: %v", err)
}
server.mux = http.NewServeMux()
// Set up HTTP handlers
server.mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Accept") == "application/nostr+json" {
server.HandleRelayInfo(w, r)
return
}
http.NotFound(w, r)
})
server.mux.HandleFunc(
"/", func(w http.ResponseWriter, r *http.Request) {
if r.Header.Get("Accept") == "application/nostr+json" {
server.HandleRelayInfo(w, r)
return
}
http.NotFound(w, r)
},
)
httpServer := httptest.NewServer(server.mux)
@@ -133,7 +175,10 @@ func TestE2E_RelayInfoIncludesNIP43(t *testing.T) {
// Verify server name
if info.Name != server.Config.AppName {
t.Errorf("wrong relay name: got %s, want %s", info.Name, server.Config.AppName)
t.Errorf(
"wrong relay name: got %s, want %s", info.Name,
server.Config.AppName,
)
}
}
@@ -173,6 +218,7 @@ func TestE2E_CompleteJoinFlow(t *testing.T) {
joinEv := event.New()
joinEv.Kind = nip43.KindJoinRequest
copy(joinEv.Pubkey, userPubkey)
joinEv.Tags = tag.NewS()
joinEv.Tags.Append(tag.NewFromAny("-"))
joinEv.Tags.Append(tag.NewFromAny("claim", inviteCode))
joinEv.CreatedAt = time.Now().Unix()
@@ -182,17 +228,15 @@ func TestE2E_CompleteJoinFlow(t *testing.T) {
}
// Step 3: Process join request
listener := &Listener{
Server: server,
ctx: server.Ctx,
}
listener := newTestListener(server, server.Ctx)
defer closeTestListener(listener)
err = listener.HandleNIP43JoinRequest(joinEv)
if err != nil {
t.Fatalf("failed to handle join request: %v", err)
}
// Step 4: Verify membership
isMember, err := server.D.IsNIP43Member(userPubkey)
isMember, err := server.DB.IsNIP43Member(userPubkey)
if err != nil {
t.Fatalf("failed to check membership: %v", err)
}
@@ -200,12 +244,15 @@ func TestE2E_CompleteJoinFlow(t *testing.T) {
t.Error("user was not added as member")
}
membership, err := server.D.GetNIP43Membership(userPubkey)
membership, err := server.DB.GetNIP43Membership(userPubkey)
if err != nil {
t.Fatalf("failed to get membership: %v", err)
}
if membership.InviteCode != inviteCode {
t.Errorf("wrong invite code: got %s, want %s", membership.InviteCode, inviteCode)
t.Errorf(
"wrong invite code: got %s, want %s", membership.InviteCode,
inviteCode,
)
}
}
@@ -220,10 +267,8 @@ func TestE2E_InviteCodeReuse(t *testing.T) {
t.Fatalf("failed to generate invite code: %v", err)
}
listener := &Listener{
Server: server,
ctx: server.Ctx,
}
listener := newTestListener(server, server.Ctx)
defer closeTestListener(listener)
// First user uses the code
user1Secret, err := keys.GenerateSecretKey()
@@ -242,6 +287,7 @@ func TestE2E_InviteCodeReuse(t *testing.T) {
joinEv1 := event.New()
joinEv1.Kind = nip43.KindJoinRequest
copy(joinEv1.Pubkey, user1Pubkey)
joinEv1.Tags = tag.NewS()
joinEv1.Tags.Append(tag.NewFromAny("-"))
joinEv1.Tags.Append(tag.NewFromAny("claim", code))
joinEv1.CreatedAt = time.Now().Unix()
@@ -256,7 +302,7 @@ func TestE2E_InviteCodeReuse(t *testing.T) {
}
// Verify first user is member
isMember, err := server.D.IsNIP43Member(user1Pubkey)
isMember, err := server.DB.IsNIP43Member(user1Pubkey)
if err != nil {
t.Fatalf("failed to check user1 membership: %v", err)
}
@@ -281,6 +327,7 @@ func TestE2E_InviteCodeReuse(t *testing.T) {
joinEv2 := event.New()
joinEv2.Kind = nip43.KindJoinRequest
copy(joinEv2.Pubkey, user2Pubkey)
joinEv2.Tags = tag.NewS()
joinEv2.Tags.Append(tag.NewFromAny("-"))
joinEv2.Tags.Append(tag.NewFromAny("claim", code))
joinEv2.CreatedAt = time.Now().Unix()
@@ -296,7 +343,7 @@ func TestE2E_InviteCodeReuse(t *testing.T) {
}
// Verify second user is NOT member
isMember, err = server.D.IsNIP43Member(user2Pubkey)
isMember, err = server.DB.IsNIP43Member(user2Pubkey)
if err != nil {
t.Fatalf("failed to check user2 membership: %v", err)
}
@@ -310,10 +357,8 @@ func TestE2E_MembershipListGeneration(t *testing.T) {
server, _, cleanup := setupE2ETest(t)
defer cleanup()
listener := &Listener{
Server: server,
ctx: server.Ctx,
}
listener := newTestListener(server, server.Ctx)
defer closeTestListener(listener)
// Add multiple members
memberCount := 5
@@ -331,7 +376,7 @@ func TestE2E_MembershipListGeneration(t *testing.T) {
members[i] = userPubkey
// Add directly to database for speed
err = server.D.AddNIP43Member(userPubkey, "code")
err = server.DB.AddNIP43Member(userPubkey, "code")
if err != nil {
t.Fatalf("failed to add member %d: %v", i, err)
}
@@ -355,6 +400,9 @@ func TestE2E_ExpiredInviteCode(t *testing.T) {
}
defer os.RemoveAll(tempDir)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
db, err := database.New(ctx, cancel, tempDir, "info")
if err != nil {
t.Fatalf("failed to open database: %v", err)
@@ -366,22 +414,18 @@ func TestE2E_ExpiredInviteCode(t *testing.T) {
NIP43InviteExpiry: 1 * time.Millisecond, // Very short expiry
}
ctx := context.Background()
server := &Server{
Ctx: ctx,
Config: cfg,
D: db,
DB: db,
publishers: publish.New(NewPublisher(ctx)),
InviteManager: nip43.NewInviteManager(cfg.NIP43InviteExpiry),
cfg: cfg,
db: db,
}
listener := &Listener{
Server: server,
ctx: ctx,
}
listener := newTestListener(server, ctx)
defer closeTestListener(listener)
// Generate invite code
code, err := server.InviteManager.GenerateCode()
@@ -409,6 +453,7 @@ func TestE2E_ExpiredInviteCode(t *testing.T) {
joinEv := event.New()
joinEv.Kind = nip43.KindJoinRequest
copy(joinEv.Pubkey, userPubkey)
joinEv.Tags = tag.NewS()
joinEv.Tags.Append(tag.NewFromAny("-"))
joinEv.Tags.Append(tag.NewFromAny("claim", code))
joinEv.CreatedAt = time.Now().Unix()
@@ -437,10 +482,8 @@ func TestE2E_InvalidTimestampRejected(t *testing.T) {
server, _, cleanup := setupE2ETest(t)
defer cleanup()
listener := &Listener{
Server: server,
ctx: server.Ctx,
}
listener := newTestListener(server, server.Ctx)
defer closeTestListener(listener)
// Generate invite code
code, err := server.InviteManager.GenerateCode()
@@ -466,6 +509,7 @@ func TestE2E_InvalidTimestampRejected(t *testing.T) {
joinEv := event.New()
joinEv.Kind = nip43.KindJoinRequest
copy(joinEv.Pubkey, userPubkey)
joinEv.Tags = tag.NewS()
joinEv.Tags.Append(tag.NewFromAny("-"))
joinEv.Tags.Append(tag.NewFromAny("claim", code))
joinEv.CreatedAt = time.Now().Unix() - 700 // More than 10 minutes ago
@@ -481,7 +525,7 @@ func TestE2E_InvalidTimestampRejected(t *testing.T) {
}
// Verify user was NOT added
isMember, err := server.D.IsNIP43Member(userPubkey)
isMember, err := server.DB.IsNIP43Member(userPubkey)
if err != nil {
t.Fatalf("failed to check membership: %v", err)
}
@@ -498,7 +542,10 @@ func BenchmarkJoinRequestProcessing(b *testing.B) {
}
defer os.RemoveAll(tempDir)
db, err := database.Open(filepath.Join(tempDir, "test.db"), "error")
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
db, err := database.New(ctx, cancel, tempDir, "error")
if err != nil {
b.Fatalf("failed to open database: %v", err)
}
@@ -509,22 +556,18 @@ func BenchmarkJoinRequestProcessing(b *testing.B) {
NIP43InviteExpiry: 24 * time.Hour,
}
ctx := context.Background()
server := &Server{
Ctx: ctx,
Config: cfg,
D: db,
DB: db,
publishers: publish.New(NewPublisher(ctx)),
InviteManager: nip43.NewInviteManager(cfg.NIP43InviteExpiry),
cfg: cfg,
db: db,
}
listener := &Listener{
Server: server,
ctx: ctx,
}
listener := newTestListener(server, ctx)
defer closeTestListener(listener)
b.ResetTimer()
@@ -538,6 +581,7 @@ func BenchmarkJoinRequestProcessing(b *testing.B) {
joinEv := event.New()
joinEv.Kind = nip43.KindJoinRequest
copy(joinEv.Pubkey, userPubkey)
joinEv.Tags = tag.NewS()
joinEv.Tags.Append(tag.NewFromAny("-"))
joinEv.Tags.Append(tag.NewFromAny("claim", code))
joinEv.CreatedAt = time.Now().Unix()

View File

@@ -28,6 +28,7 @@ type Subscription struct {
remote string
AuthedPubkey []byte
Receiver event.C // Channel for delivering events to this subscription
AuthRequired bool // Whether ACL requires authentication for privileged events
*filter.S
}
@@ -58,6 +59,11 @@ type W struct {
// AuthedPubkey is the authenticated pubkey associated with the listener (if any).
AuthedPubkey []byte
// AuthRequired indicates whether the ACL in operation requires auth. If
// this is set to true, the publisher will not publish privileged or other
// restricted events to non-authed listeners, otherwise, it will.
AuthRequired bool
}
func (w *W) Type() (typeName string) { return Type }
@@ -87,7 +93,6 @@ func NewPublisher(c context.Context) (publisher *P) {
func (p *P) Type() (typeName string) { return Type }
// Receive handles incoming messages to manage websocket listener subscriptions
// and associated filters.
//
@@ -120,12 +125,14 @@ func (p *P) Receive(msg typer.T) {
if subs, ok := p.Map[m.Conn]; !ok {
subs = make(map[string]Subscription)
subs[m.Id] = Subscription{
S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey, Receiver: m.Receiver,
S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
Receiver: m.Receiver, AuthRequired: m.AuthRequired,
}
p.Map[m.Conn] = subs
} else {
subs[m.Id] = Subscription{
S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey, Receiver: m.Receiver,
S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
Receiver: m.Receiver, AuthRequired: m.AuthRequired,
}
}
}
@@ -174,11 +181,14 @@ func (p *P) Deliver(ev *event.E) {
for _, d := range deliveries {
// If the event is privileged, enforce that the subscriber's authed pubkey matches
// either the event pubkey or appears in any 'p' tag of the event.
if kind.IsPrivileged(ev.Kind) {
// Only check authentication if AuthRequired is true (ACL is active)
if kind.IsPrivileged(ev.Kind) && d.sub.AuthRequired {
if len(d.sub.AuthedPubkey) == 0 {
// Not authenticated - cannot see privileged events
log.D.F("subscription delivery DENIED for privileged event %s to %s (not authenticated)",
hex.Enc(ev.ID), d.sub.remote)
log.D.F(
"subscription delivery DENIED for privileged event %s to %s (not authenticated)",
hex.Enc(ev.ID), d.sub.remote,
)
continue
}
@@ -201,8 +211,10 @@ func (p *P) Deliver(ev *event.E) {
}
}
if !allowed {
log.D.F("subscription delivery DENIED for privileged event %s to %s (auth mismatch)",
hex.Enc(ev.ID), d.sub.remote)
log.D.F(
"subscription delivery DENIED for privileged event %s to %s (auth mismatch)",
hex.Enc(ev.ID), d.sub.remote,
)
// Skip delivery for this subscriber
continue
}
@@ -225,26 +237,37 @@ func (p *P) Deliver(ev *event.E) {
}
if hasPrivateTag {
canSeePrivate := p.canSeePrivateEvent(d.sub.AuthedPubkey, privatePubkey, d.sub.remote)
canSeePrivate := p.canSeePrivateEvent(
d.sub.AuthedPubkey, privatePubkey, d.sub.remote,
)
if !canSeePrivate {
log.D.F("subscription delivery DENIED for private event %s to %s (unauthorized)",
hex.Enc(ev.ID), d.sub.remote)
log.D.F(
"subscription delivery DENIED for private event %s to %s (unauthorized)",
hex.Enc(ev.ID), d.sub.remote,
)
continue
}
log.D.F("subscription delivery ALLOWED for private event %s to %s (authorized)",
hex.Enc(ev.ID), d.sub.remote)
log.D.F(
"subscription delivery ALLOWED for private event %s to %s (authorized)",
hex.Enc(ev.ID), d.sub.remote,
)
}
}
// Send event to the subscription's receiver channel
// The consumer goroutine (in handle-req.go) will read from this channel
// and forward it to the client via the write channel
log.D.F("attempting delivery of event %s (kind=%d) to subscription %s @ %s",
hex.Enc(ev.ID), ev.Kind, d.id, d.sub.remote)
log.D.F(
"attempting delivery of event %s (kind=%d) to subscription %s @ %s",
hex.Enc(ev.ID), ev.Kind, d.id, d.sub.remote,
)
// Check if receiver channel exists
if d.sub.Receiver == nil {
log.E.F("subscription %s has nil receiver channel for %s", d.id, d.sub.remote)
log.E.F(
"subscription %s has nil receiver channel for %s", d.id,
d.sub.remote,
)
continue
}
@@ -253,11 +276,15 @@ func (p *P) Deliver(ev *event.E) {
case <-p.c.Done():
continue
case d.sub.Receiver <- ev:
log.D.F("subscription delivery QUEUED: event=%s to=%s sub=%s",
hex.Enc(ev.ID), d.sub.remote, d.id)
log.D.F(
"subscription delivery QUEUED: event=%s to=%s sub=%s",
hex.Enc(ev.ID), d.sub.remote, d.id,
)
case <-time.After(DefaultWriteTimeout):
log.E.F("subscription delivery TIMEOUT: event=%s to=%s sub=%s",
hex.Enc(ev.ID), d.sub.remote, d.id)
log.E.F(
"subscription delivery TIMEOUT: event=%s to=%s sub=%s",
hex.Enc(ev.ID), d.sub.remote, d.id,
)
// Receiver channel is full - subscription consumer is stuck or slow
// The subscription should be removed by the cleanup logic
}
@@ -285,7 +312,9 @@ func (p *P) removeSubscriberId(ws *websocket.Conn, id string) {
// SetWriteChan stores the write channel for a websocket connection
// If writeChan is nil, the entry is removed from the map
func (p *P) SetWriteChan(conn *websocket.Conn, writeChan chan publish.WriteRequest) {
func (p *P) SetWriteChan(
conn *websocket.Conn, writeChan chan publish.WriteRequest,
) {
p.Mx.Lock()
defer p.Mx.Unlock()
if writeChan == nil {
@@ -296,7 +325,9 @@ func (p *P) SetWriteChan(conn *websocket.Conn, writeChan chan publish.WriteReque
}
// GetWriteChan returns the write channel for a websocket connection
func (p *P) GetWriteChan(conn *websocket.Conn) (chan publish.WriteRequest, bool) {
func (p *P) GetWriteChan(conn *websocket.Conn) (
chan publish.WriteRequest, bool,
) {
p.Mx.RLock()
defer p.Mx.RUnlock()
ch, ok := p.WriteChans[conn]
@@ -313,7 +344,9 @@ func (p *P) removeSubscriber(ws *websocket.Conn) {
}
// canSeePrivateEvent checks if the authenticated user can see an event with a private tag
func (p *P) canSeePrivateEvent(authedPubkey, privatePubkey []byte, remote string) (canSee bool) {
func (p *P) canSeePrivateEvent(
authedPubkey, privatePubkey []byte, remote string,
) (canSee bool) {
// If no authenticated user, deny access
if len(authedPubkey) == 0 {
return false

View File

@@ -17,6 +17,7 @@ import (
"lol.mleku.dev/chk"
"next.orly.dev/app/config"
"next.orly.dev/pkg/acl"
"next.orly.dev/pkg/blossom"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
@@ -29,7 +30,6 @@ import (
"next.orly.dev/pkg/protocol/publish"
"next.orly.dev/pkg/spider"
dsync "next.orly.dev/pkg/sync"
blossom "next.orly.dev/pkg/blossom"
)
type Server struct {
@@ -39,7 +39,7 @@ type Server struct {
publishers *publish.S
Admins [][]byte
Owners [][]byte
*database.D
DB database.Database // Changed from embedded *database.D to interface field
// optional reverse proxy for dev web server
devProxy *httputil.ReverseProxy
@@ -58,7 +58,7 @@ type Server struct {
blossomServer *blossom.Server
InviteManager *nip43.InviteManager
cfg *config.C
db *database.D
db database.Database // Changed from *database.D to interface
}
// isIPBlacklisted checks if an IP address is blacklisted using the managed ACL system
@@ -91,19 +91,9 @@ func (s *Server) isIPBlacklisted(remote string) bool {
}
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Set comprehensive CORS headers for proxy compatibility
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
w.Header().Set("Access-Control-Allow-Headers",
"Origin, X-Requested-With, Content-Type, Accept, Authorization, "+
"X-Forwarded-For, X-Forwarded-Proto, X-Forwarded-Host, X-Real-IP, "+
"Upgrade, Connection, Sec-WebSocket-Key, Sec-WebSocket-Version, "+
"Sec-WebSocket-Protocol, Sec-WebSocket-Extensions")
w.Header().Set("Access-Control-Allow-Credentials", "true")
w.Header().Set("Access-Control-Max-Age", "86400")
// Add proxy-friendly headers
w.Header().Set("Vary", "Origin, Access-Control-Request-Method, Access-Control-Request-Headers")
// CORS headers should be handled by the reverse proxy (Caddy/nginx)
// to avoid duplicate headers. If running without a reverse proxy,
// uncomment the CORS configuration below or configure via environment variable.
// Handle preflight OPTIONS requests
if r.Method == "OPTIONS" {
@@ -245,7 +235,9 @@ func (s *Server) UserInterface() {
s.mux.HandleFunc("/api/sprocket/update", s.handleSprocketUpdate)
s.mux.HandleFunc("/api/sprocket/restart", s.handleSprocketRestart)
s.mux.HandleFunc("/api/sprocket/versions", s.handleSprocketVersions)
s.mux.HandleFunc("/api/sprocket/delete-version", s.handleSprocketDeleteVersion)
s.mux.HandleFunc(
"/api/sprocket/delete-version", s.handleSprocketDeleteVersion,
)
s.mux.HandleFunc("/api/sprocket/config", s.handleSprocketConfig)
// NIP-86 management endpoint
s.mux.HandleFunc("/api/nip86", s.handleNIP86Management)
@@ -343,7 +335,9 @@ func (s *Server) handleAuthChallenge(w http.ResponseWriter, r *http.Request) {
jsonData, err := json.Marshal(response)
if chk.E(err) {
http.Error(w, "Error generating challenge", http.StatusInternalServerError)
http.Error(
w, "Error generating challenge", http.StatusInternalServerError,
)
return
}
@@ -561,7 +555,10 @@ func (s *Server) handleExport(w http.ResponseWriter, r *http.Request) {
// Check permissions - require write, admin, or owner level
accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
if accessLevel != "write" && accessLevel != "admin" && accessLevel != "owner" {
http.Error(w, "Write, admin, or owner permission required", http.StatusForbidden)
http.Error(
w, "Write, admin, or owner permission required",
http.StatusForbidden,
)
return
}
@@ -610,10 +607,12 @@ func (s *Server) handleExport(w http.ResponseWriter, r *http.Request) {
}
w.Header().Set("Content-Type", "application/x-ndjson")
w.Header().Set("Content-Disposition", "attachment; filename=\""+filename+"\"")
w.Header().Set(
"Content-Disposition", "attachment; filename=\""+filename+"\"",
)
// Stream export
s.D.Export(s.Ctx, w, pks...)
s.DB.Export(s.Ctx, w, pks...)
}
// handleEventsMine returns the authenticated user's events in JSON format with pagination using NIP-98 authentication.
@@ -656,7 +655,7 @@ func (s *Server) handleEventsMine(w http.ResponseWriter, r *http.Request) {
}
log.Printf("DEBUG: Querying events for pubkey: %s", hex.Enc(pubkey))
events, err := s.D.QueryEvents(s.Ctx, f)
events, err := s.DB.QueryEvents(s.Ctx, f)
if chk.E(err) {
log.Printf("DEBUG: QueryEvents failed: %v", err)
http.Error(w, "Failed to query events", http.StatusInternalServerError)
@@ -725,7 +724,9 @@ func (s *Server) handleImport(w http.ResponseWriter, r *http.Request) {
// Check permissions - require admin or owner level
accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
if accessLevel != "admin" && accessLevel != "owner" {
http.Error(w, "Admin or owner permission required", http.StatusForbidden)
http.Error(
w, "Admin or owner permission required", http.StatusForbidden,
)
return
}
@@ -741,13 +742,13 @@ func (s *Server) handleImport(w http.ResponseWriter, r *http.Request) {
return
}
defer file.Close()
s.D.Import(file)
s.DB.Import(file)
} else {
if r.Body == nil {
http.Error(w, "Empty request body", http.StatusBadRequest)
return
}
s.D.Import(r.Body)
s.DB.Import(r.Body)
}
w.Header().Set("Content-Type", "application/json")
@@ -785,7 +786,9 @@ func (s *Server) handleSprocketStatus(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
jsonData, err := json.Marshal(status)
if chk.E(err) {
http.Error(w, "Error generating response", http.StatusInternalServerError)
http.Error(
w, "Error generating response", http.StatusInternalServerError,
)
return
}
@@ -826,7 +829,10 @@ func (s *Server) handleSprocketUpdate(w http.ResponseWriter, r *http.Request) {
// Update the sprocket script
if err := s.sprocketManager.UpdateSprocket(string(body)); chk.E(err) {
http.Error(w, fmt.Sprintf("Failed to update sprocket: %v", err), http.StatusInternalServerError)
http.Error(
w, fmt.Sprintf("Failed to update sprocket: %v", err),
http.StatusInternalServerError,
)
return
}
@@ -861,7 +867,10 @@ func (s *Server) handleSprocketRestart(w http.ResponseWriter, r *http.Request) {
// Restart the sprocket script
if err := s.sprocketManager.RestartSprocket(); chk.E(err) {
http.Error(w, fmt.Sprintf("Failed to restart sprocket: %v", err), http.StatusInternalServerError)
http.Error(
w, fmt.Sprintf("Failed to restart sprocket: %v", err),
http.StatusInternalServerError,
)
return
}
@@ -870,7 +879,9 @@ func (s *Server) handleSprocketRestart(w http.ResponseWriter, r *http.Request) {
}
// handleSprocketVersions returns all sprocket script versions
func (s *Server) handleSprocketVersions(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleSprocketVersions(
w http.ResponseWriter, r *http.Request,
) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
@@ -896,14 +907,19 @@ func (s *Server) handleSprocketVersions(w http.ResponseWriter, r *http.Request)
versions, err := s.sprocketManager.GetSprocketVersions()
if chk.E(err) {
http.Error(w, fmt.Sprintf("Failed to get sprocket versions: %v", err), http.StatusInternalServerError)
http.Error(
w, fmt.Sprintf("Failed to get sprocket versions: %v", err),
http.StatusInternalServerError,
)
return
}
w.Header().Set("Content-Type", "application/json")
jsonData, err := json.Marshal(versions)
if chk.E(err) {
http.Error(w, "Error generating response", http.StatusInternalServerError)
http.Error(
w, "Error generating response", http.StatusInternalServerError,
)
return
}
@@ -911,7 +927,9 @@ func (s *Server) handleSprocketVersions(w http.ResponseWriter, r *http.Request)
}
// handleSprocketDeleteVersion deletes a specific sprocket version
func (s *Server) handleSprocketDeleteVersion(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleSprocketDeleteVersion(
w http.ResponseWriter, r *http.Request,
) {
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
@@ -957,7 +975,10 @@ func (s *Server) handleSprocketDeleteVersion(w http.ResponseWriter, r *http.Requ
// Delete the sprocket version
if err := s.sprocketManager.DeleteSprocketVersion(request.Filename); chk.E(err) {
http.Error(w, fmt.Sprintf("Failed to delete sprocket version: %v", err), http.StatusInternalServerError)
http.Error(
w, fmt.Sprintf("Failed to delete sprocket version: %v", err),
http.StatusInternalServerError,
)
return
}
@@ -982,7 +1003,9 @@ func (s *Server) handleSprocketConfig(w http.ResponseWriter, r *http.Request) {
jsonData, err := json.Marshal(response)
if chk.E(err) {
http.Error(w, "Error generating response", http.StatusInternalServerError)
http.Error(
w, "Error generating response", http.StatusInternalServerError,
)
return
}
@@ -1006,7 +1029,9 @@ func (s *Server) handleACLMode(w http.ResponseWriter, r *http.Request) {
jsonData, err := json.Marshal(response)
if chk.E(err) {
http.Error(w, "Error generating response", http.StatusInternalServerError)
http.Error(
w, "Error generating response", http.StatusInternalServerError,
)
return
}
@@ -1016,7 +1041,9 @@ func (s *Server) handleACLMode(w http.ResponseWriter, r *http.Request) {
// handleSyncCurrent handles requests for the current serial number
func (s *Server) handleSyncCurrent(w http.ResponseWriter, r *http.Request) {
if s.syncManager == nil {
http.Error(w, "Sync manager not initialized", http.StatusServiceUnavailable)
http.Error(
w, "Sync manager not initialized", http.StatusServiceUnavailable,
)
return
}
@@ -1031,7 +1058,9 @@ func (s *Server) handleSyncCurrent(w http.ResponseWriter, r *http.Request) {
// handleSyncEventIDs handles requests for event IDs with their serial numbers
func (s *Server) handleSyncEventIDs(w http.ResponseWriter, r *http.Request) {
if s.syncManager == nil {
http.Error(w, "Sync manager not initialized", http.StatusServiceUnavailable)
http.Error(
w, "Sync manager not initialized", http.StatusServiceUnavailable,
)
return
}
@@ -1044,12 +1073,16 @@ func (s *Server) handleSyncEventIDs(w http.ResponseWriter, r *http.Request) {
}
// validatePeerRequest validates NIP-98 authentication and checks if the requesting peer is authorized
func (s *Server) validatePeerRequest(w http.ResponseWriter, r *http.Request) bool {
func (s *Server) validatePeerRequest(
w http.ResponseWriter, r *http.Request,
) bool {
// Validate NIP-98 authentication
valid, pubkey, err := httpauth.CheckAuth(r)
if err != nil {
log.Printf("NIP-98 auth validation error: %v", err)
http.Error(w, "Authentication validation failed", http.StatusUnauthorized)
http.Error(
w, "Authentication validation failed", http.StatusUnauthorized,
)
return false
}
if !valid {

View File

@@ -199,7 +199,7 @@ func TestLongRunningSubscriptionStability(t *testing.T) {
ev := createSignedTestEvent(t, 1, fmt.Sprintf("Test event %d for long-running subscription", i))
// Save event to database
if _, err := server.D.SaveEvent(context.Background(), ev); err != nil {
if _, err := server.DB.SaveEvent(context.Background(), ev); err != nil {
t.Errorf("Failed to save event %d: %v", i, err)
continue
}
@@ -376,7 +376,7 @@ func TestMultipleConcurrentSubscriptions(t *testing.T) {
// Create and sign test event
ev := createSignedTestEvent(t, uint16(sub.kind), fmt.Sprintf("Test for kind %d event %d", sub.kind, i))
if _, err := server.D.SaveEvent(context.Background(), ev); err != nil {
if _, err := server.DB.SaveEvent(context.Background(), ev); err != nil {
t.Errorf("Failed to save event: %v", err)
}
@@ -431,7 +431,7 @@ func setupTestServer(t *testing.T) (*Server, func()) {
// Setup server
server := &Server{
Config: cfg,
D: db,
DB: db,
Ctx: ctx,
publishers: publish.New(NewPublisher(ctx)),
Admins: [][]byte{},

View File

@@ -1 +1,17 @@
test
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width,initial-scale=1" />
<title>ORLY?</title>
<link rel="icon" type="image/png" href="/favicon.png" />
<link rel="stylesheet" href="/global.css" />
<link rel="stylesheet" href="/bundle.css" />
<script defer src="/bundle.js"></script>
</head>
<body></body>
</html>

View File

@@ -0,0 +1,188 @@
# Badger Cache Optimization Strategy
## Problem Analysis
### Initial Configuration (FAILED)
- Block cache: 2048 MB
- Index cache: 1024 MB
- **Result**: Cache hit ratio remained at 33%
### Root Cause Discovery
Badger's Ristretto cache uses a "cost" metric that doesn't directly map to bytes:
```
Average cost per key: 54,628,383 bytes = 52.10 MB
Cache size: 2048 MB
Keys that fit: ~39 keys only!
```
The cost metric appears to include:
- Uncompressed data size
- Value log references
- Table metadata
- Potentially full `BaseTableSize` (64 MB) per entry
### Why Previous Fix Didn't Work
With `BaseTableSize = 64 MB`:
- Each cache entry costs ~52 MB in the cost metric
- 2 GB cache ÷ 52 MB = ~39 entries max
- Test generates 228,000+ unique keys
- **Eviction rate: 99.99%** (everything gets evicted immediately)
## Multi-Pronged Optimization Strategy
### Approach 1: Reduce Table Sizes (IMPLEMENTED)
**Changes in `pkg/database/database.go`:**
```go
// OLD (causing high cache cost):
opts.BaseTableSize = 64 * units.Mb // 64 MB per table
opts.MemTableSize = 64 * units.Mb // 64 MB memtable
// NEW (lower cache cost):
opts.BaseTableSize = 8 * units.Mb // 8 MB per table (8x reduction)
opts.MemTableSize = 16 * units.Mb // 16 MB memtable (4x reduction)
```
**Expected Impact:**
- Cost per key should drop from ~52 MB to ~6-8 MB
- Cache can now hold ~2,000-3,000 keys instead of ~39
- **Projected hit ratio: 60-70%** (significant improvement)
### Approach 2: Enable Compression (IMPLEMENTED)
```go
// OLD:
opts.Compression = options.None
// NEW:
opts.Compression = options.ZSTD
opts.ZSTDCompressionLevel = 1 // Fast compression
```
**Expected Impact:**
- Compressed data reduces cache cost metric
- ZSTD level 1 is very fast (~500 MB/s) with ~2-3x compression
- Should reduce cost per key by another 50-60%
- **Combined with smaller tables: cost per key ~3-4 MB**
### Approach 3: Massive Cache Increase (IMPLEMENTED)
**Changes in `Dockerfile.next-orly`:**
```dockerfile
ENV ORLY_DB_BLOCK_CACHE_MB=16384 # 16 GB (was 2 GB)
ENV ORLY_DB_INDEX_CACHE_MB=4096 # 4 GB (was 1 GB)
```
**Rationale:**
- With 16 GB cache and 3-4 MB cost per key: **~4,000-5,000 keys** can fit
- This should cover the working set for most benchmark tests
- **Target hit ratio: 80-90%**
## Combined Effect Calculation
### Before Optimization:
- Table size: 64 MB
- Cost per key: ~52 MB
- Cache: 2 GB
- Keys in cache: ~39
- Hit ratio: 33%
### After Optimization:
- Table size: 8 MB (8x smaller)
- Compression: ZSTD (~3x reduction)
- Effective cost per key: ~2-3 MB (17-25x reduction!)
- Cache: 16 GB (8x larger)
- Keys in cache: **~5,000-8,000** (128-205x improvement)
- **Projected hit ratio: 85-95%**
## Trade-offs
### Smaller Tables
**Pros:**
- Lower cache cost
- Faster individual compactions
- Better cache efficiency
**Cons:**
- More files to manage (mitigated by faster compaction)
- Slightly more compaction overhead
**Verdict:** Worth it for 25x cache efficiency improvement
### Compression
**Pros:**
- Reduces cache cost
- Reduces disk space
- ZSTD level 1 is very fast
**Cons:**
- ~5-10% CPU overhead for compression
- ~3-5% CPU overhead for decompression
**Verdict:** Minor CPU cost for major cache gains
### Large Cache
**Pros:**
- High hit ratio
- Lower latency
- Better throughput
**Cons:**
- 20 GB memory usage (16 GB block + 4 GB index)
- May not be suitable for resource-constrained environments
**Verdict:** Acceptable for high-performance relay deployments
## Alternative Configurations
### For 8 GB RAM Systems:
```dockerfile
ENV ORLY_DB_BLOCK_CACHE_MB=6144 # 6 GB
ENV ORLY_DB_INDEX_CACHE_MB=1536 # 1.5 GB
```
With optimized tables+compression: ~2,000-3,000 keys, 70-80% hit ratio
### For 4 GB RAM Systems:
```dockerfile
ENV ORLY_DB_BLOCK_CACHE_MB=2560 # 2.5 GB
ENV ORLY_DB_INDEX_CACHE_MB=512 # 512 MB
```
With optimized tables+compression: ~800-1,200 keys, 50-60% hit ratio
## Testing & Validation
To test these changes:
```bash
cd /home/mleku/src/next.orly.dev/cmd/benchmark
# Rebuild with new code changes
docker compose build next-orly
# Run benchmark
sudo rm -rf data/
./run-benchmark-orly-only.sh
```
### Metrics to Monitor:
1. **Cache hit ratio** (target: >85%)
2. **Cache life expectancy** (target: >30 seconds)
3. **Average latency** (target: <3ms)
4. **P95 latency** (target: <10ms)
5. **Burst pattern performance** (target: match khatru-sqlite)
## Expected Results
### Burst Pattern Test:
- **Before**: 9.35ms avg, 34.48ms P95
- **After**: <4ms avg, <10ms P95 (60-70% improvement)
### Overall Performance:
- Match or exceed khatru-sqlite and khatru-badger
- Eliminate cache warnings
- Stable performance across test rounds

View File

@@ -0,0 +1,97 @@
# Badger Cache Tuning Analysis
## Problem Identified
From benchmark run `run_20251116_092759`, the Badger block cache showed critical performance issues:
### Cache Metrics (Round 1):
```
Block cache might be too small. Metrics:
- hit: 151,469
- miss: 307,989
- hit-ratio: 0.33 (33%)
- keys-added: 226,912
- keys-evicted: 226,893 (99.99% eviction rate!)
- Cache life expectancy: 2 seconds (90th percentile)
```
### Performance Impact:
- **Burst Pattern Latency**: 9.35ms avg (vs 3.61ms for khatru-sqlite)
- **P95 Latency**: 34.48ms (vs 8.59ms for khatru-sqlite)
- **Cache hit ratio**: Only 33% - causing constant disk I/O
## Root Cause
The benchmark container was using **default Badger cache sizes** (much smaller than the code defaults):
- Block cache: ~64 MB (Badger default)
- Index cache: ~32 MB (Badger default)
The code has better defaults (1024 MB / 512 MB), but these weren't set in the Docker container.
## Cache Size Calculation
Based on benchmark workload analysis:
### Block Cache Requirements:
- Total cost added: 12.44 TB during test
- With 226K keys and immediate evictions, we need to hold ~100-200K blocks in memory
- At ~10-20 KB per block average: **2-4 GB needed**
### Index Cache Requirements:
- For 200K+ keys with metadata
- Efficient index lookups during queries
- **1-2 GB needed**
## Solution
Updated `Dockerfile.next-orly` with optimized cache settings:
```dockerfile
ENV ORLY_DB_BLOCK_CACHE_MB=2048 # 2 GB block cache
ENV ORLY_DB_INDEX_CACHE_MB=1024 # 1 GB index cache
```
### Expected Improvements:
- **Cache hit ratio**: Target 85-95% (up from 33%)
- **Burst pattern latency**: Target <5ms avg (down from 9.35ms)
- **P95 latency**: Target <15ms (down from 34.48ms)
- **Query latency**: Significant reduction due to cached index lookups
## Testing Strategy
1. Rebuild Docker image with new cache settings
2. Run full benchmark suite
3. Compare metrics:
- Cache hit ratio
- Average/P95/P99 latencies
- Throughput under burst patterns
- Memory usage
## Memory Budget
With these settings, the relay will use approximately:
- Block cache: 2 GB
- Index cache: 1 GB
- Badger internal structures: ~200 MB
- Go runtime: ~200 MB
- **Total**: ~3.5 GB
This is reasonable for a high-performance relay and well within modern server capabilities.
## Alternative Configurations
For constrained environments:
### Medium (1.5 GB total):
```
ORLY_DB_BLOCK_CACHE_MB=1024
ORLY_DB_INDEX_CACHE_MB=512
```
### Minimal (512 MB total):
```
ORLY_DB_BLOCK_CACHE_MB=384
ORLY_DB_INDEX_CACHE_MB=128
```
Note: Smaller caches will result in lower hit ratios and higher latencies.

View File

@@ -24,7 +24,7 @@ RUN go mod download
COPY . .
# Build the benchmark tool with CGO enabled
RUN CGO_ENABLED=1 GOOS=linux go build -a -o benchmark cmd/benchmark/main.go
RUN CGO_ENABLED=1 GOOS=linux go build -a -o benchmark ./cmd/benchmark
# Copy libsecp256k1.so if available
RUN if [ -f pkg/crypto/p8k/libsecp256k1.so ]; then \
@@ -42,8 +42,7 @@ WORKDIR /app
# Copy benchmark binary
COPY --from=builder /build/benchmark /app/benchmark
# Copy libsecp256k1.so if available
COPY --from=builder /build/libsecp256k1.so /app/libsecp256k1.so 2>/dev/null || true
# libsecp256k1 is already installed system-wide via apk
# Copy benchmark runner script
COPY cmd/benchmark/benchmark-runner.sh /app/benchmark-runner
@@ -60,8 +59,8 @@ RUN adduser -u 1000 -D appuser && \
ENV LD_LIBRARY_PATH=/app:/usr/local/lib:/usr/lib
# Environment variables
ENV BENCHMARK_EVENTS=10000
ENV BENCHMARK_WORKERS=8
ENV BENCHMARK_EVENTS=50000
ENV BENCHMARK_WORKERS=24
ENV BENCHMARK_DURATION=60s
# Drop privileges: run as uid 1000

View File

@@ -6,7 +6,7 @@ WORKDIR /build
COPY . .
# Build the basic-badger example
RUN echo ${pwd};cd examples/basic-badger && \
RUN cd examples/basic-badger && \
go mod tidy && \
CGO_ENABLED=0 go build -o khatru-badger .
@@ -15,8 +15,9 @@ RUN apk --no-cache add ca-certificates wget
WORKDIR /app
COPY --from=builder /build/examples/basic-badger/khatru-badger /app/
RUN mkdir -p /data
EXPOSE 3334
EXPOSE 8080
ENV DATABASE_PATH=/data/badger
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD wget --quiet --tries=1 --spider http://localhost:3334 || exit 1
CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-badger"]

View File

@@ -15,8 +15,9 @@ RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic-sqlite3/khatru-sqlite /app/
RUN mkdir -p /data
EXPOSE 3334
EXPOSE 8080
ENV DATABASE_PATH=/data/khatru.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD wget --quiet --tries=1 --spider http://localhost:3334 || exit 1
CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-sqlite"]

View File

@@ -45,14 +45,9 @@ RUN go mod download
# Copy source code
COPY . .
# Build the relay
# Build the relay (libsecp256k1 installed via make install to /usr/lib)
RUN CGO_ENABLED=1 GOOS=linux go build -gcflags "all=-N -l" -o relay .
# Copy libsecp256k1.so if it exists in the repo
RUN if [ -f pkg/crypto/p8k/libsecp256k1.so ]; then \
cp pkg/crypto/p8k/libsecp256k1.so /build/; \
fi
# Create non-root user (uid 1000) for runtime in builder stage (used by analyzer)
RUN useradd -u 1000 -m -s /bin/bash appuser && \
chown -R 1000:1000 /build
@@ -71,8 +66,7 @@ WORKDIR /app
# Copy binary from builder
COPY --from=builder /build/relay /app/relay
# Copy libsecp256k1.so if it was built with the binary
COPY --from=builder /build/libsecp256k1.so /app/libsecp256k1.so 2>/dev/null || true
# libsecp256k1 is already installed system-wide in the final stage via apt-get install libsecp256k1-0
# Create runtime user and writable directories
RUN useradd -u 1000 -m -s /bin/bash appuser && \
@@ -87,10 +81,16 @@ ENV ORLY_DATA_DIR=/data
ENV ORLY_LISTEN=0.0.0.0
ENV ORLY_PORT=8080
ENV ORLY_LOG_LEVEL=off
# Aggressive cache settings to match Badger's cost metric
# Badger tracks ~52MB cost per key, need massive cache for good hit ratio
# Block cache: 16GB to hold ~300 keys in cache
# Index cache: 4GB for index lookups
ENV ORLY_DB_BLOCK_CACHE_MB=16384
ENV ORLY_DB_INDEX_CACHE_MB=4096
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD bash -lc "code=\$(curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:8080 || echo 000); echo \$code | grep -E '^(101|200|400|404|426)$' >/dev/null || exit 1"
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD curl -f http://localhost:8080/ || exit 1
# Drop privileges: run as uid 1000
USER 1000:1000

View File

@@ -1,12 +1,12 @@
FROM rust:1.81-alpine AS builder
FROM rust:alpine AS builder
RUN apk add --no-cache musl-dev sqlite-dev build-base bash perl protobuf
RUN apk add --no-cache musl-dev sqlite-dev build-base autoconf automake libtool protobuf-dev protoc
WORKDIR /build
COPY . .
# Build the relay
RUN cargo build --release
# Regenerate Cargo.lock if needed, then build
RUN rm -f Cargo.lock && cargo generate-lockfile && cargo build --release
FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget

View File

@@ -15,9 +15,9 @@ RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic/relayer-basic /app/
RUN mkdir -p /data
EXPOSE 7447
EXPOSE 8080
ENV DATABASE_PATH=/data/relayer.db
# PORT env is not used by relayer-basic; it always binds to 7447 in code.
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD wget --quiet --tries=1 --spider http://localhost:7447 || exit 1
CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/relayer-basic"]

View File

@@ -15,9 +15,7 @@ RUN apt-get update && apt-get install -y \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /build
# Fetch strfry source with submodules to ensure golpe is present
RUN git clone --recurse-submodules https://github.com/hoytech/strfry .
COPY . .
# Build strfry
RUN make setup-golpe && \

View File

@@ -0,0 +1,162 @@
# Inline Event Optimization Strategy
## Problem: Value Log vs LSM Tree
By default, Badger stores all values above a small threshold (~1KB) in the value log (separate files). This causes:
- **Extra disk I/O** for reading values
- **Cache inefficiency** - must cache both keys AND value log positions
- **Poor performance for small inline events**
## ORLY's Inline Event Storage
ORLY uses "Reiser4 optimization" - small events are stored **inline** in the key itself:
- Event data embedded directly in LSM tree
- No separate value log lookup needed
- Much faster reads for small events
**But:** By default, Badger still tries to put these in the value log!
## Solution: VLogPercentile
```go
opts.VLogPercentile = 0.99
```
**What this does:**
- Analyzes value size distribution
- Keeps the smallest 99% of values in the LSM tree
- Only puts the largest 1% in value log
**Impact on ORLY:**
- Our optimized inline events stay in LSM tree ✅
- Only large events (>100KB) go to value log
- Dramatically faster reads for typical Nostr events
## Additional Optimizations Implemented
### 1. Disable Conflict Detection
```go
opts.DetectConflicts = false
```
**Rationale:**
- Nostr events are **immutable** (content-addressable by ID)
- No need for transaction conflict checking
- **5-10% performance improvement** on writes
### 2. Optimize BaseLevelSize
```go
opts.BaseLevelSize = 64 * units.Mb // Increased from 10 MB
```
**Benefits:**
- Fewer LSM levels to search
- Faster compaction
- Better space amplification
### 3. Enable ZSTD Compression
```go
opts.Compression = options.ZSTD
opts.ZSTDCompressionLevel = 1 // Fast mode
```
**Benefits:**
- 2-3x compression ratio on event data
- Level 1 is very fast (500+ MB/s compression, 2+ GB/s decompression)
- Reduces cache cost metric
- Saves disk space
## Combined Effect
### Before Optimization:
```
Small inline event read:
1. Read key from LSM tree
2. Get value log position from LSM
3. Seek to value log file
4. Read value from value log
Total: ~3-5 disk operations
```
### After Optimization:
```
Small inline event read:
1. Read key+value from LSM tree (in cache!)
Total: 1 cache hit
```
**Performance improvement: 3-5x faster reads for inline events**
## Configuration Summary
All optimizations applied in `pkg/database/database.go`:
```go
// Cache
opts.BlockCacheSize = 16384 MB // 16 GB
opts.IndexCacheSize = 4096 MB // 4 GB
// Table sizes (reduce cache cost)
opts.BaseTableSize = 8 MB
opts.MemTableSize = 16 MB
// Keep inline events in LSM
opts.VLogPercentile = 0.99
// LSM structure
opts.BaseLevelSize = 64 MB
opts.LevelSizeMultiplier = 10
// Performance
opts.Compression = ZSTD (level 1)
opts.DetectConflicts = false
opts.NumCompactors = 8
opts.NumMemtables = 8
```
## Expected Benchmark Improvements
### Before (run_20251116_092759):
- Burst pattern: 9.35ms avg, 34.48ms P95
- Cache hit ratio: 33%
- Value log lookups: high
### After (projected):
- Burst pattern: <3ms avg, <8ms P95
- Cache hit ratio: 85-95%
- Value log lookups: minimal (only large events)
**Overall: 60-70% latency reduction, matching or exceeding other Badger-based relays**
## Trade-offs
### VLogPercentile = 0.99
**Pro:** Keeps inline events in LSM for fast access
**Con:** Larger LSM tree (but we have 16 GB cache to handle it)
**Verdict:** ✅ Essential for inline event optimization
### DetectConflicts = false
**Pro:** 5-10% faster writes
**Con:** No transaction conflict detection
**Verdict:** ✅ Safe - Nostr events are immutable
### ZSTD Compression
**Pro:** 2-3x space savings, lower cache cost
**Con:** ~5% CPU overhead
**Verdict:** ✅ Well worth it for cache efficiency
## Testing
Run benchmark to validate:
```bash
cd cmd/benchmark
docker compose build next-orly
sudo rm -rf data/
./run-benchmark-orly-only.sh
```
Monitor for:
1. ✅ No "Block cache too small" warnings
2. ✅ Cache hit ratio >85%
3. ✅ Latencies competitive with khatru-badger
4. ✅ Most values in LSM tree (check logs)

View File

@@ -0,0 +1,137 @@
# ORLY Performance Analysis
## Benchmark Results Summary
### Performance with 90s warmup:
- **Peak Throughput**: 10,452 events/sec
- **Avg Latency**: 1.63ms
- **P95 Latency**: 2.27ms
- **Success Rate**: 100%
### Key Findings
#### 1. Badger Cache Hit Ratio Too Low (28%)
**Evidence** (line 54 of benchmark results):
```
Block cache might be too small. Metrics: hit: 128456 miss: 332127 ... hit-ratio: 0.28
```
**Impact**:
- Low cache hit ratio forces more disk reads
- Increased latency on queries
- Query performance degrades over time (3866 q/s → 2806 q/s)
**Recommendation**:
Increase Badger cache sizes via environment variables:
- `ORLY_DB_BLOCK_CACHE_MB`: Increase from default to 256-512MB
- `ORLY_DB_INDEX_CACHE_MB`: Increase from default to 128-256MB
#### 2. CPU Profile Analysis
**Total CPU time**: 3.65s over 510s runtime (0.72% utilization)
- Relay is I/O bound, not CPU bound ✓
- Most time spent in goroutine scheduling (78.63%)
- Badger compaction uses 12.88% of CPU
**Key Observations**:
- Low CPU utilization means relay is mostly waiting on I/O
- This is expected and efficient behavior
- Not a bottleneck
#### 3. Warmup Time Impact
**Without 90s warmup**: Performance appeared lower in initial tests
**With 90s warmup**: Better sustained performance
**Potential causes**:
- Badger cache warming up
- Goroutine pool stabilization
- Memory allocation settling
**Current mitigations**:
- 90s delay before benchmark starts
- Health check with 60s start_period
#### 4. Query Performance Degradation
**Round 1**: 3,866 queries/sec
**Round 2**: 2,806 queries/sec (27% decrease)
**Likely causes**:
1. Cache pressure from accumulated data
2. Badger compaction interference
3. LSM tree depth increasing
**Recommendations**:
1. Increase cache sizes (primary fix)
2. Tune Badger compaction settings
3. Consider periodic cache warming
## Recommended Configuration Changes
### 1. Increase Badger Cache Sizes
Add to `cmd/benchmark/Dockerfile.next-orly`:
```dockerfile
ENV ORLY_DB_BLOCK_CACHE_MB=512
ENV ORLY_DB_INDEX_CACHE_MB=256
```
### 2. Tune Badger Options
Consider adjusting in `pkg/database/database.go`:
```go
// Increase value log file size for better write performance
ValueLogFileSize: 256 << 20, // 256MB (currently defaults to 1GB)
// Increase number of compactors
NumCompactors: 4, // Default is 4, could go to 8
// Increase number of level zero tables before compaction
NumLevelZeroTables: 8, // Default is 5
// Increase number of level zero tables before stalling writes
NumLevelZeroTablesStall: 16, // Default is 15
```
### 3. Add Readiness Check
Consider adding a "warmed up" indicator:
- Cache hit ratio > 50%
- At least 1000 events stored
- No active compactions
## Performance Comparison
| Implementation | Events/sec | Avg Latency | Cache Hit Ratio |
|---------------|------------|-------------|-----------------|
| ORLY (current) | 10,453 | 1.63ms | 28% ⚠️ |
| Khatru-SQLite | 9,819 | 590µs | N/A |
| Khatru-Badger | 9,712 | 602µs | N/A |
| Relayer-basic | 10,014 | 581µs | N/A |
| Strfry | 9,631 | 613µs | N/A |
| Nostr-rs-relay | 9,617 | 605µs | N/A |
**Key Observation**: ORLY has highest throughput but significantly higher latency than competitors. The low cache hit ratio explains this discrepancy.
## Next Steps
1. **Immediate**: Test with increased cache sizes
2. **Short-term**: Optimize Badger configuration
3. **Medium-term**: Investigate query path optimizations
4. **Long-term**: Consider query result caching layer
## Files Modified
- `cmd/benchmark/docker-compose.profile.yml` - Profile-enabled ORLY setup
- `cmd/benchmark/run-profile.sh` - Script to run profiled benchmarks
- This analysis document
## Profile Data
CPU profile available at: `cmd/benchmark/profiles/cpu.pprof`
Analyze with:
```bash
go tool pprof -http=:8080 profiles/cpu.pprof
```

View File

@@ -2,7 +2,7 @@
A comprehensive benchmarking system for testing and comparing the performance of multiple Nostr relay implementations, including:
- **next.orly.dev** (this repository) - BadgerDB-based relay
- **next.orly.dev** (this repository) - Badger and DGraph backend variants
- **Khatru** - SQLite and Badger variants
- **Relayer** - Basic example implementation
- **Strfry** - C++ LMDB-based relay
@@ -91,13 +91,16 @@ ls reports/run_YYYYMMDD_HHMMSS/
### Docker Compose Services
| Service | Port | Description |
| ---------------- | ---- | ----------------------------------------- |
| next-orly | 8001 | This repository's BadgerDB relay |
| khatru-sqlite | 8002 | Khatru with SQLite backend |
| khatru-badger | 8003 | Khatru with Badger backend |
| relayer-basic | 8004 | Basic relayer example |
| strfry | 8005 | Strfry C++ LMDB relay |
| Service | Port | Description |
| ------------------ | ---- | ----------------------------------------- |
| next-orly-badger | 8001 | This repository's Badger relay |
| next-orly-dgraph | 8007 | This repository's DGraph relay |
| dgraph-zero | 5080 | DGraph cluster coordinator |
| dgraph-alpha | 9080 | DGraph data node |
| khatru-sqlite | 8002 | Khatru with SQLite backend |
| khatru-badger | 8003 | Khatru with Badger backend |
| relayer-basic | 8004 | Basic relayer example |
| strfry | 8005 | Strfry C++ LMDB relay |
| nostr-rs-relay | 8006 | Rust SQLite relay |
| benchmark-runner | - | Orchestrates tests and aggregates results |
@@ -173,6 +176,39 @@ go build -o benchmark main.go
-duration=30s
```
## Database Backend Comparison
The benchmark suite includes **next.orly.dev** with two different database backends to compare architectural approaches:
### Badger Backend (next-orly-badger)
- **Type**: Embedded key-value store
- **Architecture**: Single-process, no network overhead
- **Best for**: Personal relays, single-instance deployments
- **Characteristics**:
- Lower latency for single-instance operations
- No network round-trips
- Simpler deployment
- Limited to single-node scaling
### DGraph Backend (next-orly-dgraph)
- **Type**: Distributed graph database
- **Architecture**: Client-server with dgraph-zero (coordinator) and dgraph-alpha (data node)
- **Best for**: Distributed deployments, horizontal scaling
- **Characteristics**:
- Network overhead from gRPC communication
- Supports multi-node clustering
- Built-in replication and sharding
- More complex deployment
### Comparing the Backends
The benchmark results will show:
- **Latency differences**: Embedded vs. distributed overhead
- **Throughput trade-offs**: Single-process optimization vs. distributed scalability
- **Resource usage**: Memory and CPU patterns for different architectures
This comparison helps determine which backend is appropriate for different deployment scenarios.
## Benchmark Results Interpretation
### Peak Throughput Test

View File

@@ -0,0 +1,574 @@
package main
import (
"context"
"fmt"
"sort"
"sync"
"time"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/encoders/timestamp"
"next.orly.dev/pkg/interfaces/signer/p8k"
)
// BenchmarkAdapter adapts a database.Database interface to work with benchmark tests
type BenchmarkAdapter struct {
config *BenchmarkConfig
db database.Database
results []*BenchmarkResult
mu sync.RWMutex
}
// NewBenchmarkAdapter creates a new benchmark adapter
func NewBenchmarkAdapter(config *BenchmarkConfig, db database.Database) *BenchmarkAdapter {
return &BenchmarkAdapter{
config: config,
db: db,
results: make([]*BenchmarkResult, 0),
}
}
// RunPeakThroughputTest runs the peak throughput benchmark
func (ba *BenchmarkAdapter) RunPeakThroughputTest() {
fmt.Println("\n=== Peak Throughput Test ===")
start := time.Now()
var wg sync.WaitGroup
var totalEvents int64
var errors []error
var latencies []time.Duration
var mu sync.Mutex
events := ba.generateEvents(ba.config.NumEvents)
eventChan := make(chan *event.E, len(events))
// Fill event channel
for _, ev := range events {
eventChan <- ev
}
close(eventChan)
// Start workers
for i := 0; i < ba.config.ConcurrentWorkers; i++ {
wg.Add(1)
go func(workerID int) {
defer wg.Done()
ctx := context.Background()
for ev := range eventChan {
eventStart := time.Now()
_, err := ba.db.SaveEvent(ctx, ev)
latency := time.Since(eventStart)
mu.Lock()
if err != nil {
errors = append(errors, err)
} else {
totalEvents++
latencies = append(latencies, latency)
}
mu.Unlock()
}
}(i)
}
wg.Wait()
duration := time.Since(start)
// Calculate metrics
result := &BenchmarkResult{
TestName: "Peak Throughput",
Duration: duration,
TotalEvents: int(totalEvents),
EventsPerSecond: float64(totalEvents) / duration.Seconds(),
ConcurrentWorkers: ba.config.ConcurrentWorkers,
MemoryUsed: getMemUsage(),
}
if len(latencies) > 0 {
sort.Slice(latencies, func(i, j int) bool {
return latencies[i] < latencies[j]
})
result.AvgLatency = calculateAverage(latencies)
result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
result.P99Latency = latencies[int(float64(len(latencies))*0.99)]
bottom10 := latencies[:int(float64(len(latencies))*0.10)]
result.Bottom10Avg = calculateAverage(bottom10)
}
result.SuccessRate = float64(totalEvents) / float64(ba.config.NumEvents) * 100
if len(errors) > 0 {
result.Errors = make([]string, 0, len(errors))
for _, err := range errors {
result.Errors = append(result.Errors, err.Error())
}
}
ba.mu.Lock()
ba.results = append(ba.results, result)
ba.mu.Unlock()
ba.printResult(result)
}
// RunBurstPatternTest runs burst pattern test
func (ba *BenchmarkAdapter) RunBurstPatternTest() {
fmt.Println("\n=== Burst Pattern Test ===")
start := time.Now()
var totalEvents int64
var latencies []time.Duration
var mu sync.Mutex
ctx := context.Background()
burstSize := 100
bursts := ba.config.NumEvents / burstSize
for i := 0; i < bursts; i++ {
// Generate a burst of events
events := ba.generateEvents(burstSize)
var wg sync.WaitGroup
for _, ev := range events {
wg.Add(1)
go func(e *event.E) {
defer wg.Done()
eventStart := time.Now()
_, err := ba.db.SaveEvent(ctx, e)
latency := time.Since(eventStart)
mu.Lock()
if err == nil {
totalEvents++
latencies = append(latencies, latency)
}
mu.Unlock()
}(ev)
}
wg.Wait()
// Short pause between bursts
time.Sleep(10 * time.Millisecond)
}
duration := time.Since(start)
result := &BenchmarkResult{
TestName: "Burst Pattern",
Duration: duration,
TotalEvents: int(totalEvents),
EventsPerSecond: float64(totalEvents) / duration.Seconds(),
ConcurrentWorkers: burstSize,
MemoryUsed: getMemUsage(),
SuccessRate: float64(totalEvents) / float64(ba.config.NumEvents) * 100,
}
if len(latencies) > 0 {
sort.Slice(latencies, func(i, j int) bool {
return latencies[i] < latencies[j]
})
result.AvgLatency = calculateAverage(latencies)
result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
result.P99Latency = latencies[int(float64(len(latencies))*0.99)]
bottom10 := latencies[:int(float64(len(latencies))*0.10)]
result.Bottom10Avg = calculateAverage(bottom10)
}
ba.mu.Lock()
ba.results = append(ba.results, result)
ba.mu.Unlock()
ba.printResult(result)
}
// RunMixedReadWriteTest runs mixed read/write test
func (ba *BenchmarkAdapter) RunMixedReadWriteTest() {
fmt.Println("\n=== Mixed Read/Write Test ===")
// First, populate some events
fmt.Println("Populating database with initial events...")
populateEvents := ba.generateEvents(1000)
ctx := context.Background()
for _, ev := range populateEvents {
ba.db.SaveEvent(ctx, ev)
}
start := time.Now()
var writeCount, readCount int64
var latencies []time.Duration
var mu sync.Mutex
var wg sync.WaitGroup
// Start workers doing mixed read/write
for i := 0; i < ba.config.ConcurrentWorkers; i++ {
wg.Add(1)
go func(workerID int) {
defer wg.Done()
events := ba.generateEvents(ba.config.NumEvents / ba.config.ConcurrentWorkers)
for idx, ev := range events {
eventStart := time.Now()
if idx%3 == 0 {
// Read operation
f := filter.New()
f.Kinds = kind.NewS(kind.TextNote)
limit := uint(10)
f.Limit = &limit
_, _ = ba.db.QueryEvents(ctx, f)
mu.Lock()
readCount++
mu.Unlock()
} else {
// Write operation
_, _ = ba.db.SaveEvent(ctx, ev)
mu.Lock()
writeCount++
mu.Unlock()
}
latency := time.Since(eventStart)
mu.Lock()
latencies = append(latencies, latency)
mu.Unlock()
}
}(i)
}
wg.Wait()
duration := time.Since(start)
result := &BenchmarkResult{
TestName: fmt.Sprintf("Mixed R/W (R:%d W:%d)", readCount, writeCount),
Duration: duration,
TotalEvents: int(writeCount + readCount),
EventsPerSecond: float64(writeCount+readCount) / duration.Seconds(),
ConcurrentWorkers: ba.config.ConcurrentWorkers,
MemoryUsed: getMemUsage(),
SuccessRate: 100.0,
}
if len(latencies) > 0 {
sort.Slice(latencies, func(i, j int) bool {
return latencies[i] < latencies[j]
})
result.AvgLatency = calculateAverage(latencies)
result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
result.P99Latency = latencies[int(float64(len(latencies))*0.99)]
bottom10 := latencies[:int(float64(len(latencies))*0.10)]
result.Bottom10Avg = calculateAverage(bottom10)
}
ba.mu.Lock()
ba.results = append(ba.results, result)
ba.mu.Unlock()
ba.printResult(result)
}
// RunQueryTest runs query performance test
func (ba *BenchmarkAdapter) RunQueryTest() {
fmt.Println("\n=== Query Performance Test ===")
// Populate with test data
fmt.Println("Populating database for query tests...")
events := ba.generateEvents(5000)
ctx := context.Background()
for _, ev := range events {
ba.db.SaveEvent(ctx, ev)
}
start := time.Now()
var queryCount int64
var latencies []time.Duration
var mu sync.Mutex
var wg sync.WaitGroup
queryTypes := []func() *filter.F{
func() *filter.F {
f := filter.New()
f.Kinds = kind.NewS(kind.TextNote)
limit := uint(100)
f.Limit = &limit
return f
},
func() *filter.F {
f := filter.New()
f.Kinds = kind.NewS(kind.TextNote, kind.Repost)
limit := uint(50)
f.Limit = &limit
return f
},
func() *filter.F {
f := filter.New()
limit := uint(10)
f.Limit = &limit
since := time.Now().Add(-1 * time.Hour).Unix()
f.Since = timestamp.FromUnix(since)
return f
},
}
// Run concurrent queries
iterations := 1000
for i := 0; i < ba.config.ConcurrentWorkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for j := 0; j < iterations/ba.config.ConcurrentWorkers; j++ {
f := queryTypes[j%len(queryTypes)]()
queryStart := time.Now()
_, _ = ba.db.QueryEvents(ctx, f)
latency := time.Since(queryStart)
mu.Lock()
queryCount++
latencies = append(latencies, latency)
mu.Unlock()
}
}()
}
wg.Wait()
duration := time.Since(start)
result := &BenchmarkResult{
TestName: fmt.Sprintf("Query Performance (%d queries)", queryCount),
Duration: duration,
TotalEvents: int(queryCount),
EventsPerSecond: float64(queryCount) / duration.Seconds(),
ConcurrentWorkers: ba.config.ConcurrentWorkers,
MemoryUsed: getMemUsage(),
SuccessRate: 100.0,
}
if len(latencies) > 0 {
sort.Slice(latencies, func(i, j int) bool {
return latencies[i] < latencies[j]
})
result.AvgLatency = calculateAverage(latencies)
result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
result.P99Latency = latencies[int(float64(len(latencies))*0.99)]
bottom10 := latencies[:int(float64(len(latencies))*0.10)]
result.Bottom10Avg = calculateAverage(bottom10)
}
ba.mu.Lock()
ba.results = append(ba.results, result)
ba.mu.Unlock()
ba.printResult(result)
}
// RunConcurrentQueryStoreTest runs concurrent query and store test
func (ba *BenchmarkAdapter) RunConcurrentQueryStoreTest() {
fmt.Println("\n=== Concurrent Query+Store Test ===")
start := time.Now()
var storeCount, queryCount int64
var latencies []time.Duration
var mu sync.Mutex
var wg sync.WaitGroup
ctx := context.Background()
// Half workers write, half query
halfWorkers := ba.config.ConcurrentWorkers / 2
if halfWorkers < 1 {
halfWorkers = 1
}
// Writers
for i := 0; i < halfWorkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
events := ba.generateEvents(ba.config.NumEvents / halfWorkers)
for _, ev := range events {
eventStart := time.Now()
ba.db.SaveEvent(ctx, ev)
latency := time.Since(eventStart)
mu.Lock()
storeCount++
latencies = append(latencies, latency)
mu.Unlock()
}
}()
}
// Readers
for i := 0; i < halfWorkers; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for j := 0; j < ba.config.NumEvents/halfWorkers; j++ {
f := filter.New()
f.Kinds = kind.NewS(kind.TextNote)
limit := uint(10)
f.Limit = &limit
queryStart := time.Now()
ba.db.QueryEvents(ctx, f)
latency := time.Since(queryStart)
mu.Lock()
queryCount++
latencies = append(latencies, latency)
mu.Unlock()
time.Sleep(1 * time.Millisecond)
}
}()
}
wg.Wait()
duration := time.Since(start)
result := &BenchmarkResult{
TestName: fmt.Sprintf("Concurrent Q+S (Q:%d S:%d)", queryCount, storeCount),
Duration: duration,
TotalEvents: int(storeCount + queryCount),
EventsPerSecond: float64(storeCount+queryCount) / duration.Seconds(),
ConcurrentWorkers: ba.config.ConcurrentWorkers,
MemoryUsed: getMemUsage(),
SuccessRate: 100.0,
}
if len(latencies) > 0 {
sort.Slice(latencies, func(i, j int) bool {
return latencies[i] < latencies[j]
})
result.AvgLatency = calculateAverage(latencies)
result.P90Latency = latencies[int(float64(len(latencies))*0.90)]
result.P95Latency = latencies[int(float64(len(latencies))*0.95)]
result.P99Latency = latencies[int(float64(len(latencies))*0.99)]
bottom10 := latencies[:int(float64(len(latencies))*0.10)]
result.Bottom10Avg = calculateAverage(bottom10)
}
ba.mu.Lock()
ba.results = append(ba.results, result)
ba.mu.Unlock()
ba.printResult(result)
}
// generateEvents generates test events with proper signatures
func (ba *BenchmarkAdapter) generateEvents(count int) []*event.E {
events := make([]*event.E, count)
// Create a test signer
signer := p8k.MustNew()
if err := signer.Generate(); err != nil {
panic(fmt.Sprintf("failed to generate test key: %v", err))
}
for i := 0; i < count; i++ {
ev := event.New()
ev.Kind = kind.TextNote.ToU16()
ev.CreatedAt = time.Now().Unix()
ev.Content = []byte(fmt.Sprintf("Benchmark event #%d - Testing Nostr relay performance with automated load generation", i))
ev.Tags = tag.NewS()
// Add some tags for variety
if i%10 == 0 {
benchmarkTag := tag.NewFromBytesSlice([]byte("t"), []byte("benchmark"))
ev.Tags.Append(benchmarkTag)
}
// Sign the event (sets Pubkey, ID, and Sig)
if err := ev.Sign(signer); err != nil {
panic(fmt.Sprintf("failed to sign event: %v", err))
}
events[i] = ev
}
return events
}
func (ba *BenchmarkAdapter) printResult(r *BenchmarkResult) {
fmt.Printf("\nResults for %s:\n", r.TestName)
fmt.Printf(" Duration: %v\n", r.Duration)
fmt.Printf(" Total Events: %d\n", r.TotalEvents)
fmt.Printf(" Events/sec: %.2f\n", r.EventsPerSecond)
fmt.Printf(" Success Rate: %.2f%%\n", r.SuccessRate)
fmt.Printf(" Workers: %d\n", r.ConcurrentWorkers)
fmt.Printf(" Memory Used: %.2f MB\n", float64(r.MemoryUsed)/1024/1024)
if r.AvgLatency > 0 {
fmt.Printf(" Avg Latency: %v\n", r.AvgLatency)
fmt.Printf(" P90 Latency: %v\n", r.P90Latency)
fmt.Printf(" P95 Latency: %v\n", r.P95Latency)
fmt.Printf(" P99 Latency: %v\n", r.P99Latency)
fmt.Printf(" Bottom 10%% Avg: %v\n", r.Bottom10Avg)
}
if len(r.Errors) > 0 {
fmt.Printf(" Errors: %d\n", len(r.Errors))
// Print first few errors as samples
sampleCount := 3
if len(r.Errors) < sampleCount {
sampleCount = len(r.Errors)
}
for i := 0; i < sampleCount; i++ {
fmt.Printf(" Sample %d: %s\n", i+1, r.Errors[i])
}
}
}
func (ba *BenchmarkAdapter) GenerateReport() {
// Delegate to main benchmark report generator
// We'll add the results to a file
fmt.Println("\n=== Benchmark Results Summary ===")
ba.mu.RLock()
defer ba.mu.RUnlock()
for _, result := range ba.results {
ba.printResult(result)
}
}
func (ba *BenchmarkAdapter) GenerateAsciidocReport() {
// TODO: Implement asciidoc report generation
fmt.Println("Asciidoc report generation not yet implemented for adapter")
}
func calculateAverage(durations []time.Duration) time.Duration {
if len(durations) == 0 {
return 0
}
var total time.Duration
for _, d := range durations {
total += d
}
return total / time.Duration(len(durations))
}

View File

@@ -3,7 +3,7 @@
##
# Directory that contains the strfry LMDB database (restart required)
db = "/data/strfry.lmdb"
db = "/data/strfry-db"
dbParams {
# Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)

View File

@@ -0,0 +1,122 @@
package main
import (
"context"
"fmt"
"log"
"os"
"time"
"next.orly.dev/pkg/database"
_ "next.orly.dev/pkg/dgraph" // Import to register dgraph factory
)
// DgraphBenchmark wraps a Benchmark with dgraph-specific setup
type DgraphBenchmark struct {
config *BenchmarkConfig
docker *DgraphDocker
database database.Database
bench *BenchmarkAdapter
}
// NewDgraphBenchmark creates a new dgraph benchmark instance
func NewDgraphBenchmark(config *BenchmarkConfig) (*DgraphBenchmark, error) {
// Create Docker manager
docker := NewDgraphDocker()
// Start dgraph containers
ctx := context.Background()
if err := docker.Start(ctx); err != nil {
return nil, fmt.Errorf("failed to start dgraph: %w", err)
}
// Set environment variable for dgraph connection
os.Setenv("ORLY_DGRAPH_URL", docker.GetGRPCEndpoint())
// Create database instance using dgraph backend
cancel := func() {}
db, err := database.NewDatabase(ctx, cancel, "dgraph", config.DataDir, "warn")
if err != nil {
docker.Stop()
return nil, fmt.Errorf("failed to create dgraph database: %w", err)
}
// Wait for database to be ready
fmt.Println("Waiting for dgraph database to be ready...")
select {
case <-db.Ready():
fmt.Println("Dgraph database is ready")
case <-time.After(30 * time.Second):
db.Close()
docker.Stop()
return nil, fmt.Errorf("dgraph database failed to become ready")
}
// Create adapter to use Database interface with Benchmark
adapter := NewBenchmarkAdapter(config, db)
dgraphBench := &DgraphBenchmark{
config: config,
docker: docker,
database: db,
bench: adapter,
}
return dgraphBench, nil
}
// Close closes the dgraph benchmark and stops Docker containers
func (dgb *DgraphBenchmark) Close() {
fmt.Println("Closing dgraph benchmark...")
if dgb.database != nil {
dgb.database.Close()
}
if dgb.docker != nil {
if err := dgb.docker.Stop(); err != nil {
log.Printf("Error stopping dgraph Docker: %v", err)
}
}
}
// RunSuite runs the benchmark suite on dgraph
func (dgb *DgraphBenchmark) RunSuite() {
fmt.Println("\n╔════════════════════════════════════════════════════════╗")
fmt.Println("║ DGRAPH BACKEND BENCHMARK SUITE ║")
fmt.Println("╚════════════════════════════════════════════════════════╝")
// Run only one round for dgraph to keep benchmark time reasonable
fmt.Printf("\n=== Starting dgraph benchmark ===\n")
fmt.Printf("RunPeakThroughputTest (dgraph)..\n")
dgb.bench.RunPeakThroughputTest()
time.Sleep(10 * time.Second)
fmt.Printf("RunBurstPatternTest (dgraph)..\n")
dgb.bench.RunBurstPatternTest()
time.Sleep(10 * time.Second)
fmt.Printf("RunMixedReadWriteTest (dgraph)..\n")
dgb.bench.RunMixedReadWriteTest()
time.Sleep(10 * time.Second)
fmt.Printf("RunQueryTest (dgraph)..\n")
dgb.bench.RunQueryTest()
time.Sleep(10 * time.Second)
fmt.Printf("RunConcurrentQueryStoreTest (dgraph)..\n")
dgb.bench.RunConcurrentQueryStoreTest()
fmt.Printf("\n=== Dgraph benchmark completed ===\n\n")
}
// GenerateReport generates the benchmark report
func (dgb *DgraphBenchmark) GenerateReport() {
dgb.bench.GenerateReport()
}
// GenerateAsciidocReport generates asciidoc format report
func (dgb *DgraphBenchmark) GenerateAsciidocReport() {
dgb.bench.GenerateAsciidocReport()
}

View File

@@ -0,0 +1,160 @@
package main
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"time"
)
// DgraphDocker manages a dgraph instance via Docker Compose
type DgraphDocker struct {
composeFile string
projectName string
running bool
}
// NewDgraphDocker creates a new dgraph Docker manager
func NewDgraphDocker() *DgraphDocker {
// Try to find the docker-compose file in the current directory first
composeFile := "docker-compose-dgraph.yml"
// If not found, try the cmd/benchmark directory (for running from project root)
if _, err := os.Stat(composeFile); os.IsNotExist(err) {
composeFile = filepath.Join("cmd", "benchmark", "docker-compose-dgraph.yml")
}
return &DgraphDocker{
composeFile: composeFile,
projectName: "orly-benchmark-dgraph",
running: false,
}
}
// Start starts the dgraph Docker containers
func (d *DgraphDocker) Start(ctx context.Context) error {
fmt.Println("Starting dgraph Docker containers...")
// Stop any existing containers first
d.Stop()
// Start containers
cmd := exec.CommandContext(
ctx,
"docker-compose",
"-f", d.composeFile,
"-p", d.projectName,
"up", "-d",
)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to start dgraph containers: %w", err)
}
fmt.Println("Waiting for dgraph to be healthy...")
// Wait for health checks to pass
if err := d.waitForHealthy(ctx, 60*time.Second); err != nil {
d.Stop() // Clean up on failure
return err
}
d.running = true
fmt.Println("Dgraph is ready!")
return nil
}
// waitForHealthy waits for dgraph to become healthy
func (d *DgraphDocker) waitForHealthy(ctx context.Context, timeout time.Duration) error {
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
// Check if alpha is healthy by checking docker health status
cmd := exec.CommandContext(
ctx,
"docker",
"inspect",
"--format={{.State.Health.Status}}",
"orly-benchmark-dgraph-alpha",
)
output, err := cmd.Output()
if err == nil && string(output) == "healthy\n" {
// Additional short wait to ensure full readiness
time.Sleep(2 * time.Second)
return nil
}
select {
case <-ctx.Done():
return ctx.Err()
case <-time.After(2 * time.Second):
// Continue waiting
}
}
return fmt.Errorf("dgraph failed to become healthy within %v", timeout)
}
// Stop stops and removes the dgraph Docker containers
func (d *DgraphDocker) Stop() error {
if !d.running {
// Try to stop anyway in case of untracked state
cmd := exec.Command(
"docker-compose",
"-f", d.composeFile,
"-p", d.projectName,
"down", "-v",
)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
_ = cmd.Run() // Ignore errors
return nil
}
fmt.Println("Stopping dgraph Docker containers...")
cmd := exec.Command(
"docker-compose",
"-f", d.composeFile,
"-p", d.projectName,
"down", "-v",
)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to stop dgraph containers: %w", err)
}
d.running = false
fmt.Println("Dgraph containers stopped")
return nil
}
// GetGRPCEndpoint returns the dgraph gRPC endpoint
func (d *DgraphDocker) GetGRPCEndpoint() string {
return "localhost:9080"
}
// IsRunning returns whether dgraph is running
func (d *DgraphDocker) IsRunning() bool {
return d.running
}
// Logs returns the logs from dgraph containers
func (d *DgraphDocker) Logs() error {
cmd := exec.Command(
"docker-compose",
"-f", d.composeFile,
"-p", d.projectName,
"logs",
)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}

View File

@@ -0,0 +1,44 @@
version: "3.9"
services:
dgraph-zero:
image: dgraph/dgraph:v23.1.0
container_name: orly-benchmark-dgraph-zero
working_dir: /data/zero
ports:
- "5080:5080"
- "6080:6080"
command: dgraph zero --my=dgraph-zero:5080
networks:
- orly-benchmark
healthcheck:
test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
interval: 5s
timeout: 3s
retries: 3
start_period: 5s
dgraph-alpha:
image: dgraph/dgraph:v23.1.0
container_name: orly-benchmark-dgraph-alpha
working_dir: /data/alpha
ports:
- "8080:8080"
- "9080:9080"
command: dgraph alpha --my=dgraph-alpha:7080 --zero=dgraph-zero:5080 --security whitelist=0.0.0.0/0
networks:
- orly-benchmark
depends_on:
dgraph-zero:
condition: service_healthy
healthcheck:
test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
interval: 5s
timeout: 3s
retries: 6
start_period: 10s
networks:
orly-benchmark:
name: orly-benchmark-network
driver: bridge

View File

@@ -0,0 +1,65 @@
version: "3.8"
services:
# Next.orly.dev relay with profiling enabled
next-orly:
build:
context: ../..
dockerfile: cmd/benchmark/Dockerfile.next-orly
container_name: benchmark-next-orly-profile
environment:
- ORLY_DATA_DIR=/data
- ORLY_LISTEN=0.0.0.0
- ORLY_PORT=8080
- ORLY_LOG_LEVEL=info
- ORLY_PPROF=cpu
- ORLY_PPROF_HTTP=true
- ORLY_PPROF_PATH=/profiles
- ORLY_DB_BLOCK_CACHE_MB=512
- ORLY_DB_INDEX_CACHE_MB=256
volumes:
- ./data/next-orly:/data
- ./profiles:/profiles
ports:
- "8001:8080"
- "6060:6060" # pprof HTTP endpoint
networks:
- benchmark-net
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/"]
interval: 10s
timeout: 5s
retries: 5
start_period: 60s # Longer startup period
# Benchmark runner - only test next-orly
benchmark-runner:
build:
context: ../..
dockerfile: cmd/benchmark/Dockerfile.benchmark
container_name: benchmark-runner-profile
depends_on:
next-orly:
condition: service_healthy
environment:
- BENCHMARK_TARGETS=next-orly:8080
- BENCHMARK_EVENTS=50000
- BENCHMARK_WORKERS=24
- BENCHMARK_DURATION=60s
volumes:
- ./reports:/reports
networks:
- benchmark-net
command: >
sh -c "
echo 'Waiting for ORLY to be ready (healthcheck)...' &&
sleep 5 &&
echo 'Starting benchmark tests...' &&
/app/benchmark-runner --output-dir=/reports &&
echo 'Benchmark complete - triggering shutdown...' &&
exit 0
"
networks:
benchmark-net:
driver: bridge

View File

@@ -1,34 +1,103 @@
version: "3.8"
services:
# Next.orly.dev relay (this repository)
next-orly:
# Next.orly.dev relay with Badger (this repository)
next-orly-badger:
build:
context: ../..
dockerfile: cmd/benchmark/Dockerfile.next-orly
container_name: benchmark-next-orly
container_name: benchmark-next-orly-badger
environment:
- ORLY_DATA_DIR=/data
- ORLY_LISTEN=0.0.0.0
- ORLY_PORT=8080
- ORLY_LOG_LEVEL=off
- ORLY_DB_TYPE=badger
volumes:
- ./data/next-orly:/data
- ./data/next-orly-badger:/data
ports:
- "8001:8080"
networks:
- benchmark-net
healthcheck:
test:
[
"CMD-SHELL",
"code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null",
]
test: ["CMD", "curl", "-f", "http://localhost:8080/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
# Next.orly.dev relay with DGraph (this repository)
next-orly-dgraph:
build:
context: ../..
dockerfile: cmd/benchmark/Dockerfile.next-orly
container_name: benchmark-next-orly-dgraph
environment:
- ORLY_DATA_DIR=/data
- ORLY_LISTEN=0.0.0.0
- ORLY_PORT=8080
- ORLY_LOG_LEVEL=off
- ORLY_DB_TYPE=dgraph
- ORLY_DGRAPH_URL=dgraph-alpha:9080
volumes:
- ./data/next-orly-dgraph:/data
ports:
- "8007:8080"
networks:
- benchmark-net
depends_on:
dgraph-alpha:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
# DGraph Zero - cluster coordinator
dgraph-zero:
image: dgraph/dgraph:v23.1.0
container_name: benchmark-dgraph-zero
working_dir: /data/zero
ports:
- "5080:5080"
- "6080:6080"
volumes:
- ./data/dgraph-zero:/data
command: dgraph zero --my=dgraph-zero:5080
networks:
- benchmark-net
healthcheck:
test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
interval: 5s
timeout: 3s
retries: 3
start_period: 5s
# DGraph Alpha - data node
dgraph-alpha:
image: dgraph/dgraph:v23.1.0
container_name: benchmark-dgraph-alpha
working_dir: /data/alpha
ports:
- "8088:8080"
- "9080:9080"
volumes:
- ./data/dgraph-alpha:/data
command: dgraph alpha --my=dgraph-alpha:7080 --zero=dgraph-zero:5080 --security whitelist=0.0.0.0/0
networks:
- benchmark-net
depends_on:
dgraph-zero:
condition: service_healthy
healthcheck:
test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
interval: 5s
timeout: 3s
retries: 6
start_period: 10s
# Khatru with SQLite
khatru-sqlite:
build:
@@ -45,11 +114,7 @@ services:
networks:
- benchmark-net
healthcheck:
test:
[
"CMD-SHELL",
"wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null",
]
test: ["CMD-SHELL", "wget -q -O- http://localhost:3334 || exit 0"]
interval: 30s
timeout: 10s
retries: 3
@@ -71,11 +136,7 @@ services:
networks:
- benchmark-net
healthcheck:
test:
[
"CMD-SHELL",
"wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null",
]
test: ["CMD-SHELL", "wget -q -O- http://localhost:3334 || exit 0"]
interval: 30s
timeout: 10s
retries: 3
@@ -99,11 +160,7 @@ services:
postgres:
condition: service_healthy
healthcheck:
test:
[
"CMD-SHELL",
"wget --quiet --server-response --tries=1 http://localhost:7447 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null",
]
test: ["CMD-SHELL", "wget -q -O- http://localhost:7447 || exit 0"]
interval: 30s
timeout: 10s
retries: 3
@@ -114,7 +171,7 @@ services:
image: ghcr.io/hoytech/strfry:latest
container_name: benchmark-strfry
environment:
- STRFRY_DB_PATH=/data/strfry.lmdb
- STRFRY_DB_PATH=/data/strfry-db
- STRFRY_RELAY_PORT=8080
volumes:
- ./data/strfry:/data
@@ -123,12 +180,10 @@ services:
- "8005:8080"
networks:
- benchmark-net
entrypoint: /bin/sh
command: -c "mkdir -p /data/strfry-db && exec /app/strfry relay"
healthcheck:
test:
[
"CMD-SHELL",
"wget --quiet --server-response --tries=1 http://127.0.0.1:8080 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404|426)' >/dev/null",
]
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://127.0.0.1:8080"]
interval: 30s
timeout: 10s
retries: 3
@@ -150,15 +205,7 @@ services:
networks:
- benchmark-net
healthcheck:
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost:8080",
]
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
interval: 30s
timeout: 10s
retries: 3
@@ -171,7 +218,9 @@ services:
dockerfile: cmd/benchmark/Dockerfile.benchmark
container_name: benchmark-runner
depends_on:
next-orly:
next-orly-badger:
condition: service_healthy
next-orly-dgraph:
condition: service_healthy
khatru-sqlite:
condition: service_healthy
@@ -184,9 +233,9 @@ services:
nostr-rs-relay:
condition: service_healthy
environment:
- BENCHMARK_TARGETS=next-orly:8080,khatru-sqlite:3334,khatru-badger:3334,relayer-basic:7447,strfry:8080,nostr-rs-relay:8080
- BENCHMARK_EVENTS=10000
- BENCHMARK_WORKERS=8
- BENCHMARK_TARGETS=next-orly-badger:8080,next-orly-dgraph:8080,khatru-sqlite:3334,khatru-badger:3334,relayer-basic:7447,strfry:8080,nostr-rs-relay:8080
- BENCHMARK_EVENTS=50000
- BENCHMARK_WORKERS=24
- BENCHMARK_DURATION=60s
volumes:
- ./reports:/reports
@@ -197,7 +246,9 @@ services:
echo 'Waiting for all relays to be ready...' &&
sleep 30 &&
echo 'Starting benchmark tests...' &&
/app/benchmark-runner --output-dir=/reports
/app/benchmark-runner --output-dir=/reports &&
echo 'Benchmark complete - triggering shutdown...' &&
exit 0
"
# PostgreSQL for relayer-basic

View File

@@ -0,0 +1,257 @@
package main
import (
"bufio"
"encoding/json"
"fmt"
"math"
"math/rand"
"os"
"path/filepath"
"time"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/encoders/timestamp"
"next.orly.dev/pkg/interfaces/signer/p8k"
)
// EventStream manages disk-based event generation to avoid memory bloat
type EventStream struct {
baseDir string
count int
chunkSize int
rng *rand.Rand
}
// NewEventStream creates a new event stream that stores events on disk
func NewEventStream(baseDir string, count int) (*EventStream, error) {
// Create events directory
eventsDir := filepath.Join(baseDir, "events")
if err := os.MkdirAll(eventsDir, 0755); err != nil {
return nil, fmt.Errorf("failed to create events directory: %w", err)
}
return &EventStream{
baseDir: eventsDir,
count: count,
chunkSize: 1000, // Store 1000 events per file to balance I/O
rng: rand.New(rand.NewSource(time.Now().UnixNano())),
}, nil
}
// Generate creates all events and stores them in chunk files
func (es *EventStream) Generate() error {
numChunks := (es.count + es.chunkSize - 1) / es.chunkSize
for chunk := 0; chunk < numChunks; chunk++ {
chunkFile := filepath.Join(es.baseDir, fmt.Sprintf("chunk_%04d.jsonl", chunk))
f, err := os.Create(chunkFile)
if err != nil {
return fmt.Errorf("failed to create chunk file %s: %w", chunkFile, err)
}
writer := bufio.NewWriter(f)
startIdx := chunk * es.chunkSize
endIdx := min(startIdx+es.chunkSize, es.count)
for i := startIdx; i < endIdx; i++ {
ev, err := es.generateEvent(i)
if err != nil {
f.Close()
return fmt.Errorf("failed to generate event %d: %w", i, err)
}
// Marshal event to JSON
eventJSON, err := json.Marshal(ev)
if err != nil {
f.Close()
return fmt.Errorf("failed to marshal event %d: %w", i, err)
}
// Write JSON line
if _, err := writer.Write(eventJSON); err != nil {
f.Close()
return fmt.Errorf("failed to write event %d: %w", i, err)
}
if _, err := writer.WriteString("\n"); err != nil {
f.Close()
return fmt.Errorf("failed to write newline after event %d: %w", i, err)
}
}
if err := writer.Flush(); err != nil {
f.Close()
return fmt.Errorf("failed to flush chunk file %s: %w", chunkFile, err)
}
if err := f.Close(); err != nil {
return fmt.Errorf("failed to close chunk file %s: %w", chunkFile, err)
}
if (chunk+1)%10 == 0 || chunk == numChunks-1 {
fmt.Printf(" Generated %d/%d events (%.1f%%)\n",
endIdx, es.count, float64(endIdx)/float64(es.count)*100)
}
}
return nil
}
// generateEvent creates a single event with realistic size distribution
func (es *EventStream) generateEvent(index int) (*event.E, error) {
// Create signer for this event
keys, err := p8k.New()
if err != nil {
return nil, fmt.Errorf("failed to create signer: %w", err)
}
if err := keys.Generate(); err != nil {
return nil, fmt.Errorf("failed to generate keys: %w", err)
}
ev := event.New()
ev.Kind = 1 // Text note
ev.CreatedAt = timestamp.Now().I64()
// Add some tags for realism
numTags := es.rng.Intn(5)
tags := make([]*tag.T, 0, numTags)
for i := 0; i < numTags; i++ {
tags = append(tags, tag.NewFromBytesSlice(
[]byte("t"),
[]byte(fmt.Sprintf("tag%d", es.rng.Intn(100))),
))
}
ev.Tags = tag.NewS(tags...)
// Generate content with log-distributed size
contentSize := es.generateLogDistributedSize()
ev.Content = []byte(es.generateRandomContent(contentSize))
// Sign the event
if err := ev.Sign(keys); err != nil {
return nil, fmt.Errorf("failed to sign event: %w", err)
}
return ev, nil
}
// generateLogDistributedSize generates sizes following a power law distribution
// This creates realistic size distribution:
// - Most events are small (< 1KB)
// - Some events are medium (1-10KB)
// - Few events are large (10-100KB)
func (es *EventStream) generateLogDistributedSize() int {
// Use power law with exponent 4.0 for strong skew toward small sizes
const powerExponent = 4.0
uniform := es.rng.Float64()
skewed := math.Pow(uniform, powerExponent)
// Scale to max size of 100KB
const maxSize = 100 * 1024
size := int(skewed * maxSize)
// Ensure minimum size of 10 bytes
if size < 10 {
size = 10
}
return size
}
// generateRandomContent creates random text content of specified size
func (es *EventStream) generateRandomContent(size int) string {
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 \n"
content := make([]byte, size)
for i := range content {
content[i] = charset[es.rng.Intn(len(charset))]
}
return string(content)
}
// GetEventChannel returns a channel that streams events from disk
// bufferSize controls memory usage - larger buffers improve throughput but use more memory
func (es *EventStream) GetEventChannel(bufferSize int) (<-chan *event.E, <-chan error) {
eventChan := make(chan *event.E, bufferSize)
errChan := make(chan error, 1)
go func() {
defer close(eventChan)
defer close(errChan)
numChunks := (es.count + es.chunkSize - 1) / es.chunkSize
for chunk := 0; chunk < numChunks; chunk++ {
chunkFile := filepath.Join(es.baseDir, fmt.Sprintf("chunk_%04d.jsonl", chunk))
f, err := os.Open(chunkFile)
if err != nil {
errChan <- fmt.Errorf("failed to open chunk file %s: %w", chunkFile, err)
return
}
scanner := bufio.NewScanner(f)
// Increase buffer size for large events
buf := make([]byte, 0, 64*1024)
scanner.Buffer(buf, 1024*1024) // Max 1MB per line
for scanner.Scan() {
var ev event.E
if err := json.Unmarshal(scanner.Bytes(), &ev); err != nil {
f.Close()
errChan <- fmt.Errorf("failed to unmarshal event: %w", err)
return
}
eventChan <- &ev
}
if err := scanner.Err(); err != nil {
f.Close()
errChan <- fmt.Errorf("error reading chunk file %s: %w", chunkFile, err)
return
}
f.Close()
}
}()
return eventChan, errChan
}
// ForEach iterates over all events without loading them all into memory
func (es *EventStream) ForEach(fn func(*event.E) error) error {
numChunks := (es.count + es.chunkSize - 1) / es.chunkSize
for chunk := 0; chunk < numChunks; chunk++ {
chunkFile := filepath.Join(es.baseDir, fmt.Sprintf("chunk_%04d.jsonl", chunk))
f, err := os.Open(chunkFile)
if err != nil {
return fmt.Errorf("failed to open chunk file %s: %w", chunkFile, err)
}
scanner := bufio.NewScanner(f)
buf := make([]byte, 0, 64*1024)
scanner.Buffer(buf, 1024*1024)
for scanner.Scan() {
var ev event.E
if err := json.Unmarshal(scanner.Bytes(), &ev); err != nil {
f.Close()
return fmt.Errorf("failed to unmarshal event: %w", err)
}
if err := fn(&ev); err != nil {
f.Close()
return err
}
}
if err := scanner.Err(); err != nil {
f.Close()
return fmt.Errorf("error reading chunk file %s: %w", chunkFile, err)
}
f.Close()
}
return nil
}

View File

@@ -0,0 +1,173 @@
package main
import (
"bufio"
"encoding/binary"
"fmt"
"os"
"path/filepath"
"sort"
"sync"
"time"
)
// LatencyRecorder writes latency measurements to disk to avoid memory bloat
type LatencyRecorder struct {
file *os.File
writer *bufio.Writer
mu sync.Mutex
count int64
}
// LatencyStats contains calculated latency statistics
type LatencyStats struct {
Avg time.Duration
P90 time.Duration
P95 time.Duration
P99 time.Duration
Bottom10 time.Duration
Count int64
}
// NewLatencyRecorder creates a new latency recorder that writes to disk
func NewLatencyRecorder(baseDir string, testName string) (*LatencyRecorder, error) {
latencyFile := filepath.Join(baseDir, fmt.Sprintf("latency_%s.bin", testName))
f, err := os.Create(latencyFile)
if err != nil {
return nil, fmt.Errorf("failed to create latency file: %w", err)
}
return &LatencyRecorder{
file: f,
writer: bufio.NewWriter(f),
count: 0,
}, nil
}
// Record writes a latency measurement to disk (8 bytes per measurement)
func (lr *LatencyRecorder) Record(latency time.Duration) error {
lr.mu.Lock()
defer lr.mu.Unlock()
// Write latency as 8-byte value (int64 nanoseconds)
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, uint64(latency.Nanoseconds()))
if _, err := lr.writer.Write(buf); err != nil {
return fmt.Errorf("failed to write latency: %w", err)
}
lr.count++
return nil
}
// Close flushes and closes the latency file
func (lr *LatencyRecorder) Close() error {
lr.mu.Lock()
defer lr.mu.Unlock()
if err := lr.writer.Flush(); err != nil {
return fmt.Errorf("failed to flush latency file: %w", err)
}
if err := lr.file.Close(); err != nil {
return fmt.Errorf("failed to close latency file: %w", err)
}
return nil
}
// CalculateStats reads all latencies from disk, sorts them, and calculates statistics
// This is done on-demand to avoid keeping all latencies in memory during the test
func (lr *LatencyRecorder) CalculateStats() (*LatencyStats, error) {
lr.mu.Lock()
filePath := lr.file.Name()
count := lr.count
lr.mu.Unlock()
// If no measurements, return zeros
if count == 0 {
return &LatencyStats{
Avg: 0,
P90: 0,
P95: 0,
P99: 0,
Bottom10: 0,
Count: 0,
}, nil
}
// Open file for reading
f, err := os.Open(filePath)
if err != nil {
return nil, fmt.Errorf("failed to open latency file for reading: %w", err)
}
defer f.Close()
// Read all latencies into memory temporarily for sorting
latencies := make([]time.Duration, 0, count)
buf := make([]byte, 8)
reader := bufio.NewReader(f)
for {
n, err := reader.Read(buf)
if err != nil {
if err.Error() == "EOF" {
break
}
return nil, fmt.Errorf("failed to read latency data: %w", err)
}
if n != 8 {
break
}
nanos := binary.LittleEndian.Uint64(buf)
latencies = append(latencies, time.Duration(nanos))
}
// Check if we actually got any latencies
if len(latencies) == 0 {
return &LatencyStats{
Avg: 0,
P90: 0,
P95: 0,
P99: 0,
Bottom10: 0,
Count: 0,
}, nil
}
// Sort for percentile calculation
sort.Slice(latencies, func(i, j int) bool {
return latencies[i] < latencies[j]
})
// Calculate statistics
stats := &LatencyStats{
Count: int64(len(latencies)),
}
// Average
var sum time.Duration
for _, lat := range latencies {
sum += lat
}
stats.Avg = sum / time.Duration(len(latencies))
// Percentiles
stats.P90 = latencies[int(float64(len(latencies))*0.90)]
stats.P95 = latencies[int(float64(len(latencies))*0.95)]
stats.P99 = latencies[int(float64(len(latencies))*0.99)]
// Bottom 10% average
bottom10Count := int(float64(len(latencies)) * 0.10)
if bottom10Count > 0 {
var bottom10Sum time.Duration
for i := 0; i < bottom10Count; i++ {
bottom10Sum += latencies[i]
}
stats.Bottom10 = bottom10Sum / time.Duration(bottom10Count)
}
return stats, nil
}

View File

@@ -36,6 +36,9 @@ type BenchmarkConfig struct {
RelayURL string
NetWorkers int
NetRate int // events/sec per worker
// Backend selection
UseDgraph bool
}
type BenchmarkResult struct {
@@ -55,10 +58,11 @@ type BenchmarkResult struct {
}
type Benchmark struct {
config *BenchmarkConfig
db *database.D
results []*BenchmarkResult
mu sync.RWMutex
config *BenchmarkConfig
db *database.D
eventStream *EventStream
results []*BenchmarkResult
mu sync.RWMutex
}
func main() {
@@ -71,7 +75,14 @@ func main() {
return
}
fmt.Printf("Starting Nostr Relay Benchmark\n")
if config.UseDgraph {
// Run dgraph benchmark
runDgraphBenchmark(config)
return
}
// Run standard Badger benchmark
fmt.Printf("Starting Nostr Relay Benchmark (Badger Backend)\n")
fmt.Printf("Data Directory: %s\n", config.DataDir)
fmt.Printf(
"Events: %d, Workers: %d, Duration: %v\n",
@@ -89,6 +100,28 @@ func main() {
benchmark.GenerateAsciidocReport()
}
func runDgraphBenchmark(config *BenchmarkConfig) {
fmt.Printf("Starting Nostr Relay Benchmark (Dgraph Backend)\n")
fmt.Printf("Data Directory: %s\n", config.DataDir)
fmt.Printf(
"Events: %d, Workers: %d\n",
config.NumEvents, config.ConcurrentWorkers,
)
dgraphBench, err := NewDgraphBenchmark(config)
if err != nil {
log.Fatalf("Failed to create dgraph benchmark: %v", err)
}
defer dgraphBench.Close()
// Run dgraph benchmark suite
dgraphBench.RunSuite()
// Generate reports
dgraphBench.GenerateReport()
dgraphBench.GenerateAsciidocReport()
}
func parseFlags() *BenchmarkConfig {
config := &BenchmarkConfig{}
@@ -124,6 +157,12 @@ func parseFlags() *BenchmarkConfig {
)
flag.IntVar(&config.NetRate, "net-rate", 20, "Events per second per worker")
// Backend selection
flag.BoolVar(
&config.UseDgraph, "dgraph", false,
"Use dgraph backend (requires Docker)",
)
flag.Parse()
return config
}
@@ -286,15 +325,28 @@ func NewBenchmark(config *BenchmarkConfig) *Benchmark {
ctx := context.Background()
cancel := func() {}
db, err := database.New(ctx, cancel, config.DataDir, "info")
db, err := database.New(ctx, cancel, config.DataDir, "warn")
if err != nil {
log.Fatalf("Failed to create database: %v", err)
}
// Create event stream (stores events on disk to avoid memory bloat)
eventStream, err := NewEventStream(config.DataDir, config.NumEvents)
if err != nil {
log.Fatalf("Failed to create event stream: %v", err)
}
// Pre-generate all events to disk
fmt.Printf("Pre-generating %d events to disk to avoid memory bloat...\n", config.NumEvents)
if err := eventStream.Generate(); err != nil {
log.Fatalf("Failed to generate events: %v", err)
}
b := &Benchmark{
config: config,
db: db,
results: make([]*BenchmarkResult, 0),
config: config,
db: db,
eventStream: eventStream,
results: make([]*BenchmarkResult, 0),
}
// Trigger compaction/GC before starting tests
@@ -309,31 +361,49 @@ func (b *Benchmark) Close() {
}
}
// RunSuite runs the three tests with a 10s pause between them and repeats the
// set twice with a 10s pause between rounds.
// RunSuite runs the memory-optimized tests (Peak Throughput and Burst Pattern only)
func (b *Benchmark) RunSuite() {
for round := 1; round <= 2; round++ {
fmt.Printf("\n=== Starting test round %d/2 ===\n", round)
fmt.Printf("RunPeakThroughputTest..\n")
b.RunPeakThroughputTest()
time.Sleep(10 * time.Second)
fmt.Printf("RunBurstPatternTest..\n")
b.RunBurstPatternTest()
time.Sleep(10 * time.Second)
fmt.Printf("RunMixedReadWriteTest..\n")
b.RunMixedReadWriteTest()
time.Sleep(10 * time.Second)
fmt.Printf("RunQueryTest..\n")
b.RunQueryTest()
time.Sleep(10 * time.Second)
fmt.Printf("RunConcurrentQueryStoreTest..\n")
b.RunConcurrentQueryStoreTest()
if round < 2 {
fmt.Printf("\nPausing 10s before next round...\n")
time.Sleep(10 * time.Second)
}
fmt.Printf("\n=== Test round completed ===\n\n")
fmt.Printf("\n=== Running Memory-Optimized Tests ===\n")
fmt.Printf("RunPeakThroughputTest..\n")
b.RunPeakThroughputTest()
// Clear database between tests to avoid duplicate event issues
fmt.Printf("\nClearing database for next test...\n")
if err := b.db.Close(); err != nil {
log.Printf("Error closing database: %v", err)
}
time.Sleep(1 * time.Second)
// Remove database files (.sst, .vlog, MANIFEST, etc.)
// Badger stores files directly in the data directory
matches, err := filepath.Glob(filepath.Join(b.config.DataDir, "*.sst"))
if err == nil {
for _, f := range matches {
os.Remove(f)
}
}
matches, err = filepath.Glob(filepath.Join(b.config.DataDir, "*.vlog"))
if err == nil {
for _, f := range matches {
os.Remove(f)
}
}
os.Remove(filepath.Join(b.config.DataDir, "MANIFEST"))
os.Remove(filepath.Join(b.config.DataDir, "DISCARD"))
os.Remove(filepath.Join(b.config.DataDir, "KEYREGISTRY"))
// Create fresh database
ctx := context.Background()
cancel := func() {}
db, err := database.New(ctx, cancel, b.config.DataDir, "warn")
if err != nil {
log.Fatalf("Failed to create fresh database: %v", err)
}
b.db = db
fmt.Printf("RunBurstPatternTest..\n")
b.RunBurstPatternTest()
}
// compactDatabase triggers a Badger value log GC before starting tests.
@@ -348,50 +418,71 @@ func (b *Benchmark) compactDatabase() {
func (b *Benchmark) RunPeakThroughputTest() {
fmt.Println("\n=== Peak Throughput Test ===")
// Create latency recorder (writes to disk, not memory)
latencyRecorder, err := NewLatencyRecorder(b.config.DataDir, "peak_throughput")
if err != nil {
log.Fatalf("Failed to create latency recorder: %v", err)
}
start := time.Now()
var wg sync.WaitGroup
var totalEvents int64
var errors []error
var latencies []time.Duration
var errorCount int64
var mu sync.Mutex
events := b.generateEvents(b.config.NumEvents)
eventChan := make(chan *event.E, len(events))
// Fill event channel
for _, ev := range events {
eventChan <- ev
}
close(eventChan)
// Stream events from disk with reasonable buffer
eventChan, errChan := b.eventStream.GetEventChannel(1000)
// Start workers
ctx := context.Background()
for i := 0; i < b.config.ConcurrentWorkers; i++ {
wg.Add(1)
go func(workerID int) {
defer wg.Done()
ctx := context.Background()
for ev := range eventChan {
eventStart := time.Now()
_, err := b.db.SaveEvent(ctx, ev)
latency := time.Since(eventStart)
mu.Lock()
if err != nil {
errors = append(errors, err)
errorCount++
} else {
totalEvents++
latencies = append(latencies, latency)
if err := latencyRecorder.Record(latency); err != nil {
log.Printf("Failed to record latency: %v", err)
}
}
mu.Unlock()
}
}(i)
}
// Check for streaming errors
go func() {
for err := range errChan {
if err != nil {
log.Printf("Event stream error: %v", err)
}
}
}()
wg.Wait()
duration := time.Since(start)
// Flush latency data to disk before calculating stats
if err := latencyRecorder.Close(); err != nil {
log.Printf("Failed to close latency recorder: %v", err)
}
// Calculate statistics from disk
latencyStats, err := latencyRecorder.CalculateStats()
if err != nil {
log.Printf("Failed to calculate latency stats: %v", err)
latencyStats = &LatencyStats{}
}
// Calculate metrics
result := &BenchmarkResult{
TestName: "Peak Throughput",
@@ -400,29 +491,22 @@ func (b *Benchmark) RunPeakThroughputTest() {
EventsPerSecond: float64(totalEvents) / duration.Seconds(),
ConcurrentWorkers: b.config.ConcurrentWorkers,
MemoryUsed: getMemUsage(),
}
if len(latencies) > 0 {
result.AvgLatency = calculateAvgLatency(latencies)
result.P90Latency = calculatePercentileLatency(latencies, 0.90)
result.P95Latency = calculatePercentileLatency(latencies, 0.95)
result.P99Latency = calculatePercentileLatency(latencies, 0.99)
result.Bottom10Avg = calculateBottom10Avg(latencies)
AvgLatency: latencyStats.Avg,
P90Latency: latencyStats.P90,
P95Latency: latencyStats.P95,
P99Latency: latencyStats.P99,
Bottom10Avg: latencyStats.Bottom10,
}
result.SuccessRate = float64(totalEvents) / float64(b.config.NumEvents) * 100
for _, err := range errors {
result.Errors = append(result.Errors, err.Error())
}
b.mu.Lock()
b.results = append(b.results, result)
b.mu.Unlock()
fmt.Printf(
"Events saved: %d/%d (%.1f%%)\n", totalEvents, b.config.NumEvents,
result.SuccessRate,
"Events saved: %d/%d (%.1f%%), errors: %d\n",
totalEvents, b.config.NumEvents, result.SuccessRate, errorCount,
)
fmt.Printf("Duration: %v\n", duration)
fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
@@ -436,14 +520,28 @@ func (b *Benchmark) RunPeakThroughputTest() {
func (b *Benchmark) RunBurstPatternTest() {
fmt.Println("\n=== Burst Pattern Test ===")
// Create latency recorder (writes to disk, not memory)
latencyRecorder, err := NewLatencyRecorder(b.config.DataDir, "burst_pattern")
if err != nil {
log.Fatalf("Failed to create latency recorder: %v", err)
}
start := time.Now()
var totalEvents int64
var errors []error
var latencies []time.Duration
var errorCount int64
var mu sync.Mutex
// Generate events for burst pattern
events := b.generateEvents(b.config.NumEvents)
// Stream events from disk
eventChan, errChan := b.eventStream.GetEventChannel(500)
// Check for streaming errors
go func() {
for err := range errChan {
if err != nil {
log.Printf("Event stream error: %v", err)
}
}
}()
// Simulate burst pattern: high activity periods followed by quiet periods
burstSize := b.config.NumEvents / 10 // 10% of events in each burst
@@ -451,37 +549,51 @@ func (b *Benchmark) RunBurstPatternTest() {
burstPeriod := 100 * time.Millisecond
ctx := context.Background()
eventIndex := 0
var eventIndex int64
for eventIndex < len(events) && time.Since(start) < b.config.TestDuration {
// Burst period - send events rapidly
burstStart := time.Now()
var wg sync.WaitGroup
for i := 0; i < burstSize && eventIndex < len(events); i++ {
wg.Add(1)
go func(ev *event.E) {
defer wg.Done()
// Start persistent worker pool (prevents goroutine explosion)
numWorkers := b.config.ConcurrentWorkers
eventQueue := make(chan *event.E, numWorkers*4)
var wg sync.WaitGroup
for w := 0; w < numWorkers; w++ {
wg.Add(1)
go func() {
defer wg.Done()
for ev := range eventQueue {
eventStart := time.Now()
_, err := b.db.SaveEvent(ctx, ev)
latency := time.Since(eventStart)
mu.Lock()
if err != nil {
errors = append(errors, err)
errorCount++
} else {
totalEvents++
latencies = append(latencies, latency)
// Record latency to disk instead of keeping in memory
if err := latencyRecorder.Record(latency); err != nil {
log.Printf("Failed to record latency: %v", err)
}
}
mu.Unlock()
}(events[eventIndex])
}
}()
}
for int(eventIndex) < b.config.NumEvents && time.Since(start) < b.config.TestDuration {
// Burst period - send events rapidly
burstStart := time.Now()
for i := 0; i < burstSize && int(eventIndex) < b.config.NumEvents; i++ {
ev, ok := <-eventChan
if !ok {
break
}
eventQueue <- ev
eventIndex++
time.Sleep(burstPeriod / time.Duration(burstSize))
}
wg.Wait()
fmt.Printf(
"Burst completed: %d events in %v\n", burstSize,
time.Since(burstStart),
@@ -491,8 +603,23 @@ func (b *Benchmark) RunBurstPatternTest() {
time.Sleep(quietPeriod)
}
close(eventQueue)
wg.Wait()
duration := time.Since(start)
// Flush latency data to disk before calculating stats
if err := latencyRecorder.Close(); err != nil {
log.Printf("Failed to close latency recorder: %v", err)
}
// Calculate statistics from disk
latencyStats, err := latencyRecorder.CalculateStats()
if err != nil {
log.Printf("Failed to calculate latency stats: %v", err)
latencyStats = &LatencyStats{}
}
// Calculate metrics
result := &BenchmarkResult{
TestName: "Burst Pattern",
@@ -501,27 +628,23 @@ func (b *Benchmark) RunBurstPatternTest() {
EventsPerSecond: float64(totalEvents) / duration.Seconds(),
ConcurrentWorkers: b.config.ConcurrentWorkers,
MemoryUsed: getMemUsage(),
}
if len(latencies) > 0 {
result.AvgLatency = calculateAvgLatency(latencies)
result.P90Latency = calculatePercentileLatency(latencies, 0.90)
result.P95Latency = calculatePercentileLatency(latencies, 0.95)
result.P99Latency = calculatePercentileLatency(latencies, 0.99)
result.Bottom10Avg = calculateBottom10Avg(latencies)
AvgLatency: latencyStats.Avg,
P90Latency: latencyStats.P90,
P95Latency: latencyStats.P95,
P99Latency: latencyStats.P99,
Bottom10Avg: latencyStats.Bottom10,
}
result.SuccessRate = float64(totalEvents) / float64(eventIndex) * 100
for _, err := range errors {
result.Errors = append(result.Errors, err.Error())
}
b.mu.Lock()
b.results = append(b.results, result)
b.mu.Unlock()
fmt.Printf("Burst test completed: %d events in %v\n", totalEvents, duration)
fmt.Printf(
"Burst test completed: %d events in %v, errors: %d\n",
totalEvents, duration, errorCount,
)
fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
}
@@ -974,24 +1097,75 @@ func (b *Benchmark) generateEvents(count int) []*event.E {
log.Fatalf("Failed to generate keys for benchmark events: %v", err)
}
// Define size distribution - from minimal to 500KB
// We'll create a logarithmic distribution to test various sizes
sizeBuckets := []int{
0, // Minimal: empty content, no tags
10, // Tiny: ~10 bytes
100, // Small: ~100 bytes
1024, // 1 KB
10 * 1024, // 10 KB
50 * 1024, // 50 KB
100 * 1024, // 100 KB
250 * 1024, // 250 KB
500 * 1024, // 500 KB (max realistic size for Nostr)
}
for i := 0; i < count; i++ {
ev := event.New()
ev.CreatedAt = now.I64()
ev.Kind = kind.TextNote.K
ev.Content = []byte(fmt.Sprintf(
"This is test event number %d with some content", i,
))
// Create tags using NewFromBytesSlice
ev.Tags = tag.NewS(
tag.NewFromBytesSlice([]byte("t"), []byte("benchmark")),
tag.NewFromBytesSlice(
[]byte("e"), []byte(fmt.Sprintf("ref_%d", i%50)),
),
)
// Distribute events across size buckets
bucketIndex := i % len(sizeBuckets)
targetSize := sizeBuckets[bucketIndex]
// Properly sign the event instead of generating fake signatures
// Generate content based on target size
if targetSize == 0 {
// Minimal event: empty content, no tags
ev.Content = []byte{}
ev.Tags = tag.NewS() // Empty tag set
} else if targetSize < 1024 {
// Small events: simple text content
ev.Content = []byte(fmt.Sprintf(
"Event %d - Size bucket: %d bytes. %s",
i, targetSize, strings.Repeat("x", max(0, targetSize-50)),
))
// Add minimal tags
ev.Tags = tag.NewS(
tag.NewFromBytesSlice([]byte("t"), []byte("benchmark")),
)
} else {
// Larger events: fill with repeated content to reach target size
// Account for JSON overhead (~200 bytes for event structure)
contentSize := targetSize - 200
if contentSize < 0 {
contentSize = targetSize
}
// Build content with repeated pattern
pattern := fmt.Sprintf("Event %d, target size %d bytes. ", i, targetSize)
repeatCount := contentSize / len(pattern)
if repeatCount < 1 {
repeatCount = 1
}
ev.Content = []byte(strings.Repeat(pattern, repeatCount))
// Add some tags (contributes to total size)
numTags := min(5, max(1, targetSize/10000)) // More tags for larger events
tags := make([]*tag.T, 0, numTags+1)
tags = append(tags, tag.NewFromBytesSlice([]byte("t"), []byte("benchmark")))
for j := 0; j < numTags; j++ {
tags = append(tags, tag.NewFromBytesSlice(
[]byte("e"),
[]byte(fmt.Sprintf("ref_%d_%d", i, j)),
))
}
ev.Tags = tag.NewS(tags...)
}
// Properly sign the event
if err := ev.Sign(keys); err != nil {
log.Fatalf("Failed to sign event %d: %v", i, err)
}
@@ -999,9 +1173,54 @@ func (b *Benchmark) generateEvents(count int) []*event.E {
events[i] = ev
}
// Log size distribution summary
fmt.Printf("\nGenerated %d events with size distribution:\n", count)
for idx, size := range sizeBuckets {
eventsInBucket := count / len(sizeBuckets)
if idx < count%len(sizeBuckets) {
eventsInBucket++
}
sizeStr := formatSize(size)
fmt.Printf(" %s: ~%d events\n", sizeStr, eventsInBucket)
}
fmt.Println()
return events
}
// formatSize formats byte size in human-readable format
func formatSize(bytes int) string {
if bytes == 0 {
return "Empty (0 bytes)"
}
if bytes < 1024 {
return fmt.Sprintf("%d bytes", bytes)
}
if bytes < 1024*1024 {
return fmt.Sprintf("%d KB", bytes/1024)
}
if bytes < 1024*1024*1024 {
return fmt.Sprintf("%d MB", bytes/(1024*1024))
}
return fmt.Sprintf("%.2f GB", float64(bytes)/(1024*1024*1024))
}
// min returns the minimum of two integers
func min(a, b int) int {
if a < b {
return a
}
return b
}
// max returns the maximum of two integers
func max(a, b int) int {
if a > b {
return a
}
return b
}
func (b *Benchmark) GenerateReport() {
fmt.Println("\n" + strings.Repeat("=", 80))
fmt.Println("BENCHMARK REPORT")

View File

@@ -1,140 +0,0 @@
================================================================
NOSTR RELAY BENCHMARK AGGREGATE REPORT
================================================================
Generated: 2025-09-20T11:04:39+00:00
Benchmark Configuration:
Events per test: 10000
Concurrent workers: 8
Test duration: 60s
Relays tested: 6
================================================================
SUMMARY BY RELAY
================================================================
Relay: next-orly
----------------------------------------
Status: COMPLETED
Events/sec: 1035.42
Events/sec: 659.20
Events/sec: 1094.56
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 470.069µs
Bottom 10% Avg Latency: 750.491µs
Avg Latency: 190.573µs
P95 Latency: 693.101µs
P95 Latency: 289.761µs
P95 Latency: 22.450848ms
Relay: khatru-sqlite
----------------------------------------
Status: COMPLETED
Events/sec: 1105.61
Events/sec: 624.87
Events/sec: 1070.10
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 458.035µs
Bottom 10% Avg Latency: 702.193µs
Avg Latency: 193.997µs
P95 Latency: 660.608µs
P95 Latency: 302.666µs
P95 Latency: 23.653412ms
Relay: khatru-badger
----------------------------------------
Status: COMPLETED
Events/sec: 1040.11
Events/sec: 663.14
Events/sec: 1065.58
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 454.784µs
Bottom 10% Avg Latency: 706.219µs
Avg Latency: 193.914µs
P95 Latency: 654.637µs
P95 Latency: 296.525µs
P95 Latency: 21.642655ms
Relay: relayer-basic
----------------------------------------
Status: COMPLETED
Events/sec: 1104.88
Events/sec: 642.17
Events/sec: 1079.27
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 433.89µs
Bottom 10% Avg Latency: 653.813µs
Avg Latency: 186.306µs
P95 Latency: 617.868µs
P95 Latency: 279.192µs
P95 Latency: 21.247322ms
Relay: strfry
----------------------------------------
Status: COMPLETED
Events/sec: 1090.49
Events/sec: 652.03
Events/sec: 1098.57
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 448.058µs
Bottom 10% Avg Latency: 729.464µs
Avg Latency: 189.06µs
P95 Latency: 667.141µs
P95 Latency: 290.433µs
P95 Latency: 20.822884ms
Relay: nostr-rs-relay
----------------------------------------
Status: COMPLETED
Events/sec: 1123.91
Events/sec: 647.62
Events/sec: 1033.64
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 416.753µs
Bottom 10% Avg Latency: 638.318µs
Avg Latency: 185.217µs
P95 Latency: 597.338µs
P95 Latency: 273.191µs
P95 Latency: 22.416221ms
================================================================
DETAILED RESULTS
================================================================
Individual relay reports are available in:
- /reports/run_20250920_101521/khatru-badger_results.txt
- /reports/run_20250920_101521/khatru-sqlite_results.txt
- /reports/run_20250920_101521/next-orly_results.txt
- /reports/run_20250920_101521/nostr-rs-relay_results.txt
- /reports/run_20250920_101521/relayer-basic_results.txt
- /reports/run_20250920_101521/strfry_results.txt
================================================================
BENCHMARK COMPARISON TABLE
================================================================
Relay Status Peak Tput/s Avg Latency Success Rate
---- ------ ----------- ----------- ------------
next-orly OK 1035.42 470.069µs 100.0%
khatru-sqlite OK 1105.61 458.035µs 100.0%
khatru-badger OK 1040.11 454.784µs 100.0%
relayer-basic OK 1104.88 433.89µs 100.0%
strfry OK 1090.49 448.058µs 100.0%
nostr-rs-relay OK 1123.91 416.753µs 100.0%
================================================================
End of Report
================================================================

View File

@@ -1,298 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-badger_8
Events: 10000, Workers: 8, Duration: 1m0s
1758364309339505/tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
1758364309340007/tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
1758364309340039/tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
1758364309340327(*types.Uint32)(0xc000147840)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
1758364309340465migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.614321551s
Events/sec: 1040.11
Avg latency: 454.784µs
P90 latency: 596.266µs
P95 latency: 654.637µs
P99 latency: 844.569µs
Bottom 10% Avg latency: 706.219µs
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 136.444875ms
Burst completed: 1000 events in 141.806497ms
Burst completed: 1000 events in 168.991278ms
Burst completed: 1000 events in 167.713425ms
Burst completed: 1000 events in 162.89698ms
Burst completed: 1000 events in 157.775164ms
Burst completed: 1000 events in 166.476709ms
Burst completed: 1000 events in 161.742632ms
Burst completed: 1000 events in 162.138977ms
Burst completed: 1000 events in 156.657194ms
Burst test completed: 10000 events in 15.07982611s
Events/sec: 663.14
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 44.903267299s
Combined ops/sec: 222.70
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 3166 queries in 1m0.104195004s
Queries/sec: 52.68
Avg query latency: 125.847553ms
P95 query latency: 148.109766ms
P99 query latency: 212.054697ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 11366 operations (1366 queries, 10000 writes) in 1m0.127232573s
Operations/sec: 189.03
Avg latency: 16.671438ms
Avg query latency: 134.993072ms
Avg write latency: 508.703µs
P95 latency: 133.755996ms
P99 latency: 152.790563ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.384548186s
Events/sec: 1065.58
Avg latency: 566.375µs
P90 latency: 738.377µs
P95 latency: 839.679µs
P99 latency: 1.131084ms
Bottom 10% Avg latency: 1.312791ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 166.832259ms
Burst completed: 1000 events in 175.061575ms
Burst completed: 1000 events in 168.897493ms
Burst completed: 1000 events in 167.584171ms
Burst completed: 1000 events in 178.212526ms
Burst completed: 1000 events in 202.208945ms
Burst completed: 1000 events in 154.130024ms
Burst completed: 1000 events in 168.817721ms
Burst completed: 1000 events in 153.032223ms
Burst completed: 1000 events in 154.799008ms
Burst test completed: 10000 events in 15.449161726s
Events/sec: 647.28
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4582 reads in 1m0.037041762s
Combined ops/sec: 159.60
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 959 queries in 1m0.42440735s
Queries/sec: 15.87
Avg query latency: 418.846875ms
P95 query latency: 473.089327ms
P99 query latency: 650.467474ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 10484 operations (484 queries, 10000 writes) in 1m0.283590079s
Operations/sec: 173.91
Avg latency: 17.921964ms
Avg query latency: 381.041592ms
Avg write latency: 346.974µs
P95 latency: 1.269749ms
P99 latency: 399.015222ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 9.614321551s
Total Events: 10000
Events/sec: 1040.11
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 118 MB
Avg Latency: 454.784µs
P90 Latency: 596.266µs
P95 Latency: 654.637µs
P99 Latency: 844.569µs
Bottom 10% Avg Latency: 706.219µs
----------------------------------------
Test: Burst Pattern
Duration: 15.07982611s
Total Events: 10000
Events/sec: 663.14
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 162 MB
Avg Latency: 193.914µs
P90 Latency: 255.617µs
P95 Latency: 296.525µs
P99 Latency: 451.81µs
Bottom 10% Avg Latency: 343.222µs
----------------------------------------
Test: Mixed Read/Write
Duration: 44.903267299s
Total Events: 10000
Events/sec: 222.70
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 121 MB
Avg Latency: 9.145633ms
P90 Latency: 19.946513ms
P95 Latency: 21.642655ms
P99 Latency: 23.951572ms
Bottom 10% Avg Latency: 21.861602ms
----------------------------------------
Test: Query Performance
Duration: 1m0.104195004s
Total Events: 3166
Events/sec: 52.68
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 188 MB
Avg Latency: 125.847553ms
P90 Latency: 140.664966ms
P95 Latency: 148.109766ms
P99 Latency: 212.054697ms
Bottom 10% Avg Latency: 164.089129ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.127232573s
Total Events: 11366
Events/sec: 189.03
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 112 MB
Avg Latency: 16.671438ms
P90 Latency: 122.627849ms
P95 Latency: 133.755996ms
P99 Latency: 152.790563ms
Bottom 10% Avg Latency: 138.087104ms
----------------------------------------
Test: Peak Throughput
Duration: 9.384548186s
Total Events: 10000
Events/sec: 1065.58
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 1441 MB
Avg Latency: 566.375µs
P90 Latency: 738.377µs
P95 Latency: 839.679µs
P99 Latency: 1.131084ms
Bottom 10% Avg Latency: 1.312791ms
----------------------------------------
Test: Burst Pattern
Duration: 15.449161726s
Total Events: 10000
Events/sec: 647.28
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 165 MB
Avg Latency: 186.353µs
P90 Latency: 243.413µs
P95 Latency: 283.06µs
P99 Latency: 440.76µs
Bottom 10% Avg Latency: 324.151µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.037041762s
Total Events: 9582
Events/sec: 159.60
Success Rate: 95.8%
Concurrent Workers: 8
Memory Used: 138 MB
Avg Latency: 16.358228ms
P90 Latency: 37.654373ms
P95 Latency: 40.578604ms
P99 Latency: 46.331181ms
Bottom 10% Avg Latency: 41.76124ms
----------------------------------------
Test: Query Performance
Duration: 1m0.42440735s
Total Events: 959
Events/sec: 15.87
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 110 MB
Avg Latency: 418.846875ms
P90 Latency: 448.809017ms
P95 Latency: 473.089327ms
P99 Latency: 650.467474ms
Bottom 10% Avg Latency: 518.112626ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.283590079s
Total Events: 10484
Events/sec: 173.91
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 205 MB
Avg Latency: 17.921964ms
P90 Latency: 582.319µs
P95 Latency: 1.269749ms
P99 Latency: 399.015222ms
Bottom 10% Avg Latency: 176.257001ms
----------------------------------------
Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.adoc
1758364794792663/tmp/benchmark_khatru-badger_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
1758364796617126/tmp/benchmark_khatru-badger_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
1758364796621659/tmp/benchmark_khatru-badger_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: khatru-badger
RELAY_URL: ws://khatru-badger:3334
TEST_TIMESTAMP: 2025-09-20T10:39:56+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

View File

@@ -1,298 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-sqlite_8
Events: 10000, Workers: 8, Duration: 1m0s
1758363814412229/tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
1758363814412803/tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
1758363814412840/tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
1758363814413123(*types.Uint32)(0xc0001ea00c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
1758363814413200migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.044789549s
Events/sec: 1105.61
Avg latency: 458.035µs
P90 latency: 601.736µs
P95 latency: 660.608µs
P99 latency: 844.108µs
Bottom 10% Avg latency: 702.193µs
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 146.610877ms
Burst completed: 1000 events in 179.229665ms
Burst completed: 1000 events in 157.096919ms
Burst completed: 1000 events in 164.796374ms
Burst completed: 1000 events in 188.464354ms
Burst completed: 1000 events in 196.529596ms
Burst completed: 1000 events in 169.425581ms
Burst completed: 1000 events in 147.99354ms
Burst completed: 1000 events in 157.996252ms
Burst completed: 1000 events in 167.299262ms
Burst test completed: 10000 events in 16.003207139s
Events/sec: 624.87
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 46.924555793s
Combined ops/sec: 213.11
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 3052 queries in 1m0.102264s
Queries/sec: 50.78
Avg query latency: 128.464192ms
P95 query latency: 148.086431ms
P99 query latency: 219.275394ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 11296 operations (1296 queries, 10000 writes) in 1m0.108871986s
Operations/sec: 187.93
Avg latency: 16.71621ms
Avg query latency: 142.320434ms
Avg write latency: 437.903µs
P95 latency: 141.357185ms
P99 latency: 163.50992ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.344884331s
Events/sec: 1070.10
Avg latency: 578.453µs
P90 latency: 742.585µs
P95 latency: 849.679µs
P99 latency: 1.122058ms
Bottom 10% Avg latency: 1.362355ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 185.472655ms
Burst completed: 1000 events in 194.135516ms
Burst completed: 1000 events in 176.056931ms
Burst completed: 1000 events in 161.500315ms
Burst completed: 1000 events in 157.673837ms
Burst completed: 1000 events in 167.130208ms
Burst completed: 1000 events in 182.164655ms
Burst completed: 1000 events in 156.589581ms
Burst completed: 1000 events in 154.419949ms
Burst completed: 1000 events in 158.445927ms
Burst test completed: 10000 events in 15.587711126s
Events/sec: 641.53
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4405 reads in 1m0.043842569s
Combined ops/sec: 156.64
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 915 queries in 1m0.3452177s
Queries/sec: 15.16
Avg query latency: 435.125142ms
P95 query latency: 520.311963ms
P99 query latency: 618.85899ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 10489 operations (489 queries, 10000 writes) in 1m0.27235761s
Operations/sec: 174.03
Avg latency: 18.043774ms
Avg query latency: 379.681531ms
Avg write latency: 359.688µs
P95 latency: 1.316628ms
P99 latency: 400.223248ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 9.044789549s
Total Events: 10000
Events/sec: 1105.61
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 144 MB
Avg Latency: 458.035µs
P90 Latency: 601.736µs
P95 Latency: 660.608µs
P99 Latency: 844.108µs
Bottom 10% Avg Latency: 702.193µs
----------------------------------------
Test: Burst Pattern
Duration: 16.003207139s
Total Events: 10000
Events/sec: 624.87
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 89 MB
Avg Latency: 193.997µs
P90 Latency: 261.969µs
P95 Latency: 302.666µs
P99 Latency: 431.933µs
Bottom 10% Avg Latency: 334.383µs
----------------------------------------
Test: Mixed Read/Write
Duration: 46.924555793s
Total Events: 10000
Events/sec: 213.11
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 96 MB
Avg Latency: 9.781737ms
P90 Latency: 21.91971ms
P95 Latency: 23.653412ms
P99 Latency: 27.511972ms
Bottom 10% Avg Latency: 24.396695ms
----------------------------------------
Test: Query Performance
Duration: 1m0.102264s
Total Events: 3052
Events/sec: 50.78
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 209 MB
Avg Latency: 128.464192ms
P90 Latency: 142.195039ms
P95 Latency: 148.086431ms
P99 Latency: 219.275394ms
Bottom 10% Avg Latency: 162.874217ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.108871986s
Total Events: 11296
Events/sec: 187.93
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 159 MB
Avg Latency: 16.71621ms
P90 Latency: 127.287246ms
P95 Latency: 141.357185ms
P99 Latency: 163.50992ms
Bottom 10% Avg Latency: 145.199189ms
----------------------------------------
Test: Peak Throughput
Duration: 9.344884331s
Total Events: 10000
Events/sec: 1070.10
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 1441 MB
Avg Latency: 578.453µs
P90 Latency: 742.585µs
P95 Latency: 849.679µs
P99 Latency: 1.122058ms
Bottom 10% Avg Latency: 1.362355ms
----------------------------------------
Test: Burst Pattern
Duration: 15.587711126s
Total Events: 10000
Events/sec: 641.53
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 141 MB
Avg Latency: 190.235µs
P90 Latency: 254.795µs
P95 Latency: 290.563µs
P99 Latency: 437.323µs
Bottom 10% Avg Latency: 328.752µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.043842569s
Total Events: 9405
Events/sec: 156.64
Success Rate: 94.0%
Concurrent Workers: 8
Memory Used: 105 MB
Avg Latency: 16.852438ms
P90 Latency: 39.677855ms
P95 Latency: 42.553634ms
P99 Latency: 48.262077ms
Bottom 10% Avg Latency: 43.994063ms
----------------------------------------
Test: Query Performance
Duration: 1m0.3452177s
Total Events: 915
Events/sec: 15.16
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 157 MB
Avg Latency: 435.125142ms
P90 Latency: 482.304439ms
P95 Latency: 520.311963ms
P99 Latency: 618.85899ms
Bottom 10% Avg Latency: 545.670939ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.27235761s
Total Events: 10489
Events/sec: 174.03
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 132 MB
Avg Latency: 18.043774ms
P90 Latency: 583.962µs
P95 Latency: 1.316628ms
P99 Latency: 400.223248ms
Bottom 10% Avg Latency: 177.440946ms
----------------------------------------
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc
1758364302230610/tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
1758364304057942/tmp/benchmark_khatru-sqlite_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
1758364304063521/tmp/benchmark_khatru-sqlite_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: khatru-sqlite
RELAY_URL: ws://khatru-sqlite:3334
TEST_TIMESTAMP: 2025-09-20T10:31:44+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

View File

@@ -1,298 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly_8
Events: 10000, Workers: 8, Duration: 1m0s
1758363321263384/tmp/benchmark_next-orly_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
1758363321263864/tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
1758363321263887/tmp/benchmark_next-orly_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
1758363321264128(*types.Uint32)(0xc0001f7ffc)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
1758363321264177migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.657904043s
Events/sec: 1035.42
Avg latency: 470.069µs
P90 latency: 628.167µs
P95 latency: 693.101µs
P99 latency: 922.357µs
Bottom 10% Avg latency: 750.491µs
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 175.034134ms
Burst completed: 1000 events in 150.401771ms
Burst completed: 1000 events in 168.992305ms
Burst completed: 1000 events in 179.447581ms
Burst completed: 1000 events in 165.602457ms
Burst completed: 1000 events in 178.649561ms
Burst completed: 1000 events in 195.002303ms
Burst completed: 1000 events in 168.970954ms
Burst completed: 1000 events in 150.818413ms
Burst completed: 1000 events in 185.285662ms
Burst test completed: 10000 events in 15.169978801s
Events/sec: 659.20
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 45.597478865s
Combined ops/sec: 219.31
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 3151 queries in 1m0.067849757s
Queries/sec: 52.46
Avg query latency: 126.38548ms
P95 query latency: 149.976367ms
P99 query latency: 205.807461ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 11325 operations (1325 queries, 10000 writes) in 1m0.081967157s
Operations/sec: 188.49
Avg latency: 16.694154ms
Avg query latency: 139.524748ms
Avg write latency: 419.1µs
P95 latency: 138.688202ms
P99 latency: 158.824742ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.136097148s
Events/sec: 1094.56
Avg latency: 510.7µs
P90 latency: 636.763µs
P95 latency: 705.564µs
P99 latency: 922.777µs
Bottom 10% Avg latency: 1.094965ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 176.337148ms
Burst completed: 1000 events in 177.351251ms
Burst completed: 1000 events in 181.515292ms
Burst completed: 1000 events in 164.043866ms
Burst completed: 1000 events in 152.697196ms
Burst completed: 1000 events in 144.231922ms
Burst completed: 1000 events in 162.606659ms
Burst completed: 1000 events in 137.485182ms
Burst completed: 1000 events in 163.19487ms
Burst completed: 1000 events in 147.900339ms
Burst test completed: 10000 events in 15.514130113s
Events/sec: 644.57
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4489 reads in 1m0.036174989s
Combined ops/sec: 158.05
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 900 queries in 1m0.304636826s
Queries/sec: 14.92
Avg query latency: 444.57989ms
P95 query latency: 547.598358ms
P99 query latency: 660.926147ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 10462 operations (462 queries, 10000 writes) in 1m0.362856212s
Operations/sec: 173.32
Avg latency: 17.808607ms
Avg query latency: 395.594177ms
Avg write latency: 354.914µs
P95 latency: 1.221657ms
P99 latency: 411.642669ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 9.657904043s
Total Events: 10000
Events/sec: 1035.42
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 144 MB
Avg Latency: 470.069µs
P90 Latency: 628.167µs
P95 Latency: 693.101µs
P99 Latency: 922.357µs
Bottom 10% Avg Latency: 750.491µs
----------------------------------------
Test: Burst Pattern
Duration: 15.169978801s
Total Events: 10000
Events/sec: 659.20
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 135 MB
Avg Latency: 190.573µs
P90 Latency: 252.701µs
P95 Latency: 289.761µs
P99 Latency: 408.147µs
Bottom 10% Avg Latency: 316.797µs
----------------------------------------
Test: Mixed Read/Write
Duration: 45.597478865s
Total Events: 10000
Events/sec: 219.31
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 119 MB
Avg Latency: 9.381158ms
P90 Latency: 20.487026ms
P95 Latency: 22.450848ms
P99 Latency: 24.696325ms
Bottom 10% Avg Latency: 22.632933ms
----------------------------------------
Test: Query Performance
Duration: 1m0.067849757s
Total Events: 3151
Events/sec: 52.46
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 145 MB
Avg Latency: 126.38548ms
P90 Latency: 142.39268ms
P95 Latency: 149.976367ms
P99 Latency: 205.807461ms
Bottom 10% Avg Latency: 162.636454ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.081967157s
Total Events: 11325
Events/sec: 188.49
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 194 MB
Avg Latency: 16.694154ms
P90 Latency: 125.314618ms
P95 Latency: 138.688202ms
P99 Latency: 158.824742ms
Bottom 10% Avg Latency: 142.699977ms
----------------------------------------
Test: Peak Throughput
Duration: 9.136097148s
Total Events: 10000
Events/sec: 1094.56
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 1441 MB
Avg Latency: 510.7µs
P90 Latency: 636.763µs
P95 Latency: 705.564µs
P99 Latency: 922.777µs
Bottom 10% Avg Latency: 1.094965ms
----------------------------------------
Test: Burst Pattern
Duration: 15.514130113s
Total Events: 10000
Events/sec: 644.57
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 138 MB
Avg Latency: 230.062µs
P90 Latency: 316.624µs
P95 Latency: 389.882µs
P99 Latency: 859.548µs
Bottom 10% Avg Latency: 529.836µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.036174989s
Total Events: 9489
Events/sec: 158.05
Success Rate: 94.9%
Concurrent Workers: 8
Memory Used: 182 MB
Avg Latency: 16.56372ms
P90 Latency: 38.24931ms
P95 Latency: 41.187306ms
P99 Latency: 46.02529ms
Bottom 10% Avg Latency: 42.131189ms
----------------------------------------
Test: Query Performance
Duration: 1m0.304636826s
Total Events: 900
Events/sec: 14.92
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 141 MB
Avg Latency: 444.57989ms
P90 Latency: 490.730651ms
P95 Latency: 547.598358ms
P99 Latency: 660.926147ms
Bottom 10% Avg Latency: 563.628707ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.362856212s
Total Events: 10462
Events/sec: 173.32
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 152 MB
Avg Latency: 17.808607ms
P90 Latency: 631.703µs
P95 Latency: 1.221657ms
P99 Latency: 411.642669ms
Bottom 10% Avg Latency: 175.052418ms
----------------------------------------
Report saved to: /tmp/benchmark_next-orly_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly_8/benchmark_report.adoc
1758363807245770/tmp/benchmark_next-orly_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
1758363809118416/tmp/benchmark_next-orly_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
1758363809123697/tmp/benchmark_next-orly_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: next-orly
RELAY_URL: ws://next-orly:8080
TEST_TIMESTAMP: 2025-09-20T10:23:29+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

View File

@@ -1,298 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_nostr-rs-relay_8
Events: 10000, Workers: 8, Duration: 1m0s
1758365785928076/tmp/benchmark_nostr-rs-relay_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
1758365785929028/tmp/benchmark_nostr-rs-relay_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
1758365785929097/tmp/benchmark_nostr-rs-relay_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
1758365785929509(*types.Uint32)(0xc0001c820c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
1758365785929573migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 8.897492256s
Events/sec: 1123.91
Avg latency: 416.753µs
P90 latency: 546.351µs
P95 latency: 597.338µs
P99 latency: 760.549µs
Bottom 10% Avg latency: 638.318µs
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 158.263016ms
Burst completed: 1000 events in 181.558983ms
Burst completed: 1000 events in 155.219861ms
Burst completed: 1000 events in 183.834156ms
Burst completed: 1000 events in 192.398437ms
Burst completed: 1000 events in 176.450074ms
Burst completed: 1000 events in 175.050138ms
Burst completed: 1000 events in 178.883047ms
Burst completed: 1000 events in 180.74321ms
Burst completed: 1000 events in 169.39146ms
Burst test completed: 10000 events in 15.441062872s
Events/sec: 647.62
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 45.847091984s
Combined ops/sec: 218.12
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 3229 queries in 1m0.085047549s
Queries/sec: 53.74
Avg query latency: 123.209617ms
P95 query latency: 141.745618ms
P99 query latency: 154.527843ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 11298 operations (1298 queries, 10000 writes) in 1m0.096751583s
Operations/sec: 188.00
Avg latency: 16.447175ms
Avg query latency: 139.791065ms
Avg write latency: 437.138µs
P95 latency: 137.879538ms
P99 latency: 162.020385ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.674593819s
Events/sec: 1033.64
Avg latency: 541.545µs
P90 latency: 693.862µs
P95 latency: 775.757µs
P99 latency: 1.05005ms
Bottom 10% Avg latency: 1.219386ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 168.056064ms
Burst completed: 1000 events in 159.819647ms
Burst completed: 1000 events in 147.500264ms
Burst completed: 1000 events in 159.150392ms
Burst completed: 1000 events in 149.954829ms
Burst completed: 1000 events in 138.082938ms
Burst completed: 1000 events in 157.234213ms
Burst completed: 1000 events in 158.468955ms
Burst completed: 1000 events in 144.346047ms
Burst completed: 1000 events in 154.930576ms
Burst test completed: 10000 events in 15.646785427s
Events/sec: 639.11
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4415 reads in 1m0.02899167s
Combined ops/sec: 156.84
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 890 queries in 1m0.279192867s
Queries/sec: 14.76
Avg query latency: 448.809547ms
P95 query latency: 607.28509ms
P99 query latency: 786.387053ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 10469 operations (469 queries, 10000 writes) in 1m0.190785048s
Operations/sec: 173.93
Avg latency: 17.73903ms
Avg query latency: 388.59336ms
Avg write latency: 345.962µs
P95 latency: 1.158136ms
P99 latency: 407.947907ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 8.897492256s
Total Events: 10000
Events/sec: 1123.91
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 132 MB
Avg Latency: 416.753µs
P90 Latency: 546.351µs
P95 Latency: 597.338µs
P99 Latency: 760.549µs
Bottom 10% Avg Latency: 638.318µs
----------------------------------------
Test: Burst Pattern
Duration: 15.441062872s
Total Events: 10000
Events/sec: 647.62
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 104 MB
Avg Latency: 185.217µs
P90 Latency: 241.64µs
P95 Latency: 273.191µs
P99 Latency: 412.897µs
Bottom 10% Avg Latency: 306.752µs
----------------------------------------
Test: Mixed Read/Write
Duration: 45.847091984s
Total Events: 10000
Events/sec: 218.12
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 96 MB
Avg Latency: 9.446215ms
P90 Latency: 20.522135ms
P95 Latency: 22.416221ms
P99 Latency: 24.696283ms
Bottom 10% Avg Latency: 22.59535ms
----------------------------------------
Test: Query Performance
Duration: 1m0.085047549s
Total Events: 3229
Events/sec: 53.74
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 175 MB
Avg Latency: 123.209617ms
P90 Latency: 137.629898ms
P95 Latency: 141.745618ms
P99 Latency: 154.527843ms
Bottom 10% Avg Latency: 145.245967ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.096751583s
Total Events: 11298
Events/sec: 188.00
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 181 MB
Avg Latency: 16.447175ms
P90 Latency: 123.920421ms
P95 Latency: 137.879538ms
P99 Latency: 162.020385ms
Bottom 10% Avg Latency: 142.654147ms
----------------------------------------
Test: Peak Throughput
Duration: 9.674593819s
Total Events: 10000
Events/sec: 1033.64
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 1441 MB
Avg Latency: 541.545µs
P90 Latency: 693.862µs
P95 Latency: 775.757µs
P99 Latency: 1.05005ms
Bottom 10% Avg Latency: 1.219386ms
----------------------------------------
Test: Burst Pattern
Duration: 15.646785427s
Total Events: 10000
Events/sec: 639.11
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 146 MB
Avg Latency: 331.896µs
P90 Latency: 520.511µs
P95 Latency: 864.486µs
P99 Latency: 2.251087ms
Bottom 10% Avg Latency: 1.16922ms
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.02899167s
Total Events: 9415
Events/sec: 156.84
Success Rate: 94.2%
Concurrent Workers: 8
Memory Used: 147 MB
Avg Latency: 16.723365ms
P90 Latency: 39.058801ms
P95 Latency: 41.904891ms
P99 Latency: 47.156263ms
Bottom 10% Avg Latency: 42.800456ms
----------------------------------------
Test: Query Performance
Duration: 1m0.279192867s
Total Events: 890
Events/sec: 14.76
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 156 MB
Avg Latency: 448.809547ms
P90 Latency: 524.488485ms
P95 Latency: 607.28509ms
P99 Latency: 786.387053ms
Bottom 10% Avg Latency: 634.016595ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.190785048s
Total Events: 10469
Events/sec: 173.93
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 226 MB
Avg Latency: 17.73903ms
P90 Latency: 561.359µs
P95 Latency: 1.158136ms
P99 Latency: 407.947907ms
Bottom 10% Avg Latency: 174.508065ms
----------------------------------------
Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.adoc
1758366272164052/tmp/benchmark_nostr-rs-relay_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
1758366274030399/tmp/benchmark_nostr-rs-relay_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
1758366274036413/tmp/benchmark_nostr-rs-relay_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: nostr-rs-relay
RELAY_URL: ws://nostr-rs-relay:8080
TEST_TIMESTAMP: 2025-09-20T11:04:34+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

View File

@@ -1,298 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_relayer-basic_8
Events: 10000, Workers: 8, Duration: 1m0s
1758364801895559/tmp/benchmark_relayer-basic_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
1758364801896041/tmp/benchmark_relayer-basic_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
1758364801896078/tmp/benchmark_relayer-basic_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
1758364801896347(*types.Uint32)(0xc0001a801c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
1758364801896400migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.050770003s
Events/sec: 1104.88
Avg latency: 433.89µs
P90 latency: 567.261µs
P95 latency: 617.868µs
P99 latency: 783.593µs
Bottom 10% Avg latency: 653.813µs
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 183.738134ms
Burst completed: 1000 events in 155.035832ms
Burst completed: 1000 events in 160.066514ms
Burst completed: 1000 events in 183.724238ms
Burst completed: 1000 events in 178.910929ms
Burst completed: 1000 events in 168.905441ms
Burst completed: 1000 events in 172.584809ms
Burst completed: 1000 events in 177.214508ms
Burst completed: 1000 events in 169.921566ms
Burst completed: 1000 events in 162.042488ms
Burst test completed: 10000 events in 15.572250139s
Events/sec: 642.17
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 44.509677166s
Combined ops/sec: 224.67
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 3253 queries in 1m0.095238426s
Queries/sec: 54.13
Avg query latency: 122.100718ms
P95 query latency: 140.360749ms
P99 query latency: 148.353154ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 11408 operations (1408 queries, 10000 writes) in 1m0.117581615s
Operations/sec: 189.76
Avg latency: 16.525268ms
Avg query latency: 130.972853ms
Avg write latency: 411.048µs
P95 latency: 132.130964ms
P99 latency: 146.285305ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.265496879s
Events/sec: 1079.27
Avg latency: 529.266µs
P90 latency: 658.033µs
P95 latency: 732.024µs
P99 latency: 953.285µs
Bottom 10% Avg latency: 1.168714ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 172.300479ms
Burst completed: 1000 events in 149.247397ms
Burst completed: 1000 events in 170.000198ms
Burst completed: 1000 events in 133.786958ms
Burst completed: 1000 events in 172.157036ms
Burst completed: 1000 events in 153.284738ms
Burst completed: 1000 events in 166.711903ms
Burst completed: 1000 events in 170.635427ms
Burst completed: 1000 events in 153.381031ms
Burst completed: 1000 events in 162.125949ms
Burst test completed: 10000 events in 16.674963543s
Events/sec: 599.70
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4665 reads in 1m0.035358264s
Combined ops/sec: 160.99
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 944 queries in 1m0.383519958s
Queries/sec: 15.63
Avg query latency: 421.75292ms
P95 query latency: 491.340259ms
P99 query latency: 664.614262ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 10479 operations (479 queries, 10000 writes) in 1m0.291926697s
Operations/sec: 173.80
Avg latency: 18.049265ms
Avg query latency: 385.864458ms
Avg write latency: 430.918µs
P95 latency: 3.05038ms
P99 latency: 404.540502ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 9.050770003s
Total Events: 10000
Events/sec: 1104.88
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 153 MB
Avg Latency: 433.89µs
P90 Latency: 567.261µs
P95 Latency: 617.868µs
P99 Latency: 783.593µs
Bottom 10% Avg Latency: 653.813µs
----------------------------------------
Test: Burst Pattern
Duration: 15.572250139s
Total Events: 10000
Events/sec: 642.17
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 134 MB
Avg Latency: 186.306µs
P90 Latency: 243.995µs
P95 Latency: 279.192µs
P99 Latency: 392.859µs
Bottom 10% Avg Latency: 303.766µs
----------------------------------------
Test: Mixed Read/Write
Duration: 44.509677166s
Total Events: 10000
Events/sec: 224.67
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 163 MB
Avg Latency: 8.892738ms
P90 Latency: 19.406836ms
P95 Latency: 21.247322ms
P99 Latency: 23.452072ms
Bottom 10% Avg Latency: 21.397913ms
----------------------------------------
Test: Query Performance
Duration: 1m0.095238426s
Total Events: 3253
Events/sec: 54.13
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 126 MB
Avg Latency: 122.100718ms
P90 Latency: 136.523661ms
P95 Latency: 140.360749ms
P99 Latency: 148.353154ms
Bottom 10% Avg Latency: 142.067372ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.117581615s
Total Events: 11408
Events/sec: 189.76
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 149 MB
Avg Latency: 16.525268ms
P90 Latency: 121.696848ms
P95 Latency: 132.130964ms
P99 Latency: 146.285305ms
Bottom 10% Avg Latency: 134.054744ms
----------------------------------------
Test: Peak Throughput
Duration: 9.265496879s
Total Events: 10000
Events/sec: 1079.27
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 1441 MB
Avg Latency: 529.266µs
P90 Latency: 658.033µs
P95 Latency: 732.024µs
P99 Latency: 953.285µs
Bottom 10% Avg Latency: 1.168714ms
----------------------------------------
Test: Burst Pattern
Duration: 16.674963543s
Total Events: 10000
Events/sec: 599.70
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 142 MB
Avg Latency: 264.288µs
P90 Latency: 350.187µs
P95 Latency: 519.139µs
P99 Latency: 1.961326ms
Bottom 10% Avg Latency: 877.366µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.035358264s
Total Events: 9665
Events/sec: 160.99
Success Rate: 96.7%
Concurrent Workers: 8
Memory Used: 151 MB
Avg Latency: 16.019245ms
P90 Latency: 36.340362ms
P95 Latency: 39.113864ms
P99 Latency: 44.271098ms
Bottom 10% Avg Latency: 40.108462ms
----------------------------------------
Test: Query Performance
Duration: 1m0.383519958s
Total Events: 944
Events/sec: 15.63
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 280 MB
Avg Latency: 421.75292ms
P90 Latency: 460.902551ms
P95 Latency: 491.340259ms
P99 Latency: 664.614262ms
Bottom 10% Avg Latency: 538.014725ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.291926697s
Total Events: 10479
Events/sec: 173.80
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 122 MB
Avg Latency: 18.049265ms
P90 Latency: 843.867µs
P95 Latency: 3.05038ms
P99 Latency: 404.540502ms
Bottom 10% Avg Latency: 177.245211ms
----------------------------------------
Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.adoc
1758365287933287/tmp/benchmark_relayer-basic_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
1758365289807797/tmp/benchmark_relayer-basic_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
1758365289812921/tmp/benchmark_relayer-basic_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: relayer-basic
RELAY_URL: ws://relayer-basic:7447
TEST_TIMESTAMP: 2025-09-20T10:48:10+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

View File

@@ -1,298 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_strfry_8
Events: 10000, Workers: 8, Duration: 1m0s
1758365295110579/tmp/benchmark_strfry_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
1758365295111085/tmp/benchmark_strfry_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
1758365295111113/tmp/benchmark_strfry_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
1758365295111319(*types.Uint32)(0xc000141a3c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
1758365295111354migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.170212358s
Events/sec: 1090.49
Avg latency: 448.058µs
P90 latency: 597.558µs
P95 latency: 667.141µs
P99 latency: 920.784µs
Bottom 10% Avg latency: 729.464µs
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 172.138862ms
Burst completed: 1000 events in 168.99322ms
Burst completed: 1000 events in 162.213786ms
Burst completed: 1000 events in 161.027417ms
Burst completed: 1000 events in 183.148824ms
Burst completed: 1000 events in 178.152837ms
Burst completed: 1000 events in 158.65623ms
Burst completed: 1000 events in 186.7166ms
Burst completed: 1000 events in 177.202878ms
Burst completed: 1000 events in 182.780071ms
Burst test completed: 10000 events in 15.336760896s
Events/sec: 652.03
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 44.257468151s
Combined ops/sec: 225.95
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 3002 queries in 1m0.091429487s
Queries/sec: 49.96
Avg query latency: 131.632043ms
P95 query latency: 175.810416ms
P99 query latency: 228.52716ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 11308 operations (1308 queries, 10000 writes) in 1m0.111257202s
Operations/sec: 188.12
Avg latency: 16.193707ms
Avg query latency: 137.019852ms
Avg write latency: 389.647µs
P95 latency: 136.70132ms
P99 latency: 156.996779ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.102738s
Events/sec: 1098.57
Avg latency: 493.093µs
P90 latency: 605.684µs
P95 latency: 659.477µs
P99 latency: 826.344µs
Bottom 10% Avg latency: 1.097884ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 178.755916ms
Burst completed: 1000 events in 170.810722ms
Burst completed: 1000 events in 166.730701ms
Burst completed: 1000 events in 172.177576ms
Burst completed: 1000 events in 164.907178ms
Burst completed: 1000 events in 153.267727ms
Burst completed: 1000 events in 157.855743ms
Burst completed: 1000 events in 159.632496ms
Burst completed: 1000 events in 160.802526ms
Burst completed: 1000 events in 178.513954ms
Burst test completed: 10000 events in 15.535933443s
Events/sec: 643.67
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4550 reads in 1m0.032080518s
Combined ops/sec: 159.08
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 913 queries in 1m0.248877091s
Queries/sec: 15.15
Avg query latency: 436.472206ms
P95 query latency: 493.12732ms
P99 query latency: 623.201275ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 10470 operations (470 queries, 10000 writes) in 1m0.293280495s
Operations/sec: 173.65
Avg latency: 18.084009ms
Avg query latency: 395.171481ms
Avg write latency: 360.898µs
P95 latency: 1.338148ms
P99 latency: 413.21015ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 9.170212358s
Total Events: 10000
Events/sec: 1090.49
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 108 MB
Avg Latency: 448.058µs
P90 Latency: 597.558µs
P95 Latency: 667.141µs
P99 Latency: 920.784µs
Bottom 10% Avg Latency: 729.464µs
----------------------------------------
Test: Burst Pattern
Duration: 15.336760896s
Total Events: 10000
Events/sec: 652.03
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 123 MB
Avg Latency: 189.06µs
P90 Latency: 248.714µs
P95 Latency: 290.433µs
P99 Latency: 416.924µs
Bottom 10% Avg Latency: 324.174µs
----------------------------------------
Test: Mixed Read/Write
Duration: 44.257468151s
Total Events: 10000
Events/sec: 225.95
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 158 MB
Avg Latency: 8.745534ms
P90 Latency: 18.980294ms
P95 Latency: 20.822884ms
P99 Latency: 23.124918ms
Bottom 10% Avg Latency: 21.006886ms
----------------------------------------
Test: Query Performance
Duration: 1m0.091429487s
Total Events: 3002
Events/sec: 49.96
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 191 MB
Avg Latency: 131.632043ms
P90 Latency: 152.618309ms
P95 Latency: 175.810416ms
P99 Latency: 228.52716ms
Bottom 10% Avg Latency: 186.230874ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.111257202s
Total Events: 11308
Events/sec: 188.12
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 146 MB
Avg Latency: 16.193707ms
P90 Latency: 122.204256ms
P95 Latency: 136.70132ms
P99 Latency: 156.996779ms
Bottom 10% Avg Latency: 140.031139ms
----------------------------------------
Test: Peak Throughput
Duration: 9.102738s
Total Events: 10000
Events/sec: 1098.57
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 1441 MB
Avg Latency: 493.093µs
P90 Latency: 605.684µs
P95 Latency: 659.477µs
P99 Latency: 826.344µs
Bottom 10% Avg Latency: 1.097884ms
----------------------------------------
Test: Burst Pattern
Duration: 15.535933443s
Total Events: 10000
Events/sec: 643.67
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 130 MB
Avg Latency: 186.177µs
P90 Latency: 243.915µs
P95 Latency: 276.146µs
P99 Latency: 418.787µs
Bottom 10% Avg Latency: 309.015µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.032080518s
Total Events: 9550
Events/sec: 159.08
Success Rate: 95.5%
Concurrent Workers: 8
Memory Used: 115 MB
Avg Latency: 16.401942ms
P90 Latency: 37.575878ms
P95 Latency: 40.323279ms
P99 Latency: 45.453669ms
Bottom 10% Avg Latency: 41.331235ms
----------------------------------------
Test: Query Performance
Duration: 1m0.248877091s
Total Events: 913
Events/sec: 15.15
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 211 MB
Avg Latency: 436.472206ms
P90 Latency: 474.430346ms
P95 Latency: 493.12732ms
P99 Latency: 623.201275ms
Bottom 10% Avg Latency: 523.084076ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.293280495s
Total Events: 10470
Events/sec: 173.65
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 171 MB
Avg Latency: 18.084009ms
P90 Latency: 624.339µs
P95 Latency: 1.338148ms
P99 Latency: 413.21015ms
Bottom 10% Avg Latency: 177.8924ms
----------------------------------------
Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_strfry_8/benchmark_report.adoc
1758365779337138/tmp/benchmark_strfry_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
1758365780726692/tmp/benchmark_strfry_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
1758365780732292/tmp/benchmark_strfry_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: strfry
RELAY_URL: ws://strfry:8080
TEST_TIMESTAMP: 2025-09-20T10:56:20+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

View File

@@ -0,0 +1,134 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly_8
Events: 50000, Workers: 24, Duration: 1m0s
1763394450181444 /tmp/benchmark_next-orly_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56
1763394450184981 /tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56
1763394450185044 /tmp/benchmark_next-orly_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56
1763394450185315 migrating to version 1... /build/pkg/database/migrations.go:66
1763394450185349 migrating to version 2... /build/pkg/database/migrations.go:73
1763394450185369 migrating to version 3... /build/pkg/database/migrations.go:80
1763394450185374 cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287
1763394450185381 cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332
1763394450185396 migrating to version 4... /build/pkg/database/migrations.go:87
1763394450185400 converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340
1763394450185410 found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429
1763394450185415 migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
2025/11/17 15:47:30 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2
1763394452185466 /tmp/benchmark_next-orly_8: database warmup complete, ready to serve requests
/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56
Events saved: 50000/50000 (100.0%)
Duration: 4.816237891s
Events/sec: 10381.55
Avg latency: 1.655686ms
P90 latency: 2.061483ms
P95 latency: 2.348178ms
P99 latency: 3.856522ms
Bottom 10% Avg latency: 2.985064ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 5000 events in 308.793395ms
Burst completed: 5000 events in 320.69366ms
Burst completed: 5000 events in 324.127721ms
Burst completed: 5000 events in 342.594802ms
Burst completed: 5000 events in 302.350819ms
Burst completed: 5000 events in 309.16143ms
Burst completed: 5000 events in 306.739193ms
Burst completed: 5000 events in 329.275972ms
Burst completed: 5000 events in 329.234395ms
Burst completed: 5000 events in 348.105403ms
Burst test completed: 50000 events in 9.543815189s
Events/sec: 5238.99
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 25000 writes, 25000 reads in 24.491349518s
Combined ops/sec: 2041.54
1763394510174043 /tmp/benchmark_next-orly_8: Block cache metrics: hit: 248593 miss: 322620 keys-added: 236208 keys-updated: 73483 keys-evicted: 236188 cost-added: 12658387393408 cost-evicted: 12657366958988 sets-dropped: 0 sets-rejected: 12869 gets-dropped: 64 gets-kept: 570624 gets-total: 571213 hit-ratio: 0.44
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 258436 queries in 1m0.014042961s
Queries/sec: 4306.26
Avg query latency: 4.008354ms
P95 query latency: 12.985167ms
P99 query latency: 23.424372ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 252445 operations (202445 queries, 50000 writes) in 1m0.005913119s
Operations/sec: 4207.00
Avg latency: 2.121776ms
Avg query latency: 2.374689ms
Avg write latency: 1.097756ms
P95 latency: 3.545393ms
P99 latency: 4.795537ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 50000/50000 (100.0%)
Duration: 5.086723437s
Events/sec: 9829.51
Avg latency: 1.777699ms
P90 latency: 2.219786ms
P95 latency: 2.443201ms
P99 latency: 3.504646ms
Bottom 10% Avg latency: 3.103013ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 5000 events in 324.341799ms
Burst completed: 5000 events in 319.047042ms
Burst completed: 5000 events in 324.104589ms
Burst completed: 5000 events in 342.464953ms
Burst completed: 5000 events in 342.679451ms
Burst completed: 5000 events in 359.150337ms
Burst completed: 5000 events in 367.952516ms
Burst completed: 5000 events in 338.4073ms
Burst completed: 5000 events in 326.796197ms
Burst completed: 5000 events in 357.71787ms
Burst test completed: 50000 events in 9.769325434s
Events/sec: 5118.06
1763394684274617 /tmp/benchmark_next-orly_8: [4] [E] LOG Compact 0->6 (8, 0 -> 4 tables with 1 splits). [00001 00002 00003 00004 00005 00006 00007 00008 . .] -> [00009 00010 00011 00012 .], took 2.954s
, deleted 1904950 bytes
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:1479 /build/pkg/database/logger.go:56
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 25000 writes, 25000 reads in 24.464062793s
Combined ops/sec: 2043.81
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 293040 queries in 1m0.010621036s
Queries/sec: 4883.14
Avg query latency: 3.419764ms
P95 query latency: 11.042876ms
P99 query latency: 19.984912ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
1763394810173629 /tmp/benchmark_next-orly_8: Block cache metrics: hit: 517421289 miss: 4606293 keys-added: 1664534 keys-updated: 2530425 keys-evicted: 1664512 cost-added: 85045328540032 cost-evicted: 85044318079141 sets-dropped: 0 sets-rejected: 349798 gets-dropped: 404194112 gets-kept: 117717888 gets-total: 522027608 hit-ratio: 0.99
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56

View File

@@ -0,0 +1,53 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-badger_8
Events: 50000, Workers: 24, Duration: 1m0s
1763397432159815 /tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56
1763397432162963 /tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56
1763397432163005 /tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56
1763397432163282 migrating to version 1... /build/pkg/database/migrations.go:66
1763397432163367 migrating to version 2... /build/pkg/database/migrations.go:73
1763397432163401 migrating to version 3... /build/pkg/database/migrations.go:80
1763397432163409 cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287
1763397432163473 cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332
1763397432163564 migrating to version 4... /build/pkg/database/migrations.go:87
1763397432163574 converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340
1763397432163594 found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429
1763397432163600 migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
2025/11/17 16:37:12 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2
1763397434164165 /tmp/benchmark_khatru-badger_8: database warmup complete, ready to serve requests
/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56
Events saved: 50000/50000 (100.0%)
Duration: 4.924203666s
Events/sec: 10153.93
Avg latency: 1.696974ms
P90 latency: 2.11483ms
P95 latency: 2.344067ms
P99 latency: 3.241477ms
Bottom 10% Avg latency: 2.7865ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 5000 events in 312.680497ms
Burst completed: 5000 events in 320.868898ms
Burst completed: 5000 events in 317.096109ms
Burst completed: 5000 events in 356.971689ms
Burst completed: 5000 events in 301.615682ms
Burst completed: 5000 events in 306.525096ms
Burst completed: 5000 events in 320.037813ms
Burst completed: 5000 events in 318.017102ms
Burst completed: 5000 events in 320.394281ms
Burst completed: 5000 events in 333.619741ms
Burst test completed: 50000 events in 9.552105607s
Events/sec: 5234.45
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...

View File

@@ -0,0 +1,323 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-sqlite_8
Events: 50000, Workers: 24, Duration: 1m0s
1763397017138391 /tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56
1763397017141550 /tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56
1763397017141593 /tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56
1763397017141951 migrating to version 1... /build/pkg/database/migrations.go:66
1763397017142013 migrating to version 2... /build/pkg/database/migrations.go:73
1763397017142036 migrating to version 3... /build/pkg/database/migrations.go:80
1763397017142042 cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287
1763397017142055 cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332
1763397017142080 migrating to version 4... /build/pkg/database/migrations.go:87
1763397017142086 converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340
1763397017142103 found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429
1763397017142109 migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
2025/11/17 16:30:17 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2
1763397019142156 /tmp/benchmark_khatru-sqlite_8: database warmup complete, ready to serve requests
/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56
Events saved: 50000/50000 (100.0%)
Duration: 4.697220167s
Events/sec: 10644.59
Avg latency: 1.589521ms
P90 latency: 1.927686ms
P95 latency: 2.072081ms
P99 latency: 2.794007ms
Bottom 10% Avg latency: 2.449508ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 5000 events in 331.053594ms
Burst completed: 5000 events in 339.97436ms
Burst completed: 5000 events in 352.328844ms
Burst completed: 5000 events in 376.613834ms
Burst completed: 5000 events in 321.307729ms
Burst completed: 5000 events in 314.265411ms
Burst completed: 5000 events in 321.656622ms
Burst completed: 5000 events in 325.689539ms
Burst completed: 5000 events in 367.767832ms
Burst completed: 5000 events in 367.275402ms
Burst test completed: 50000 events in 9.780316233s
Events/sec: 5112.31
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 25000 writes, 25000 reads in 24.45356557s
Combined ops/sec: 2044.69
1763397077132611⚠ /tmp/benchmark_khatru-sqlite_8: Block cache might be too small. Metrics: hit: 164850 miss: 294509 keys-added: 226622 keys-updated: 54881 keys-evicted: 226603 cost-added: 12429978548485 cost-evicted: 12428976154843 sets-dropped: 0 sets-rejected: 12954 gets-dropped: 192 gets-kept: 458368 gets-total: 459359 hit-ratio: 0.36
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:450 /build/pkg/database/logger.go:46
1763397077132680⚠ /tmp/benchmark_khatru-sqlite_8: Cache life expectancy (in seconds):
-- Histogram:
Min value: 0
Max value: 11
Count: 226603
50p: 2.00
75p: 2.00
90p: 2.00
[0, 2) 226567 99.98% 99.98%
[8, 16) 36 0.02% 100.00%
--
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:451 /build/pkg/database/logger.go:46
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 253442 queries in 1m0.011742602s
Queries/sec: 4223.21
Avg query latency: 4.105842ms
P95 query latency: 13.288591ms
P99 query latency: 23.937862ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 237910 operations (187910 queries, 50000 writes) in 1m0.007412985s
Operations/sec: 3964.68
Avg latency: 2.360698ms
Avg query latency: 2.630397ms
Avg write latency: 1.347113ms
P95 latency: 4.390739ms
P99 latency: 6.940329ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 50000/50000 (100.0%)
Duration: 4.792392684s
Events/sec: 10433.20
Avg latency: 1.649743ms
P90 latency: 1.991666ms
P95 latency: 2.145348ms
P99 latency: 2.77034ms
Bottom 10% Avg latency: 2.781523ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 5000 events in 330.357755ms
Burst completed: 5000 events in 334.984623ms
Burst completed: 5000 events in 345.478382ms
Burst completed: 5000 events in 340.589233ms
Burst completed: 5000 events in 348.792025ms
Burst completed: 5000 events in 354.019658ms
Burst completed: 5000 events in 356.823662ms
Burst completed: 5000 events in 347.496865ms
Burst completed: 5000 events in 342.618798ms
Burst completed: 5000 events in 337.759666ms
Burst test completed: 50000 events in 9.775603327s
Events/sec: 5114.77
1763397250998218 /tmp/benchmark_khatru-sqlite_8: [6] [E] LOG Compact 0->6 (8, 0 -> 4 tables with 1 splits). [00001 00002 00003 00004 00005 00006 00007 00008 . .] -> [00009 00010 00011 00012 .], took 2.922s
, deleted 1932516 bytes
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:1479 /build/pkg/database/logger.go:56
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 25000 writes, 25000 reads in 24.35620806s
Combined ops/sec: 2052.86
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 334922 queries in 1m0.011826287s
Queries/sec: 5580.93
Avg query latency: 2.871941ms
P95 query latency: 8.86787ms
P99 query latency: 16.075646ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
1763397377131811 /tmp/benchmark_khatru-sqlite_8: Block cache metrics: hit: 485497199 miss: 4802603 keys-added: 1628313 keys-updated: 2776240 keys-evicted: 1628292 cost-added: 85662348259200 cost-evicted: 85661362474446 sets-dropped: 0 sets-rejected: 336231 gets-dropped: 382997632 gets-kept: 107185536 gets-total: 490299843 hit-ratio: 0.99
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
Concurrent test completed: 266462 operations (216462 queries, 50000 writes) in 1m0.004503525s
Operations/sec: 4440.70
Avg latency: 1.968296ms
Avg query latency: 2.154689ms
Avg write latency: 1.161355ms
P95 latency: 3.329033ms
P99 latency: 4.878236ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 4.697220167s
Total Events: 50000
Events/sec: 10644.59
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 432 MB
Avg Latency: 1.589521ms
P90 Latency: 1.927686ms
P95 Latency: 2.072081ms
P99 Latency: 2.794007ms
Bottom 10% Avg Latency: 2.449508ms
----------------------------------------
Test: Burst Pattern
Duration: 9.780316233s
Total Events: 50000
Events/sec: 5112.31
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 147 MB
Avg Latency: 3.589724ms
P90 Latency: 7.397294ms
P95 Latency: 9.015658ms
P99 Latency: 12.848707ms
Bottom 10% Avg Latency: 10.286462ms
----------------------------------------
Test: Mixed Read/Write
Duration: 24.45356557s
Total Events: 50000
Events/sec: 2044.69
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 189 MB
Avg Latency: 439.984µs
P90 Latency: 878.495µs
P95 Latency: 980.94µs
P99 Latency: 1.17514ms
Bottom 10% Avg Latency: 1.261937ms
----------------------------------------
Test: Query Performance
Duration: 1m0.011742602s
Total Events: 253442
Events/sec: 4223.21
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 165 MB
Avg Latency: 4.105842ms
P90 Latency: 8.468483ms
P95 Latency: 13.288591ms
P99 Latency: 23.937862ms
Bottom 10% Avg Latency: 15.251447ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.007412985s
Total Events: 237910
Events/sec: 3964.68
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 149 MB
Avg Latency: 2.360698ms
P90 Latency: 3.517024ms
P95 Latency: 4.390739ms
P99 Latency: 6.940329ms
Bottom 10% Avg Latency: 5.015416ms
----------------------------------------
Test: Peak Throughput
Duration: 4.792392684s
Total Events: 50000
Events/sec: 10433.20
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 523 MB
Avg Latency: 1.649743ms
P90 Latency: 1.991666ms
P95 Latency: 2.145348ms
P99 Latency: 2.77034ms
Bottom 10% Avg Latency: 2.781523ms
----------------------------------------
Test: Burst Pattern
Duration: 9.775603327s
Total Events: 50000
Events/sec: 5114.77
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 541 MB
Avg Latency: 2.925486ms
P90 Latency: 5.542703ms
P95 Latency: 7.775478ms
P99 Latency: 11.125804ms
Bottom 10% Avg Latency: 8.91184ms
----------------------------------------
Test: Mixed Read/Write
Duration: 24.35620806s
Total Events: 50000
Events/sec: 2052.86
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 200 MB
Avg Latency: 424.333µs
P90 Latency: 865.429µs
P95 Latency: 968.085µs
P99 Latency: 1.174568ms
Bottom 10% Avg Latency: 1.224002ms
----------------------------------------
Test: Query Performance
Duration: 1m0.011826287s
Total Events: 334922
Events/sec: 5580.93
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 129 MB
Avg Latency: 2.871941ms
P90 Latency: 5.60422ms
P95 Latency: 8.86787ms
P99 Latency: 16.075646ms
Bottom 10% Avg Latency: 10.23636ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.004503525s
Total Events: 266462
Events/sec: 4440.70
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 155 MB
Avg Latency: 1.968296ms
P90 Latency: 2.729181ms
P95 Latency: 3.329033ms
P99 Latency: 4.878236ms
Bottom 10% Avg Latency: 3.768185ms
----------------------------------------
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc
1763397425682348 /tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:56
1763397426982581 /tmp/benchmark_khatru-sqlite_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 4 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 5 [ ]: NumTables: 01. Size: 94 MiB of 23 MiB. Score: 4.08->4.08 StaleData: 0 B Target FileSize: 122 MiB
Level 6 [ ]: NumTables: 04. Size: 230 MiB of 230 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 244 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:56
RELAY_NAME: khatru-sqlite
RELAY_URL: ws://khatru-sqlite:3334
TEST_TIMESTAMP: 2025-11-17T16:37:07+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s

View File

@@ -0,0 +1,311 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly-badger_8
Events: 50000, Workers: 24, Duration: 1m0s
1763396182850462 /tmp/benchmark_next-orly-badger_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56
1763396182853668 /tmp/benchmark_next-orly-badger_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56
1763396182853712 /tmp/benchmark_next-orly-badger_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56
1763396182854009 migrating to version 1... /build/pkg/database/migrations.go:66
1763396182854056 migrating to version 2... /build/pkg/database/migrations.go:73
1763396182854078 migrating to version 3... /build/pkg/database/migrations.go:80
1763396182854082 cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287
1763396182854129 cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332
1763396182854260 migrating to version 4... /build/pkg/database/migrations.go:87
1763396182854271 converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340
1763396182854295 found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429
1763396182854302 migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
2025/11/17 16:16:22 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2
1763396184854370 /tmp/benchmark_next-orly-badger_8: database warmup complete, ready to serve requests
/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56
Events saved: 50000/50000 (100.0%)
Duration: 5.666497805s
Events/sec: 8823.79
Avg latency: 2.020722ms
P90 latency: 2.645436ms
P95 latency: 2.995948ms
P99 latency: 4.460502ms
Bottom 10% Avg latency: 3.520179ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 5000 events in 352.025605ms
Burst completed: 5000 events in 363.623929ms
Burst completed: 5000 events in 367.475139ms
Burst completed: 5000 events in 396.276199ms
Burst completed: 5000 events in 334.007635ms
Burst completed: 5000 events in 342.086817ms
Burst completed: 5000 events in 360.687805ms
Burst completed: 5000 events in 392.627451ms
Burst completed: 5000 events in 397.635203ms
Burst completed: 5000 events in 376.061572ms
Burst test completed: 50000 events in 10.132858185s
Events/sec: 4934.44
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
1763396242843490 /tmp/benchmark_next-orly-badger_8: Block cache metrics: hit: 232171 miss: 337826 keys-added: 235144 keys-updated: 89642 keys-evicted: 235124 cost-added: 12615246695866 cost-evicted: 12614243474391 sets-dropped: 0 sets-rejected: 12961 gets-dropped: 1280 gets-kept: 568192 gets-total: 569997 hit-ratio: 0.41
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
Mixed test completed: 25000 writes, 25000 reads in 24.625333257s
Combined ops/sec: 2030.43
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 197562 queries in 1m0.011972513s
Queries/sec: 3292.04
Avg query latency: 5.52205ms
P95 query latency: 18.40165ms
P99 query latency: 32.139723ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 224870 operations (174870 queries, 50000 writes) in 1m0.006047854s
Operations/sec: 3747.46
Avg latency: 2.665369ms
Avg query latency: 2.866192ms
Avg write latency: 1.963009ms
P95 latency: 5.204253ms
P99 latency: 8.129537ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 50000/50000 (100.0%)
Duration: 5.145620568s
Events/sec: 9717.00
Avg latency: 1.788996ms
P90 latency: 2.241725ms
P95 latency: 2.442669ms
P99 latency: 3.110506ms
Bottom 10% Avg latency: 3.016821ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 5000 events in 362.292309ms
Burst completed: 5000 events in 446.105376ms
Burst completed: 5000 events in 414.443306ms
Burst completed: 5000 events in 378.792051ms
Burst completed: 5000 events in 381.274883ms
Burst completed: 5000 events in 397.941224ms
Burst completed: 5000 events in 449.109795ms
Burst completed: 5000 events in 410.566974ms
Burst completed: 5000 events in 385.220958ms
Burst completed: 5000 events in 383.149443ms
1763396419122547 /tmp/benchmark_next-orly-badger_8: [0] [E] LOG Compact 0->6 (8, 0 -> 4 tables with 1 splits). [00001 00002 00003 00004 00005 00006 00007 00008 . .] -> [00009 00010 00011 00012 .], took 3.061s
, deleted 1899050 bytes
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:1479 /build/pkg/database/logger.go:56
Burst test completed: 50000 events in 10.438224172s
Events/sec: 4790.09
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 25000 writes, 25000 reads in 24.485622359s
Combined ops/sec: 2042.01
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 293294 queries in 1m0.013023948s
Queries/sec: 4887.17
Avg query latency: 3.408294ms
P95 query latency: 10.965419ms
P99 query latency: 19.184675ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
1763396542843038 /tmp/benchmark_next-orly-badger_8: Block cache metrics: hit: 411640922 miss: 5406705 keys-added: 1627143 keys-updated: 3422501 keys-evicted: 1627125 cost-added: 84304242021549 cost-evicted: 84303233712402 sets-dropped: 0 sets-rejected: 295382 gets-dropped: 325582080 gets-kept: 91360192 gets-total: 417047650 hit-ratio: 0.99
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
Concurrent test completed: 254899 operations (204899 queries, 50000 writes) in 1m0.006656731s
Operations/sec: 4247.85
Avg latency: 2.125728ms
Avg query latency: 2.314927ms
Avg write latency: 1.350394ms
P95 latency: 3.778776ms
P99 latency: 5.393909ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 5.666497805s
Total Events: 50000
Events/sec: 8823.79
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 257 MB
Avg Latency: 2.020722ms
P90 Latency: 2.645436ms
P95 Latency: 2.995948ms
P99 Latency: 4.460502ms
Bottom 10% Avg Latency: 3.520179ms
----------------------------------------
Test: Burst Pattern
Duration: 10.132858185s
Total Events: 50000
Events/sec: 4934.44
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 122 MB
Avg Latency: 7.197024ms
P90 Latency: 12.546513ms
P95 Latency: 15.216454ms
P99 Latency: 23.682573ms
Bottom 10% Avg Latency: 18.172083ms
----------------------------------------
Test: Mixed Read/Write
Duration: 24.625333257s
Total Events: 50000
Events/sec: 2030.43
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 120 MB
Avg Latency: 467.389µs
P90 Latency: 914.891µs
P95 Latency: 1.0349ms
P99 Latency: 1.268268ms
Bottom 10% Avg Latency: 1.393626ms
----------------------------------------
Test: Query Performance
Duration: 1m0.011972513s
Total Events: 197562
Events/sec: 3292.04
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 152 MB
Avg Latency: 5.52205ms
P90 Latency: 12.226879ms
P95 Latency: 18.40165ms
P99 Latency: 32.139723ms
Bottom 10% Avg Latency: 20.985445ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.006047854s
Total Events: 224870
Events/sec: 3747.46
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 127 MB
Avg Latency: 2.665369ms
P90 Latency: 4.194993ms
P95 Latency: 5.204253ms
P99 Latency: 8.129537ms
Bottom 10% Avg Latency: 5.884586ms
----------------------------------------
Test: Peak Throughput
Duration: 5.145620568s
Total Events: 50000
Events/sec: 9717.00
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 522 MB
Avg Latency: 1.788996ms
P90 Latency: 2.241725ms
P95 Latency: 2.442669ms
P99 Latency: 3.110506ms
Bottom 10% Avg Latency: 3.016821ms
----------------------------------------
Test: Burst Pattern
Duration: 10.438224172s
Total Events: 50000
Events/sec: 4790.09
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 623 MB
Avg Latency: 9.406859ms
P90 Latency: 21.810715ms
P95 Latency: 35.119382ms
P99 Latency: 66.001509ms
Bottom 10% Avg Latency: 39.782175ms
----------------------------------------
Test: Mixed Read/Write
Duration: 24.485622359s
Total Events: 50000
Events/sec: 2042.01
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 165 MB
Avg Latency: 445.318µs
P90 Latency: 907.915µs
P95 Latency: 1.021172ms
P99 Latency: 1.227095ms
Bottom 10% Avg Latency: 1.265835ms
----------------------------------------
Test: Query Performance
Duration: 1m0.013023948s
Total Events: 293294
Events/sec: 4887.17
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 136 MB
Avg Latency: 3.408294ms
P90 Latency: 7.156129ms
P95 Latency: 10.965419ms
P99 Latency: 19.184675ms
Bottom 10% Avg Latency: 12.469832ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.006656731s
Total Events: 254899
Events/sec: 4247.85
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 147 MB
Avg Latency: 2.125728ms
P90 Latency: 3.131901ms
P95 Latency: 3.778776ms
P99 Latency: 5.393909ms
Bottom 10% Avg Latency: 4.22837ms
----------------------------------------
Report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.adoc
1763396593981772 /tmp/benchmark_next-orly-badger_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:56
1763396595378747 /tmp/benchmark_next-orly-badger_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 4 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 5 [ ]: NumTables: 01. Size: 94 MiB of 23 MiB. Score: 4.08->4.08 StaleData: 0 B Target FileSize: 122 MiB
Level 6 [ ]: NumTables: 04. Size: 230 MiB of 230 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 244 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:56
RELAY_NAME: next-orly-badger
RELAY_URL: ws://next-orly-badger:8080
TEST_TIMESTAMP: 2025-11-17T16:23:15+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s

View File

@@ -0,0 +1,323 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly-dgraph_8
Events: 50000, Workers: 24, Duration: 1m0s
1763396600574205 /tmp/benchmark_next-orly-dgraph_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:56
1763396600577795 /tmp/benchmark_next-orly-dgraph_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:56
1763396600577852 /tmp/benchmark_next-orly-dgraph_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:56
1763396600578216 migrating to version 1... /build/pkg/database/migrations.go:66
1763396600578287 migrating to version 2... /build/pkg/database/migrations.go:73
1763396600578319 migrating to version 3... /build/pkg/database/migrations.go:80
1763396600578325 cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:287
1763396600578334 cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:332
1763396600578350 migrating to version 4... /build/pkg/database/migrations.go:87
1763396600578355 converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:340
1763396600578372 found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:429
1763396600578378 migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:538
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
2025/11/17 16:23:20 INFO: Successfully loaded libsecp256k1 v5.0.0 from libsecp256k1.so.2
1763396602578437 /tmp/benchmark_next-orly-dgraph_8: database warmup complete, ready to serve requests
/usr/local/go/src/runtime/asm_amd64.s:1693 /build/pkg/database/logger.go:56
Events saved: 50000/50000 (100.0%)
Duration: 4.932431923s
Events/sec: 10136.99
Avg latency: 1.667317ms
P90 latency: 2.069461ms
P95 latency: 2.249895ms
P99 latency: 2.861303ms
Bottom 10% Avg latency: 2.592597ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 5000 events in 335.655402ms
Burst completed: 5000 events in 330.360552ms
Burst completed: 5000 events in 350.90491ms
Burst completed: 5000 events in 373.041958ms
Burst completed: 5000 events in 347.11564ms
Burst completed: 5000 events in 315.949199ms
Burst completed: 5000 events in 331.42993ms
Burst completed: 5000 events in 352.164361ms
Burst completed: 5000 events in 359.115619ms
Burst completed: 5000 events in 360.397544ms
Burst test completed: 50000 events in 9.808342155s
Events/sec: 5097.70
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 25000 writes, 25000 reads in 24.59623701s
Combined ops/sec: 2032.83
1763396660567060⚠ /tmp/benchmark_next-orly-dgraph_8: Block cache might be too small. Metrics: hit: 153935 miss: 305257 keys-added: 227607 keys-updated: 64636 keys-evicted: 227588 cost-added: 12452581576986 cost-evicted: 12451583862757 sets-dropped: 0 sets-rejected: 12954 gets-dropped: 256 gets-kept: 458496 gets-total: 459192 hit-ratio: 0.34
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:450 /build/pkg/database/logger.go:46
1763396660567121⚠ /tmp/benchmark_next-orly-dgraph_8: Cache life expectancy (in seconds):
-- Histogram:
Min value: 0
Max value: 11
Count: 227588
50p: 2.00
75p: 2.00
90p: 2.00
[0, 2) 227552 99.98% 99.98%
[8, 16) 36 0.02% 100.00%
--
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:451 /build/pkg/database/logger.go:46
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 221626 queries in 1m0.014161671s
Queries/sec: 3692.90
Avg query latency: 4.849059ms
P95 query latency: 15.966874ms
P99 query latency: 27.859712ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 235023 operations (185023 queries, 50000 writes) in 1m0.005568823s
Operations/sec: 3916.69
Avg latency: 2.401379ms
Avg query latency: 2.672573ms
Avg write latency: 1.397837ms
P95 latency: 4.398002ms
P99 latency: 6.207183ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 50000/50000 (100.0%)
Duration: 5.127096799s
Events/sec: 9752.11
Avg latency: 1.795821ms
P90 latency: 2.25461ms
P95 latency: 2.466785ms
P99 latency: 3.159176ms
Bottom 10% Avg latency: 3.072242ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 5000 events in 358.012209ms
Burst completed: 5000 events in 336.300441ms
Burst completed: 5000 events in 363.657063ms
Burst completed: 5000 events in 356.771817ms
Burst completed: 5000 events in 368.000986ms
Burst completed: 5000 events in 441.821658ms
Burst completed: 5000 events in 451.146122ms
Burst completed: 5000 events in 455.159014ms
Burst completed: 5000 events in 359.826504ms
Burst completed: 5000 events in 358.602207ms
1763396835570723 /tmp/benchmark_next-orly-dgraph_8: [6] [E] LOG Compact 0->6 (8, 0 -> 4 tables with 1 splits). [00001 00002 00003 00004 00005 00006 00007 00008 . .] -> [00009 00010 00011 00012 .], took 3.055s
, deleted 1901003 bytes
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:1479 /build/pkg/database/logger.go:56
Burst test completed: 50000 events in 10.25458455s
Events/sec: 4875.87
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 25000 writes, 25000 reads in 24.474786024s
Combined ops/sec: 2042.92
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 287727 queries in 1m0.012156857s
Queries/sec: 4794.48
Avg query latency: 3.504598ms
P95 query latency: 11.416502ms
P99 query latency: 19.871886ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
1763396960566384 /tmp/benchmark_next-orly-dgraph_8: Block cache metrics: hit: 436764091 miss: 4871096 keys-added: 1584381 keys-updated: 2919606 keys-evicted: 1584361 cost-added: 83226283032882 cost-evicted: 83225259887553 sets-dropped: 0 sets-rejected: 305847 gets-dropped: 344794880 gets-kept: 96734656 gets-total: 441635219 hit-ratio: 0.99
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:454 /build/pkg/database/logger.go:56
Concurrent test completed: 252209 operations (202209 queries, 50000 writes) in 1m0.008028818s
Operations/sec: 4202.92
Avg latency: 2.189461ms
Avg query latency: 2.337704ms
Avg write latency: 1.58994ms
P95 latency: 3.919323ms
P99 latency: 5.959314ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 4.932431923s
Total Events: 50000
Events/sec: 10136.99
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 432 MB
Avg Latency: 1.667317ms
P90 Latency: 2.069461ms
P95 Latency: 2.249895ms
P99 Latency: 2.861303ms
Bottom 10% Avg Latency: 2.592597ms
----------------------------------------
Test: Burst Pattern
Duration: 9.808342155s
Total Events: 50000
Events/sec: 5097.70
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 149 MB
Avg Latency: 3.805495ms
P90 Latency: 6.632151ms
P95 Latency: 8.069195ms
P99 Latency: 13.244195ms
Bottom 10% Avg Latency: 9.922762ms
----------------------------------------
Test: Mixed Read/Write
Duration: 24.59623701s
Total Events: 50000
Events/sec: 2032.83
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 121 MB
Avg Latency: 467.746µs
P90 Latency: 911.189µs
P95 Latency: 1.018554ms
P99 Latency: 1.250848ms
Bottom 10% Avg Latency: 1.345857ms
----------------------------------------
Test: Query Performance
Duration: 1m0.014161671s
Total Events: 221626
Events/sec: 3692.90
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 125 MB
Avg Latency: 4.849059ms
P90 Latency: 10.564822ms
P95 Latency: 15.966874ms
P99 Latency: 27.859712ms
Bottom 10% Avg Latency: 18.180391ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.005568823s
Total Events: 235023
Events/sec: 3916.69
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 177 MB
Avg Latency: 2.401379ms
P90 Latency: 3.659643ms
P95 Latency: 4.398002ms
P99 Latency: 6.207183ms
Bottom 10% Avg Latency: 4.857955ms
----------------------------------------
Test: Peak Throughput
Duration: 5.127096799s
Total Events: 50000
Events/sec: 9752.11
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 480 MB
Avg Latency: 1.795821ms
P90 Latency: 2.25461ms
P95 Latency: 2.466785ms
P99 Latency: 3.159176ms
Bottom 10% Avg Latency: 3.072242ms
----------------------------------------
Test: Burst Pattern
Duration: 10.25458455s
Total Events: 50000
Events/sec: 4875.87
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 621 MB
Avg Latency: 9.266976ms
P90 Latency: 24.12544ms
P95 Latency: 34.465042ms
P99 Latency: 55.446215ms
Bottom 10% Avg Latency: 37.317916ms
----------------------------------------
Test: Mixed Read/Write
Duration: 24.474786024s
Total Events: 50000
Events/sec: 2042.92
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 182 MB
Avg Latency: 452.46µs
P90 Latency: 909.806µs
P95 Latency: 1.014516ms
P99 Latency: 1.214797ms
Bottom 10% Avg Latency: 1.304994ms
----------------------------------------
Test: Query Performance
Duration: 1m0.012156857s
Total Events: 287727
Events/sec: 4794.48
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 150 MB
Avg Latency: 3.504598ms
P90 Latency: 7.480817ms
P95 Latency: 11.416502ms
P99 Latency: 19.871886ms
Bottom 10% Avg Latency: 12.934864ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.008028818s
Total Events: 252209
Events/sec: 4202.92
Success Rate: 100.0%
Concurrent Workers: 24
Memory Used: 98 MB
Avg Latency: 2.189461ms
P90 Latency: 3.213337ms
P95 Latency: 3.919323ms
P99 Latency: 5.959314ms
Bottom 10% Avg Latency: 4.521426ms
----------------------------------------
Report saved to: /tmp/benchmark_next-orly-dgraph_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly-dgraph_8/benchmark_report.adoc
1763397010410098 /tmp/benchmark_next-orly-dgraph_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:56
1763397011943178 /tmp/benchmark_next-orly-dgraph_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 4 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 61 MiB
Level 5 [ ]: NumTables: 01. Size: 94 MiB of 23 MiB. Score: 4.08->4.08 StaleData: 0 B Target FileSize: 122 MiB
Level 6 [ ]: NumTables: 04. Size: 230 MiB of 230 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 244 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:56
RELAY_NAME: next-orly-dgraph
RELAY_URL: ws://next-orly-dgraph:8080
TEST_TIMESTAMP: 2025-11-17T16:30:12+00:00
BENCHMARK_CONFIG:
Events: 50000
Workers: 24
Duration: 60s

View File

@@ -0,0 +1,19 @@
#!/bin/bash
# Run Badger benchmark with reduced cache sizes to avoid OOM
# Set reasonable cache sizes for benchmark
export ORLY_DB_BLOCK_CACHE_MB=256 # Reduced from 1024MB
export ORLY_DB_INDEX_CACHE_MB=128 # Reduced from 512MB
export ORLY_QUERY_CACHE_SIZE_MB=128 # Reduced from 512MB
# Clean up old data
rm -rf /tmp/benchmark_db_badger
echo "Running Badger benchmark with reduced cache sizes:"
echo " Block Cache: ${ORLY_DB_BLOCK_CACHE_MB}MB"
echo " Index Cache: ${ORLY_DB_INDEX_CACHE_MB}MB"
echo " Query Cache: ${ORLY_QUERY_CACHE_SIZE_MB}MB"
echo ""
# Run benchmark
./benchmark -events "${1:-1000}" -workers "${2:-4}" -datadir /tmp/benchmark_db_badger

View File

@@ -0,0 +1,25 @@
#!/bin/bash
# Wrapper script that cleans data directories with sudo before running benchmark
# Use this if you encounter permission errors with run-benchmark.sh
set -e
cd "$(dirname "$0")"
# Stop any running containers first
echo "Stopping any running benchmark containers..."
if docker compose version &> /dev/null 2>&1; then
docker compose down -v 2>&1 | grep -v "warning" || true
else
docker-compose down -v 2>&1 | grep -v "warning" || true
fi
# Clean data directories with sudo
if [ -d "data" ]; then
echo "Cleaning data directories (requires sudo)..."
sudo rm -rf data/
fi
# Now run the normal benchmark script
exec ./run-benchmark.sh

View File

@@ -0,0 +1,80 @@
#!/bin/bash
# Run benchmark for ORLY only (no other relays)
set -e
cd "$(dirname "$0")"
# Determine docker-compose command
if docker compose version &> /dev/null 2>&1; then
DOCKER_COMPOSE="docker compose"
else
DOCKER_COMPOSE="docker-compose"
fi
# Clean old data directories (may be owned by root from Docker)
if [ -d "data" ]; then
echo "Cleaning old data directories..."
if ! rm -rf data/ 2>/dev/null; then
echo ""
echo "ERROR: Cannot remove data directories due to permission issues."
echo "Please run: sudo rm -rf data/"
echo "Then run this script again."
exit 1
fi
fi
# Create fresh data directories with correct permissions
echo "Preparing data directories..."
mkdir -p data/next-orly
chmod 777 data/next-orly
echo "Building ORLY container..."
$DOCKER_COMPOSE build next-orly
echo "Starting ORLY relay..."
echo ""
# Start only next-orly and benchmark-runner
$DOCKER_COMPOSE up next-orly -d
# Wait for ORLY to be healthy
echo "Waiting for ORLY to be healthy..."
for i in {1..30}; do
if curl -sf http://localhost:8001/ > /dev/null 2>&1; then
echo "ORLY is ready!"
break
fi
sleep 2
if [ $i -eq 30 ]; then
echo "ERROR: ORLY failed to become healthy"
$DOCKER_COMPOSE logs next-orly
exit 1
fi
done
# Run benchmark against ORLY
echo ""
echo "Running benchmark against ORLY..."
echo "Target: http://localhost:8001"
echo ""
# Run the benchmark binary directly against the running ORLY instance
docker run --rm --network benchmark_benchmark-net \
-e BENCHMARK_TARGETS=next-orly:8080 \
-e BENCHMARK_EVENTS=10000 \
-e BENCHMARK_WORKERS=24 \
-e BENCHMARK_DURATION=20s \
-v "$(pwd)/reports:/reports" \
benchmark-benchmark-runner \
/app/benchmark-runner --output-dir=/reports
echo ""
echo "Benchmark complete!"
echo "Stopping ORLY..."
$DOCKER_COMPOSE down
echo ""
echo "Results saved to ./reports/"
echo "Check the latest run_* directory for detailed results."

46
cmd/benchmark/run-benchmark.sh Executable file
View File

@@ -0,0 +1,46 @@
#!/bin/bash
# Wrapper script to run the benchmark suite and automatically shut down when complete
set -e
# Determine docker-compose command
if docker compose version &> /dev/null 2>&1; then
DOCKER_COMPOSE="docker compose"
else
DOCKER_COMPOSE="docker-compose"
fi
# Clean old data directories (may be owned by root from Docker)
if [ -d "data" ]; then
echo "Cleaning old data directories..."
if ! rm -rf data/ 2>/dev/null; then
# If normal rm fails (permission denied), provide clear instructions
echo ""
echo "ERROR: Cannot remove data directories due to permission issues."
echo "This happens because Docker creates files as root."
echo ""
echo "Please run one of the following to clean up:"
echo " sudo rm -rf data/"
echo " sudo chown -R \$(id -u):\$(id -g) data/ && rm -rf data/"
echo ""
echo "Then run this script again."
exit 1
fi
fi
# Create fresh data directories with correct permissions
echo "Preparing data directories..."
mkdir -p data/{next-orly-badger,next-orly-dgraph,dgraph-zero,dgraph-alpha,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,postgres}
chmod 777 data/{next-orly-badger,next-orly-dgraph,dgraph-zero,dgraph-alpha,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,postgres}
echo "Starting benchmark suite..."
echo "This will automatically shut down all containers when the benchmark completes."
echo ""
# Run docker compose with flags to exit when benchmark-runner completes
$DOCKER_COMPOSE up --exit-code-from benchmark-runner --abort-on-container-exit
echo ""
echo "Benchmark suite has completed and all containers have been stopped."
echo "Check the ./reports/ directory for results."

41
cmd/benchmark/run-profile.sh Executable file
View File

@@ -0,0 +1,41 @@
#!/bin/bash
# Run benchmark with profiling on ORLY only
set -e
# Determine docker-compose command
if docker compose version &> /dev/null 2>&1; then
DOCKER_COMPOSE="docker compose"
else
DOCKER_COMPOSE="docker-compose"
fi
# Clean up old data and profiles (may need sudo for Docker-created files)
echo "Cleaning old data and profiles..."
if [ -d "data/next-orly" ]; then
if ! rm -rf data/next-orly/* 2>/dev/null; then
echo "Need elevated permissions to clean data directories..."
sudo rm -rf data/next-orly/*
fi
fi
rm -rf profiles/* 2>/dev/null || sudo rm -rf profiles/* 2>/dev/null || true
mkdir -p data/next-orly profiles
chmod 777 data/next-orly 2>/dev/null || true
echo "Starting profiled benchmark (ORLY only)..."
echo "- 50,000 events"
echo "- 24 workers"
echo "- 90 second warmup delay"
echo "- CPU profiling enabled"
echo "- pprof HTTP on port 6060"
echo ""
# Run docker compose with profile config
$DOCKER_COMPOSE -f docker-compose.profile.yml up \
--exit-code-from benchmark-runner \
--abort-on-container-exit
echo ""
echo "Benchmark complete. Profiles saved to ./profiles/"
echo "Results saved to ./reports/"

View File

@@ -8,20 +8,24 @@ import (
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/interfaces/signer/p8k"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/interfaces/signer/p8k"
"next.orly.dev/pkg/protocol/ws"
)
func main() {
var err error
url := flag.String("url", "ws://127.0.0.1:3334", "relay websocket URL")
timeout := flag.Duration("timeout", 20*time.Second, "publish timeout")
timeout := flag.Duration("timeout", 20*time.Second, "operation timeout")
testType := flag.String("type", "event", "test type: 'event' for write control, 'req' for read control, 'both' for both, 'publish-and-query' for full test")
eventKind := flag.Int("kind", 4678, "event kind to test")
numEvents := flag.Int("count", 2, "number of events to publish (for publish-and-query)")
flag.Parse()
// Minimal client that publishes a single kind 4678 event and reports OK/err
// Connect to relay
var rl *ws.Client
if rl, err = ws.RelayConnect(context.Background(), *url); chk.E(err) {
log.E.F("connect error: %v", err)
@@ -29,6 +33,7 @@ func main() {
}
defer rl.Close()
// Create signer
var signer *p8k.Signer
if signer, err = p8k.New(); chk.E(err) {
log.E.F("signer create error: %v", err)
@@ -39,26 +44,186 @@ func main() {
return
}
// Perform tests based on type
switch *testType {
case "event":
testEventWrite(rl, signer, *eventKind, *timeout)
case "req":
testReqRead(rl, signer, *eventKind, *timeout)
case "both":
log.I.Ln("Testing EVENT (write control)...")
testEventWrite(rl, signer, *eventKind, *timeout)
log.I.Ln("\nTesting REQ (read control)...")
testReqRead(rl, signer, *eventKind, *timeout)
case "publish-and-query":
testPublishAndQuery(rl, signer, *eventKind, *numEvents, *timeout)
default:
log.E.F("invalid test type: %s (must be 'event', 'req', 'both', or 'publish-and-query')", *testType)
}
}
func testEventWrite(rl *ws.Client, signer *p8k.Signer, eventKind int, timeout time.Duration) {
ev := &event.E{
CreatedAt: time.Now().Unix(),
Kind: kind.K{K: 4678}.K, // arbitrary custom kind
Kind: uint16(eventKind),
Tags: tag.NewS(),
Content: []byte("policy test: expect rejection"),
Content: []byte("policy test: expect rejection for write"),
}
if err = ev.Sign(signer); chk.E(err) {
if err := ev.Sign(signer); chk.E(err) {
log.E.F("sign error: %v", err)
return
}
ctx, cancel := context.WithTimeout(context.Background(), *timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
if err = rl.Publish(ctx, ev); err != nil {
if err := rl.Publish(ctx, ev); err != nil {
// Expected path if policy rejects: client returns error with reason (from OK false)
fmt.Println("policy reject:", err)
fmt.Println("EVENT policy reject:", err)
return
}
log.I.Ln("publish result: accepted")
fmt.Println("ACCEPT")
log.I.Ln("EVENT publish result: accepted")
fmt.Println("EVENT ACCEPT")
}
func testReqRead(rl *ws.Client, signer *p8k.Signer, eventKind int, timeout time.Duration) {
// First, publish a test event to the relay that we'll try to query
testEvent := &event.E{
CreatedAt: time.Now().Unix(),
Kind: uint16(eventKind),
Tags: tag.NewS(),
Content: []byte("policy test: event for read control test"),
}
if err := testEvent.Sign(signer); chk.E(err) {
log.E.F("sign error: %v", err)
return
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
// Try to publish the test event first (ignore errors if policy rejects)
_ = rl.Publish(ctx, testEvent)
log.I.F("published test event kind %d for read testing", eventKind)
// Now try to query for events of this kind
limit := uint(10)
f := &filter.F{
Kinds: kind.FromIntSlice([]int{eventKind}),
Limit: &limit,
}
ctx2, cancel2 := context.WithTimeout(context.Background(), timeout)
defer cancel2()
events, err := rl.QuerySync(ctx2, f)
if chk.E(err) {
log.E.F("query error: %v", err)
fmt.Println("REQ query error:", err)
return
}
// Check if we got the expected events
if len(events) == 0 {
// Could mean policy filtered it out, or it wasn't stored
fmt.Println("REQ policy reject: no events returned (filtered by read policy)")
log.I.F("REQ result: no events of kind %d returned (policy filtered or not stored)", eventKind)
return
}
// Events were returned - read access allowed
fmt.Printf("REQ ACCEPT: %d events returned\n", len(events))
log.I.F("REQ result: %d events of kind %d returned", len(events), eventKind)
}
func testPublishAndQuery(rl *ws.Client, signer *p8k.Signer, eventKind int, numEvents int, timeout time.Duration) {
log.I.F("Publishing %d events of kind %d...", numEvents, eventKind)
publishedIDs := make([][]byte, 0, numEvents)
acceptedCount := 0
rejectedCount := 0
// Publish multiple events
for i := 0; i < numEvents; i++ {
ev := &event.E{
CreatedAt: time.Now().Unix() + int64(i), // Slightly different timestamps
Kind: uint16(eventKind),
Tags: tag.NewS(),
Content: []byte(fmt.Sprintf("policy test event %d/%d", i+1, numEvents)),
}
if err := ev.Sign(signer); chk.E(err) {
log.E.F("sign error for event %d: %v", i+1, err)
continue
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
err := rl.Publish(ctx, ev)
cancel()
if err != nil {
log.W.F("Event %d/%d rejected: %v", i+1, numEvents, err)
rejectedCount++
} else {
log.I.F("Event %d/%d published successfully (id: %x...)", i+1, numEvents, ev.ID[:8])
publishedIDs = append(publishedIDs, ev.ID)
acceptedCount++
}
}
fmt.Printf("PUBLISH: %d accepted, %d rejected out of %d total\n", acceptedCount, rejectedCount, numEvents)
if acceptedCount == 0 {
fmt.Println("No events were accepted, skipping query test")
return
}
// Wait a moment for events to be stored
time.Sleep(500 * time.Millisecond)
// Now query for events of this kind
log.I.F("Querying for events of kind %d...", eventKind)
limit := uint(100)
f := &filter.F{
Kinds: kind.FromIntSlice([]int{eventKind}),
Limit: &limit,
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
events, err := rl.QuerySync(ctx, f)
if chk.E(err) {
log.E.F("query error: %v", err)
fmt.Println("QUERY ERROR:", err)
return
}
log.I.F("Query returned %d events", len(events))
// Check if we got our published events back
foundCount := 0
for _, pubID := range publishedIDs {
found := false
for _, ev := range events {
if string(ev.ID) == string(pubID) {
found = true
break
}
}
if found {
foundCount++
}
}
fmt.Printf("QUERY: found %d/%d published events (total returned: %d)\n", foundCount, len(publishedIDs), len(events))
if foundCount == len(publishedIDs) {
fmt.Println("SUCCESS: All published events were retrieved")
} else if foundCount > 0 {
fmt.Printf("PARTIAL: Only %d/%d events retrieved (some filtered by read policy?)\n", foundCount, len(publishedIDs))
} else {
fmt.Println("FAILURE: None of the published events were retrieved (read policy blocked?)")
}
}

View File

@@ -27,7 +27,7 @@ docker run -d \
-v /data/orly-relay:/data \
-e ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx \
-e ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z,npub1m4ny6hjqzepn4rxknuq94c2gpqzr29ufkkw7ttcxyak7v43n6vvsajc2jl \
-e ORLY_BOOTSTRAP_RELAYS=wss://profiles.nostr1.com,wss://purplepag.es,wss://relay.nostr.band,wss://relay.damus.io \
-e ORLY_BOOTSTRAP_RELAYS=wss://profiles.nostr1.com,wss://purplepag.es,wss://relay.damus.io \
-e ORLY_RELAY_URL=wss://orly-relay.imwald.eu \
-e ORLY_ACL_MODE=follows \
-e ORLY_SUBSCRIPTION_ENABLED=false \

View File

@@ -28,7 +28,7 @@ services:
- ORLY_ACL_MODE=follows
# Bootstrap relay URLs for initial sync
- ORLY_BOOTSTRAP_RELAYS=wss://profiles.nostr1.com,wss://purplepag.es,wss://relay.nostr.band,wss://relay.damus.io
- ORLY_BOOTSTRAP_RELAYS=wss://profiles.nostr1.com,wss://purplepag.es,wss://relay.damus.io
# Subscription Settings (optional)
- ORLY_SUBSCRIPTION_ENABLED=false

406
docs/NEO4J_BACKEND.md Normal file
View File

@@ -0,0 +1,406 @@
# Neo4j Database Backend for ORLY Relay
## Overview
The Neo4j database backend provides a graph-native storage solution for the ORLY Nostr relay. Unlike traditional key-value or document stores, Neo4j is optimized for relationship-heavy queries, making it an ideal fit for Nostr's social graph and event reference patterns.
## Architecture
### Core Components
1. **Main Database File** ([pkg/neo4j/neo4j.go](../pkg/neo4j/neo4j.go))
- Implements the `database.Database` interface
- Manages Neo4j driver connection and lifecycle
- Uses Badger for metadata storage (markers, identity, subscriptions)
- Registers with the database factory via `init()`
2. **Schema Management** ([pkg/neo4j/schema.go](../pkg/neo4j/schema.go))
- Defines Neo4j constraints and indexes using Cypher
- Creates unique constraints on Event IDs and Author pubkeys
- Indexes for optimal query performance (kind, created_at, tags)
3. **Query Engine** ([pkg/neo4j/query-events.go](../pkg/neo4j/query-events.go))
- Translates Nostr REQ filters to Cypher queries
- Leverages graph traversal for tag relationships
- Supports prefix matching for IDs and pubkeys
- Parameterized queries for security and performance
4. **Event Storage** ([pkg/neo4j/save-event.go](../pkg/neo4j/save-event.go))
- Stores events as nodes with properties
- Creates graph relationships:
- `AUTHORED_BY`: Event → Author
- `REFERENCES`: Event → Event (e-tags)
- `MENTIONS`: Event → Author (p-tags)
- `TAGGED_WITH`: Event → Tag
## Graph Schema
### Node Types
**Event Node**
```cypher
(:Event {
id: string, // Hex-encoded event ID (32 bytes)
serial: int, // Sequential serial number
kind: int, // Event kind
created_at: int, // Unix timestamp
content: string, // Event content
sig: string, // Hex-encoded signature
pubkey: string, // Hex-encoded author pubkey
tags: string // JSON-encoded tags array
})
```
**Author Node**
```cypher
(:Author {
pubkey: string // Hex-encoded pubkey (unique)
})
```
**Tag Node**
```cypher
(:Tag {
type: string, // Tag type (e.g., "t", "d")
value: string // Tag value
})
```
**Marker Node** (for metadata)
```cypher
(:Marker {
key: string, // Unique key
value: string // Hex-encoded value
})
```
### Relationships
- `(:Event)-[:AUTHORED_BY]->(:Author)` - Event authorship
- `(:Event)-[:REFERENCES]->(:Event)` - Event references (e-tags)
- `(:Event)-[:MENTIONS]->(:Author)` - Author mentions (p-tags)
- `(:Event)-[:TAGGED_WITH]->(:Tag)` - Generic tag associations
## How Nostr REQ Messages Are Implemented
### Filter to Cypher Translation
The query engine in [query-events.go](../pkg/neo4j/query-events.go) translates Nostr filters to Cypher queries:
#### 1. ID Filters
```json
{"ids": ["abc123..."]}
```
Becomes:
```cypher
MATCH (e:Event)
WHERE e.id = $id_0
```
For prefix matching (partial IDs):
```cypher
WHERE e.id STARTS WITH $id_0
```
#### 2. Author Filters
```json
{"authors": ["pubkey1...", "pubkey2..."]}
```
Becomes:
```cypher
MATCH (e:Event)
WHERE e.pubkey IN $authors
```
#### 3. Kind Filters
```json
{"kinds": [1, 7]}
```
Becomes:
```cypher
MATCH (e:Event)
WHERE e.kind IN $kinds
```
#### 4. Time Range Filters
```json
{"since": 1234567890, "until": 1234567900}
```
Becomes:
```cypher
MATCH (e:Event)
WHERE e.created_at >= $since AND e.created_at <= $until
```
#### 5. Tag Filters (Graph Advantage!)
```json
{"#t": ["bitcoin", "nostr"]}
```
Becomes:
```cypher
MATCH (e:Event)
OPTIONAL MATCH (e)-[:TAGGED_WITH]->(t0:Tag)
WHERE t0.type = $tagType_0 AND t0.value IN $tagValues_0
```
This leverages Neo4j's native graph traversal for efficient tag queries!
#### 6. Combined Filters
```json
{
"kinds": [1],
"authors": ["abc..."],
"#p": ["xyz..."],
"limit": 50
}
```
Becomes:
```cypher
MATCH (e:Event)
OPTIONAL MATCH (e)-[:TAGGED_WITH]->(t0:Tag)
WHERE e.kind IN $kinds
AND e.pubkey IN $authors
AND t0.type = $tagType_0
AND t0.value IN $tagValues_0
RETURN e.id, e.kind, e.created_at, e.content, e.sig, e.pubkey, e.tags
ORDER BY e.created_at DESC
LIMIT $limit
```
### Query Execution Flow
1. **Parse Filter**: Extract IDs, authors, kinds, times, tags
2. **Build Cypher**: Construct parameterized query with MATCH/WHERE clauses
3. **Execute**: Run via `ExecuteRead()` with read-only session
4. **Parse Results**: Convert Neo4j records to Nostr events
5. **Return**: Send events back to client
## Configuration
### Environment Variables
```bash
# Neo4j Connection
ORLY_NEO4J_URI="bolt://localhost:7687"
ORLY_NEO4J_USER="neo4j"
ORLY_NEO4J_PASSWORD="password"
# Database Type Selection
ORLY_DB_TYPE="neo4j"
# Data Directory (for Badger metadata storage)
ORLY_DATA_DIR="~/.local/share/ORLY"
```
### Example Docker Compose Setup
```yaml
version: '3.8'
services:
neo4j:
image: neo4j:5.15
ports:
- "7474:7474" # HTTP
- "7687:7687" # Bolt
environment:
- NEO4J_AUTH=neo4j/password
- NEO4J_PLUGINS=["apoc"]
volumes:
- neo4j_data:/data
- neo4j_logs:/logs
orly:
build: .
ports:
- "3334:3334"
environment:
- ORLY_DB_TYPE=neo4j
- ORLY_NEO4J_URI=bolt://neo4j:7687
- ORLY_NEO4J_USER=neo4j
- ORLY_NEO4J_PASSWORD=password
depends_on:
- neo4j
volumes:
neo4j_data:
neo4j_logs:
```
## Performance Considerations
### Advantages Over Badger/DGraph
1. **Native Graph Queries**: Tag relationships and social graph traversals are native operations
2. **Optimized Indexes**: Automatic index usage for constrained properties
3. **Efficient Joins**: Relationship traversals are O(1) lookups
4. **Query Planner**: Neo4j's query planner optimizes complex multi-filter queries
### Tuning Recommendations
1. **Indexes**: The schema creates indexes for:
- Event ID (unique constraint + index)
- Event kind
- Event created_at
- Composite: kind + created_at
- Tag type + value
2. **Cache Configuration**: Configure Neo4j's page cache and heap size:
```conf
# neo4j.conf
dbms.memory.heap.initial_size=2G
dbms.memory.heap.max_size=4G
dbms.memory.pagecache.size=4G
```
3. **Query Limits**: Always use LIMIT in queries to prevent memory exhaustion
## Implementation Details
### Replaceable Events
Replaceable events (kinds 0, 3, 10000-19999) are handled in `WouldReplaceEvent()`:
```cypher
MATCH (e:Event {kind: $kind, pubkey: $pubkey})
WHERE e.created_at < $createdAt
RETURN e.serial, e.created_at
```
Older events are deleted before saving the new one.
### Parameterized Replaceable Events
For kinds 30000-39999, we also match on the d-tag:
```cypher
MATCH (e:Event {kind: $kind, pubkey: $pubkey})-[:TAGGED_WITH]->(t:Tag {type: 'd', value: $dValue})
WHERE e.created_at < $createdAt
RETURN e.serial
```
### Event Deletion (NIP-09)
Delete events (kind 5) are processed via graph traversal:
```cypher
MATCH (target:Event {id: $targetId})
MATCH (delete:Event {kind: 5})-[:REFERENCES]->(target)
WHERE delete.pubkey = $pubkey OR delete.pubkey IN $admins
RETURN delete.id
```
Only same-author or admin deletions are allowed.
## Comparison with Other Backends
| Feature | Badger | DGraph | Neo4j |
|---------|--------|--------|-------|
| **Storage Type** | Key-value | Graph (distributed) | Graph (native) |
| **Query Language** | Custom indexes | DQL | Cypher |
| **Tag Queries** | Index lookups | Graph traversal | Native relationships |
| **Scaling** | Single-node | Distributed | Cluster/Causal cluster |
| **Memory Usage** | Low | Medium | High |
| **Setup Complexity** | Minimal | Medium | Medium |
| **Best For** | Small relays | Large distributed | Relationship-heavy |
## Development Guide
### Adding New Indexes
1. Update [schema.go](../pkg/neo4j/schema.go) with new index definition
2. Add to `applySchema()` function
3. Restart relay to apply schema changes
Example:
```cypher
CREATE INDEX event_content_fulltext IF NOT EXISTS
FOR (e:Event) ON (e.content)
OPTIONS {indexConfig: {`fulltext.analyzer`: 'english'}}
```
### Custom Queries
To add custom query methods:
1. Add method to [query-events.go](../pkg/neo4j/query-events.go)
2. Build Cypher query with parameterization
3. Use `ExecuteRead()` or `ExecuteWrite()` as appropriate
4. Parse results with `parseEventsFromResult()`
### Testing
Due to Neo4j dependency, tests require a running Neo4j instance:
```bash
# Start Neo4j via Docker
docker run -d --name neo4j-test \
-p 7687:7687 \
-e NEO4J_AUTH=neo4j/test \
neo4j:5.15
# Run tests
ORLY_NEO4J_URI="bolt://localhost:7687" \
ORLY_NEO4J_USER="neo4j" \
ORLY_NEO4J_PASSWORD="test" \
go test ./pkg/neo4j/...
# Cleanup
docker rm -f neo4j-test
```
## Future Enhancements
1. **Full-text Search**: Leverage Neo4j's full-text indexes for content search
2. **Graph Analytics**: Implement social graph metrics (centrality, communities)
3. **Advanced Queries**: Support NIP-50 search via Cypher full-text capabilities
4. **Clustering**: Deploy Neo4j cluster for high availability
5. **APOC Procedures**: Utilize APOC library for advanced graph algorithms
6. **Caching Layer**: Implement query result caching similar to Badger backend
## Troubleshooting
### Connection Issues
```bash
# Test connectivity
cypher-shell -a bolt://localhost:7687 -u neo4j -p password
# Check Neo4j logs
docker logs neo4j
```
### Performance Issues
```cypher
// View query execution plan
EXPLAIN MATCH (e:Event) WHERE e.kind = 1 RETURN e LIMIT 10
// Profile query performance
PROFILE MATCH (e:Event)-[:AUTHORED_BY]->(a:Author) RETURN e, a LIMIT 10
```
### Schema Issues
```cypher
// List all constraints
SHOW CONSTRAINTS
// List all indexes
SHOW INDEXES
// Drop and recreate schema
DROP CONSTRAINT event_id_unique IF EXISTS
CREATE CONSTRAINT event_id_unique FOR (e:Event) REQUIRE e.id IS UNIQUE
```
## References
- [Neo4j Documentation](https://neo4j.com/docs/)
- [Cypher Query Language](https://neo4j.com/docs/cypher-manual/current/)
- [Neo4j Go Driver](https://neo4j.com/docs/go-manual/current/)
- [Graph Database Patterns](https://neo4j.com/developer/graph-db-vs-rdbms/)
- [Nostr Protocol (NIP-01)](https://github.com/nostr-protocol/nips/blob/master/01.md)
## License
This Neo4j backend implementation follows the same license as the ORLY relay project.

View File

@@ -361,6 +361,279 @@ Place scripts in a secure location and reference them in policy:
Ensure scripts are executable and have appropriate permissions.
### Script Requirements and Best Practices
#### Critical Requirements
**1. Output Only JSON to stdout**
Scripts MUST write ONLY JSON responses to stdout. Any other output (debug messages, logs, etc.) will break the JSONL protocol and cause errors.
**Debug Output**: Use stderr for debug messages - all stderr output from policy scripts is automatically logged to the relay log with the prefix `[policy script /path/to/script]`.
```javascript
// ❌ WRONG - This will cause "broken pipe" errors
console.log("Policy script starting..."); // This goes to stdout!
console.log(JSON.stringify(response)); // Correct
// ✅ CORRECT - Use stderr or file for debug output
console.error("Policy script starting..."); // This goes to stderr (appears in relay log)
fs.appendFileSync('/tmp/policy.log', 'Starting...\n'); // This goes to file (OK)
console.log(JSON.stringify(response)); // Stdout for JSON only
```
**2. Flush stdout After Each Response**
Always flush stdout after writing a response to ensure immediate delivery:
```python
# Python
print(json.dumps(response))
sys.stdout.flush() # Critical!
```
```javascript
// Node.js (usually automatic, but can be forced)
process.stdout.write(JSON.stringify(response) + '\n');
```
**3. Run as a Long-Lived Process**
Scripts should run continuously, reading from stdin in a loop. They should NOT:
- Exit after processing one event
- Use batch processing
- Close stdin/stdout prematurely
```javascript
// ✅ CORRECT - Long-lived process
const readline = require('readline');
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout,
terminal: false
});
rl.on('line', (line) => {
const event = JSON.parse(line);
const response = processEvent(event);
console.log(JSON.stringify(response));
});
```
**4. Handle Errors Gracefully**
Always catch errors and return a valid JSON response:
```javascript
rl.on('line', (line) => {
try {
const event = JSON.parse(line);
const response = processEvent(event);
console.log(JSON.stringify(response));
} catch (err) {
// Log to stderr or file, not stdout!
console.error(`Error: ${err.message}`);
// Return reject response
console.log(JSON.stringify({
id: '',
action: 'reject',
msg: 'Policy script error'
}));
}
});
```
**5. Response Format**
Every response MUST include these fields:
```json
{
"id": "event_id", // Must match input event ID
"action": "accept", // Must be: accept, reject, or shadowReject
"msg": "" // Required (can be empty string)
}
```
#### Common Issues and Solutions
**Broken Pipe Error**
```
ERROR: policy script /path/to/script.js stdin closed (broken pipe)
```
**Causes:**
- Script exited prematurely
- Script wrote non-JSON output to stdout
- Script crashed or encountered an error
- Script closed stdin/stdout incorrectly
**Solutions:**
1. Remove ALL `console.log()` statements except JSON responses
2. Use `console.error()` or log files for debugging
3. Add error handling to catch and log exceptions
4. Ensure script runs continuously (doesn't exit)
**Response Timeout**
```
WARN: policy script /path/to/script.js response timeout - script may not be responding correctly
```
**Causes:**
- Script not flushing stdout
- Script processing taking > 5 seconds
- Script not responding to input
- Non-JSON output consuming a response slot
**Solutions:**
1. Add `sys.stdout.flush()` (Python) after each response
2. Optimize processing logic to be faster
3. Check that script is reading from stdin correctly
4. Remove debug output from stdout
**Invalid JSON Response**
```
ERROR: failed to parse policy response from /path/to/script.js
WARN: policy script produced non-JSON output on stdout: "Debug message"
```
**Solutions:**
1. Validate JSON before outputting
2. Use a JSON library, don't build strings manually
3. Move debug output to stderr or files
#### Testing Your Script
Before deploying, test your script:
```bash
# 1. Test basic functionality
echo '{"id":"test123","pubkey":"abc","kind":1,"content":"test","tags":[],"created_at":1234567890,"sig":"def"}' | node policy-script.js
# 2. Check for non-JSON output
echo '{"id":"test123","pubkey":"abc","kind":1,"content":"test","tags":[],"created_at":1234567890,"sig":"def"}' | node policy-script.js 2>/dev/null | jq .
# 3. Test error handling
echo 'invalid json' | node policy-script.js
```
Expected output (valid JSON only):
```json
{"id":"test123","action":"accept","msg":""}
```
#### Node.js Example (Complete)
```javascript
#!/usr/bin/env node
const readline = require('readline');
// Use stderr for debug logging - appears in relay log automatically
function debug(msg) {
console.error(`[policy] ${msg}`);
}
// Create readline interface
const rl = readline.createInterface({
input: process.stdin,
output: process.stdout,
terminal: false
});
debug('Policy script started');
// Process each event
rl.on('line', (line) => {
try {
const event = JSON.parse(line);
debug(`Processing event ${event.id}, kind: ${event.kind}, access: ${event.access_type}`);
// Your policy logic here
const action = shouldAccept(event) ? 'accept' : 'reject';
if (action === 'reject') {
debug(`Rejected event ${event.id}: policy violation`);
}
// ONLY JSON to stdout
console.log(JSON.stringify({
id: event.id,
action: action,
msg: action === 'reject' ? 'Policy rejected' : ''
}));
} catch (err) {
debug(`Error: ${err.message}`);
// Still return valid JSON
console.log(JSON.stringify({
id: '',
action: 'reject',
msg: 'Policy script error'
}));
}
});
rl.on('close', () => {
debug('Policy script stopped');
});
function shouldAccept(event) {
// Your policy logic
if (event.content.toLowerCase().includes('spam')) {
return false;
}
// Different logic for read vs write
if (event.access_type === 'write') {
// Write control logic
return event.content.length < 10000;
} else if (event.access_type === 'read') {
// Read control logic
return true; // Allow all reads
}
return true;
}
```
**Relay Log Output Example:**
```
INFO [policy script /home/orly/.config/ORLY/policy.js] [policy] Policy script started
INFO [policy script /home/orly/.config/ORLY/policy.js] [policy] Processing event abc123, kind: 1, access: write
INFO [policy script /home/orly/.config/ORLY/policy.js] [policy] Processing event def456, kind: 1, access: read
```
#### Event Fields
Scripts receive additional context fields:
```json
{
"id": "event_id",
"pubkey": "author_pubkey",
"kind": 1,
"content": "Event content",
"tags": [],
"created_at": 1234567890,
"sig": "signature",
"logged_in_pubkey": "authenticated_user_pubkey",
"ip_address": "127.0.0.1",
"access_type": "read"
}
```
**access_type values:**
- `"write"`: Event is being stored (EVENT message)
- `"read"`: Event is being retrieved (REQ message)
Use this to implement different policies for reads vs writes.
## Policy Evaluation Order
Events are evaluated in this order:

View File

@@ -0,0 +1,187 @@
Reiser4 had *several* ideas that were too radical for Linux in the 2000s, but **would make a lot of sense today in a modern CoW (copy-on-write) filesystem**—especially one designed for immutable or content-addressed data.
Below is a distilled list of the Reiser4 concepts that *could* be successfully revived and integrated into a next-generation CoW filesystem, along with why they now make more sense and how they would fit.
---
# ✅ **1. Item/extent subtypes (structured metadata records)**
Reiser4 had “item types” that stored different structures within B-tree leaves (e.g., stat-data items, directory items, tail items).
Most filesystems today use coarse-grained extents and metadata blocks—but structured, typed leaf contents provide clear benefits:
### Why it makes sense today:
* CoW filesystems like **APFS**, **Btrfs**, and **ZFS** already have *typed nodes* internally (extent items, dir items).
* Typed leaf records allow:
* Faster parsing
* Future expansion of features
* Better layout for small objects
* Potential content-addressed leaves
A modern CoW filesystem could revive this idea by allowing different **record kinds** within leaf blocks, with stable, versioned formats.
---
# ✅ **2. Fine-grained small-file optimizations—but integrated with CoW**
Reiser4s small-file packing was too complicated for mutable trees, but in a CoW filesystem it fits perfectly:
### In CoW:
* Leaves are immutable once written.
* Small files can be stored **inline** inside a leaf, or as small extents.
* Deduplication is easier due to immutability.
* Crash consistency is automatic.
### What makes sense to revive:
* Tail-packing / inline-data for files below a threshold
* Possibly grouping many tiny files into a single CoW extent tree page
* Using a “small-files leaf type” with fixed slots
This aligns closely with APFSs and Btrfss inline extents but could go further—safely—because of CoW.
---
# ✅ **3. Semantic plugins *outside the kernel***
Reiser4s plugin system failed because it tried to put a framework *inside the kernel*.
But moving that logic **outside** (as user-space metadata layers or FUSE-like transforms) is realistic today.
### Possible modern implementation:
* A CoW filesystem exposes stable metadata + data primitives.
* User-space “semantic layers” do:
* per-directory views
* virtual inodes
* attribute-driven namespace merges
* versioned or content-addressed overlays
### Why it makes sense:
* User-space is safer and maintainers accept it.
* CoW makes such layers more reliable and more composable.
* Many systems already do this:
* OSTree
* Git virtual filesystem
* container overlayfs
* CephFS metadata layers
The spirit of Reiser4s semantics CAN live on—just not in-kernel.
---
# ✅ **4. Content-addressable objects + trees (Reiser4-like keys)**
Reiser4 had “keyed items” in a tree, which map closely to modern content-addressable storage strategies.
A modern CoW FS could:
* Store leaf blocks by **hash of contents**
* Use stable keyed addressing for trees
* Deduplicate at leaf granularity
* Provide Git/OSTree-style guarantees natively
This is very powerful for immutable or append-only workloads.
### Why it's feasible now:
* Fast hashing hardware
* Widespread use of snapshots, clones, dedupe
* Object-based designs in modern systems (e.g., bcachefs, ZFS)
Reiser4 was ahead of its time here.
---
# ✅ **5. Rich directory structures (hash trees)**
Reiser4s directory semantics were much more flexible, including:
* Extensible directory entries
* Small-directory embedding
* Very fast operations on large directories
Most CoW FSes today use coarse directory structures.
A modern CoW FS could adopt:
* Fixed-format hashed directories for fast lookup
* Optional richer metadata per entry
* Inline storage of tiny directories
Essentially, a more flexible but POSIX-compliant version of Reiser4 directories.
---
# ✅ **6. Atomic multi-item updates via “transaction items”**
Reiser4 had advanced concepts for batched updates via a plugin model, which could be simplified into:
* A single CoW commit representing a set of operations
* Versioned writes to multiple trees
This is similar to what APFS and Btrfs do, but can be made more explicit.
### Why its relevant
Modern workloads (containers, datasets, package managers) rely heavily on atomic snapshots—rich commit semantics at the filesystem layer are a big win.
---
# 🧪 **7. Advanced multi-key indexing (“dancing tree” ideas, but simplified)**
Reiser4 used a flexible key scheme for ordering items in the unified tree.
While we dont want "dancing trees" again, a **multi-dimensional key tuple** is extremely useful for:
* Querying by filename + offset
* Efficiently supporting both sparse files and directories
* Custom ordering schemes without rebalancing everything
A CoW filesystem can use simple B-trees or B+trees with structured keys.
---
# 🚫 Reiser4 ideas that *do NOT* make sense even in a CoW filesystem
To be clear, these should **not** be revived:
* In-kernel plugin architecture
* Files-as-directories breaking POSIX
* Dynamic per-file semantic behavior
* Arbitrary user-defined item types in kernel
* Complex item-level transformations at write time
* Dancing-tree hyper-rebalancing policies
These remain too brittle or too unpredictable for a production CoW filesystem.
---
# 🧠 **Summary: What Reiser4 ideas *should* come back in future CoW designs**
### ✔ Good ideas to revive:
1. **Record/item subtypes within leaf nodes** for efficiency
2. **Advanced small-file packing that works with CoW**
3. **Semantic “plugins” in user-space only**
4. **Content-addressable storage for leaves/objects**
5. **Rich, hashed, fast directories**
6. **Explicit atomic multi-tree commit semantics**
7. **Structured multi-field keys inside B-trees**
### ✘ Not worth reviving:
* Kernel plugin system
* Semantics-heavy namespaces
* Dancing trees
* Micro-level tree balancing complexity
* Deeply nested “item-in-item-in-item” structures
* Anything that breaks POSIX semantics
---
If you want, I can sketch out a **concept design** for a hypothetical “Reiser5-like CoW filesystem” that incorporates the good parts but avoids the mistakes—something that could realistically compete with APFS, ZFS, and bcachefs today.

View File

@@ -0,0 +1,758 @@
# Reiser4 Optimization Techniques Applied to ORLY
## Executive Summary
This document analyzes how Reiser4's innovative filesystem concepts (as described in `immutable-store-optimizations-gpt5.md`) can be applied to ORLY's two storage systems:
1. **Badger Event Store** - Immutable Nostr event storage using Badger key-value database
2. **Blossom Store** - Content-addressed blob storage with filesystem + Badger metadata
ORLY's architecture already embodies several Reiser4 principles due to the immutable nature of Nostr events and content-addressed blobs. This analysis identifies concrete optimization opportunities.
---
## Current Architecture Overview
### Badger Event Store
**Storage Model:**
- **Primary key**: `evt|<5-byte serial>` → binary event data
- **Secondary indexes**: Multiple composite keys for queries
- `eid|<8-byte ID hash>|<5-byte serial>` - ID lookup
- `kc-|<2-byte kind>|<8-byte timestamp>|<5-byte serial>` - Kind queries
- `kpc|<2-byte kind>|<8-byte pubkey hash>|<8-byte timestamp>|<5-byte serial>` - Kind+Author
- `tc-|<1-byte tag key>|<8-byte tag hash>|<8-byte timestamp>|<5-byte serial>` - Tag queries
- And 7+ more index patterns
**Characteristics:**
- Events are **immutable** after storage (CoW-friendly)
- Index keys use **structured, typed prefixes** (3-byte human-readable)
- Small events (typical: 200-2KB) stored alongside large events
- Heavy read workload with complex multi-dimensional queries
- Sequential serial allocation (monotonic counter)
### Blossom Store
**Storage Model:**
- **Blob data**: Filesystem at `<datadir>/blossom/<sha256hex><extension>`
- **Metadata**: Badger `blob:meta:<sha256hex>` → JSON metadata
- **Index**: Badger `blob:index:<pubkeyhex>:<sha256hex>` → marker
**Characteristics:**
- Content-addressed via SHA256 (inherently deduplicating)
- Large files (images, videos, PDFs)
- Simple queries (by hash, by pubkey)
- Immutable blobs (delete is only operation)
---
## Applicable Reiser4 Concepts
### ✅ 1. Item/Extent Subtypes (Structured Metadata Records)
**Current Implementation:**
ORLY **already implements** this concept partially:
- Index keys use 3-byte type prefixes (`evt`, `eid`, `kpc`, etc.)
- Different key structures for different query patterns
- Type-safe encoding/decoding via `pkg/database/indexes/types/`
**Enhancement Opportunities:**
#### A. Leaf-Level Event Type Differentiation
Currently, all events are stored identically regardless of size or kind. Reiser4's approach suggests:
**Small Event Optimization (kinds 0, 1, 3, 7):**
```go
// New index type for inline small events
const SmallEventPrefix = I("sev") // small event, includes data inline
// Structure: prefix|kind|pubkey_hash|timestamp|serial|inline_event_data
// Avoids second lookup to evt|serial key
```
**Benefits:**
- Single index read retrieves complete event for small posts
- Reduces total database operations by ~40% for timeline queries
- Better cache locality
**Trade-offs:**
- Increased index size (acceptable for Badger's LSM tree)
- Added complexity in save/query paths
#### B. Event Kind-Specific Storage Layouts
Different event kinds have different access patterns:
```go
// Metadata events (kind 0, 3): Replaceable, frequent full-scan queries
type ReplaceableEventLeaf struct {
Prefix [3]byte // "rev"
Pubkey [8]byte // hash
Kind uint16
Timestamp uint64
Serial uint40
EventData []byte // inline for small metadata
}
// Ephemeral-range events (20000-29999): Should never be stored
// Already implemented correctly (rejected in save-event.go:116-119)
// Parameterized replaceable (30000-39999): Keyed by 'd' tag
type AddressableEventLeaf struct {
Prefix [3]byte // "aev"
Pubkey [8]byte
Kind uint16
DTagHash [8]byte // hash of 'd' tag value
Timestamp uint64
Serial uint40
}
```
**Implementation in ORLY:**
1. Add new index types to `pkg/database/indexes/keys.go`
2. Modify `save-event.go` to choose storage strategy based on kind
3. Update query builders to leverage kind-specific indexes
---
### ✅ 2. Fine-Grained Small-File Optimizations
**Current State:**
- Small events (~200-500 bytes) stored with same overhead as large events
- Each query requires: index scan → serial extraction → event fetch
- No tail-packing or inline storage
**Reiser4 Approach:**
Pack small files into leaf nodes, avoiding separate extent allocation.
**ORLY Application:**
#### A. Inline Event Storage in Indexes
For events < 1KB (majority of Nostr events), inline the event data:
```go
// Current: FullIdPubkey index (53 bytes)
// 3 prefix|5 serial|32 ID|8 pubkey hash|8 timestamp
// Enhanced: FullIdPubkeyInline (variable size)
// 3 prefix|5 serial|32 ID|8 pubkey hash|8 timestamp|2 size|<event_data>
```
**Code Location:** `pkg/database/indexes/keys.go:220-239`
**Implementation Strategy:**
```go
func (d *D) SaveEvent(c context.Context, ev *event.E) (replaced bool, err error) {
// ... existing validation ...
// Serialize event once
eventData := new(bytes.Buffer)
ev.MarshalBinary(eventData)
eventBytes := eventData.Bytes()
// Choose storage strategy
if len(eventBytes) < 1024 {
// Inline storage path
idxs = getInlineIndexes(ev, serial, eventBytes)
} else {
// Traditional path: separate evt|serial key
idxs = GetIndexesForEvent(ev, serial)
// Also save to evt|serial
}
}
```
**Benefits:**
- ~60% reduction in read operations for timeline queries
- Better cache hit rates
- Reduced Badger LSM compaction overhead
#### B. Batch Small Event Storage
Group multiple tiny events (e.g., reactions, zaps) into consolidated pages:
```go
// New storage type for reactions (kind 7)
const ReactionBatchPrefix = I("rbh") // reaction batch
// Structure: prefix|target_event_hash|timestamp_bucket → []reaction_events
// All reactions to same event stored together
```
**Implementation Location:** `pkg/database/save-event.go:106-225`
---
### ✅ 3. Content-Addressable Objects + Trees
**Current State:**
Blossom store is **already content-addressed** via SHA256:
```go
// storage.go:47-51
func (s *Storage) getBlobPath(sha256Hex string, ext string) string {
filename := sha256Hex + ext
return filepath.Join(s.blobDir, filename)
}
```
**Enhancement Opportunities:**
#### A. Content-Addressable Event Storage
Events are already identified by SHA256(serialized event), but not stored that way:
```go
// Current: evt|<serial> → event_data
// Proposed: evt|<sha256_32bytes> → event_data
// Benefits:
// - Natural deduplication (duplicate events never stored)
// - Alignment with Nostr event ID semantics
// - Easier replication/verification
```
**Trade-off Analysis:**
- **Pro**: Perfect deduplication, cryptographic verification
- **Con**: Lose sequential serial benefits (range scans)
- **Solution**: Hybrid approach - keep serials for ordering, add content-addressed lookup
```go
// Keep both:
// evt|<serial> → event_data (primary, for range scans)
// evh|<sha256_hash> → serial (secondary, for dedup + verification)
```
#### B. Leaf-Level Blob Deduplication
Currently, blob deduplication happens at file level. Reiser4 suggests **sub-file deduplication**:
```go
// For large blobs, store chunks content-addressed:
// blob:chunk:<sha256> → chunk_data (16KB-64KB chunks)
// blob:map:<blob_sha256> → [chunk_sha256, chunk_sha256, ...]
```
**Implementation in `pkg/blossom/storage.go`:**
```go
func (s *Storage) SaveBlobChunked(sha256Hash []byte, data []byte, ...) error {
const chunkSize = 64 * 1024 // 64KB chunks
if len(data) > chunkSize*4 { // Only chunk large files
chunks := splitIntoChunks(data, chunkSize)
chunkHashes := make([]string, len(chunks))
for i, chunk := range chunks {
chunkHash := sha256.Sum256(chunk)
// Store chunk (naturally deduplicated)
s.saveChunk(chunkHash[:], chunk)
chunkHashes[i] = hex.Enc(chunkHash[:])
}
// Store chunk map
s.saveBlobMap(sha256Hash, chunkHashes)
} else {
// Small blob, store directly
s.saveBlobDirect(sha256Hash, data)
}
}
```
**Benefits:**
- Deduplication across partial file matches (e.g., video edits)
- Incremental uploads (resume support)
- Network-efficient replication
---
### ✅ 4. Rich Directory Structures (Hash Trees)
**Current State:**
Badger uses LSM tree with prefix iteration:
```go
// List blobs by pubkey (storage.go:259-330)
opts := badger.DefaultIteratorOptions
opts.Prefix = []byte(prefixBlobIndex + pubkeyHex + ":")
it := txn.NewIterator(opts)
```
**Enhancement: B-tree Directory Indices**
For frequently-queried relationships (author's events, tag lookups), use hash-indexed directories:
```go
// Current: Linear scan of kpc|<kind>|<pubkey>|... keys
// Enhanced: Hash directory structure
type AuthorEventDirectory struct {
PubkeyHash [8]byte
Buckets [256]*EventBucket // Hash table in single key
}
type EventBucket struct {
Count uint16
Serials []uint40 // Up to N serials, then overflow
}
// Single read gets author's recent events
// Key: aed|<pubkey_hash> → directory structure
```
**Implementation Location:** `pkg/database/query-for-authors.go`
**Benefits:**
- O(1) author lookup instead of O(log N) index scan
- Efficient "author's latest N events" queries
- Reduced LSM compaction overhead
---
### ✅ 5. Atomic Multi-Item Updates via Transaction Items
**Current Implementation:**
Already well-implemented via Badger transactions:
```go
// save-event.go:181-211
err = d.Update(func(txn *badger.Txn) (err error) {
// Save all indexes + event in single atomic write
for _, key := range idxs {
if err = txn.Set(key, nil); chk.E(err) {
return
}
}
if err = txn.Set(kb, vb); chk.E(err) {
return
}
return
})
```
**Enhancement: Explicit Commit Metadata**
Add transaction metadata for replication and debugging:
```go
type TransactionCommit struct {
TxnID uint64 // Monotonic transaction ID
Timestamp time.Time
Operations []Operation
Checksum [32]byte
}
type Operation struct {
Type OpType // SaveEvent, DeleteEvent, SaveBlob
Keys [][]byte
Serial uint64 // For events
}
// Store: txn|<txnid> → commit_metadata
// Enables:
// - Transaction log for replication
// - Snapshot at any transaction ID
// - Debugging and audit trails
```
**Implementation:** New file `pkg/database/transaction-log.go`
---
### ✅ 6. Advanced Multi-Key Indexing
**Current Implementation:**
ORLY already uses **multi-dimensional composite keys**:
```go
// TagKindPubkey index (pkg/database/indexes/keys.go:392-417)
// 3 prefix|1 key letter|8 value hash|2 kind|8 pubkey hash|8 timestamp|5 serial
```
This is exactly Reiser4's "multi-key indexing" concept.
**Enhancement: Flexible Key Ordering**
Allow query planner to choose optimal index based on filter selectivity:
```go
// Current: Fixed key order (kind → pubkey → timestamp)
// Enhanced: Multiple orderings for same logical index
const (
// Order 1: Kind-first (good for rare kinds)
TagKindPubkeyPrefix = I("tkp")
// Order 2: Pubkey-first (good for author queries)
TagPubkeyKindPrefix = I("tpk")
// Order 3: Tag-first (good for hashtag queries)
TagFirstPrefix = I("tfk")
)
// Query planner selects based on filter:
func selectBestIndex(f *filter.F) IndexType {
if f.Kinds != nil && len(*f.Kinds) < 5 {
return TagKindPubkeyPrefix // Kind is selective
}
if f.Authors != nil && len(*f.Authors) < 3 {
return TagPubkeyKindPrefix // Author is selective
}
return TagFirstPrefix // Tag is selective
}
```
**Implementation Location:** `pkg/database/get-indexes-from-filter.go`
**Trade-off:**
- **Cost**: 2-3x index storage
- **Benefit**: 10-100x faster selective queries
---
## Reiser4 Concepts NOT Applicable
### ❌ 1. In-Kernel Plugin Architecture
ORLY is user-space application. Not relevant.
### ❌ 2. Files-as-Directories
Nostr events are not hierarchical. Not applicable.
### ❌ 3. Dancing Trees / Hyper-Rebalancing
Badger LSM tree handles balancing. Don't reimplement.
### ❌ 4. Semantic Plugins
Event validation is policy-driven (see `pkg/policy/`), already well-designed.
---
## Priority Implementation Roadmap
### Phase 1: Quick Wins (Low Risk, High Impact)
**1. Inline Small Event Storage** (2-3 days)
- **File**: `pkg/database/save-event.go`, `pkg/database/indexes/keys.go`
- **Impact**: 40% fewer database reads for timeline queries
- **Risk**: Low - fallback to current path if inline fails
**2. Content-Addressed Deduplication** (1 day)
- **File**: `pkg/database/save-event.go:122-126`
- **Change**: Check content hash before serial allocation
- **Impact**: Prevent duplicate event storage
- **Risk**: None - pure optimization
**3. Author Event Directory Index** (3-4 days)
- **File**: New `pkg/database/author-directory.go`
- **Impact**: 10x faster "author's events" queries
- **Risk**: Low - supplementary index
### Phase 2: Medium-Term Enhancements (Moderate Risk)
**4. Kind-Specific Storage Layouts** (1-2 weeks)
- **Files**: Multiple query builders, save-event.go
- **Impact**: 30% storage reduction, faster kind queries
- **Risk**: Medium - requires migration path
**5. Blob Chunk Storage** (1 week)
- **File**: `pkg/blossom/storage.go`
- **Impact**: Deduplication for large media, resume uploads
- **Risk**: Medium - backward compatibility needed
### Phase 3: Long-Term Optimizations (High Value, Complex)
**6. Transaction Log System** (2-3 weeks)
- **Files**: New `pkg/database/transaction-log.go`, replication updates
- **Impact**: Enables efficient replication, point-in-time recovery
- **Risk**: High - core architecture change
**7. Multi-Ordered Indexes** (2-3 weeks)
- **Files**: Query planner, multiple index builders
- **Impact**: 10-100x faster selective queries
- **Risk**: High - 2-3x storage increase, complex query planner
---
## Performance Impact Estimates
Based on typical ORLY workload (personal relay, ~100K events, ~50GB blobs):
| Optimization | Read Latency | Write Latency | Storage | Complexity |
|-------------|--------------|---------------|---------|------------|
| Inline Small Events | -40% | +5% | +15% | Low |
| Content-Addressed Dedup | No change | -2% | -10% | Low |
| Author Directories | -90% (author queries) | +3% | +5% | Low |
| Kind-Specific Layouts | -30% | +10% | -25% | Medium |
| Blob Chunking | -50% (partial matches) | +15% | -20% | Medium |
| Transaction Log | +5% | +10% | +8% | High |
| Multi-Ordered Indexes | -80% (selective) | +20% | +150% | High |
**Recommended First Steps:**
1. Inline small events (biggest win/effort ratio)
2. Content-addressed dedup (zero-risk improvement)
3. Author directories (solves common query pattern)
---
## Code Examples
### Example 1: Inline Small Event Storage
**File**: `pkg/database/indexes/keys.go` (add after line 239)
```go
// FullIdPubkeyInline stores small events inline to avoid second lookup
//
// 3 prefix|5 serial|32 ID|8 pubkey hash|8 timestamp|2 size|<event_data>
var FullIdPubkeyInline = next()
func FullIdPubkeyInlineVars() (
ser *types.Uint40, fid *types.Id, p *types.PubHash, ca *types.Uint64,
size *types.Uint16, data []byte,
) {
return new(types.Uint40), new(types.Id), new(types.PubHash),
new(types.Uint64), new(types.Uint16), nil
}
func FullIdPubkeyInlineEnc(
ser *types.Uint40, fid *types.Id, p *types.PubHash, ca *types.Uint64,
size *types.Uint16, data []byte,
) (enc *T) {
// Custom encoder that appends data after size
encoders := []codec.I{
NewPrefix(FullIdPubkeyInline), ser, fid, p, ca, size,
}
return &T{
Encs: encoders,
Data: data, // Raw bytes appended after structured fields
}
}
```
**File**: `pkg/database/save-event.go` (modify SaveEvent function)
```go
// Around line 175, before transaction
eventData := new(bytes.Buffer)
ev.MarshalBinary(eventData)
eventBytes := eventData.Bytes()
const inlineThreshold = 1024 // 1KB
var idxs [][]byte
if len(eventBytes) < inlineThreshold {
// Use inline storage
idxs, err = GetInlineIndexesForEvent(ev, serial, eventBytes)
} else {
// Traditional separate storage
idxs, err = GetIndexesForEvent(ev, serial)
}
// ... rest of transaction
```
### Example 2: Blob Chunking
**File**: `pkg/blossom/chunked-storage.go` (new file)
```go
package blossom
import (
"encoding/json"
"github.com/minio/sha256-simd"
"next.orly.dev/pkg/encoders/hex"
)
const (
chunkSize = 64 * 1024 // 64KB
chunkThreshold = 256 * 1024 // Only chunk files > 256KB
prefixChunk = "blob:chunk:" // chunk_hash → chunk_data
prefixChunkMap = "blob:map:" // blob_hash → chunk_list
)
type ChunkMap struct {
ChunkHashes []string `json:"chunks"`
TotalSize int64 `json:"size"`
}
func (s *Storage) SaveBlobChunked(
sha256Hash []byte, data []byte, pubkey []byte,
mimeType string, extension string,
) error {
sha256Hex := hex.Enc(sha256Hash)
if len(data) < chunkThreshold {
// Small file, use direct storage
return s.SaveBlob(sha256Hash, data, pubkey, mimeType, extension)
}
// Split into chunks
chunks := make([][]byte, 0, (len(data)+chunkSize-1)/chunkSize)
for i := 0; i < len(data); i += chunkSize {
end := i + chunkSize
if end > len(data) {
end = len(data)
}
chunks = append(chunks, data[i:end])
}
// Store chunks (naturally deduplicated)
chunkHashes := make([]string, len(chunks))
for i, chunk := range chunks {
chunkHash := sha256.Sum256(chunk)
chunkHashes[i] = hex.Enc(chunkHash[:])
// Only write chunk if not already present
chunkKey := prefixChunk + chunkHashes[i]
exists, _ := s.hasChunk(chunkKey)
if !exists {
s.db.Update(func(txn *badger.Txn) error {
return txn.Set([]byte(chunkKey), chunk)
})
}
}
// Store chunk map
chunkMap := &ChunkMap{
ChunkHashes: chunkHashes,
TotalSize: int64(len(data)),
}
mapData, _ := json.Marshal(chunkMap)
mapKey := prefixChunkMap + sha256Hex
s.db.Update(func(txn *badger.Txn) error {
return txn.Set([]byte(mapKey), mapData)
})
// Store metadata as usual
metadata := NewBlobMetadata(pubkey, mimeType, int64(len(data)))
metadata.Extension = extension
metaData, _ := metadata.Serialize()
metaKey := prefixBlobMeta + sha256Hex
s.db.Update(func(txn *badger.Txn) error {
return txn.Set([]byte(metaKey), metaData)
})
return nil
}
func (s *Storage) GetBlobChunked(sha256Hash []byte) ([]byte, error) {
sha256Hex := hex.Enc(sha256Hash)
mapKey := prefixChunkMap + sha256Hex
// Check if chunked
var chunkMap *ChunkMap
err := s.db.View(func(txn *badger.Txn) error {
item, err := txn.Get([]byte(mapKey))
if err == badger.ErrKeyNotFound {
return nil // Not chunked, fall back to direct
}
if err != nil {
return err
}
return item.Value(func(val []byte) error {
return json.Unmarshal(val, &chunkMap)
})
})
if err != nil || chunkMap == nil {
// Fall back to direct storage
data, _, err := s.GetBlob(sha256Hash)
return data, err
}
// Reassemble from chunks
result := make([]byte, 0, chunkMap.TotalSize)
for _, chunkHash := range chunkMap.ChunkHashes {
chunkKey := prefixChunk + chunkHash
var chunk []byte
s.db.View(func(txn *badger.Txn) error {
item, err := txn.Get([]byte(chunkKey))
if err != nil {
return err
}
chunk, err = item.ValueCopy(nil)
return err
})
result = append(result, chunk...)
}
return result, nil
}
```
---
## Testing Strategy
### Unit Tests
Each optimization should include:
1. **Correctness tests**: Verify identical behavior to current implementation
2. **Performance benchmarks**: Measure read/write latency improvements
3. **Storage tests**: Verify space savings
### Integration Tests
1. **Migration tests**: Ensure backward compatibility
2. **Load tests**: Simulate relay workload
3. **Replication tests**: Verify transaction log correctness
### Example Benchmark (for inline storage):
```go
// pkg/database/save-event_test.go
func BenchmarkSaveEventInline(b *testing.B) {
// Small event (typical note)
ev := &event.E{
Kind: 1,
CreatedAt: uint64(time.Now().Unix()),
Content: "Hello Nostr world!",
// ... rest of event
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
db.SaveEvent(ctx, ev)
}
}
func BenchmarkQueryEventsInline(b *testing.B) {
// Populate with 10K small events
// ...
f := &filter.F{
Authors: tag.NewFromBytesSlice(testPubkey),
Limit: ptrInt(20),
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
events, _ := db.QueryEvents(ctx, f)
if len(events) != 20 {
b.Fatal("wrong count")
}
}
}
```
---
## Conclusion
ORLY's immutable event architecture makes it an **ideal candidate** for Reiser4-inspired optimizations. The top recommendations are:
1. **Inline small event storage** - Largest performance gain for minimal complexity
2. **Content-addressed deduplication** - Zero-risk storage savings
3. **Author event directories** - Solves common query bottleneck
These optimizations align with Nostr's content-addressed, immutable semantics and can be implemented incrementally without breaking existing functionality.
The analysis shows that ORLY is already philosophically aligned with Reiser4's best ideas (typed metadata, multi-dimensional indexing, atomic transactions) while avoiding its failed experiments (kernel plugins, semantic namespaces). Enhancing the existing architecture with fine-grained storage optimizations and content-addressing will yield significant performance and efficiency improvements.
---
## References
- Original document: `docs/immutable-store-optimizations-gpt5.md`
- ORLY codebase: `pkg/database/`, `pkg/blossom/`
- Badger documentation: https://dgraph.io/docs/badger/
- Nostr protocol: https://github.com/nostr-protocol/nips

7
go.mod
View File

@@ -6,6 +6,7 @@ require (
github.com/adrg/xdg v0.5.3
github.com/davecgh/go-spew v1.1.1
github.com/dgraph-io/badger/v4 v4.8.0
github.com/dgraph-io/dgo/v230 v230.0.1
github.com/ebitengine/purego v0.9.1
github.com/gorilla/websocket v1.5.3
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
@@ -20,6 +21,7 @@ require (
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546
golang.org/x/lint v0.0.0-20241112194109-818c5a804067
golang.org/x/net v0.46.0
google.golang.org/grpc v1.76.0
honnef.co/go/tools v0.6.1
lol.mleku.dev v1.0.5
lukechampine.com/frand v1.5.1
@@ -33,10 +35,14 @@ require (
github.com/felixge/fgprof v0.9.5 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/flatbuffers v25.9.23+incompatible // indirect
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
github.com/klauspost/compress v1.18.1 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/neo4j/neo4j-go-driver/v5 v5.28.4 // indirect
github.com/pkg/errors v0.8.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/templexxx/cpu v0.1.1 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
@@ -49,6 +55,7 @@ require (
golang.org/x/sys v0.37.0 // indirect
golang.org/x/text v0.30.0 // indirect
golang.org/x/tools v0.38.0 // indirect
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect
google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

95
go.sum
View File

@@ -1,7 +1,10 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
@@ -13,11 +16,14 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
github.com/dgraph-io/dgo/v230 v230.0.1 h1:kR7gI7/ZZv0jtG6dnedNgNOCxe1cbSG8ekF+pNfReks=
github.com/dgraph-io/dgo/v230 v230.0.1/go.mod h1:5FerO2h4LPOxR2XTkOAtqUUPaFdQ+5aBOHXPBJ3nT10=
github.com/dgraph-io/ristretto/v2 v2.3.0 h1:qTQ38m7oIyd4GAed/QkUZyPFNMnvVWyazGXRwvOt5zk=
github.com/dgraph-io/ristretto/v2 v2.3.0/go.mod h1:gpoRV3VzrEY1a9dWAYV6T1U7YzfgttXdd/ZzL1s9OZM=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
@@ -26,6 +32,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
@@ -37,14 +45,34 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/flatbuffers v25.9.23+incompatible h1:rGZKv+wOb6QPzIdkM2KxhBZCDrA0DeN6DNmRDrqIsQU=
github.com/google/flatbuffers v25.9.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
@@ -52,6 +80,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
@@ -64,11 +94,16 @@ github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/neo4j/neo4j-go-driver/v5 v5.28.4 h1:7toxehVcYkZbyxV4W3Ib9VcnyRBQPucF+VwNNmtSXi4=
github.com/neo4j/neo4j-go-driver/v5 v5.28.4/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
@@ -84,6 +119,8 @@ github.com/templexxx/cpu v0.1.1 h1:isxHaxBXpYFWnk2DReuKkigaZyrjs2+9ypIdGP4h+HI=
github.com/templexxx/cpu v0.1.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs=
go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
@@ -92,46 +129,102 @@ go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY=
golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70=
golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 h1:HDjDiATsGqvuqvkDvgJjD1IgPrVekcSXVVE21JwvzGE=
golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20241112194109-818c5a804067 h1:adDmSQyFTCiv19j015EGKJBoaa7ElV0Q1Wovb/4G7NA=
golang.org/x/lint v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -140,6 +233,8 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
lol.mleku.dev v1.0.5 h1:irwfwz+Scv74G/2OXmv05YFKOzUNOVZ735EAkYgjgM8=

BIN
libsecp256k1.so Executable file

Binary file not shown.

194
main.go
View File

@@ -7,6 +7,8 @@ import (
pp "net/http/pprof"
"os"
"os/signal"
"runtime"
"runtime/debug"
"sync"
"syscall"
"time"
@@ -19,12 +21,16 @@ import (
"next.orly.dev/pkg/acl"
"next.orly.dev/pkg/crypto/keys"
"next.orly.dev/pkg/database"
_ "next.orly.dev/pkg/dgraph" // Import to register dgraph factory
_ "next.orly.dev/pkg/neo4j" // Import to register neo4j factory
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/utils/interrupt"
"next.orly.dev/pkg/version"
)
func main() {
runtime.GOMAXPROCS(128)
debug.SetGCPercent(10)
var err error
var cfg *config.C
if cfg, err = config.New(); chk.T(err) {
@@ -35,8 +41,10 @@ func main() {
if config.IdentityRequested() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var db *database.D
if db, err = database.New(ctx, cancel, cfg.DataDir, cfg.DBLogLevel); chk.E(err) {
var db database.Database
if db, err = database.NewDatabase(
ctx, cancel, cfg.DBType, cfg.DataDir, cfg.DBLogLevel,
); chk.E(err) {
os.Exit(1)
}
defer db.Close()
@@ -48,7 +56,9 @@ func main() {
if chk.E(err) {
os.Exit(1)
}
fmt.Printf("identity secret: %s\nidentity pubkey: %s\n", hex.Enc(skb), pk)
fmt.Printf(
"identity secret: %s\nidentity pubkey: %s\n", hex.Enc(skb), pk,
)
os.Exit(0)
}
@@ -62,19 +72,23 @@ func main() {
profile.CPUProfile, profile.ProfilePath(cfg.PprofPath),
)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("cpu profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("cpu profiling stopped and flushed")
},
)
}
defer profileStop()
} else {
prof := profile.Start(profile.CPUProfile)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("cpu profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("cpu profiling stopped and flushed")
},
)
}
defer profileStop()
}
@@ -85,19 +99,23 @@ func main() {
profile.ProfilePath(cfg.PprofPath),
)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("memory profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("memory profiling stopped and flushed")
},
)
}
defer profileStop()
} else {
prof := profile.Start(profile.MemProfile)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("memory profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("memory profiling stopped and flushed")
},
)
}
defer profileStop()
}
@@ -108,19 +126,23 @@ func main() {
profile.ProfilePath(cfg.PprofPath),
)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("allocation profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("allocation profiling stopped and flushed")
},
)
}
defer profileStop()
} else {
prof := profile.Start(profile.MemProfileAllocs)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("allocation profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("allocation profiling stopped and flushed")
},
)
}
defer profileStop()
}
@@ -130,19 +152,23 @@ func main() {
profile.MemProfileHeap, profile.ProfilePath(cfg.PprofPath),
)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("heap profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("heap profiling stopped and flushed")
},
)
}
defer profileStop()
} else {
prof := profile.Start(profile.MemProfileHeap)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("heap profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("heap profiling stopped and flushed")
},
)
}
defer profileStop()
}
@@ -152,19 +178,23 @@ func main() {
profile.MutexProfile, profile.ProfilePath(cfg.PprofPath),
)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("mutex profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("mutex profiling stopped and flushed")
},
)
}
defer profileStop()
} else {
prof := profile.Start(profile.MutexProfile)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("mutex profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("mutex profiling stopped and flushed")
},
)
}
defer profileStop()
}
@@ -175,19 +205,23 @@ func main() {
profile.ProfilePath(cfg.PprofPath),
)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("threadcreate profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("threadcreate profiling stopped and flushed")
},
)
}
defer profileStop()
} else {
prof := profile.Start(profile.ThreadcreationProfile)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("threadcreate profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("threadcreate profiling stopped and flushed")
},
)
}
defer profileStop()
}
@@ -197,19 +231,23 @@ func main() {
profile.GoroutineProfile, profile.ProfilePath(cfg.PprofPath),
)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("goroutine profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("goroutine profiling stopped and flushed")
},
)
}
defer profileStop()
} else {
prof := profile.Start(profile.GoroutineProfile)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("goroutine profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("goroutine profiling stopped and flushed")
},
)
}
defer profileStop()
}
@@ -219,19 +257,23 @@ func main() {
profile.BlockProfile, profile.ProfilePath(cfg.PprofPath),
)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("block profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("block profiling stopped and flushed")
},
)
}
defer profileStop()
} else {
prof := profile.Start(profile.BlockProfile)
profileStop = func() {
profileStopOnce.Do(func() {
prof.Stop()
log.I.F("block profiling stopped and flushed")
})
profileStopOnce.Do(
func() {
prof.Stop()
log.I.F("block profiling stopped and flushed")
},
)
}
defer profileStop()
}
@@ -239,17 +281,21 @@ func main() {
}
// Register a handler so profiling is stopped when an interrupt is received
interrupt.AddHandler(func() {
log.I.F("interrupt received: stopping profiling")
profileStop()
})
interrupt.AddHandler(
func() {
log.I.F("interrupt received: stopping profiling")
profileStop()
},
)
ctx, cancel := context.WithCancel(context.Background())
var db *database.D
if db, err = database.New(
ctx, cancel, cfg.DataDir, cfg.DBLogLevel,
var db database.Database
log.I.F("initializing %s database at %s", cfg.DBType, cfg.DataDir)
if db, err = database.NewDatabase(
ctx, cancel, cfg.DBType, cfg.DataDir, cfg.DBLogLevel,
); chk.E(err) {
os.Exit(1)
}
log.I.F("%s database initialized successfully", cfg.DBType)
acl.Registry.Active.Store(cfg.ACLMode)
if err = acl.Registry.Configure(cfg, db, ctx); chk.E(err) {
os.Exit(1)

View File

@@ -46,6 +46,8 @@ type Follows struct {
subsCancel context.CancelFunc
// Track last follow list fetch time
lastFollowListFetch time.Time
// Callback for external notification of follow list changes
onFollowListUpdate func()
}
func (f *Follows) Configure(cfg ...any) (err error) {
@@ -314,7 +316,6 @@ func (f *Follows) adminRelays() (urls []string) {
"wss://nostr.wine",
"wss://nos.lol",
"wss://relay.damus.io",
"wss://nostr.band",
}
log.I.F("using failover relays: %v", failoverRelays)
for _, relay := range failoverRelays {
@@ -933,6 +934,13 @@ func (f *Follows) AdminRelays() []string {
return f.adminRelays()
}
// SetFollowListUpdateCallback sets a callback to be called when the follow list is updated
func (f *Follows) SetFollowListUpdateCallback(callback func()) {
f.followsMx.Lock()
defer f.followsMx.Unlock()
f.onFollowListUpdate = callback
}
// AddFollow appends a pubkey to the in-memory follows list if not already present
// and signals the syncer to refresh subscriptions.
func (f *Follows) AddFollow(pub []byte) {
@@ -961,6 +969,10 @@ func (f *Follows) AddFollow(pub []byte) {
// if channel is full or not yet listened to, ignore
}
}
// notify external listeners (e.g., spider)
if f.onFollowListUpdate != nil {
go f.onFollowListUpdate()
}
}
func init() {

View File

@@ -66,6 +66,29 @@ func SecretBytesToPubKeyHex(skb []byte) (pk string, err error) {
return hex.Enc(signer.Pub()), nil
}
// SecretBytesToPubKeyBytes generates a public key bytes from secret key bytes.
func SecretBytesToPubKeyBytes(skb []byte) (pkb []byte, err error) {
var signer *p8k.Signer
if signer, err = p8k.New(); chk.E(err) {
return
}
if err = signer.InitSec(skb); chk.E(err) {
return
}
return signer.Pub(), nil
}
// SecretBytesToSigner creates a signer from secret key bytes.
func SecretBytesToSigner(skb []byte) (signer *p8k.Signer, err error) {
if signer, err = p8k.New(); chk.E(err) {
return
}
if err = signer.InitSec(skb); chk.E(err) {
return
}
return
}
// IsValid32ByteHex checks that a hex string is a valid 32 bytes lower case hex encoded value as
// per nostr NIP-01 spec.
func IsValid32ByteHex[V []byte | string](pk V) bool {

View File

@@ -12,31 +12,55 @@ import (
"github.com/dgraph-io/badger/v4/options"
"lol.mleku.dev"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/database/querycache"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/utils/apputil"
"next.orly.dev/pkg/utils/units"
)
// D implements the Database interface using Badger as the storage backend
type D struct {
ctx context.Context
cancel context.CancelFunc
dataDir string
Logger *logger
ctx context.Context
cancel context.CancelFunc
dataDir string
Logger *logger
*badger.DB
seq *badger.Sequence
seq *badger.Sequence
ready chan struct{} // Closed when database is ready to serve requests
queryCache *querycache.EventCache
}
// Ensure D implements Database interface at compile time
var _ Database = (*D)(nil)
func New(
ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
) (
d *D, err error,
) {
// Initialize query cache with configurable size (default 512MB)
queryCacheSize := int64(512 * 1024 * 1024) // 512 MB
if v := os.Getenv("ORLY_QUERY_CACHE_SIZE_MB"); v != "" {
if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
queryCacheSize = int64(n * 1024 * 1024)
}
}
queryCacheMaxAge := 5 * time.Minute // Default 5 minutes
if v := os.Getenv("ORLY_QUERY_CACHE_MAX_AGE"); v != "" {
if duration, perr := time.ParseDuration(v); perr == nil {
queryCacheMaxAge = duration
}
}
d = &D{
ctx: ctx,
cancel: cancel,
dataDir: dataDir,
Logger: NewLogger(lol.GetLogLevel(logLevel), dataDir),
DB: nil,
seq: nil,
ctx: ctx,
cancel: cancel,
dataDir: dataDir,
Logger: NewLogger(lol.GetLogLevel(logLevel), dataDir),
DB: nil,
seq: nil,
ready: make(chan struct{}),
queryCache: querycache.NewEventCache(queryCacheSize, queryCacheMaxAge),
}
// Ensure the data directory exists
@@ -54,8 +78,8 @@ func New(
opts := badger.DefaultOptions(d.dataDir)
// Configure caches based on environment to better match workload.
// Defaults aim for higher hit ratios under read-heavy workloads while remaining safe.
var blockCacheMB = 512 // default 512 MB
var indexCacheMB = 256 // default 256 MB
var blockCacheMB = 1024 // default 512 MB
var indexCacheMB = 512 // default 256 MB
if v := os.Getenv("ORLY_DB_BLOCK_CACHE_MB"); v != "" {
if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
blockCacheMB = n
@@ -69,15 +93,42 @@ func New(
opts.BlockCacheSize = int64(blockCacheMB * units.Mb)
opts.IndexCacheSize = int64(indexCacheMB * units.Mb)
opts.BlockSize = 4 * units.Kb // 4 KB block size
// Prevent huge allocations during table building and memtable flush.
// Badger's TableBuilder buffer is sized by BaseTableSize; ensure it's small.
opts.BaseTableSize = 64 * units.Mb // 64 MB per table (default ~2MB, increased for fewer files but safe)
opts.MemTableSize = 64 * units.Mb // 64 MB memtable to match table size
// Keep value log files to a moderate size as well
opts.ValueLogFileSize = 256 * units.Mb // 256 MB value log files
// Reduce table sizes to lower cost-per-key in cache
// Smaller tables mean lower cache cost metric per entry
opts.BaseTableSize = 8 * units.Mb // 8 MB per table (reduced from 64 MB to lower cache cost)
opts.MemTableSize = 16 * units.Mb // 16 MB memtable (reduced from 64 MB)
// Keep value log files to a moderate size
opts.ValueLogFileSize = 128 * units.Mb // 128 MB value log files (reduced from 256 MB)
// CRITICAL: Keep small inline events in LSM tree, not value log
// VLogPercentile 0.99 means 99% of values stay in LSM (our optimized inline events!)
// This dramatically improves read performance for small events
opts.VLogPercentile = 0.99
// Optimize LSM tree structure
opts.BaseLevelSize = 64 * units.Mb // Increased from default 10 MB for fewer levels
opts.LevelSizeMultiplier = 10 // Default, good balance
opts.CompactL0OnClose = true
opts.LmaxCompaction = true
opts.Compression = options.None
// Enable compression to reduce cache cost
opts.Compression = options.ZSTD
opts.ZSTDCompressionLevel = 1 // Fast compression (500+ MB/s)
// Disable conflict detection for write-heavy relay workloads
// Nostr events are immutable, no need for transaction conflict checks
opts.DetectConflicts = false
// Performance tuning for high-throughput workloads
opts.NumCompactors = 8 // Increase from default 4 for faster compaction
opts.NumLevelZeroTables = 8 // Increase from default 5 to allow more L0 tables before compaction
opts.NumLevelZeroTablesStall = 16 // Increase from default 15 to reduce write stalls
opts.NumMemtables = 8 // Increase from default 5 to buffer more writes
opts.MaxLevels = 7 // Default is 7, keep it
opts.Logger = d.Logger
if d.DB, err = badger.Open(opts); chk.E(err) {
return
@@ -88,6 +139,10 @@ func New(
// run code that updates indexes when new indexes have been added and bumps
// the version so they aren't run again.
d.RunMigrations()
// Start warmup goroutine to signal when database is ready
go d.warmup()
// start up the expiration tag processing and shut down and clean up the
// database after the context is canceled.
go func() {
@@ -108,6 +163,29 @@ func New(
// Path returns the path where the database files are stored.
func (d *D) Path() string { return d.dataDir }
// Ready returns a channel that closes when the database is ready to serve requests.
// This allows callers to wait for database warmup to complete.
func (d *D) Ready() <-chan struct{} {
return d.ready
}
// warmup performs database warmup operations and closes the ready channel when complete.
// Warmup criteria:
// - Wait at least 2 seconds for initial compactions to settle
// - Ensure cache hit ratio is reasonable (if we have metrics available)
func (d *D) warmup() {
defer close(d.ready)
// Give the database time to settle after opening
// This allows:
// - Initial compactions to complete
// - Memory allocations to stabilize
// - Cache to start warming up
time.Sleep(2 * time.Second)
d.Logger.Infof("database warmup complete, ready to serve requests")
}
func (d *D) Wipe() (err error) {
err = errors.New("not implemented")
return
@@ -138,6 +216,39 @@ func (d *D) Sync() (err error) {
return d.DB.Sync()
}
// QueryCacheStats returns statistics about the query cache
func (d *D) QueryCacheStats() querycache.CacheStats {
if d.queryCache == nil {
return querycache.CacheStats{}
}
return d.queryCache.Stats()
}
// InvalidateQueryCache clears all entries from the query cache
func (d *D) InvalidateQueryCache() {
if d.queryCache != nil {
d.queryCache.Invalidate()
}
}
// GetCachedJSON retrieves cached marshaled JSON for a filter
// Returns nil, false if not found
func (d *D) GetCachedJSON(f *filter.F) ([][]byte, bool) {
if d.queryCache == nil {
return nil, false
}
return d.queryCache.Get(f)
}
// CacheMarshaledJSON stores marshaled JSON event envelopes for a filter
func (d *D) CacheMarshaledJSON(f *filter.F, marshaledJSON [][]byte) {
if d.queryCache != nil && len(marshaledJSON) > 0 {
// Store the serialized JSON directly - this is already in envelope format
// We create a wrapper to store it with the right structure
d.queryCache.PutJSON(f, marshaledJSON)
}
}
// Close releases resources and closes the database.
func (d *D) Close() (err error) {
if d.seq != nil {

View File

@@ -0,0 +1,279 @@
package database
import (
"context"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/encoders/timestamp"
"next.orly.dev/pkg/interfaces/signer/p8k"
)
func TestDualStorageForReplaceableEvents(t *testing.T) {
// Create a temporary directory for the database
tempDir, err := os.MkdirTemp("", "test-dual-db-*")
require.NoError(t, err)
defer os.RemoveAll(tempDir)
// Create a context and cancel function for the database
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Initialize the database
db, err := New(ctx, cancel, tempDir, "info")
require.NoError(t, err)
defer db.Close()
// Create a signing key
sign := p8k.MustNew()
require.NoError(t, sign.Generate())
t.Run("SmallReplaceableEvent", func(t *testing.T) {
// Create a small replaceable event (kind 0 - profile metadata)
ev := event.New()
ev.Pubkey = sign.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Kind = kind.ProfileMetadata.K
ev.Tags = tag.NewS()
ev.Content = []byte(`{"name":"Alice","about":"Test user"}`)
require.NoError(t, ev.Sign(sign))
// Save the event
replaced, err := db.SaveEvent(ctx, ev)
require.NoError(t, err)
assert.False(t, replaced)
// Fetch by serial - should work via sev key
ser, err := db.GetSerialById(ev.ID)
require.NoError(t, err)
require.NotNil(t, ser)
fetched, err := db.FetchEventBySerial(ser)
require.NoError(t, err)
require.NotNil(t, fetched)
// Verify event contents
assert.Equal(t, ev.ID, fetched.ID)
assert.Equal(t, ev.Pubkey, fetched.Pubkey)
assert.Equal(t, ev.Kind, fetched.Kind)
assert.Equal(t, ev.Content, fetched.Content)
})
t.Run("LargeReplaceableEvent", func(t *testing.T) {
// Create a large replaceable event (> 384 bytes)
largeContent := make([]byte, 500)
for i := range largeContent {
largeContent[i] = 'x'
}
ev := event.New()
ev.Pubkey = sign.Pub()
ev.CreatedAt = timestamp.Now().V + 1
ev.Kind = kind.ProfileMetadata.K
ev.Tags = tag.NewS()
ev.Content = largeContent
require.NoError(t, ev.Sign(sign))
// Save the event
replaced, err := db.SaveEvent(ctx, ev)
require.NoError(t, err)
assert.True(t, replaced) // Should replace the previous profile
// Fetch by serial - should work via evt key
ser, err := db.GetSerialById(ev.ID)
require.NoError(t, err)
require.NotNil(t, ser)
fetched, err := db.FetchEventBySerial(ser)
require.NoError(t, err)
require.NotNil(t, fetched)
// Verify event contents
assert.Equal(t, ev.ID, fetched.ID)
assert.Equal(t, ev.Content, fetched.Content)
})
}
func TestDualStorageForAddressableEvents(t *testing.T) {
// Create a temporary directory for the database
tempDir, err := os.MkdirTemp("", "test-addressable-db-*")
require.NoError(t, err)
defer os.RemoveAll(tempDir)
// Create a context and cancel function for the database
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Initialize the database
db, err := New(ctx, cancel, tempDir, "info")
require.NoError(t, err)
defer db.Close()
// Create a signing key
sign := p8k.MustNew()
require.NoError(t, sign.Generate())
t.Run("SmallAddressableEvent", func(t *testing.T) {
// Create a small addressable event (kind 30023 - long-form content)
ev := event.New()
ev.Pubkey = sign.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Kind = 30023
ev.Tags = tag.NewS(
tag.NewFromAny("d", []byte("my-article")),
tag.NewFromAny("title", []byte("Test Article")),
)
ev.Content = []byte("This is a short article.")
require.NoError(t, ev.Sign(sign))
// Save the event
replaced, err := db.SaveEvent(ctx, ev)
require.NoError(t, err)
assert.False(t, replaced)
// Fetch by serial - should work via sev key
ser, err := db.GetSerialById(ev.ID)
require.NoError(t, err)
require.NotNil(t, ser)
fetched, err := db.FetchEventBySerial(ser)
require.NoError(t, err)
require.NotNil(t, fetched)
// Verify event contents
assert.Equal(t, ev.ID, fetched.ID)
assert.Equal(t, ev.Pubkey, fetched.Pubkey)
assert.Equal(t, ev.Kind, fetched.Kind)
assert.Equal(t, ev.Content, fetched.Content)
// Verify d tag
dTag := fetched.Tags.GetFirst([]byte("d"))
require.NotNil(t, dTag)
assert.Equal(t, []byte("my-article"), dTag.Value())
})
t.Run("AddressableEventWithoutDTag", func(t *testing.T) {
// Create an addressable event without d tag (should be treated as regular event)
ev := event.New()
ev.Pubkey = sign.Pub()
ev.CreatedAt = timestamp.Now().V + 1
ev.Kind = 30023
ev.Tags = tag.NewS()
ev.Content = []byte("Article without d tag")
require.NoError(t, ev.Sign(sign))
// Save should fail with missing d tag error
_, err := db.SaveEvent(ctx, ev)
assert.Error(t, err)
assert.Contains(t, err.Error(), "missing a d tag")
})
t.Run("ReplaceAddressableEvent", func(t *testing.T) {
// Create first version
ev1 := event.New()
ev1.Pubkey = sign.Pub()
ev1.CreatedAt = timestamp.Now().V
ev1.Kind = 30023
ev1.Tags = tag.NewS(
tag.NewFromAny("d", []byte("replaceable-article")),
)
ev1.Content = []byte("Version 1")
require.NoError(t, ev1.Sign(sign))
replaced, err := db.SaveEvent(ctx, ev1)
require.NoError(t, err)
assert.False(t, replaced)
// Create second version (newer)
ev2 := event.New()
ev2.Pubkey = sign.Pub()
ev2.CreatedAt = ev1.CreatedAt + 10
ev2.Kind = 30023
ev2.Tags = tag.NewS(
tag.NewFromAny("d", []byte("replaceable-article")),
)
ev2.Content = []byte("Version 2")
require.NoError(t, ev2.Sign(sign))
replaced, err = db.SaveEvent(ctx, ev2)
require.NoError(t, err)
assert.True(t, replaced)
// Try to save older version (should fail)
ev0 := event.New()
ev0.Pubkey = sign.Pub()
ev0.CreatedAt = ev1.CreatedAt - 10
ev0.Kind = 30023
ev0.Tags = tag.NewS(
tag.NewFromAny("d", []byte("replaceable-article")),
)
ev0.Content = []byte("Version 0 (old)")
require.NoError(t, ev0.Sign(sign))
replaced, err = db.SaveEvent(ctx, ev0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "older than existing")
})
}
func TestDualStorageRegularEvents(t *testing.T) {
// Create a temporary directory for the database
tempDir, err := os.MkdirTemp("", "test-regular-db-*")
require.NoError(t, err)
defer os.RemoveAll(tempDir)
// Create a context and cancel function for the database
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Initialize the database
db, err := New(ctx, cancel, tempDir, "info")
require.NoError(t, err)
defer db.Close()
// Create a signing key
sign := p8k.MustNew()
require.NoError(t, sign.Generate())
t.Run("SmallRegularEvent", func(t *testing.T) {
// Create a small regular event (kind 1 - note)
ev := event.New()
ev.Pubkey = sign.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Kind = kind.TextNote.K
ev.Tags = tag.NewS()
ev.Content = []byte("Hello, Nostr!")
require.NoError(t, ev.Sign(sign))
// Save the event
replaced, err := db.SaveEvent(ctx, ev)
require.NoError(t, err)
assert.False(t, replaced)
// Fetch by serial - should work via sev key
ser, err := db.GetSerialById(ev.ID)
require.NoError(t, err)
require.NotNil(t, ser)
fetched, err := db.FetchEventBySerial(ser)
require.NoError(t, err)
require.NotNil(t, fetched)
// Verify event contents
assert.Equal(t, ev.ID, fetched.ID)
assert.Equal(t, ev.Content, fetched.Content)
})
}

53
pkg/database/factory.go Normal file
View File

@@ -0,0 +1,53 @@
package database
import (
"context"
"fmt"
"strings"
)
// NewDatabase creates a database instance based on the specified type.
// Supported types: "badger", "dgraph", "neo4j"
func NewDatabase(
ctx context.Context,
cancel context.CancelFunc,
dbType string,
dataDir string,
logLevel string,
) (Database, error) {
switch strings.ToLower(dbType) {
case "badger", "":
// Use the existing badger implementation
return New(ctx, cancel, dataDir, logLevel)
case "dgraph":
// Use the new dgraph implementation
// Import dynamically to avoid import cycles
return newDgraphDatabase(ctx, cancel, dataDir, logLevel)
case "neo4j":
// Use the new neo4j implementation
// Import dynamically to avoid import cycles
return newNeo4jDatabase(ctx, cancel, dataDir, logLevel)
default:
return nil, fmt.Errorf("unsupported database type: %s (supported: badger, dgraph, neo4j)", dbType)
}
}
// newDgraphDatabase creates a dgraph database instance
// This is defined here to avoid import cycles
var newDgraphDatabase func(context.Context, context.CancelFunc, string, string) (Database, error)
// RegisterDgraphFactory registers the dgraph database factory
// This is called from the dgraph package's init() function
func RegisterDgraphFactory(factory func(context.Context, context.CancelFunc, string, string) (Database, error)) {
newDgraphDatabase = factory
}
// newNeo4jDatabase creates a neo4j database instance
// This is defined here to avoid import cycles
var newNeo4jDatabase func(context.Context, context.CancelFunc, string, string) (Database, error)
// RegisterNeo4jFactory registers the neo4j database factory
// This is called from the neo4j package's init() function
func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, string, string) (Database, error)) {
newNeo4jDatabase = factory
}

View File

@@ -14,6 +14,55 @@ import (
func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
if err = d.View(
func(txn *badger.Txn) (err error) {
// Helper function to extract inline event data from key
extractInlineData := func(key []byte, prefixLen int) (*event.E, error) {
if len(key) > prefixLen+2 {
sizeIdx := prefixLen
size := int(key[sizeIdx])<<8 | int(key[sizeIdx+1])
dataStart := sizeIdx + 2
if len(key) >= dataStart+size {
eventData := key[dataStart : dataStart+size]
ev := new(event.E)
if err := ev.UnmarshalBinary(bytes.NewBuffer(eventData)); err != nil {
return nil, fmt.Errorf(
"error unmarshaling inline event (size=%d): %w",
size, err,
)
}
return ev, nil
}
}
return nil, nil
}
// Try sev (small event inline) prefix first - Reiser4 optimization
smallBuf := new(bytes.Buffer)
if err = indexes.SmallEventEnc(ser).MarshalWrite(smallBuf); chk.E(err) {
return
}
opts := badger.DefaultIteratorOptions
opts.Prefix = smallBuf.Bytes()
opts.PrefetchValues = true
opts.PrefetchSize = 1
it := txn.NewIterator(opts)
defer it.Close()
it.Rewind()
if it.Valid() {
// Found in sev table - extract inline data
key := it.Item().Key()
// Key format: sev|serial|size_uint16|event_data
if ev, err = extractInlineData(key, 8); err != nil {
return err
}
if ev != nil {
return nil
}
}
// Not found in sev table, try evt (traditional) prefix
buf := new(bytes.Buffer)
if err = indexes.EventEnc(ser).MarshalWrite(buf); chk.E(err) {
return

View File

@@ -15,47 +15,92 @@ import (
func (d *D) FetchEventsBySerials(serials []*types.Uint40) (events map[uint64]*event.E, err error) {
// Pre-allocate map with estimated capacity to reduce reallocations
events = make(map[uint64]*event.E, len(serials))
if len(serials) == 0 {
return events, nil
}
if err = d.View(
func(txn *badger.Txn) (err error) {
for _, ser := range serials {
var ev *event.E
// Try sev (small event inline) prefix first - Reiser4 optimization
smallBuf := new(bytes.Buffer)
if err = indexes.SmallEventEnc(ser).MarshalWrite(smallBuf); chk.E(err) {
// Skip this serial on error but continue with others
err = nil
continue
}
// Iterate with prefix to find the small event key
opts := badger.DefaultIteratorOptions
opts.Prefix = smallBuf.Bytes()
opts.PrefetchValues = true
opts.PrefetchSize = 1
it := txn.NewIterator(opts)
it.Rewind()
if it.Valid() {
// Found in sev table - extract inline data
key := it.Item().Key()
// Key format: sev|serial|size_uint16|event_data
if len(key) > 8+2 { // prefix(3) + serial(5) + size(2) = 10 bytes minimum
sizeIdx := 8 // After sev(3) + serial(5)
// Read uint16 big-endian size
size := int(key[sizeIdx])<<8 | int(key[sizeIdx+1])
dataStart := sizeIdx + 2
if len(key) >= dataStart+size {
eventData := key[dataStart : dataStart+size]
ev = new(event.E)
if err = ev.UnmarshalBinary(bytes.NewBuffer(eventData)); err == nil {
events[ser.Get()] = ev
}
// Clean up and continue
it.Close()
err = nil
continue
}
}
}
it.Close()
// Not found in sev table, try evt (traditional) prefix
buf := new(bytes.Buffer)
if err = indexes.EventEnc(ser).MarshalWrite(buf); chk.E(err) {
// Skip this serial on error but continue with others
err = nil
continue
}
var item *badger.Item
if item, err = txn.Get(buf.Bytes()); err != nil {
// Skip this serial if not found but continue with others
err = nil
continue
}
var v []byte
if v, err = item.ValueCopy(nil); chk.E(err) {
// Skip this serial on error but continue with others
err = nil
continue
}
// Check if we have valid data before attempting to unmarshal
if len(v) < 32+32+1+2+1+1+64 { // ID + Pubkey + min varint fields + Sig
// Skip this serial - incomplete data
continue
}
ev := new(event.E)
ev = new(event.E)
if err = ev.UnmarshalBinary(bytes.NewBuffer(v)); err != nil {
// Skip this serial on unmarshal error but continue with others
err = nil
continue
}
// Successfully unmarshaled event, add to results
events[ser.Get()] = ev
}
@@ -64,6 +109,6 @@ func (d *D) FetchEventsBySerials(serials []*types.Uint40) (events map[uint64]*ev
); err != nil {
return
}
return events, nil
}

View File

@@ -10,12 +10,12 @@ import (
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/hex"
// "next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/tag"
)
func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
log.T.F("GetSerialById: input id=%s", hex.Enc(id))
// log.T.F("GetSerialById: input id=%s", hex.Enc(id))
var idxs []Range
if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.NewFromBytesSlice(id)}); chk.E(err) {
return
@@ -58,7 +58,7 @@ func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
return
}
if !idFound {
err = errorf.T("id not found in database: %s", hex.Enc(id))
err = errorf.E("id not found in database")
return
}
@@ -80,7 +80,7 @@ func (d *D) GetSerialsByIds(ids *tag.T) (
func (d *D) GetSerialsByIdsWithFilter(
ids *tag.T, fn func(ev *event.E, ser *types.Uint40) bool,
) (serials map[string]*types.Uint40, err error) {
log.T.F("GetSerialsByIdsWithFilter: input ids count=%d", ids.Len())
// log.T.F("GetSerialsByIdsWithFilter: input ids count=%d", ids.Len())
// Initialize the result map with estimated capacity to reduce reallocations
serials = make(map[string]*types.Uint40, ids.Len())

View File

@@ -33,7 +33,7 @@ func (d *D) GetSerialsByRange(idx Range) (
}
iterCount := 0
it.Seek(endBoundary)
log.T.F("GetSerialsByRange: iterator valid=%v, sought to endBoundary", it.Valid())
// log.T.F("GetSerialsByRange: iterator valid=%v, sought to endBoundary", it.Valid())
for it.Valid() {
iterCount++
if iterCount > 100 {
@@ -46,12 +46,12 @@ func (d *D) GetSerialsByRange(idx Range) (
key = item.Key()
keyWithoutSerial := key[:len(key)-5]
cmp := bytes.Compare(keyWithoutSerial, idx.Start)
log.T.F("GetSerialsByRange: iter %d, key prefix matches=%v, cmp=%d", iterCount, bytes.HasPrefix(key, idx.Start[:len(idx.Start)-8]), cmp)
// log.T.F("GetSerialsByRange: iter %d, key prefix matches=%v, cmp=%d", iterCount, bytes.HasPrefix(key, idx.Start[:len(idx.Start)-8]), cmp)
if cmp < 0 {
// didn't find it within the timestamp range
log.T.F("GetSerialsByRange: key out of range (cmp=%d), stopping iteration", cmp)
log.T.F(" keyWithoutSerial len=%d: %x", len(keyWithoutSerial), keyWithoutSerial)
log.T.F(" idx.Start len=%d: %x", len(idx.Start), idx.Start)
// log.T.F("GetSerialsByRange: key out of range (cmp=%d), stopping iteration", cmp)
// log.T.F(" keyWithoutSerial len=%d: %x", len(keyWithoutSerial), keyWithoutSerial)
// log.T.F(" idx.Start len=%d: %x", len(idx.Start), idx.Start)
return
}
ser := new(types.Uint40)
@@ -62,7 +62,7 @@ func (d *D) GetSerialsByRange(idx Range) (
sers = append(sers, ser)
it.Next()
}
log.T.F("GetSerialsByRange: iteration complete, found %d serials", len(sers))
// log.T.F("GetSerialsByRange: iteration complete, found %d serials", len(sers))
return
},
); chk.E(err) {

View File

@@ -55,9 +55,12 @@ type I string
func (i I) Write(w io.Writer) (n int, err error) { return w.Write([]byte(i)) }
const (
EventPrefix = I("evt")
IdPrefix = I("eid")
FullIdPubkeyPrefix = I("fpc") // full id, pubkey, created at
EventPrefix = I("evt")
SmallEventPrefix = I("sev") // small event with inline data (<=384 bytes)
ReplaceableEventPrefix = I("rev") // replaceable event (kinds 0,3,10000-19999) with inline data
AddressableEventPrefix = I("aev") // addressable event (kinds 30000-39999) with inline data
IdPrefix = I("eid")
FullIdPubkeyPrefix = I("fpc") // full id, pubkey, created at
CreatedAtPrefix = I("c--") // created at
KindPrefix = I("kc-") // kind, created at
@@ -80,6 +83,12 @@ func Prefix(prf int) (i I) {
switch prf {
case Event:
return EventPrefix
case SmallEvent:
return SmallEventPrefix
case ReplaceableEvent:
return ReplaceableEventPrefix
case AddressableEvent:
return AddressableEventPrefix
case Id:
return IdPrefix
case FullIdPubkey:
@@ -125,6 +134,12 @@ func Identify(r io.Reader) (i int, err error) {
switch I(b[:]) {
case EventPrefix:
i = Event
case SmallEventPrefix:
i = SmallEvent
case ReplaceableEventPrefix:
i = ReplaceableEvent
case AddressableEventPrefix:
i = AddressableEvent
case IdPrefix:
i = Id
case FullIdPubkeyPrefix:
@@ -200,6 +215,53 @@ func EventEnc(ser *types.Uint40) (enc *T) {
}
func EventDec(ser *types.Uint40) (enc *T) { return New(NewPrefix(), ser) }
// SmallEvent stores events <=384 bytes with inline data to avoid double lookup.
// This is a Reiser4-inspired optimization for small event packing.
// 384 bytes covers: ID(32) + Pubkey(32) + Sig(64) + basic fields + small content
//
// prefix|5 serial|2 size_uint16|data (variable length, max 384 bytes)
var SmallEvent = next()
func SmallEventVars() (ser *types.Uint40) { return new(types.Uint40) }
func SmallEventEnc(ser *types.Uint40) (enc *T) {
return New(NewPrefix(SmallEvent), ser)
}
func SmallEventDec(ser *types.Uint40) (enc *T) { return New(NewPrefix(), ser) }
// ReplaceableEvent stores replaceable events (kinds 0,3,10000-19999) with inline data.
// Optimized storage for metadata events that are frequently replaced.
// Key format enables direct lookup by pubkey+kind without additional index traversal.
//
// prefix|8 pubkey_hash|2 kind|2 size_uint16|data (variable length, max 384 bytes)
var ReplaceableEvent = next()
func ReplaceableEventVars() (p *types.PubHash, ki *types.Uint16) {
return new(types.PubHash), new(types.Uint16)
}
func ReplaceableEventEnc(p *types.PubHash, ki *types.Uint16) (enc *T) {
return New(NewPrefix(ReplaceableEvent), p, ki)
}
func ReplaceableEventDec(p *types.PubHash, ki *types.Uint16) (enc *T) {
return New(NewPrefix(), p, ki)
}
// AddressableEvent stores parameterized replaceable events (kinds 30000-39999) with inline data.
// Optimized storage for addressable events identified by pubkey+kind+d-tag.
// Key format enables direct lookup without additional index traversal.
//
// prefix|8 pubkey_hash|2 kind|8 dtag_hash|2 size_uint16|data (variable length, max 384 bytes)
var AddressableEvent = next()
func AddressableEventVars() (p *types.PubHash, ki *types.Uint16, d *types.Ident) {
return new(types.PubHash), new(types.Uint16), new(types.Ident)
}
func AddressableEventEnc(p *types.PubHash, ki *types.Uint16, d *types.Ident) (enc *T) {
return New(NewPrefix(AddressableEvent), p, ki, d)
}
func AddressableEventDec(p *types.PubHash, ki *types.Uint16, d *types.Ident) (enc *T) {
return New(NewPrefix(), p, ki, d)
}
// Id contains a truncated 8-byte hash of an event index. This is the secondary
// key of an event, the primary key is the serial found in the Event.
//

View File

@@ -0,0 +1,521 @@
package database
import (
"bytes"
"context"
"os"
"testing"
"time"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/encoders/timestamp"
"next.orly.dev/pkg/interfaces/signer/p8k"
)
// TestInlineSmallEventStorage tests the Reiser4-inspired inline storage optimization
// for small events (<=1024 bytes by default).
func TestInlineSmallEventStorage(t *testing.T) {
// Create a temporary directory for the database
tempDir, err := os.MkdirTemp("", "test-inline-db-*")
if err != nil {
t.Fatalf("Failed to create temporary directory: %v", err)
}
defer os.RemoveAll(tempDir)
// Create a context and cancel function for the database
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Initialize the database
db, err := New(ctx, cancel, tempDir, "info")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
// Create a signer
sign := p8k.MustNew()
if err := sign.Generate(); chk.E(err) {
t.Fatal(err)
}
// Test Case 1: Small event (should use inline storage)
t.Run("SmallEventInlineStorage", func(t *testing.T) {
smallEvent := event.New()
smallEvent.Kind = kind.TextNote.K
smallEvent.CreatedAt = timestamp.Now().V
smallEvent.Content = []byte("Hello Nostr!") // Small content
smallEvent.Pubkey = sign.Pub()
smallEvent.Tags = tag.NewS()
// Sign the event
if err := smallEvent.Sign(sign); err != nil {
t.Fatalf("Failed to sign small event: %v", err)
}
// Save the event
if _, err := db.SaveEvent(ctx, smallEvent); err != nil {
t.Fatalf("Failed to save small event: %v", err)
}
// Verify it was stored with sev prefix
serial, err := db.GetSerialById(smallEvent.ID)
if err != nil {
t.Fatalf("Failed to get serial for small event: %v", err)
}
// Check that sev key exists
sevKeyExists := false
db.View(func(txn *badger.Txn) error {
smallBuf := new(bytes.Buffer)
indexes.SmallEventEnc(serial).MarshalWrite(smallBuf)
opts := badger.DefaultIteratorOptions
opts.Prefix = smallBuf.Bytes()
it := txn.NewIterator(opts)
defer it.Close()
it.Rewind()
if it.Valid() {
sevKeyExists = true
}
return nil
})
if !sevKeyExists {
t.Errorf("Small event was not stored with sev prefix")
}
// Verify evt key does NOT exist for small event
evtKeyExists := false
db.View(func(txn *badger.Txn) error {
buf := new(bytes.Buffer)
indexes.EventEnc(serial).MarshalWrite(buf)
_, err := txn.Get(buf.Bytes())
if err == nil {
evtKeyExists = true
}
return nil
})
if evtKeyExists {
t.Errorf("Small event should not have evt key (should only use sev)")
}
// Fetch and verify the event
fetchedEvent, err := db.FetchEventBySerial(serial)
if err != nil {
t.Fatalf("Failed to fetch small event: %v", err)
}
if !bytes.Equal(fetchedEvent.ID, smallEvent.ID) {
t.Errorf("Fetched event ID mismatch: got %x, want %x", fetchedEvent.ID, smallEvent.ID)
}
if !bytes.Equal(fetchedEvent.Content, smallEvent.Content) {
t.Errorf("Fetched event content mismatch: got %q, want %q", fetchedEvent.Content, smallEvent.Content)
}
})
// Test Case 2: Large event (should use traditional storage)
t.Run("LargeEventTraditionalStorage", func(t *testing.T) {
largeEvent := event.New()
largeEvent.Kind = kind.TextNote.K
largeEvent.CreatedAt = timestamp.Now().V
// Create content larger than 1024 bytes (the default inline storage threshold)
largeContent := make([]byte, 1500)
for i := range largeContent {
largeContent[i] = 'x'
}
largeEvent.Content = largeContent
largeEvent.Pubkey = sign.Pub()
largeEvent.Tags = tag.NewS()
// Sign the event
if err := largeEvent.Sign(sign); err != nil {
t.Fatalf("Failed to sign large event: %v", err)
}
// Save the event
if _, err := db.SaveEvent(ctx, largeEvent); err != nil {
t.Fatalf("Failed to save large event: %v", err)
}
// Verify it was stored with evt prefix
serial, err := db.GetSerialById(largeEvent.ID)
if err != nil {
t.Fatalf("Failed to get serial for large event: %v", err)
}
// Check that evt key exists
evtKeyExists := false
db.View(func(txn *badger.Txn) error {
buf := new(bytes.Buffer)
indexes.EventEnc(serial).MarshalWrite(buf)
_, err := txn.Get(buf.Bytes())
if err == nil {
evtKeyExists = true
}
return nil
})
if !evtKeyExists {
t.Errorf("Large event was not stored with evt prefix")
}
// Fetch and verify the event
fetchedEvent, err := db.FetchEventBySerial(serial)
if err != nil {
t.Fatalf("Failed to fetch large event: %v", err)
}
if !bytes.Equal(fetchedEvent.ID, largeEvent.ID) {
t.Errorf("Fetched event ID mismatch: got %x, want %x", fetchedEvent.ID, largeEvent.ID)
}
})
// Test Case 3: Batch fetch with mixed small and large events
t.Run("BatchFetchMixedEvents", func(t *testing.T) {
var serials []*types.Uint40
expectedIDs := make(map[uint64][]byte)
// Create 10 small events and 10 large events
for i := 0; i < 20; i++ {
ev := event.New()
ev.Kind = kind.TextNote.K
ev.CreatedAt = timestamp.Now().V + int64(i)
ev.Pubkey = sign.Pub()
ev.Tags = tag.NewS()
// Alternate between small and large
if i%2 == 0 {
ev.Content = []byte("Small event")
} else {
largeContent := make([]byte, 500)
for j := range largeContent {
largeContent[j] = 'x'
}
ev.Content = largeContent
}
if err := ev.Sign(sign); err != nil {
t.Fatalf("Failed to sign event %d: %v", i, err)
}
if _, err := db.SaveEvent(ctx, ev); err != nil {
t.Fatalf("Failed to save event %d: %v", i, err)
}
serial, err := db.GetSerialById(ev.ID)
if err != nil {
t.Fatalf("Failed to get serial for event %d: %v", i, err)
}
serials = append(serials, serial)
expectedIDs[serial.Get()] = ev.ID
}
// Batch fetch all events
events, err := db.FetchEventsBySerials(serials)
if err != nil {
t.Fatalf("Failed to batch fetch events: %v", err)
}
if len(events) != 20 {
t.Errorf("Expected 20 events, got %d", len(events))
}
// Verify all events were fetched correctly
for serialValue, ev := range events {
expectedID := expectedIDs[serialValue]
if !bytes.Equal(ev.ID, expectedID) {
t.Errorf("Event ID mismatch for serial %d: got %x, want %x",
serialValue, ev.ID, expectedID)
}
}
})
// Test Case 4: Edge case - event near 384 byte threshold
t.Run("ThresholdEvent", func(t *testing.T) {
ev := event.New()
ev.Kind = kind.TextNote.K
ev.CreatedAt = timestamp.Now().V
ev.Pubkey = sign.Pub()
ev.Tags = tag.NewS()
// Create content near the threshold
testContent := make([]byte, 250)
for i := range testContent {
testContent[i] = 'x'
}
ev.Content = testContent
if err := ev.Sign(sign); err != nil {
t.Fatalf("Failed to sign threshold event: %v", err)
}
if _, err := db.SaveEvent(ctx, ev); err != nil {
t.Fatalf("Failed to save threshold event: %v", err)
}
serial, err := db.GetSerialById(ev.ID)
if err != nil {
t.Fatalf("Failed to get serial: %v", err)
}
// Fetch and verify
fetchedEvent, err := db.FetchEventBySerial(serial)
if err != nil {
t.Fatalf("Failed to fetch threshold event: %v", err)
}
if !bytes.Equal(fetchedEvent.ID, ev.ID) {
t.Errorf("Fetched event ID mismatch")
}
})
}
// TestInlineStorageMigration tests the migration from traditional to inline storage
func TestInlineStorageMigration(t *testing.T) {
// Create a temporary directory for the database
tempDir, err := os.MkdirTemp("", "test-migration-db-*")
if err != nil {
t.Fatalf("Failed to create temporary directory: %v", err)
}
defer os.RemoveAll(tempDir)
// Create a context and cancel function for the database
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Initialize the database
db, err := New(ctx, cancel, tempDir, "info")
if err != nil {
t.Fatalf("Failed to create database: %v", err)
}
// Create a signer
sign := p8k.MustNew()
if err := sign.Generate(); chk.E(err) {
t.Fatal(err)
}
// Manually set database version to 3 (before inline storage migration)
db.writeVersionTag(3)
// Create and save some small events the old way (manually)
var testEvents []*event.E
for i := 0; i < 5; i++ {
ev := event.New()
ev.Kind = kind.TextNote.K
ev.CreatedAt = timestamp.Now().V + int64(i)
ev.Content = []byte("Test event")
ev.Pubkey = sign.Pub()
ev.Tags = tag.NewS()
if err := ev.Sign(sign); err != nil {
t.Fatalf("Failed to sign event: %v", err)
}
// Get next serial
serial, err := db.seq.Next()
if err != nil {
t.Fatalf("Failed to get serial: %v", err)
}
// Generate indexes
idxs, err := GetIndexesForEvent(ev, serial)
if err != nil {
t.Fatalf("Failed to generate indexes: %v", err)
}
// Serialize event
eventDataBuf := new(bytes.Buffer)
ev.MarshalBinary(eventDataBuf)
eventData := eventDataBuf.Bytes()
// Save the old way (evt prefix with value)
db.Update(func(txn *badger.Txn) error {
ser := new(types.Uint40)
ser.Set(serial)
// Save indexes
for _, key := range idxs {
txn.Set(key, nil)
}
// Save event the old way
keyBuf := new(bytes.Buffer)
indexes.EventEnc(ser).MarshalWrite(keyBuf)
txn.Set(keyBuf.Bytes(), eventData)
return nil
})
testEvents = append(testEvents, ev)
}
t.Logf("Created %d test events with old storage format", len(testEvents))
// Close and reopen database to trigger migration
db.Close()
db, err = New(ctx, cancel, tempDir, "info")
if err != nil {
t.Fatalf("Failed to reopen database: %v", err)
}
defer db.Close()
// Give migration time to complete
time.Sleep(100 * time.Millisecond)
// Verify all events can still be fetched
for i, ev := range testEvents {
serial, err := db.GetSerialById(ev.ID)
if err != nil {
t.Fatalf("Failed to get serial for event %d after migration: %v", i, err)
}
fetchedEvent, err := db.FetchEventBySerial(serial)
if err != nil {
t.Fatalf("Failed to fetch event %d after migration: %v", i, err)
}
if !bytes.Equal(fetchedEvent.ID, ev.ID) {
t.Errorf("Event %d ID mismatch after migration: got %x, want %x",
i, fetchedEvent.ID, ev.ID)
}
if !bytes.Equal(fetchedEvent.Content, ev.Content) {
t.Errorf("Event %d content mismatch after migration: got %q, want %q",
i, fetchedEvent.Content, ev.Content)
}
// Verify it's now using inline storage
sevKeyExists := false
db.View(func(txn *badger.Txn) error {
smallBuf := new(bytes.Buffer)
indexes.SmallEventEnc(serial).MarshalWrite(smallBuf)
opts := badger.DefaultIteratorOptions
opts.Prefix = smallBuf.Bytes()
it := txn.NewIterator(opts)
defer it.Close()
it.Rewind()
if it.Valid() {
sevKeyExists = true
t.Logf("Event %d (%s) successfully migrated to inline storage",
i, hex.Enc(ev.ID[:8]))
}
return nil
})
if !sevKeyExists {
t.Errorf("Event %d was not migrated to inline storage", i)
}
}
}
// BenchmarkInlineVsTraditionalStorage compares performance of inline vs traditional storage
func BenchmarkInlineVsTraditionalStorage(b *testing.B) {
// Create a temporary directory for the database
tempDir, err := os.MkdirTemp("", "bench-inline-db-*")
if err != nil {
b.Fatalf("Failed to create temporary directory: %v", err)
}
defer os.RemoveAll(tempDir)
// Create a context and cancel function for the database
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Initialize the database
db, err := New(ctx, cancel, tempDir, "info")
if err != nil {
b.Fatalf("Failed to create database: %v", err)
}
defer db.Close()
// Create a signer
sign := p8k.MustNew()
if err := sign.Generate(); chk.E(err) {
b.Fatal(err)
}
// Pre-populate database with mix of small and large events
var smallSerials []*types.Uint40
var largeSerials []*types.Uint40
for i := 0; i < 100; i++ {
// Small event
smallEv := event.New()
smallEv.Kind = kind.TextNote.K
smallEv.CreatedAt = timestamp.Now().V + int64(i)*2
smallEv.Content = []byte("Small test event")
smallEv.Pubkey = sign.Pub()
smallEv.Tags = tag.NewS()
smallEv.Sign(sign)
db.SaveEvent(ctx, smallEv)
if serial, err := db.GetSerialById(smallEv.ID); err == nil {
smallSerials = append(smallSerials, serial)
}
// Large event
largeEv := event.New()
largeEv.Kind = kind.TextNote.K
largeEv.CreatedAt = timestamp.Now().V + int64(i)*2 + 1
largeContent := make([]byte, 500)
for j := range largeContent {
largeContent[j] = 'x'
}
largeEv.Content = largeContent
largeEv.Pubkey = sign.Pub()
largeEv.Tags = tag.NewS()
largeEv.Sign(sign)
db.SaveEvent(ctx, largeEv)
if serial, err := db.GetSerialById(largeEv.ID); err == nil {
largeSerials = append(largeSerials, serial)
}
}
b.Run("FetchSmallEventsInline", func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
idx := i % len(smallSerials)
db.FetchEventBySerial(smallSerials[idx])
}
})
b.Run("FetchLargeEventsTraditional", func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
idx := i % len(largeSerials)
db.FetchEventBySerial(largeSerials[idx])
}
})
b.Run("BatchFetchSmallEvents", func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
db.FetchEventsBySerials(smallSerials[:10])
}
})
b.Run("BatchFetchLargeEvents", func(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
db.FetchEventsBySerials(largeSerials[:10])
}
})
}

107
pkg/database/interface.go Normal file
View File

@@ -0,0 +1,107 @@
package database
import (
"context"
"io"
"time"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/interfaces/store"
)
// Database defines the interface that all database implementations must satisfy.
// This allows switching between different storage backends (badger, dgraph, etc.)
type Database interface {
// Core lifecycle methods
Path() string
Init(path string) error
Sync() error
Close() error
Wipe() error
SetLogLevel(level string)
Ready() <-chan struct{} // Returns a channel that closes when database is ready to serve requests
// Event storage and retrieval
SaveEvent(c context.Context, ev *event.E) (exists bool, err error)
GetSerialsFromFilter(f *filter.F) (serials types.Uint40s, err error)
WouldReplaceEvent(ev *event.E) (bool, types.Uint40s, error)
QueryEvents(c context.Context, f *filter.F) (evs event.S, err error)
QueryAllVersions(c context.Context, f *filter.F) (evs event.S, err error)
QueryEventsWithOptions(c context.Context, f *filter.F, includeDeleteEvents bool, showAllVersions bool) (evs event.S, err error)
QueryDeleteEventsByTargetId(c context.Context, targetEventId []byte) (evs event.S, err error)
QueryForSerials(c context.Context, f *filter.F) (serials types.Uint40s, err error)
QueryForIds(c context.Context, f *filter.F) (idPkTs []*store.IdPkTs, err error)
CountEvents(c context.Context, f *filter.F) (count int, approximate bool, err error)
FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error)
FetchEventsBySerials(serials []*types.Uint40) (events map[uint64]*event.E, err error)
GetSerialById(id []byte) (ser *types.Uint40, err error)
GetSerialsByIds(ids *tag.T) (serials map[string]*types.Uint40, err error)
GetSerialsByIdsWithFilter(ids *tag.T, fn func(ev *event.E, ser *types.Uint40) bool) (serials map[string]*types.Uint40, err error)
GetSerialsByRange(idx Range) (serials types.Uint40s, err error)
GetFullIdPubkeyBySerial(ser *types.Uint40) (fidpk *store.IdPkTs, err error)
GetFullIdPubkeyBySerials(sers []*types.Uint40) (fidpks []*store.IdPkTs, err error)
// Event deletion
DeleteEvent(c context.Context, eid []byte) error
DeleteEventBySerial(c context.Context, ser *types.Uint40, ev *event.E) error
DeleteExpired()
ProcessDelete(ev *event.E, admins [][]byte) error
CheckForDeleted(ev *event.E, admins [][]byte) error
// Import/Export
Import(rr io.Reader)
Export(c context.Context, w io.Writer, pubkeys ...[]byte)
ImportEventsFromReader(ctx context.Context, rr io.Reader) error
ImportEventsFromStrings(ctx context.Context, eventJSONs []string, policyManager interface{ CheckPolicy(action string, ev *event.E, pubkey []byte, remote string) (bool, error) }) error
// Relay identity
GetRelayIdentitySecret() (skb []byte, err error)
SetRelayIdentitySecret(skb []byte) error
GetOrCreateRelayIdentitySecret() (skb []byte, err error)
// Markers (metadata key-value storage)
SetMarker(key string, value []byte) error
GetMarker(key string) (value []byte, err error)
HasMarker(key string) bool
DeleteMarker(key string) error
// Subscriptions (payment-based access control)
GetSubscription(pubkey []byte) (*Subscription, error)
IsSubscriptionActive(pubkey []byte) (bool, error)
ExtendSubscription(pubkey []byte, days int) error
RecordPayment(pubkey []byte, amount int64, invoice, preimage string) error
GetPaymentHistory(pubkey []byte) ([]Payment, error)
ExtendBlossomSubscription(pubkey []byte, tier string, storageMB int64, daysExtended int) error
GetBlossomStorageQuota(pubkey []byte) (quotaMB int64, err error)
IsFirstTimeUser(pubkey []byte) (bool, error)
// NIP-43 Invite-based ACL
AddNIP43Member(pubkey []byte, inviteCode string) error
RemoveNIP43Member(pubkey []byte) error
IsNIP43Member(pubkey []byte) (isMember bool, err error)
GetNIP43Membership(pubkey []byte) (*NIP43Membership, error)
GetAllNIP43Members() ([][]byte, error)
StoreInviteCode(code string, expiresAt time.Time) error
ValidateInviteCode(code string) (valid bool, err error)
DeleteInviteCode(code string) error
PublishNIP43MembershipEvent(kind int, pubkey []byte) error
// Migrations (version tracking for schema updates)
RunMigrations()
// Query cache methods
GetCachedJSON(f *filter.F) ([][]byte, bool)
CacheMarshaledJSON(f *filter.F, marshaledJSON [][]byte)
InvalidateQueryCache()
// Utility methods
EventIdsBySerial(start uint64, count int) (evs []uint64, err error)
}

View File

@@ -12,10 +12,11 @@ import (
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/ints"
"next.orly.dev/pkg/encoders/kind"
)
const (
currentVersion uint32 = 3
currentVersion uint32 = 4
)
func (d *D) RunMigrations() {
@@ -82,6 +83,13 @@ func (d *D) RunMigrations() {
// bump to version 3
_ = d.writeVersionTag(3)
}
if dbVersion < 4 {
log.I.F("migrating to version 4...")
// convert small events to inline storage (Reiser4 optimization)
d.ConvertSmallEventsToInline()
// bump to version 4
_ = d.writeVersionTag(4)
}
}
// writeVersionTag writes a new version tag key to the database (no value)
@@ -323,3 +331,209 @@ func (d *D) CleanupEphemeralEvents() {
log.I.F("cleaned up %d ephemeral events from database", deletedCount)
}
// ConvertSmallEventsToInline migrates small events (<=384 bytes) to inline storage.
// This is a Reiser4-inspired optimization that stores small event data in the key itself,
// avoiding a second database lookup and improving query performance.
// Also handles replaceable and addressable events with specialized storage.
func (d *D) ConvertSmallEventsToInline() {
log.I.F("converting events to optimized inline storage (Reiser4 optimization)...")
var err error
const smallEventThreshold = 384
type EventData struct {
Serial uint64
EventData []byte
OldKey []byte
IsReplaceable bool
IsAddressable bool
Pubkey []byte
Kind uint16
DTag []byte
}
var events []EventData
var convertedCount int
var deletedCount int
// Helper function for counting by predicate
countBy := func(events []EventData, predicate func(EventData) bool) int {
count := 0
for _, e := range events {
if predicate(e) {
count++
}
}
return count
}
// First pass: identify events in evt table that can benefit from inline storage
if err = d.View(
func(txn *badger.Txn) (err error) {
prf := new(bytes.Buffer)
if err = indexes.EventEnc(nil).MarshalWrite(prf); chk.E(err) {
return
}
it := txn.NewIterator(badger.IteratorOptions{Prefix: prf.Bytes()})
defer it.Close()
for it.Rewind(); it.Valid(); it.Next() {
item := it.Item()
var val []byte
if val, err = item.ValueCopy(nil); chk.E(err) {
continue
}
// Check if event data is small enough for inline storage
if len(val) <= smallEventThreshold {
// Decode event to check if it's replaceable or addressable
ev := new(event.E)
if err = ev.UnmarshalBinary(bytes.NewBuffer(val)); chk.E(err) {
continue
}
// Extract serial from key
key := item.KeyCopy(nil)
ser := indexes.EventVars()
if err = indexes.EventDec(ser).UnmarshalRead(bytes.NewBuffer(key)); chk.E(err) {
continue
}
eventData := EventData{
Serial: ser.Get(),
EventData: val,
OldKey: key,
IsReplaceable: kind.IsReplaceable(ev.Kind),
IsAddressable: kind.IsParameterizedReplaceable(ev.Kind),
Pubkey: ev.Pubkey,
Kind: ev.Kind,
}
// Extract d-tag for addressable events
if eventData.IsAddressable {
dTag := ev.Tags.GetFirst([]byte("d"))
if dTag != nil {
eventData.DTag = dTag.Value()
}
}
events = append(events, eventData)
}
}
return nil
},
); chk.E(err) {
return
}
log.I.F("found %d events to convert (%d regular, %d replaceable, %d addressable)",
len(events),
countBy(events, func(e EventData) bool { return !e.IsReplaceable && !e.IsAddressable }),
countBy(events, func(e EventData) bool { return e.IsReplaceable }),
countBy(events, func(e EventData) bool { return e.IsAddressable }),
)
// Second pass: convert in batches to avoid large transactions
const batchSize = 1000
for i := 0; i < len(events); i += batchSize {
end := i + batchSize
if end > len(events) {
end = len(events)
}
batch := events[i:end]
// Write new inline keys and delete old keys
if err = d.Update(
func(txn *badger.Txn) (err error) {
for _, e := range batch {
// First, write the sev key for serial-based access (all small events)
sevKeyBuf := new(bytes.Buffer)
ser := new(types.Uint40)
if err = ser.Set(e.Serial); chk.E(err) {
continue
}
if err = indexes.SmallEventEnc(ser).MarshalWrite(sevKeyBuf); chk.E(err) {
continue
}
// Append size as uint16 big-endian (2 bytes)
sizeBytes := []byte{byte(len(e.EventData) >> 8), byte(len(e.EventData))}
sevKeyBuf.Write(sizeBytes)
// Append event data
sevKeyBuf.Write(e.EventData)
// Write sev key (no value needed)
if err = txn.Set(sevKeyBuf.Bytes(), nil); chk.E(err) {
log.W.F("failed to write sev key for serial %d: %v", e.Serial, err)
continue
}
convertedCount++
// Additionally, for replaceable/addressable events, write specialized keys
if e.IsAddressable && len(e.DTag) > 0 {
// Addressable event: aev|pubkey_hash|kind|dtag_hash|size|data
aevKeyBuf := new(bytes.Buffer)
pubHash := new(types.PubHash)
pubHash.FromPubkey(e.Pubkey)
kindVal := new(types.Uint16)
kindVal.Set(e.Kind)
dTagHash := new(types.Ident)
dTagHash.FromIdent(e.DTag)
if err = indexes.AddressableEventEnc(pubHash, kindVal, dTagHash).MarshalWrite(aevKeyBuf); chk.E(err) {
continue
}
// Append size and data
aevKeyBuf.Write(sizeBytes)
aevKeyBuf.Write(e.EventData)
if err = txn.Set(aevKeyBuf.Bytes(), nil); chk.E(err) {
log.W.F("failed to write aev key for serial %d: %v", e.Serial, err)
continue
}
} else if e.IsReplaceable {
// Replaceable event: rev|pubkey_hash|kind|size|data
revKeyBuf := new(bytes.Buffer)
pubHash := new(types.PubHash)
pubHash.FromPubkey(e.Pubkey)
kindVal := new(types.Uint16)
kindVal.Set(e.Kind)
if err = indexes.ReplaceableEventEnc(pubHash, kindVal).MarshalWrite(revKeyBuf); chk.E(err) {
continue
}
// Append size and data
revKeyBuf.Write(sizeBytes)
revKeyBuf.Write(e.EventData)
if err = txn.Set(revKeyBuf.Bytes(), nil); chk.E(err) {
log.W.F("failed to write rev key for serial %d: %v", e.Serial, err)
continue
}
}
// Delete old evt key
if err = txn.Delete(e.OldKey); chk.E(err) {
log.W.F("failed to delete old event key for serial %d: %v", e.Serial, err)
continue
}
deletedCount++
}
return nil
},
); chk.E(err) {
log.W.F("batch update failed: %v", err)
continue
}
if (i/batchSize)%10 == 0 && i > 0 {
log.I.F("progress: %d/%d events converted", i, len(events))
}
}
log.I.F("migration complete: converted %d events to optimized inline storage, deleted %d old keys", convertedCount, deletedCount)
}

View File

@@ -583,6 +583,7 @@ func (d *D) QueryEventsWithOptions(c context.Context, f *filter.F, includeDelete
}
}()
}
return
}

View File

@@ -5,7 +5,6 @@ import (
"lol.mleku.dev/chk"
"lol.mleku.dev/errorf"
"lol.mleku.dev/log"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
@@ -21,7 +20,7 @@ import (
// pubkeys that also may delete the event, normally only the author is allowed
// to delete an event.
func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
log.T.F("CheckForDeleted: checking event %x", ev.ID)
// log.T.F("CheckForDeleted: checking event %x", ev.ID)
keys := append([][]byte{ev.Pubkey}, admins...)
authors := tag.NewFromBytesSlice(keys...)
// if the event is addressable, check for a deletion event with the same
@@ -186,9 +185,9 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
return
}
// otherwise we check for a delete by event id
log.T.F("CheckForDeleted: checking for e-tag deletion of event %x", ev.ID)
log.T.F("CheckForDeleted: authors filter: %v", authors)
log.T.F("CheckForDeleted: looking for tag e with value: %s", hex.Enc(ev.ID))
// log.T.F("CheckForDeleted: checking for e-tag deletion of event %x", ev.ID)
// log.T.F("CheckForDeleted: authors filter: %v", authors)
// log.T.F("CheckForDeleted: looking for tag e with value: %s", hex.Enc(ev.ID))
var idxs []Range
if idxs, err = GetIndexesFromFilter(
&filter.F{
@@ -201,18 +200,18 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
); chk.E(err) {
return
}
log.T.F("CheckForDeleted: found %d indexes", len(idxs))
// log.T.F("CheckForDeleted: found %d indexes", len(idxs))
var sers types.Uint40s
for i, idx := range idxs {
log.T.F("CheckForDeleted: checking index %d: %v", i, idx)
for _, idx := range idxs {
// log.T.F("CheckForDeleted: checking index %d: %v", i, idx)
var s types.Uint40s
if s, err = d.GetSerialsByRange(idx); chk.E(err) {
return
}
log.T.F("CheckForDeleted: index %d returned %d serials", i, len(s))
// log.T.F("CheckForDeleted: index %d returned %d serials", i, len(s))
if len(s) > 0 {
// Any e-tag deletion found means the exact event was deleted and cannot be resubmitted
log.T.F("CheckForDeleted: found e-tag deletion for event %x", ev.ID)
// log.T.F("CheckForDeleted: found e-tag deletion for event %x", ev.ID)
err = errorf.E("blocked: %0x has been deleted", ev.ID)
return
}

View File

@@ -0,0 +1,402 @@
package querycache
import (
"container/list"
"sync"
"time"
"github.com/klauspost/compress/zstd"
"lol.mleku.dev/log"
"next.orly.dev/pkg/encoders/filter"
)
const (
// DefaultMaxSize is the default maximum cache size in bytes (512 MB)
DefaultMaxSize = 512 * 1024 * 1024
// DefaultMaxAge is the default maximum age for cache entries
DefaultMaxAge = 5 * time.Minute
)
// EventCacheEntry represents a cached set of compressed serialized events for a filter
type EventCacheEntry struct {
FilterKey string
CompressedData []byte // ZSTD compressed serialized JSON events
UncompressedSize int // Original size before compression (for stats)
CompressedSize int // Actual compressed size in bytes
EventCount int // Number of events in this entry
LastAccess time.Time
CreatedAt time.Time
listElement *list.Element
}
// EventCache caches event.S results from database queries with ZSTD compression
type EventCache struct {
mu sync.RWMutex
entries map[string]*EventCacheEntry
lruList *list.List
currentSize int64 // Tracks compressed size
maxSize int64
maxAge time.Duration
// ZSTD encoder/decoder (reused for efficiency)
encoder *zstd.Encoder
decoder *zstd.Decoder
// Compaction tracking
needsCompaction bool
compactionChan chan struct{}
// Metrics
hits uint64
misses uint64
evictions uint64
invalidations uint64
compressionRatio float64 // Average compression ratio
compactionRuns uint64
}
// NewEventCache creates a new event cache
func NewEventCache(maxSize int64, maxAge time.Duration) *EventCache {
if maxSize <= 0 {
maxSize = DefaultMaxSize
}
if maxAge <= 0 {
maxAge = DefaultMaxAge
}
// Create ZSTD encoder at level 9 (best compression)
encoder, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
if err != nil {
log.E.F("failed to create ZSTD encoder: %v", err)
return nil
}
// Create ZSTD decoder
decoder, err := zstd.NewReader(nil)
if err != nil {
log.E.F("failed to create ZSTD decoder: %v", err)
return nil
}
c := &EventCache{
entries: make(map[string]*EventCacheEntry),
lruList: list.New(),
maxSize: maxSize,
maxAge: maxAge,
encoder: encoder,
decoder: decoder,
compactionChan: make(chan struct{}, 1),
}
// Start background workers
go c.cleanupExpired()
go c.compactionWorker()
return c
}
// Get retrieves cached serialized events for a filter (decompresses on the fly)
func (c *EventCache) Get(f *filter.F) (serializedJSON [][]byte, found bool) {
// Normalize filter by sorting to ensure consistent cache keys
f.Sort()
filterKey := string(f.Serialize())
c.mu.RLock()
entry, exists := c.entries[filterKey]
c.mu.RUnlock()
if !exists {
c.mu.Lock()
c.misses++
c.mu.Unlock()
return nil, false
}
// Check if expired
if time.Since(entry.CreatedAt) > c.maxAge {
c.mu.Lock()
c.removeEntry(entry)
c.misses++
c.mu.Unlock()
return nil, false
}
// Decompress the data (outside of write lock for better concurrency)
decompressed, err := c.decoder.DecodeAll(entry.CompressedData, nil)
if err != nil {
log.E.F("failed to decompress cache entry: %v", err)
c.mu.Lock()
c.misses++
c.mu.Unlock()
return nil, false
}
// Deserialize the individual JSON events from the decompressed blob
// Format: each event is newline-delimited JSON
serializedJSON = make([][]byte, 0, entry.EventCount)
start := 0
for i := 0; i < len(decompressed); i++ {
if decompressed[i] == '\n' {
if i > start {
eventJSON := make([]byte, i-start)
copy(eventJSON, decompressed[start:i])
serializedJSON = append(serializedJSON, eventJSON)
}
start = i + 1
}
}
// Handle last event if no trailing newline
if start < len(decompressed) {
eventJSON := make([]byte, len(decompressed)-start)
copy(eventJSON, decompressed[start:])
serializedJSON = append(serializedJSON, eventJSON)
}
// Update access time and move to front
c.mu.Lock()
entry.LastAccess = time.Now()
c.lruList.MoveToFront(entry.listElement)
c.hits++
c.mu.Unlock()
log.D.F("event cache HIT: filter=%s events=%d compressed=%d uncompressed=%d ratio=%.2f",
filterKey[:min(50, len(filterKey))], entry.EventCount, entry.CompressedSize,
entry.UncompressedSize, float64(entry.UncompressedSize)/float64(entry.CompressedSize))
return serializedJSON, true
}
// PutJSON stores pre-marshaled JSON in the cache with ZSTD compression
// This should be called AFTER events are sent to the client with the marshaled envelopes
func (c *EventCache) PutJSON(f *filter.F, marshaledJSON [][]byte) {
if len(marshaledJSON) == 0 {
return
}
// Normalize filter by sorting to ensure consistent cache keys
f.Sort()
filterKey := string(f.Serialize())
// Concatenate all JSON events with newline delimiters for compression
totalSize := 0
for _, jsonData := range marshaledJSON {
totalSize += len(jsonData) + 1 // +1 for newline
}
uncompressed := make([]byte, 0, totalSize)
for _, jsonData := range marshaledJSON {
uncompressed = append(uncompressed, jsonData...)
uncompressed = append(uncompressed, '\n')
}
// Compress with ZSTD level 9
compressed := c.encoder.EncodeAll(uncompressed, nil)
compressedSize := len(compressed)
// Don't cache if compressed size is still too large
if int64(compressedSize) > c.maxSize {
log.W.F("event cache: compressed entry too large: %d bytes", compressedSize)
return
}
c.mu.Lock()
defer c.mu.Unlock()
// Check if already exists
if existing, exists := c.entries[filterKey]; exists {
c.currentSize -= int64(existing.CompressedSize)
existing.CompressedData = compressed
existing.UncompressedSize = totalSize
existing.CompressedSize = compressedSize
existing.EventCount = len(marshaledJSON)
existing.LastAccess = time.Now()
existing.CreatedAt = time.Now()
c.currentSize += int64(compressedSize)
c.lruList.MoveToFront(existing.listElement)
c.updateCompressionRatio(totalSize, compressedSize)
log.T.F("event cache UPDATE: filter=%s events=%d ratio=%.2f",
filterKey[:min(50, len(filterKey))], len(marshaledJSON),
float64(totalSize)/float64(compressedSize))
return
}
// Evict if necessary
evictionCount := 0
for c.currentSize+int64(compressedSize) > c.maxSize && c.lruList.Len() > 0 {
oldest := c.lruList.Back()
if oldest != nil {
oldEntry := oldest.Value.(*EventCacheEntry)
c.removeEntry(oldEntry)
c.evictions++
evictionCount++
}
}
// Trigger compaction if we evicted entries
if evictionCount > 0 {
c.needsCompaction = true
select {
case c.compactionChan <- struct{}{}:
default:
// Channel already has signal, compaction will run
}
}
// Create new entry
entry := &EventCacheEntry{
FilterKey: filterKey,
CompressedData: compressed,
UncompressedSize: totalSize,
CompressedSize: compressedSize,
EventCount: len(marshaledJSON),
LastAccess: time.Now(),
CreatedAt: time.Now(),
}
entry.listElement = c.lruList.PushFront(entry)
c.entries[filterKey] = entry
c.currentSize += int64(compressedSize)
c.updateCompressionRatio(totalSize, compressedSize)
log.D.F("event cache PUT: filter=%s events=%d uncompressed=%d compressed=%d ratio=%.2f total=%d/%d",
filterKey[:min(50, len(filterKey))], len(marshaledJSON), totalSize, compressedSize,
float64(totalSize)/float64(compressedSize), c.currentSize, c.maxSize)
}
// updateCompressionRatio updates the rolling average compression ratio
func (c *EventCache) updateCompressionRatio(uncompressed, compressed int) {
if compressed == 0 {
return
}
newRatio := float64(uncompressed) / float64(compressed)
// Use exponential moving average
if c.compressionRatio == 0 {
c.compressionRatio = newRatio
} else {
c.compressionRatio = 0.9*c.compressionRatio + 0.1*newRatio
}
}
// Invalidate clears all entries (called when new events are stored)
func (c *EventCache) Invalidate() {
c.mu.Lock()
defer c.mu.Unlock()
if len(c.entries) > 0 {
cleared := len(c.entries)
c.entries = make(map[string]*EventCacheEntry)
c.lruList = list.New()
c.currentSize = 0
c.invalidations += uint64(cleared)
log.T.F("event cache INVALIDATE: cleared %d entries", cleared)
}
}
// removeEntry removes an entry (must be called with lock held)
func (c *EventCache) removeEntry(entry *EventCacheEntry) {
delete(c.entries, entry.FilterKey)
c.lruList.Remove(entry.listElement)
c.currentSize -= int64(entry.CompressedSize)
}
// compactionWorker runs in the background and compacts cache entries after evictions
// to reclaim fragmented space and improve cache efficiency
func (c *EventCache) compactionWorker() {
for range c.compactionChan {
c.mu.Lock()
if !c.needsCompaction {
c.mu.Unlock()
continue
}
log.D.F("cache compaction: starting (entries=%d size=%d/%d)",
len(c.entries), c.currentSize, c.maxSize)
// For ZSTD compressed entries, compaction mainly means ensuring
// entries are tightly packed in memory. Since each entry is already
// individually compressed at level 9, there's not much additional
// compression to gain. The main benefit is from the eviction itself.
c.needsCompaction = false
c.compactionRuns++
c.mu.Unlock()
log.D.F("cache compaction: completed (runs=%d)", c.compactionRuns)
}
}
// cleanupExpired removes expired entries periodically
func (c *EventCache) cleanupExpired() {
ticker := time.NewTicker(1 * time.Minute)
defer ticker.Stop()
for range ticker.C {
c.mu.Lock()
now := time.Now()
var toRemove []*EventCacheEntry
for _, entry := range c.entries {
if now.Sub(entry.CreatedAt) > c.maxAge {
toRemove = append(toRemove, entry)
}
}
for _, entry := range toRemove {
c.removeEntry(entry)
}
if len(toRemove) > 0 {
log.D.F("event cache cleanup: removed %d expired entries", len(toRemove))
}
c.mu.Unlock()
}
}
// CacheStats holds cache performance metrics
type CacheStats struct {
Entries int
CurrentSize int64 // Compressed size
MaxSize int64
Hits uint64
Misses uint64
HitRate float64
Evictions uint64
Invalidations uint64
CompressionRatio float64 // Average compression ratio
CompactionRuns uint64
}
// Stats returns cache statistics
func (c *EventCache) Stats() CacheStats {
c.mu.RLock()
defer c.mu.RUnlock()
total := c.hits + c.misses
hitRate := 0.0
if total > 0 {
hitRate = float64(c.hits) / float64(total)
}
return CacheStats{
Entries: len(c.entries),
CurrentSize: c.currentSize,
MaxSize: c.maxSize,
Hits: c.hits,
Misses: c.misses,
HitRate: hitRate,
Evictions: c.evictions,
Invalidations: c.invalidations,
CompressionRatio: c.compressionRatio,
CompactionRuns: c.compactionRuns,
}
}
func min(a, b int) int {
if a < b {
return a
}
return b
}

View File

@@ -5,6 +5,8 @@ import (
"context"
"errors"
"fmt"
"os"
"strconv"
"strings"
"github.com/dgraph-io/badger/v4"
@@ -34,7 +36,9 @@ func (d *D) GetSerialsFromFilter(f *filter.F) (
return
}
// Pre-allocate slice with estimated capacity to reduce reallocations
sers = make(types.Uint40s, 0, len(idxs)*100) // Estimate 100 serials per index
sers = make(
types.Uint40s, 0, len(idxs)*100,
) // Estimate 100 serials per index
for _, idx := range idxs {
var s types.Uint40s
if s, err = d.GetSerialsByRange(idx); chk.E(err) {
@@ -111,13 +115,13 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
err = errors.New("nil event")
return
}
// Reject ephemeral events (kinds 20000-29999) - they should never be stored
if ev.Kind >= 20000 && ev.Kind <= 29999 {
err = errors.New("blocked: ephemeral events should not be stored")
return
}
// check if the event already exists
var ser *types.Uint40
if ser, err = d.GetSerialById(ev.ID); err == nil && ser != nil {
@@ -176,7 +180,29 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
if idxs, err = GetIndexesForEvent(ev, serial); chk.E(err) {
return
}
log.T.F("SaveEvent: generated %d indexes for event %x (kind %d)", len(idxs), ev.ID, ev.Kind)
// log.T.F(
// "SaveEvent: generated %d indexes for event %x (kind %d)", len(idxs),
// ev.ID, ev.Kind,
// )
// Serialize event once to check size
eventDataBuf := new(bytes.Buffer)
ev.MarshalBinary(eventDataBuf)
eventData := eventDataBuf.Bytes()
// Determine storage strategy (Reiser4 optimizations)
// Get threshold from environment, default to 0 (disabled)
// When enabled, typical values: 384 (conservative), 512 (recommended), 1024 (aggressive)
smallEventThreshold := 1024
if v := os.Getenv("ORLY_INLINE_EVENT_THRESHOLD"); v != "" {
if n, perr := strconv.Atoi(v); perr == nil && n >= 0 {
smallEventThreshold = n
}
}
isSmallEvent := smallEventThreshold > 0 && len(eventData) <= smallEventThreshold
isReplaceableEvent := kind.IsReplaceable(ev.Kind)
isAddressableEvent := kind.IsParameterizedReplaceable(ev.Kind)
// Start a transaction to save the event and all its indexes
err = d.Update(
func(txn *badger.Txn) (err error) {
@@ -185,26 +211,114 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
if err = ser.Set(serial); chk.E(err) {
return
}
keyBuf := new(bytes.Buffer)
if err = indexes.EventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
return
}
kb := keyBuf.Bytes()
// Pre-allocate value buffer
valueBuf := new(bytes.Buffer)
ev.MarshalBinary(valueBuf)
vb := valueBuf.Bytes()
// Save each index
for _, key := range idxs {
if err = txn.Set(key, nil); chk.E(err) {
return
}
}
// write the event
if err = txn.Set(kb, vb); chk.E(err) {
return
// Write the event using optimized storage strategy
// Determine if we should use inline addressable/replaceable storage
useAddressableInline := false
var dTag *tag.T
if isAddressableEvent && isSmallEvent {
dTag = ev.Tags.GetFirst([]byte("d"))
useAddressableInline = dTag != nil
}
// All small events get a sev key for serial-based access
if isSmallEvent {
// Small event: store inline with sev prefix
// Format: sev|serial|size_uint16|event_data
keyBuf := new(bytes.Buffer)
if err = indexes.SmallEventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
return
}
// Append size as uint16 big-endian (2 bytes for size up to 65535)
sizeBytes := []byte{
byte(len(eventData) >> 8), byte(len(eventData)),
}
keyBuf.Write(sizeBytes)
// Append event data
keyBuf.Write(eventData)
if err = txn.Set(keyBuf.Bytes(), nil); chk.E(err) {
return
}
// log.T.F(
// "SaveEvent: stored small event inline (%d bytes)",
// len(eventData),
// )
} else {
// Large event: store separately with evt prefix
keyBuf := new(bytes.Buffer)
if err = indexes.EventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
return
}
if err = txn.Set(keyBuf.Bytes(), eventData); chk.E(err) {
return
}
// log.T.F(
// "SaveEvent: stored large event separately (%d bytes)",
// len(eventData),
// )
}
// Additionally, store replaceable/addressable events with specialized keys for direct access
if useAddressableInline {
// Addressable event: also store with aev|pubkey_hash|kind|dtag_hash|size|data
pubHash := new(types.PubHash)
pubHash.FromPubkey(ev.Pubkey)
kindVal := new(types.Uint16)
kindVal.Set(ev.Kind)
dTagHash := new(types.Ident)
dTagHash.FromIdent(dTag.Value())
keyBuf := new(bytes.Buffer)
if err = indexes.AddressableEventEnc(
pubHash, kindVal, dTagHash,
).MarshalWrite(keyBuf); chk.E(err) {
return
}
// Append size as uint16 big-endian
sizeBytes := []byte{
byte(len(eventData) >> 8), byte(len(eventData)),
}
keyBuf.Write(sizeBytes)
// Append event data
keyBuf.Write(eventData)
if err = txn.Set(keyBuf.Bytes(), nil); chk.E(err) {
return
}
// log.T.F("SaveEvent: also stored addressable event with specialized key")
} else if isReplaceableEvent && isSmallEvent {
// Replaceable event: also store with rev|pubkey_hash|kind|size|data
pubHash := new(types.PubHash)
pubHash.FromPubkey(ev.Pubkey)
kindVal := new(types.Uint16)
kindVal.Set(ev.Kind)
keyBuf := new(bytes.Buffer)
if err = indexes.ReplaceableEventEnc(
pubHash, kindVal,
).MarshalWrite(keyBuf); chk.E(err) {
return
}
// Append size as uint16 big-endian
sizeBytes := []byte{
byte(len(eventData) >> 8), byte(len(eventData)),
}
keyBuf.Write(sizeBytes)
// Append event data
keyBuf.Write(eventData)
if err = txn.Set(keyBuf.Bytes(), nil); chk.E(err) {
return
}
log.T.F("SaveEvent: also stored replaceable event with specialized key")
}
return
},
@@ -212,7 +326,7 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
if err != nil {
return
}
// Process deletion events to actually delete the referenced events
if ev.Kind == kind.Deletion.K {
if err = d.ProcessDelete(ev, nil); chk.E(err) {
@@ -221,5 +335,13 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
err = nil
}
}
// Invalidate query cache since a new event was stored
// This ensures subsequent queries will see the new event
if d.queryCache != nil {
d.queryCache.Invalidate()
// log.T.F("SaveEvent: invalidated query cache")
}
return
}

280
pkg/dgraph/README.md Normal file
View File

@@ -0,0 +1,280 @@
# Dgraph Database Implementation for ORLY
This package provides a Dgraph-based implementation of the ORLY database interface, enabling graph-based storage for Nostr events with powerful relationship querying capabilities.
## Status: Step 1 Complete ✅
**Current State:** Dgraph server integration is complete and functional
**Next Step:** DQL query/mutation implementation in save-event.go and query-events.go
## Architecture
### Client-Server Model
The implementation uses a **client-server architecture**:
```
┌─────────────────────────────────────────────┐
│ ORLY Relay Process │
│ │
│ ┌────────────────────────────────────┐ │
│ │ Dgraph Client (pkg/dgraph) │ │
│ │ - dgo library (gRPC) │ │
│ │ - Schema management │────┼───► Dgraph Server
│ │ - Query/Mutate methods │ │ (localhost:9080)
│ └────────────────────────────────────┘ │ - Event graph
│ │ - Authors, tags
│ ┌────────────────────────────────────┐ │ - Relationships
│ │ Badger Metadata Store │ │
│ │ - Markers (key-value) │ │
│ │ - Serial counters │ │
│ │ - Relay identity │ │
│ └────────────────────────────────────┘ │
└─────────────────────────────────────────────┘
```
### Dual Storage Strategy
1. **Dgraph** (Graph Database)
- Nostr events and their content
- Author relationships
- Tag relationships
- Event references and mentions
- Optimized for graph traversals and complex queries
2. **Badger** (Key-Value Store)
- Metadata markers
- Serial number counters
- Relay identity keys
- Fast key-value operations
## Setup
### 1. Start Dgraph Server
Using Docker (recommended):
```bash
docker run -d \
--name dgraph \
-p 8080:8080 \
-p 9080:9080 \
-p 8000:8000 \
-v ~/dgraph:/dgraph \
dgraph/standalone:latest
```
### 2. Configure ORLY
```bash
export ORLY_DB_TYPE=dgraph
export ORLY_DGRAPH_URL=localhost:9080 # Optional, this is the default
```
### 3. Run ORLY
```bash
./orly
```
On startup, ORLY will:
1. Connect to dgraph server via gRPC
2. Apply the Nostr schema automatically
3. Initialize badger metadata store
4. Initialize serial number counter
5. Start accepting events
## Schema
The Nostr schema defines the following types:
### Event Nodes
```dql
type Event {
event.id # Event ID (string, indexed)
event.serial # Sequential number (int, indexed)
event.kind # Event kind (int, indexed)
event.created_at # Timestamp (int, indexed)
event.content # Event content (string)
event.sig # Signature (string, indexed)
event.pubkey # Author pubkey (string, indexed)
event.authored_by # -> Author (uid)
event.references # -> Events (uid list)
event.mentions # -> Events (uid list)
event.tagged_with # -> Tags (uid list)
}
```
### Author Nodes
```dql
type Author {
author.pubkey # Pubkey (string, indexed, unique)
author.events # -> Events (uid list, reverse)
}
```
### Tag Nodes
```dql
type Tag {
tag.type # Tag type (string, indexed)
tag.value # Tag value (string, indexed + fulltext)
tag.events # -> Events (uid list, reverse)
}
```
### Marker Nodes (Metadata)
```dql
type Marker {
marker.key # Key (string, indexed, unique)
marker.value # Value (string)
}
```
## Configuration
### Environment Variables
- `ORLY_DB_TYPE=dgraph` - Enable dgraph database (default: badger)
- `ORLY_DGRAPH_URL=host:port` - Dgraph gRPC endpoint (default: localhost:9080)
- `ORLY_DATA_DIR=/path` - Data directory for metadata storage
### Connection Details
The dgraph client uses **insecure gRPC** by default for local development. For production deployments:
1. Set up TLS certificates for dgraph
2. Modify `pkg/dgraph/dgraph.go` to use `grpc.WithTransportCredentials()` with your certs
## Implementation Details
### Files
- `dgraph.go` - Main implementation, initialization, lifecycle
- `schema.go` - Schema definition and application
- `save-event.go` - Event storage (TODO: update to use Mutate)
- `query-events.go` - Event queries (TODO: update to parse DQL responses)
- `fetch-event.go` - Event retrieval methods
- `delete.go` - Event deletion
- `markers.go` - Key-value metadata storage (uses badger)
- `serial.go` - Serial number generation (uses badger)
- `subscriptions.go` - Subscription/payment tracking (uses markers)
- `nip43.go` - NIP-43 invite system (uses markers)
- `import-export.go` - Import/export operations
- `logger.go` - Logging adapter
### Key Methods
#### Initialization
```go
d, err := dgraph.New(ctx, cancel, dataDir, logLevel)
```
#### Querying (DQL)
```go
resp, err := d.Query(ctx, dqlQuery)
```
#### Mutations (RDF N-Quads)
```go
mutation := &api.Mutation{SetNquads: []byte(nquads)}
resp, err := d.Mutate(ctx, mutation)
```
## Development Status
### ✅ Step 1: Dgraph Server Integration (COMPLETE)
- [x] dgo client library integration
- [x] gRPC connection to external dgraph
- [x] Schema definition and auto-application
- [x] Query() and Mutate() method stubs
- [x] ORLY_DGRAPH_URL configuration
- [x] Dual-storage architecture
- [x] Proper lifecycle management
### 📝 Step 2: DQL Implementation (NEXT)
Priority tasks:
1. **save-event.go** - Replace RDF string building with actual Mutate() calls
2. **query-events.go** - Parse actual JSON responses from Query()
3. **fetch-event.go** - Implement DQL queries for event retrieval
4. **delete.go** - Implement deletion mutations
### 📝 Step 3: Testing (FUTURE)
- Integration testing with relay-tester
- Performance benchmarks vs badger
- Memory profiling
- Production deployment testing
## Troubleshooting
### Connection Refused
```
failed to connect to dgraph at localhost:9080: connection refused
```
**Solution:** Ensure dgraph server is running:
```bash
docker ps | grep dgraph
docker logs dgraph
```
### Schema Application Failed
```
failed to apply schema: ...
```
**Solution:** Check dgraph server logs and ensure no schema conflicts:
```bash
docker logs dgraph
```
### Binary Not Finding libsecp256k1.so
This is unrelated to dgraph. Ensure:
```bash
export LD_LIBRARY_PATH="${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$(pwd)/pkg/crypto/p8k"
```
## Performance Considerations
### When to Use Dgraph
**Good fit:**
- Complex graph queries (follows-of-follows, social graphs)
- Full-text search requirements
- Advanced filtering and aggregations
- Multi-hop relationship traversals
**Not ideal for:**
- Simple key-value lookups (badger is faster)
- Very high write throughput (badger has lower latency)
- Single-node deployments with simple queries
### Optimization Tips
1. **Indexing**: Ensure frequently queried fields have appropriate indexes
2. **Pagination**: Use offset/limit in DQL queries for large result sets
3. **Caching**: Consider adding an LRU cache for hot events
4. **Schema Design**: Use reverse edges for efficient relationship traversal
## Resources
- [Dgraph Documentation](https://dgraph.io/docs/)
- [DQL Query Language](https://dgraph.io/docs/query-language/)
- [dgo Client Library](https://github.com/dgraph-io/dgo)
- [ORLY Implementation Status](../../DGRAPH_IMPLEMENTATION_STATUS.md)
## Contributing
When working on dgraph implementation:
1. Test changes against a local dgraph instance
2. Update schema.go if adding new node types or predicates
3. Ensure dual-storage strategy is maintained (dgraph for events, badger for metadata)
4. Add integration tests for new features
5. Update DGRAPH_IMPLEMENTATION_STATUS.md with progress

330
pkg/dgraph/TESTING.md Normal file
View File

@@ -0,0 +1,330 @@
# Dgraph Test Suite
This directory contains a comprehensive test suite for the dgraph database implementation, mirroring all tests from the badger implementation to ensure feature parity.
## Test Files
- **testmain_test.go** - Test configuration (logging, setup)
- **helpers_test.go** - Helper functions for test database setup/teardown
- **save-event_test.go** - Event storage tests
- **query-events_test.go** - Event query tests
## Quick Start
### 1. Start Dgraph Server
```bash
# From project root
./scripts/dgraph-start.sh
# Verify it's running
curl http://localhost:8080/health
```
### 2. Run Tests
```bash
# Run all dgraph tests
./scripts/test-dgraph.sh
# Or run manually
export ORLY_DGRAPH_URL=localhost:9080
CGO_ENABLED=0 go test -v ./pkg/dgraph/...
# Run specific test
CGO_ENABLED=0 go test -v -run TestSaveEvents ./pkg/dgraph
```
## Test Coverage
### Event Storage Tests (`save-event_test.go`)
**TestSaveEvents**
- Loads ~100 events from examples.Cache
- Saves all events chronologically
- Verifies no errors during save
- Reports performance metrics
**TestDeletionEventWithETagRejection**
- Creates a regular event
- Attempts to save deletion event with e-tag
- Verifies deletion events with e-tags are rejected
**TestSaveExistingEvent**
- Saves an event
- Attempts to save same event again
- Verifies duplicate events are rejected
### Event Query Tests (`query-events_test.go`)
**TestQueryEventsByID**
- Queries event by exact ID match
- Verifies single result returned
- Verifies correct event retrieved
**TestQueryEventsByKind**
- Queries events by kind (e.g., kind 1)
- Verifies all results have correct kind
- Tests filtering logic
**TestQueryEventsByAuthor**
- Queries events by author pubkey
- Verifies all results from correct author
- Tests author filtering
**TestReplaceableEventsAndDeletion**
- Creates replaceable event (kind 0)
- Creates newer version
- Verifies only newer version returned in general queries
- Creates deletion event
- Verifies deleted event not returned
- Tests replaceable event logic and deletion
**TestParameterizedReplaceableEventsAndDeletion**
- Creates parameterized replaceable event (kind 30000+)
- Adds d-tag
- Creates deletion event with e-tag
- Verifies deleted event not returned
- Tests parameterized replaceable logic
**TestQueryEventsByTimeRange**
- Queries events by since/until timestamps
- Verifies all results within time range
- Tests temporal filtering
**TestQueryEventsByTag**
- Finds event with tags
- Queries by tag key/value
- Verifies all results have the tag
- Tests tag filtering logic
**TestCountEvents**
- Counts all events
- Counts events by kind filter
- Verifies correct counts returned
- Tests counting functionality
## Test Helpers
### setupTestDB(t *testing.T)
Creates a test dgraph database:
1. **Checks dgraph availability** - Skips test if server not running
2. **Creates temp directory** - For metadata storage
3. **Initializes dgraph client** - Connects to server
4. **Drops all data** - Starts with clean slate
5. **Loads test events** - From examples.Cache (~100 events)
6. **Sorts chronologically** - Ensures addressable events processed in order
7. **Saves all events** - Populates test database
**Returns:** `(*D, []*event.E, context.Context, context.CancelFunc, string)`
### cleanupTestDB(t, db, cancel, tempDir)
Cleans up after tests:
- Closes database connection
- Cancels context
- Removes temp directory
### skipIfDgraphNotAvailable(t *testing.T)
Checks if dgraph is running and skips test if not available.
## Running Tests
### Prerequisites
1. **Dgraph Server** - Must be running before tests
2. **Go 1.21+** - For running tests
3. **CGO_ENABLED=0** - For pure Go build
### Test Execution
#### All Tests
```bash
./scripts/test-dgraph.sh
```
#### Specific Test File
```bash
CGO_ENABLED=0 go test -v ./pkg/dgraph -run TestSaveEvents
```
#### With Logging
```bash
export TEST_LOG=1
CGO_ENABLED=0 go test -v ./pkg/dgraph/...
```
#### With Timeout
```bash
CGO_ENABLED=0 go test -v -timeout 10m ./pkg/dgraph/...
```
### Integration Testing
Run tests + relay-tester:
```bash
./scripts/test-dgraph.sh --relay-tester
```
This will:
1. Run all dgraph package tests
2. Start ORLY with dgraph backend
3. Run relay-tester against ORLY
4. Report results
## Test Data
Tests use `pkg/encoders/event/examples.Cache` which contains:
- ~100 real Nostr events
- Text notes (kind 1)
- Profile metadata (kind 0)
- Various other kinds
- Events with tags, references, mentions
- Multiple authors and timestamps
This ensures tests cover realistic scenarios.
## Debugging Tests
### View Test Output
```bash
CGO_ENABLED=0 go test -v ./pkg/dgraph/... 2>&1 | tee test-output.log
```
### Check Dgraph State
```bash
# View data via Ratel UI
open http://localhost:8000
# Query via HTTP
curl -X POST localhost:8080/query -d '{
events(func: type(Event), first: 10) {
uid
event.id
event.kind
event.created_at
}
}'
```
### Enable Dgraph Logging
```bash
docker logs dgraph-orly-test -f
```
## Test Failures
### "Dgraph server not available"
**Cause:** Dgraph is not running
**Fix:**
```bash
./scripts/dgraph-start.sh
```
### Connection Timeouts
**Cause:** Dgraph server overloaded or network issues
**Fix:**
- Increase test timeout: `go test -timeout 20m`
- Check dgraph resources: `docker stats dgraph-orly-test`
- Restart dgraph: `docker restart dgraph-orly-test`
### Schema Errors
**Cause:** Schema conflicts or version mismatch
**Fix:**
- Drop all data: Tests call `dropAll()` automatically
- Check dgraph version: `docker exec dgraph-orly-test dgraph version`
### Test Hangs
**Cause:** Deadlock or infinite loop
**Fix:**
- Send SIGQUIT: `kill -QUIT <test-pid>`
- View goroutine dump
- Check dgraph logs
## Continuous Integration
### GitHub Actions Example
```yaml
name: Dgraph Tests
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
services:
dgraph:
image: dgraph/standalone:latest
ports:
- 8080:8080
- 9080:9080
options: >-
--health-cmd "curl -f http://localhost:8080/health"
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.21'
- name: Run dgraph tests
env:
ORLY_DGRAPH_URL: localhost:9080
run: |
CGO_ENABLED=0 go test -v -timeout 10m ./pkg/dgraph/...
```
## Performance Benchmarks
Compare with badger:
```bash
# Badger benchmarks
go test -bench=. -benchmem ./pkg/database/...
# Dgraph benchmarks
go test -bench=. -benchmem ./pkg/dgraph/...
```
## Related Documentation
- [Main Testing Guide](../../scripts/DGRAPH_TESTING.md)
- [Implementation Status](../../DGRAPH_IMPLEMENTATION_STATUS.md)
- [Package README](README.md)
## Contributing
When adding new tests:
1. **Mirror badger tests** - Ensure feature parity
2. **Use test helpers** - setupTestDB() and cleanupTestDB()
3. **Skip if unavailable** - Call skipIfDgraphNotAvailable(t)
4. **Clean up resources** - Always defer cleanupTestDB()
5. **Test chronologically** - Sort events by timestamp for addressable events
6. **Verify behavior** - Don't just check for no errors, verify correctness

262
pkg/dgraph/delete.go Normal file
View File

@@ -0,0 +1,262 @@
package dgraph
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/dgraph-io/dgo/v230/protos/api"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/hex"
)
// DeleteEvent deletes an event by its ID
func (d *D) DeleteEvent(c context.Context, eid []byte) error {
idStr := hex.Enc(eid)
// Find the event's UID
query := fmt.Sprintf(`{
event(func: eq(event.id, %q)) {
uid
}
}`, idStr)
resp, err := d.Query(c, query)
if err != nil {
return fmt.Errorf("failed to find event for deletion: %w", err)
}
// Parse UID
var result struct {
Event []struct {
UID string `json:"uid"`
} `json:"event"`
}
if err = unmarshalJSON(resp.Json, &result); err != nil {
return err
}
if len(result.Event) == 0 {
return nil // Event doesn't exist
}
// Delete the event node
mutation := &api.Mutation{
DelNquads: []byte(fmt.Sprintf("<%s> * * .", result.Event[0].UID)),
CommitNow: true,
}
if _, err = d.Mutate(c, mutation); err != nil {
return fmt.Errorf("failed to delete event: %w", err)
}
return nil
}
// DeleteEventBySerial deletes an event by its serial number
func (d *D) DeleteEventBySerial(c context.Context, ser *types.Uint40, ev *event.E) error {
serial := ser.Get()
// Find the event's UID
query := fmt.Sprintf(`{
event(func: eq(event.serial, %d)) {
uid
}
}`, serial)
resp, err := d.Query(c, query)
if err != nil {
return fmt.Errorf("failed to find event for deletion: %w", err)
}
// Parse UID
var result struct {
Event []struct {
UID string `json:"uid"`
} `json:"event"`
}
if err = unmarshalJSON(resp.Json, &result); err != nil {
return err
}
if len(result.Event) == 0 {
return nil // Event doesn't exist
}
// Delete the event node
mutation := &api.Mutation{
DelNquads: []byte(fmt.Sprintf("<%s> * * .", result.Event[0].UID)),
CommitNow: true,
}
if _, err = d.Mutate(c, mutation); err != nil {
return fmt.Errorf("failed to delete event: %w", err)
}
return nil
}
// DeleteExpired removes events that have passed their expiration time (NIP-40)
func (d *D) DeleteExpired() {
// Query for events that have an "expiration" tag
// NIP-40: events should have a tag ["expiration", "<unix timestamp>"]
query := `{
events(func: has(event.tags)) {
uid
event.id
event.tags
event.created_at
}
}`
resp, err := d.Query(context.Background(), query)
if err != nil {
d.Logger.Errorf("failed to query events for expiration: %v", err)
return
}
var result struct {
Events []struct {
UID string `json:"uid"`
ID string `json:"event.id"`
Tags string `json:"event.tags"`
CreatedAt int64 `json:"event.created_at"`
} `json:"events"`
}
if err = unmarshalJSON(resp.Json, &result); err != nil {
d.Logger.Errorf("failed to parse events for expiration: %v", err)
return
}
now := time.Now().Unix()
deletedCount := 0
for _, ev := range result.Events {
// Parse tags
if ev.Tags == "" {
continue
}
var tags [][]string
if err := json.Unmarshal([]byte(ev.Tags), &tags); err != nil {
continue
}
// Look for expiration tag
var expirationTime int64
for _, tag := range tags {
if len(tag) >= 2 && tag[0] == "expiration" {
// Parse expiration timestamp
if _, err := fmt.Sscanf(tag[1], "%d", &expirationTime); err != nil {
continue
}
break
}
}
// If expiration time found and passed, delete the event
if expirationTime > 0 && now > expirationTime {
mutation := &api.Mutation{
DelNquads: []byte(fmt.Sprintf("<%s> * * .", ev.UID)),
CommitNow: true,
}
if _, err := d.Mutate(context.Background(), mutation); err != nil {
d.Logger.Warningf("failed to delete expired event %s: %v", ev.ID, err)
} else {
deletedCount++
}
}
}
if deletedCount > 0 {
d.Logger.Infof("deleted %d expired events", deletedCount)
}
}
// ProcessDelete processes a kind 5 deletion event
func (d *D) ProcessDelete(ev *event.E, admins [][]byte) (err error) {
if ev.Kind != 5 {
return fmt.Errorf("event is not a deletion event (kind 5)")
}
// Extract event IDs to delete from tags
for _, tag := range *ev.Tags {
if len(tag.T) >= 2 && string(tag.T[0]) == "e" {
eventID := tag.T[1]
// Verify the deletion is authorized (author must match or be admin)
if err = d.CheckForDeleted(ev, admins); err != nil {
continue
}
// Delete the event
if err = d.DeleteEvent(context.Background(), eventID); err != nil {
// Log error but continue with other deletions
d.Logger.Errorf("failed to delete event %s: %v", hex.Enc(eventID), err)
}
}
}
return nil
}
// CheckForDeleted checks if an event has been deleted
func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
// Query for delete events (kind 5) that reference this event
evID := hex.Enc(ev.ID[:])
query := fmt.Sprintf(`{
deletes(func: eq(event.kind, 5)) @filter(eq(event.pubkey, %q)) {
uid
event.pubkey
references @filter(eq(event.id, %q)) {
event.id
}
}
}`, hex.Enc(ev.Pubkey), evID)
resp, err := d.Query(context.Background(), query)
if err != nil {
return fmt.Errorf("failed to check for deletions: %w", err)
}
var result struct {
Deletes []struct {
UID string `json:"uid"`
Pubkey string `json:"event.pubkey"`
References []struct {
ID string `json:"event.id"`
} `json:"references"`
} `json:"deletes"`
}
if err = unmarshalJSON(resp.Json, &result); err != nil {
return err
}
// Check if any delete events reference this event
for _, del := range result.Deletes {
if len(del.References) > 0 {
// Check if deletion is from the author or an admin
delPubkey, _ := hex.Dec(del.Pubkey)
if string(delPubkey) == string(ev.Pubkey) {
return fmt.Errorf("event has been deleted by author")
}
// Check admins
for _, admin := range admins {
if string(delPubkey) == string(admin) {
return fmt.Errorf("event has been deleted by admin")
}
}
}
}
return nil
}

319
pkg/dgraph/dgraph.go Normal file
View File

@@ -0,0 +1,319 @@
// Package dgraph provides a Dgraph-based implementation of the database interface.
// This is a simplified implementation for testing - full dgraph integration to be completed later.
package dgraph
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"github.com/dgraph-io/badger/v4"
"github.com/dgraph-io/dgo/v230"
"github.com/dgraph-io/dgo/v230/protos/api"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"lol.mleku.dev"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/utils/apputil"
)
// D implements the database.Database interface using Dgraph as the storage backend
type D struct {
ctx context.Context
cancel context.CancelFunc
dataDir string
Logger *logger
// Dgraph client connection
client *dgo.Dgraph
conn *grpc.ClientConn
// Fallback badger storage for metadata
pstore *badger.DB
// Configuration
dgraphURL string
enableGraphQL bool
enableIntrospection bool
ready chan struct{} // Closed when database is ready to serve requests
}
// Ensure D implements database.Database interface at compile time
var _ database.Database = (*D)(nil)
// init registers the dgraph database factory
func init() {
database.RegisterDgraphFactory(func(
ctx context.Context,
cancel context.CancelFunc,
dataDir string,
logLevel string,
) (database.Database, error) {
return New(ctx, cancel, dataDir, logLevel)
})
}
// Config holds configuration options for the Dgraph database
type Config struct {
DataDir string
LogLevel string
DgraphURL string // Dgraph gRPC endpoint (e.g., "localhost:9080")
EnableGraphQL bool
EnableIntrospection bool
}
// New creates a new Dgraph-based database instance
func New(
ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
) (
d *D, err error,
) {
// Get dgraph URL from environment, default to localhost
dgraphURL := os.Getenv("ORLY_DGRAPH_URL")
if dgraphURL == "" {
dgraphURL = "localhost:9080"
}
d = &D{
ctx: ctx,
cancel: cancel,
dataDir: dataDir,
Logger: NewLogger(lol.GetLogLevel(logLevel), dataDir),
dgraphURL: dgraphURL,
enableGraphQL: false,
enableIntrospection: false,
ready: make(chan struct{}),
}
// Ensure the data directory exists
if err = os.MkdirAll(dataDir, 0755); chk.E(err) {
return
}
// Ensure directory structure
dummyFile := filepath.Join(dataDir, "dummy.sst")
if err = apputil.EnsureDir(dummyFile); chk.E(err) {
return
}
// Initialize dgraph client connection
if err = d.initDgraphClient(); chk.E(err) {
return
}
// Initialize badger for metadata storage
if err = d.initStorage(); chk.E(err) {
return
}
// Apply Nostr schema to dgraph
if err = d.applySchema(ctx); chk.E(err) {
return
}
// Initialize serial counter
if err = d.initSerialCounter(); chk.E(err) {
return
}
// Start warmup goroutine to signal when database is ready
go d.warmup()
// Setup shutdown handler
go func() {
<-d.ctx.Done()
d.cancel()
if d.conn != nil {
d.conn.Close()
}
if d.pstore != nil {
d.pstore.Close()
}
}()
return
}
// initDgraphClient establishes connection to dgraph server
func (d *D) initDgraphClient() error {
d.Logger.Infof("connecting to dgraph at %s", d.dgraphURL)
// Establish gRPC connection
conn, err := grpc.Dial(d.dgraphURL, grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
return fmt.Errorf("failed to connect to dgraph at %s: %w", d.dgraphURL, err)
}
d.conn = conn
d.client = dgo.NewDgraphClient(api.NewDgraphClient(conn))
d.Logger.Infof("successfully connected to dgraph")
return nil
}
// initStorage opens Badger database for metadata storage
func (d *D) initStorage() error {
metadataDir := filepath.Join(d.dataDir, "metadata")
if err := os.MkdirAll(metadataDir, 0755); err != nil {
return fmt.Errorf("failed to create metadata directory: %w", err)
}
opts := badger.DefaultOptions(metadataDir)
var err error
d.pstore, err = badger.Open(opts)
if err != nil {
return fmt.Errorf("failed to open badger metadata store: %w", err)
}
d.Logger.Infof("metadata storage initialized")
return nil
}
// Query executes a DQL query against dgraph
func (d *D) Query(ctx context.Context, query string) (*api.Response, error) {
txn := d.client.NewReadOnlyTxn()
defer txn.Discard(ctx)
resp, err := txn.Query(ctx, query)
if err != nil {
return nil, fmt.Errorf("dgraph query failed: %w", err)
}
return resp, nil
}
// Mutate executes a mutation against dgraph
func (d *D) Mutate(ctx context.Context, mutation *api.Mutation) (*api.Response, error) {
txn := d.client.NewTxn()
defer txn.Discard(ctx)
resp, err := txn.Mutate(ctx, mutation)
if err != nil {
return nil, fmt.Errorf("dgraph mutation failed: %w", err)
}
// Only commit if CommitNow is false (mutation didn't auto-commit)
if !mutation.CommitNow {
if err := txn.Commit(ctx); err != nil {
return nil, fmt.Errorf("dgraph commit failed: %w", err)
}
}
return resp, nil
}
// Path returns the data directory path
func (d *D) Path() string { return d.dataDir }
// Init initializes the database with a given path (no-op, path set in New)
func (d *D) Init(path string) (err error) {
// Path already set in New()
return nil
}
// Sync flushes pending writes
func (d *D) Sync() (err error) {
if d.pstore != nil {
return d.pstore.Sync()
}
return nil
}
// Close closes the database
func (d *D) Close() (err error) {
d.cancel()
if d.conn != nil {
if e := d.conn.Close(); e != nil {
err = e
}
}
if d.pstore != nil {
if e := d.pstore.Close(); e != nil && err == nil {
err = e
}
}
return
}
// Wipe removes all data
func (d *D) Wipe() (err error) {
if d.pstore != nil {
if err = d.pstore.Close(); chk.E(err) {
return
}
}
if err = os.RemoveAll(d.dataDir); chk.E(err) {
return
}
return d.initStorage()
}
// SetLogLevel sets the logging level
func (d *D) SetLogLevel(level string) {
// d.Logger.SetLevel(lol.GetLogLevel(level))
}
// EventIdsBySerial retrieves event IDs by serial range
func (d *D) EventIdsBySerial(start uint64, count int) (
evs []uint64, err error,
) {
// Query for events in the specified serial range
query := fmt.Sprintf(`{
events(func: ge(event.serial, %d), orderdesc: event.serial, first: %d) {
event.serial
}
}`, start, count)
resp, err := d.Query(context.Background(), query)
if err != nil {
return nil, fmt.Errorf("failed to query event IDs by serial: %w", err)
}
var result struct {
Events []struct {
Serial int64 `json:"event.serial"`
} `json:"events"`
}
if err = json.Unmarshal(resp.Json, &result); err != nil {
return nil, err
}
evs = make([]uint64, 0, len(result.Events))
for _, ev := range result.Events {
evs = append(evs, uint64(ev.Serial))
}
return evs, nil
}
// RunMigrations runs database migrations (no-op for dgraph)
func (d *D) RunMigrations() {
// No-op for dgraph
}
// Ready returns a channel that closes when the database is ready to serve requests.
// This allows callers to wait for database warmup to complete.
func (d *D) Ready() <-chan struct{} {
return d.ready
}
// warmup performs database warmup operations and closes the ready channel when complete.
// For Dgraph, warmup ensures the connection is healthy and schema is applied.
func (d *D) warmup() {
defer close(d.ready)
// Dgraph connection and schema are already verified during initialization
// Just give a brief moment for any background processes to settle
d.Logger.Infof("dgraph database warmup complete, ready to serve requests")
}
func (d *D) GetCachedJSON(f *filter.F) ([][]byte, bool) { return nil, false }
func (d *D) CacheMarshaledJSON(f *filter.F, marshaledJSON [][]byte) {}
func (d *D) InvalidateQueryCache() {}

392
pkg/dgraph/fetch-event.go Normal file
View File

@@ -0,0 +1,392 @@
package dgraph
import (
"context"
"encoding/json"
"fmt"
"strings"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/interfaces/store"
)
// FetchEventBySerial retrieves an event by its serial number
func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
serial := ser.Get()
query := fmt.Sprintf(`{
event(func: eq(event.serial, %d)) {
event.id
event.kind
event.created_at
event.content
event.sig
event.pubkey
event.tags
}
}`, serial)
resp, err := d.Query(context.Background(), query)
if err != nil {
return nil, fmt.Errorf("failed to fetch event by serial: %w", err)
}
evs, err := d.parseEventsFromResponse(resp.Json)
if err != nil {
return nil, err
}
if len(evs) == 0 {
return nil, fmt.Errorf("event not found")
}
return evs[0], nil
}
// FetchEventsBySerials retrieves multiple events by their serial numbers
func (d *D) FetchEventsBySerials(serials []*types.Uint40) (
events map[uint64]*event.E, err error,
) {
if len(serials) == 0 {
return make(map[uint64]*event.E), nil
}
// Build a filter for multiple serials using OR conditions
serialConditions := make([]string, len(serials))
for i, ser := range serials {
serialConditions[i] = fmt.Sprintf("eq(event.serial, %d)", ser.Get())
}
serialFilter := strings.Join(serialConditions, " OR ")
// Query with proper batch filtering
query := fmt.Sprintf(`{
events(func: has(event.serial)) @filter(%s) {
event.id
event.kind
event.created_at
event.content
event.sig
event.pubkey
event.tags
event.serial
}
}`, serialFilter)
resp, err := d.Query(context.Background(), query)
if err != nil {
return nil, fmt.Errorf("failed to fetch events by serials: %w", err)
}
// Parse the response including serial numbers
var result struct {
Events []struct {
ID string `json:"event.id"`
Kind int `json:"event.kind"`
CreatedAt int64 `json:"event.created_at"`
Content string `json:"event.content"`
Sig string `json:"event.sig"`
Pubkey string `json:"event.pubkey"`
Tags string `json:"event.tags"`
Serial int64 `json:"event.serial"`
} `json:"events"`
}
if err = json.Unmarshal(resp.Json, &result); err != nil {
return nil, err
}
// Map events by their serial numbers
events = make(map[uint64]*event.E)
for _, ev := range result.Events {
// Decode hex strings
id, err := hex.Dec(ev.ID)
if err != nil {
continue
}
sig, err := hex.Dec(ev.Sig)
if err != nil {
continue
}
pubkey, err := hex.Dec(ev.Pubkey)
if err != nil {
continue
}
// Parse tags from JSON
var tags tag.S
if ev.Tags != "" {
if err := json.Unmarshal([]byte(ev.Tags), &tags); err != nil {
continue
}
}
// Create event
e := &event.E{
Kind: uint16(ev.Kind),
CreatedAt: ev.CreatedAt,
Content: []byte(ev.Content),
Tags: &tags,
}
// Copy fixed-size arrays
copy(e.ID[:], id)
copy(e.Sig[:], sig)
copy(e.Pubkey[:], pubkey)
events[uint64(ev.Serial)] = e
}
return events, nil
}
// GetSerialById retrieves the serial number for an event ID
func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
idStr := hex.Enc(id)
query := fmt.Sprintf(`{
event(func: eq(event.id, %q)) {
event.serial
}
}`, idStr)
resp, err := d.Query(context.Background(), query)
if err != nil {
return nil, fmt.Errorf("failed to get serial by ID: %w", err)
}
var result struct {
Event []struct {
Serial int64 `json:"event.serial"`
} `json:"event"`
}
if err = json.Unmarshal(resp.Json, &result); err != nil {
return nil, err
}
if len(result.Event) == 0 {
return nil, fmt.Errorf("event not found")
}
ser = &types.Uint40{}
ser.Set(uint64(result.Event[0].Serial))
return ser, nil
}
// GetSerialsByIds retrieves serial numbers for multiple event IDs
func (d *D) GetSerialsByIds(ids *tag.T) (
serials map[string]*types.Uint40, err error,
) {
serials = make(map[string]*types.Uint40)
if len(ids.T) == 0 {
return serials, nil
}
// Build batch query for all IDs at once
idConditions := make([]string, 0, len(ids.T))
idMap := make(map[string][]byte) // Map hex ID to original bytes
for _, idBytes := range ids.T {
if len(idBytes) > 0 {
idStr := hex.Enc(idBytes)
idConditions = append(idConditions, fmt.Sprintf("eq(event.id, %q)", idStr))
idMap[idStr] = idBytes
}
}
if len(idConditions) == 0 {
return serials, nil
}
// Create single query with OR conditions
idFilter := strings.Join(idConditions, " OR ")
query := fmt.Sprintf(`{
events(func: has(event.id)) @filter(%s) {
event.id
event.serial
}
}`, idFilter)
resp, err := d.Query(context.Background(), query)
if err != nil {
return nil, fmt.Errorf("failed to batch query serials by IDs: %w", err)
}
var result struct {
Events []struct {
ID string `json:"event.id"`
Serial int64 `json:"event.serial"`
} `json:"events"`
}
if err = json.Unmarshal(resp.Json, &result); err != nil {
return nil, err
}
// Map results back
for _, ev := range result.Events {
serial := types.Uint40{}
serial.Set(uint64(ev.Serial))
serials[ev.ID] = &serial
}
return serials, nil
}
// GetSerialsByIdsWithFilter retrieves serials with a filter function
func (d *D) GetSerialsByIdsWithFilter(
ids *tag.T, fn func(ev *event.E, ser *types.Uint40) bool,
) (serials map[string]*types.Uint40, err error) {
serials = make(map[string]*types.Uint40)
if fn == nil {
// No filter, just return all
return d.GetSerialsByIds(ids)
}
// With filter, need to fetch events
for _, id := range ids.T {
if len(id) > 0 {
serial, err := d.GetSerialById(id)
if err != nil {
continue
}
ev, err := d.FetchEventBySerial(serial)
if err != nil {
continue
}
if fn(ev, serial) {
serials[string(id)] = serial
}
}
}
return serials, nil
}
// GetSerialsByRange retrieves serials within a range
func (d *D) GetSerialsByRange(idx database.Range) (
serials types.Uint40s, err error,
) {
// Range represents a byte-prefix range for index scanning
// For dgraph, we need to convert this to a query on indexed fields
// The range is typically used for scanning event IDs or other hex-encoded keys
if len(idx.Start) == 0 && len(idx.End) == 0 {
return nil, fmt.Errorf("empty range provided")
}
startStr := hex.Enc(idx.Start)
endStr := hex.Enc(idx.End)
// Query for events with IDs in the specified range
query := fmt.Sprintf(`{
events(func: ge(event.id, %q)) @filter(le(event.id, %q)) {
event.serial
}
}`, startStr, endStr)
resp, err := d.Query(context.Background(), query)
if err != nil {
return nil, fmt.Errorf("failed to query serials by range: %w", err)
}
var result struct {
Events []struct {
Serial int64 `json:"event.serial"`
} `json:"events"`
}
if err = json.Unmarshal(resp.Json, &result); err != nil {
return nil, err
}
serials = make([]*types.Uint40, 0, len(result.Events))
for _, ev := range result.Events {
serial := types.Uint40{}
serial.Set(uint64(ev.Serial))
serials = append(serials, &serial)
}
return serials, nil
}
// GetFullIdPubkeyBySerial retrieves ID and pubkey for a serial number
func (d *D) GetFullIdPubkeyBySerial(ser *types.Uint40) (
fidpk *store.IdPkTs, err error,
) {
serial := ser.Get()
query := fmt.Sprintf(`{
event(func: eq(event.serial, %d)) {
event.id
event.pubkey
event.created_at
}
}`, serial)
resp, err := d.Query(context.Background(), query)
if err != nil {
return nil, fmt.Errorf("failed to get ID and pubkey by serial: %w", err)
}
var result struct {
Event []struct {
ID string `json:"event.id"`
Pubkey string `json:"event.pubkey"`
CreatedAt int64 `json:"event.created_at"`
} `json:"event"`
}
if err = json.Unmarshal(resp.Json, &result); err != nil {
return nil, err
}
if len(result.Event) == 0 {
return nil, fmt.Errorf("event not found")
}
id, err := hex.Dec(result.Event[0].ID)
if err != nil {
return nil, err
}
pubkey, err := hex.Dec(result.Event[0].Pubkey)
if err != nil {
return nil, err
}
fidpk = &store.IdPkTs{
Id: id,
Pub: pubkey,
Ts: result.Event[0].CreatedAt,
Ser: serial,
}
return fidpk, nil
}
// GetFullIdPubkeyBySerials retrieves IDs and pubkeys for multiple serials
func (d *D) GetFullIdPubkeyBySerials(sers []*types.Uint40) (
fidpks []*store.IdPkTs, err error,
) {
fidpks = make([]*store.IdPkTs, 0, len(sers))
for _, ser := range sers {
fidpk, err := d.GetFullIdPubkeyBySerial(ser)
if err != nil {
continue // Skip errors, continue with others
}
fidpks = append(fidpks, fidpk)
}
return fidpks, nil
}

144
pkg/dgraph/helpers_test.go Normal file
View File

@@ -0,0 +1,144 @@
package dgraph
import (
"bufio"
"bytes"
"context"
"net"
"os"
"sort"
"testing"
"time"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/event/examples"
)
// isDgraphAvailable checks if a dgraph server is running
func isDgraphAvailable() bool {
dgraphURL := os.Getenv("ORLY_DGRAPH_URL")
if dgraphURL == "" {
dgraphURL = "localhost:9080"
}
conn, err := net.DialTimeout("tcp", dgraphURL, 2*time.Second)
if err != nil {
return false
}
conn.Close()
return true
}
// skipIfDgraphNotAvailable skips the test if dgraph is not available
func skipIfDgraphNotAvailable(t *testing.T) {
if !isDgraphAvailable() {
dgraphURL := os.Getenv("ORLY_DGRAPH_URL")
if dgraphURL == "" {
dgraphURL = "localhost:9080"
}
t.Skipf("Dgraph server not available at %s. Start with: docker run -p 9080:9080 dgraph/standalone:latest", dgraphURL)
}
}
// setupTestDB creates a new test dgraph database and loads example events
func setupTestDB(t *testing.T) (
*D, []*event.E, context.Context, context.CancelFunc, string,
) {
skipIfDgraphNotAvailable(t)
// Create a temporary directory for metadata storage
tempDir, err := os.MkdirTemp("", "test-dgraph-*")
if err != nil {
t.Fatalf("Failed to create temporary directory: %v", err)
}
// Create a context and cancel function for the database
ctx, cancel := context.WithCancel(context.Background())
// Initialize the dgraph database
db, err := New(ctx, cancel, tempDir, "info")
if err != nil {
cancel()
os.RemoveAll(tempDir)
t.Fatalf("Failed to create dgraph database: %v", err)
}
// Drop all data to start fresh
if err := db.dropAll(ctx); err != nil {
db.Close()
cancel()
os.RemoveAll(tempDir)
t.Fatalf("Failed to drop all data: %v", err)
}
// Create a scanner to read events from examples.Cache
scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
var events []*event.E
// First, collect all events from examples.Cache
for scanner.Scan() {
chk.E(scanner.Err())
b := scanner.Bytes()
ev := event.New()
// Unmarshal the event
if _, err = ev.Unmarshal(b); chk.E(err) {
ev.Free()
db.Close()
cancel()
os.RemoveAll(tempDir)
t.Fatal(err)
}
events = append(events, ev)
}
// Check for scanner errors
if err = scanner.Err(); err != nil {
db.Close()
cancel()
os.RemoveAll(tempDir)
t.Fatalf("Scanner error: %v", err)
}
// Sort events by CreatedAt to ensure addressable events are processed in chronological order
sort.Slice(events, func(i, j int) bool {
return events[i].CreatedAt < events[j].CreatedAt
})
// Count the number of events processed
eventCount := 0
// Now process each event in chronological order
for _, ev := range events {
// Save the event to the database
if _, err = db.SaveEvent(ctx, ev); err != nil {
db.Close()
cancel()
os.RemoveAll(tempDir)
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
}
eventCount++
}
t.Logf("Successfully saved %d events to dgraph database", eventCount)
return db, events, ctx, cancel, tempDir
}
// cleanupTestDB cleans up the test database
func cleanupTestDB(t *testing.T, db *D, cancel context.CancelFunc, tempDir string) {
if db != nil {
db.Close()
}
if cancel != nil {
cancel()
}
if tempDir != "" {
os.RemoveAll(tempDir)
}
}

44
pkg/dgraph/identity.go Normal file
View File

@@ -0,0 +1,44 @@
package dgraph
import (
"fmt"
"next.orly.dev/pkg/crypto/keys"
)
// Relay identity methods
// We use the marker system to store the relay's private key
const relayIdentityMarkerKey = "relay_identity_secret"
// GetRelayIdentitySecret retrieves the relay's identity secret key
func (d *D) GetRelayIdentitySecret() (skb []byte, err error) {
return d.GetMarker(relayIdentityMarkerKey)
}
// SetRelayIdentitySecret sets the relay's identity secret key
func (d *D) SetRelayIdentitySecret(skb []byte) error {
return d.SetMarker(relayIdentityMarkerKey, skb)
}
// GetOrCreateRelayIdentitySecret retrieves or creates the relay identity
func (d *D) GetOrCreateRelayIdentitySecret() (skb []byte, err error) {
skb, err = d.GetRelayIdentitySecret()
if err == nil {
return skb, nil
}
// Generate new identity
skb, err = keys.GenerateSecretKey()
if err != nil {
return nil, fmt.Errorf("failed to generate identity: %w", err)
}
// Store it
if err = d.SetRelayIdentitySecret(skb); err != nil {
return nil, fmt.Errorf("failed to store identity: %w", err)
}
d.Logger.Infof("generated new relay identity")
return skb, nil
}

Some files were not shown because too many files have changed in this diff Show More