Migrate internal module imports to unified package path.
Replaced legacy `*.orly` module imports with `next.orly.dev/pkg` paths across the codebase for consistency. Removed legacy `go.mod` files from sub-packages, consolidating dependency management. Added Dockerfiles and configurations for benchmarking environments.
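For illustration, the path change is mechanical at each import site; a minimal before/after sketch (the legacy module name shown is an assumption; only the new unified prefix appears in this diff):

```go
import (
	// before (assumed legacy per-package module): "event.orly"
	// after, as now used in cmd/benchmark/main.go:
	"next.orly.dev/pkg/encoders/event"
)
```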
cmd/benchmark/Dockerfile.benchmark (new file, 46 lines)
@@ -0,0 +1,46 @@
# Dockerfile for benchmark runner
FROM golang:1.25-alpine AS builder

# Install build dependencies
RUN apk add --no-cache git ca-certificates

# Set working directory
WORKDIR /build

# Copy go modules
COPY go.mod go.sum ./
RUN go mod download

# Copy source code
COPY . .

# Build the benchmark tool
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o benchmark cmd/benchmark/main.go

# Final stage
FROM alpine:latest

# Install runtime dependencies
RUN apk --no-cache add ca-certificates curl wget

WORKDIR /app

# Copy benchmark binary
COPY --from=builder /build/benchmark /app/benchmark

# Copy benchmark runner script
COPY cmd/benchmark/benchmark-runner.sh /app/benchmark-runner

# Make scripts executable
RUN chmod +x /app/benchmark-runner

# Create reports directory
RUN mkdir -p /reports

# Environment variables
ENV BENCHMARK_EVENTS=10000
ENV BENCHMARK_WORKERS=8
ENV BENCHMARK_DURATION=60s

# Run the benchmark runner
CMD ["/app/benchmark-runner"]

cmd/benchmark/Dockerfile.khatru-badger (new file, 23 lines)
@@ -0,0 +1,23 @@
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates

WORKDIR /build
COPY . .

# Build the basic-badger example
RUN cd examples/basic-badger && \
    go mod tidy && \
    CGO_ENABLED=0 go build -o khatru-badger .

FROM alpine:latest
RUN apk --no-cache add ca-certificates wget
WORKDIR /app
COPY --from=builder /build/examples/basic-badger/khatru-badger /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/badger
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-badger"]

cmd/benchmark/Dockerfile.khatru-sqlite (new file, 23 lines)
@@ -0,0 +1,23 @@
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic-sqlite example
RUN cd examples/basic-sqlite3 && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o khatru-sqlite .

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic-sqlite3/khatru-sqlite /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/khatru.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-sqlite"]

cmd/benchmark/Dockerfile.next-orly (new file, 80 lines)
@@ -0,0 +1,80 @@
# Dockerfile for next.orly.dev relay
FROM ubuntu:22.04 AS builder

# Set environment variables
ARG GOLANG_VERSION=1.25.1

# Update package list and install dependencies
RUN apt-get update && \
    apt-get install -y wget ca-certificates && \
    rm -rf /var/lib/apt/lists/*

# Download Go binary
RUN wget https://go.dev/dl/go${GOLANG_VERSION}.linux-amd64.tar.gz && \
    rm -rf /usr/local/go && \
    tar -C /usr/local -xzf go${GOLANG_VERSION}.linux-amd64.tar.gz && \
    rm go${GOLANG_VERSION}.linux-amd64.tar.gz

# Set PATH environment variable
ENV PATH="/usr/local/go/bin:${PATH}"

# Verify installation
RUN go version

RUN apt update && \
    apt -y install build-essential autoconf libtool git wget
RUN cd /tmp && \
    rm -rf secp256k1 && \
    git clone https://github.com/bitcoin-core/secp256k1.git && \
    cd secp256k1 && \
    git checkout v0.6.0 && \
    git submodule init && \
    git submodule update && \
    ./autogen.sh && \
    ./configure --enable-module-schnorrsig --enable-module-ecdh --prefix=/usr && \
    make -j1 && \
    make install

# Set working directory
WORKDIR /build

# Copy go modules
COPY go.mod go.sum ./
RUN go mod download

# Copy source code
COPY . .

# Build the relay
RUN CGO_ENABLED=1 GOOS=linux go build -o relay .

# Final stage
FROM ubuntu:22.04

# Install runtime dependencies
RUN apt-get update && apt-get install -y ca-certificates curl libsecp256k1-0 libsecp256k1-dev && rm -rf /var/lib/apt/lists/* && \
    ln -sf /usr/lib/x86_64-linux-gnu/libsecp256k1.so.0 /usr/lib/x86_64-linux-gnu/libsecp256k1.so.5

WORKDIR /app

# Copy binary from builder
COPY --from=builder /build/relay /app/relay

# Create data directory
RUN mkdir -p /data

# Expose port
EXPOSE 8080

# Set environment variables
ENV DATA_DIR=/data
ENV LISTEN=0.0.0.0
ENV PORT=8080
ENV LOG_LEVEL=info

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8080 || exit 1

# Run the relay
CMD ["/app/relay"]

cmd/benchmark/Dockerfile.nostr-rs-relay (new file, 33 lines)
@@ -0,0 +1,33 @@
FROM ubuntu:22.04 AS builder

RUN apt-get update && apt-get install -y \
    curl \
    build-essential \
    libsqlite3-dev \
    pkg-config \
    protobuf-compiler \
    && rm -rf /var/lib/apt/lists/*

# Install Rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"

WORKDIR /build
COPY . .

# Build the relay
RUN cargo build --release

FROM ubuntu:22.04
RUN apt-get update && apt-get install -y ca-certificates sqlite3 wget && rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY --from=builder /build/target/release/nostr-rs-relay /app/
RUN mkdir -p /data

EXPOSE 8080
ENV RUST_LOG=info

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1

CMD ["/app/nostr-rs-relay"]

cmd/benchmark/Dockerfile.relayer-basic (new file, 23 lines)
@@ -0,0 +1,23 @@
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic example
RUN cd examples/basic && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o relayer-basic .

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic/relayer-basic /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/relayer.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/relayer-basic"]

cmd/benchmark/Dockerfile.strfry (new file, 54 lines)
@@ -0,0 +1,54 @@
FROM ubuntu:22.04 AS builder

ENV DEBIAN_FRONTEND=noninteractive

# Install build dependencies
RUN apt-get update && apt-get install -y git g++ make libssl-dev zlib1g-dev liblmdb-dev libflatbuffers-dev libsecp256k1-dev libzstd-dev \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build
COPY . .

# Initialize git submodules
RUN git submodule update --init --recursive

# Build strfry
RUN make setup-golpe && \
    make -j$(nproc)

FROM ubuntu:22.04
RUN apt-get update && apt-get install -y \
    liblmdb0 \
    libsecp256k1-0 \
    curl \
    bash \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
COPY --from=builder /build/strfry /app/
COPY --from=builder /build/strfry.conf /app/

# Create the data directory placeholder (may be masked by volume at runtime)
RUN mkdir -p /data && \
    chmod 755 /data

# Update strfry.conf to bind to all interfaces and use port 8080
RUN sed -i 's/bind = "127.0.0.1"/bind = "0.0.0.0"/' /app/strfry.conf && \
    sed -i 's/port = 7777/port = 8080/' /app/strfry.conf

# Entrypoint ensures the LMDB directory exists inside the mounted volume before starting
ENV STRFRY_DB_PATH=/data/strfry.lmdb
RUN echo '#!/usr/bin/env bash' > /entrypoint.sh && \
    echo 'set -euo pipefail' >> /entrypoint.sh && \
    echo 'DB_PATH="${STRFRY_DB_PATH:-/data/strfry.lmdb}"' >> /entrypoint.sh && \
    echo 'mkdir -p "$DB_PATH"' >> /entrypoint.sh && \
    echo 'chown -R root:root "$(dirname "$DB_PATH")"' >> /entrypoint.sh && \
    echo 'exec /app/strfry relay' >> /entrypoint.sh && \
    chmod +x /entrypoint.sh

EXPOSE 8080

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8080 || exit 1

ENTRYPOINT ["/entrypoint.sh"]

cmd/benchmark/README.md (new file, 260 lines)
@@ -0,0 +1,260 @@
# Nostr Relay Benchmark Suite

A comprehensive benchmarking system for testing and comparing the performance of multiple Nostr relay implementations, including:

- **next.orly.dev** (this repository) - BadgerDB-based relay
- **Khatru** - SQLite and Badger variants
- **Relayer** - Basic example implementation
- **Strfry** - C++ LMDB-based relay
- **nostr-rs-relay** - Rust-based relay with SQLite

## Features

### Benchmark Tests

1. **Peak Throughput Test**
   - Tests maximum event ingestion rate
   - Concurrent workers pushing events as fast as possible
   - Measures events/second, latency distribution, success rate

2. **Burst Pattern Test**
   - Simulates real-world traffic patterns
   - Alternating high-activity bursts and quiet periods
   - Tests relay behavior under varying loads

3. **Mixed Read/Write Test**
   - Concurrent read and write operations
   - Tests query performance while events are being ingested
   - Measures combined throughput and latency

### Performance Metrics

- **Throughput**: Events processed per second
- **Latency**: Average, P95, and P99 response times (see the percentile sketch below)
- **Success Rate**: Percentage of successful operations
- **Memory Usage**: Peak memory consumption during tests
- **Error Analysis**: Detailed error reporting and categorization
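The latency percentiles are order statistics: sort the samples, then index at the given fraction. A minimal sketch of how such a percentile can be computed (the helper name is ours, for illustration only):

```go
package main

import (
	"sort"
	"time"
)

// percentile returns the p-quantile (p in [0,1], e.g. 0.95 for P95) of the
// samples. It sorts a copy so the caller's slice is left untouched.
func percentile(samples []time.Duration, p float64) time.Duration {
	if len(samples) == 0 {
		return 0
	}
	s := append([]time.Duration(nil), samples...)
	sort.Slice(s, func(i, j int) bool { return s[i] < s[j] })
	return s[int(float64(len(s)-1)*p)]
}
```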
### Reporting

- Individual relay reports with detailed metrics
- Aggregate comparison report across all relays
- Comparison tables for easy performance analysis
- Timestamped results for tracking improvements over time

## Quick Start

### 1. Set Up External Relays

Run the setup script to download and configure all external relay repositories:

```bash
cd cmd/benchmark
./setup-external-relays.sh
```

This will:
- Clone all external relay repositories
- Create Docker configurations for each relay
- Set up configuration files
- Create data and report directories

### 2. Run Benchmarks

Start all relays and run the benchmark suite:

```bash
docker compose up --build
```

The system will:
- Build and start all relay containers
- Wait for all relays to become healthy
- Run benchmarks against each relay sequentially
- Generate individual and aggregate reports

### 3. View Results

Results are stored in the `reports/` directory with timestamps:

```bash
# View the aggregate report
cat reports/run_YYYYMMDD_HHMMSS/aggregate_report.txt

# View individual relay results
ls reports/run_YYYYMMDD_HHMMSS/
```

## Architecture

### Docker Compose Services

| Service | Port | Description |
|---------|------|-------------|
| next-orly | 8001 | This repository's BadgerDB relay |
| khatru-sqlite | 8002 | Khatru with SQLite backend |
| khatru-badger | 8003 | Khatru with Badger backend |
| relayer-basic | 8004 | Basic relayer example |
| strfry | 8005 | Strfry C++ LMDB relay |
| nostr-rs-relay | 8006 | Rust SQLite relay |
| benchmark-runner | - | Orchestrates tests and aggregates results |

### File Structure

```
cmd/benchmark/
├── main.go                     # Benchmark tool implementation
├── docker-compose.yml          # Service orchestration
├── setup-external-relays.sh    # Repository setup script
├── benchmark-runner.sh         # Test orchestration script
├── Dockerfile.next-orly        # This repo's relay container
├── Dockerfile.benchmark        # Benchmark runner container
├── Dockerfile.khatru-sqlite    # Khatru SQLite variant
├── Dockerfile.khatru-badger    # Khatru Badger variant
├── Dockerfile.relayer-basic    # Relayer basic example
├── Dockerfile.strfry           # Strfry relay
├── Dockerfile.nostr-rs-relay   # Rust relay
├── configs/
│   ├── strfry.conf             # Strfry configuration
│   └── config.toml             # nostr-rs-relay configuration
├── external/                   # External relay repositories
├── data/                       # Persistent data for each relay
└── reports/                    # Benchmark results
```

## Configuration

### Environment Variables

The benchmark can be configured via environment variables in `docker-compose.yml`:

```yaml
environment:
  - BENCHMARK_EVENTS=10000    # Number of events per test
  - BENCHMARK_WORKERS=8       # Concurrent workers
  - BENCHMARK_DURATION=60s    # Test duration
  - BENCHMARK_TARGETS=...     # Relay endpoints to test
```

### Custom Configuration

1. **Modify test parameters**: Edit environment variables in `docker-compose.yml`
2. **Add new relays** (see the service sketch after this list):
   - Add service to `docker-compose.yml`
   - Create appropriate Dockerfile
   - Update `BENCHMARK_TARGETS` environment variable
3. **Adjust relay configs**: Edit files in `configs/` directory
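A hedged sketch of such a service entry; the relay name, paths, and host port below are placeholders, not part of this commit:

```yaml
# Hypothetical entry under services: in docker-compose.yml
my-relay:
  build:
    context: ./external/my-relay
    dockerfile: ../../Dockerfile.my-relay
  container_name: benchmark-my-relay
  ports:
    - "8007:8080"
  networks:
    - benchmark-net
  healthcheck:
    test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
    interval: 30s
    timeout: 10s
    retries: 3
    start_period: 40s
```

The matching target would then be appended to `BENCHMARK_TARGETS`, e.g. `my-relay:8007`.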
## Manual Usage

### Run Individual Relay

```bash
# Build and run a specific relay
docker-compose up next-orly

# Run benchmark against specific endpoint
./benchmark -datadir=/tmp/test -events=1000 -workers=4
```

### Run Benchmark Tool Directly

```bash
# Build the benchmark tool
go build -o benchmark main.go

# Run with custom parameters
./benchmark \
  -datadir=/tmp/benchmark_db \
  -events=5000 \
  -workers=4 \
  -duration=30s
```

## Benchmark Results Interpretation

### Peak Throughput Test
- **High events/sec**: Good write performance
- **Low latency**: Efficient event processing
- **High success rate**: Stable under load

### Burst Pattern Test
- **Consistent performance**: Good handling of variable loads
- **Low P95/P99 latency**: Predictable response times
- **No errors during bursts**: Robust queuing/buffering

### Mixed Read/Write Test
- **Balanced throughput**: Good concurrent operation handling
- **Low read latency**: Efficient query processing
- **Stable write performance**: Queries don't significantly impact writes

## Development

### Adding New Tests

1. Extend the `Benchmark` struct in `main.go`
2. Add a new test method following the existing patterns (see the sketch below)
3. Update `main()` to call the new test
4. Update result aggregation in `benchmark-runner.sh`
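A minimal sketch of step 2, reusing the helpers this commit ships in `main.go` (`generateEvents`, `SaveEvent`, `calculateAvgLatency`); the test name and fixed pacing interval are assumptions for illustration:

```go
// RunSustainedRateTest is a hypothetical example test that writes events at a
// fixed pace and records a BenchmarkResult like the existing tests do.
func (b *Benchmark) RunSustainedRateTest() {
	fmt.Println("\n=== Sustained Rate Test ===")
	start := time.Now()
	ctx := context.Background()
	events := b.generateEvents(b.config.NumEvents)

	var saved int64
	var latencies []time.Duration
	for _, ev := range events {
		if time.Since(start) >= b.config.TestDuration {
			break
		}
		t0 := time.Now()
		if _, _, err := b.db.SaveEvent(ctx, ev); err == nil {
			saved++
			latencies = append(latencies, time.Since(t0))
		}
		time.Sleep(5 * time.Millisecond) // fixed pacing between writes (assumed)
	}

	duration := time.Since(start)
	result := &BenchmarkResult{
		TestName:        "Sustained Rate",
		Duration:        duration,
		TotalEvents:     int(saved),
		EventsPerSecond: float64(saved) / duration.Seconds(),
	}
	if len(latencies) > 0 {
		result.AvgLatency = calculateAvgLatency(latencies)
	}
	b.mu.Lock()
	b.results = append(b.results, result)
	b.mu.Unlock()
}
```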
### Modifying Relay Configurations

Each relay's Dockerfile and configuration can be customized:
- **Resource limits**: Adjust memory/CPU limits in `docker-compose.yml`
- **Database settings**: Modify configuration files in `configs/`
- **Network settings**: Update port mappings and health checks

### Debugging

```bash
# View logs for specific relay
docker-compose logs next-orly

# Run benchmark with debug output
docker-compose up --build benchmark-runner

# Check individual container health
docker-compose ps
```

## Troubleshooting

### Common Issues

1. **Relay fails to start**: Check logs with `docker-compose logs <service>`
2. **Connection refused**: Ensure relay health checks are passing
3. **Build failures**: Verify external repositories were cloned correctly
4. **Permission errors**: Ensure setup script is executable

### Performance Issues

- **Low throughput**: Check resource limits and concurrent worker count
- **High memory usage**: Monitor container resource consumption
- **Network bottlenecks**: Test on different host configurations

### Reset Environment

```bash
# Clean up everything
docker-compose down -v
docker system prune -f
rm -rf external/ data/ reports/

# Start fresh
./setup-external-relays.sh
docker-compose up --build
```

## Contributing

To add support for new relay implementations:

1. Create appropriate Dockerfile following existing patterns
2. Add service definition to `docker-compose.yml`
3. Update `BENCHMARK_TARGETS` environment variable
4. Test the new relay integration
5. Update documentation

## License

This benchmark suite is part of the next.orly.dev project and follows the same licensing terms.

cmd/benchmark/benchmark-runner.sh (new file, 265 lines)
@@ -0,0 +1,265 @@
#!/bin/sh

# Benchmark runner script for testing multiple Nostr relay implementations
# This script coordinates testing all relays and aggregates results

set -e

# Configuration from environment variables
BENCHMARK_EVENTS="${BENCHMARK_EVENTS:-10000}"
BENCHMARK_WORKERS="${BENCHMARK_WORKERS:-8}"
BENCHMARK_DURATION="${BENCHMARK_DURATION:-60s}"
BENCHMARK_TARGETS="${BENCHMARK_TARGETS:-next-orly:8001,khatru-sqlite:8002,khatru-badger:8003,relayer-basic:8004,strfry:8005,nostr-rs-relay:8006}"
OUTPUT_DIR="${OUTPUT_DIR:-/reports}"

# Create output directory
mkdir -p "${OUTPUT_DIR}"

# Generate timestamp for this benchmark run
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
RUN_DIR="${OUTPUT_DIR}/run_${TIMESTAMP}"
mkdir -p "${RUN_DIR}"

echo "=================================================="
echo "Nostr Relay Benchmark Suite"
echo "=================================================="
echo "Timestamp: $(date)"
echo "Events per test: ${BENCHMARK_EVENTS}"
echo "Concurrent workers: ${BENCHMARK_WORKERS}"
echo "Test duration: ${BENCHMARK_DURATION}"
echo "Output directory: ${RUN_DIR}"
echo "=================================================="

# Function to wait for relay to be ready
wait_for_relay() {
    local name="$1"
    local url="$2"
    local max_attempts=60
    local attempt=0

    echo "Waiting for ${name} to be ready at ${url}..."

    while [ $attempt -lt $max_attempts ]; do
        if wget --quiet --tries=1 --spider --timeout=5 "http://${url}" 2>/dev/null || \
           curl -f --connect-timeout 5 --max-time 5 "http://${url}" >/dev/null 2>&1; then
            echo "${name} is ready!"
            return 0
        fi

        attempt=$((attempt + 1))
        echo " Attempt ${attempt}/${max_attempts}: ${name} not ready yet..."
        sleep 2
    done

    echo "ERROR: ${name} failed to become ready after ${max_attempts} attempts"
    return 1
}

# Function to run benchmark against a specific relay
run_benchmark() {
    local relay_name="$1"
    local relay_url="$2"
    local output_file="$3"

    echo ""
    echo "=================================================="
    echo "Testing ${relay_name} at ws://${relay_url}"
    echo "=================================================="

    # Wait for relay to be ready
    if ! wait_for_relay "${relay_name}" "${relay_url}"; then
        echo "ERROR: ${relay_name} is not responding, skipping..."
        echo "RELAY: ${relay_name}" > "${output_file}"
        echo "STATUS: FAILED - Relay not responding" >> "${output_file}"
        echo "ERROR: Connection failed" >> "${output_file}"
        return 1
    fi

    # Run the benchmark
    echo "Running benchmark against ${relay_name}..."

    # Create temporary directory for this relay's data
    TEMP_DATA_DIR="/tmp/benchmark_${relay_name}_$$"
    mkdir -p "${TEMP_DATA_DIR}"

    # Run benchmark and capture both stdout and stderr
    if /app/benchmark \
        -datadir="${TEMP_DATA_DIR}" \
        -events="${BENCHMARK_EVENTS}" \
        -workers="${BENCHMARK_WORKERS}" \
        -duration="${BENCHMARK_DURATION}" \
        > "${output_file}" 2>&1; then

        echo "✓ Benchmark completed successfully for ${relay_name}"

        # Add relay identification to the report
        echo "" >> "${output_file}"
        echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
        echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
        echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
        echo "BENCHMARK_CONFIG:" >> "${output_file}"
        echo " Events: ${BENCHMARK_EVENTS}" >> "${output_file}"
        echo " Workers: ${BENCHMARK_WORKERS}" >> "${output_file}"
        echo " Duration: ${BENCHMARK_DURATION}" >> "${output_file}"

    else
        echo "✗ Benchmark failed for ${relay_name}"
        echo "" >> "${output_file}"
        echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
        echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
        echo "STATUS: FAILED" >> "${output_file}"
        echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
    fi

    # Clean up temporary data
    rm -rf "${TEMP_DATA_DIR}"
}

# Function to generate aggregate report
generate_aggregate_report() {
    local aggregate_file="${RUN_DIR}/aggregate_report.txt"

    echo "Generating aggregate report..."

    cat > "${aggregate_file}" << EOF
================================================================
NOSTR RELAY BENCHMARK AGGREGATE REPORT
================================================================
Generated: $(date -Iseconds)
Benchmark Configuration:
  Events per test: ${BENCHMARK_EVENTS}
  Concurrent workers: ${BENCHMARK_WORKERS}
  Test duration: ${BENCHMARK_DURATION}

Relays tested: $(echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | wc -l)

================================================================
SUMMARY BY RELAY
================================================================

EOF

    # Process each relay's results
    echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
        if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
            continue
        fi

        relay_file="${RUN_DIR}/${relay_name}_results.txt"

        echo "Relay: ${relay_name}" >> "${aggregate_file}"
        echo "----------------------------------------" >> "${aggregate_file}"

        if [ -f "${relay_file}" ]; then
            # Extract key metrics from the relay's report
            if grep -q "STATUS: FAILED" "${relay_file}"; then
                echo "Status: FAILED" >> "${aggregate_file}"
                grep "ERROR:" "${relay_file}" | head -1 >> "${aggregate_file}" || echo "Error: Unknown failure" >> "${aggregate_file}"
            else
                echo "Status: COMPLETED" >> "${aggregate_file}"

                # Extract performance metrics
                grep "Events/sec:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
                grep "Success Rate:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
                grep "Avg Latency:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
                grep "P95 Latency:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
                grep "Memory:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
            fi
        else
            echo "Status: NO RESULTS FILE" >> "${aggregate_file}"
            echo "Error: Results file not found" >> "${aggregate_file}"
        fi

        echo "" >> "${aggregate_file}"
    done

    cat >> "${aggregate_file}" << EOF

================================================================
DETAILED RESULTS
================================================================

Individual relay reports are available in:
$(ls "${RUN_DIR}"/*_results.txt 2>/dev/null | sed 's|^| - |' || echo " No individual reports found")

================================================================
BENCHMARK COMPARISON TABLE
================================================================

EOF

    # Create a comparison table
    printf "%-20s %-10s %-15s %-15s %-15s\n" "Relay" "Status" "Peak Tput/s" "Avg Latency" "Success Rate" >> "${aggregate_file}"
    printf "%-20s %-10s %-15s %-15s %-15s\n" "----" "------" "-----------" "-----------" "------------" >> "${aggregate_file}"

    echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
        if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
            continue
        fi

        relay_file="${RUN_DIR}/${relay_name}_results.txt"

        if [ -f "${relay_file}" ]; then
            if grep -q "STATUS: FAILED" "${relay_file}"; then
                printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "FAILED" "-" "-" "-" >> "${aggregate_file}"
            else
                # Extract metrics for the table
                peak_tput=$(grep "Events/sec:" "${relay_file}" | head -1 | awk '{print $2}' || echo "-")
                avg_latency=$(grep "Avg Latency:" "${relay_file}" | head -1 | awk '{print $3}' || echo "-")
                success_rate=$(grep "Success Rate:" "${relay_file}" | head -1 | awk '{print $3}' || echo "-")

                printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "OK" "${peak_tput}" "${avg_latency}" "${success_rate}" >> "${aggregate_file}"
            fi
        else
            printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "NO DATA" "-" "-" "-" >> "${aggregate_file}"
        fi
    done

    echo "" >> "${aggregate_file}"
    echo "================================================================" >> "${aggregate_file}"
    echo "End of Report" >> "${aggregate_file}"
    echo "================================================================" >> "${aggregate_file}"
}

# Main execution
echo "Starting relay benchmark suite..."

# Parse targets and run benchmarks
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
    if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
        echo "WARNING: Skipping invalid target: ${relay_name}:${relay_port}"
        continue
    fi

    relay_url="${relay_name}:${relay_port}"
    output_file="${RUN_DIR}/${relay_name}_results.txt"

    run_benchmark "${relay_name}" "${relay_url}" "${output_file}"

    # Small delay between tests
    sleep 5
done

# Generate aggregate report
generate_aggregate_report

echo ""
echo "=================================================="
echo "Benchmark Suite Completed!"
echo "=================================================="
echo "Results directory: ${RUN_DIR}"
echo "Aggregate report: ${RUN_DIR}/aggregate_report.txt"
echo ""

# Display summary
if [ -f "${RUN_DIR}/aggregate_report.txt" ]; then
    echo "Quick Summary:"
    echo "=============="
    grep -A 10 "BENCHMARK COMPARISON TABLE" "${RUN_DIR}/aggregate_report.txt" | tail -n +4
fi

echo ""
echo "All benchmark files:"
ls -la "${RUN_DIR}/"
echo ""
echo "Benchmark suite finished at: $(date)"

cmd/benchmark/configs/config.toml (new file, 36 lines)
@@ -0,0 +1,36 @@
[info]
relay_url = "ws://localhost:8080"
name = "nostr-rs-relay benchmark"
description = "A nostr-rs-relay for benchmarking"
pubkey = ""
contact = ""

[database]
data_directory = "/data"
in_memory = false
engine = "sqlite"

[network]
port = 8080
address = "0.0.0.0"

[limits]
messages_per_sec = 0
subscriptions_per_min = 0
max_event_bytes = 65535
max_ws_message_bytes = 131072
max_ws_frame_bytes = 131072

[authorization]
pubkey_whitelist = []

[verified_users]
mode = "passive"
domain_whitelist = []
domain_blacklist = []

[pay_to_relay]
enabled = false

[options]
reject_future_seconds = 30

cmd/benchmark/configs/strfry.conf (new file, 101 lines)
@@ -0,0 +1,101 @@
##
## Default strfry config
##

# Directory that contains the strfry LMDB database (restart required)
db = "/data/strfry.lmdb"

dbParams {
    # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
    maxreaders = 256

    # Size of mmap to use when loading LMDB (default is 1TB, which is probably reasonable) (restart required)
    mapsize = 1099511627776
}

relay {
    # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
    bind = "0.0.0.0"

    # Port to open for the nostr websocket protocol (restart required)
    port = 8080

    # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
    nofiles = 1000000

    # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
    realIpHeader = ""

    info {
        # NIP-11: Name of this server. Short/descriptive (< 30 characters)
        name = "strfry benchmark"

        # NIP-11: Detailed description of this server, free-form
        description = "A strfry relay for benchmarking"

        # NIP-11: Administrative pubkey, for contact purposes
        pubkey = ""

        # NIP-11: Alternative contact for this server
        contact = ""
    }

    # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
    maxWebsocketPayloadSize = 131072

    # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
    autoPingSeconds = 55

    # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy) (restart required)
    enableTcpKeepalive = false

    # How much uninterrupted CPU time a REQ query should get during its DB scan
    queryTimesliceBudgetMicroseconds = 10000

    # Maximum records that can be returned per filter
    maxFilterLimit = 500

    # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
    maxSubsPerConnection = 20

    writePolicy {
        # If non-empty, path to an executable script that implements the writePolicy plugin logic
        plugin = ""
    }

    compression {
        # Use permessage-deflate compression if supported by client. Reduces bandwidth, but uses more CPU (restart required)
        enabled = true

        # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
        slidingWindow = true
    }

    logging {
        # Dump all incoming messages
        dumpInAll = false

        # Dump all incoming EVENT messages
        dumpInEvents = false

        # Dump all incoming REQ/CLOSE messages
        dumpInReqs = false

        # Log performance metrics for initial REQ database scans
        dbScanPerf = false
    }

    numThreads {
        # Ingester threads: route incoming requests, validate events/sigs (restart required)
        ingester = 3

        # reqWorker threads: Handle initial DB scan for events (restart required)
        reqWorker = 3

        # reqMonitor threads: Handle filtering of new events (restart required)
        reqMonitor = 3

        # yesstr threads: experimental yesstr protocol (restart required)
        yesstr = 1
    }
}

cmd/benchmark/docker-compose.yml (new file, 183 lines)
@@ -0,0 +1,183 @@
version: '3.8'

services:
  # Next.orly.dev relay (this repository)
  next-orly:
    build:
      context: ../..
      dockerfile: cmd/benchmark/Dockerfile.next-orly
    container_name: benchmark-next-orly
    environment:
      - DATA_DIR=/data
      - LISTEN=0.0.0.0
      - PORT=8080
      - LOG_LEVEL=info
    volumes:
      - ./data/next-orly:/data
    ports:
      - "8001:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Khatru with SQLite
  khatru-sqlite:
    build:
      context: ./external/khatru
      dockerfile: ../../Dockerfile.khatru-sqlite
    container_name: benchmark-khatru-sqlite
    environment:
      - DATABASE_TYPE=sqlite
      - DATABASE_PATH=/data/khatru.db
      - PORT=8080
    volumes:
      - ./data/khatru-sqlite:/data
    ports:
      - "8002:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Khatru with Badger
  khatru-badger:
    build:
      context: ./external/khatru
      dockerfile: ../../Dockerfile.khatru-badger
    container_name: benchmark-khatru-badger
    environment:
      - DATABASE_TYPE=badger
      - DATABASE_PATH=/data/badger
      - PORT=8080
    volumes:
      - ./data/khatru-badger:/data
    ports:
      - "8003:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Relayer basic example
  relayer-basic:
    build:
      context: ./external/relayer
      dockerfile: ../../Dockerfile.relayer-basic
    container_name: benchmark-relayer-basic
    environment:
      - PORT=8080
      - DATABASE_PATH=/data/relayer.db
    volumes:
      - ./data/relayer-basic:/data
    ports:
      - "8004:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Strfry
  strfry:
    build:
      context: ./external/strfry
      dockerfile: ../../Dockerfile.strfry
    container_name: benchmark-strfry
    environment:
      - STRFRY_DB_PATH=/data/strfry.lmdb
      - STRFRY_RELAY_PORT=8080
    volumes:
      - ./data/strfry:/data
      - ./configs/strfry.conf:/etc/strfry.conf
    ports:
      - "8005:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Nostr-rs-relay
  nostr-rs-relay:
    build:
      context: ./external/nostr-rs-relay
      dockerfile: ../../Dockerfile.nostr-rs-relay
    container_name: benchmark-nostr-rs-relay
    environment:
      - RUST_LOG=info
    volumes:
      - ./data/nostr-rs-relay:/data
      - ./configs/config.toml:/app/config.toml
    ports:
      - "8006:8080"
    networks:
      - benchmark-net
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # Benchmark runner
  benchmark-runner:
    build:
      context: ../..
      dockerfile: cmd/benchmark/Dockerfile.benchmark
    container_name: benchmark-runner
    depends_on:
      next-orly:
        condition: service_healthy
      khatru-sqlite:
        condition: service_healthy
      khatru-badger:
        condition: service_healthy
      relayer-basic:
        condition: service_healthy
      strfry:
        condition: service_healthy
      nostr-rs-relay:
        condition: service_healthy
    environment:
      - BENCHMARK_TARGETS=next-orly:8001,khatru-sqlite:8002,khatru-badger:8003,relayer-basic:8004,strfry:8005,nostr-rs-relay:8006
      - BENCHMARK_EVENTS=10000
      - BENCHMARK_WORKERS=8
      - BENCHMARK_DURATION=60s
    volumes:
      - ./reports:/reports
    networks:
      - benchmark-net
    command: >
      sh -c "
        echo 'Waiting for all relays to be ready...' &&
        sleep 30 &&
        echo 'Starting benchmark tests...' &&
        /app/benchmark-runner --output-dir=/reports
      "

networks:
  benchmark-net:
    driver: bridge

volumes:
  benchmark-data:
    driver: local

Submodule cmd/benchmark/external/khatru added at 668c41b988
573
cmd/benchmark/main.go
Normal file
573
cmd/benchmark/main.go
Normal file
@@ -0,0 +1,573 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"next.orly.dev/pkg/database"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/filter"
|
||||
"next.orly.dev/pkg/encoders/kind"
|
||||
"next.orly.dev/pkg/encoders/tag"
|
||||
"next.orly.dev/pkg/encoders/timestamp"
|
||||
)
|
||||
|
||||
type BenchmarkConfig struct {
|
||||
DataDir string
|
||||
NumEvents int
|
||||
ConcurrentWorkers int
|
||||
TestDuration time.Duration
|
||||
BurstPattern bool
|
||||
ReportInterval time.Duration
|
||||
}
|
||||
|
||||
type BenchmarkResult struct {
|
||||
TestName string
|
||||
Duration time.Duration
|
||||
TotalEvents int
|
||||
EventsPerSecond float64
|
||||
AvgLatency time.Duration
|
||||
P95Latency time.Duration
|
||||
P99Latency time.Duration
|
||||
SuccessRate float64
|
||||
ConcurrentWorkers int
|
||||
MemoryUsed uint64
|
||||
Errors []string
|
||||
}
|
||||
|
||||
type Benchmark struct {
|
||||
config *BenchmarkConfig
|
||||
db *database.D
|
||||
results []*BenchmarkResult
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func main() {
|
||||
config := parseFlags()
|
||||
|
||||
fmt.Printf("Starting Nostr Relay Benchmark\n")
|
||||
fmt.Printf("Data Directory: %s\n", config.DataDir)
|
||||
fmt.Printf(
|
||||
"Events: %d, Workers: %d, Duration: %v\n",
|
||||
config.NumEvents, config.ConcurrentWorkers, config.TestDuration,
|
||||
)
|
||||
|
||||
benchmark := NewBenchmark(config)
|
||||
defer benchmark.Close()
|
||||
|
||||
// Run benchmark tests
|
||||
benchmark.RunPeakThroughputTest()
|
||||
benchmark.RunBurstPatternTest()
|
||||
benchmark.RunMixedReadWriteTest()
|
||||
|
||||
// Generate report
|
||||
benchmark.GenerateReport()
|
||||
}
|
||||
|
||||
func parseFlags() *BenchmarkConfig {
|
||||
config := &BenchmarkConfig{}
|
||||
|
||||
flag.StringVar(
|
||||
&config.DataDir, "datadir", "/tmp/benchmark_db", "Database directory",
|
||||
)
|
||||
flag.IntVar(
|
||||
&config.NumEvents, "events", 10000, "Number of events to generate",
|
||||
)
|
||||
flag.IntVar(
|
||||
&config.ConcurrentWorkers, "workers", runtime.NumCPU(),
|
||||
"Number of concurrent workers",
|
||||
)
|
||||
flag.DurationVar(
|
||||
&config.TestDuration, "duration", 60*time.Second, "Test duration",
|
||||
)
|
||||
flag.BoolVar(
|
||||
&config.BurstPattern, "burst", true, "Enable burst pattern testing",
|
||||
)
|
||||
flag.DurationVar(
|
||||
&config.ReportInterval, "report-interval", 10*time.Second,
|
||||
"Report interval",
|
||||
)
|
||||
|
||||
flag.Parse()
|
||||
return config
|
||||
}
|
||||
|
||||
func NewBenchmark(config *BenchmarkConfig) *Benchmark {
|
||||
// Clean up existing data directory
|
||||
os.RemoveAll(config.DataDir)
|
||||
|
||||
ctx := context.Background()
|
||||
cancel := func() {}
|
||||
|
||||
db, err := database.New(ctx, cancel, config.DataDir, "info")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
|
||||
return &Benchmark{
|
||||
config: config,
|
||||
db: db,
|
||||
results: make([]*BenchmarkResult, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Benchmark) Close() {
|
||||
if b.db != nil {
|
||||
b.db.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Benchmark) RunPeakThroughputTest() {
|
||||
fmt.Println("\n=== Peak Throughput Test ===")
|
||||
|
||||
start := time.Now()
|
||||
var wg sync.WaitGroup
|
||||
var totalEvents int64
|
||||
var errors []error
|
||||
var latencies []time.Duration
|
||||
var mu sync.Mutex
|
||||
|
||||
events := b.generateEvents(b.config.NumEvents)
|
||||
eventChan := make(chan *event.E, len(events))
|
||||
|
||||
// Fill event channel
|
||||
for _, ev := range events {
|
||||
eventChan <- ev
|
||||
}
|
||||
close(eventChan)
|
||||
|
||||
// Start workers
|
||||
for i := 0; i < b.config.ConcurrentWorkers; i++ {
|
||||
wg.Add(1)
|
||||
go func(workerID int) {
|
||||
defer wg.Done()
|
||||
|
||||
ctx := context.Background()
|
||||
for ev := range eventChan {
|
||||
eventStart := time.Now()
|
||||
|
||||
_, _, err := b.db.SaveEvent(ctx, ev)
|
||||
latency := time.Since(eventStart)
|
||||
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
} else {
|
||||
totalEvents++
|
||||
latencies = append(latencies, latency)
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
duration := time.Since(start)
|
||||
|
||||
// Calculate metrics
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Peak Throughput",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalEvents),
|
||||
EventsPerSecond: float64(totalEvents) / duration.Seconds(),
|
||||
ConcurrentWorkers: b.config.ConcurrentWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
}
|
||||
|
||||
if len(latencies) > 0 {
|
||||
result.AvgLatency = calculateAvgLatency(latencies)
|
||||
result.P95Latency = calculatePercentileLatency(latencies, 0.95)
|
||||
result.P99Latency = calculatePercentileLatency(latencies, 0.99)
|
||||
}
|
||||
|
||||
result.SuccessRate = float64(totalEvents) / float64(b.config.NumEvents) * 100
|
||||
|
||||
for _, err := range errors {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
b.results = append(b.results, result)
|
||||
b.mu.Unlock()
|
||||
|
||||
fmt.Printf(
|
||||
"Events saved: %d/%d (%.1f%%)\n", totalEvents, b.config.NumEvents,
|
||||
result.SuccessRate,
|
||||
)
|
||||
fmt.Printf("Duration: %v\n", duration)
|
||||
fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
|
||||
fmt.Printf("Avg latency: %v\n", result.AvgLatency)
|
||||
fmt.Printf("P95 latency: %v\n", result.P95Latency)
|
||||
fmt.Printf("P99 latency: %v\n", result.P99Latency)
|
||||
}
|
||||
|
||||
func (b *Benchmark) RunBurstPatternTest() {
|
||||
fmt.Println("\n=== Burst Pattern Test ===")
|
||||
|
||||
start := time.Now()
|
||||
var totalEvents int64
|
||||
var errors []error
|
||||
var latencies []time.Duration
|
||||
var mu sync.Mutex
|
||||
|
||||
// Generate events for burst pattern
|
||||
events := b.generateEvents(b.config.NumEvents)
|
||||
|
||||
// Simulate burst pattern: high activity periods followed by quiet periods
|
||||
burstSize := b.config.NumEvents / 10 // 10% of events in each burst
|
||||
quietPeriod := 500 * time.Millisecond
|
||||
burstPeriod := 100 * time.Millisecond
|
||||
|
||||
ctx := context.Background()
|
||||
eventIndex := 0
|
||||
|
||||
for eventIndex < len(events) && time.Since(start) < b.config.TestDuration {
|
||||
// Burst period - send events rapidly
|
||||
burstStart := time.Now()
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for i := 0; i < burstSize && eventIndex < len(events); i++ {
|
||||
wg.Add(1)
|
||||
go func(ev *event.E) {
|
||||
defer wg.Done()
|
||||
|
||||
eventStart := time.Now()
|
||||
_, _, err := b.db.SaveEvent(ctx, ev)
|
||||
latency := time.Since(eventStart)
|
||||
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
} else {
|
||||
totalEvents++
|
||||
latencies = append(latencies, latency)
|
||||
}
|
||||
mu.Unlock()
|
||||
}(events[eventIndex])
|
||||
|
||||
eventIndex++
|
||||
time.Sleep(burstPeriod / time.Duration(burstSize))
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
fmt.Printf(
|
||||
"Burst completed: %d events in %v\n", burstSize,
|
||||
time.Since(burstStart),
|
||||
)
|
||||
|
||||
// Quiet period
|
||||
time.Sleep(quietPeriod)
|
||||
}
|
||||
|
||||
duration := time.Since(start)
|
||||
|
||||
// Calculate metrics
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Burst Pattern",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalEvents),
|
||||
EventsPerSecond: float64(totalEvents) / duration.Seconds(),
|
||||
ConcurrentWorkers: b.config.ConcurrentWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
}
|
||||
|
||||
if len(latencies) > 0 {
|
||||
result.AvgLatency = calculateAvgLatency(latencies)
|
||||
result.P95Latency = calculatePercentileLatency(latencies, 0.95)
|
||||
result.P99Latency = calculatePercentileLatency(latencies, 0.99)
|
||||
}
|
||||
|
||||
result.SuccessRate = float64(totalEvents) / float64(eventIndex) * 100
|
||||
|
||||
for _, err := range errors {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
b.results = append(b.results, result)
|
||||
b.mu.Unlock()
|
||||
|
||||
fmt.Printf("Burst test completed: %d events in %v\n", totalEvents, duration)
|
||||
fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
|
||||
}
|
||||
|
||||
func (b *Benchmark) RunMixedReadWriteTest() {
|
||||
fmt.Println("\n=== Mixed Read/Write Test ===")
|
||||
|
||||
start := time.Now()
|
||||
var totalWrites, totalReads int64
|
||||
var writeLatencies, readLatencies []time.Duration
|
||||
var errors []error
|
||||
var mu sync.Mutex
|
||||
|
||||
// Pre-populate with some events for reading
|
||||
seedEvents := b.generateEvents(1000)
|
||||
ctx := context.Background()
|
||||
|
||||
fmt.Println("Pre-populating database for read tests...")
|
||||
for _, ev := range seedEvents {
|
||||
b.db.SaveEvent(ctx, ev)
|
||||
}
|
||||
|
||||
events := b.generateEvents(b.config.NumEvents)
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Start mixed read/write workers
|
||||
for i := 0; i < b.config.ConcurrentWorkers; i++ {
|
||||
wg.Add(1)
|
||||
go func(workerID int) {
|
||||
defer wg.Done()
|
||||
|
||||
eventIndex := workerID
|
||||
for time.Since(start) < b.config.TestDuration && eventIndex < len(events) {
|
||||
// Alternate between write and read operations
|
||||
if eventIndex%2 == 0 {
|
||||
// Write operation
|
||||
writeStart := time.Now()
|
||||
_, _, err := b.db.SaveEvent(ctx, events[eventIndex])
|
||||
writeLatency := time.Since(writeStart)
|
||||
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
} else {
|
||||
totalWrites++
|
||||
writeLatencies = append(writeLatencies, writeLatency)
|
||||
}
|
||||
mu.Unlock()
|
||||
} else {
|
||||
// Read operation
|
||||
readStart := time.Now()
|
||||
f := filter.New()
|
||||
f.Kinds = kind.NewS(kind.TextNote)
|
||||
limit := uint(10)
|
||||
f.Limit = &limit
|
||||
_, err := b.db.GetSerialsFromFilter(f)
|
||||
readLatency := time.Since(readStart)
|
||||
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
} else {
|
||||
totalReads++
|
||||
readLatencies = append(readLatencies, readLatency)
|
||||
}
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
eventIndex += b.config.ConcurrentWorkers
|
||||
time.Sleep(10 * time.Millisecond) // Small delay between operations
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
duration := time.Since(start)
|
||||
|
||||
// Calculate metrics
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Mixed Read/Write",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalWrites + totalReads),
|
||||
EventsPerSecond: float64(totalWrites+totalReads) / duration.Seconds(),
|
||||
ConcurrentWorkers: b.config.ConcurrentWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
}
|
||||
|
||||
// Calculate combined latencies for overall metrics
|
||||
allLatencies := append(writeLatencies, readLatencies...)
|
||||
if len(allLatencies) > 0 {
|
||||
result.AvgLatency = calculateAvgLatency(allLatencies)
|
||||
result.P95Latency = calculatePercentileLatency(allLatencies, 0.95)
|
||||
result.P99Latency = calculatePercentileLatency(allLatencies, 0.99)
|
||||
}
|
||||
|
||||
result.SuccessRate = float64(totalWrites+totalReads) / float64(len(events)) * 100
|
||||
|
||||
for _, err := range errors {
|
||||
result.Errors = append(result.Errors, err.Error())
|
||||
}
|
||||
|
||||
b.mu.Lock()
|
||||
b.results = append(b.results, result)
|
||||
b.mu.Unlock()
|
||||
|
||||
fmt.Printf(
|
||||
"Mixed test completed: %d writes, %d reads in %v\n", totalWrites,
|
||||
totalReads, duration,
|
||||
)
|
||||
fmt.Printf("Combined ops/sec: %.2f\n", result.EventsPerSecond)
|
||||
}
|
||||
|
||||
func (b *Benchmark) generateEvents(count int) []*event.E {
|
||||
events := make([]*event.E, count)
|
||||
now := timestamp.Now()
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
ev := event.New()
|
||||
|
||||
// Generate random 32-byte ID
|
||||
ev.ID = make([]byte, 32)
|
||||
rand.Read(ev.ID)
|
||||
|
||||
// Generate random 32-byte pubkey
|
||||
ev.Pubkey = make([]byte, 32)
|
||||
rand.Read(ev.Pubkey)
|
||||
|
||||
ev.CreatedAt = now.I64()
|
||||
ev.Kind = kind.TextNote.K
|
||||
ev.Content = []byte(fmt.Sprintf(
|
||||
"This is test event number %d with some content", i,
|
||||
))
|
||||
|
||||
// Create tags using NewFromBytesSlice
|
||||
ev.Tags = tag.NewS(
|
||||
tag.NewFromBytesSlice([]byte("t"), []byte("benchmark")),
|
||||
tag.NewFromBytesSlice(
|
||||
[]byte("e"), []byte(fmt.Sprintf("ref_%d", i%50)),
|
||||
),
|
||||
)
|
||||
|
||||
events[i] = ev
|
||||
}
|
||||
|
||||
return events
|
||||
}
|
||||
|
||||
func (b *Benchmark) GenerateReport() {
|
||||
fmt.Println("\n" + strings.Repeat("=", 80))
|
||||
fmt.Println("BENCHMARK REPORT")
|
||||
fmt.Println(strings.Repeat("=", 80))
|
||||
|
||||
b.mu.RLock()
|
||||
defer b.mu.RUnlock()
|
||||
|
||||
for _, result := range b.results {
|
||||
fmt.Printf("\nTest: %s\n", result.TestName)
|
||||
fmt.Printf("Duration: %v\n", result.Duration)
|
||||
fmt.Printf("Total Events: %d\n", result.TotalEvents)
|
||||
fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
|
||||
fmt.Printf("Success Rate: %.1f%%\n", result.SuccessRate)
|
||||
fmt.Printf("Concurrent Workers: %d\n", result.ConcurrentWorkers)
|
||||
fmt.Printf("Memory Used: %d MB\n", result.MemoryUsed/(1024*1024))
|
||||
fmt.Printf("Avg Latency: %v\n", result.AvgLatency)
|
||||
fmt.Printf("P95 Latency: %v\n", result.P95Latency)
|
||||
fmt.Printf("P99 Latency: %v\n", result.P99Latency)
|
||||
|
||||
if len(result.Errors) > 0 {
|
||||
fmt.Printf("Errors (%d):\n", len(result.Errors))
|
||||
for i, err := range result.Errors {
|
||||
if i < 5 { // Show first 5 errors
|
||||
fmt.Printf(" - %s\n", err)
|
||||
}
|
||||
}
|
||||
if len(result.Errors) > 5 {
|
||||
fmt.Printf(" ... and %d more errors\n", len(result.Errors)-5)
|
||||
}
|
||||
}
|
||||
fmt.Println(strings.Repeat("-", 40))
|
||||
}
|
||||
|
||||
// Save report to file
|
||||
reportPath := filepath.Join(b.config.DataDir, "benchmark_report.txt")
|
||||
b.saveReportToFile(reportPath)
|
||||
fmt.Printf("\nReport saved to: %s\n", reportPath)
|
||||
}
|
||||
|
||||
func (b *Benchmark) saveReportToFile(path string) error {
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
file.WriteString("NOSTR RELAY BENCHMARK REPORT\n")
|
||||
file.WriteString("============================\n\n")
|
||||
file.WriteString(
|
||||
fmt.Sprintf(
|
||||
"Generated: %s\n", time.Now().Format(time.RFC3339),
|
||||
),
|
||||
)
|
||||
file.WriteString(fmt.Sprintf("Relay: next.orly.dev\n"))
|
||||
file.WriteString(fmt.Sprintf("Database: BadgerDB\n"))
|
||||
file.WriteString(fmt.Sprintf("Workers: %d\n", b.config.ConcurrentWorkers))
|
||||
file.WriteString(
|
||||
fmt.Sprintf(
|
||||
"Test Duration: %v\n\n", b.config.TestDuration,
|
||||
),
|
||||
)
|
||||
|
||||
b.mu.RLock()
|
||||
defer b.mu.RUnlock()
|
||||
|
||||
for _, result := range b.results {
|
||||
file.WriteString(fmt.Sprintf("Test: %s\n", result.TestName))
|
||||
file.WriteString(fmt.Sprintf("Duration: %v\n", result.Duration))
|
||||
file.WriteString(fmt.Sprintf("Events: %d\n", result.TotalEvents))
|
||||
file.WriteString(
|
||||
fmt.Sprintf(
|
||||
"Events/sec: %.2f\n", result.EventsPerSecond,
|
||||
),
|
||||
)
|
||||
file.WriteString(
|
||||
fmt.Sprintf(
|
||||
"Success Rate: %.1f%%\n", result.SuccessRate,
|
||||
),
|
||||
)
|
||||
file.WriteString(fmt.Sprintf("Avg Latency: %v\n", result.AvgLatency))
|
||||
file.WriteString(fmt.Sprintf("P95 Latency: %v\n", result.P95Latency))
|
||||
file.WriteString(fmt.Sprintf("P99 Latency: %v\n", result.P99Latency))
|
||||
file.WriteString(
|
||||
fmt.Sprintf(
|
||||
"Memory: %d MB\n", result.MemoryUsed/(1024*1024),
|
||||
),
|
||||
)
|
||||
file.WriteString("\n")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
|
||||
func calculateAvgLatency(latencies []time.Duration) time.Duration {
|
||||
if len(latencies) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var total time.Duration
|
||||
for _, l := range latencies {
|
||||
total += l
|
||||
}
|
||||
return total / time.Duration(len(latencies))
|
||||
}
|
||||
|
||||
func calculatePercentileLatency(
	latencies []time.Duration, percentile float64,
) time.Duration {
	if len(latencies) == 0 {
		return 0
	}

	// Percentiles are only meaningful over ordered samples, so sort a copy
	// (leaving the caller's slice untouched); requires the standard "sort" import.
	sorted := make([]time.Duration, len(latencies))
	copy(sorted, latencies)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })

	index := int(float64(len(sorted)) * percentile)
	if index >= len(sorted) {
		index = len(sorted) - 1
	}
	return sorted[index]
}

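// getMemUsage returns the bytes of heap currently allocated by this process,
// as reported by the Go runtime.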
func getMemUsage() uint64 {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	return m.Alloc
}

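For reference, a minimal sketch of how these helpers combine when summarizing a run; the function below is illustrative only (it is not part of the commit) and assumes the three helpers above are in scope:

// summarizeLatencies is a hypothetical convenience wrapper around the helpers
// above; 0.95 and 0.99 match the P95/P99 fields reported elsewhere in this file.
func summarizeLatencies(latencies []time.Duration) {
	fmt.Printf(
		"avg=%v p95=%v p99=%v\n",
		calculateAvgLatency(latencies),
		calculatePercentileLatency(latencies, 0.95),
		calculatePercentileLatency(latencies, 0.99),
	)
}
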
368
cmd/benchmark/setup-external-relays.sh
Executable file
368
cmd/benchmark/setup-external-relays.sh
Executable file
@@ -0,0 +1,368 @@
#!/bin/bash

# Setup script for downloading and configuring external relay repositories
# for benchmarking

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
EXTERNAL_DIR="${SCRIPT_DIR}/external"

echo "Setting up external relay repositories for benchmarking..."

# Create external directory
mkdir -p "${EXTERNAL_DIR}"

# Function to clone or update a repository
clone_or_update() {
    local repo_url="$1"
    local repo_dir="$2"
    local repo_name="$3"

    echo "Setting up ${repo_name}..."

    if [ -d "${repo_dir}" ]; then
        echo "  ${repo_name} already exists, updating..."
        cd "${repo_dir}"
        git pull origin main 2>/dev/null || git pull origin master 2>/dev/null || true
        cd - > /dev/null
    else
        echo "  Cloning ${repo_name}..."
        git clone "${repo_url}" "${repo_dir}"
    fi
}

# Clone khatru
clone_or_update "https://github.com/fiatjaf/khatru.git" "${EXTERNAL_DIR}/khatru" "Khatru"

# Clone relayer
clone_or_update "https://github.com/fiatjaf/relayer.git" "${EXTERNAL_DIR}/relayer" "Relayer"

# Clone strfry
clone_or_update "https://github.com/hoytech/strfry.git" "${EXTERNAL_DIR}/strfry" "Strfry"

# Clone nostr-rs-relay
clone_or_update "https://git.sr.ht/~gheartsfield/nostr-rs-relay" "${EXTERNAL_DIR}/nostr-rs-relay" "Nostr-rs-relay"

echo "Creating Dockerfiles for external relays..."
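# Note: each Dockerfile generated below copies the entire build context and
# builds one example from it, so it is meant to be built with the matching
# repository under external/ as the Docker build context (as wired up by the
# docker-compose setup referenced at the end of this script).
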
# Create Dockerfile for Khatru SQLite
cat > "${SCRIPT_DIR}/Dockerfile.khatru-sqlite" << 'EOF'
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic-sqlite3 example
RUN cd examples/basic-sqlite3 && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o khatru-sqlite .

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic-sqlite3/khatru-sqlite /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/khatru.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-sqlite"]
EOF

# Create Dockerfile for Khatru Badger
cat > "${SCRIPT_DIR}/Dockerfile.khatru-badger" << 'EOF'
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates

WORKDIR /build
COPY . .

# Build the basic-badger example
RUN cd examples/basic-badger && \
    go mod tidy && \
    CGO_ENABLED=0 go build -o khatru-badger .

FROM alpine:latest
RUN apk --no-cache add ca-certificates wget
WORKDIR /app
COPY --from=builder /build/examples/basic-badger/khatru-badger /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/badger
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/khatru-badger"]
EOF

# Create Dockerfile for Relayer basic example
cat > "${SCRIPT_DIR}/Dockerfile.relayer-basic" << 'EOF'
FROM golang:1.25-alpine AS builder

RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev

WORKDIR /build
COPY . .

# Build the basic example
RUN cd examples/basic && \
    go mod tidy && \
    CGO_ENABLED=1 go build -o relayer-basic .

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/examples/basic/relayer-basic /app/
RUN mkdir -p /data
EXPOSE 8080
ENV DATABASE_PATH=/data/relayer.db
ENV PORT=8080
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
CMD ["/app/relayer-basic"]
EOF

# Create Dockerfile for Strfry
cat > "${SCRIPT_DIR}/Dockerfile.strfry" << 'EOF'
FROM ubuntu:22.04 AS builder

ENV DEBIAN_FRONTEND=noninteractive

# Install build dependencies
RUN apt-get update && apt-get install -y \
    git \
    build-essential \
    liblmdb-dev \
    libsecp256k1-dev \
    pkg-config \
    libtool \
    autoconf \
    automake \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /build
COPY . .

# Build strfry
RUN make setup-golpe && \
    make -j$(nproc)

FROM ubuntu:22.04
RUN apt-get update && apt-get install -y \
    liblmdb0 \
    libsecp256k1-0 \
    curl \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
COPY --from=builder /build/strfry /app/
RUN mkdir -p /data

EXPOSE 8080
ENV STRFRY_DB_PATH=/data/strfry.lmdb
ENV STRFRY_RELAY_PORT=8080

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8080 || exit 1

CMD ["/app/strfry", "relay"]
EOF

# Create Dockerfile for nostr-rs-relay
cat > "${SCRIPT_DIR}/Dockerfile.nostr-rs-relay" << 'EOF'
FROM rust:1.70-alpine AS builder

RUN apk add --no-cache musl-dev sqlite-dev

WORKDIR /build
COPY . .

# Build the relay
RUN cargo build --release

FROM alpine:latest
RUN apk --no-cache add ca-certificates sqlite wget
WORKDIR /app
COPY --from=builder /build/target/release/nostr-rs-relay /app/
RUN mkdir -p /data

EXPOSE 8080
ENV RUST_LOG=info

HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1

CMD ["/app/nostr-rs-relay"]
EOF

echo "Creating configuration files..."

# Create configs directory
mkdir -p "${SCRIPT_DIR}/configs"

# Create strfry configuration
cat > "${SCRIPT_DIR}/configs/strfry.conf" << 'EOF'
##
## Default strfry config
##

# Directory that contains the strfry LMDB database (restart required)
db = "/data/strfry.lmdb"

dbParams {
    # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
    maxreaders = 256

    # Size of mmap to use when loading LMDB (default is 1TB, which is probably reasonable) (restart required)
    mapsize = 1099511627776
}

relay {
    # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
    bind = "0.0.0.0"

    # Port to open for the nostr websocket protocol (restart required)
    port = 8080

    # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
    nofiles = 1000000

    # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
    realIpHeader = ""

    info {
        # NIP-11: Name of this server. Short/descriptive (< 30 characters)
        name = "strfry benchmark"

        # NIP-11: Detailed description of this server, free-form
        description = "A strfry relay for benchmarking"

        # NIP-11: Administrative pubkey, for contact purposes
        pubkey = ""

        # NIP-11: Alternative contact for this server
        contact = ""
    }

    # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
    maxWebsocketPayloadSize = 131072

    # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
    autoPingSeconds = 55

    # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy) (restart required)
    enableTcpKeepalive = false

    # How much uninterrupted CPU time a REQ query should get during its DB scan
    queryTimesliceBudgetMicroseconds = 10000

    # Maximum records that can be returned per filter
    maxFilterLimit = 500

    # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
    maxSubsPerConnection = 20

    writePolicy {
        # If non-empty, path to an executable script that implements the writePolicy plugin logic
        plugin = ""
    }

    compression {
        # Use permessage-deflate compression if supported by client. Reduces bandwidth, but uses more CPU (restart required)
        enabled = true

        # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
        slidingWindow = true
    }

    logging {
        # Dump all incoming messages
        dumpInAll = false

        # Dump all incoming EVENT messages
        dumpInEvents = false

        # Dump all incoming REQ/CLOSE messages
        dumpInReqs = false

        # Log performance metrics for initial REQ database scans
        dbScanPerf = false
    }

    numThreads {
        # Ingester threads: route incoming requests, validate events/sigs (restart required)
        ingester = 3

        # reqWorker threads: Handle initial DB scan for events (restart required)
        reqWorker = 3

        # reqMonitor threads: Handle filtering of new events (restart required)
        reqMonitor = 3

        # yesstr threads: experimental yesstr protocol (restart required)
        yesstr = 1
    }
}
EOF

# Create nostr-rs-relay configuration
cat > "${SCRIPT_DIR}/configs/config.toml" << 'EOF'
[info]
relay_url = "ws://localhost:8080"
name = "nostr-rs-relay benchmark"
description = "A nostr-rs-relay for benchmarking"
pubkey = ""
contact = ""

[database]
data_directory = "/data"
in_memory = false
engine = "sqlite"

[network]
port = 8080
address = "0.0.0.0"

[limits]
messages_per_sec = 0
subscriptions_per_min = 0
max_event_bytes = 65535
max_ws_message_bytes = 131072
max_ws_frame_bytes = 131072

[authorization]
pubkey_whitelist = []

[verified_users]
mode = "passive"
domain_whitelist = []
domain_blacklist = []

[pay_to_relay]
enabled = false

[options]
reject_future_seconds = 30
EOF

echo "Creating data directories..."
mkdir -p "${SCRIPT_DIR}/data"/{next-orly,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay}
mkdir -p "${SCRIPT_DIR}/reports"

echo "Setup complete!"
echo ""
echo "External relay repositories have been cloned to: ${EXTERNAL_DIR}"
echo "Dockerfiles have been created for all relay implementations"
echo "Configuration files have been created in: ${SCRIPT_DIR}/configs"
echo "Data directories have been created in: ${SCRIPT_DIR}/data"
echo ""
echo "To run the benchmark:"
echo "  cd ${SCRIPT_DIR}"
echo "  docker-compose up --build"
echo ""
echo "Reports will be generated in: ${SCRIPT_DIR}/reports"