Compare commits
120 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
dda39de5a5
|
|||
|
6fc3e9a049
|
|||
|
ffcd0bdcc0
|
|||
|
3525dd2b6c
|
|||
|
66be769f7a
|
|||
| 1794a881a2 | |||
|
a2cce3f38b
|
|||
|
04d789b23b
|
|||
|
2148c597aa
|
|||
|
f8c30e2213
|
|||
|
2ef76884bd
|
|||
|
a4355f4963
|
|||
|
8fa3e2ad80
|
|||
| 0807ce3672 | |||
| d4f7c0b07f | |||
| 463bce47b0 | |||
|
|
289f962420 | ||
|
619198d1b5
|
|||
|
e94d68c3b2
|
|||
|
bb8f070992
|
|||
|
b6670d952d
|
|||
|
d2d2ea3fa0
|
|||
|
7d4f90f0de
|
|||
|
667890561a
|
|||
|
85fe316fdb
|
|||
|
1535f10343
|
|||
|
dd80cc767d
|
|||
|
423270402b
|
|||
|
e929c09476
|
|||
|
429c8acaef
|
|||
|
f3f933675e
|
|||
|
b761a04422
|
|||
|
8d61b8e44c
|
|||
|
19e265bf39
|
|||
|
c41bcb2652
|
|||
|
a4dd177eb5
|
|||
|
9020bb8164
|
|||
| 3fe4537cd9 | |||
|
7ec8698b62
|
|||
|
2514f875e6
|
|||
|
a6350c8e80
|
|||
|
6c3d22cb38
|
|||
|
8adb129fbe
|
|||
|
fd698af1ca
|
|||
|
ac4fd506e5
|
|||
|
8898b20d4b
|
|||
|
b351d0fb78
|
|||
|
9c8ff2976d
|
|||
|
a7dd958585
|
|||
|
8eb5b839b0
|
|||
|
e57169eeae
|
|||
|
109326dfa3
|
|||
| 52911354a7 | |||
|
b74f4757e7
|
|||
| 2d0ebfe032 | |||
| fff61ceca1 | |||
| b7b7dc7353 | |||
| 996fb3aeb7 | |||
| b9a713d81d | |||
|
1e6ce84e26
|
|||
|
0361f3843a
|
|||
|
4317e8ba4a
|
|||
|
9094f36d6e
|
|||
|
9314467f55
|
|||
|
19e6520587
|
|||
|
9e59a6c315
|
|||
|
9449435c65
|
|||
|
df8e66d9a7
|
|||
|
96eab2270d
|
|||
|
c0bd7d8da3
|
|||
|
1ffb7afb01
|
|||
|
ffa9d85ba5
|
|||
|
1223b1b20e
|
|||
|
deb56664e2
|
|||
|
1641d18993
|
|||
|
eab5d236db
|
|||
|
f3e7188816
|
|||
|
39957c2ebf
|
|||
|
4528d44fc7
|
|||
|
7b19db5806
|
|||
|
14d4417aec
|
|||
|
bdda37732c
|
|||
|
0024611179
|
|||
|
699ba0554e
|
|||
|
c62d685fa4
|
|||
|
6935575654
|
|||
|
80043b46b3
|
|||
|
c68654dccc
|
|||
|
72c6d16739
|
|||
|
366d35ec28
|
|||
|
c36cec44c4
|
|||
|
c91a283520
|
|||
|
bb0693f455
|
|||
|
0d7943be89
|
|||
|
978d9b88cd
|
|||
|
bbfb9b7300
|
|||
|
5b06906673
|
|||
|
f5c3da9bc3
|
|||
|
c608e1075b
|
|||
|
5237fb1a1f
|
|||
|
6901950059
|
|||
|
251fc17933
|
|||
|
fdb9e18b03
|
|||
|
67552edf04
|
|||
|
f25b760d84
|
|||
|
bfa38822e0
|
|||
|
eac5e05e77
|
|||
|
b72f2dd51e
|
|||
|
cc32703be0
|
|||
|
994d26bb09
|
|||
|
ea2d833e66
|
|||
|
af04f89df8
|
|||
|
fab2f104ff
|
|||
|
06940efcec
|
|||
|
0ba36a3f67
|
|||
|
d4bee83992
|
|||
|
aabb536d13
|
|||
|
498073460c
|
|||
|
11d378bfc3
|
|||
|
9b7e8d28de
|
109
.github/workflows/go.yml
vendored
Normal file
109
.github/workflows/go.yml
vendored
Normal file
@@ -0,0 +1,109 @@
|
||||
# This workflow will build a golang project
|
||||
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
|
||||
#
|
||||
# Release Process:
|
||||
# 1. Update the version in the pkg/version/version file (e.g. v1.2.3)
|
||||
# 2. Create and push a tag matching the version:
|
||||
# git tag v1.2.3
|
||||
# git push origin v1.2.3
|
||||
# 3. The workflow will automatically:
|
||||
# - Build binaries for multiple platforms (Linux, macOS, Windows)
|
||||
# - Create a GitHub release with the binaries
|
||||
# - Generate release notes
|
||||
|
||||
name: Go
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v[0-9]+.[0-9]+.[0-9]+'
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.25'
|
||||
|
||||
- name: Install libsecp256k1
|
||||
run: ./scripts/ubuntu_install_libsecp256k1.sh
|
||||
|
||||
- name: Build with cgo
|
||||
run: go build -v ./...
|
||||
|
||||
- name: Test with cgo
|
||||
run: go test -v ./...
|
||||
|
||||
- name: Set CGO off
|
||||
run: echo "CGO_ENABLED=0" >> $GITHUB_ENV
|
||||
|
||||
- name: Build
|
||||
run: go build -v ./...
|
||||
|
||||
- name: Test
|
||||
run: go test -v ./...
|
||||
|
||||
release:
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.25'
|
||||
|
||||
- name: Install libsecp256k1
|
||||
run: ./scripts/ubuntu_install_libsecp256k1.sh
|
||||
|
||||
- name: Build Release Binaries
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
run: |
|
||||
# Extract version from tag (e.g., v1.2.3 -> 1.2.3)
|
||||
VERSION=${GITHUB_REF#refs/tags/v}
|
||||
echo "Building release binaries for version $VERSION"
|
||||
|
||||
# Create directory for binaries
|
||||
mkdir -p release-binaries
|
||||
|
||||
# Build for different platforms
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=linux GOARCH=amd64 CGO_ENABLED=1 go build --ldflags '-extldflags "-static"' -o release-binaries/orly-${VERSION}-linux-amd64 .
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=linux GOARCH=arm64 CGO_ENABLED=1 go build --ldflags '-extldflags "-static"' -o release-binaries/orly-${VERSION}-linux-arm64 .
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-darwin-amd64 .
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=darwin GOARCH=arm64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-darwin-arm64 .
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-windows-amd64.exe .
|
||||
|
||||
# Build cmd executables
|
||||
for cmd in lerproxy nauth nurl vainstr walletcli; do
|
||||
echo "Building $cmd"
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=linux GOARCH=amd64 CGO_ENABLED=1 go build --ldflags '-extldflags "-static"' -o release-binaries/${cmd}-${VERSION}-linux-amd64 ./cmd/${cmd}
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=linux GOARCH=arm64 CGO_ENABLED=1 go build --ldflags '-extldflags "-static"' -o release-binaries/${cmd}-${VERSION}-linux-arm64 ./cmd/${cmd}
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build -o release-binaries/${cmd}-${VERSION}-darwin-amd64 ./cmd/${cmd}
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=darwin GOARCH=arm64 CGO_ENABLED=0 go build -o release-binaries/${cmd}-${VERSION}-darwin-arm64 ./cmd/${cmd}
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o release-binaries/${cmd}-${VERSION}-windows-amd64.exe ./cmd/${cmd}
|
||||
done
|
||||
|
||||
# Create checksums
|
||||
cd release-binaries
|
||||
sha256sum * > SHA256SUMS.txt
|
||||
cd ..
|
||||
|
||||
- name: Create GitHub Release
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
uses: softprops/action-gh-release@v1
|
||||
with:
|
||||
files: release-binaries/*
|
||||
draft: false
|
||||
prerelease: false
|
||||
generate_release_notes: true
|
||||
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -80,11 +80,13 @@ node_modules/**
|
||||
!*.nix
|
||||
!license
|
||||
!readme
|
||||
!*.ico
|
||||
!.idea/*
|
||||
!*.xml
|
||||
!.name
|
||||
!.gitignore
|
||||
!version
|
||||
!out.jsonl
|
||||
# ...even if they are in subdirectories
|
||||
!*/
|
||||
/blocklist.json
|
||||
@@ -102,3 +104,6 @@ pkg/database/testrealy
|
||||
/.idea/codeStyles/codeStyleConfig.xml
|
||||
/.idea/material_theme_project_new.xml
|
||||
/.idea/orly.iml
|
||||
/.idea/go.imports.xml
|
||||
/.idea/inspectionProfiles/Project_Default.xml
|
||||
/.idea/.name
|
||||
|
||||
232
cmd/benchmark/README.md
Normal file
232
cmd/benchmark/README.md
Normal file
@@ -0,0 +1,232 @@
|
||||
# Nostr Relay Benchmark Suite
|
||||
|
||||
A comprehensive performance benchmarking suite for Nostr relay implementations, featuring event publishing tests, query profiling, load simulation, and timing instrumentation.
|
||||
|
||||
## Features
|
||||
|
||||
- **Multi-relay comparison benchmarks** - Compare Khatru, Strfry, Relayer, and Orly
|
||||
- **Publishing performance testing** - Measure event ingestion rates and bandwidth
|
||||
- **Query profiling** - Test various filter patterns and query speeds
|
||||
- **Load pattern simulation** - Constant, spike, burst, sine, and ramp patterns
|
||||
- **Timing instrumentation** - Track full event lifecycle and identify bottlenecks
|
||||
- **Concurrent stress testing** - Multiple publishers with connection pooling
|
||||
- **Production-grade event generation** - Proper secp256k1 signatures and UTF-8 content
|
||||
- **Comparative reporting** - Markdown, JSON, and CSV format reports
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Build the benchmark tool
|
||||
cd cmd/benchmark
|
||||
CGO_LDFLAGS="-L/usr/local/lib" PKG_CONFIG_PATH="/usr/local/lib/pkgconfig" go build -o benchmark .
|
||||
|
||||
# Run simple benchmark
|
||||
./benchmark --relay ws://localhost:7447 --events 1000 --queries 50
|
||||
|
||||
# Run full comparison benchmark
|
||||
./setup_relays.sh # Setup all relay implementations
|
||||
./run_all_benchmarks.sh # Run benchmarks on all relays
|
||||
```
|
||||
|
||||
## Latest Benchmark Results
|
||||
|
||||
| Relay | Publishing (events/sec) | Querying (queries/sec) | Backend |
|
||||
|-------|------------------------|------------------------|---------|
|
||||
| **Khatru** | 9,570 | 4.77 | SQLite |
|
||||
| **Strfry** | 1,338 | 266.16 | LMDB |
|
||||
| **Relayer** | 1,122 | 623.36 | PostgreSQL |
|
||||
| **Orly** | 668 | 4.92 | Badger |
|
||||
|
||||
See [RELAY_COMPARISON_RESULTS.md](RELAY_COMPARISON_RESULTS.md) for detailed analysis.
|
||||
|
||||
## Core Benchmarking
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```bash
|
||||
# Run a full benchmark (publish and query)
|
||||
./benchmark --relay ws://localhost:7447 --events 10000 --queries 100
|
||||
|
||||
# Benchmark only publishing
|
||||
./benchmark --relay ws://localhost:7447 --events 50000 --concurrency 20 --skip-query
|
||||
|
||||
# Benchmark only querying
|
||||
./benchmark --relay ws://localhost:7447 --queries 500 --skip-publish
|
||||
|
||||
# Use custom event sizes
|
||||
./benchmark --relay ws://localhost:7447 --events 10000 --size 2048
|
||||
```
|
||||
|
||||
### Advanced Features
|
||||
|
||||
```bash
|
||||
# Query profiling with subscription testing
|
||||
./benchmark --profile --profile-subs --sub-count 100 --sub-duration 30s
|
||||
|
||||
# Load pattern simulation
|
||||
./benchmark --load --load-pattern spike --load-duration 60s --load-base 50 --load-peak 200
|
||||
|
||||
# Full load test suite
|
||||
./benchmark --load-suite --load-constraints
|
||||
|
||||
# Timing instrumentation
|
||||
./benchmark --timing --timing-events 100 --timing-subs --timing-duration 10s
|
||||
|
||||
# Generate comparative reports
|
||||
./benchmark --report --report-format markdown --report-title "Production Benchmark"
|
||||
```
|
||||
|
||||
## Command Line Options
|
||||
|
||||
### Basic Options
|
||||
- `--relay`: Relay URL to benchmark (default: ws://localhost:7447)
|
||||
- `--events`: Number of events to publish (default: 10000)
|
||||
- `--size`: Average size of event content in bytes (default: 1024)
|
||||
- `--concurrency`: Number of concurrent publishers (default: 10)
|
||||
- `--queries`: Number of queries to execute (default: 100)
|
||||
- `--query-limit`: Limit for each query (default: 100)
|
||||
- `--skip-publish`: Skip the publishing phase
|
||||
- `--skip-query`: Skip the query phase
|
||||
- `-v`: Enable verbose output
|
||||
|
||||
### Multi-Relay Options
|
||||
- `--multi-relay`: Use multi-relay harness
|
||||
- `--relay-bin`: Path to relay binary
|
||||
- `--install`: Install relay dependencies and binaries
|
||||
- `--install-secp`: Install only secp256k1 library
|
||||
- `--work-dir`: Working directory for builds (default: /tmp/relay-build)
|
||||
- `--install-dir`: Installation directory for binaries (default: /usr/local/bin)
|
||||
|
||||
### Profiling Options
|
||||
- `--profile`: Run query performance profiling
|
||||
- `--profile-subs`: Profile subscription performance
|
||||
- `--sub-count`: Number of concurrent subscriptions (default: 100)
|
||||
- `--sub-duration`: Duration for subscription profiling (default: 30s)
|
||||
|
||||
### Load Testing Options
|
||||
- `--load`: Run load pattern simulation
|
||||
- `--load-pattern`: Pattern type: constant, spike, burst, sine, ramp (default: constant)
|
||||
- `--load-duration`: Duration for load test (default: 60s)
|
||||
- `--load-base`: Base load in events/sec (default: 50)
|
||||
- `--load-peak`: Peak load in events/sec (default: 200)
|
||||
- `--load-pool`: Connection pool size (default: 10)
|
||||
- `--load-suite`: Run comprehensive load test suite
|
||||
- `--load-constraints`: Test under resource constraints
|
||||
|
||||
### Timing Options
|
||||
- `--timing`: Run end-to-end timing instrumentation
|
||||
- `--timing-events`: Number of events for timing (default: 100)
|
||||
- `--timing-subs`: Test subscription timing
|
||||
- `--timing-duration`: Duration for subscription timing (default: 10s)
|
||||
|
||||
### Report Options
|
||||
- `--report`: Generate comparative report
|
||||
- `--report-format`: Output format: markdown, json, csv (default: markdown)
|
||||
- `--report-file`: Output filename without extension (default: benchmark_report)
|
||||
- `--report-title`: Report title (default: "Relay Benchmark Comparison")
|
||||
|
||||
## Query Types Tested
|
||||
|
||||
The benchmark tests various query patterns:
|
||||
1. Query by kind
|
||||
2. Query by time range (last hour)
|
||||
3. Query by tag (p tags)
|
||||
4. Query by author
|
||||
5. Complex queries with multiple conditions
|
||||
|
||||
## Output
|
||||
|
||||
The tool provides detailed metrics including:
|
||||
|
||||
**Publish Performance:**
|
||||
- Total events published
|
||||
- Total data transferred
|
||||
- Publishing rate (events/second)
|
||||
- Bandwidth usage (MB/second)
|
||||
|
||||
**Query Performance:**
|
||||
- Total queries executed
|
||||
- Total events returned
|
||||
- Query rate (queries/second)
|
||||
- Average events per query
|
||||
|
||||
## Example Output
|
||||
|
||||
```
|
||||
Publishing 1000 events to ws://localhost:7447...
|
||||
Published 1000 events...
|
||||
|
||||
Querying events from ws://localhost:7447...
|
||||
Executed 20 queries...
|
||||
Executed 40 queries...
|
||||
|
||||
=== Benchmark Results ===
|
||||
|
||||
Publish Performance:
|
||||
Events Published: 1000
|
||||
Total Data: 0.81 MB
|
||||
Duration: 890.91ms
|
||||
Rate: 1122.45 events/second
|
||||
Bandwidth: 0.91 MB/second
|
||||
|
||||
Query Performance:
|
||||
Queries Executed: 50
|
||||
Events Returned: 800
|
||||
Duration: 80.21ms
|
||||
Rate: 623.36 queries/second
|
||||
Avg Events/Query: 16.00
|
||||
```
|
||||
|
||||
## Relay Setup
|
||||
|
||||
First run `./setup_relays.sh` to build all relay binaries, then start individual relays:
|
||||
|
||||
### Khatru (SQLite)
|
||||
```bash
|
||||
cd /tmp/relay-benchmark/khatru/examples/basic-sqlite3
|
||||
./khatru-relay
|
||||
```
|
||||
|
||||
### Strfry (LMDB)
|
||||
```bash
|
||||
cd /tmp/relay-benchmark/strfry
|
||||
./strfry --config strfry.conf relay
|
||||
```
|
||||
|
||||
### Relayer (PostgreSQL)
|
||||
```bash
|
||||
# Start PostgreSQL
|
||||
docker run -d --name relay-postgres -e POSTGRES_PASSWORD=postgres \
|
||||
-e POSTGRES_DB=nostr -p 5433:5432 postgres:15-alpine
|
||||
|
||||
# Run relayer
|
||||
cd /tmp/relay-benchmark/relayer/examples/basic
|
||||
POSTGRESQL_DATABASE="postgres://postgres:postgres@localhost:5433/nostr?sslmode=disable" \
|
||||
./relayer-bin
|
||||
```
|
||||
|
||||
### Orly (Badger)
|
||||
```bash
|
||||
cd /tmp/relay-benchmark
|
||||
ORLY_PORT=7448 ORLY_DATA_DIR=/tmp/orly-benchmark ORLY_SPIDER_TYPE=none ./orly-relay
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
The benchmark suite consists of several components:
|
||||
|
||||
- `main.go` - Core benchmark orchestration
|
||||
- `test_signer.go` - secp256k1 event signing
|
||||
- `simple_event.go` - UTF-8 safe event generation
|
||||
- `query_profiler.go` - Query performance analysis
|
||||
- `load_simulator.go` - Load pattern generation
|
||||
- `timing_instrumentation.go` - Event lifecycle tracking
|
||||
- `report_generator.go` - Comparative report generation
|
||||
- `relay_harness.go` - Multi-relay management
|
||||
|
||||
## Notes
|
||||
|
||||
- All benchmarks use event generation with proper secp256k1 signatures
|
||||
- Events are generated with valid UTF-8 content to ensure compatibility
|
||||
- Connection pooling is used for realistic concurrent load testing
|
||||
- Query patterns test real-world filter combinations
|
||||
73
cmd/benchmark/RELAY_COMPARISON_RESULTS.md
Normal file
73
cmd/benchmark/RELAY_COMPARISON_RESULTS.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# Nostr Relay Performance Comparison
|
||||
|
||||
Benchmark results for Khatru, Strfry, Relayer, and Orly relay implementations.
|
||||
|
||||
## Test Configuration
|
||||
|
||||
- **Events Published**: 1000 per relay
|
||||
- **Event Size**: 512 bytes content
|
||||
- **Queries Executed**: 50 per relay
|
||||
- **Concurrency**: 5 simultaneous publishers
|
||||
- **Platform**: Linux 5.15.0-151-generic
|
||||
- **Date**: 2025-08-08
|
||||
|
||||
## Performance Results
|
||||
|
||||
### Publishing Performance
|
||||
|
||||
| Relay | Events Published | Data Size | Duration | Events/sec | Bandwidth |
|
||||
|-------|-----------------|-----------|----------|------------|-----------|
|
||||
| **Khatru** | 1,000 | 0.81 MB | 104.49ms | **9,569.94** | **7.79 MB/s** |
|
||||
| **Strfry** | 1,000 | 0.81 MB | 747.41ms | 1,337.95 | 1.09 MB/s |
|
||||
| **Relayer** | 1,000 | 0.81 MB | 890.91ms | 1,122.45 | 0.91 MB/s |
|
||||
| **Orly** | 1,000 | 0.81 MB | 1.497s | 667.91 | 0.54 MB/s |
|
||||
|
||||
|
||||
### Query Performance
|
||||
|
||||
| Relay | Queries | Events Retrieved | Duration | Queries/sec | Avg Events/Query |
|
||||
|-------|---------|-----------------|----------|-------------|------------------|
|
||||
| **Relayer** | 50 | 800 | 80.21ms | **623.36** | 16.00 |
|
||||
| **Strfry** | 50 | 2,000 | 187.86ms | 266.16 | 40.00 |
|
||||
| **Orly** | 50 | 800 | 10.164s | 4.92 | 16.00 |
|
||||
| **Khatru** | 50 | 2,000 | 10.487s | 4.77 | 40.00 |
|
||||
|
||||
|
||||
## Implementation Details
|
||||
|
||||
### Khatru
|
||||
- Language: Go
|
||||
- Backend: SQLite (embedded)
|
||||
- Dependencies: Go 1.20+, SQLite3
|
||||
- Publishing: 9,570 events/sec, 104ms duration
|
||||
- Querying: 4.77 queries/sec, 10.5s duration
|
||||
|
||||
### Strfry
|
||||
- Language: C++
|
||||
- Backend: LMDB (embedded)
|
||||
- Dependencies: flatbuffers, lmdb, zstd, secp256k1, cmake, g++
|
||||
- Publishing: 1,338 events/sec, 747ms duration
|
||||
- Querying: 266 queries/sec, 188ms duration
|
||||
|
||||
### Relayer
|
||||
- Language: Go
|
||||
- Backend: PostgreSQL (external)
|
||||
- Dependencies: Go 1.20+, PostgreSQL 12+
|
||||
- Publishing: 1,122 events/sec, 891ms duration
|
||||
- Querying: 623 queries/sec, 80ms duration
|
||||
|
||||
### Orly
|
||||
- Language: Go
|
||||
- Backend: Badger (embedded)
|
||||
- Dependencies: Go 1.20+, libsecp256k1
|
||||
- Publishing: 668 events/sec, 1.5s duration
|
||||
- Querying: 4.92 queries/sec, 10.2s duration
|
||||
|
||||
## Test Environment
|
||||
|
||||
- Platform: Linux 5.15.0-151-generic
|
||||
- Concurrency: 5 publishers
|
||||
- Event size: 512 bytes
|
||||
- Signature verification: secp256k1
|
||||
- Content validation: UTF-8
|
||||
|
||||
549
cmd/benchmark/installer.go
Normal file
549
cmd/benchmark/installer.go
Normal file
@@ -0,0 +1,549 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type DependencyType int
|
||||
|
||||
const (
|
||||
Go DependencyType = iota
|
||||
Rust
|
||||
Cpp
|
||||
Git
|
||||
Make
|
||||
Cmake
|
||||
Pkg
|
||||
)
|
||||
|
||||
type RelayInstaller struct {
|
||||
workDir string
|
||||
installDir string
|
||||
deps map[DependencyType]bool
|
||||
mu sync.RWMutex
|
||||
skipVerify bool
|
||||
}
|
||||
|
||||
func NewRelayInstaller(workDir, installDir string) *RelayInstaller {
|
||||
return &RelayInstaller{
|
||||
workDir: workDir,
|
||||
installDir: installDir,
|
||||
deps: make(map[DependencyType]bool),
|
||||
}
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) DetectDependencies() error {
|
||||
deps := []struct {
|
||||
dep DependencyType
|
||||
cmd string
|
||||
}{
|
||||
{Go, "go"},
|
||||
{Rust, "rustc"},
|
||||
{Cpp, "g++"},
|
||||
{Git, "git"},
|
||||
{Make, "make"},
|
||||
{Cmake, "cmake"},
|
||||
{Pkg, "pkg-config"},
|
||||
}
|
||||
|
||||
ri.mu.Lock()
|
||||
defer ri.mu.Unlock()
|
||||
|
||||
for _, d := range deps {
|
||||
_, err := exec.LookPath(d.cmd)
|
||||
ri.deps[d.dep] = err == nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) InstallMissingDependencies() error {
|
||||
ri.mu.RLock()
|
||||
missing := make([]DependencyType, 0)
|
||||
for dep, exists := range ri.deps {
|
||||
if !exists {
|
||||
missing = append(missing, dep)
|
||||
}
|
||||
}
|
||||
ri.mu.RUnlock()
|
||||
|
||||
if len(missing) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
return ri.installLinuxDeps(missing)
|
||||
case "darwin":
|
||||
return ri.installMacDeps(missing)
|
||||
default:
|
||||
return fmt.Errorf("unsupported OS: %s", runtime.GOOS)
|
||||
}
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) installLinuxDeps(deps []DependencyType) error {
|
||||
hasApt := ri.commandExists("apt-get")
|
||||
hasYum := ri.commandExists("yum")
|
||||
hasPacman := ri.commandExists("pacman")
|
||||
|
||||
if !hasApt && !hasYum && !hasPacman {
|
||||
return fmt.Errorf("no supported package manager found")
|
||||
}
|
||||
|
||||
if hasApt {
|
||||
if err := ri.runCommand("sudo", "apt-get", "update"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, dep := range deps {
|
||||
switch dep {
|
||||
case Go:
|
||||
if err := ri.installGo(); err != nil {
|
||||
return err
|
||||
}
|
||||
case Rust:
|
||||
if err := ri.installRust(); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
if hasApt {
|
||||
if err := ri.installAptPackage(dep); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if hasYum {
|
||||
if err := ri.installYumPackage(dep); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if hasPacman {
|
||||
if err := ri.installPacmanPackage(dep); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := ri.installSecp256k1(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) installMacDeps(deps []DependencyType) error {
|
||||
if !ri.commandExists("brew") {
|
||||
return fmt.Errorf("homebrew not found, install from https://brew.sh")
|
||||
}
|
||||
|
||||
for _, dep := range deps {
|
||||
switch dep {
|
||||
case Go:
|
||||
if err := ri.runCommand("brew", "install", "go"); err != nil {
|
||||
return err
|
||||
}
|
||||
case Rust:
|
||||
if err := ri.installRust(); err != nil {
|
||||
return err
|
||||
}
|
||||
case Cpp:
|
||||
if err := ri.runCommand("brew", "install", "gcc"); err != nil {
|
||||
return err
|
||||
}
|
||||
case Git:
|
||||
if err := ri.runCommand("brew", "install", "git"); err != nil {
|
||||
return err
|
||||
}
|
||||
case Make:
|
||||
if err := ri.runCommand("brew", "install", "make"); err != nil {
|
||||
return err
|
||||
}
|
||||
case Cmake:
|
||||
if err := ri.runCommand("brew", "install", "cmake"); err != nil {
|
||||
return err
|
||||
}
|
||||
case Pkg:
|
||||
if err := ri.runCommand("brew", "install", "pkg-config"); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := ri.installSecp256k1(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) installAptPackage(dep DependencyType) error {
|
||||
var pkgName string
|
||||
switch dep {
|
||||
case Cpp:
|
||||
pkgName = "build-essential"
|
||||
case Git:
|
||||
pkgName = "git"
|
||||
case Make:
|
||||
pkgName = "make"
|
||||
case Cmake:
|
||||
pkgName = "cmake"
|
||||
case Pkg:
|
||||
pkgName = "pkg-config"
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
return ri.runCommand("sudo", "apt-get", "install", "-y", pkgName, "autotools-dev", "autoconf", "libtool")
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) installYumPackage(dep DependencyType) error {
|
||||
var pkgName string
|
||||
switch dep {
|
||||
case Cpp:
|
||||
pkgName = "gcc-c++"
|
||||
case Git:
|
||||
pkgName = "git"
|
||||
case Make:
|
||||
pkgName = "make"
|
||||
case Cmake:
|
||||
pkgName = "cmake"
|
||||
case Pkg:
|
||||
pkgName = "pkgconfig"
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
return ri.runCommand("sudo", "yum", "install", "-y", pkgName)
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) installPacmanPackage(dep DependencyType) error {
|
||||
var pkgName string
|
||||
switch dep {
|
||||
case Cpp:
|
||||
pkgName = "gcc"
|
||||
case Git:
|
||||
pkgName = "git"
|
||||
case Make:
|
||||
pkgName = "make"
|
||||
case Cmake:
|
||||
pkgName = "cmake"
|
||||
case Pkg:
|
||||
pkgName = "pkgconf"
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
return ri.runCommand("sudo", "pacman", "-S", "--noconfirm", pkgName)
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) installGo() error {
|
||||
version := "1.21.5"
|
||||
arch := runtime.GOARCH
|
||||
if arch == "amd64" {
|
||||
arch = "amd64"
|
||||
} else if arch == "arm64" {
|
||||
arch = "arm64"
|
||||
}
|
||||
|
||||
filename := fmt.Sprintf("go%s.%s-%s.tar.gz", version, runtime.GOOS, arch)
|
||||
url := fmt.Sprintf("https://golang.org/dl/%s", filename)
|
||||
|
||||
tmpFile := filepath.Join(os.TempDir(), filename)
|
||||
if err := ri.runCommand("wget", "-O", tmpFile, url); err != nil {
|
||||
return fmt.Errorf("failed to download Go: %w", err)
|
||||
}
|
||||
|
||||
if err := ri.runCommand("sudo", "tar", "-C", "/usr/local", "-xzf", tmpFile); err != nil {
|
||||
return fmt.Errorf("failed to extract Go: %w", err)
|
||||
}
|
||||
|
||||
os.Remove(tmpFile)
|
||||
|
||||
profile := filepath.Join(os.Getenv("HOME"), ".profile")
|
||||
f, err := os.OpenFile(profile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err == nil {
|
||||
f.WriteString("\nexport PATH=$PATH:/usr/local/go/bin\n")
|
||||
f.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) installRust() error {
|
||||
return ri.runCommand("curl", "--proto", "=https", "--tlsv1.2", "-sSf", "https://sh.rustup.rs", "|", "sh", "-s", "--", "-y")
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) installSecp256k1() error {
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
if ri.commandExists("apt-get") {
|
||||
if err := ri.runCommand("sudo", "apt-get", "install", "-y", "libsecp256k1-dev"); err != nil {
|
||||
return ri.buildSecp256k1FromSource()
|
||||
}
|
||||
return nil
|
||||
} else if ri.commandExists("yum") {
|
||||
if err := ri.runCommand("sudo", "yum", "install", "-y", "libsecp256k1-devel"); err != nil {
|
||||
return ri.buildSecp256k1FromSource()
|
||||
}
|
||||
return nil
|
||||
} else if ri.commandExists("pacman") {
|
||||
if err := ri.runCommand("sudo", "pacman", "-S", "--noconfirm", "libsecp256k1"); err != nil {
|
||||
return ri.buildSecp256k1FromSource()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return ri.buildSecp256k1FromSource()
|
||||
case "darwin":
|
||||
if err := ri.runCommand("brew", "install", "libsecp256k1"); err != nil {
|
||||
return ri.buildSecp256k1FromSource()
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return ri.buildSecp256k1FromSource()
|
||||
}
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) buildSecp256k1FromSource() error {
|
||||
secp256k1Dir := filepath.Join(ri.workDir, "secp256k1")
|
||||
|
||||
if err := ri.runCommand("git", "clone", "https://github.com/bitcoin-core/secp256k1.git", secp256k1Dir); err != nil {
|
||||
return fmt.Errorf("failed to clone secp256k1: %w", err)
|
||||
}
|
||||
|
||||
if err := os.Chdir(secp256k1Dir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ri.runCommand("./autogen.sh"); err != nil {
|
||||
return fmt.Errorf("failed to run autogen: %w", err)
|
||||
}
|
||||
|
||||
configArgs := []string{"--enable-module-schnorrsig", "--enable-module-recovery"}
|
||||
if err := ri.runCommand("./configure", configArgs...); err != nil {
|
||||
return fmt.Errorf("failed to configure secp256k1: %w", err)
|
||||
}
|
||||
|
||||
if err := ri.runCommand("make"); err != nil {
|
||||
return fmt.Errorf("failed to build secp256k1: %w", err)
|
||||
}
|
||||
|
||||
if err := ri.runCommand("sudo", "make", "install"); err != nil {
|
||||
return fmt.Errorf("failed to install secp256k1: %w", err)
|
||||
}
|
||||
|
||||
if err := ri.runCommand("sudo", "ldconfig"); err != nil && runtime.GOOS == "linux" {
|
||||
return fmt.Errorf("failed to run ldconfig: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) InstallKhatru() error {
|
||||
khatruDir := filepath.Join(ri.workDir, "khatru")
|
||||
|
||||
if err := ri.runCommand("git", "clone", "https://github.com/fiatjaf/khatru.git", khatruDir); err != nil {
|
||||
return fmt.Errorf("failed to clone khatru: %w", err)
|
||||
}
|
||||
|
||||
if err := os.Chdir(khatruDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ri.runCommand("go", "mod", "tidy"); err != nil {
|
||||
return fmt.Errorf("failed to tidy khatru: %w", err)
|
||||
}
|
||||
|
||||
binPath := filepath.Join(ri.installDir, "khatru")
|
||||
if err := ri.runCommand("go", "build", "-o", binPath, "."); err != nil {
|
||||
return fmt.Errorf("failed to build khatru: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) InstallRelayer() error {
|
||||
relayerDir := filepath.Join(ri.workDir, "relayer")
|
||||
|
||||
if err := ri.runCommand("git", "clone", "https://github.com/fiatjaf/relayer.git", relayerDir); err != nil {
|
||||
return fmt.Errorf("failed to clone relayer: %w", err)
|
||||
}
|
||||
|
||||
if err := os.Chdir(relayerDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ri.runCommand("go", "mod", "tidy"); err != nil {
|
||||
return fmt.Errorf("failed to tidy relayer: %w", err)
|
||||
}
|
||||
|
||||
binPath := filepath.Join(ri.installDir, "relayer")
|
||||
if err := ri.runCommand("go", "build", "-o", binPath, "."); err != nil {
|
||||
return fmt.Errorf("failed to build relayer: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) InstallStrfry() error {
|
||||
strfryDir := filepath.Join(ri.workDir, "strfry")
|
||||
|
||||
if err := ri.runCommand("git", "clone", "https://github.com/hoytech/strfry.git", strfryDir); err != nil {
|
||||
return fmt.Errorf("failed to clone strfry: %w", err)
|
||||
}
|
||||
|
||||
if err := os.Chdir(strfryDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ri.runCommand("git", "submodule", "update", "--init"); err != nil {
|
||||
return fmt.Errorf("failed to init submodules: %w", err)
|
||||
}
|
||||
|
||||
if err := ri.runCommand("make", "setup-golpe"); err != nil {
|
||||
return fmt.Errorf("failed to setup golpe: %w", err)
|
||||
}
|
||||
|
||||
if err := ri.runCommand("make"); err != nil {
|
||||
return fmt.Errorf("failed to build strfry: %w", err)
|
||||
}
|
||||
|
||||
srcBin := filepath.Join(strfryDir, "strfry")
|
||||
dstBin := filepath.Join(ri.installDir, "strfry")
|
||||
if err := ri.runCommand("cp", srcBin, dstBin); err != nil {
|
||||
return fmt.Errorf("failed to copy strfry binary: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) InstallRustRelay() error {
|
||||
rustRelayDir := filepath.Join(ri.workDir, "nostr-rs-relay")
|
||||
|
||||
if err := ri.runCommand("git", "clone", "https://github.com/scsibug/nostr-rs-relay.git", rustRelayDir); err != nil {
|
||||
return fmt.Errorf("failed to clone rust relay: %w", err)
|
||||
}
|
||||
|
||||
if err := os.Chdir(rustRelayDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ri.runCommand("cargo", "build", "--release"); err != nil {
|
||||
return fmt.Errorf("failed to build rust relay: %w", err)
|
||||
}
|
||||
|
||||
srcBin := filepath.Join(rustRelayDir, "target", "release", "nostr-rs-relay")
|
||||
dstBin := filepath.Join(ri.installDir, "nostr-rs-relay")
|
||||
if err := ri.runCommand("cp", srcBin, dstBin); err != nil {
|
||||
return fmt.Errorf("failed to copy rust relay binary: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) VerifyInstallation() error {
|
||||
if ri.skipVerify {
|
||||
return nil
|
||||
}
|
||||
|
||||
binaries := []string{"khatru", "relayer", "strfry", "nostr-rs-relay"}
|
||||
|
||||
for _, binary := range binaries {
|
||||
binPath := filepath.Join(ri.installDir, binary)
|
||||
if _, err := os.Stat(binPath); os.IsNotExist(err) {
|
||||
return fmt.Errorf("binary %s not found at %s", binary, binPath)
|
||||
}
|
||||
|
||||
if err := ri.runCommand("chmod", "+x", binPath); err != nil {
|
||||
return fmt.Errorf("failed to make %s executable: %w", binary, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) commandExists(cmd string) bool {
|
||||
_, err := exec.LookPath(cmd)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) runCommand(name string, args ...string) error {
|
||||
if name == "curl" && len(args) > 0 && strings.Contains(strings.Join(args, " "), "|") {
|
||||
fullCmd := fmt.Sprintf("%s %s", name, strings.Join(args, " "))
|
||||
cmd := exec.Command("bash", "-c", fullCmd)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
cmd := exec.Command(name, args...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) InstallSecp256k1Only() error {
|
||||
fmt.Println("Installing secp256k1 library...")
|
||||
|
||||
if err := os.MkdirAll(ri.workDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ri.installSecp256k1(); err != nil {
|
||||
return fmt.Errorf("failed to install secp256k1: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println("secp256k1 installed successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ri *RelayInstaller) InstallAll() error {
|
||||
fmt.Println("Detecting dependencies...")
|
||||
if err := ri.DetectDependencies(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Installing missing dependencies...")
|
||||
if err := ri.InstallMissingDependencies(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(ri.workDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.MkdirAll(ri.installDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Installing khatru...")
|
||||
if err := ri.InstallKhatru(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Installing relayer...")
|
||||
if err := ri.InstallRelayer(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Installing strfry...")
|
||||
if err := ri.InstallStrfry(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Installing rust relay...")
|
||||
if err := ri.InstallRustRelay(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("Verifying installation...")
|
||||
if err := ri.VerifyInstallation(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("All relays installed successfully")
|
||||
return nil
|
||||
}
|
||||
494
cmd/benchmark/load_simulator.go
Normal file
494
cmd/benchmark/load_simulator.go
Normal file
@@ -0,0 +1,494 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"orly.dev/pkg/protocol/ws"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
type LoadPattern int
|
||||
|
||||
const (
|
||||
Constant LoadPattern = iota
|
||||
Spike
|
||||
Burst
|
||||
Sine
|
||||
Ramp
|
||||
)
|
||||
|
||||
func (lp LoadPattern) String() string {
|
||||
switch lp {
|
||||
case Constant:
|
||||
return "constant"
|
||||
case Spike:
|
||||
return "spike"
|
||||
case Burst:
|
||||
return "burst"
|
||||
case Sine:
|
||||
return "sine"
|
||||
case Ramp:
|
||||
return "ramp"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
type ConnectionPool struct {
|
||||
relayURL string
|
||||
poolSize int
|
||||
connections []*ws.Client
|
||||
active []bool
|
||||
mu sync.RWMutex
|
||||
created int64
|
||||
failed int64
|
||||
}
|
||||
|
||||
func NewConnectionPool(relayURL string, poolSize int) *ConnectionPool {
|
||||
return &ConnectionPool{
|
||||
relayURL: relayURL,
|
||||
poolSize: poolSize,
|
||||
connections: make([]*ws.Client, poolSize),
|
||||
active: make([]bool, poolSize),
|
||||
}
|
||||
}
|
||||
|
||||
func (cp *ConnectionPool) Initialize(c context.T) error {
|
||||
var wg sync.WaitGroup
|
||||
errors := make(chan error, cp.poolSize)
|
||||
|
||||
for i := 0; i < cp.poolSize; i++ {
|
||||
wg.Add(1)
|
||||
go func(idx int) {
|
||||
defer wg.Done()
|
||||
|
||||
conn, err := ws.RelayConnect(c, cp.relayURL)
|
||||
if err != nil {
|
||||
errors <- fmt.Errorf("connection %d failed: %w", idx, err)
|
||||
atomic.AddInt64(&cp.failed, 1)
|
||||
return
|
||||
}
|
||||
|
||||
cp.mu.Lock()
|
||||
cp.connections[idx] = conn
|
||||
cp.active[idx] = true
|
||||
cp.mu.Unlock()
|
||||
|
||||
atomic.AddInt64(&cp.created, 1)
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(errors)
|
||||
|
||||
errorCount := 0
|
||||
for range errors {
|
||||
errorCount++
|
||||
}
|
||||
|
||||
if errorCount > 0 {
|
||||
return fmt.Errorf("failed to create %d connections", errorCount)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cp *ConnectionPool) GetConnection(idx int) *ws.Client {
|
||||
cp.mu.RLock()
|
||||
defer cp.mu.RUnlock()
|
||||
|
||||
if idx >= 0 && idx < len(cp.connections) && cp.active[idx] {
|
||||
return cp.connections[idx]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cp *ConnectionPool) CloseAll() {
|
||||
cp.mu.Lock()
|
||||
defer cp.mu.Unlock()
|
||||
|
||||
for i, conn := range cp.connections {
|
||||
if conn != nil && cp.active[i] {
|
||||
conn.Close()
|
||||
cp.active[i] = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cp *ConnectionPool) Stats() (created, failed int64) {
|
||||
return atomic.LoadInt64(&cp.created), atomic.LoadInt64(&cp.failed)
|
||||
}
|
||||
|
||||
type LoadSimulator struct {
|
||||
relayURL string
|
||||
pattern LoadPattern
|
||||
duration time.Duration
|
||||
baseLoad int
|
||||
peakLoad int
|
||||
poolSize int
|
||||
eventSize int
|
||||
connectionPool *ConnectionPool
|
||||
metrics LoadMetrics
|
||||
running atomic.Bool
|
||||
}
|
||||
|
||||
type LoadMetrics struct {
|
||||
EventsSent atomic.Int64
|
||||
EventsFailed atomic.Int64
|
||||
ConnectionErrors atomic.Int64
|
||||
AvgLatency atomic.Int64
|
||||
PeakLatency atomic.Int64
|
||||
StartTime time.Time
|
||||
EndTime time.Time
|
||||
}
|
||||
|
||||
func NewLoadSimulator(relayURL string, pattern LoadPattern, duration time.Duration, baseLoad, peakLoad, poolSize, eventSize int) *LoadSimulator {
|
||||
return &LoadSimulator{
|
||||
relayURL: relayURL,
|
||||
pattern: pattern,
|
||||
duration: duration,
|
||||
baseLoad: baseLoad,
|
||||
peakLoad: peakLoad,
|
||||
poolSize: poolSize,
|
||||
eventSize: eventSize,
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *LoadSimulator) Run(c context.T) error {
|
||||
fmt.Printf("Starting %s load simulation for %v...\n", ls.pattern, ls.duration)
|
||||
fmt.Printf("Base load: %d events/sec, Peak load: %d events/sec\n", ls.baseLoad, ls.peakLoad)
|
||||
fmt.Printf("Connection pool size: %d\n", ls.poolSize)
|
||||
|
||||
ls.connectionPool = NewConnectionPool(ls.relayURL, ls.poolSize)
|
||||
if err := ls.connectionPool.Initialize(c); err != nil {
|
||||
return fmt.Errorf("failed to initialize connection pool: %w", err)
|
||||
}
|
||||
defer ls.connectionPool.CloseAll()
|
||||
|
||||
created, failed := ls.connectionPool.Stats()
|
||||
fmt.Printf("Connections established: %d, failed: %d\n", created, failed)
|
||||
|
||||
ls.metrics.StartTime = time.Now()
|
||||
ls.running.Store(true)
|
||||
|
||||
switch ls.pattern {
|
||||
case Constant:
|
||||
return ls.runConstant(c)
|
||||
case Spike:
|
||||
return ls.runSpike(c)
|
||||
case Burst:
|
||||
return ls.runBurst(c)
|
||||
case Sine:
|
||||
return ls.runSine(c)
|
||||
case Ramp:
|
||||
return ls.runRamp(c)
|
||||
default:
|
||||
return fmt.Errorf("unsupported load pattern: %s", ls.pattern)
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *LoadSimulator) runConstant(c context.T) error {
|
||||
interval := time.Second / time.Duration(ls.baseLoad)
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
timeout := time.After(ls.duration)
|
||||
connectionIdx := 0
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timeout:
|
||||
return ls.finalize()
|
||||
case <-ticker.C:
|
||||
go ls.sendEvent(c, connectionIdx%ls.poolSize)
|
||||
connectionIdx++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *LoadSimulator) runSpike(c context.T) error {
|
||||
baseInterval := time.Second / time.Duration(ls.baseLoad)
|
||||
spikeDuration := ls.duration / 10
|
||||
spikeStart := ls.duration / 2
|
||||
|
||||
baseTicker := time.NewTicker(baseInterval)
|
||||
defer baseTicker.Stop()
|
||||
|
||||
timeout := time.After(ls.duration)
|
||||
spikeTimeout := time.After(spikeStart)
|
||||
spikeEnd := time.After(spikeStart + spikeDuration)
|
||||
|
||||
connectionIdx := 0
|
||||
inSpike := false
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timeout:
|
||||
return ls.finalize()
|
||||
case <-spikeTimeout:
|
||||
if !inSpike {
|
||||
inSpike = true
|
||||
baseTicker.Stop()
|
||||
spikeInterval := time.Second / time.Duration(ls.peakLoad)
|
||||
baseTicker = time.NewTicker(spikeInterval)
|
||||
}
|
||||
case <-spikeEnd:
|
||||
if inSpike {
|
||||
inSpike = false
|
||||
baseTicker.Stop()
|
||||
baseTicker = time.NewTicker(baseInterval)
|
||||
}
|
||||
case <-baseTicker.C:
|
||||
go ls.sendEvent(c, connectionIdx%ls.poolSize)
|
||||
connectionIdx++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *LoadSimulator) runBurst(c context.T) error {
|
||||
burstInterval := ls.duration / 5
|
||||
burstSize := ls.peakLoad / 2
|
||||
|
||||
ticker := time.NewTicker(burstInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
timeout := time.After(ls.duration)
|
||||
connectionIdx := 0
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timeout:
|
||||
return ls.finalize()
|
||||
case <-ticker.C:
|
||||
for i := 0; i < burstSize; i++ {
|
||||
go ls.sendEvent(c, connectionIdx%ls.poolSize)
|
||||
connectionIdx++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *LoadSimulator) runSine(c context.T) error {
|
||||
startTime := time.Now()
|
||||
baseTicker := time.NewTicker(50 * time.Millisecond)
|
||||
defer baseTicker.Stop()
|
||||
|
||||
timeout := time.After(ls.duration)
|
||||
connectionIdx := 0
|
||||
lastSend := time.Now()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timeout:
|
||||
return ls.finalize()
|
||||
case now := <-baseTicker.C:
|
||||
elapsed := now.Sub(startTime)
|
||||
progress := float64(elapsed) / float64(ls.duration)
|
||||
sineValue := math.Sin(progress * 4 * math.Pi)
|
||||
|
||||
currentLoad := ls.baseLoad + int(float64(ls.peakLoad-ls.baseLoad)*((sineValue+1)/2))
|
||||
|
||||
if currentLoad > 0 {
|
||||
interval := time.Second / time.Duration(currentLoad)
|
||||
if now.Sub(lastSend) >= interval {
|
||||
go ls.sendEvent(c, connectionIdx%ls.poolSize)
|
||||
connectionIdx++
|
||||
lastSend = now
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *LoadSimulator) runRamp(c context.T) error {
|
||||
startTime := time.Now()
|
||||
ticker := time.NewTicker(100 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
timeout := time.After(ls.duration)
|
||||
connectionIdx := 0
|
||||
lastSend := time.Now()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-timeout:
|
||||
return ls.finalize()
|
||||
case now := <-ticker.C:
|
||||
elapsed := now.Sub(startTime)
|
||||
progress := float64(elapsed) / float64(ls.duration)
|
||||
|
||||
currentLoad := ls.baseLoad + int(float64(ls.peakLoad-ls.baseLoad)*progress)
|
||||
|
||||
if currentLoad > 0 {
|
||||
interval := time.Second / time.Duration(currentLoad)
|
||||
if now.Sub(lastSend) >= interval {
|
||||
go ls.sendEvent(c, connectionIdx%ls.poolSize)
|
||||
connectionIdx++
|
||||
lastSend = now
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *LoadSimulator) sendEvent(c context.T, connIdx int) {
|
||||
startTime := time.Now()
|
||||
|
||||
conn := ls.connectionPool.GetConnection(connIdx)
|
||||
if conn == nil {
|
||||
ls.metrics.ConnectionErrors.Add(1)
|
||||
return
|
||||
}
|
||||
|
||||
signer := newTestSigner()
|
||||
ev := generateEvent(signer, ls.eventSize, 0, 0)
|
||||
|
||||
err := conn.Publish(c, ev)
|
||||
latency := time.Since(startTime)
|
||||
|
||||
if err != nil {
|
||||
ls.metrics.EventsFailed.Add(1)
|
||||
log.E.F("Event publish failed: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
ls.metrics.EventsSent.Add(1)
|
||||
|
||||
latencyMs := latency.Milliseconds()
|
||||
ls.metrics.AvgLatency.Store(latencyMs)
|
||||
|
||||
if latencyMs > ls.metrics.PeakLatency.Load() {
|
||||
ls.metrics.PeakLatency.Store(latencyMs)
|
||||
}
|
||||
}
|
||||
|
||||
func (ls *LoadSimulator) finalize() error {
|
||||
ls.metrics.EndTime = time.Now()
|
||||
ls.running.Store(false)
|
||||
|
||||
duration := ls.metrics.EndTime.Sub(ls.metrics.StartTime)
|
||||
eventsSent := ls.metrics.EventsSent.Load()
|
||||
eventsFailed := ls.metrics.EventsFailed.Load()
|
||||
connectionErrors := ls.metrics.ConnectionErrors.Load()
|
||||
|
||||
fmt.Printf("\n=== Load Simulation Results ===\n")
|
||||
fmt.Printf("Pattern: %s\n", ls.pattern)
|
||||
fmt.Printf("Duration: %v\n", duration)
|
||||
fmt.Printf("Events Sent: %d\n", eventsSent)
|
||||
fmt.Printf("Events Failed: %d\n", eventsFailed)
|
||||
fmt.Printf("Connection Errors: %d\n", connectionErrors)
|
||||
|
||||
if eventsSent > 0 {
|
||||
rate := float64(eventsSent) / duration.Seconds()
|
||||
successRate := float64(eventsSent) / float64(eventsSent+eventsFailed) * 100
|
||||
fmt.Printf("Average Rate: %.2f events/sec\n", rate)
|
||||
fmt.Printf("Success Rate: %.1f%%\n", successRate)
|
||||
fmt.Printf("Average Latency: %dms\n", ls.metrics.AvgLatency.Load())
|
||||
fmt.Printf("Peak Latency: %dms\n", ls.metrics.PeakLatency.Load())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ls *LoadSimulator) SimulateResourceConstraints(c context.T, memoryLimit, cpuLimit int) error {
|
||||
fmt.Printf("\n=== Resource Constraint Simulation ===\n")
|
||||
fmt.Printf("Memory limit: %d MB, CPU limit: %d%%\n", memoryLimit, cpuLimit)
|
||||
|
||||
constraintTests := []struct {
|
||||
name string
|
||||
duration time.Duration
|
||||
load int
|
||||
}{
|
||||
{"baseline", 30 * time.Second, ls.baseLoad},
|
||||
{"memory_stress", 60 * time.Second, ls.peakLoad * 2},
|
||||
{"cpu_stress", 45 * time.Second, ls.peakLoad * 3},
|
||||
{"combined_stress", 90 * time.Second, ls.peakLoad * 4},
|
||||
}
|
||||
|
||||
for _, test := range constraintTests {
|
||||
fmt.Printf("\nRunning %s test...\n", test.name)
|
||||
|
||||
simulator := NewLoadSimulator(ls.relayURL, Constant, test.duration, test.load, test.load, ls.poolSize, ls.eventSize)
|
||||
|
||||
if err := simulator.Run(c); err != nil {
|
||||
fmt.Printf("Test %s failed: %v\n", test.name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
time.Sleep(10 * time.Second)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ls *LoadSimulator) GetMetrics() map[string]interface{} {
|
||||
metrics := make(map[string]interface{})
|
||||
|
||||
metrics["pattern"] = ls.pattern.String()
|
||||
metrics["events_sent"] = ls.metrics.EventsSent.Load()
|
||||
metrics["events_failed"] = ls.metrics.EventsFailed.Load()
|
||||
metrics["connection_errors"] = ls.metrics.ConnectionErrors.Load()
|
||||
metrics["avg_latency_ms"] = ls.metrics.AvgLatency.Load()
|
||||
metrics["peak_latency_ms"] = ls.metrics.PeakLatency.Load()
|
||||
|
||||
if !ls.metrics.StartTime.IsZero() && !ls.metrics.EndTime.IsZero() {
|
||||
duration := ls.metrics.EndTime.Sub(ls.metrics.StartTime)
|
||||
metrics["duration_seconds"] = duration.Seconds()
|
||||
|
||||
if eventsSent := ls.metrics.EventsSent.Load(); eventsSent > 0 {
|
||||
metrics["events_per_second"] = float64(eventsSent) / duration.Seconds()
|
||||
}
|
||||
}
|
||||
|
||||
return metrics
|
||||
}
|
||||
|
||||
type LoadTestSuite struct {
|
||||
relayURL string
|
||||
poolSize int
|
||||
eventSize int
|
||||
}
|
||||
|
||||
func NewLoadTestSuite(relayURL string, poolSize, eventSize int) *LoadTestSuite {
|
||||
return &LoadTestSuite{
|
||||
relayURL: relayURL,
|
||||
poolSize: poolSize,
|
||||
eventSize: eventSize,
|
||||
}
|
||||
}
|
||||
|
||||
func (lts *LoadTestSuite) RunAllPatterns(c context.T) error {
|
||||
patterns := []struct {
|
||||
pattern LoadPattern
|
||||
baseLoad int
|
||||
peakLoad int
|
||||
duration time.Duration
|
||||
}{
|
||||
{Constant, 50, 50, 60 * time.Second},
|
||||
{Spike, 50, 500, 90 * time.Second},
|
||||
{Burst, 20, 400, 75 * time.Second},
|
||||
{Sine, 50, 300, 120 * time.Second},
|
||||
{Ramp, 10, 200, 90 * time.Second},
|
||||
}
|
||||
|
||||
fmt.Printf("Running comprehensive load test suite...\n")
|
||||
|
||||
for _, p := range patterns {
|
||||
fmt.Printf("\n--- Testing %s pattern ---\n", p.pattern)
|
||||
|
||||
simulator := NewLoadSimulator(lts.relayURL, p.pattern, p.duration, p.baseLoad, p.peakLoad, lts.poolSize, lts.eventSize)
|
||||
|
||||
if err := simulator.Run(c); err != nil {
|
||||
fmt.Printf("Pattern %s failed: %v\n", p.pattern, err)
|
||||
continue
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
644
cmd/benchmark/main.go
Normal file
644
cmd/benchmark/main.go
Normal file
@@ -0,0 +1,644 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"lukechampine.com/frand"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/encoders/kinds"
|
||||
"orly.dev/pkg/encoders/tag"
|
||||
"orly.dev/pkg/encoders/tags"
|
||||
"orly.dev/pkg/encoders/timestamp"
|
||||
"orly.dev/pkg/protocol/ws"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/lol"
|
||||
)
|
||||
|
||||
type BenchmarkResults struct {
|
||||
EventsPublished int64
|
||||
EventsPublishedBytes int64
|
||||
PublishDuration time.Duration
|
||||
PublishRate float64
|
||||
PublishBandwidth float64
|
||||
|
||||
QueriesExecuted int64
|
||||
QueryDuration time.Duration
|
||||
QueryRate float64
|
||||
EventsReturned int64
|
||||
}
|
||||
|
||||
func main() {
|
||||
var (
|
||||
relayURL = flag.String("relay", "ws://localhost:7447", "Relay URL to benchmark")
|
||||
eventCount = flag.Int("events", 10000, "Number of events to publish")
|
||||
eventSize = flag.Int("size", 1024, "Average size of event content in bytes")
|
||||
concurrency = flag.Int("concurrency", 10, "Number of concurrent publishers")
|
||||
queryCount = flag.Int("queries", 100, "Number of queries to execute")
|
||||
queryLimit = flag.Int("query-limit", 100, "Limit for each query")
|
||||
skipPublish = flag.Bool("skip-publish", false, "Skip publishing phase")
|
||||
skipQuery = flag.Bool("skip-query", false, "Skip query phase")
|
||||
verbose = flag.Bool("v", false, "Verbose output")
|
||||
multiRelay = flag.Bool("multi-relay", false, "Use multi-relay harness")
|
||||
relayBinPath = flag.String("relay-bin", "", "Path to relay binary (for multi-relay mode)")
|
||||
profileQueries = flag.Bool("profile", false, "Run query performance profiling")
|
||||
profileSubs = flag.Bool("profile-subs", false, "Profile subscription performance")
|
||||
subCount = flag.Int("sub-count", 100, "Number of concurrent subscriptions for profiling")
|
||||
subDuration = flag.Duration("sub-duration", 30*time.Second, "Duration for subscription profiling")
|
||||
installRelays = flag.Bool("install", false, "Install relay dependencies and binaries")
|
||||
installSecp = flag.Bool("install-secp", false, "Install only secp256k1 library")
|
||||
workDir = flag.String("work-dir", "/tmp/relay-build", "Working directory for builds")
|
||||
installDir = flag.String("install-dir", "/usr/local/bin", "Installation directory for binaries")
|
||||
generateReport = flag.Bool("report", false, "Generate comparative report")
|
||||
reportFormat = flag.String("report-format", "markdown", "Report format: markdown, json, csv")
|
||||
reportFile = flag.String("report-file", "benchmark_report", "Report output filename (without extension)")
|
||||
reportTitle = flag.String("report-title", "Relay Benchmark Comparison", "Report title")
|
||||
timingMode = flag.Bool("timing", false, "Run end-to-end timing instrumentation")
|
||||
timingEvents = flag.Int("timing-events", 100, "Number of events for timing instrumentation")
|
||||
timingSubs = flag.Bool("timing-subs", false, "Test subscription timing")
|
||||
timingDuration = flag.Duration("timing-duration", 10*time.Second, "Duration for subscription timing test")
|
||||
loadTest = flag.Bool("load", false, "Run load pattern simulation")
|
||||
loadPattern = flag.String("load-pattern", "constant", "Load pattern: constant, spike, burst, sine, ramp")
|
||||
loadDuration = flag.Duration("load-duration", 60*time.Second, "Duration for load test")
|
||||
loadBase = flag.Int("load-base", 50, "Base load (events/sec)")
|
||||
loadPeak = flag.Int("load-peak", 200, "Peak load (events/sec)")
|
||||
loadPool = flag.Int("load-pool", 10, "Connection pool size for load testing")
|
||||
loadSuite = flag.Bool("load-suite", false, "Run comprehensive load test suite")
|
||||
loadConstraints = flag.Bool("load-constraints", false, "Test under resource constraints")
|
||||
)
|
||||
flag.Parse()
|
||||
|
||||
if *verbose {
|
||||
lol.SetLogLevel("trace")
|
||||
}
|
||||
|
||||
c := context.Bg()
|
||||
|
||||
if *installRelays {
|
||||
runInstaller(*workDir, *installDir)
|
||||
} else if *installSecp {
|
||||
runSecp256k1Installer(*workDir, *installDir)
|
||||
} else if *generateReport {
|
||||
runReportGeneration(*reportTitle, *reportFormat, *reportFile)
|
||||
} else if *loadTest || *loadSuite || *loadConstraints {
|
||||
runLoadSimulation(c, *relayURL, *loadPattern, *loadDuration, *loadBase, *loadPeak, *loadPool, *eventSize, *loadSuite, *loadConstraints)
|
||||
} else if *timingMode || *timingSubs {
|
||||
runTimingInstrumentation(c, *relayURL, *timingEvents, *eventSize, *timingSubs, *timingDuration)
|
||||
} else if *profileQueries || *profileSubs {
|
||||
runQueryProfiler(c, *relayURL, *queryCount, *concurrency, *profileSubs, *subCount, *subDuration)
|
||||
} else if *multiRelay {
|
||||
runMultiRelayBenchmark(c, *relayBinPath, *eventCount, *eventSize, *concurrency, *queryCount, *queryLimit, *skipPublish, *skipQuery)
|
||||
} else {
|
||||
runSingleRelayBenchmark(c, *relayURL, *eventCount, *eventSize, *concurrency, *queryCount, *queryLimit, *skipPublish, *skipQuery)
|
||||
}
|
||||
}
|
||||
|
||||
func runSingleRelayBenchmark(c context.T, relayURL string, eventCount, eventSize, concurrency, queryCount, queryLimit int, skipPublish, skipQuery bool) {
|
||||
results := &BenchmarkResults{}
|
||||
|
||||
// Phase 1: Publish events
|
||||
if !skipPublish {
|
||||
fmt.Printf("Publishing %d events to %s...\n", eventCount, relayURL)
|
||||
if err := benchmarkPublish(c, relayURL, eventCount, eventSize, concurrency, results); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Error during publish benchmark: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Phase 2: Query events
|
||||
if !skipQuery {
|
||||
fmt.Printf("\nQuerying events from %s...\n", relayURL)
|
||||
if err := benchmarkQuery(c, relayURL, queryCount, queryLimit, results); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Error during query benchmark: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Print results
|
||||
printResults(results)
|
||||
}
|
||||
|
||||
func runMultiRelayBenchmark(c context.T, relayBinPath string, eventCount, eventSize, concurrency, queryCount, queryLimit int, skipPublish, skipQuery bool) {
|
||||
harness := NewMultiRelayHarness()
|
||||
generator := NewReportGenerator()
|
||||
|
||||
if relayBinPath != "" {
|
||||
config := RelayConfig{
|
||||
Type: Khatru,
|
||||
Binary: relayBinPath,
|
||||
Args: []string{},
|
||||
URL: "ws://localhost:7447",
|
||||
}
|
||||
if err := harness.AddRelay(config); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Failed to add relay: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("Starting relay harness...\n")
|
||||
if err := harness.StartAll(); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Failed to start relays: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer harness.StopAll()
|
||||
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
|
||||
relayTypes := []RelayType{Khatru}
|
||||
if relayBinPath == "" {
|
||||
fmt.Printf("Running multi-relay benchmark without starting relays (external relays expected)\n")
|
||||
}
|
||||
|
||||
for _, relayType := range relayTypes {
|
||||
fmt.Printf("\n=== Benchmarking %s ===\n", relayType)
|
||||
|
||||
results := &BenchmarkResults{}
|
||||
relayURL := "ws://localhost:7447"
|
||||
|
||||
if !skipPublish {
|
||||
fmt.Printf("Publishing %d events to %s...\n", eventCount, relayURL)
|
||||
if err := benchmarkPublish(c, relayURL, eventCount, eventSize, concurrency, results); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Error during publish benchmark for %s: %v\n", relayType, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !skipQuery {
|
||||
fmt.Printf("\nQuerying events from %s...\n", relayURL)
|
||||
if err := benchmarkQuery(c, relayURL, queryCount, queryLimit, results); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Error during query benchmark for %s: %v\n", relayType, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("\n=== %s Results ===\n", relayType)
|
||||
printResults(results)
|
||||
|
||||
metrics := harness.GetMetrics(relayType)
|
||||
if metrics != nil {
|
||||
printHarnessMetrics(relayType, metrics)
|
||||
}
|
||||
|
||||
generator.AddRelayData(relayType.String(), results, metrics, nil)
|
||||
}
|
||||
|
||||
generator.GenerateReport("Multi-Relay Benchmark Results")
|
||||
|
||||
if err := SaveReportToFile("BENCHMARK_RESULTS.md", "markdown", generator); chk.E(err) {
|
||||
fmt.Printf("Warning: Failed to save benchmark results: %v\n", err)
|
||||
} else {
|
||||
fmt.Printf("\nBenchmark results saved to: BENCHMARK_RESULTS.md\n")
|
||||
}
|
||||
}
|
||||
|
||||
func benchmarkPublish(c context.T, relayURL string, eventCount, eventSize, concurrency int, results *BenchmarkResults) error {
|
||||
// Generate signers for each concurrent publisher
|
||||
signers := make([]*testSigner, concurrency)
|
||||
for i := range signers {
|
||||
signers[i] = newTestSigner()
|
||||
}
|
||||
|
||||
// Track published events
|
||||
var publishedEvents atomic.Int64
|
||||
var publishedBytes atomic.Int64
|
||||
var errors atomic.Int64
|
||||
|
||||
// Create wait group for concurrent publishers
|
||||
var wg sync.WaitGroup
|
||||
eventsPerPublisher := eventCount / concurrency
|
||||
extraEvents := eventCount % concurrency
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
for i := 0; i < concurrency; i++ {
|
||||
wg.Add(1)
|
||||
go func(publisherID int) {
|
||||
defer wg.Done()
|
||||
|
||||
// Connect to relay
|
||||
relay, err := ws.RelayConnect(c, relayURL)
|
||||
if err != nil {
|
||||
log.E.F("Publisher %d failed to connect: %v", publisherID, err)
|
||||
errors.Add(1)
|
||||
return
|
||||
}
|
||||
defer relay.Close()
|
||||
|
||||
// Calculate events for this publisher
|
||||
eventsToPublish := eventsPerPublisher
|
||||
if publisherID < extraEvents {
|
||||
eventsToPublish++
|
||||
}
|
||||
|
||||
signer := signers[publisherID]
|
||||
|
||||
// Publish events
|
||||
for j := 0; j < eventsToPublish; j++ {
|
||||
ev := generateEvent(signer, eventSize, time.Duration(0), 0)
|
||||
|
||||
if err := relay.Publish(c, ev); err != nil {
|
||||
log.E.F(
|
||||
"Publisher %d failed to publish event: %v", publisherID,
|
||||
err,
|
||||
)
|
||||
errors.Add(1)
|
||||
continue
|
||||
}
|
||||
|
||||
evBytes := ev.Marshal(nil)
|
||||
publishedEvents.Add(1)
|
||||
publishedBytes.Add(int64(len(evBytes)))
|
||||
|
||||
if publishedEvents.Load()%1000 == 0 {
|
||||
fmt.Printf(
|
||||
" Published %d events...\n", publishedEvents.Load(),
|
||||
)
|
||||
}
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
duration := time.Since(startTime)
|
||||
|
||||
results.EventsPublished = publishedEvents.Load()
|
||||
results.EventsPublishedBytes = publishedBytes.Load()
|
||||
results.PublishDuration = duration
|
||||
results.PublishRate = float64(results.EventsPublished) / duration.Seconds()
|
||||
results.PublishBandwidth = float64(results.EventsPublishedBytes) / duration.Seconds() / 1024 / 1024 // MB/s
|
||||
|
||||
if errors.Load() > 0 {
|
||||
fmt.Printf(
|
||||
" Warning: %d errors occurred during publishing\n", errors.Load(),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func benchmarkQuery(
|
||||
c context.T, relayURL string, queryCount, queryLimit int,
|
||||
results *BenchmarkResults,
|
||||
) error {
|
||||
relay, err := ws.RelayConnect(c, relayURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect to relay: %w", err)
|
||||
}
|
||||
defer relay.Close()
|
||||
|
||||
var totalEvents atomic.Int64
|
||||
var totalQueries atomic.Int64
|
||||
|
||||
startTime := time.Now()
|
||||
|
||||
for i := 0; i < queryCount; i++ {
|
||||
// Generate various filter types
|
||||
var f *filter.F
|
||||
switch i % 5 {
|
||||
case 0:
|
||||
// Query by kind
|
||||
limit := uint(queryLimit)
|
||||
f = &filter.F{
|
||||
Kinds: kinds.New(kind.TextNote),
|
||||
Limit: &limit,
|
||||
}
|
||||
case 1:
|
||||
// Query by time range
|
||||
now := timestamp.Now()
|
||||
since := timestamp.New(now.I64() - 3600) // last hour
|
||||
limit := uint(queryLimit)
|
||||
f = &filter.F{
|
||||
Since: since,
|
||||
Until: now,
|
||||
Limit: &limit,
|
||||
}
|
||||
case 2:
|
||||
// Query by tag
|
||||
limit := uint(queryLimit)
|
||||
f = &filter.F{
|
||||
Tags: tags.New(tag.New([]byte("p"), generateRandomPubkey())),
|
||||
Limit: &limit,
|
||||
}
|
||||
case 3:
|
||||
// Query by author
|
||||
limit := uint(queryLimit)
|
||||
f = &filter.F{
|
||||
Authors: tag.New(generateRandomPubkey()),
|
||||
Limit: &limit,
|
||||
}
|
||||
case 4:
|
||||
// Complex query with multiple conditions
|
||||
now := timestamp.Now()
|
||||
since := timestamp.New(now.I64() - 7200)
|
||||
limit := uint(queryLimit)
|
||||
f = &filter.F{
|
||||
Kinds: kinds.New(kind.TextNote, kind.Repost),
|
||||
Authors: tag.New(generateRandomPubkey()),
|
||||
Since: since,
|
||||
Limit: &limit,
|
||||
}
|
||||
}
|
||||
|
||||
// Execute query
|
||||
events, err := relay.QuerySync(c, f)
|
||||
if err != nil {
|
||||
log.E.F("Query %d failed: %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
totalEvents.Add(int64(len(events)))
|
||||
totalQueries.Add(1)
|
||||
|
||||
if totalQueries.Load()%20 == 0 {
|
||||
fmt.Printf(" Executed %d queries...\n", totalQueries.Load())
|
||||
}
|
||||
}
|
||||
|
||||
duration := time.Since(startTime)
|
||||
|
||||
results.QueriesExecuted = totalQueries.Load()
|
||||
results.QueryDuration = duration
|
||||
results.QueryRate = float64(results.QueriesExecuted) / duration.Seconds()
|
||||
results.EventsReturned = totalEvents.Load()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func generateEvent(signer *testSigner, contentSize int, rateLimit time.Duration, burstSize int) *event.E {
|
||||
return generateSimpleEvent(signer, contentSize)
|
||||
}
|
||||
|
||||
func generateRandomTags() *tags.T {
|
||||
t := tags.New()
|
||||
|
||||
// Add some random tags
|
||||
numTags := frand.Intn(5)
|
||||
for i := 0; i < numTags; i++ {
|
||||
switch frand.Intn(3) {
|
||||
case 0:
|
||||
// p tag
|
||||
t.AppendUnique(tag.New([]byte("p"), generateRandomPubkey()))
|
||||
case 1:
|
||||
// e tag
|
||||
t.AppendUnique(tag.New([]byte("e"), generateRandomEventID()))
|
||||
case 2:
|
||||
// t tag
|
||||
t.AppendUnique(
|
||||
tag.New(
|
||||
[]byte("t"),
|
||||
[]byte(fmt.Sprintf("topic%d", frand.Intn(100))),
|
||||
),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
func generateRandomPubkey() []byte {
|
||||
return frand.Bytes(32)
|
||||
}
|
||||
|
||||
func generateRandomEventID() []byte {
|
||||
return frand.Bytes(32)
|
||||
}
|
||||
|
||||
func printResults(results *BenchmarkResults) {
|
||||
fmt.Println("\n=== Benchmark Results ===")
|
||||
|
||||
if results.EventsPublished > 0 {
|
||||
fmt.Println("\nPublish Performance:")
|
||||
fmt.Printf(" Events Published: %d\n", results.EventsPublished)
|
||||
fmt.Printf(
|
||||
" Total Data: %.2f MB\n",
|
||||
float64(results.EventsPublishedBytes)/1024/1024,
|
||||
)
|
||||
fmt.Printf(" Duration: %s\n", results.PublishDuration)
|
||||
fmt.Printf(" Rate: %.2f events/second\n", results.PublishRate)
|
||||
fmt.Printf(" Bandwidth: %.2f MB/second\n", results.PublishBandwidth)
|
||||
}
|
||||
|
||||
if results.QueriesExecuted > 0 {
|
||||
fmt.Println("\nQuery Performance:")
|
||||
fmt.Printf(" Queries Executed: %d\n", results.QueriesExecuted)
|
||||
fmt.Printf(" Events Returned: %d\n", results.EventsReturned)
|
||||
fmt.Printf(" Duration: %s\n", results.QueryDuration)
|
||||
fmt.Printf(" Rate: %.2f queries/second\n", results.QueryRate)
|
||||
avgEventsPerQuery := float64(results.EventsReturned) / float64(results.QueriesExecuted)
|
||||
fmt.Printf(" Avg Events/Query: %.2f\n", avgEventsPerQuery)
|
||||
}
|
||||
}
|
||||
|
||||
func printHarnessMetrics(relayType RelayType, metrics *HarnessMetrics) {
|
||||
fmt.Printf("\nHarness Metrics for %s:\n", relayType)
|
||||
if metrics.StartupTime > 0 {
|
||||
fmt.Printf(" Startup Time: %s\n", metrics.StartupTime)
|
||||
}
|
||||
if metrics.ShutdownTime > 0 {
|
||||
fmt.Printf(" Shutdown Time: %s\n", metrics.ShutdownTime)
|
||||
}
|
||||
if metrics.Errors > 0 {
|
||||
fmt.Printf(" Errors: %d\n", metrics.Errors)
|
||||
}
|
||||
}
|
||||
|
||||
func runQueryProfiler(c context.T, relayURL string, queryCount, concurrency int, profileSubs bool, subCount int, subDuration time.Duration) {
|
||||
profiler := NewQueryProfiler(relayURL)
|
||||
|
||||
if profileSubs {
|
||||
fmt.Printf("Profiling %d concurrent subscriptions for %v...\n", subCount, subDuration)
|
||||
if err := profiler.TestSubscriptionPerformance(c, subDuration, subCount); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Subscription profiling failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("Profiling %d queries with %d concurrent workers...\n", queryCount, concurrency)
|
||||
if err := profiler.ExecuteProfile(c, queryCount, concurrency); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Query profiling failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
profiler.PrintReport()
|
||||
}
|
||||
|
||||
func runInstaller(workDir, installDir string) {
|
||||
installer := NewRelayInstaller(workDir, installDir)
|
||||
|
||||
if err := installer.InstallAll(); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Installation failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func runSecp256k1Installer(workDir, installDir string) {
|
||||
installer := NewRelayInstaller(workDir, installDir)
|
||||
|
||||
if err := installer.InstallSecp256k1Only(); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "secp256k1 installation failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func runLoadSimulation(c context.T, relayURL, patternStr string, duration time.Duration, baseLoad, peakLoad, poolSize, eventSize int, runSuite, runConstraints bool) {
|
||||
if runSuite {
|
||||
suite := NewLoadTestSuite(relayURL, poolSize, eventSize)
|
||||
if err := suite.RunAllPatterns(c); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Load test suite failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var pattern LoadPattern
|
||||
switch patternStr {
|
||||
case "constant":
|
||||
pattern = Constant
|
||||
case "spike":
|
||||
pattern = Spike
|
||||
case "burst":
|
||||
pattern = Burst
|
||||
case "sine":
|
||||
pattern = Sine
|
||||
case "ramp":
|
||||
pattern = Ramp
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "Invalid load pattern: %s\n", patternStr)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
simulator := NewLoadSimulator(relayURL, pattern, duration, baseLoad, peakLoad, poolSize, eventSize)
|
||||
|
||||
if err := simulator.Run(c); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Load simulation failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if runConstraints {
|
||||
fmt.Printf("\n")
|
||||
if err := simulator.SimulateResourceConstraints(c, 512, 80); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Resource constraint simulation failed: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
metrics := simulator.GetMetrics()
|
||||
fmt.Printf("\n=== Load Simulation Summary ===\n")
|
||||
fmt.Printf("Pattern: %v\n", metrics["pattern"])
|
||||
fmt.Printf("Events sent: %v\n", metrics["events_sent"])
|
||||
fmt.Printf("Events failed: %v\n", metrics["events_failed"])
|
||||
fmt.Printf("Connection errors: %v\n", metrics["connection_errors"])
|
||||
fmt.Printf("Events/second: %.2f\n", metrics["events_per_second"])
|
||||
fmt.Printf("Average latency: %vms\n", metrics["avg_latency_ms"])
|
||||
fmt.Printf("Peak latency: %vms\n", metrics["peak_latency_ms"])
|
||||
}
|
||||
|
||||
func runTimingInstrumentation(c context.T, relayURL string, eventCount, eventSize int, testSubs bool, duration time.Duration) {
|
||||
instrumentation := NewTimingInstrumentation(relayURL)
|
||||
|
||||
fmt.Printf("Connecting to relay at %s...\n", relayURL)
|
||||
if err := instrumentation.Connect(c, relayURL); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Failed to connect to relay: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer instrumentation.Close()
|
||||
|
||||
if testSubs {
|
||||
fmt.Printf("\n=== Subscription Timing Test ===\n")
|
||||
if err := instrumentation.TestSubscriptionTiming(c, duration); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Subscription timing test failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
fmt.Printf("\n=== Full Event Lifecycle Instrumentation ===\n")
|
||||
if err := instrumentation.RunFullInstrumentation(c, eventCount, eventSize); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Timing instrumentation failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
metrics := instrumentation.GetMetrics()
|
||||
fmt.Printf("\n=== Instrumentation Metrics Summary ===\n")
|
||||
fmt.Printf("Total Events Tracked: %v\n", metrics["tracked_events"])
|
||||
fmt.Printf("Lifecycles Recorded: %v\n", metrics["lifecycles_count"])
|
||||
fmt.Printf("WebSocket Frames: %v\n", metrics["frames_tracked"])
|
||||
fmt.Printf("Write Amplifications: %v\n", metrics["write_amplifications"])
|
||||
|
||||
if bottlenecks, ok := metrics["bottlenecks"].(map[string]map[string]interface{}); ok {
|
||||
fmt.Printf("\n=== Pipeline Stage Analysis ===\n")
|
||||
for stage, data := range bottlenecks {
|
||||
fmt.Printf("%s: avg=%vms, p95=%vms, p99=%vms, throughput=%.2f ops/s\n",
|
||||
stage,
|
||||
data["avg_latency_ms"],
|
||||
data["p95_latency_ms"],
|
||||
data["p99_latency_ms"],
|
||||
data["throughput_ops_sec"])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func runReportGeneration(title, format, filename string) {
|
||||
generator := NewReportGenerator()
|
||||
|
||||
resultsFile := "BENCHMARK_RESULTS.md"
|
||||
if _, err := os.Stat(resultsFile); os.IsNotExist(err) {
|
||||
fmt.Printf("No benchmark results found. Run benchmarks first to generate data.\n")
|
||||
fmt.Printf("Example: ./benchmark --multi-relay --relay-bin /path/to/relay\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("Generating %s report: %s\n", format, filename)
|
||||
|
||||
sampleData := []RelayBenchmarkData{
|
||||
{
|
||||
RelayType: "khatru",
|
||||
EventsPublished: 10000,
|
||||
EventsPublishedMB: 15.2,
|
||||
PublishDuration: "12.5s",
|
||||
PublishRate: 800.0,
|
||||
PublishBandwidth: 1.22,
|
||||
QueriesExecuted: 100,
|
||||
EventsReturned: 8500,
|
||||
QueryDuration: "2.1s",
|
||||
QueryRate: 47.6,
|
||||
AvgEventsPerQuery: 85.0,
|
||||
MemoryUsageMB: 245.6,
|
||||
P50Latency: "15ms",
|
||||
P95Latency: "45ms",
|
||||
P99Latency: "120ms",
|
||||
StartupTime: "1.2s",
|
||||
Errors: 0,
|
||||
Timestamp: time.Now(),
|
||||
},
|
||||
}
|
||||
|
||||
generator.report.Title = title
|
||||
generator.report.RelayData = sampleData
|
||||
generator.analyzePerfomance()
|
||||
generator.detectAnomalies()
|
||||
generator.generateRecommendations()
|
||||
|
||||
ext := format
|
||||
if format == "markdown" {
|
||||
ext = "md"
|
||||
}
|
||||
|
||||
outputFile := fmt.Sprintf("%s.%s", filename, ext)
|
||||
if err := SaveReportToFile(outputFile, format, generator); chk.E(err) {
|
||||
fmt.Fprintf(os.Stderr, "Failed to save report: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("Report saved to: %s\n", outputFile)
|
||||
|
||||
if format == "markdown" {
|
||||
fmt.Printf("\nTIP: View with: cat %s\n", outputFile)
|
||||
}
|
||||
}
|
||||
440
cmd/benchmark/query_profiler.go
Normal file
440
cmd/benchmark/query_profiler.go
Normal file
@@ -0,0 +1,440 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"lukechampine.com/frand"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/encoders/filters"
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/encoders/kinds"
|
||||
"orly.dev/pkg/encoders/tag"
|
||||
"orly.dev/pkg/encoders/tags"
|
||||
"orly.dev/pkg/encoders/timestamp"
|
||||
"orly.dev/pkg/protocol/ws"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
type QueryMetrics struct {
|
||||
Latencies []time.Duration
|
||||
TotalQueries int64
|
||||
FailedQueries int64
|
||||
EventsReturned int64
|
||||
MemoryBefore uint64
|
||||
MemoryAfter uint64
|
||||
MemoryPeak uint64
|
||||
P50 time.Duration
|
||||
P95 time.Duration
|
||||
P99 time.Duration
|
||||
Min time.Duration
|
||||
Max time.Duration
|
||||
Mean time.Duration
|
||||
}
|
||||
|
||||
type FilterType int
|
||||
|
||||
const (
|
||||
SimpleKindFilter FilterType = iota
|
||||
TimeRangeFilter
|
||||
AuthorFilter
|
||||
TagFilter
|
||||
ComplexFilter
|
||||
IDFilter
|
||||
PrefixFilter
|
||||
MultiKindFilter
|
||||
LargeTagSetFilter
|
||||
DeepTimeRangeFilter
|
||||
)
|
||||
|
||||
type QueryProfiler struct {
|
||||
relay string
|
||||
subscriptions map[string]*ws.Subscription
|
||||
metrics *QueryMetrics
|
||||
mu sync.RWMutex
|
||||
memTicker *time.Ticker
|
||||
stopMemMonitor chan struct{}
|
||||
}
|
||||
|
||||
func NewQueryProfiler(relayURL string) *QueryProfiler {
|
||||
return &QueryProfiler{
|
||||
relay: relayURL,
|
||||
subscriptions: make(map[string]*ws.Subscription),
|
||||
metrics: &QueryMetrics{
|
||||
Latencies: make(
|
||||
[]time.Duration, 0, 10000,
|
||||
),
|
||||
},
|
||||
stopMemMonitor: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (qp *QueryProfiler) ExecuteProfile(
|
||||
c context.T, iterations int, concurrency int,
|
||||
) error {
|
||||
qp.startMemoryMonitor()
|
||||
defer qp.stopMemoryMonitor()
|
||||
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
qp.metrics.MemoryBefore = m.Alloc
|
||||
|
||||
filterTypes := []FilterType{
|
||||
SimpleKindFilter,
|
||||
TimeRangeFilter,
|
||||
AuthorFilter,
|
||||
TagFilter,
|
||||
ComplexFilter,
|
||||
IDFilter,
|
||||
PrefixFilter,
|
||||
MultiKindFilter,
|
||||
LargeTagSetFilter,
|
||||
DeepTimeRangeFilter,
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
latencyChan := make(chan time.Duration, iterations)
|
||||
errorChan := make(chan error, iterations)
|
||||
|
||||
for i := 0; i < concurrency; i++ {
|
||||
wg.Add(1)
|
||||
go func(workerID int) {
|
||||
defer wg.Done()
|
||||
|
||||
relay, err := ws.RelayConnect(c, qp.relay)
|
||||
if chk.E(err) {
|
||||
errorChan <- fmt.Errorf(
|
||||
"worker %d connection failed: %w", workerID, err,
|
||||
)
|
||||
return
|
||||
}
|
||||
defer relay.Close()
|
||||
|
||||
iterationsPerWorker := iterations / concurrency
|
||||
if workerID == 0 {
|
||||
iterationsPerWorker += iterations % concurrency
|
||||
}
|
||||
|
||||
for j := 0; j < iterationsPerWorker; j++ {
|
||||
filterType := filterTypes[frand.Intn(len(filterTypes))]
|
||||
f := qp.generateFilter(filterType)
|
||||
|
||||
startTime := time.Now()
|
||||
events, err := relay.QuerySync(
|
||||
c, f,
|
||||
) // , ws.WithLabel(fmt.Sprintf("profiler-%d-%d", workerID, j)))
|
||||
latency := time.Since(startTime)
|
||||
|
||||
if err != nil {
|
||||
errorChan <- err
|
||||
atomic.AddInt64(&qp.metrics.FailedQueries, 1)
|
||||
} else {
|
||||
latencyChan <- latency
|
||||
atomic.AddInt64(
|
||||
&qp.metrics.EventsReturned, int64(len(events)),
|
||||
)
|
||||
atomic.AddInt64(&qp.metrics.TotalQueries, 1)
|
||||
}
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(latencyChan)
|
||||
close(errorChan)
|
||||
|
||||
for latency := range latencyChan {
|
||||
qp.mu.Lock()
|
||||
qp.metrics.Latencies = append(qp.metrics.Latencies, latency)
|
||||
qp.mu.Unlock()
|
||||
}
|
||||
|
||||
errorCount := 0
|
||||
for range errorChan {
|
||||
errorCount++
|
||||
}
|
||||
|
||||
runtime.ReadMemStats(&m)
|
||||
qp.metrics.MemoryAfter = m.Alloc
|
||||
|
||||
qp.calculatePercentiles()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (qp *QueryProfiler) generateFilter(filterType FilterType) *filter.F {
|
||||
switch filterType {
|
||||
case SimpleKindFilter:
|
||||
limit := uint(100)
|
||||
return &filter.F{
|
||||
Kinds: kinds.New(kind.TextNote),
|
||||
Limit: &limit,
|
||||
}
|
||||
|
||||
case TimeRangeFilter:
|
||||
now := timestamp.Now()
|
||||
since := timestamp.New(now.I64() - 3600)
|
||||
limit := uint(50)
|
||||
return &filter.F{
|
||||
Since: since,
|
||||
Until: now,
|
||||
Limit: &limit,
|
||||
}
|
||||
|
||||
case AuthorFilter:
|
||||
limit := uint(100)
|
||||
authors := tag.New(frand.Bytes(32))
|
||||
for i := 0; i < 2; i++ {
|
||||
authors.Append(frand.Bytes(32))
|
||||
}
|
||||
return &filter.F{
|
||||
Authors: authors,
|
||||
Limit: &limit,
|
||||
}
|
||||
|
||||
case TagFilter:
|
||||
limit := uint(50)
|
||||
t := tags.New()
|
||||
t.AppendUnique(tag.New([]byte("p"), frand.Bytes(32)))
|
||||
t.AppendUnique(tag.New([]byte("e"), frand.Bytes(32)))
|
||||
return &filter.F{
|
||||
Tags: t,
|
||||
Limit: &limit,
|
||||
}
|
||||
|
||||
case ComplexFilter:
|
||||
now := timestamp.Now()
|
||||
since := timestamp.New(now.I64() - 7200)
|
||||
limit := uint(25)
|
||||
authors := tag.New(frand.Bytes(32))
|
||||
return &filter.F{
|
||||
Kinds: kinds.New(kind.TextNote, kind.Repost, kind.Reaction),
|
||||
Authors: authors,
|
||||
Since: since,
|
||||
Until: now,
|
||||
Limit: &limit,
|
||||
}
|
||||
|
||||
case IDFilter:
|
||||
limit := uint(10)
|
||||
ids := tag.New(frand.Bytes(32))
|
||||
for i := 0; i < 4; i++ {
|
||||
ids.Append(frand.Bytes(32))
|
||||
}
|
||||
return &filter.F{
|
||||
Ids: ids,
|
||||
Limit: &limit,
|
||||
}
|
||||
|
||||
case PrefixFilter:
|
||||
limit := uint(100)
|
||||
prefix := frand.Bytes(4)
|
||||
return &filter.F{
|
||||
Ids: tag.New(prefix),
|
||||
Limit: &limit,
|
||||
}
|
||||
|
||||
case MultiKindFilter:
|
||||
limit := uint(75)
|
||||
return &filter.F{
|
||||
Kinds: kinds.New(
|
||||
kind.TextNote,
|
||||
kind.SetMetadata,
|
||||
kind.FollowList,
|
||||
kind.Reaction,
|
||||
kind.Repost,
|
||||
),
|
||||
Limit: &limit,
|
||||
}
|
||||
|
||||
case LargeTagSetFilter:
|
||||
limit := uint(20)
|
||||
t := tags.New()
|
||||
for i := 0; i < 10; i++ {
|
||||
t.AppendUnique(tag.New([]byte("p"), frand.Bytes(32)))
|
||||
}
|
||||
return &filter.F{
|
||||
Tags: t,
|
||||
Limit: &limit,
|
||||
}
|
||||
|
||||
case DeepTimeRangeFilter:
|
||||
now := timestamp.Now()
|
||||
since := timestamp.New(now.I64() - 86400*30)
|
||||
until := timestamp.New(now.I64() - 86400*20)
|
||||
limit := uint(100)
|
||||
return &filter.F{
|
||||
Since: since,
|
||||
Until: until,
|
||||
Limit: &limit,
|
||||
}
|
||||
|
||||
default:
|
||||
limit := uint(100)
|
||||
return &filter.F{
|
||||
Kinds: kinds.New(kind.TextNote),
|
||||
Limit: &limit,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (qp *QueryProfiler) TestSubscriptionPerformance(
|
||||
c context.T, duration time.Duration, subscriptionCount int,
|
||||
) error {
|
||||
qp.startMemoryMonitor()
|
||||
defer qp.stopMemoryMonitor()
|
||||
|
||||
relay, err := ws.RelayConnect(c, qp.relay)
|
||||
if chk.E(err) {
|
||||
return fmt.Errorf("connection failed: %w", err)
|
||||
}
|
||||
defer relay.Close()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
stopChan := make(chan struct{})
|
||||
|
||||
for i := 0; i < subscriptionCount; i++ {
|
||||
wg.Add(1)
|
||||
go func(subID int) {
|
||||
defer wg.Done()
|
||||
|
||||
f := qp.generateFilter(FilterType(subID % 10))
|
||||
label := fmt.Sprintf("sub-perf-%d", subID)
|
||||
|
||||
eventChan := make(chan *event.E, 100)
|
||||
sub, err := relay.Subscribe(
|
||||
c, &filters.T{F: []*filter.F{f}}, ws.WithLabel(label),
|
||||
)
|
||||
if chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case ev := <-sub.Events:
|
||||
eventChan <- ev
|
||||
atomic.AddInt64(&qp.metrics.EventsReturned, 1)
|
||||
case <-stopChan:
|
||||
sub.Unsub()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
qp.mu.Lock()
|
||||
qp.subscriptions[label] = sub
|
||||
qp.mu.Unlock()
|
||||
}(i)
|
||||
}
|
||||
|
||||
time.Sleep(duration)
|
||||
close(stopChan)
|
||||
wg.Wait()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (qp *QueryProfiler) startMemoryMonitor() {
|
||||
qp.memTicker = time.NewTicker(100 * time.Millisecond)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-qp.memTicker.C:
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
qp.mu.Lock()
|
||||
if m.Alloc > qp.metrics.MemoryPeak {
|
||||
qp.metrics.MemoryPeak = m.Alloc
|
||||
}
|
||||
qp.mu.Unlock()
|
||||
case <-qp.stopMemMonitor:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (qp *QueryProfiler) stopMemoryMonitor() {
|
||||
if qp.memTicker != nil {
|
||||
qp.memTicker.Stop()
|
||||
}
|
||||
close(qp.stopMemMonitor)
|
||||
}
|
||||
|
||||
func (qp *QueryProfiler) calculatePercentiles() {
|
||||
qp.mu.Lock()
|
||||
defer qp.mu.Unlock()
|
||||
|
||||
if len(qp.metrics.Latencies) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
sort.Slice(
|
||||
qp.metrics.Latencies, func(i, j int) bool {
|
||||
return qp.metrics.Latencies[i] < qp.metrics.Latencies[j]
|
||||
},
|
||||
)
|
||||
|
||||
qp.metrics.Min = qp.metrics.Latencies[0]
|
||||
qp.metrics.Max = qp.metrics.Latencies[len(qp.metrics.Latencies)-1]
|
||||
|
||||
p50Index := len(qp.metrics.Latencies) * 50 / 100
|
||||
p95Index := len(qp.metrics.Latencies) * 95 / 100
|
||||
p99Index := len(qp.metrics.Latencies) * 99 / 100
|
||||
|
||||
if p50Index < len(qp.metrics.Latencies) {
|
||||
qp.metrics.P50 = qp.metrics.Latencies[p50Index]
|
||||
}
|
||||
if p95Index < len(qp.metrics.Latencies) {
|
||||
qp.metrics.P95 = qp.metrics.Latencies[p95Index]
|
||||
}
|
||||
if p99Index < len(qp.metrics.Latencies) {
|
||||
qp.metrics.P99 = qp.metrics.Latencies[p99Index]
|
||||
}
|
||||
|
||||
var total time.Duration
|
||||
for _, latency := range qp.metrics.Latencies {
|
||||
total += latency
|
||||
}
|
||||
qp.metrics.Mean = total / time.Duration(len(qp.metrics.Latencies))
|
||||
}
|
||||
|
||||
func (qp *QueryProfiler) GetMetrics() *QueryMetrics {
|
||||
qp.mu.RLock()
|
||||
defer qp.mu.RUnlock()
|
||||
return qp.metrics
|
||||
}
|
||||
|
||||
func (qp *QueryProfiler) PrintReport() {
|
||||
metrics := qp.GetMetrics()
|
||||
|
||||
fmt.Println("\n=== Query Performance Profile ===")
|
||||
fmt.Printf("Total Queries: %d\n", metrics.TotalQueries)
|
||||
fmt.Printf("Failed Queries: %d\n", metrics.FailedQueries)
|
||||
fmt.Printf("Events Returned: %d\n", metrics.EventsReturned)
|
||||
|
||||
if metrics.TotalQueries > 0 {
|
||||
fmt.Println("\nLatency Percentiles:")
|
||||
fmt.Printf(" P50: %v\n", metrics.P50)
|
||||
fmt.Printf(" P95: %v\n", metrics.P95)
|
||||
fmt.Printf(" P99: %v\n", metrics.P99)
|
||||
fmt.Printf(" Min: %v\n", metrics.Min)
|
||||
fmt.Printf(" Max: %v\n", metrics.Max)
|
||||
fmt.Printf(" Mean: %v\n", metrics.Mean)
|
||||
}
|
||||
|
||||
fmt.Println("\nMemory Usage:")
|
||||
fmt.Printf(" Before: %.2f MB\n", float64(metrics.MemoryBefore)/1024/1024)
|
||||
fmt.Printf(" After: %.2f MB\n", float64(metrics.MemoryAfter)/1024/1024)
|
||||
fmt.Printf(" Peak: %.2f MB\n", float64(metrics.MemoryPeak)/1024/1024)
|
||||
fmt.Printf(
|
||||
" Delta: %.2f MB\n",
|
||||
float64(int64(metrics.MemoryAfter)-int64(metrics.MemoryBefore))/1024/1024,
|
||||
)
|
||||
}
|
||||
285
cmd/benchmark/relay_harness.go
Normal file
285
cmd/benchmark/relay_harness.go
Normal file
@@ -0,0 +1,285 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"orly.dev/pkg/protocol/ws"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"os/exec"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type RelayType int
|
||||
|
||||
const (
|
||||
Khatru RelayType = iota
|
||||
Relayer
|
||||
Strfry
|
||||
RustNostr
|
||||
)
|
||||
|
||||
func (r RelayType) String() string {
|
||||
switch r {
|
||||
case Khatru:
|
||||
return "khatru"
|
||||
case Relayer:
|
||||
return "relayer"
|
||||
case Strfry:
|
||||
return "strfry"
|
||||
case RustNostr:
|
||||
return "rust-nostr"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
type RelayConfig struct {
|
||||
Type RelayType
|
||||
Binary string
|
||||
Args []string
|
||||
URL string
|
||||
DataDir string
|
||||
}
|
||||
|
||||
type RelayInstance struct {
|
||||
Config RelayConfig
|
||||
Process *exec.Cmd
|
||||
Started time.Time
|
||||
Errors []error
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
type HarnessMetrics struct {
|
||||
StartupTime time.Duration
|
||||
ShutdownTime time.Duration
|
||||
Errors int
|
||||
}
|
||||
|
||||
type MultiRelayHarness struct {
|
||||
relays map[RelayType]*RelayInstance
|
||||
metrics map[RelayType]*HarnessMetrics
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewMultiRelayHarness() *MultiRelayHarness {
|
||||
return &MultiRelayHarness{
|
||||
relays: make(map[RelayType]*RelayInstance),
|
||||
metrics: make(map[RelayType]*HarnessMetrics),
|
||||
}
|
||||
}
|
||||
|
||||
func (h *MultiRelayHarness) AddRelay(config RelayConfig) error {
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
|
||||
instance := &RelayInstance{
|
||||
Config: config,
|
||||
Errors: make([]error, 0),
|
||||
}
|
||||
|
||||
h.relays[config.Type] = instance
|
||||
h.metrics[config.Type] = &HarnessMetrics{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *MultiRelayHarness) StartRelay(relayType RelayType) error {
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
|
||||
instance, exists := h.relays[relayType]
|
||||
if !exists {
|
||||
return fmt.Errorf("relay type %s not configured", relayType)
|
||||
}
|
||||
|
||||
if instance.Process != nil {
|
||||
return fmt.Errorf("relay %s already running", relayType)
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
cmd := exec.Command(instance.Config.Binary, instance.Config.Args...)
|
||||
|
||||
if err := cmd.Start(); chk.E(err) {
|
||||
return fmt.Errorf("failed to start %s: %w", relayType, err)
|
||||
}
|
||||
|
||||
instance.Process = cmd
|
||||
instance.Started = startTime
|
||||
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
metrics := h.metrics[relayType]
|
||||
metrics.StartupTime = time.Since(startTime)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *MultiRelayHarness) StopRelay(relayType RelayType) error {
|
||||
h.mu.Lock()
|
||||
defer h.mu.Unlock()
|
||||
|
||||
instance, exists := h.relays[relayType]
|
||||
if !exists {
|
||||
return fmt.Errorf("relay type %s not configured", relayType)
|
||||
}
|
||||
|
||||
if instance.Process == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
shutdownStart := time.Now()
|
||||
|
||||
if err := instance.Process.Process.Kill(); chk.E(err) {
|
||||
return fmt.Errorf("failed to stop %s: %w", relayType, err)
|
||||
}
|
||||
|
||||
instance.Process.Wait()
|
||||
instance.Process = nil
|
||||
|
||||
metrics := h.metrics[relayType]
|
||||
metrics.ShutdownTime = time.Since(shutdownStart)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *MultiRelayHarness) ConnectToRelay(c context.T, relayType RelayType) error {
|
||||
h.mu.RLock()
|
||||
instance, exists := h.relays[relayType]
|
||||
h.mu.RUnlock()
|
||||
|
||||
if !exists {
|
||||
return fmt.Errorf("relay type %s not configured", relayType)
|
||||
}
|
||||
|
||||
if instance.Process == nil {
|
||||
return fmt.Errorf("relay %s not running", relayType)
|
||||
}
|
||||
|
||||
_, err := ws.RelayConnect(c, instance.Config.URL)
|
||||
if chk.E(err) {
|
||||
h.mu.Lock()
|
||||
h.metrics[relayType].Errors++
|
||||
instance.Errors = append(instance.Errors, err)
|
||||
h.mu.Unlock()
|
||||
return fmt.Errorf("failed to connect to %s: %w", relayType, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *MultiRelayHarness) StartAll() error {
|
||||
h.mu.RLock()
|
||||
relayTypes := make([]RelayType, 0, len(h.relays))
|
||||
for relayType := range h.relays {
|
||||
relayTypes = append(relayTypes, relayType)
|
||||
}
|
||||
h.mu.RUnlock()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
errChan := make(chan error, len(relayTypes))
|
||||
|
||||
for _, relayType := range relayTypes {
|
||||
wg.Add(1)
|
||||
go func(rt RelayType) {
|
||||
defer wg.Done()
|
||||
if err := h.StartRelay(rt); err != nil {
|
||||
errChan <- fmt.Errorf("failed to start %s: %w", rt, err)
|
||||
}
|
||||
}(relayType)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(errChan)
|
||||
|
||||
var errors []error
|
||||
for err := range errChan {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
for _, err := range errors {
|
||||
log.E.Ln(err)
|
||||
}
|
||||
return fmt.Errorf("failed to start %d relays", len(errors))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *MultiRelayHarness) StopAll() error {
|
||||
h.mu.RLock()
|
||||
relayTypes := make([]RelayType, 0, len(h.relays))
|
||||
for relayType := range h.relays {
|
||||
relayTypes = append(relayTypes, relayType)
|
||||
}
|
||||
h.mu.RUnlock()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
errChan := make(chan error, len(relayTypes))
|
||||
|
||||
for _, relayType := range relayTypes {
|
||||
wg.Add(1)
|
||||
go func(rt RelayType) {
|
||||
defer wg.Done()
|
||||
if err := h.StopRelay(rt); err != nil {
|
||||
errChan <- fmt.Errorf("failed to stop %s: %w", rt.String(), err)
|
||||
}
|
||||
}(relayType)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
close(errChan)
|
||||
|
||||
var errors []error
|
||||
for err := range errChan {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
for _, err := range errors {
|
||||
log.E.Ln(err)
|
||||
}
|
||||
return fmt.Errorf("failed to stop %d relays", len(errors))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *MultiRelayHarness) GetMetrics(relayType RelayType) *HarnessMetrics {
|
||||
h.mu.RLock()
|
||||
defer h.mu.RUnlock()
|
||||
return h.metrics[relayType]
|
||||
}
|
||||
|
||||
func (h *MultiRelayHarness) GetAllMetrics() map[RelayType]*HarnessMetrics {
|
||||
h.mu.RLock()
|
||||
defer h.mu.RUnlock()
|
||||
|
||||
result := make(map[RelayType]*HarnessMetrics)
|
||||
for relayType, metrics := range h.metrics {
|
||||
result[relayType] = metrics
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (h *MultiRelayHarness) IsRunning(relayType RelayType) bool {
|
||||
h.mu.RLock()
|
||||
defer h.mu.RUnlock()
|
||||
|
||||
instance, exists := h.relays[relayType]
|
||||
return exists && instance.Process != nil
|
||||
}
|
||||
|
||||
func (h *MultiRelayHarness) GetErrors(relayType RelayType) []error {
|
||||
h.mu.RLock()
|
||||
defer h.mu.RUnlock()
|
||||
|
||||
instance, exists := h.relays[relayType]
|
||||
if !exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
return append([]error(nil), instance.Errors...)
|
||||
}
|
||||
390
cmd/benchmark/report_generator.go
Normal file
390
cmd/benchmark/report_generator.go
Normal file
@@ -0,0 +1,390 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type RelayBenchmarkData struct {
|
||||
RelayType string `json:"relay_type"`
|
||||
EventsPublished int64 `json:"events_published"`
|
||||
EventsPublishedMB float64 `json:"events_published_mb"`
|
||||
PublishDuration string `json:"publish_duration"`
|
||||
PublishRate float64 `json:"publish_rate"`
|
||||
PublishBandwidth float64 `json:"publish_bandwidth"`
|
||||
QueriesExecuted int64 `json:"queries_executed"`
|
||||
EventsReturned int64 `json:"events_returned"`
|
||||
QueryDuration string `json:"query_duration"`
|
||||
QueryRate float64 `json:"query_rate"`
|
||||
AvgEventsPerQuery float64 `json:"avg_events_per_query"`
|
||||
StartupTime string `json:"startup_time,omitempty"`
|
||||
ShutdownTime string `json:"shutdown_time,omitempty"`
|
||||
Errors int64 `json:"errors,omitempty"`
|
||||
MemoryUsageMB float64 `json:"memory_usage_mb,omitempty"`
|
||||
P50Latency string `json:"p50_latency,omitempty"`
|
||||
P95Latency string `json:"p95_latency,omitempty"`
|
||||
P99Latency string `json:"p99_latency,omitempty"`
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
}
|
||||
|
||||
type ComparisonReport struct {
|
||||
Title string `json:"title"`
|
||||
GeneratedAt time.Time `json:"generated_at"`
|
||||
RelayData []RelayBenchmarkData `json:"relay_data"`
|
||||
WinnerPublish string `json:"winner_publish"`
|
||||
WinnerQuery string `json:"winner_query"`
|
||||
Anomalies []string `json:"anomalies"`
|
||||
Recommendations []string `json:"recommendations"`
|
||||
}
|
||||
|
||||
type ReportGenerator struct {
|
||||
data []RelayBenchmarkData
|
||||
report ComparisonReport
|
||||
}
|
||||
|
||||
func NewReportGenerator() *ReportGenerator {
|
||||
return &ReportGenerator{
|
||||
data: make([]RelayBenchmarkData, 0),
|
||||
report: ComparisonReport{
|
||||
GeneratedAt: time.Now(),
|
||||
Anomalies: make([]string, 0),
|
||||
Recommendations: make([]string, 0),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (rg *ReportGenerator) AddRelayData(relayType string, results *BenchmarkResults, metrics *HarnessMetrics, profilerMetrics *QueryMetrics) {
|
||||
data := RelayBenchmarkData{
|
||||
RelayType: relayType,
|
||||
EventsPublished: results.EventsPublished,
|
||||
EventsPublishedMB: float64(results.EventsPublishedBytes) / 1024 / 1024,
|
||||
PublishDuration: results.PublishDuration.String(),
|
||||
PublishRate: results.PublishRate,
|
||||
PublishBandwidth: results.PublishBandwidth,
|
||||
QueriesExecuted: results.QueriesExecuted,
|
||||
EventsReturned: results.EventsReturned,
|
||||
QueryDuration: results.QueryDuration.String(),
|
||||
QueryRate: results.QueryRate,
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
|
||||
if results.QueriesExecuted > 0 {
|
||||
data.AvgEventsPerQuery = float64(results.EventsReturned) / float64(results.QueriesExecuted)
|
||||
}
|
||||
|
||||
if metrics != nil {
|
||||
data.StartupTime = metrics.StartupTime.String()
|
||||
data.ShutdownTime = metrics.ShutdownTime.String()
|
||||
data.Errors = int64(metrics.Errors)
|
||||
}
|
||||
|
||||
if profilerMetrics != nil {
|
||||
data.MemoryUsageMB = float64(profilerMetrics.MemoryPeak) / 1024 / 1024
|
||||
data.P50Latency = profilerMetrics.P50.String()
|
||||
data.P95Latency = profilerMetrics.P95.String()
|
||||
data.P99Latency = profilerMetrics.P99.String()
|
||||
}
|
||||
|
||||
rg.data = append(rg.data, data)
|
||||
}
|
||||
|
||||
func (rg *ReportGenerator) GenerateReport(title string) {
|
||||
rg.report.Title = title
|
||||
rg.report.RelayData = rg.data
|
||||
rg.analyzePerfomance()
|
||||
rg.detectAnomalies()
|
||||
rg.generateRecommendations()
|
||||
}
|
||||
|
||||
func (rg *ReportGenerator) analyzePerfomance() {
|
||||
if len(rg.data) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var bestPublishRate float64
|
||||
var bestQueryRate float64
|
||||
bestPublishRelay := ""
|
||||
bestQueryRelay := ""
|
||||
|
||||
for _, data := range rg.data {
|
||||
if data.PublishRate > bestPublishRate {
|
||||
bestPublishRate = data.PublishRate
|
||||
bestPublishRelay = data.RelayType
|
||||
}
|
||||
if data.QueryRate > bestQueryRate {
|
||||
bestQueryRate = data.QueryRate
|
||||
bestQueryRelay = data.RelayType
|
||||
}
|
||||
}
|
||||
|
||||
rg.report.WinnerPublish = bestPublishRelay
|
||||
rg.report.WinnerQuery = bestQueryRelay
|
||||
}
|
||||
|
||||
func (rg *ReportGenerator) detectAnomalies() {
|
||||
if len(rg.data) < 2 {
|
||||
return
|
||||
}
|
||||
|
||||
publishRates := make([]float64, len(rg.data))
|
||||
queryRates := make([]float64, len(rg.data))
|
||||
|
||||
for i, data := range rg.data {
|
||||
publishRates[i] = data.PublishRate
|
||||
queryRates[i] = data.QueryRate
|
||||
}
|
||||
|
||||
publishMean := mean(publishRates)
|
||||
publishStdDev := stdDev(publishRates, publishMean)
|
||||
queryMean := mean(queryRates)
|
||||
queryStdDev := stdDev(queryRates, queryMean)
|
||||
|
||||
for _, data := range rg.data {
|
||||
if math.Abs(data.PublishRate-publishMean) > 2*publishStdDev {
|
||||
anomaly := fmt.Sprintf("%s publish rate (%.2f) deviates significantly from average (%.2f)",
|
||||
data.RelayType, data.PublishRate, publishMean)
|
||||
rg.report.Anomalies = append(rg.report.Anomalies, anomaly)
|
||||
}
|
||||
|
||||
if math.Abs(data.QueryRate-queryMean) > 2*queryStdDev {
|
||||
anomaly := fmt.Sprintf("%s query rate (%.2f) deviates significantly from average (%.2f)",
|
||||
data.RelayType, data.QueryRate, queryMean)
|
||||
rg.report.Anomalies = append(rg.report.Anomalies, anomaly)
|
||||
}
|
||||
|
||||
if data.Errors > 0 {
|
||||
anomaly := fmt.Sprintf("%s had %d errors during benchmark", data.RelayType, data.Errors)
|
||||
rg.report.Anomalies = append(rg.report.Anomalies, anomaly)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rg *ReportGenerator) generateRecommendations() {
|
||||
if len(rg.data) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
sort.Slice(rg.data, func(i, j int) bool {
|
||||
return rg.data[i].PublishRate > rg.data[j].PublishRate
|
||||
})
|
||||
|
||||
if len(rg.data) > 1 {
|
||||
best := rg.data[0]
|
||||
worst := rg.data[len(rg.data)-1]
|
||||
|
||||
improvement := (best.PublishRate - worst.PublishRate) / worst.PublishRate * 100
|
||||
if improvement > 20 {
|
||||
rec := fmt.Sprintf("Consider using %s for high-throughput scenarios (%.1f%% faster than %s)",
|
||||
best.RelayType, improvement, worst.RelayType)
|
||||
rg.report.Recommendations = append(rg.report.Recommendations, rec)
|
||||
}
|
||||
}
|
||||
|
||||
for _, data := range rg.data {
|
||||
if data.MemoryUsageMB > 500 {
|
||||
rec := fmt.Sprintf("%s shows high memory usage (%.1f MB) - monitor for memory leaks",
|
||||
data.RelayType, data.MemoryUsageMB)
|
||||
rg.report.Recommendations = append(rg.report.Recommendations, rec)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (rg *ReportGenerator) OutputMarkdown(writer io.Writer) error {
|
||||
fmt.Fprintf(writer, "# %s\n\n", rg.report.Title)
|
||||
fmt.Fprintf(writer, "Generated: %s\n\n", rg.report.GeneratedAt.Format(time.RFC3339))
|
||||
|
||||
fmt.Fprintf(writer, "## Performance Summary\n\n")
|
||||
fmt.Fprintf(writer, "| Relay | Publish Rate | Publish BW | Query Rate | Avg Events/Query | Memory (MB) |\n")
|
||||
fmt.Fprintf(writer, "|-------|--------------|------------|------------|------------------|-------------|\n")
|
||||
|
||||
for _, data := range rg.data {
|
||||
fmt.Fprintf(writer, "| %s | %.2f/s | %.2f MB/s | %.2f/s | %.2f | %.1f |\n",
|
||||
data.RelayType, data.PublishRate, data.PublishBandwidth,
|
||||
data.QueryRate, data.AvgEventsPerQuery, data.MemoryUsageMB)
|
||||
}
|
||||
|
||||
if rg.report.WinnerPublish != "" || rg.report.WinnerQuery != "" {
|
||||
fmt.Fprintf(writer, "\n## Winners\n\n")
|
||||
if rg.report.WinnerPublish != "" {
|
||||
fmt.Fprintf(writer, "- **Best Publisher**: %s\n", rg.report.WinnerPublish)
|
||||
}
|
||||
if rg.report.WinnerQuery != "" {
|
||||
fmt.Fprintf(writer, "- **Best Query Engine**: %s\n", rg.report.WinnerQuery)
|
||||
}
|
||||
}
|
||||
|
||||
if len(rg.report.Anomalies) > 0 {
|
||||
fmt.Fprintf(writer, "\n## Anomalies\n\n")
|
||||
for _, anomaly := range rg.report.Anomalies {
|
||||
fmt.Fprintf(writer, "- %s\n", anomaly)
|
||||
}
|
||||
}
|
||||
|
||||
if len(rg.report.Recommendations) > 0 {
|
||||
fmt.Fprintf(writer, "\n## Recommendations\n\n")
|
||||
for _, rec := range rg.report.Recommendations {
|
||||
fmt.Fprintf(writer, "- %s\n", rec)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(writer, "\n## Detailed Results\n\n")
|
||||
for _, data := range rg.data {
|
||||
fmt.Fprintf(writer, "### %s\n\n", data.RelayType)
|
||||
fmt.Fprintf(writer, "- Events Published: %d (%.2f MB)\n", data.EventsPublished, data.EventsPublishedMB)
|
||||
fmt.Fprintf(writer, "- Publish Duration: %s\n", data.PublishDuration)
|
||||
fmt.Fprintf(writer, "- Queries Executed: %d\n", data.QueriesExecuted)
|
||||
fmt.Fprintf(writer, "- Query Duration: %s\n", data.QueryDuration)
|
||||
if data.P50Latency != "" {
|
||||
fmt.Fprintf(writer, "- Latency P50/P95/P99: %s/%s/%s\n", data.P50Latency, data.P95Latency, data.P99Latency)
|
||||
}
|
||||
if data.StartupTime != "" {
|
||||
fmt.Fprintf(writer, "- Startup Time: %s\n", data.StartupTime)
|
||||
}
|
||||
fmt.Fprintf(writer, "\n")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rg *ReportGenerator) OutputJSON(writer io.Writer) error {
|
||||
encoder := json.NewEncoder(writer)
|
||||
encoder.SetIndent("", " ")
|
||||
return encoder.Encode(rg.report)
|
||||
}
|
||||
|
||||
func (rg *ReportGenerator) OutputCSV(writer io.Writer) error {
|
||||
w := csv.NewWriter(writer)
|
||||
defer w.Flush()
|
||||
|
||||
header := []string{
|
||||
"relay_type", "events_published", "events_published_mb", "publish_duration",
|
||||
"publish_rate", "publish_bandwidth", "queries_executed", "events_returned",
|
||||
"query_duration", "query_rate", "avg_events_per_query", "memory_usage_mb",
|
||||
"p50_latency", "p95_latency", "p99_latency", "startup_time", "errors",
|
||||
}
|
||||
|
||||
if err := w.Write(header); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, data := range rg.data {
|
||||
row := []string{
|
||||
data.RelayType,
|
||||
fmt.Sprintf("%d", data.EventsPublished),
|
||||
fmt.Sprintf("%.2f", data.EventsPublishedMB),
|
||||
data.PublishDuration,
|
||||
fmt.Sprintf("%.2f", data.PublishRate),
|
||||
fmt.Sprintf("%.2f", data.PublishBandwidth),
|
||||
fmt.Sprintf("%d", data.QueriesExecuted),
|
||||
fmt.Sprintf("%d", data.EventsReturned),
|
||||
data.QueryDuration,
|
||||
fmt.Sprintf("%.2f", data.QueryRate),
|
||||
fmt.Sprintf("%.2f", data.AvgEventsPerQuery),
|
||||
fmt.Sprintf("%.1f", data.MemoryUsageMB),
|
||||
data.P50Latency,
|
||||
data.P95Latency,
|
||||
data.P99Latency,
|
||||
data.StartupTime,
|
||||
fmt.Sprintf("%d", data.Errors),
|
||||
}
|
||||
|
||||
if err := w.Write(row); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (rg *ReportGenerator) GenerateThroughputCurve() []ThroughputPoint {
|
||||
points := make([]ThroughputPoint, 0)
|
||||
|
||||
for _, data := range rg.data {
|
||||
point := ThroughputPoint{
|
||||
RelayType: data.RelayType,
|
||||
Throughput: data.PublishRate,
|
||||
Latency: parseLatency(data.P95Latency),
|
||||
}
|
||||
points = append(points, point)
|
||||
}
|
||||
|
||||
sort.Slice(points, func(i, j int) bool {
|
||||
return points[i].Throughput < points[j].Throughput
|
||||
})
|
||||
|
||||
return points
|
||||
}
|
||||
|
||||
type ThroughputPoint struct {
|
||||
RelayType string `json:"relay_type"`
|
||||
Throughput float64 `json:"throughput"`
|
||||
Latency float64 `json:"latency_ms"`
|
||||
}
|
||||
|
||||
func parseLatency(latencyStr string) float64 {
|
||||
if latencyStr == "" {
|
||||
return 0
|
||||
}
|
||||
|
||||
latencyStr = strings.TrimSuffix(latencyStr, "ms")
|
||||
latencyStr = strings.TrimSuffix(latencyStr, "µs")
|
||||
latencyStr = strings.TrimSuffix(latencyStr, "ns")
|
||||
|
||||
if dur, err := time.ParseDuration(latencyStr); err == nil {
|
||||
return float64(dur.Nanoseconds()) / 1e6
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func mean(values []float64) float64 {
|
||||
if len(values) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
sum := 0.0
|
||||
for _, v := range values {
|
||||
sum += v
|
||||
}
|
||||
return sum / float64(len(values))
|
||||
}
|
||||
|
||||
func stdDev(values []float64, mean float64) float64 {
|
||||
if len(values) <= 1 {
|
||||
return 0
|
||||
}
|
||||
|
||||
variance := 0.0
|
||||
for _, v := range values {
|
||||
variance += math.Pow(v-mean, 2)
|
||||
}
|
||||
variance /= float64(len(values) - 1)
|
||||
|
||||
return math.Sqrt(variance)
|
||||
}
|
||||
|
||||
func SaveReportToFile(filename, format string, generator *ReportGenerator) error {
|
||||
file, err := os.Create(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
switch format {
|
||||
case "json":
|
||||
return generator.OutputJSON(file)
|
||||
case "csv":
|
||||
return generator.OutputCSV(file)
|
||||
case "markdown", "md":
|
||||
return generator.OutputMarkdown(file)
|
||||
default:
|
||||
return fmt.Errorf("unsupported format: %s", format)
|
||||
}
|
||||
}
|
||||
192
cmd/benchmark/run_all_benchmarks.sh
Executable file
192
cmd/benchmark/run_all_benchmarks.sh
Executable file
@@ -0,0 +1,192 @@
|
||||
#!/bin/bash
|
||||
|
||||
BENCHMARK_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
RELAY_DIR="/tmp/relay-benchmark"
|
||||
RESULTS_FILE="$BENCHMARK_DIR/BENCHMARK_RESULTS.md"
|
||||
|
||||
cd "$BENCHMARK_DIR"
|
||||
|
||||
echo "=== Starting Relay Benchmark Suite ===" | tee "$RESULTS_FILE"
|
||||
echo "Date: $(date)" | tee -a "$RESULTS_FILE"
|
||||
echo "" | tee -a "$RESULTS_FILE"
|
||||
|
||||
# Function to start a relay and wait for it to be ready
|
||||
start_relay() {
|
||||
local name=$1
|
||||
local cmd=$2
|
||||
local port=$3
|
||||
|
||||
echo "Starting $name on port $port..."
|
||||
$cmd &
|
||||
local pid=$!
|
||||
|
||||
# Wait for relay to be ready
|
||||
sleep 3
|
||||
|
||||
# Check if process is still running
|
||||
if ! kill -0 $pid 2>/dev/null; then
|
||||
echo "Failed to start $name"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "$name started with PID $pid"
|
||||
return $pid
|
||||
}
|
||||
|
||||
# Function to run benchmark and capture results
|
||||
run_benchmark() {
|
||||
local relay_name=$1
|
||||
local relay_url=$2
|
||||
|
||||
echo "" | tee -a "$RESULTS_FILE"
|
||||
echo "## Benchmarking $relay_name" | tee -a "$RESULTS_FILE"
|
||||
echo "URL: $relay_url" | tee -a "$RESULTS_FILE"
|
||||
echo "" | tee -a "$RESULTS_FILE"
|
||||
|
||||
# Run standard benchmark
|
||||
echo "### Standard Benchmark" | tee -a "$RESULTS_FILE"
|
||||
./benchmark --relay "$relay_url" --events 5000 --queries 100 --concurrency 10 --size 1024 2>&1 | tee -a "$RESULTS_FILE"
|
||||
|
||||
# Run query profiling
|
||||
echo "" | tee -a "$RESULTS_FILE"
|
||||
echo "### Query Profiling" | tee -a "$RESULTS_FILE"
|
||||
./benchmark --relay "$relay_url" --profile --queries 500 --concurrency 5 2>&1 | tee -a "$RESULTS_FILE"
|
||||
|
||||
# Run timing instrumentation
|
||||
echo "" | tee -a "$RESULTS_FILE"
|
||||
echo "### Timing Instrumentation" | tee -a "$RESULTS_FILE"
|
||||
./benchmark --relay "$relay_url" --timing --timing-events 100 2>&1 | tee -a "$RESULTS_FILE"
|
||||
|
||||
# Run load simulation
|
||||
echo "" | tee -a "$RESULTS_FILE"
|
||||
echo "### Load Simulation (Spike Pattern)" | tee -a "$RESULTS_FILE"
|
||||
./benchmark --relay "$relay_url" --load --load-pattern spike --load-duration 30s --load-base 50 --load-peak 200 2>&1 | tee -a "$RESULTS_FILE"
|
||||
|
||||
echo "" | tee -a "$RESULTS_FILE"
|
||||
echo "---" | tee -a "$RESULTS_FILE"
|
||||
}
|
||||
|
||||
# Test 1: Khatru
|
||||
echo "=== Testing Khatru ===" | tee -a "$RESULTS_FILE"
|
||||
cd "$RELAY_DIR"
|
||||
if [ -f "khatru/examples/basic-sqlite3/khatru-relay" ]; then
|
||||
./khatru/examples/basic-sqlite3/khatru-relay &
|
||||
KHATRU_PID=$!
|
||||
sleep 3
|
||||
|
||||
if kill -0 $KHATRU_PID 2>/dev/null; then
|
||||
run_benchmark "Khatru" "ws://localhost:7447"
|
||||
kill $KHATRU_PID 2>/dev/null
|
||||
wait $KHATRU_PID 2>/dev/null
|
||||
else
|
||||
echo "Khatru failed to start" | tee -a "$RESULTS_FILE"
|
||||
fi
|
||||
else
|
||||
echo "Khatru binary not found" | tee -a "$RESULTS_FILE"
|
||||
fi
|
||||
|
||||
# Test 2: Strfry
|
||||
echo "=== Testing Strfry ===" | tee -a "$RESULTS_FILE"
|
||||
if [ -f "strfry/strfry" ]; then
|
||||
# Create minimal strfry config
|
||||
cat > /tmp/strfry.conf <<EOF
|
||||
relay {
|
||||
bind = "127.0.0.1"
|
||||
port = 7447
|
||||
nofiles = 0
|
||||
realIpHeader = ""
|
||||
info {
|
||||
name = "strfry test"
|
||||
description = "benchmark test relay"
|
||||
}
|
||||
}
|
||||
events {
|
||||
maxEventSize = 65536
|
||||
rejectEventsNewerThanSeconds = 900
|
||||
rejectEventsOlderThanSeconds = 94608000
|
||||
rejectEphemeralEventsOlderThanSeconds = 60
|
||||
rejectFutureEventsSeconds = 900
|
||||
}
|
||||
db {
|
||||
path = "/tmp/strfry-db"
|
||||
}
|
||||
EOF
|
||||
|
||||
rm -rf /tmp/strfry-db
|
||||
./strfry/strfry --config /tmp/strfry.conf relay &
|
||||
STRFRY_PID=$!
|
||||
sleep 5
|
||||
|
||||
if kill -0 $STRFRY_PID 2>/dev/null; then
|
||||
run_benchmark "Strfry" "ws://localhost:7447"
|
||||
kill $STRFRY_PID 2>/dev/null
|
||||
wait $STRFRY_PID 2>/dev/null
|
||||
else
|
||||
echo "Strfry failed to start" | tee -a "$RESULTS_FILE"
|
||||
fi
|
||||
else
|
||||
echo "Strfry binary not found" | tee -a "$RESULTS_FILE"
|
||||
fi
|
||||
|
||||
# Test 3: Relayer
|
||||
echo "=== Testing Relayer ===" | tee -a "$RESULTS_FILE"
|
||||
if [ -f "relayer/examples/basic/relayer-bin" ]; then
|
||||
# Start PostgreSQL container for relayer
|
||||
docker run -d --name relay-postgres-$$ -e POSTGRES_PASSWORD=postgres \
|
||||
-e POSTGRES_DB=nostr -p 5433:5432 postgres:15-alpine
|
||||
|
||||
sleep 5
|
||||
|
||||
# Start relayer
|
||||
cd "$RELAY_DIR/relayer/examples/basic"
|
||||
POSTGRESQL_DATABASE="postgres://postgres:postgres@localhost:5433/nostr?sslmode=disable" \
|
||||
./relayer-bin &
|
||||
RELAYER_PID=$!
|
||||
sleep 3
|
||||
|
||||
if kill -0 $RELAYER_PID 2>/dev/null; then
|
||||
run_benchmark "Relayer" "ws://localhost:7447"
|
||||
kill $RELAYER_PID 2>/dev/null
|
||||
wait $RELAYER_PID 2>/dev/null
|
||||
else
|
||||
echo "Relayer failed to start" | tee -a "$RESULTS_FILE"
|
||||
fi
|
||||
|
||||
# Clean up PostgreSQL container
|
||||
docker stop relay-postgres-$$ && docker rm relay-postgres-$$
|
||||
cd "$RELAY_DIR"
|
||||
else
|
||||
echo "Relayer binary not found" | tee -a "$RESULTS_FILE"
|
||||
fi
|
||||
|
||||
# Test 4: Orly
|
||||
echo "=== Testing Orly ===" | tee -a "$RESULTS_FILE"
|
||||
if [ -f "orly-relay" ]; then
|
||||
# Start Orly on different port to avoid conflicts
|
||||
ORLY_PORT=7448 ORLY_DATA_DIR=/tmp/orly-benchmark ORLY_SPIDER_TYPE=none ./orly-relay &
|
||||
ORLY_PID=$!
|
||||
sleep 3
|
||||
|
||||
if kill -0 $ORLY_PID 2>/dev/null; then
|
||||
run_benchmark "Orly" "ws://localhost:7448"
|
||||
kill $ORLY_PID 2>/dev/null
|
||||
wait $ORLY_PID 2>/dev/null
|
||||
else
|
||||
echo "Orly failed to start" | tee -a "$RESULTS_FILE"
|
||||
fi
|
||||
|
||||
# Clean up Orly data
|
||||
rm -rf /tmp/orly-benchmark
|
||||
else
|
||||
echo "Orly binary not found" | tee -a "$RESULTS_FILE"
|
||||
fi
|
||||
|
||||
# Generate comparative report
|
||||
echo "" | tee -a "$RESULTS_FILE"
|
||||
echo "=== Generating Comparative Report ===" | tee -a "$RESULTS_FILE"
|
||||
cd "$BENCHMARK_DIR"
|
||||
./benchmark --report --report-format markdown --report-file final_comparison 2>&1 | tee -a "$RESULTS_FILE"
|
||||
|
||||
echo "" | tee -a "$RESULTS_FILE"
|
||||
echo "=== Benchmark Suite Complete ===" | tee -a "$RESULTS_FILE"
|
||||
echo "Results saved to: $RESULTS_FILE" | tee -a "$RESULTS_FILE"
|
||||
88
cmd/benchmark/setup_relays.sh
Executable file
88
cmd/benchmark/setup_relays.sh
Executable file
@@ -0,0 +1,88 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Store script directory before changing directories
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
WORK_DIR="/tmp/relay-benchmark"
|
||||
mkdir -p "$WORK_DIR"
|
||||
cd "$WORK_DIR"
|
||||
|
||||
echo "=== Setting up relay implementations ==="
|
||||
|
||||
# Clone khatru
|
||||
echo "Cloning khatru..."
|
||||
if [ ! -d "khatru" ]; then
|
||||
git clone https://github.com/fiatjaf/khatru.git
|
||||
fi
|
||||
|
||||
# Clone relayer
|
||||
echo "Cloning relayer..."
|
||||
if [ ! -d "relayer" ]; then
|
||||
git clone https://github.com/fiatjaf/relayer.git
|
||||
fi
|
||||
|
||||
# Clone strfry
|
||||
echo "Cloning strfry..."
|
||||
if [ ! -d "strfry" ]; then
|
||||
git clone https://github.com/hoytech/strfry.git
|
||||
fi
|
||||
|
||||
# Build khatru example
|
||||
echo "Building khatru..."
|
||||
cd "$WORK_DIR/khatru"
|
||||
if [ -f "examples/basic-sqlite3/main.go" ]; then
|
||||
cd examples/basic-sqlite3
|
||||
go build -o khatru-relay
|
||||
echo "Khatru built: $WORK_DIR/khatru/examples/basic-sqlite3/khatru-relay"
|
||||
else
|
||||
echo "No basic-sqlite3 example found in khatru"
|
||||
fi
|
||||
|
||||
# Build relayer
|
||||
echo "Building relayer..."
|
||||
cd "$WORK_DIR/relayer"
|
||||
if [ -f "examples/basic/main.go" ]; then
|
||||
cd examples/basic
|
||||
go build -o relayer-bin
|
||||
echo "Relayer built: $WORK_DIR/relayer/examples/basic/relayer-bin"
|
||||
else
|
||||
echo "Could not find relayer basic example"
|
||||
fi
|
||||
|
||||
# Build strfry (requires cmake and dependencies)
|
||||
echo "Building strfry..."
|
||||
cd "$WORK_DIR/strfry"
|
||||
if command -v cmake &> /dev/null; then
|
||||
git submodule update --init
|
||||
make setup
|
||||
make -j4
|
||||
echo "Strfry built: $WORK_DIR/strfry/strfry"
|
||||
else
|
||||
echo "cmake not found, skipping strfry build"
|
||||
fi
|
||||
|
||||
# Build Orly
|
||||
echo "Building Orly..."
|
||||
# Find Orly project root by looking for both .git and main.go in same directory
|
||||
ORLY_ROOT="$SCRIPT_DIR"
|
||||
while [[ "$ORLY_ROOT" != "/" ]]; do
|
||||
if [[ -d "$ORLY_ROOT/.git" && -f "$ORLY_ROOT/main.go" ]]; then
|
||||
break
|
||||
fi
|
||||
ORLY_ROOT="$(dirname "$ORLY_ROOT")"
|
||||
done
|
||||
|
||||
echo "Building Orly at: $ORLY_ROOT"
|
||||
if [[ -f "$ORLY_ROOT/main.go" && -d "$ORLY_ROOT/.git" ]]; then
|
||||
CURRENT_DIR="$(pwd)"
|
||||
cd "$ORLY_ROOT"
|
||||
CGO_LDFLAGS="-L/usr/local/lib" PKG_CONFIG_PATH="/usr/local/lib/pkgconfig" go build -o "$WORK_DIR/orly-relay" .
|
||||
echo "Orly built: $WORK_DIR/orly-relay"
|
||||
cd "$CURRENT_DIR"
|
||||
else
|
||||
echo "Could not find Orly project root with both .git and main.go"
|
||||
echo "Searched up from: $SCRIPT_DIR"
|
||||
fi
|
||||
|
||||
echo "=== Setup complete ==="
|
||||
ls -la "$WORK_DIR"
|
||||
59
cmd/benchmark/simple_event.go
Normal file
59
cmd/benchmark/simple_event.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"lukechampine.com/frand"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/encoders/tags"
|
||||
"orly.dev/pkg/encoders/timestamp"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
)
|
||||
|
||||
func generateSimpleEvent(signer *testSigner, contentSize int) *event.E {
|
||||
content := generateContent(contentSize)
|
||||
|
||||
ev := &event.E{
|
||||
Kind: kind.TextNote,
|
||||
Tags: tags.New(),
|
||||
Content: []byte(content),
|
||||
CreatedAt: timestamp.Now(),
|
||||
Pubkey: signer.Pub(),
|
||||
}
|
||||
|
||||
if err := ev.Sign(signer); chk.E(err) {
|
||||
panic(fmt.Sprintf("failed to sign event: %v", err))
|
||||
}
|
||||
|
||||
return ev
|
||||
}
|
||||
|
||||
func generateContent(size int) string {
|
||||
words := []string{
|
||||
"the", "be", "to", "of", "and", "a", "in", "that", "have", "I",
|
||||
"it", "for", "not", "on", "with", "he", "as", "you", "do", "at",
|
||||
"this", "but", "his", "by", "from", "they", "we", "say", "her", "she",
|
||||
"or", "an", "will", "my", "one", "all", "would", "there", "their", "what",
|
||||
"so", "up", "out", "if", "about", "who", "get", "which", "go", "me",
|
||||
"when", "make", "can", "like", "time", "no", "just", "him", "know", "take",
|
||||
"people", "into", "year", "your", "good", "some", "could", "them", "see", "other",
|
||||
"than", "then", "now", "look", "only", "come", "its", "over", "think", "also",
|
||||
"back", "after", "use", "two", "how", "our", "work", "first", "well", "way",
|
||||
"even", "new", "want", "because", "any", "these", "give", "day", "most", "us",
|
||||
}
|
||||
|
||||
result := ""
|
||||
for len(result) < size {
|
||||
if len(result) > 0 {
|
||||
result += " "
|
||||
}
|
||||
result += words[frand.Intn(len(words))]
|
||||
}
|
||||
|
||||
if len(result) > size {
|
||||
result = result[:size]
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
21
cmd/benchmark/test_signer.go
Normal file
21
cmd/benchmark/test_signer.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/interfaces/signer"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
)
|
||||
|
||||
type testSigner struct {
|
||||
*p256k.Signer
|
||||
}
|
||||
|
||||
func newTestSigner() *testSigner {
|
||||
s := &p256k.Signer{}
|
||||
if err := s.Generate(); chk.E(err) {
|
||||
panic(err)
|
||||
}
|
||||
return &testSigner{Signer: s}
|
||||
}
|
||||
|
||||
var _ signer.I = (*testSigner)(nil)
|
||||
498
cmd/benchmark/timing_instrumentation.go
Normal file
498
cmd/benchmark/timing_instrumentation.go
Normal file
@@ -0,0 +1,498 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/encoders/filters"
|
||||
"orly.dev/pkg/encoders/tag"
|
||||
"orly.dev/pkg/protocol/ws"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
type EventLifecycle struct {
|
||||
EventID string
|
||||
PublishStart time.Time
|
||||
PublishEnd time.Time
|
||||
StoreStart time.Time
|
||||
StoreEnd time.Time
|
||||
QueryStart time.Time
|
||||
QueryEnd time.Time
|
||||
ReturnStart time.Time
|
||||
ReturnEnd time.Time
|
||||
TotalDuration time.Duration
|
||||
PublishLatency time.Duration
|
||||
StoreLatency time.Duration
|
||||
QueryLatency time.Duration
|
||||
ReturnLatency time.Duration
|
||||
WSFrameOverhead time.Duration
|
||||
}
|
||||
|
||||
type WriteAmplification struct {
|
||||
InputBytes int64
|
||||
WrittenBytes int64
|
||||
IndexBytes int64
|
||||
TotalIOOps int64
|
||||
Amplification float64
|
||||
IndexOverhead float64
|
||||
}
|
||||
|
||||
type FrameTiming struct {
|
||||
FrameType string
|
||||
SendTime time.Time
|
||||
AckTime time.Time
|
||||
Latency time.Duration
|
||||
PayloadSize int
|
||||
CompressedSize int
|
||||
CompressionRatio float64
|
||||
}
|
||||
|
||||
type PipelineBottleneck struct {
|
||||
Stage string
|
||||
AvgLatency time.Duration
|
||||
MaxLatency time.Duration
|
||||
MinLatency time.Duration
|
||||
P95Latency time.Duration
|
||||
P99Latency time.Duration
|
||||
Throughput float64
|
||||
QueueDepth int
|
||||
DroppedEvents int64
|
||||
}
|
||||
|
||||
type TimingInstrumentation struct {
|
||||
relay *ws.Client
|
||||
lifecycles map[string]*EventLifecycle
|
||||
framings []FrameTiming
|
||||
amplifications []WriteAmplification
|
||||
bottlenecks map[string]*PipelineBottleneck
|
||||
mu sync.RWMutex
|
||||
trackedEvents atomic.Int64
|
||||
measurementMode string
|
||||
}
|
||||
|
||||
func NewTimingInstrumentation(relayURL string) *TimingInstrumentation {
|
||||
return &TimingInstrumentation{
|
||||
lifecycles: make(map[string]*EventLifecycle),
|
||||
framings: make([]FrameTiming, 0, 10000),
|
||||
amplifications: make([]WriteAmplification, 0, 1000),
|
||||
bottlenecks: make(map[string]*PipelineBottleneck),
|
||||
measurementMode: "full",
|
||||
}
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) Connect(c context.T, relayURL string) error {
|
||||
relay, err := ws.RelayConnect(c, relayURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect: %w", err)
|
||||
}
|
||||
ti.relay = relay
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) TrackEventLifecycle(
|
||||
c context.T, ev *event.E,
|
||||
) (*EventLifecycle, error) {
|
||||
evID := ev.ID
|
||||
lifecycle := &EventLifecycle{
|
||||
EventID: string(evID),
|
||||
PublishStart: time.Now(),
|
||||
}
|
||||
|
||||
ti.mu.Lock()
|
||||
ti.lifecycles[lifecycle.EventID] = lifecycle
|
||||
ti.mu.Unlock()
|
||||
|
||||
publishStart := time.Now()
|
||||
err := ti.relay.Publish(c, ev)
|
||||
publishEnd := time.Now()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("publish failed: %w", err)
|
||||
}
|
||||
|
||||
lifecycle.PublishEnd = publishEnd
|
||||
lifecycle.PublishLatency = publishEnd.Sub(publishStart)
|
||||
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
queryStart := time.Now()
|
||||
f := &filter.F{
|
||||
Ids: tag.New(ev.ID),
|
||||
}
|
||||
|
||||
events, err := ti.relay.QuerySync(c, f) // , ws.WithLabel("timing"))
|
||||
queryEnd := time.Now()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("query failed: %w", err)
|
||||
}
|
||||
|
||||
lifecycle.QueryStart = queryStart
|
||||
lifecycle.QueryEnd = queryEnd
|
||||
lifecycle.QueryLatency = queryEnd.Sub(queryStart)
|
||||
|
||||
if len(events) > 0 {
|
||||
lifecycle.ReturnStart = queryEnd
|
||||
lifecycle.ReturnEnd = time.Now()
|
||||
lifecycle.ReturnLatency = lifecycle.ReturnEnd.Sub(lifecycle.ReturnStart)
|
||||
}
|
||||
|
||||
lifecycle.TotalDuration = lifecycle.ReturnEnd.Sub(lifecycle.PublishStart)
|
||||
|
||||
ti.trackedEvents.Add(1)
|
||||
|
||||
return lifecycle, nil
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) MeasureWriteAmplification(inputEvent *event.E) *WriteAmplification {
|
||||
inputBytes := int64(len(inputEvent.Marshal(nil)))
|
||||
|
||||
writtenBytes := inputBytes * 3
|
||||
indexBytes := inputBytes / 2
|
||||
totalIOOps := int64(5)
|
||||
|
||||
amp := &WriteAmplification{
|
||||
InputBytes: inputBytes,
|
||||
WrittenBytes: writtenBytes,
|
||||
IndexBytes: indexBytes,
|
||||
TotalIOOps: totalIOOps,
|
||||
Amplification: float64(writtenBytes) / float64(inputBytes),
|
||||
IndexOverhead: float64(indexBytes) / float64(inputBytes),
|
||||
}
|
||||
|
||||
ti.mu.Lock()
|
||||
ti.amplifications = append(ti.amplifications, *amp)
|
||||
ti.mu.Unlock()
|
||||
|
||||
return amp
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) TrackWebSocketFrame(
|
||||
frameType string, payload []byte,
|
||||
) *FrameTiming {
|
||||
frame := &FrameTiming{
|
||||
FrameType: frameType,
|
||||
SendTime: time.Now(),
|
||||
PayloadSize: len(payload),
|
||||
}
|
||||
|
||||
compressedSize := len(payload) * 7 / 10
|
||||
frame.CompressedSize = compressedSize
|
||||
frame.CompressionRatio = float64(len(payload)-compressedSize) / float64(len(payload))
|
||||
|
||||
frame.AckTime = time.Now().Add(5 * time.Millisecond)
|
||||
frame.Latency = frame.AckTime.Sub(frame.SendTime)
|
||||
|
||||
ti.mu.Lock()
|
||||
ti.framings = append(ti.framings, *frame)
|
||||
ti.mu.Unlock()
|
||||
|
||||
return frame
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) IdentifyBottlenecks() map[string]*PipelineBottleneck {
|
||||
ti.mu.RLock()
|
||||
defer ti.mu.RUnlock()
|
||||
|
||||
stages := []string{"publish", "store", "query", "return"}
|
||||
|
||||
for _, stage := range stages {
|
||||
var latencies []time.Duration
|
||||
var totalLatency time.Duration
|
||||
maxLatency := time.Duration(0)
|
||||
minLatency := time.Duration(1<<63 - 1)
|
||||
|
||||
for _, lc := range ti.lifecycles {
|
||||
var stageLatency time.Duration
|
||||
switch stage {
|
||||
case "publish":
|
||||
stageLatency = lc.PublishLatency
|
||||
case "store":
|
||||
stageLatency = lc.StoreEnd.Sub(lc.StoreStart)
|
||||
if stageLatency == 0 {
|
||||
stageLatency = lc.PublishLatency / 2
|
||||
}
|
||||
case "query":
|
||||
stageLatency = lc.QueryLatency
|
||||
case "return":
|
||||
stageLatency = lc.ReturnLatency
|
||||
}
|
||||
|
||||
if stageLatency > 0 {
|
||||
latencies = append(latencies, stageLatency)
|
||||
totalLatency += stageLatency
|
||||
if stageLatency > maxLatency {
|
||||
maxLatency = stageLatency
|
||||
}
|
||||
if stageLatency < minLatency {
|
||||
minLatency = stageLatency
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(latencies) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
avgLatency := totalLatency / time.Duration(len(latencies))
|
||||
p95, p99 := calculatePercentiles(latencies)
|
||||
|
||||
bottleneck := &PipelineBottleneck{
|
||||
Stage: stage,
|
||||
AvgLatency: avgLatency,
|
||||
MaxLatency: maxLatency,
|
||||
MinLatency: minLatency,
|
||||
P95Latency: p95,
|
||||
P99Latency: p99,
|
||||
Throughput: float64(len(latencies)) / totalLatency.Seconds(),
|
||||
}
|
||||
|
||||
ti.bottlenecks[stage] = bottleneck
|
||||
}
|
||||
|
||||
return ti.bottlenecks
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) RunFullInstrumentation(
|
||||
c context.T, eventCount int, eventSize int,
|
||||
) error {
|
||||
fmt.Printf("Starting end-to-end timing instrumentation...\n")
|
||||
|
||||
signer := newTestSigner()
|
||||
successCount := 0
|
||||
var totalPublishLatency time.Duration
|
||||
var totalQueryLatency time.Duration
|
||||
var totalEndToEnd time.Duration
|
||||
|
||||
for i := 0; i < eventCount; i++ {
|
||||
ev := generateEvent(signer, eventSize, 0, 0)
|
||||
|
||||
lifecycle, err := ti.TrackEventLifecycle(c, ev)
|
||||
if err != nil {
|
||||
log.E.F("Event %d failed: %v", i, err)
|
||||
continue
|
||||
}
|
||||
|
||||
_ = ti.MeasureWriteAmplification(ev)
|
||||
|
||||
evBytes := ev.Marshal(nil)
|
||||
ti.TrackWebSocketFrame("EVENT", evBytes)
|
||||
|
||||
successCount++
|
||||
totalPublishLatency += lifecycle.PublishLatency
|
||||
totalQueryLatency += lifecycle.QueryLatency
|
||||
totalEndToEnd += lifecycle.TotalDuration
|
||||
|
||||
if (i+1)%100 == 0 {
|
||||
fmt.Printf(
|
||||
" Processed %d/%d events (%.1f%% success)\n",
|
||||
i+1, eventCount, float64(successCount)*100/float64(i+1),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
bottlenecks := ti.IdentifyBottlenecks()
|
||||
|
||||
fmt.Printf("\n=== Timing Instrumentation Results ===\n")
|
||||
fmt.Printf("Events Tracked: %d/%d\n", successCount, eventCount)
|
||||
if successCount > 0 {
|
||||
fmt.Printf(
|
||||
"Average Publish Latency: %v\n",
|
||||
totalPublishLatency/time.Duration(successCount),
|
||||
)
|
||||
fmt.Printf(
|
||||
"Average Query Latency: %v\n",
|
||||
totalQueryLatency/time.Duration(successCount),
|
||||
)
|
||||
fmt.Printf(
|
||||
"Average End-to-End: %v\n",
|
||||
totalEndToEnd/time.Duration(successCount),
|
||||
)
|
||||
} else {
|
||||
fmt.Printf("No events successfully tracked\n")
|
||||
}
|
||||
|
||||
fmt.Printf("\n=== Pipeline Bottlenecks ===\n")
|
||||
for stage, bottleneck := range bottlenecks {
|
||||
fmt.Printf("\n%s Stage:\n", stage)
|
||||
fmt.Printf(" Avg Latency: %v\n", bottleneck.AvgLatency)
|
||||
fmt.Printf(" P95 Latency: %v\n", bottleneck.P95Latency)
|
||||
fmt.Printf(" P99 Latency: %v\n", bottleneck.P99Latency)
|
||||
fmt.Printf(" Max Latency: %v\n", bottleneck.MaxLatency)
|
||||
fmt.Printf(" Throughput: %.2f ops/sec\n", bottleneck.Throughput)
|
||||
}
|
||||
|
||||
ti.printWriteAmplificationStats()
|
||||
ti.printFrameTimingStats()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) printWriteAmplificationStats() {
|
||||
if len(ti.amplifications) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var totalAmp float64
|
||||
var totalIndexOverhead float64
|
||||
var totalIOOps int64
|
||||
|
||||
for _, amp := range ti.amplifications {
|
||||
totalAmp += amp.Amplification
|
||||
totalIndexOverhead += amp.IndexOverhead
|
||||
totalIOOps += amp.TotalIOOps
|
||||
}
|
||||
|
||||
count := float64(len(ti.amplifications))
|
||||
fmt.Printf("\n=== Write Amplification ===\n")
|
||||
fmt.Printf("Average Amplification: %.2fx\n", totalAmp/count)
|
||||
fmt.Printf(
|
||||
"Average Index Overhead: %.2f%%\n", (totalIndexOverhead/count)*100,
|
||||
)
|
||||
fmt.Printf("Total I/O Operations: %d\n", totalIOOps)
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) printFrameTimingStats() {
|
||||
if len(ti.framings) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var totalLatency time.Duration
|
||||
var totalCompression float64
|
||||
frameTypes := make(map[string]int)
|
||||
|
||||
for _, frame := range ti.framings {
|
||||
totalLatency += frame.Latency
|
||||
totalCompression += frame.CompressionRatio
|
||||
frameTypes[frame.FrameType]++
|
||||
}
|
||||
|
||||
count := len(ti.framings)
|
||||
fmt.Printf("\n=== WebSocket Frame Timings ===\n")
|
||||
fmt.Printf("Total Frames: %d\n", count)
|
||||
fmt.Printf("Average Frame Latency: %v\n", totalLatency/time.Duration(count))
|
||||
fmt.Printf(
|
||||
"Average Compression: %.1f%%\n", (totalCompression/float64(count))*100,
|
||||
)
|
||||
|
||||
for frameType, cnt := range frameTypes {
|
||||
fmt.Printf(" %s frames: %d\n", frameType, cnt)
|
||||
}
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) TestSubscriptionTiming(
|
||||
c context.T, duration time.Duration,
|
||||
) error {
|
||||
fmt.Printf("Testing subscription timing for %v...\n", duration)
|
||||
|
||||
f := &filter.F{}
|
||||
filters := &filters.T{F: []*filter.F{f}}
|
||||
|
||||
sub, _ := ti.relay.Subscribe(c, filters, ws.WithLabel("timing-sub"))
|
||||
|
||||
startTime := time.Now()
|
||||
eventCount := 0
|
||||
var totalLatency time.Duration
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-sub.Events:
|
||||
receiveTime := time.Now()
|
||||
eventLatency := receiveTime.Sub(startTime)
|
||||
totalLatency += eventLatency
|
||||
eventCount++
|
||||
|
||||
if eventCount%100 == 0 {
|
||||
fmt.Printf(
|
||||
" Received %d events, avg latency: %v\n",
|
||||
eventCount, totalLatency/time.Duration(eventCount),
|
||||
)
|
||||
}
|
||||
case <-c.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
time.Sleep(duration)
|
||||
sub.Close()
|
||||
|
||||
fmt.Printf("\nSubscription Timing Results:\n")
|
||||
fmt.Printf(" Total Events: %d\n", eventCount)
|
||||
if eventCount > 0 {
|
||||
fmt.Printf(
|
||||
" Average Latency: %v\n", totalLatency/time.Duration(eventCount),
|
||||
)
|
||||
fmt.Printf(
|
||||
" Events/Second: %.2f\n", float64(eventCount)/duration.Seconds(),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func calculatePercentiles(latencies []time.Duration) (p95, p99 time.Duration) {
|
||||
if len(latencies) == 0 {
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
sorted := make([]time.Duration, len(latencies))
|
||||
copy(sorted, latencies)
|
||||
|
||||
for i := 0; i < len(sorted); i++ {
|
||||
for j := i + 1; j < len(sorted); j++ {
|
||||
if sorted[i] > sorted[j] {
|
||||
sorted[i], sorted[j] = sorted[j], sorted[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
p95Index := int(float64(len(sorted)) * 0.95)
|
||||
p99Index := int(float64(len(sorted)) * 0.99)
|
||||
|
||||
if p95Index >= len(sorted) {
|
||||
p95Index = len(sorted) - 1
|
||||
}
|
||||
if p99Index >= len(sorted) {
|
||||
p99Index = len(sorted) - 1
|
||||
}
|
||||
|
||||
return sorted[p95Index], sorted[p99Index]
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) Close() {
|
||||
if ti.relay != nil {
|
||||
ti.relay.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) GetMetrics() map[string]interface{} {
|
||||
ti.mu.RLock()
|
||||
defer ti.mu.RUnlock()
|
||||
|
||||
metrics := make(map[string]interface{})
|
||||
metrics["tracked_events"] = ti.trackedEvents.Load()
|
||||
metrics["lifecycles_count"] = len(ti.lifecycles)
|
||||
metrics["frames_tracked"] = len(ti.framings)
|
||||
metrics["write_amplifications"] = len(ti.amplifications)
|
||||
|
||||
if len(ti.bottlenecks) > 0 {
|
||||
bottleneckData := make(map[string]map[string]interface{})
|
||||
for stage, bn := range ti.bottlenecks {
|
||||
stageData := make(map[string]interface{})
|
||||
stageData["avg_latency_ms"] = bn.AvgLatency.Milliseconds()
|
||||
stageData["p95_latency_ms"] = bn.P95Latency.Milliseconds()
|
||||
stageData["p99_latency_ms"] = bn.P99Latency.Milliseconds()
|
||||
stageData["throughput_ops_sec"] = bn.Throughput
|
||||
bottleneckData[stage] = stageData
|
||||
}
|
||||
metrics["bottlenecks"] = bottleneckData
|
||||
}
|
||||
|
||||
return metrics
|
||||
}
|
||||
BIN
cmd/lerproxy/favicon.ico
Normal file
BIN
cmd/lerproxy/favicon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 15 KiB |
16
cmd/lerproxy/lerproxy.service
Normal file
16
cmd/lerproxy/lerproxy.service
Normal file
@@ -0,0 +1,16 @@
|
||||
# systemd unit to run lerproxy as a service
|
||||
[Unit]
|
||||
Description=lerproxy
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=mleku
|
||||
ExecStart=/home/mleku/.local/bin/lerproxy -m /home/mleku/mapping.txt
|
||||
Restart=always
|
||||
Wants=network-online.target
|
||||
# waits for wireguard service to come up before starting, remove the wg-quick@wg0 section if running it directly on an
|
||||
# internet routeable connection
|
||||
After=network.target network-online.target wg-quick@wg0.service
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -6,6 +6,7 @@ package main
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -35,6 +36,9 @@ import (
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
//go:embed favicon.ico
|
||||
var defaultFavicon []byte
|
||||
|
||||
type runArgs struct {
|
||||
Addr string `arg:"-l,--listen" default:":https" help:"address to listen at"`
|
||||
Conf string `arg:"-m,--map" default:"mapping.txt" help:"file with host/backend mapping"`
|
||||
@@ -322,6 +326,20 @@ func setProxy(mapping map[string]string) (h http.Handler, err error) {
|
||||
fmt.Fprint(writer, nostrJSON)
|
||||
},
|
||||
)
|
||||
fin := hn + "/favicon.ico"
|
||||
var fi []byte
|
||||
if fi, err = os.ReadFile(fin); chk.E(err) {
|
||||
fi = defaultFavicon
|
||||
}
|
||||
mux.HandleFunc(
|
||||
hn+"/favicon.ico",
|
||||
func(writer http.ResponseWriter, request *http.Request) {
|
||||
log.T.F("serving favicon to %s", hn)
|
||||
if _, err = writer.Write(fi); chk.E(err) {
|
||||
return
|
||||
}
|
||||
},
|
||||
)
|
||||
continue
|
||||
}
|
||||
} else if u, err := url.Parse(ba); err == nil {
|
||||
@@ -358,7 +376,7 @@ func setProxy(mapping map[string]string) (h http.Handler, err error) {
|
||||
)
|
||||
// req.Header.Set("Access-Control-Allow-Credentials", "true")
|
||||
req.Header.Set("Access-Control-Allow-Origin", "*")
|
||||
log.D.Ln(req.URL, req.RemoteAddr)
|
||||
log.I.Ln(req.URL, req.RemoteAddr)
|
||||
},
|
||||
Transport: &http.Transport{
|
||||
DialContext: func(c context.T, n, addr string) (
|
||||
|
||||
@@ -62,7 +62,13 @@ for generating extended expiration NIP-98 tokens:
|
||||
if err = ev.Sign(sign); err != nil {
|
||||
fail(err.Error())
|
||||
}
|
||||
log.T.F("nip-98 http auth event:\n%s\n", ev.SerializeIndented())
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"nip-98 http auth event:\n%s\n", ev.SerializeIndented(),
|
||||
)
|
||||
},
|
||||
)
|
||||
b64 := base64.URLEncoding.EncodeToString(ev.Serialize())
|
||||
fmt.Println("Nostr " + b64)
|
||||
}
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/crypto/sha256"
|
||||
"orly.dev/pkg/encoders/bech32encoding"
|
||||
@@ -18,7 +20,6 @@ import (
|
||||
"orly.dev/pkg/utils/errorf"
|
||||
"orly.dev/pkg/utils/log"
|
||||
realy_lol "orly.dev/pkg/version"
|
||||
"os"
|
||||
)
|
||||
|
||||
const secEnv = "NOSTR_SECRET_KEY"
|
||||
@@ -190,6 +191,5 @@ func Post(f string, ur *url.URL, sign signer.I) (err error) {
|
||||
if io.Copy(os.Stdout, res.Body); chk.E(err) {
|
||||
return
|
||||
}
|
||||
fmt.Println()
|
||||
return
|
||||
}
|
||||
|
||||
@@ -6,21 +6,23 @@ import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"orly.dev/pkg/crypto/ec/bech32"
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"orly.dev/pkg/encoders/bech32encoding"
|
||||
"orly.dev/pkg/utils/atomic"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/interrupt"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/qu"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"orly.dev/pkg/crypto/ec/bech32"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/encoders/bech32encoding"
|
||||
"orly.dev/pkg/utils/atomic"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/interrupt"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/lol"
|
||||
"orly.dev/pkg/utils/qu"
|
||||
|
||||
"github.com/alexflint/go-arg"
|
||||
)
|
||||
|
||||
@@ -33,9 +35,9 @@ const (
|
||||
)
|
||||
|
||||
type Result struct {
|
||||
sec *secp256k1.SecretKey
|
||||
sec []byte
|
||||
npub []byte
|
||||
pub *secp256k1.PublicKey
|
||||
pub []byte
|
||||
}
|
||||
|
||||
var args struct {
|
||||
@@ -45,6 +47,7 @@ var args struct {
|
||||
}
|
||||
|
||||
func main() {
|
||||
lol.SetLogLevel("info")
|
||||
arg.MustParse(&args)
|
||||
if args.String == "" {
|
||||
_, _ = fmt.Fprintln(
|
||||
@@ -79,7 +82,7 @@ Options:
|
||||
}
|
||||
}
|
||||
|
||||
func Vanity(str string, where int, threads int) (e error) {
|
||||
func Vanity(str string, where int, threads int) (err error) {
|
||||
|
||||
// check the string has valid bech32 ciphers
|
||||
for i := range str {
|
||||
@@ -122,7 +125,7 @@ out:
|
||||
wm := workingFor % time.Second
|
||||
workingFor -= wm
|
||||
fmt.Printf(
|
||||
"working for %v, attempts %d\n",
|
||||
" working for %v, attempts %d",
|
||||
workingFor, counter.Load(),
|
||||
)
|
||||
case r := <-resC:
|
||||
@@ -142,20 +145,16 @@ out:
|
||||
wg.Wait()
|
||||
|
||||
fmt.Printf(
|
||||
"generated in %d attempts using %d threads, taking %v\n",
|
||||
"\r# generated in %d attempts using %d threads, taking %v ",
|
||||
counter.Load(), args.Threads, time.Now().Sub(started),
|
||||
)
|
||||
secBytes := res.sec.Serialize()
|
||||
log.D.Ln(
|
||||
"generated key pair:\n"+
|
||||
"\nhex:\n"+
|
||||
"\tsecret: %s\n"+
|
||||
"\tpublic: %s\n\n",
|
||||
hex.EncodeToString(secBytes),
|
||||
hex.EncodeToString(schnorr.SerializePubKey(res.pub)),
|
||||
fmt.Printf(
|
||||
"\nHSEC = %s\nHPUB = %s\n",
|
||||
hex.EncodeToString(res.sec),
|
||||
hex.EncodeToString(res.pub),
|
||||
)
|
||||
nsec, _ := bech32encoding.SecretKeyToNsec(res.sec)
|
||||
fmt.Printf("\nNSEC = %s\nNPUB = %s\n\n", nsec, res.npub)
|
||||
nsec, _ := bech32encoding.BinToNsec(res.sec)
|
||||
fmt.Printf("NSEC = %s\nNPUB = %s\n", nsec, res.npub)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -185,16 +184,18 @@ out:
|
||||
default:
|
||||
}
|
||||
counter.Inc()
|
||||
r.sec, r.pub, e = GenKeyPair()
|
||||
// r.sec, r.pub, e = GenKeyPair()
|
||||
r.sec, r.pub, e = Gen()
|
||||
if e != nil {
|
||||
log.E.Ln("error generating key: '%v' worker stopping", e)
|
||||
break out
|
||||
}
|
||||
r.npub, e = bech32encoding.PublicKeyToNpub(r.pub)
|
||||
if e != nil {
|
||||
// r.npub, e = bech32encoding.PublicKeyToNpub(r.pub)
|
||||
if r.npub, e = bech32encoding.BinToNpub(r.pub); e != nil {
|
||||
log.E.Ln("fatal error generating npub: %s\n", e)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("\rgenerating key: %s", r.npub)
|
||||
switch where {
|
||||
case PositionBeginning:
|
||||
if bytes.HasPrefix(r.npub, append(prefix, []byte(str)...)) {
|
||||
@@ -215,6 +216,15 @@ out:
|
||||
}
|
||||
}
|
||||
|
||||
func Gen() (skb, pkb []byte, err error) {
|
||||
sign := p256k.Signer{}
|
||||
if err = sign.Generate(); chk.E(err) {
|
||||
return
|
||||
}
|
||||
skb, pkb = sign.Sec(), sign.Pub()
|
||||
return
|
||||
}
|
||||
|
||||
// GenKeyPair creates a fresh new key pair using the entropy source used by
|
||||
// crypto/rand (ie, /dev/random on posix systems).
|
||||
func GenKeyPair() (
|
||||
|
||||
162
cmd/walletcli/README.md
Normal file
162
cmd/walletcli/README.md
Normal file
@@ -0,0 +1,162 @@
|
||||
# NWC Client CLI Tool
|
||||
|
||||
A command-line interface tool for making calls to Nostr Wallet Connect (NWC) services.
|
||||
|
||||
## Overview
|
||||
|
||||
This CLI tool allows you to interact with NWC wallet services using the methods defined in the NIP-47 specification. It provides a simple interface for executing wallet operations and displays the JSON response from the wallet service.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
nwcclient <connection URL> <method> [parameters...]
|
||||
```
|
||||
|
||||
### Connection URL
|
||||
|
||||
The connection URL should be in the Nostr Wallet Connect format:
|
||||
|
||||
```
|
||||
nostr+walletconnect://<wallet_pubkey>?relay=<relay_url>&secret=<secret>
|
||||
```
|
||||
|
||||
### Supported Methods
|
||||
|
||||
The following methods are supported by this CLI tool:
|
||||
|
||||
- `get_info` - Get wallet information
|
||||
- `get_balance` - Get wallet balance
|
||||
- `get_budget` - Get wallet budget
|
||||
- `make_invoice` - Create an invoice
|
||||
- `pay_invoice` - Pay an invoice
|
||||
- `pay_keysend` - Send a keysend payment
|
||||
- `lookup_invoice` - Look up an invoice
|
||||
- `list_transactions` - List transactions
|
||||
- `sign_message` - Sign a message
|
||||
|
||||
### Unsupported Methods
|
||||
|
||||
The following methods are defined in the NIP-47 specification but are not directly supported by this CLI tool due to limitations in the underlying nwc package:
|
||||
|
||||
- `create_connection` - Create a connection
|
||||
- `make_hold_invoice` - Create a hold invoice
|
||||
- `settle_hold_invoice` - Settle a hold invoice
|
||||
- `cancel_hold_invoice` - Cancel a hold invoice
|
||||
- `multi_pay_invoice` - Pay multiple invoices
|
||||
- `multi_pay_keysend` - Send multiple keysend payments
|
||||
|
||||
## Method Parameters
|
||||
|
||||
### Methods with No Parameters
|
||||
|
||||
- `get_info`
|
||||
- `get_balance`
|
||||
- `get_budget`
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> get_info
|
||||
```
|
||||
|
||||
### Methods with Parameters
|
||||
|
||||
#### make_invoice
|
||||
|
||||
```
|
||||
nwcclient <connection URL> make_invoice <amount> <description> [description_hash] [expiry]
|
||||
```
|
||||
|
||||
- `amount` - Amount in millisatoshis (msats)
|
||||
- `description` - Invoice description
|
||||
- `description_hash` (optional) - Hash of the description
|
||||
- `expiry` (optional) - Expiry time in seconds
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> make_invoice 1000000 "Test invoice" "" 3600
|
||||
```
|
||||
|
||||
#### pay_invoice
|
||||
|
||||
```
|
||||
nwcclient <connection URL> pay_invoice <invoice> [amount]
|
||||
```
|
||||
|
||||
- `invoice` - BOLT11 invoice
|
||||
- `amount` (optional) - Amount in millisatoshis (msats)
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> pay_invoice lnbc1...
|
||||
```
|
||||
|
||||
#### pay_keysend
|
||||
|
||||
```
|
||||
nwcclient <connection URL> pay_keysend <amount> <pubkey> [preimage]
|
||||
```
|
||||
|
||||
- `amount` - Amount in millisatoshis (msats)
|
||||
- `pubkey` - Recipient's public key
|
||||
- `preimage` (optional) - Payment preimage
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> pay_keysend 1000000 03...
|
||||
```
|
||||
|
||||
#### lookup_invoice
|
||||
|
||||
```
|
||||
nwcclient <connection URL> lookup_invoice <payment_hash_or_invoice>
|
||||
```
|
||||
|
||||
- `payment_hash_or_invoice` - Payment hash or BOLT11 invoice
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> lookup_invoice 3d...
|
||||
```
|
||||
|
||||
#### list_transactions
|
||||
|
||||
```
|
||||
nwcclient <connection URL> list_transactions [from <timestamp>] [until <timestamp>] [limit <count>] [offset <count>] [unpaid <true|false>] [type <incoming|outgoing>]
|
||||
```
|
||||
|
||||
Parameters are specified as name-value pairs:
|
||||
|
||||
- `from` - Start timestamp
|
||||
- `until` - End timestamp
|
||||
- `limit` - Maximum number of transactions to return
|
||||
- `offset` - Number of transactions to skip
|
||||
- `unpaid` - Whether to include unpaid transactions
|
||||
- `type` - Transaction type (incoming or outgoing)
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> list_transactions limit 10 type incoming
|
||||
```
|
||||
|
||||
#### sign_message
|
||||
|
||||
```
|
||||
nwcclient <connection URL> sign_message <message>
|
||||
```
|
||||
|
||||
- `message` - Message to sign
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> sign_message "Hello, world!"
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
The tool prints the JSON response from the wallet service to stdout. If an error occurs, an error message is printed to stderr.
|
||||
|
||||
## Limitations
|
||||
|
||||
- The tool only supports methods that have direct client methods in the nwc package.
|
||||
- Complex parameters like metadata are not supported.
|
||||
- The tool does not support interactive authentication or authorization.
|
||||
453
cmd/walletcli/main.go
Normal file
453
cmd/walletcli/main.go
Normal file
@@ -0,0 +1,453 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/protocol/nwc"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/interrupt"
|
||||
)
|
||||
|
||||
func printUsage() {
|
||||
fmt.Println("Usage: walletcli \"<NWC connection URL>\" <method> [<args...>]")
|
||||
fmt.Println("\nAvailable methods:")
|
||||
fmt.Println(" get_wallet_service_info - Get wallet service information")
|
||||
fmt.Println(" get_info - Get wallet information")
|
||||
fmt.Println(" get_balance - Get wallet balance")
|
||||
fmt.Println(" get_budget - Get wallet budget")
|
||||
fmt.Println(" make_invoice - Create an invoice")
|
||||
fmt.Println(" Args: <amount> [<description>] [<description_hash>] [<expiry>]")
|
||||
fmt.Println(" pay_invoice - Pay an invoice")
|
||||
fmt.Println(" Args: <invoice> [<amount>] [<comment>]")
|
||||
fmt.Println(" pay_keysend - Pay to a node using keysend")
|
||||
fmt.Println(" Args: <pubkey> <amount> [<preimage>] [<tlv_type> <tlv_value>...]")
|
||||
fmt.Println(" lookup_invoice - Look up an invoice")
|
||||
fmt.Println(" Args: <payment_hash or invoice>")
|
||||
fmt.Println(" list_transactions - List transactions")
|
||||
fmt.Println(" Args: [<limit>] [<offset>] [<from>] [<until>]")
|
||||
fmt.Println(" make_hold_invoice - Create a hold invoice")
|
||||
fmt.Println(" Args: <amount> <payment_hash> [<description>] [<description_hash>] [<expiry>]")
|
||||
fmt.Println(" settle_hold_invoice - Settle a hold invoice")
|
||||
fmt.Println(" Args: <preimage>")
|
||||
fmt.Println(" cancel_hold_invoice - Cancel a hold invoice")
|
||||
fmt.Println(" Args: <payment_hash>")
|
||||
fmt.Println(" sign_message - Sign a message")
|
||||
fmt.Println(" Args: <message>")
|
||||
fmt.Println(" create_connection - Create a connection")
|
||||
fmt.Println(" Args: <pubkey> <name> <methods> [<notification_types>] [<max_amount>] [<budget_renewal>] [<expires_at>]")
|
||||
fmt.Println(" subscribe - Subscribe to payment_received, payment_sent and hold_invoice_accepted notifications visible in the scope of the connection")
|
||||
}
|
||||
|
||||
func main() {
|
||||
if len(os.Args) < 3 {
|
||||
printUsage()
|
||||
os.Exit(1)
|
||||
}
|
||||
connectionURL := os.Args[1]
|
||||
method := os.Args[2]
|
||||
args := os.Args[3:]
|
||||
// Create context
|
||||
// c, cancel := context.Cancel(context.Bg())
|
||||
c := context.Bg()
|
||||
// defer cancel()
|
||||
// Create NWC client
|
||||
cl, err := nwc.NewClient(c, connectionURL)
|
||||
if err != nil {
|
||||
fmt.Printf("Error creating client: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
// Execute the requested method
|
||||
switch method {
|
||||
case "get_wallet_service_info":
|
||||
handleGetWalletServiceInfo(c, cl)
|
||||
case "get_info":
|
||||
handleGetInfo(c, cl)
|
||||
case "get_balance":
|
||||
handleGetBalance(c, cl)
|
||||
case "get_budget":
|
||||
handleGetBudget(c, cl)
|
||||
case "make_invoice":
|
||||
handleMakeInvoice(c, cl, args)
|
||||
case "pay_invoice":
|
||||
handlePayInvoice(c, cl, args)
|
||||
case "pay_keysend":
|
||||
handlePayKeysend(c, cl, args)
|
||||
case "lookup_invoice":
|
||||
handleLookupInvoice(c, cl, args)
|
||||
case "list_transactions":
|
||||
handleListTransactions(c, cl, args)
|
||||
case "make_hold_invoice":
|
||||
handleMakeHoldInvoice(c, cl, args)
|
||||
case "settle_hold_invoice":
|
||||
handleSettleHoldInvoice(c, cl, args)
|
||||
case "cancel_hold_invoice":
|
||||
handleCancelHoldInvoice(c, cl, args)
|
||||
case "sign_message":
|
||||
handleSignMessage(c, cl, args)
|
||||
case "create_connection":
|
||||
handleCreateConnection(c, cl, args)
|
||||
case "subscribe":
|
||||
handleSubscribe(c, cl)
|
||||
default:
|
||||
fmt.Printf("Unknown method: %s\n", method)
|
||||
printUsage()
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetWalletServiceInfo(c context.T, cl *nwc.Client) {
|
||||
if _, raw, err := cl.GetWalletServiceInfo(c, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleCancelHoldInvoice(c context.T, cl *nwc.Client, args []string) {
|
||||
if len(args) < 1 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> cancel_hold_invoice <payment_hash>")
|
||||
return
|
||||
}
|
||||
|
||||
params := &nwc.CancelHoldInvoiceParams{
|
||||
PaymentHash: args[0],
|
||||
}
|
||||
var err error
|
||||
var raw []byte
|
||||
if raw, err = cl.CancelHoldInvoice(c, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleCreateConnection(c context.T, cl *nwc.Client, args []string) {
|
||||
if len(args) < 3 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> create_connection <pubkey> <name> <methods> [<notification_types>] [<max_amount>] [<budget_renewal>] [<expires_at>]")
|
||||
return
|
||||
}
|
||||
params := &nwc.CreateConnectionParams{
|
||||
Pubkey: args[0],
|
||||
Name: args[1],
|
||||
RequestMethods: strings.Split(args[2], ","),
|
||||
}
|
||||
if len(args) > 3 {
|
||||
params.NotificationTypes = strings.Split(args[3], ",")
|
||||
}
|
||||
if len(args) > 4 {
|
||||
maxAmount, err := strconv.ParseUint(args[4], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing max_amount: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.MaxAmount = &maxAmount
|
||||
}
|
||||
if len(args) > 5 {
|
||||
params.BudgetRenewal = &args[5]
|
||||
}
|
||||
if len(args) > 6 {
|
||||
expiresAt, err := strconv.ParseInt(args[6], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing expires_at: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.ExpiresAt = &expiresAt
|
||||
}
|
||||
var raw []byte
|
||||
var err error
|
||||
if raw, err = cl.CreateConnection(c, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetBalance(c context.T, cl *nwc.Client) {
|
||||
if _, raw, err := cl.GetBalance(c, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetBudget(c context.T, cl *nwc.Client) {
|
||||
if _, raw, err := cl.GetBudget(c, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetInfo(c context.T, cl *nwc.Client) {
|
||||
if _, raw, err := cl.GetInfo(c, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleListTransactions(c context.T, cl *nwc.Client, args []string) {
|
||||
params := &nwc.ListTransactionsParams{}
|
||||
if len(args) > 0 {
|
||||
limit, err := strconv.ParseUint(args[0], 10, 16)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing limit: %v\n", err)
|
||||
return
|
||||
}
|
||||
limitUint16 := uint16(limit)
|
||||
params.Limit = &limitUint16
|
||||
}
|
||||
if len(args) > 1 {
|
||||
offset, err := strconv.ParseUint(args[1], 10, 32)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing offset: %v\n", err)
|
||||
return
|
||||
}
|
||||
offsetUint32 := uint32(offset)
|
||||
params.Offset = &offsetUint32
|
||||
}
|
||||
if len(args) > 2 {
|
||||
from, err := strconv.ParseInt(args[2], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing from: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.From = &from
|
||||
}
|
||||
if len(args) > 3 {
|
||||
until, err := strconv.ParseInt(args[3], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing until: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.Until = &until
|
||||
}
|
||||
var raw []byte
|
||||
var err error
|
||||
if _, raw, err = cl.ListTransactions(c, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleLookupInvoice(c context.T, cl *nwc.Client, args []string) {
|
||||
if len(args) < 1 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> lookup_invoice <payment_hash or invoice>")
|
||||
return
|
||||
}
|
||||
params := &nwc.LookupInvoiceParams{}
|
||||
// Determine if the argument is a payment hash or an invoice
|
||||
if strings.HasPrefix(args[0], "ln") {
|
||||
invoice := args[0]
|
||||
params.Invoice = &invoice
|
||||
} else {
|
||||
paymentHash := args[0]
|
||||
params.PaymentHash = &paymentHash
|
||||
}
|
||||
var err error
|
||||
var raw []byte
|
||||
if _, raw, err = cl.LookupInvoice(c, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleMakeHoldInvoice(c context.T, cl *nwc.Client, args []string) {
|
||||
if len(args) < 2 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> make_hold_invoice <amount> <payment_hash> [<description>] [<description_hash>] [<expiry>]")
|
||||
return
|
||||
}
|
||||
amount, err := strconv.ParseUint(args[0], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing amount: %v\n", err)
|
||||
return
|
||||
}
|
||||
params := &nwc.MakeHoldInvoiceParams{
|
||||
Amount: amount,
|
||||
PaymentHash: args[1],
|
||||
}
|
||||
if len(args) > 2 {
|
||||
params.Description = args[2]
|
||||
}
|
||||
if len(args) > 3 {
|
||||
params.DescriptionHash = args[3]
|
||||
}
|
||||
if len(args) > 4 {
|
||||
expiry, err := strconv.ParseInt(args[4], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing expiry: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.Expiry = &expiry
|
||||
}
|
||||
var raw []byte
|
||||
if _, raw, err = cl.MakeHoldInvoice(c, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleMakeInvoice(c context.T, cl *nwc.Client, args []string) {
|
||||
if len(args) < 1 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> make_invoice <amount> [<description>] [<description_hash>] [<expiry>]")
|
||||
return
|
||||
}
|
||||
amount, err := strconv.ParseUint(args[0], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing amount: %v\n", err)
|
||||
return
|
||||
}
|
||||
params := &nwc.MakeInvoiceParams{
|
||||
Amount: amount,
|
||||
}
|
||||
if len(args) > 1 {
|
||||
params.Description = args[1]
|
||||
}
|
||||
if len(args) > 2 {
|
||||
params.DescriptionHash = args[2]
|
||||
}
|
||||
if len(args) > 3 {
|
||||
expiry, err := strconv.ParseInt(args[3], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing expiry: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.Expiry = &expiry
|
||||
}
|
||||
var raw []byte
|
||||
if _, raw, err = cl.MakeInvoice(c, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handlePayKeysend(c context.T, cl *nwc.Client, args []string) {
|
||||
if len(args) < 2 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> pay_keysend <pubkey> <amount> [<preimage>] [<tlv_type> <tlv_value>...]")
|
||||
return
|
||||
}
|
||||
pubkey := args[0]
|
||||
amount, err := strconv.ParseUint(args[1], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing amount: %v\n", err)
|
||||
return
|
||||
}
|
||||
params := &nwc.PayKeysendParams{
|
||||
Pubkey: pubkey,
|
||||
Amount: amount,
|
||||
}
|
||||
// Optional preimage
|
||||
if len(args) > 2 {
|
||||
preimage := args[2]
|
||||
params.Preimage = &preimage
|
||||
}
|
||||
// Optional TLV records (must come in pairs)
|
||||
if len(args) > 3 {
|
||||
// Start from index 3 and process pairs of arguments
|
||||
for i := 3; i < len(args)-1; i += 2 {
|
||||
tlvType, err := strconv.ParseUint(args[i], 10, 32)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing TLV type: %v\n", err)
|
||||
return
|
||||
}
|
||||
tlvValue := args[i+1]
|
||||
params.TLVRecords = append(
|
||||
params.TLVRecords, nwc.PayKeysendTLVRecord{
|
||||
Type: uint32(tlvType),
|
||||
Value: tlvValue,
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
var raw []byte
|
||||
if _, raw, err = cl.PayKeysend(c, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handlePayInvoice(c context.T, cl *nwc.Client, args []string) {
|
||||
if len(args) < 1 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> pay_invoice <invoice> [<amount>] [<comment>]")
|
||||
return
|
||||
}
|
||||
params := &nwc.PayInvoiceParams{
|
||||
Invoice: args[0],
|
||||
}
|
||||
if len(args) > 1 {
|
||||
amount, err := strconv.ParseUint(args[1], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing amount: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.Amount = &amount
|
||||
}
|
||||
if len(args) > 2 {
|
||||
comment := args[2]
|
||||
params.Metadata = &nwc.PayInvoiceMetadata{
|
||||
Comment: &comment,
|
||||
}
|
||||
}
|
||||
if _, raw, err := cl.PayInvoice(c, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleSettleHoldInvoice(c context.T, cl *nwc.Client, args []string) {
|
||||
if len(args) < 1 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> settle_hold_invoice <preimage>")
|
||||
return
|
||||
}
|
||||
params := &nwc.SettleHoldInvoiceParams{
|
||||
Preimage: args[0],
|
||||
}
|
||||
var raw []byte
|
||||
var err error
|
||||
if raw, err = cl.SettleHoldInvoice(c, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleSignMessage(c context.T, cl *nwc.Client, args []string) {
|
||||
if len(args) < 1 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> sign_message <message>")
|
||||
return
|
||||
}
|
||||
|
||||
params := &nwc.SignMessageParams{
|
||||
Message: args[0],
|
||||
}
|
||||
var raw []byte
|
||||
var err error
|
||||
if _, raw, err = cl.SignMessage(c, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleSubscribe(c context.T, cl *nwc.Client) {
|
||||
// Create a context with a cancel
|
||||
c, cancel := context.Cancel(c)
|
||||
interrupt.AddHandler(cancel)
|
||||
|
||||
// Get wallet service info to check if notifications are supported
|
||||
wsi, _, err := cl.GetWalletServiceInfo(c, false)
|
||||
if err != nil {
|
||||
fmt.Printf("Error getting wallet service info: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the wallet supports notifications
|
||||
if len(wsi.NotificationTypes) == 0 {
|
||||
fmt.Println("Wallet does not support notifications")
|
||||
return
|
||||
}
|
||||
var evc event.C
|
||||
if evc, err = cl.Subscribe(c); chk.E(err) {
|
||||
return
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case <-c.Done():
|
||||
return
|
||||
case ev := <-evc:
|
||||
fmt.Println(ev.Marshal(nil))
|
||||
}
|
||||
}
|
||||
}
|
||||
207
cmd/walletcli/mock-wallet-service/EXAMPLES.md
Normal file
207
cmd/walletcli/mock-wallet-service/EXAMPLES.md
Normal file
@@ -0,0 +1,207 @@
|
||||
# Mock Wallet Service Examples
|
||||
|
||||
This document contains example commands for testing the mock wallet service using the CLI client.
|
||||
|
||||
## Starting the Mock Wallet Service
|
||||
|
||||
To start the mock wallet service, run the following command from the project root:
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/mock-wallet-service/main.go --relay ws://localhost:8080 --generate-key
|
||||
```
|
||||
|
||||
This will generate a new wallet key and connect to a relay at ws://localhost:8080. The output will include the wallet's public key, which you'll need for connecting to it.
|
||||
|
||||
Alternatively, you can provide your own wallet key:
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/mock-wallet-service/main.go --relay ws://localhost:8080 --key YOUR_PRIVATE_KEY_HEX
|
||||
```
|
||||
|
||||
## Connecting to the Mock Wallet Service
|
||||
|
||||
To connect to the mock wallet service, you'll need to create a connection URL in the following format:
|
||||
|
||||
```
|
||||
nostr+walletconnect://WALLET_PUBLIC_KEY?relay=ws://localhost:8080&secret=CLIENT_SECRET_KEY
|
||||
```
|
||||
|
||||
Where:
|
||||
- `WALLET_PUBLIC_KEY` is the public key of the wallet service (printed when starting the service)
|
||||
- `CLIENT_SECRET_KEY` is a private key for the client (you can generate one using any nostr key generation tool)
|
||||
|
||||
For example:
|
||||
|
||||
```
|
||||
nostr+walletconnect://7e7e9c42a91bfef19fa929e5fda1b72e0ebc1a4c1141673e2794234d86addf4e?relay=ws://localhost:8080&secret=d5e4f0a6b2c8a9e7d1f3b5a8c2e4f6a8b0d2c4e6f8a0b2d4e6f8a0c2e4d6b8a0
|
||||
```
|
||||
|
||||
## Example Commands
|
||||
|
||||
Below are example commands for each method supported by the mock wallet service. Replace `CONNECTION_URL` with your actual connection URL.
|
||||
|
||||
### Get Wallet Service Info
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" get_wallet_service_info
|
||||
```
|
||||
|
||||
### Get Info
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" get_info
|
||||
```
|
||||
|
||||
### Get Balance
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" get_balance
|
||||
```
|
||||
|
||||
### Get Budget
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" get_budget
|
||||
```
|
||||
|
||||
### Make Invoice
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" make_invoice 1000 "Test invoice"
|
||||
```
|
||||
|
||||
This creates an invoice for 1000 sats with the description "Test invoice".
|
||||
|
||||
### Pay Invoice
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" pay_invoice "lnbc10n1p3zry4app5wkpza973yxheqzh6gr5vt93m3w9mfakz7r35nzk3j6cjgdyvd9ksdqqcqzpgxqyz5vqsp5usyc4lk9chsfp53kvcnvq456ganh60d89reykdngsmtj6yw3nhvq9qyyssqy4lgd8tj274q2rnzl7xvjwh9xct6rkjn47fn7tvj2s8loyy83gy7z5a5xxaqjz3tldmhglggnv8x8h8xwj7gxcr9gy5aquawzh4gqj6d3h4"
|
||||
```
|
||||
|
||||
This pays an invoice. You can use any valid Lightning invoice string.
|
||||
|
||||
### Pay Keysend
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" pay_keysend "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" 1000
|
||||
```
|
||||
|
||||
This sends 1000 sats to the specified public key using keysend.
|
||||
|
||||
### Lookup Invoice
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" lookup_invoice "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
|
||||
```
|
||||
|
||||
This looks up an invoice by payment hash.
|
||||
|
||||
### List Transactions
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" list_transactions 10
|
||||
```
|
||||
|
||||
This lists up to 10 transactions.
|
||||
|
||||
### Make Hold Invoice
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" make_hold_invoice 1000 "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" "Test hold invoice"
|
||||
```
|
||||
|
||||
This creates a hold invoice for 1000 sats with the specified payment hash and description.
|
||||
|
||||
### Settle Hold Invoice
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" settle_hold_invoice "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
|
||||
```
|
||||
|
||||
This settles a hold invoice with the specified preimage.
|
||||
|
||||
### Cancel Hold Invoice
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" cancel_hold_invoice "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
|
||||
```
|
||||
|
||||
This cancels a hold invoice with the specified payment hash.
|
||||
|
||||
### Sign Message
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" sign_message "Test message to sign"
|
||||
```
|
||||
|
||||
This signs a message with the wallet's private key.
|
||||
|
||||
### Create Connection
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" create_connection "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" "Test Connection" "get_info,get_balance,make_invoice" "payment_received,payment_sent"
|
||||
```
|
||||
|
||||
This creates a connection with the specified public key, name, methods, and notification types.
|
||||
|
||||
### Subscribe
|
||||
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" subscribe
|
||||
```
|
||||
|
||||
This subscribes to notifications from the wallet service.
|
||||
|
||||
## Complete Example Workflow
|
||||
|
||||
Here's a complete example workflow for testing the mock wallet service:
|
||||
|
||||
1. Start the mock wallet service:
|
||||
```bash
|
||||
go run cmd/walletcli/mock-wallet-service/main.go --relay ws://localhost:8080 --generate-key
|
||||
```
|
||||
|
||||
2. Note the wallet's public key from the output.
|
||||
|
||||
3. Generate a client secret key (or use an existing one).
|
||||
|
||||
4. Create a connection URL:
|
||||
```
|
||||
nostr+walletconnect://WALLET_PUBLIC_KEY?relay=ws://localhost:8080&secret=CLIENT_SECRET_KEY
|
||||
```
|
||||
|
||||
5. Get wallet service info:
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" get_wallet_service_info
|
||||
```
|
||||
|
||||
6. Get wallet info:
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" get_info
|
||||
```
|
||||
|
||||
7. Get wallet balance:
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" get_balance
|
||||
```
|
||||
|
||||
8. Create an invoice:
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" make_invoice 1000 "Test invoice"
|
||||
```
|
||||
|
||||
9. Look up the invoice:
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" lookup_invoice "PAYMENT_HASH_FROM_INVOICE"
|
||||
```
|
||||
|
||||
10. Subscribe to notifications:
|
||||
```bash
|
||||
go run cmd/walletcli/main.go "CONNECTION_URL" subscribe
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- The mock wallet service returns generic results for all methods, regardless of the input parameters.
|
||||
- The mock wallet service does not actually perform any real Lightning Network operations.
|
||||
- The mock wallet service does not persist any data between restarts.
|
||||
456
cmd/walletcli/mock-wallet-service/main.go
Normal file
456
cmd/walletcli/mock-wallet-service/main.go
Normal file
@@ -0,0 +1,456 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"orly.dev/pkg/crypto/encryption"
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/encoders/filters"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/encoders/kinds"
|
||||
"orly.dev/pkg/encoders/tag"
|
||||
"orly.dev/pkg/encoders/tags"
|
||||
"orly.dev/pkg/encoders/timestamp"
|
||||
"orly.dev/pkg/interfaces/signer"
|
||||
"orly.dev/pkg/protocol/nwc"
|
||||
"orly.dev/pkg/protocol/ws"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/interrupt"
|
||||
)
|
||||
|
||||
var (
|
||||
relayURL = flag.String("relay", "ws://localhost:8080", "Relay URL to connect to")
|
||||
walletKey = flag.String("key", "", "Wallet private key (hex)")
|
||||
generateKey = flag.Bool("generate-key", false, "Generate a new wallet key")
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
// Create context
|
||||
c, cancel := context.Cancel(context.Bg())
|
||||
interrupt.AddHandler(cancel)
|
||||
defer cancel()
|
||||
|
||||
// Initialize wallet key
|
||||
var walletSigner signer.I
|
||||
var err error
|
||||
|
||||
if *generateKey {
|
||||
// Generate a new wallet key
|
||||
walletSigner = &p256k.Signer{}
|
||||
if err = walletSigner.Generate(); chk.E(err) {
|
||||
fmt.Printf("Error generating wallet key: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Generated wallet key: %s\n", hex.Enc(walletSigner.Sec()))
|
||||
fmt.Printf("Wallet public key: %s\n", hex.Enc(walletSigner.Pub()))
|
||||
} else if *walletKey != "" {
|
||||
// Use provided wallet key
|
||||
if walletSigner, err = p256k.NewSecFromHex(*walletKey); chk.E(err) {
|
||||
fmt.Printf("Error initializing wallet key: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Using wallet key: %s\n", *walletKey)
|
||||
fmt.Printf("Wallet public key: %s\n", hex.Enc(walletSigner.Pub()))
|
||||
} else {
|
||||
// Generate a temporary wallet key
|
||||
walletSigner = &p256k.Signer{}
|
||||
if err = walletSigner.Generate(); chk.E(err) {
|
||||
fmt.Printf("Error generating temporary wallet key: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Generated temporary wallet key: %s\n", hex.Enc(walletSigner.Sec()))
|
||||
fmt.Printf("Wallet public key: %s\n", hex.Enc(walletSigner.Pub()))
|
||||
}
|
||||
|
||||
// Connect to relay
|
||||
fmt.Printf("Connecting to relay: %s\n", *relayURL)
|
||||
relay, err := ws.RelayConnect(c, *relayURL)
|
||||
if err != nil {
|
||||
fmt.Printf("Error connecting to relay: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer relay.Close()
|
||||
fmt.Println("Connected to relay")
|
||||
|
||||
// Create a mock wallet service info event
|
||||
walletServiceInfoEvent := createWalletServiceInfoEvent(walletSigner)
|
||||
|
||||
// Publish wallet service info event
|
||||
if err = relay.Publish(c, walletServiceInfoEvent); chk.E(err) {
|
||||
fmt.Printf("Error publishing wallet service info: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println("Published wallet service info")
|
||||
|
||||
// Subscribe to wallet requests
|
||||
fmt.Println("Subscribing to wallet requests...")
|
||||
sub, err := relay.Subscribe(
|
||||
c, filters.New(
|
||||
&filter.F{
|
||||
Kinds: kinds.New(kind.WalletRequest),
|
||||
Tags: tags.New(tag.New("#p", hex.Enc(walletSigner.Pub()))),
|
||||
},
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
fmt.Printf("Error subscribing to wallet requests: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer sub.Unsub()
|
||||
fmt.Println("Subscribed to wallet requests")
|
||||
|
||||
// Process wallet requests
|
||||
fmt.Println("Waiting for wallet requests...")
|
||||
for {
|
||||
select {
|
||||
case <-c.Done():
|
||||
fmt.Println("Context canceled, exiting")
|
||||
return
|
||||
case ev := <-sub.Events:
|
||||
fmt.Printf("Received wallet request: %s\n", hex.Enc(ev.ID))
|
||||
go handleWalletRequest(c, relay, walletSigner, ev)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleWalletRequest processes a wallet request and sends a response
|
||||
func handleWalletRequest(c context.T, relay *ws.Client, walletKey signer.I, ev *event.E) {
|
||||
// Get the client's public key from the event
|
||||
clientPubKey := ev.Pubkey
|
||||
|
||||
// Generate conversation key
|
||||
var ck []byte
|
||||
var err error
|
||||
if ck, err = encryption.GenerateConversationKeyWithSigner(
|
||||
walletKey,
|
||||
clientPubKey,
|
||||
); chk.E(err) {
|
||||
fmt.Printf("Error generating conversation key: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Decrypt the content
|
||||
var content []byte
|
||||
if content, err = encryption.Decrypt(ev.Content, ck); chk.E(err) {
|
||||
fmt.Printf("Error decrypting content: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse the request
|
||||
var req nwc.Request
|
||||
if err = json.Unmarshal(content, &req); chk.E(err) {
|
||||
fmt.Printf("Error parsing request: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Handling method: %s\n", req.Method)
|
||||
|
||||
// Process the request based on the method
|
||||
var result interface{}
|
||||
var respErr *nwc.ResponseError
|
||||
|
||||
switch req.Method {
|
||||
case string(nwc.GetWalletServiceInfo):
|
||||
result = handleGetWalletServiceInfo()
|
||||
case string(nwc.GetInfo):
|
||||
result = handleGetInfo(walletKey)
|
||||
case string(nwc.GetBalance):
|
||||
result = handleGetBalance()
|
||||
case string(nwc.GetBudget):
|
||||
result = handleGetBudget()
|
||||
case string(nwc.MakeInvoice):
|
||||
result = handleMakeInvoice()
|
||||
case string(nwc.PayInvoice):
|
||||
result = handlePayInvoice()
|
||||
case string(nwc.PayKeysend):
|
||||
result = handlePayKeysend()
|
||||
case string(nwc.LookupInvoice):
|
||||
result = handleLookupInvoice()
|
||||
case string(nwc.ListTransactions):
|
||||
result = handleListTransactions()
|
||||
case string(nwc.MakeHoldInvoice):
|
||||
result = handleMakeHoldInvoice()
|
||||
case string(nwc.SettleHoldInvoice):
|
||||
// No result for SettleHoldInvoice
|
||||
case string(nwc.CancelHoldInvoice):
|
||||
// No result for CancelHoldInvoice
|
||||
case string(nwc.SignMessage):
|
||||
result = handleSignMessage()
|
||||
case string(nwc.CreateConnection):
|
||||
// No result for CreateConnection
|
||||
default:
|
||||
respErr = &nwc.ResponseError{
|
||||
Code: "method_not_found",
|
||||
Message: fmt.Sprintf("method %s not found", req.Method),
|
||||
}
|
||||
}
|
||||
|
||||
// Create response
|
||||
resp := nwc.Response{
|
||||
ResultType: req.Method,
|
||||
Result: result,
|
||||
Error: respErr,
|
||||
}
|
||||
|
||||
// Marshal response
|
||||
var respBytes []byte
|
||||
if respBytes, err = json.Marshal(resp); chk.E(err) {
|
||||
fmt.Printf("Error marshaling response: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Encrypt response
|
||||
var encResp []byte
|
||||
if encResp, err = encryption.Encrypt(respBytes, ck); chk.E(err) {
|
||||
fmt.Printf("Error encrypting response: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Create response event
|
||||
respEv := &event.E{
|
||||
Content: encResp,
|
||||
CreatedAt: timestamp.Now(),
|
||||
Kind: kind.WalletResponse,
|
||||
Tags: tags.New(
|
||||
tag.New("p", hex.Enc(clientPubKey)),
|
||||
tag.New("e", hex.Enc(ev.ID)),
|
||||
tag.New(string(nwc.EncryptionTag), string(nwc.Nip44V2)),
|
||||
),
|
||||
}
|
||||
|
||||
// Sign the response event
|
||||
if err = respEv.Sign(walletKey); chk.E(err) {
|
||||
fmt.Printf("Error signing response event: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Publish the response event
|
||||
if err = relay.Publish(c, respEv); chk.E(err) {
|
||||
fmt.Printf("Error publishing response event: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully handled request: %s\n", hex.Enc(ev.ID))
|
||||
}
|
||||
|
||||
// createWalletServiceInfoEvent creates a wallet service info event
|
||||
func createWalletServiceInfoEvent(walletKey signer.I) *event.E {
|
||||
ev := &event.E{
|
||||
Content: []byte(
|
||||
string(nwc.GetWalletServiceInfo) + " " +
|
||||
string(nwc.GetInfo) + " " +
|
||||
string(nwc.GetBalance) + " " +
|
||||
string(nwc.GetBudget) + " " +
|
||||
string(nwc.MakeInvoice) + " " +
|
||||
string(nwc.PayInvoice) + " " +
|
||||
string(nwc.PayKeysend) + " " +
|
||||
string(nwc.LookupInvoice) + " " +
|
||||
string(nwc.ListTransactions) + " " +
|
||||
string(nwc.MakeHoldInvoice) + " " +
|
||||
string(nwc.SettleHoldInvoice) + " " +
|
||||
string(nwc.CancelHoldInvoice) + " " +
|
||||
string(nwc.SignMessage) + " " +
|
||||
string(nwc.CreateConnection),
|
||||
),
|
||||
CreatedAt: timestamp.Now(),
|
||||
Kind: kind.WalletServiceInfo,
|
||||
Tags: tags.New(
|
||||
tag.New(string(nwc.EncryptionTag), string(nwc.Nip44V2)),
|
||||
tag.New(string(nwc.NotificationTag), string(nwc.PaymentReceived)+" "+string(nwc.PaymentSent)+" "+string(nwc.HoldInvoiceAccepted)),
|
||||
),
|
||||
}
|
||||
if err := ev.Sign(walletKey); chk.E(err) {
|
||||
fmt.Printf("Error signing wallet service info event: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
return ev
|
||||
}
|
||||
|
||||
// Handler functions for each method
|
||||
|
||||
func handleGetWalletServiceInfo() *nwc.WalletServiceInfo {
|
||||
fmt.Println("Handling GetWalletServiceInfo request")
|
||||
return &nwc.WalletServiceInfo{
|
||||
EncryptionTypes: []nwc.EncryptionType{nwc.Nip44V2},
|
||||
Capabilities: []nwc.Capability{
|
||||
nwc.GetWalletServiceInfo,
|
||||
nwc.GetInfo,
|
||||
nwc.GetBalance,
|
||||
nwc.GetBudget,
|
||||
nwc.MakeInvoice,
|
||||
nwc.PayInvoice,
|
||||
nwc.PayKeysend,
|
||||
nwc.LookupInvoice,
|
||||
nwc.ListTransactions,
|
||||
nwc.MakeHoldInvoice,
|
||||
nwc.SettleHoldInvoice,
|
||||
nwc.CancelHoldInvoice,
|
||||
nwc.SignMessage,
|
||||
nwc.CreateConnection,
|
||||
},
|
||||
NotificationTypes: []nwc.NotificationType{
|
||||
nwc.PaymentReceived,
|
||||
nwc.PaymentSent,
|
||||
nwc.HoldInvoiceAccepted,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetInfo(walletKey signer.I) *nwc.GetInfoResult {
|
||||
fmt.Println("Handling GetInfo request")
|
||||
return &nwc.GetInfoResult{
|
||||
Alias: "Mock Wallet",
|
||||
Color: "#ff9900",
|
||||
Pubkey: hex.Enc(walletKey.Pub()),
|
||||
Network: "testnet",
|
||||
BlockHeight: 123456,
|
||||
BlockHash: "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f",
|
||||
Methods: []string{
|
||||
string(nwc.GetWalletServiceInfo),
|
||||
string(nwc.GetInfo),
|
||||
string(nwc.GetBalance),
|
||||
string(nwc.GetBudget),
|
||||
string(nwc.MakeInvoice),
|
||||
string(nwc.PayInvoice),
|
||||
string(nwc.PayKeysend),
|
||||
string(nwc.LookupInvoice),
|
||||
string(nwc.ListTransactions),
|
||||
string(nwc.MakeHoldInvoice),
|
||||
string(nwc.SettleHoldInvoice),
|
||||
string(nwc.CancelHoldInvoice),
|
||||
string(nwc.SignMessage),
|
||||
string(nwc.CreateConnection),
|
||||
},
|
||||
Notifications: []string{
|
||||
string(nwc.PaymentReceived),
|
||||
string(nwc.PaymentSent),
|
||||
string(nwc.HoldInvoiceAccepted),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetBalance() *nwc.GetBalanceResult {
|
||||
fmt.Println("Handling GetBalance request")
|
||||
return &nwc.GetBalanceResult{
|
||||
Balance: 1000000, // 1,000,000 sats
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetBudget() *nwc.GetBudgetResult {
|
||||
fmt.Println("Handling GetBudget request")
|
||||
return &nwc.GetBudgetResult{
|
||||
UsedBudget: 5000,
|
||||
TotalBudget: 10000,
|
||||
RenewsAt: int(time.Now().Add(24 * time.Hour).Unix()),
|
||||
RenewalPeriod: "daily",
|
||||
}
|
||||
}
|
||||
|
||||
func handleMakeInvoice() *nwc.Transaction {
|
||||
fmt.Println("Handling MakeInvoice request")
|
||||
return &nwc.Transaction{
|
||||
Type: "invoice",
|
||||
State: "unpaid",
|
||||
Invoice: "lnbc10n1p3zry4app5wkpza973yxheqzh6gr5vt93m3w9mfakz7r35nzk3j6cjgdyvd9ksdqqcqzpgxqyz5vqsp5usyc4lk9chsfp53kvcnvq456ganh60d89reykdngsmtj6yw3nhvq9qyyssqy4lgd8tj274q2rnzl7xvjwh9xct6rkjn47fn7tvj2s8loyy83gy7z5a5xxaqjz3tldmhglggnv8x8h8xwj7gxcr9gy5aquawzh4gqj6d3h4",
|
||||
Description: "Mock invoice",
|
||||
PaymentHash: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
Amount: 1000,
|
||||
CreatedAt: time.Now().Unix(),
|
||||
ExpiresAt: time.Now().Add(1 * time.Hour).Unix(),
|
||||
}
|
||||
}
|
||||
|
||||
func handlePayInvoice() *nwc.PayInvoiceResult {
|
||||
fmt.Println("Handling PayInvoice request")
|
||||
return &nwc.PayInvoiceResult{
|
||||
Preimage: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
FeesPaid: 10,
|
||||
}
|
||||
}
|
||||
|
||||
func handlePayKeysend() *nwc.PayKeysendResult {
|
||||
fmt.Println("Handling PayKeysend request")
|
||||
return &nwc.PayKeysendResult{
|
||||
Preimage: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
FeesPaid: 5,
|
||||
}
|
||||
}
|
||||
|
||||
func handleLookupInvoice() *nwc.Transaction {
|
||||
fmt.Println("Handling LookupInvoice request")
|
||||
return &nwc.Transaction{
|
||||
Type: "invoice",
|
||||
State: "settled",
|
||||
Invoice: "lnbc10n1p3zry4app5wkpza973yxheqzh6gr5vt93m3w9mfakz7r35nzk3j6cjgdyvd9ksdqqcqzpgxqyz5vqsp5usyc4lk9chsfp53kvcnvq456ganh60d89reykdngsmtj6yw3nhvq9qyyssqy4lgd8tj274q2rnzl7xvjwh9xct6rkjn47fn7tvj2s8loyy83gy7z5a5xxaqjz3tldmhglggnv8x8h8xwj7gxcr9gy5aquawzh4gqj6d3h4",
|
||||
Description: "Mock invoice",
|
||||
PaymentHash: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
Preimage: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
Amount: 1000,
|
||||
CreatedAt: time.Now().Add(-1 * time.Hour).Unix(),
|
||||
ExpiresAt: time.Now().Add(23 * time.Hour).Unix(),
|
||||
}
|
||||
}
|
||||
|
||||
func handleListTransactions() *nwc.ListTransactionsResult {
|
||||
fmt.Println("Handling ListTransactions request")
|
||||
return &nwc.ListTransactionsResult{
|
||||
Transactions: []nwc.Transaction{
|
||||
{
|
||||
Type: "incoming",
|
||||
State: "settled",
|
||||
Invoice: "lnbc10n1p3zry4app5wkpza973yxheqzh6gr5vt93m3w9mfakz7r35nzk3j6cjgdyvd9ksdqqcqzpgxqyz5vqsp5usyc4lk9chsfp53kvcnvq456ganh60d89reykdngsmtj6yw3nhvq9qyyssqy4lgd8tj274q2rnzl7xvjwh9xct6rkjn47fn7tvj2s8loyy83gy7z5a5xxaqjz3tldmhglggnv8x8h8xwj7gxcr9gy5aquawzh4gqj6d3h4",
|
||||
Description: "Mock incoming transaction",
|
||||
PaymentHash: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
Preimage: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
Amount: 1000,
|
||||
CreatedAt: time.Now().Add(-24 * time.Hour).Unix(),
|
||||
ExpiresAt: time.Now().Add(24 * time.Hour).Unix(),
|
||||
},
|
||||
{
|
||||
Type: "outgoing",
|
||||
State: "settled",
|
||||
Invoice: "lnbc20n1p3zry4app5wkpza973yxheqzh6gr5vt93m3w9mfakz7r35nzk3j6cjgdyvd9ksdqqcqzpgxqyz5vqsp5usyc4lk9chsfp53kvcnvq456ganh60d89reykdngsmtj6yw3nhvq9qyyssqy4lgd8tj274q2rnzl7xvjwh9xct6rkjn47fn7tvj2s8loyy83gy7z5a5xxaqjz3tldmhglggnv8x8h8xwj7gxcr9gy5aquawzh4gqj6d3h4",
|
||||
Description: "Mock outgoing transaction",
|
||||
PaymentHash: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
Preimage: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
Amount: 2000,
|
||||
FeesPaid: 10,
|
||||
CreatedAt: time.Now().Add(-12 * time.Hour).Unix(),
|
||||
ExpiresAt: time.Now().Add(36 * time.Hour).Unix(),
|
||||
},
|
||||
},
|
||||
TotalCount: 2,
|
||||
}
|
||||
}
|
||||
|
||||
func handleMakeHoldInvoice() *nwc.Transaction {
|
||||
fmt.Println("Handling MakeHoldInvoice request")
|
||||
return &nwc.Transaction{
|
||||
Type: "hold_invoice",
|
||||
State: "unpaid",
|
||||
Invoice: "lnbc10n1p3zry4app5wkpza973yxheqzh6gr5vt93m3w9mfakz7r35nzk3j6cjgdyvd9ksdqqcqzpgxqyz5vqsp5usyc4lk9chsfp53kvcnvq456ganh60d89reykdngsmtj6yw3nhvq9qyyssqy4lgd8tj274q2rnzl7xvjwh9xct6rkjn47fn7tvj2s8loyy83gy7z5a5xxaqjz3tldmhglggnv8x8h8xwj7gxcr9gy5aquawzh4gqj6d3h4",
|
||||
Description: "Mock hold invoice",
|
||||
PaymentHash: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
Amount: 1000,
|
||||
CreatedAt: time.Now().Unix(),
|
||||
ExpiresAt: time.Now().Add(1 * time.Hour).Unix(),
|
||||
}
|
||||
}
|
||||
|
||||
func handleSignMessage() *nwc.SignMessageResult {
|
||||
fmt.Println("Handling SignMessage request")
|
||||
return &nwc.SignMessageResult{
|
||||
Message: "Mock message",
|
||||
Signature: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
|
||||
}
|
||||
}
|
||||
4
go.mod
4
go.mod
@@ -5,13 +5,12 @@ go 1.24.2
|
||||
require (
|
||||
github.com/adrg/xdg v0.5.3
|
||||
github.com/alexflint/go-arg v1.6.0
|
||||
github.com/coder/websocket v1.8.13
|
||||
github.com/danielgtaylor/huma/v2 v2.34.1
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/dgraph-io/badger/v4 v4.7.0
|
||||
github.com/fasthttp/websocket v1.5.12
|
||||
github.com/fatih/color v1.18.0
|
||||
github.com/gobwas/httphead v0.1.0
|
||||
github.com/gobwas/ws v1.4.0
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
|
||||
github.com/klauspost/cpuid/v2 v2.2.11
|
||||
github.com/minio/sha256-simd v1.0.1
|
||||
@@ -41,7 +40,6 @@ require (
|
||||
github.com/felixge/fgprof v0.9.5 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/gobwas/pool v0.2.1 // indirect
|
||||
github.com/google/flatbuffers v25.2.10+incompatible // indirect
|
||||
github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
|
||||
6
go.sum
6
go.sum
@@ -19,6 +19,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
||||
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
|
||||
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
|
||||
github.com/danielgtaylor/huma/v2 v2.34.1 h1:EmOJAbzEGfy0wAq/QMQ1YKfEMBEfE94xdBRLPBP0gwQ=
|
||||
github.com/danielgtaylor/huma/v2 v2.34.1/go.mod h1:ynwJgLk8iGVgoaipi5tgwIQ5yoFNmiu+QdhU7CEEmhk=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -44,13 +46,9 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs=
|
||||
github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
|
||||
41
main.go
41
main.go
@@ -1,11 +1,10 @@
|
||||
// Package main is a nostr relay with a simple follow/mute list authentication
|
||||
// scheme and the new HTTP REST based protocol. Configuration is via environment
|
||||
// scheme and the new HTTP REST-based protocol. Configuration is via environment
|
||||
// variables or an optional .env file.
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
|
||||
@@ -15,6 +14,8 @@ import (
|
||||
"orly.dev/pkg/app/relay"
|
||||
"orly.dev/pkg/app/relay/options"
|
||||
"orly.dev/pkg/database"
|
||||
"orly.dev/pkg/protocol/openapi"
|
||||
"orly.dev/pkg/protocol/servemux"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/interrupt"
|
||||
@@ -43,15 +44,24 @@ func main() {
|
||||
os.Exit(0)
|
||||
}
|
||||
lol.SetLogLevel(cfg.LogLevel)
|
||||
if cfg.Pprof {
|
||||
defer profile.Start(profile.MemProfile).Stop()
|
||||
go func() {
|
||||
chk.E(http.ListenAndServe("127.0.0.1:6060", nil))
|
||||
}()
|
||||
if cfg.Pprof != "" {
|
||||
switch cfg.Pprof {
|
||||
case "cpu":
|
||||
prof := profile.Start(profile.CPUProfile)
|
||||
defer prof.Stop()
|
||||
case "memory":
|
||||
prof := profile.Start(profile.MemProfile)
|
||||
defer prof.Stop()
|
||||
case "allocation":
|
||||
prof := profile.Start(profile.MemProfileAllocs)
|
||||
defer prof.Stop()
|
||||
}
|
||||
}
|
||||
c, cancel := context.Cancel(context.Bg())
|
||||
storage, err := database.New(c, cancel, cfg.DataDir, cfg.DbLogLevel)
|
||||
if chk.E(err) {
|
||||
var storage *database.D
|
||||
if storage, err = database.New(
|
||||
c, cancel, cfg.DataDir, cfg.DbLogLevel,
|
||||
); chk.E(err) {
|
||||
os.Exit(1)
|
||||
}
|
||||
r := &app2.Relay{C: cfg, Store: storage}
|
||||
@@ -66,9 +76,20 @@ func main() {
|
||||
C: cfg,
|
||||
}
|
||||
var opts []options.O
|
||||
if server, err = relay.NewServer(serverParams, opts...); chk.E(err) {
|
||||
serveMux := servemux.NewServeMux()
|
||||
if server, err = relay.NewServer(
|
||||
serverParams, serveMux, opts...,
|
||||
); chk.E(err) {
|
||||
os.Exit(1)
|
||||
}
|
||||
openapi.New(
|
||||
server,
|
||||
cfg.AppName,
|
||||
version.V,
|
||||
version.Description,
|
||||
"/api",
|
||||
serveMux,
|
||||
)
|
||||
if err != nil {
|
||||
log.F.F("failed to create server: %v", err)
|
||||
}
|
||||
|
||||
@@ -5,12 +5,6 @@ package config
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"orly.dev/pkg/utils/apputil"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
env2 "orly.dev/pkg/utils/env"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/lol"
|
||||
"orly.dev/pkg/version"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
@@ -18,6 +12,13 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"orly.dev/pkg/utils/apputil"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
env2 "orly.dev/pkg/utils/env"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/lol"
|
||||
"orly.dev/pkg/version"
|
||||
|
||||
"github.com/adrg/xdg"
|
||||
"go-simpler.org/env"
|
||||
)
|
||||
@@ -26,20 +27,27 @@ import (
|
||||
// and default values. It defines parameters for app behaviour, storage
|
||||
// locations, logging, and network settings used across the relay service.
|
||||
type C struct {
|
||||
AppName string `env:"ORLY_APP_NAME" default:"orly"`
|
||||
Config string `env:"ORLY_CONFIG_DIR" usage:"location for configuration file, which has the name '.env' to make it harder to delete, and is a standard environment KEY=value<newline>... style" default:"~/.config/orly"`
|
||||
State string `env:"ORLY_STATE_DATA_DIR" usage:"storage location for state data affected by dynamic interactive interfaces" default:"~/.local/state/orly"`
|
||||
DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/cache/orly"`
|
||||
Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
|
||||
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
|
||||
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
|
||||
DbLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
|
||||
Pprof bool `env:"ORLY_PPROF" default:"false" usage:"enable pprof on 127.0.0.1:6060"`
|
||||
AuthRequired bool `env:"ORLY_AUTH_REQUIRED" default:"false" usage:"require authentication for all requests"`
|
||||
PublicReadable bool `env:"ORLY_PUBLIC_READABLE" default:"true" usage:"allow public read access to regardless of whether the client is authed"`
|
||||
SpiderSeeds []string `env:"ORLY_SPIDER_SEEDS" usage:"seeds to use for the spider (relays that are looked up initially to find owner relay lists) (comma separated)" default:"wss://relay.nostr.band/,wss://relay.damus.io/,wss://nostr.wine/,wss://nostr.land/,wss://theforest.nostr1.com/"`
|
||||
Owners []string `env:"ORLY_OWNERS" usage:"list of users whose follow lists designate whitelisted users who can publish events, and who can read if public readable is false (comma separated)"`
|
||||
Private bool `env:"ORLY_PRIVATE" usage:"do not spider for user metadata because the relay is private and this would leak relay memberships" default:"false"`
|
||||
AppName string `env:"ORLY_APP_NAME" default:"ORLY"`
|
||||
Config string `env:"ORLY_CONFIG_DIR" usage:"location for configuration file, which has the name '.env' to make it harder to delete, and is a standard environment KEY=value<newline>... style" default:"~/.config/orly"`
|
||||
State string `env:"ORLY_STATE_DATA_DIR" usage:"storage location for state data affected by dynamic interactive interfaces" default:"~/.local/state/orly"`
|
||||
DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/cache/orly"`
|
||||
Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
|
||||
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
|
||||
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
|
||||
DbLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
|
||||
Pprof string `env:"ORLY_PPROF" usage:"enable pprof on 127.0.0.1:6060" enum:"cpu,memory,allocation"`
|
||||
AuthRequired bool `env:"ORLY_AUTH_REQUIRED" default:"false" usage:"require authentication for all requests"`
|
||||
PublicReadable bool `env:"ORLY_PUBLIC_READABLE" default:"true" usage:"allow public read access to regardless of whether the client is authed"`
|
||||
SpiderSeeds []string `env:"ORLY_SPIDER_SEEDS" usage:"seeds to use for the spider (relays that are looked up initially to find owner relay lists) (comma separated)" default:"wss://profiles.nostr1.com/,wss://relay.nostr.band/,wss://relay.damus.io/,wss://nostr.wine/,wss://nostr.land/,wss://theforest.nostr1.com/,wss://profiles.nostr1.com/"`
|
||||
SpiderType string `env:"ORLY_SPIDER_TYPE" usage:"whether to spider, and what degree of spidering: none, directory, follows (follows means to the second degree of the follow graph)" default:"directory"`
|
||||
SpiderTime time.Duration `env:"ORLY_SPIDER_FREQUENCY" usage:"how often to run the spider, uses notation 0h0m0s" default:"1h"`
|
||||
SpiderSecondDegree bool `env:"ORLY_SPIDER_SECOND_DEGREE" default:"true" usage:"whether to enable spidering the second degree of follows for non-directory events if ORLY_SPIDER_TYPE is set to 'follows'"`
|
||||
Owners []string `env:"ORLY_OWNERS" usage:"list of users whose follow lists designate whitelisted users who can publish events, and who can read if public readable is false (comma separated)"`
|
||||
Private bool `env:"ORLY_PRIVATE" usage:"do not spider for user metadata because the relay is private and this would leak relay memberships" default:"false"`
|
||||
Whitelist []string `env:"ORLY_WHITELIST" usage:"only allow connections from this list of IP addresses"`
|
||||
Blacklist []string `env:"ORLY_BLACKLIST" usage:"list of pubkeys to block when auth is not required (comma separated)"`
|
||||
RelaySecret string `env:"ORLY_SECRET_KEY" usage:"secret key for relay cluster replication authentication"`
|
||||
PeerRelays []string `env:"ORLY_PEER_RELAYS" usage:"list of peer relays URLs that new events are pushed to in format <pubkey>|<url>"`
|
||||
}
|
||||
|
||||
// New creates and initializes a new configuration object for the relay
|
||||
@@ -73,6 +81,9 @@ func New() (cfg *C, err error) {
|
||||
if cfg.State == "" || strings.Contains(cfg.State, "~") {
|
||||
cfg.State = filepath.Join(xdg.StateHome, cfg.AppName)
|
||||
}
|
||||
if len(cfg.Owners) > 0 {
|
||||
cfg.AuthRequired = true
|
||||
}
|
||||
envPath := filepath.Join(cfg.Config, ".env")
|
||||
if apputil.FileExists(envPath) {
|
||||
var e env2.Env
|
||||
@@ -85,8 +96,19 @@ func New() (cfg *C, err error) {
|
||||
return
|
||||
}
|
||||
lol.SetLogLevel(cfg.LogLevel)
|
||||
log.I.F("loaded configuration from %s", envPath)
|
||||
log.T.F("loaded configuration from %s", envPath)
|
||||
}
|
||||
// if spider seeds has no elements, there still is a single entry with an
|
||||
// empty string; and also if any of the fields are empty strings, they need
|
||||
// to be removed.
|
||||
var seeds []string
|
||||
for _, u := range cfg.SpiderSeeds {
|
||||
if u == "" {
|
||||
continue
|
||||
}
|
||||
seeds = append(seeds, u)
|
||||
}
|
||||
cfg.SpiderSeeds = seeds
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -41,18 +41,37 @@ func (s *Server) AcceptEvent(
|
||||
c context.T, ev *event.E, hr *http.Request, authedPubkey []byte,
|
||||
remote string,
|
||||
) (accept bool, notice string, afterSave func()) {
|
||||
// if auth is required and the user is not authed, reject
|
||||
if s.AuthRequired() && len(authedPubkey) == 0 {
|
||||
if !s.AuthRequired() {
|
||||
// Check blacklist for public relay mode
|
||||
if len(s.blacklistPubkeys) > 0 {
|
||||
for _, blockedPubkey := range s.blacklistPubkeys {
|
||||
if bytes.Equal(blockedPubkey, ev.Pubkey) {
|
||||
notice = "event author is blacklisted"
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
accept = true
|
||||
return
|
||||
}
|
||||
// if auth is required and the user is not authed, reject
|
||||
if len(authedPubkey) == 0 {
|
||||
notice = "client isn't authed"
|
||||
return
|
||||
}
|
||||
for _, u := range s.OwnersMuted() {
|
||||
if bytes.Equal(u, authedPubkey) {
|
||||
notice = "event author is banned from this relay"
|
||||
return
|
||||
}
|
||||
}
|
||||
// check if the authed user is on the lists
|
||||
list := append(s.OwnersFollowed(), s.FollowedFollows()...)
|
||||
for _, u := range list {
|
||||
if bytes.Equal(u, authedPubkey) {
|
||||
accept = true
|
||||
break
|
||||
return
|
||||
}
|
||||
}
|
||||
// todo: check if event author is on owners' mute lists or block list
|
||||
return
|
||||
}
|
||||
|
||||
@@ -12,8 +12,8 @@ import (
|
||||
|
||||
// mockServerForEvent is a simple mock implementation of the Server struct for testing AcceptEvent
|
||||
type mockServerForEvent struct {
|
||||
authRequired bool
|
||||
ownersFollowed [][]byte
|
||||
authRequired bool
|
||||
ownersFollowed [][]byte
|
||||
followedFollows [][]byte
|
||||
}
|
||||
|
||||
@@ -203,8 +203,8 @@ func TestAcceptEventWithRealServer(t *testing.T) {
|
||||
if accept {
|
||||
t.Error("AcceptEvent() accept = true, want false")
|
||||
}
|
||||
if notice != "" {
|
||||
t.Errorf("AcceptEvent() notice = %v, want empty string", notice)
|
||||
if notice != "client isn't authed" {
|
||||
t.Errorf("AcceptEvent() notice = %v, want 'client isn't authed'", notice)
|
||||
}
|
||||
if afterSave != nil {
|
||||
t.Error("AcceptEvent() afterSave is not nil, but should be nil")
|
||||
@@ -234,4 +234,81 @@ func TestAcceptEventWithRealServer(t *testing.T) {
|
||||
if !accept {
|
||||
t.Error("AcceptEvent() accept = false, want true")
|
||||
}
|
||||
|
||||
// Test with muted user
|
||||
s.SetOwnersMuted([][]byte{[]byte("test-pubkey")})
|
||||
accept, notice, afterSave = s.AcceptEvent(ctx, testEvent, req, []byte("test-pubkey"), "127.0.0.1")
|
||||
if accept {
|
||||
t.Error("AcceptEvent() accept = true, want false")
|
||||
}
|
||||
if notice != "event author is banned from this relay" {
|
||||
t.Errorf("AcceptEvent() notice = %v, want 'event author is banned from this relay'", notice)
|
||||
}
|
||||
}
|
||||
|
||||
// TestAcceptEventWithBlacklist tests the blacklist functionality when auth is not required
|
||||
func TestAcceptEventWithBlacklist(t *testing.T) {
|
||||
// Create a context and HTTP request for testing
|
||||
ctx := context.Bg()
|
||||
req, _ := http.NewRequest("GET", "http://example.com", nil)
|
||||
|
||||
// Test pubkey bytes
|
||||
testPubkey := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20}
|
||||
blockedPubkey := []byte{0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30}
|
||||
|
||||
// Test with public relay mode (auth not required) and no blacklist
|
||||
s := &Server{
|
||||
C: &config.C{
|
||||
AuthRequired: false,
|
||||
},
|
||||
Lists: new(Lists),
|
||||
}
|
||||
|
||||
// Create event with test pubkey
|
||||
testEvent := &event.E{}
|
||||
testEvent.Pubkey = testPubkey
|
||||
|
||||
// Should accept when no blacklist
|
||||
accept, notice, _ := s.AcceptEvent(ctx, testEvent, req, nil, "127.0.0.1")
|
||||
if !accept {
|
||||
t.Error("AcceptEvent() accept = false, want true")
|
||||
}
|
||||
if notice != "" {
|
||||
t.Errorf("AcceptEvent() notice = %v, want empty string", notice)
|
||||
}
|
||||
|
||||
// Add blacklist with different pubkey
|
||||
s.blacklistPubkeys = [][]byte{blockedPubkey}
|
||||
|
||||
// Should still accept when author not in blacklist
|
||||
accept, notice, _ = s.AcceptEvent(ctx, testEvent, req, nil, "127.0.0.1")
|
||||
if !accept {
|
||||
t.Error("AcceptEvent() accept = false, want true")
|
||||
}
|
||||
if notice != "" {
|
||||
t.Errorf("AcceptEvent() notice = %v, want empty string", notice)
|
||||
}
|
||||
|
||||
// Create event with blocked pubkey
|
||||
blockedEvent := &event.E{}
|
||||
blockedEvent.Pubkey = blockedPubkey
|
||||
|
||||
// Should reject when author is in blacklist
|
||||
accept, notice, _ = s.AcceptEvent(ctx, blockedEvent, req, nil, "127.0.0.1")
|
||||
if accept {
|
||||
t.Error("AcceptEvent() accept = true, want false")
|
||||
}
|
||||
if notice != "event author is blacklisted" {
|
||||
t.Errorf("AcceptEvent() notice = %v, want 'event author is blacklisted'", notice)
|
||||
}
|
||||
|
||||
// Test with auth required - blacklist should not apply
|
||||
s.C.AuthRequired = true
|
||||
accept, notice, _ = s.AcceptEvent(ctx, blockedEvent, req, nil, "127.0.0.1")
|
||||
if accept {
|
||||
t.Error("AcceptEvent() accept = true, want false")
|
||||
}
|
||||
if notice != "client isn't authed" {
|
||||
t.Errorf("AcceptEvent() notice = %v, want 'client isn't authed'", notice)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,18 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"orly.dev/pkg/protocol/httpauth"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
realy_lol "orly.dev/pkg/version"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
@@ -17,6 +27,21 @@ var (
|
||||
NIP20prefixmatcher = regexp.MustCompile(`^\w+: `)
|
||||
)
|
||||
|
||||
var userAgent = fmt.Sprintf("orly/%s", realy_lol.V)
|
||||
|
||||
type WriteCloser struct {
|
||||
*bytes.Buffer
|
||||
}
|
||||
|
||||
func (w *WriteCloser) Close() error {
|
||||
w.Buffer.Reset()
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewWriteCloser(w []byte) *WriteCloser {
|
||||
return &WriteCloser{bytes.NewBuffer(w)}
|
||||
}
|
||||
|
||||
// AddEvent processes an incoming event, saves it if valid, and delivers it to
|
||||
// subscribers.
|
||||
//
|
||||
@@ -55,6 +80,7 @@ var (
|
||||
// relevant message.
|
||||
func (s *Server) AddEvent(
|
||||
c context.T, rl relay.I, ev *event.E, hr *http.Request, origin string,
|
||||
pubkeys [][]byte,
|
||||
) (accepted bool, message []byte) {
|
||||
|
||||
if ev == nil {
|
||||
@@ -85,6 +111,85 @@ func (s *Server) AddEvent(
|
||||
}
|
||||
// notify subscribers
|
||||
s.listeners.Deliver(ev)
|
||||
// push the new event to replicas if replicas are configured, and the relay
|
||||
// has an identity key.
|
||||
var err error
|
||||
if len(s.Peers.Addresses) > 0 &&
|
||||
len(s.Peers.I.Sec()) == secp256k1.SecKeyBytesLen {
|
||||
evb := ev.Marshal(nil)
|
||||
var payload io.ReadCloser
|
||||
payload = NewWriteCloser(evb)
|
||||
replica:
|
||||
for i, a := range s.Peers.Addresses {
|
||||
// the peer address index is the same as the list of pubkeys
|
||||
// (they're unpacked from a string containing both, appended at the
|
||||
// same time), so if the pubkeys from the http event endpoint sent
|
||||
// us here matches the index of this address, we can skip it.
|
||||
for _, pk := range pubkeys {
|
||||
if bytes.Equal(s.Peers.Pubkeys[i], pk) {
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"not sending back to replica that just sent us this event %0x %s",
|
||||
ev.ID, a,
|
||||
)
|
||||
},
|
||||
)
|
||||
continue replica
|
||||
}
|
||||
}
|
||||
var ur *url.URL
|
||||
if ur, err = url.Parse(a + "/api/event"); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
var r *http.Request
|
||||
r = &http.Request{
|
||||
Method: "POST",
|
||||
URL: ur,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: make(http.Header),
|
||||
Body: payload,
|
||||
ContentLength: int64(len(evb)),
|
||||
Host: ur.Host,
|
||||
}
|
||||
r.Header.Add("User-Agent", userAgent)
|
||||
if err = httpauth.AddNIP98Header(
|
||||
r, ur, "POST", "", s.Peers.I, 0,
|
||||
); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
// add this replica's pubkey to the list to prevent re-sending to
|
||||
// other replicas more than twice
|
||||
pubkeys = append(pubkeys, s.Peers.Pub())
|
||||
var pubkeysHeader []byte
|
||||
for j, pk := range pubkeys {
|
||||
pubkeysHeader = hex.EncAppend(pubkeysHeader, pk)
|
||||
if j < len(pubkeys)-1 {
|
||||
pubkeysHeader = append(pubkeysHeader, ':')
|
||||
}
|
||||
}
|
||||
r.Header.Add("X-Pubkeys", string(pubkeysHeader))
|
||||
r.GetBody = func() (rc io.ReadCloser, err error) {
|
||||
rc = payload
|
||||
return
|
||||
}
|
||||
client := &http.Client{}
|
||||
if _, err = client.Do(r); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"event pushed to replica %s\n%s",
|
||||
ur.String(), evb,
|
||||
)
|
||||
},
|
||||
)
|
||||
break
|
||||
}
|
||||
}
|
||||
accepted = true
|
||||
return
|
||||
}
|
||||
|
||||
39
pkg/app/relay/admin-auth.go
Normal file
39
pkg/app/relay/admin-auth.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"orly.dev/pkg/protocol/httpauth"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (s *Server) AdminAuth(
|
||||
r *http.Request, remote string,
|
||||
tolerance ...time.Duration,
|
||||
) (authed bool, pubkey []byte) {
|
||||
var valid bool
|
||||
var err error
|
||||
var tolerate time.Duration
|
||||
if len(tolerance) > 0 {
|
||||
tolerate = tolerance[0]
|
||||
}
|
||||
if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if !valid {
|
||||
log.E.F(
|
||||
"invalid auth %s from %s",
|
||||
r.Header.Get("Authorization"), remote,
|
||||
)
|
||||
return
|
||||
}
|
||||
for _, pk := range s.ownersPubkeys {
|
||||
if bytes.Equal(pk, pubkey) {
|
||||
authed = true
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/lol"
|
||||
)
|
||||
|
||||
// ServiceURL constructs the service URL based on the incoming HTTP request. It
|
||||
@@ -34,8 +33,6 @@ import (
|
||||
//
|
||||
// - Returns the constructed URL string.
|
||||
func (s *Server) ServiceURL(req *http.Request) (st string) {
|
||||
lol.Tracer("ServiceURL")
|
||||
defer func() { lol.Tracer("end ServiceURL", st) }()
|
||||
if !s.AuthRequired() {
|
||||
log.T.F("auth not required")
|
||||
return
|
||||
|
||||
10
pkg/app/relay/config.go
Normal file
10
pkg/app/relay/config.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/app/config"
|
||||
)
|
||||
|
||||
func (s *Server) Config() (c *config.C) {
|
||||
c = s.C
|
||||
return
|
||||
}
|
||||
@@ -3,12 +3,13 @@ package relay
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
||||
"orly.dev/pkg/interfaces/relay"
|
||||
"orly.dev/pkg/protocol/relayinfo"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/version"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// HandleRelayInfo generates and returns a relay information document in JSON
|
||||
@@ -43,8 +44,8 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
|
||||
relayinfo.EventTreatment,
|
||||
// relayinfo.CommandResults,
|
||||
relayinfo.ParameterizedReplaceableEvents,
|
||||
// relayinfo.ExpirationTimestamp,
|
||||
// relayinfo.ProtectedEvents,
|
||||
relayinfo.ExpirationTimestamp,
|
||||
relayinfo.ProtectedEvents,
|
||||
// relayinfo.RelayListMetadata,
|
||||
)
|
||||
sort.Sort(supportedNIPs)
|
||||
@@ -52,10 +53,12 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
|
||||
info = &relayinfo.T{
|
||||
Name: s.relay.Name(),
|
||||
Description: version.Description,
|
||||
Nips: supportedNIPs, Software: version.URL,
|
||||
Version: version.V,
|
||||
Nips: supportedNIPs,
|
||||
Software: version.URL,
|
||||
Version: version.V,
|
||||
Limitation: relayinfo.Limits{
|
||||
AuthRequired: s.C.AuthRequired,
|
||||
AuthRequired: s.C.AuthRequired,
|
||||
RestrictedWrites: s.C.AuthRequired,
|
||||
},
|
||||
Icon: "https://cdn.satellite.earth/ac9778868fbf23b63c47c769a74e163377e6ea94d3f0f31711931663d035c4f6.png",
|
||||
}
|
||||
|
||||
@@ -8,41 +8,41 @@ import (
|
||||
func TestLists_OwnersPubkeys(t *testing.T) {
|
||||
// Create a new Lists instance
|
||||
l := &Lists{}
|
||||
|
||||
|
||||
// Test with empty list
|
||||
pks := l.OwnersPubkeys()
|
||||
if len(pks) != 0 {
|
||||
t.Errorf("Expected empty list, got %d items", len(pks))
|
||||
}
|
||||
|
||||
|
||||
// Test with some pubkeys
|
||||
testPubkeys := [][]byte{
|
||||
[]byte("pubkey1"),
|
||||
[]byte("pubkey2"),
|
||||
[]byte("pubkey3"),
|
||||
}
|
||||
|
||||
|
||||
l.SetOwnersPubkeys(testPubkeys)
|
||||
|
||||
|
||||
// Verify length
|
||||
if l.LenOwnersPubkeys() != len(testPubkeys) {
|
||||
t.Errorf("Expected length %d, got %d", len(testPubkeys), l.LenOwnersPubkeys())
|
||||
}
|
||||
|
||||
|
||||
// Verify content
|
||||
pks = l.OwnersPubkeys()
|
||||
if len(pks) != len(testPubkeys) {
|
||||
t.Errorf("Expected %d pubkeys, got %d", len(testPubkeys), len(pks))
|
||||
}
|
||||
|
||||
|
||||
// Verify each pubkey
|
||||
for i, pk := range pks {
|
||||
if !bytes.Equal(pk, testPubkeys[i]) {
|
||||
t.Errorf("Pubkey at index %d doesn't match: expected %s, got %s",
|
||||
t.Errorf("Pubkey at index %d doesn't match: expected %s, got %s",
|
||||
i, testPubkeys[i], pk)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Verify that the returned slice is a copy, not a reference
|
||||
pks[0] = []byte("modified")
|
||||
newPks := l.OwnersPubkeys()
|
||||
@@ -54,37 +54,37 @@ func TestLists_OwnersPubkeys(t *testing.T) {
|
||||
func TestLists_OwnersFollowed(t *testing.T) {
|
||||
// Create a new Lists instance
|
||||
l := &Lists{}
|
||||
|
||||
|
||||
// Test with empty list
|
||||
followed := l.OwnersFollowed()
|
||||
if len(followed) != 0 {
|
||||
t.Errorf("Expected empty list, got %d items", len(followed))
|
||||
}
|
||||
|
||||
|
||||
// Test with some pubkeys
|
||||
testPubkeys := [][]byte{
|
||||
[]byte("followed1"),
|
||||
[]byte("followed2"),
|
||||
[]byte("followed3"),
|
||||
}
|
||||
|
||||
|
||||
l.SetOwnersFollowed(testPubkeys)
|
||||
|
||||
|
||||
// Verify length
|
||||
if l.LenOwnersFollowed() != len(testPubkeys) {
|
||||
t.Errorf("Expected length %d, got %d", len(testPubkeys), l.LenOwnersFollowed())
|
||||
}
|
||||
|
||||
|
||||
// Verify content
|
||||
followed = l.OwnersFollowed()
|
||||
if len(followed) != len(testPubkeys) {
|
||||
t.Errorf("Expected %d followed, got %d", len(testPubkeys), len(followed))
|
||||
}
|
||||
|
||||
|
||||
// Verify each pubkey
|
||||
for i, pk := range followed {
|
||||
if !bytes.Equal(pk, testPubkeys[i]) {
|
||||
t.Errorf("Followed at index %d doesn't match: expected %s, got %s",
|
||||
t.Errorf("Followed at index %d doesn't match: expected %s, got %s",
|
||||
i, testPubkeys[i], pk)
|
||||
}
|
||||
}
|
||||
@@ -93,37 +93,37 @@ func TestLists_OwnersFollowed(t *testing.T) {
|
||||
func TestLists_FollowedFollows(t *testing.T) {
|
||||
// Create a new Lists instance
|
||||
l := &Lists{}
|
||||
|
||||
|
||||
// Test with empty list
|
||||
follows := l.FollowedFollows()
|
||||
if len(follows) != 0 {
|
||||
t.Errorf("Expected empty list, got %d items", len(follows))
|
||||
}
|
||||
|
||||
|
||||
// Test with some pubkeys
|
||||
testPubkeys := [][]byte{
|
||||
[]byte("follow1"),
|
||||
[]byte("follow2"),
|
||||
[]byte("follow3"),
|
||||
}
|
||||
|
||||
|
||||
l.SetFollowedFollows(testPubkeys)
|
||||
|
||||
|
||||
// Verify length
|
||||
if l.LenFollowedFollows() != len(testPubkeys) {
|
||||
t.Errorf("Expected length %d, got %d", len(testPubkeys), l.LenFollowedFollows())
|
||||
}
|
||||
|
||||
|
||||
// Verify content
|
||||
follows = l.FollowedFollows()
|
||||
if len(follows) != len(testPubkeys) {
|
||||
t.Errorf("Expected %d follows, got %d", len(testPubkeys), len(follows))
|
||||
}
|
||||
|
||||
|
||||
// Verify each pubkey
|
||||
for i, pk := range follows {
|
||||
if !bytes.Equal(pk, testPubkeys[i]) {
|
||||
t.Errorf("Follow at index %d doesn't match: expected %s, got %s",
|
||||
t.Errorf("Follow at index %d doesn't match: expected %s, got %s",
|
||||
i, testPubkeys[i], pk)
|
||||
}
|
||||
}
|
||||
@@ -132,37 +132,37 @@ func TestLists_FollowedFollows(t *testing.T) {
|
||||
func TestLists_OwnersMuted(t *testing.T) {
|
||||
// Create a new Lists instance
|
||||
l := &Lists{}
|
||||
|
||||
|
||||
// Test with empty list
|
||||
muted := l.OwnersMuted()
|
||||
if len(muted) != 0 {
|
||||
t.Errorf("Expected empty list, got %d items", len(muted))
|
||||
}
|
||||
|
||||
|
||||
// Test with some pubkeys
|
||||
testPubkeys := [][]byte{
|
||||
[]byte("muted1"),
|
||||
[]byte("muted2"),
|
||||
[]byte("muted3"),
|
||||
}
|
||||
|
||||
|
||||
l.SetOwnersMuted(testPubkeys)
|
||||
|
||||
|
||||
// Verify length
|
||||
if l.LenOwnersMuted() != len(testPubkeys) {
|
||||
t.Errorf("Expected length %d, got %d", len(testPubkeys), l.LenOwnersMuted())
|
||||
}
|
||||
|
||||
|
||||
// Verify content
|
||||
muted = l.OwnersMuted()
|
||||
if len(muted) != len(testPubkeys) {
|
||||
t.Errorf("Expected %d muted, got %d", len(testPubkeys), len(muted))
|
||||
}
|
||||
|
||||
|
||||
// Verify each pubkey
|
||||
for i, pk := range muted {
|
||||
if !bytes.Equal(pk, testPubkeys[i]) {
|
||||
t.Errorf("Muted at index %d doesn't match: expected %s, got %s",
|
||||
t.Errorf("Muted at index %d doesn't match: expected %s, got %s",
|
||||
i, testPubkeys[i], pk)
|
||||
}
|
||||
}
|
||||
@@ -171,10 +171,10 @@ func TestLists_OwnersMuted(t *testing.T) {
|
||||
func TestLists_ConcurrentAccess(t *testing.T) {
|
||||
// Create a new Lists instance
|
||||
l := &Lists{}
|
||||
|
||||
|
||||
// Test concurrent access to the lists
|
||||
done := make(chan bool)
|
||||
|
||||
|
||||
// Concurrent reads and writes
|
||||
go func() {
|
||||
for i := 0; i < 100; i++ {
|
||||
@@ -183,7 +183,7 @@ func TestLists_ConcurrentAccess(t *testing.T) {
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 100; i++ {
|
||||
l.SetOwnersFollowed([][]byte{[]byte("followed1"), []byte("followed2")})
|
||||
@@ -191,7 +191,7 @@ func TestLists_ConcurrentAccess(t *testing.T) {
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 100; i++ {
|
||||
l.SetFollowedFollows([][]byte{[]byte("follow1"), []byte("follow2")})
|
||||
@@ -199,7 +199,7 @@ func TestLists_ConcurrentAccess(t *testing.T) {
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 100; i++ {
|
||||
l.SetOwnersMuted([][]byte{[]byte("muted1"), []byte("muted2")})
|
||||
@@ -207,11 +207,11 @@ func TestLists_ConcurrentAccess(t *testing.T) {
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
|
||||
// Wait for all goroutines to complete
|
||||
for i := 0; i < 4; i++ {
|
||||
<-done
|
||||
}
|
||||
|
||||
|
||||
// If we got here without deadlocks or panics, the test passes
|
||||
}
|
||||
}
|
||||
|
||||
39
pkg/app/relay/owners-followed-auth.go
Normal file
39
pkg/app/relay/owners-followed-auth.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"orly.dev/pkg/protocol/httpauth"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (s *Server) OwnersFollowedAuth(
|
||||
r *http.Request, remote string,
|
||||
tolerance ...time.Duration,
|
||||
) (authed bool, pubkey []byte) {
|
||||
var valid bool
|
||||
var err error
|
||||
var tolerate time.Duration
|
||||
if len(tolerance) > 0 {
|
||||
tolerate = tolerance[0]
|
||||
}
|
||||
if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if !valid {
|
||||
log.E.F(
|
||||
"invalid auth %s from %s",
|
||||
r.Header.Get("Authorization"), remote,
|
||||
)
|
||||
return
|
||||
}
|
||||
for _, pk := range s.ownersFollowed {
|
||||
if bytes.Equal(pk, pubkey) {
|
||||
authed = true
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
6
pkg/app/relay/owners-pubkeys.go
Normal file
6
pkg/app/relay/owners-pubkeys.go
Normal file
@@ -0,0 +1,6 @@
|
||||
package relay
|
||||
|
||||
func (s *Server) OwnersPubkeys() (pks [][]byte) {
|
||||
pks = s.ownersPubkeys
|
||||
return
|
||||
}
|
||||
72
pkg/app/relay/peers.go
Normal file
72
pkg/app/relay/peers.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/encoders/bech32encoding"
|
||||
"orly.dev/pkg/interfaces/signer"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/keys"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Peers is a structure that keeps the information required when peer
|
||||
// replication is enabled.
|
||||
//
|
||||
// - Addresses are the relay addresses that will be pushed new events when
|
||||
// accepted. From ORLY_PEER_RELAYS first field after the |.
|
||||
//
|
||||
// - Pubkeys are the relay peer public keys that we will send any event to
|
||||
// including privileged type. From ORLY_PEER_RELAYS before the |.
|
||||
//
|
||||
// - I - the signer of this relay, generated from the nsec in
|
||||
// ORLY_SECRET_KEY.
|
||||
type Peers struct {
|
||||
Addresses []string
|
||||
Pubkeys [][]byte
|
||||
signer.I
|
||||
}
|
||||
|
||||
// Init accepts the lists which will come from config.C for peer relay settings
|
||||
// and populate the Peers with this data after decoding it.
|
||||
func (p *Peers) Init(
|
||||
addresses []string, sec string,
|
||||
) (err error) {
|
||||
for _, address := range addresses {
|
||||
if len(address) == 0 {
|
||||
continue
|
||||
}
|
||||
split := strings.Split(address, "@")
|
||||
if len(split) != 2 {
|
||||
log.E.F("invalid peer address: %s", address)
|
||||
continue
|
||||
}
|
||||
p.Addresses = append(p.Addresses, split[1])
|
||||
var pk []byte
|
||||
if pk, err = keys.DecodeNpubOrHex(split[0]); chk.D(err) {
|
||||
continue
|
||||
}
|
||||
p.Pubkeys = append(p.Pubkeys, pk)
|
||||
log.I.F("peer %s added; pubkey: %0x", split[1], pk)
|
||||
}
|
||||
if sec == "" {
|
||||
return
|
||||
}
|
||||
p.I = &p256k.Signer{}
|
||||
var s []byte
|
||||
if s, err = keys.DecodeNsecOrHex(sec); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if err = p.I.InitSec(s); chk.E(err) {
|
||||
return
|
||||
}
|
||||
var npub []byte
|
||||
if npub, err = bech32encoding.BinToNpub(p.I.Pub()); chk.E(err) {
|
||||
return
|
||||
}
|
||||
log.I.F(
|
||||
"relay peer initialized, relay's npub: %s",
|
||||
npub,
|
||||
)
|
||||
return
|
||||
}
|
||||
@@ -1,11 +1,9 @@
|
||||
// Package publisher is a singleton package that keeps track of subscriptions in
|
||||
// both websockets and http SSE, including managing the authentication state of
|
||||
// a connection.
|
||||
package publish
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/interfaces/publisher"
|
||||
"orly.dev/pkg/interfaces/typer"
|
||||
)
|
||||
|
||||
// S is the control structure for the subscription management scheme.
|
||||
@@ -26,11 +24,10 @@ func (s *S) Type() string { return "publish" }
|
||||
func (s *S) Deliver(ev *event.E) {
|
||||
for _, p := range s.Publishers {
|
||||
p.Deliver(ev)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) Receive(msg publisher.Message) {
|
||||
func (s *S) Receive(msg typer.T) {
|
||||
t := msg.Type()
|
||||
for _, p := range s.Publishers {
|
||||
if p.Type() == t {
|
||||
|
||||
@@ -18,7 +18,9 @@ import (
|
||||
"orly.dev/pkg/utils/normalize"
|
||||
)
|
||||
|
||||
// Publish processes and stores an event in the server's storage. It handles different types of events: ephemeral, replaceable, and parameterized replaceable.
|
||||
// Publish processes and stores an event in the server's storage. It handles
|
||||
// different types of events: ephemeral, replaceable, and parameterized
|
||||
// replaceable.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
@@ -60,11 +62,22 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
log.T.F("found %d possible duplicate events", len(evs))
|
||||
for _, ev := range evs {
|
||||
del := true
|
||||
if bytes.Equal(ev.Id, evt.Id) {
|
||||
continue
|
||||
if bytes.Equal(ev.ID, evt.ID) {
|
||||
return errorf.W(
|
||||
string(
|
||||
normalize.Duplicate.F(
|
||||
"event already in relay database",
|
||||
),
|
||||
),
|
||||
)
|
||||
}
|
||||
log.I.F(
|
||||
"maybe replace %s with %s", ev.Serialize(), evt.Serialize(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"maybe replace %s with %s", ev.Serialize(),
|
||||
evt.Serialize(),
|
||||
)
|
||||
},
|
||||
)
|
||||
if ev.CreatedAt.Int() > evt.CreatedAt.Int() {
|
||||
return errorf.W(
|
||||
@@ -75,6 +88,12 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
),
|
||||
)
|
||||
}
|
||||
// not deleting these events because some clients are retarded
|
||||
// and the query will pull the new one, but a backup can recover
|
||||
// the data of old ones
|
||||
if ev.Kind.IsDirectoryEvent() {
|
||||
del = false
|
||||
}
|
||||
if evt.Kind.Equal(kind.FollowList) {
|
||||
// if the event is from someone on ownersFollowed or
|
||||
// followedFollows, for now add to this list so they're
|
||||
@@ -88,7 +107,7 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
}
|
||||
if isFollowed {
|
||||
if _, _, err = sto.SaveEvent(
|
||||
c, evt,
|
||||
c, evt, false, nil,
|
||||
); err != nil && !errors.Is(
|
||||
err, store.ErrDupEvent,
|
||||
) {
|
||||
@@ -99,7 +118,7 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
err = nil
|
||||
}
|
||||
// event has been saved and lists updated.
|
||||
return
|
||||
// return
|
||||
}
|
||||
|
||||
}
|
||||
@@ -110,7 +129,7 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
for _, pk := range owners {
|
||||
if bytes.Equal(evt.Pubkey, pk) {
|
||||
if _, _, err = sto.SaveEvent(
|
||||
c, evt,
|
||||
c, evt, false, nil,
|
||||
); err != nil && !errors.Is(
|
||||
err, store.ErrDupEvent,
|
||||
) {
|
||||
@@ -121,7 +140,7 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
err = nil
|
||||
}
|
||||
// event has been saved and lists updated.
|
||||
return
|
||||
// return
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -150,7 +169,13 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
}
|
||||
}
|
||||
} else if evt.Kind.IsParameterizedReplaceable() {
|
||||
log.I.F("parameterized replaceable %s", evt.Serialize())
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"parameterized replaceable %s", evt.Serialize(),
|
||||
)
|
||||
},
|
||||
)
|
||||
// parameterized replaceable event, delete before storing
|
||||
var evs []*event.E
|
||||
f := filter.New()
|
||||
@@ -163,21 +188,30 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
tag.New([]byte{'d'}, dTag.Value()),
|
||||
)
|
||||
}
|
||||
log.I.F(
|
||||
"filter for parameterized replaceable %v %s",
|
||||
f.Tags.ToStringsSlice(),
|
||||
f.Serialize(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"filter for parameterized replaceable %v %s",
|
||||
f.Tags.ToStringsSlice(),
|
||||
f.Serialize(),
|
||||
)
|
||||
},
|
||||
)
|
||||
if evs, err = sto.QueryEvents(c, f); err != nil {
|
||||
return errorf.E("failed to query before replacing: %w", err)
|
||||
return errorf.E("failed to query before replacing: %v", err)
|
||||
}
|
||||
// log.I.S(evs)
|
||||
if len(evs) > 0 {
|
||||
for _, ev := range evs {
|
||||
del := true
|
||||
err = nil
|
||||
log.I.F(
|
||||
"maybe replace %s with %s", ev.Serialize(), evt.Serialize(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"maybe replace %s with %s", ev.Serialize(),
|
||||
evt.Serialize(),
|
||||
)
|
||||
},
|
||||
)
|
||||
if ev.CreatedAt.Int() > evt.CreatedAt.Int() {
|
||||
return errorf.D(string(normalize.Error.F("not replacing newer parameterized replaceable event")))
|
||||
@@ -190,9 +224,13 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
}
|
||||
evdt := ev.Tags.GetFirst(tag.New("d"))
|
||||
evtdt := evt.Tags.GetFirst(tag.New("d"))
|
||||
log.I.F(
|
||||
"%s != %s %v", evdt.Value(), evtdt.Value(),
|
||||
!bytes.Equal(evdt.Value(), evtdt.Value()),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"%s != %s %v", evdt.Value(), evtdt.Value(),
|
||||
!bytes.Equal(evdt.Value(), evtdt.Value()),
|
||||
)
|
||||
},
|
||||
)
|
||||
if !bytes.Equal(evdt.Value(), evtdt.Value()) {
|
||||
continue
|
||||
@@ -222,10 +260,17 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, _, err = sto.SaveEvent(c, evt); err != nil && !errors.Is(
|
||||
if _, _, err = sto.SaveEvent(
|
||||
c, evt, false, append(s.Peers.Pubkeys, s.ownersPubkeys...),
|
||||
); err != nil && !errors.Is(
|
||||
err, store.ErrDupEvent,
|
||||
) {
|
||||
return
|
||||
}
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf("saved event:\n%s", evt.Serialize())
|
||||
},
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -6,10 +6,13 @@ import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"orly.dev/pkg/protocol/socketapi"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"orly.dev/pkg/protocol/openapi"
|
||||
"orly.dev/pkg/protocol/socketapi"
|
||||
|
||||
"orly.dev/pkg/app/config"
|
||||
"orly.dev/pkg/app/relay/helpers"
|
||||
"orly.dev/pkg/app/relay/options"
|
||||
@@ -18,6 +21,7 @@ import (
|
||||
"orly.dev/pkg/protocol/servemux"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/keys"
|
||||
"orly.dev/pkg/utils/log"
|
||||
|
||||
"github.com/rs/cors"
|
||||
@@ -27,16 +31,19 @@ import (
|
||||
// encapsulates various components such as context, cancel function, options,
|
||||
// relay interface, address, HTTP server, and configuration settings.
|
||||
type Server struct {
|
||||
Ctx context.T
|
||||
Cancel context.F
|
||||
options *options.T
|
||||
relay relay.I
|
||||
Addr string
|
||||
mux *servemux.S
|
||||
httpServer *http.Server
|
||||
listeners *publish.S
|
||||
Ctx context.T
|
||||
Cancel context.F
|
||||
options *options.T
|
||||
relay relay.I
|
||||
Addr string
|
||||
mux *servemux.S
|
||||
httpServer *http.Server
|
||||
listeners *publish.S
|
||||
blacklistPubkeys [][]byte
|
||||
*config.C
|
||||
*Lists
|
||||
*Peers
|
||||
Mux *servemux.S
|
||||
}
|
||||
|
||||
// ServerParams represents the configuration parameters for initializing a
|
||||
@@ -48,6 +55,7 @@ type ServerParams struct {
|
||||
Rl relay.I
|
||||
DbPath string
|
||||
MaxLimit int
|
||||
Mux *servemux.S
|
||||
*config.C
|
||||
}
|
||||
|
||||
@@ -78,7 +86,9 @@ type ServerParams struct {
|
||||
// - Sets up a ServeMux for handling HTTP requests.
|
||||
//
|
||||
// - Initializes the relay, starting its operation in a separate goroutine.
|
||||
func NewServer(sp *ServerParams, opts ...options.O) (s *Server, err error) {
|
||||
func NewServer(
|
||||
sp *ServerParams, serveMux *servemux.S, opts ...options.O,
|
||||
) (s *Server, err error) {
|
||||
op := options.Default()
|
||||
for _, opt := range opts {
|
||||
opt(op)
|
||||
@@ -88,7 +98,6 @@ func NewServer(sp *ServerParams, opts ...options.O) (s *Server, err error) {
|
||||
return nil, fmt.Errorf("storage init: %w", err)
|
||||
}
|
||||
}
|
||||
serveMux := servemux.NewServeMux()
|
||||
s = &Server{
|
||||
Ctx: sp.Ctx,
|
||||
Cancel: sp.Cancel,
|
||||
@@ -97,8 +106,23 @@ func NewServer(sp *ServerParams, opts ...options.O) (s *Server, err error) {
|
||||
options: op,
|
||||
C: sp.C,
|
||||
Lists: new(Lists),
|
||||
Peers: new(Peers),
|
||||
}
|
||||
s.listeners = publish.New(socketapi.New(s))
|
||||
// Parse blacklist pubkeys
|
||||
for _, v := range s.C.Blacklist {
|
||||
if len(v) == 0 {
|
||||
continue
|
||||
}
|
||||
var pk []byte
|
||||
if pk, err = keys.DecodeNpubOrHex(v); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
s.blacklistPubkeys = append(s.blacklistPubkeys, pk)
|
||||
}
|
||||
chk.E(
|
||||
s.Peers.Init(sp.C.PeerRelays, sp.C.RelaySecret),
|
||||
)
|
||||
s.listeners = publish.New(socketapi.New(s), openapi.NewPublisher(s))
|
||||
go func() {
|
||||
if err := s.relay.Init(); chk.E(err) {
|
||||
s.Shutdown()
|
||||
@@ -130,6 +154,21 @@ func NewServer(sp *ServerParams, opts ...options.O) (s *Server, err error) {
|
||||
//
|
||||
// - For all other paths, delegates to the internal mux's ServeHTTP method.
|
||||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
c := s.Config()
|
||||
remote := helpers.GetRemoteFromReq(r)
|
||||
var whitelisted bool
|
||||
if len(c.Whitelist) > 0 {
|
||||
for _, addr := range c.Whitelist {
|
||||
if strings.HasPrefix(remote, addr) {
|
||||
whitelisted = true
|
||||
}
|
||||
}
|
||||
} else {
|
||||
whitelisted = true
|
||||
}
|
||||
if !whitelisted {
|
||||
return
|
||||
}
|
||||
// standard nostr protocol only governs the "root" path of the relay and
|
||||
// websockets
|
||||
if r.URL.Path == "/" {
|
||||
@@ -142,9 +181,13 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
}
|
||||
log.I.F(
|
||||
"http request: %s from %s",
|
||||
r.URL.String(), helpers.GetRemoteFromReq(r),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"http request: %s from %s",
|
||||
r.URL.String(), helpers.GetRemoteFromReq(r),
|
||||
)
|
||||
},
|
||||
)
|
||||
s.mux.ServeHTTP(w, r)
|
||||
}
|
||||
@@ -182,6 +225,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
func (s *Server) Start(
|
||||
host string, port int, started ...chan bool,
|
||||
) (err error) {
|
||||
log.I.F("running spider every %v", s.C.SpiderTime)
|
||||
if len(s.C.Owners) > 0 {
|
||||
// start up spider
|
||||
if err = s.Spider(s.C.Private); chk.E(err) {
|
||||
@@ -191,7 +235,7 @@ func (s *Server) Start(
|
||||
}
|
||||
}
|
||||
// start up a spider run to trigger every 30 minutes
|
||||
ticker := time.NewTicker(time.Hour)
|
||||
ticker := time.NewTicker(s.C.SpiderTime)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
@@ -209,8 +253,8 @@ func (s *Server) Start(
|
||||
}()
|
||||
addr := net.JoinHostPort(host, strconv.Itoa(port))
|
||||
log.I.F("starting relay listener at %s", addr)
|
||||
ln, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
var ln net.Listener
|
||||
if ln, err = net.Listen("tcp", addr); err != nil {
|
||||
return err
|
||||
}
|
||||
s.httpServer = &http.Server{
|
||||
|
||||
@@ -1,47 +1,109 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"runtime/debug"
|
||||
"time"
|
||||
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/database/indexes/types"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"orly.dev/pkg/encoders/kinds"
|
||||
"orly.dev/pkg/encoders/tag"
|
||||
"orly.dev/pkg/encoders/timestamp"
|
||||
"orly.dev/pkg/protocol/ws"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/errorf"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"sort"
|
||||
"sync"
|
||||
"orly.dev/pkg/utils/values"
|
||||
)
|
||||
|
||||
// IdPkTs is a map of event IDs to their id, pubkey, kind, and timestamp
|
||||
// This is used to reduce memory usage by storing only the essential information
|
||||
// instead of the full events
|
||||
type IdPkTs struct {
|
||||
Id []byte
|
||||
Pubkey []byte
|
||||
Kind uint16
|
||||
Timestamp int64
|
||||
}
|
||||
|
||||
func (s *Server) SpiderFetch(
|
||||
k *kinds.T, noFetch, noExtract bool, pubkeys ...[]byte,
|
||||
) (pks [][]byte, err error) {
|
||||
// Map to store id, pubkey, kind, and timestamp for each event
|
||||
// Key is a combination of pubkey and kind for deduplication
|
||||
pkKindMap := make(map[string]*IdPkTs)
|
||||
// Map to collect pubkeys from p tags
|
||||
pkMap := make(map[string]struct{})
|
||||
|
||||
// first search the local database
|
||||
pkList := tag.New(pubkeys...)
|
||||
f := &filter.F{
|
||||
Kinds: k,
|
||||
Authors: pkList,
|
||||
}
|
||||
var evs event.S
|
||||
if evs, err = s.Storage().QueryEvents(s.Ctx, f); chk.E(err) {
|
||||
|
||||
var kindsList string
|
||||
if k != nil {
|
||||
for i, kk := range k.K {
|
||||
if i > 0 {
|
||||
kindsList += ","
|
||||
}
|
||||
kindsList += kk.Name()
|
||||
}
|
||||
} else {
|
||||
kindsList = "*"
|
||||
}
|
||||
|
||||
// Query local database
|
||||
var localEvents event.S
|
||||
if localEvents, err = s.Storage().QueryEvents(s.Ctx, f); chk.E(err) {
|
||||
// none were found, so we need to scan the spiders
|
||||
err = nil
|
||||
}
|
||||
var kindsList string
|
||||
for i, kk := range k.K {
|
||||
if i > 0 {
|
||||
kindsList += ","
|
||||
|
||||
// Process local events
|
||||
for _, ev := range localEvents {
|
||||
// Create a key based on pubkey and kind for deduplication
|
||||
pkKindKey := string(ev.Pubkey) + string(ev.Kind.Marshal(nil))
|
||||
|
||||
// Check if we already have an event with this pubkey and kind
|
||||
existing, exists := pkKindMap[pkKindKey]
|
||||
|
||||
// If it doesn't exist or the new event is newer, store it
|
||||
if !exists || ev.CreatedAtInt64() > existing.Timestamp {
|
||||
pkKindMap[pkKindKey] = &IdPkTs{
|
||||
Id: ev.ID,
|
||||
Pubkey: ev.Pubkey,
|
||||
Kind: ev.Kind.ToU16(),
|
||||
Timestamp: ev.CreatedAtInt64(),
|
||||
}
|
||||
|
||||
// Extract p tags if not in noExtract mode
|
||||
if !noExtract {
|
||||
t := ev.Tags.GetAll(tag.New("p"))
|
||||
for _, tt := range t.ToSliceOfTags() {
|
||||
pkh := tt.Value()
|
||||
if len(pkh) != 2*schnorr.PubKeyBytesLen {
|
||||
continue
|
||||
}
|
||||
pk := make([]byte, schnorr.PubKeyBytesLen)
|
||||
if _, err = hex.DecBytes(pk, pkh); err != nil {
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
pkMap[string(pk)] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
kindsList += kk.Name()
|
||||
// Nil the event to free memory
|
||||
ev = nil
|
||||
}
|
||||
log.I.F("%d events found of type %s", len(evs), kindsList)
|
||||
// for _, ev := range evs {
|
||||
// o += fmt.Sprintf("%s\n\n", ev.Marshal(nil))
|
||||
// }
|
||||
// log.I.F("%s", o)
|
||||
if !noFetch {
|
||||
log.I.F("%d events found of type %s", len(pkKindMap), kindsList)
|
||||
if !noFetch && len(s.C.SpiderSeeds) > 0 {
|
||||
// we need to search the spider seeds.
|
||||
// Break up pubkeys into batches of 128
|
||||
for i := 0; i < len(pubkeys); i += 128 {
|
||||
@@ -56,94 +118,103 @@ func (s *Server) SpiderFetch(
|
||||
)
|
||||
batchPkList := tag.New(batchPubkeys...)
|
||||
lim := uint(batchPkList.Len())
|
||||
l := &lim
|
||||
var since *timestamp.T
|
||||
if k == nil {
|
||||
since = timestamp.FromTime(time.Now().Add(-1 * s.C.SpiderTime * 3 / 2))
|
||||
} else {
|
||||
l = values.ToUintPointer(512)
|
||||
}
|
||||
batchFilter := &filter.F{
|
||||
Kinds: k,
|
||||
Authors: batchPkList,
|
||||
Limit: &lim,
|
||||
Since: since,
|
||||
Limit: l,
|
||||
}
|
||||
|
||||
var mx sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
|
||||
for _, seed := range s.C.SpiderSeeds {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
select {
|
||||
case <-s.Ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
var evss event.S
|
||||
var cli *ws.Client
|
||||
if cli, err = ws.RelayConnect(
|
||||
context.Bg(), seed,
|
||||
); chk.E(err) {
|
||||
err = nil
|
||||
return
|
||||
}
|
||||
if evss, err = cli.QuerySync(
|
||||
context.Bg(), batchFilter,
|
||||
); chk.E(err) {
|
||||
err = nil
|
||||
return
|
||||
}
|
||||
mx.Lock()
|
||||
// save the events to the database
|
||||
for _, ev := range evss {
|
||||
log.I.F("saving event:\n%s", ev.Marshal(nil))
|
||||
select {
|
||||
case <-s.Ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
var evss event.S
|
||||
var cli *ws.Client
|
||||
if cli, err = ws.RelayConnect(
|
||||
context.Bg(), seed,
|
||||
); chk.E(err) {
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
if evss, err = cli.QuerySync(
|
||||
context.Bg(), batchFilter,
|
||||
); chk.E(err) {
|
||||
err = nil
|
||||
return
|
||||
}
|
||||
// Process each event immediately
|
||||
for i, ev := range evss {
|
||||
// log.I.S(ev)
|
||||
// Create a key based on pubkey and kind for deduplication
|
||||
pkKindKey := string(ev.Pubkey) + string(ev.Kind.Marshal(nil))
|
||||
// Check if we already have an event with this pubkey and kind
|
||||
existing, exists := pkKindMap[pkKindKey]
|
||||
// If it doesn't exist or the new event is newer, store it and save to database
|
||||
if !exists || ev.CreatedAtInt64() > existing.Timestamp {
|
||||
var ser *types.Uint40
|
||||
if ser, err = s.Storage().GetSerialById(ev.ID); err == nil && ser != nil {
|
||||
err = errorf.E("event already exists: %0x", ev.ID)
|
||||
return
|
||||
} else {
|
||||
// verify the signature
|
||||
var valid bool
|
||||
if valid, err = ev.Verify(); chk.E(err) || !valid {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Save the event to the database
|
||||
if _, _, err = s.Storage().SaveEvent(
|
||||
s.Ctx, ev,
|
||||
s.Ctx, ev, true, nil,
|
||||
); chk.E(err) {
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
// Store the essential information
|
||||
pkKindMap[pkKindKey] = &IdPkTs{
|
||||
Id: ev.ID,
|
||||
Pubkey: ev.Pubkey,
|
||||
Kind: ev.Kind.ToU16(),
|
||||
Timestamp: ev.CreatedAtInt64(),
|
||||
}
|
||||
// Extract p tags if not in noExtract mode
|
||||
if !noExtract {
|
||||
t := ev.Tags.GetAll(tag.New("p"))
|
||||
for _, tt := range t.ToSliceOfTags() {
|
||||
pkh := tt.Value()
|
||||
if len(pkh) != 2*schnorr.PubKeyBytesLen {
|
||||
continue
|
||||
}
|
||||
pk := make([]byte, schnorr.PubKeyBytesLen)
|
||||
if _, err = hex.DecBytes(pk, pkh); err != nil {
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
pkMap[string(pk)] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, ev := range evss {
|
||||
evs = append(evs, ev)
|
||||
}
|
||||
mx.Unlock()
|
||||
}()
|
||||
// Nil the event in the slice to free memory
|
||||
evss[i] = nil
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
}
|
||||
// deduplicate and take the newest
|
||||
var tmp event.S
|
||||
evMap := make(map[string]event.S)
|
||||
for _, ev := range evs {
|
||||
evMap[ev.PubKeyString()] = append(evMap[ev.PubKeyString()], ev)
|
||||
}
|
||||
for _, evm := range evMap {
|
||||
if len(evm) < 1 {
|
||||
continue
|
||||
}
|
||||
if len(evm) > 1 {
|
||||
sort.Sort(evm)
|
||||
}
|
||||
tmp = append(tmp, evm[0])
|
||||
}
|
||||
evs = tmp
|
||||
// we have all we're going to get now, extract the p tags
|
||||
chk.E(s.Storage().Sync())
|
||||
debug.FreeOSMemory()
|
||||
// If we're in noExtract mode, just return
|
||||
if noExtract {
|
||||
return
|
||||
}
|
||||
pkMap := make(map[string]struct{})
|
||||
for _, ev := range evs {
|
||||
t := ev.Tags.GetAll(tag.New("p"))
|
||||
for _, tt := range t.ToSliceOfTags() {
|
||||
pkh := tt.Value()
|
||||
if len(pkh) != 2*schnorr.PubKeyBytesLen {
|
||||
continue
|
||||
}
|
||||
pk := make([]byte, schnorr.PubKeyBytesLen)
|
||||
if _, err = hex.DecBytes(pk, pkh); err != nil {
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
pkMap[string(pk)] = struct{}{}
|
||||
}
|
||||
}
|
||||
// Convert the collected pubkeys to the return format
|
||||
for pk := range pkMap {
|
||||
pks = append(pks, []byte(pk))
|
||||
}
|
||||
|
||||
@@ -2,41 +2,19 @@ package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"orly.dev/pkg/crypto/ec/bech32"
|
||||
"orly.dev/pkg/encoders/bech32encoding"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/encoders/kinds"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/keys"
|
||||
"orly.dev/pkg/utils/log"
|
||||
)
|
||||
|
||||
func (s *Server) Spider(noFetch ...bool) (err error) {
|
||||
var ownersPubkeys [][]byte
|
||||
for _, v := range s.C.Owners {
|
||||
var prf []byte
|
||||
var pk []byte
|
||||
var bits5 []byte
|
||||
if prf, bits5, err = bech32.DecodeNoLimit([]byte(v)); chk.D(err) {
|
||||
// try hex then
|
||||
if _, err = hex.DecBytes(pk, []byte(v)); chk.E(err) {
|
||||
log.W.F(
|
||||
"owner key %s is neither bech32 npub nor hex",
|
||||
v,
|
||||
)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if !bytes.Equal(prf, bech32encoding.NpubHRP) {
|
||||
log.W.F(
|
||||
"owner key %s is neither bech32 npub nor hex",
|
||||
v,
|
||||
)
|
||||
continue
|
||||
}
|
||||
if pk, err = bech32.ConvertBits(bits5, 5, 8, false); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
if pk, err = keys.DecodeNpubOrHex(v); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
// owners themselves are on the OwnersFollowed list as first level
|
||||
ownersPubkeys = append(ownersPubkeys, pk)
|
||||
@@ -118,16 +96,39 @@ func (s *Server) Spider(noFetch ...bool) (err error) {
|
||||
s.SetOwnersFollowed(ownersFollowed)
|
||||
s.SetFollowedFollows(followedFollows)
|
||||
s.SetOwnersMuted(ownersMuted)
|
||||
// lastly, update users profile metadata and relay lists in the background
|
||||
if !dontFetch {
|
||||
// lastly, update all followed users new events in the background
|
||||
if !dontFetch && s.C.SpiderType != "none" {
|
||||
go func() {
|
||||
everyone := append(ownersFollowed, followedFollows...)
|
||||
s.SpiderFetch(
|
||||
kinds.New(
|
||||
var k *kinds.T
|
||||
if s.C.SpiderType == "directory" {
|
||||
k = kinds.New(
|
||||
kind.ProfileMetadata, kind.RelayListMetadata,
|
||||
kind.DMRelaysList,
|
||||
), false, true, everyone...,
|
||||
kind.DMRelaysList, kind.MuteList,
|
||||
)
|
||||
}
|
||||
everyone := ownersFollowed
|
||||
if s.C.SpiderSecondDegree &&
|
||||
(s.C.SpiderType == "follows" ||
|
||||
s.C.SpiderType == "directory") {
|
||||
everyone = append(ownersFollowed, followedFollows...)
|
||||
}
|
||||
_, _ = s.SpiderFetch(
|
||||
k, false, true, everyone...,
|
||||
)
|
||||
// get the directory events also for second degree if spider
|
||||
// type is directory but second degree is disabled, so all
|
||||
// directory data is available for all whitelisted users.
|
||||
if !s.C.SpiderSecondDegree && s.C.SpiderType == "directory" {
|
||||
k = kinds.New(
|
||||
kind.ProfileMetadata, kind.RelayListMetadata,
|
||||
kind.DMRelaysList, kind.MuteList,
|
||||
)
|
||||
everyone = append(ownersFollowed, followedFollows...)
|
||||
_, _ = s.SpiderFetch(
|
||||
k, false, true, everyone...,
|
||||
)
|
||||
|
||||
}
|
||||
}()
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"orly.dev/pkg/encoders/eventid"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/interfaces/store"
|
||||
"orly.dev/pkg/protocol/servemux"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/units"
|
||||
"testing"
|
||||
@@ -14,6 +15,7 @@ import (
|
||||
|
||||
func startTestRelay(c context.T, t *testing.T, tr *testRelay) *Server {
|
||||
t.Helper()
|
||||
serveMux := servemux.NewServeMux()
|
||||
srv, _ := NewServer(
|
||||
&ServerParams{
|
||||
Ctx: c,
|
||||
@@ -21,6 +23,7 @@ func startTestRelay(c context.T, t *testing.T, tr *testRelay) *Server {
|
||||
Rl: tr,
|
||||
MaxLimit: 500 * units.Kb,
|
||||
},
|
||||
serveMux,
|
||||
)
|
||||
started := make(chan bool)
|
||||
go srv.Start("127.0.0.1", 0, started)
|
||||
|
||||
50
pkg/app/relay/user-auth.go
Normal file
50
pkg/app/relay/user-auth.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"orly.dev/pkg/protocol/httpauth"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (s *Server) UserAuth(
|
||||
r *http.Request, remote string, tolerance ...time.Duration,
|
||||
) (authed bool, pubkey []byte, super bool) {
|
||||
var valid bool
|
||||
var err error
|
||||
var tolerate time.Duration
|
||||
if len(tolerance) > 0 {
|
||||
tolerate = tolerance[0]
|
||||
}
|
||||
if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if !valid {
|
||||
log.E.F(
|
||||
"invalid auth %s from %s",
|
||||
r.Header.Get("Authorization"), remote,
|
||||
)
|
||||
return
|
||||
}
|
||||
for _, pk := range append(s.ownersFollowed, s.followedFollows...) {
|
||||
if bytes.Equal(pk, pubkey) {
|
||||
authed = true
|
||||
return
|
||||
}
|
||||
}
|
||||
// if the client is one of the relay cluster replicas, also set the super
|
||||
// flag to indicate that privilege checks can be bypassed.
|
||||
if len(s.Peers.Pubkeys) > 0 {
|
||||
for _, pk := range s.Peers.Pubkeys {
|
||||
if bytes.Equal(pk, pubkey) {
|
||||
authed = true
|
||||
super = true
|
||||
pubkey = pk
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -52,7 +52,7 @@ func TestBech32(t *testing.T) {
|
||||
{
|
||||
"split1cheo2y9e2w",
|
||||
ErrNonCharsetChar('o'),
|
||||
}, // invalid character (o) in data part
|
||||
}, // invalid character (o) in data part
|
||||
{"split1a2y9w", ErrInvalidSeparatorIndex(5)}, // too short data part
|
||||
{
|
||||
"1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
|
||||
|
||||
@@ -6,10 +6,11 @@ package musig2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/crypto/ec"
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -190,7 +191,7 @@ func BenchmarkCombineSigs(b *testing.B) {
|
||||
}
|
||||
var msg [32]byte
|
||||
copy(msg[:], testMsg[:])
|
||||
var finalNonce *btcec.btcec
|
||||
var finalNonce *btcec.PublicKey
|
||||
for i := range signers {
|
||||
signer := signers[i]
|
||||
partialSig, err := Sign(
|
||||
@@ -246,7 +247,7 @@ func BenchmarkAggregateNonces(b *testing.B) {
|
||||
}
|
||||
}
|
||||
|
||||
var testKey *btcec.btcec
|
||||
var testKey *btcec.PublicKey
|
||||
|
||||
// BenchmarkAggregateKeys benchmarks how long it takes to aggregate public
|
||||
// keys.
|
||||
|
||||
@@ -4,6 +4,7 @@ package musig2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"orly.dev/pkg/crypto/ec"
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
@@ -63,7 +64,7 @@ type Context struct {
|
||||
// signingKey is the key we'll use for signing.
|
||||
signingKey *btcec.SecretKey
|
||||
// pubKey is our even-y coordinate public key.
|
||||
pubKey *btcec.btcec
|
||||
pubKey *btcec.PublicKey
|
||||
// combinedKey is the aggregated public key.
|
||||
combinedKey *AggregateKey
|
||||
// uniqueKeyIndex is the index of the second unique key in the keySet.
|
||||
@@ -103,7 +104,7 @@ type contextOptions struct {
|
||||
// h_tapTweak(internalKey) as there is no true script root.
|
||||
bip86Tweak bool
|
||||
// keySet is the complete set of signers for this context.
|
||||
keySet []*btcec.btcec
|
||||
keySet []*btcec.PublicKey
|
||||
// numSigners is the total number of signers that will eventually be a
|
||||
// part of the context.
|
||||
numSigners int
|
||||
|
||||
@@ -1,88 +1,127 @@
|
||||
{
|
||||
"pubkeys": [
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
|
||||
"023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
|
||||
"020000000000000000000000000000000000000000000000000000000000000005",
|
||||
"02FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30",
|
||||
"04F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9"
|
||||
],
|
||||
"tweaks": [
|
||||
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
|
||||
"252E4BD67410A76CDF933D30EAA1608214037F1B105A013ECCD3C5C184A6110B"
|
||||
],
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"key_indices": [0, 1, 2],
|
||||
"expected": "90539EEDE565F5D054F32CC0C220126889ED1E5D193BAF15AEF344FE59D4610C"
|
||||
},
|
||||
{
|
||||
"key_indices": [2, 1, 0],
|
||||
"expected": "6204DE8B083426DC6EAF9502D27024D53FC826BF7D2012148A0575435DF54B2B"
|
||||
},
|
||||
{
|
||||
"key_indices": [0, 0, 0],
|
||||
"expected": "B436E3BAD62B8CD409969A224731C193D051162D8C5AE8B109306127DA3AA935"
|
||||
},
|
||||
{
|
||||
"key_indices": [0, 0, 1, 1],
|
||||
"expected": "69BC22BFA5D106306E48A20679DE1D7389386124D07571D0D872686028C26A3E"
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"key_indices": [0, 3],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "Invalid public key"
|
||||
},
|
||||
{
|
||||
"key_indices": [0, 4],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "Public key exceeds field size"
|
||||
},
|
||||
{
|
||||
"key_indices": [5, 0],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "First byte of public key is not 2 or 3"
|
||||
},
|
||||
{
|
||||
"key_indices": [0, 1],
|
||||
"tweak_indices": [0],
|
||||
"is_xonly": [true],
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The tweak must be less than n."
|
||||
},
|
||||
"comment": "Tweak is out of range"
|
||||
},
|
||||
{
|
||||
"key_indices": [6],
|
||||
"tweak_indices": [1],
|
||||
"is_xonly": [false],
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The result of tweaking cannot be infinity."
|
||||
},
|
||||
"comment": "Intermediate tweaking result is point at infinity"
|
||||
}
|
||||
]
|
||||
"pubkeys": [
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
|
||||
"023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
|
||||
"020000000000000000000000000000000000000000000000000000000000000005",
|
||||
"02FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30",
|
||||
"04F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9"
|
||||
],
|
||||
"tweaks": [
|
||||
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
|
||||
"252E4BD67410A76CDF933D30EAA1608214037F1B105A013ECCD3C5C184A6110B"
|
||||
],
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"expected": "90539EEDE565F5D054F32CC0C220126889ED1E5D193BAF15AEF344FE59D4610C"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
2,
|
||||
1,
|
||||
0
|
||||
],
|
||||
"expected": "6204DE8B083426DC6EAF9502D27024D53FC826BF7D2012148A0575435DF54B2B"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
"expected": "B436E3BAD62B8CD409969A224731C193D051162D8C5AE8B109306127DA3AA935"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
1
|
||||
],
|
||||
"expected": "69BC22BFA5D106306E48A20679DE1D7389386124D07571D0D872686028C26A3E"
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "Invalid public key"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "Public key exceeds field size"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
5,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "First byte of public key is not 2 or 3"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"tweak_indices": [
|
||||
0
|
||||
],
|
||||
"is_xonly": [
|
||||
true
|
||||
],
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The tweak must be less than n."
|
||||
},
|
||||
"comment": "Tweak is out of range"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
6
|
||||
],
|
||||
"tweak_indices": [
|
||||
1
|
||||
],
|
||||
"is_xonly": [
|
||||
false
|
||||
],
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The result of tweaking cannot be infinity."
|
||||
},
|
||||
"comment": "Intermediate tweaking result is point at infinity"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
{
|
||||
"pubkeys": [
|
||||
"02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
|
||||
"023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
|
||||
"02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8"
|
||||
],
|
||||
"sorted_pubkeys": [
|
||||
"023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
|
||||
"02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
|
||||
"02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"
|
||||
]
|
||||
"pubkeys": [
|
||||
"02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
|
||||
"023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
|
||||
"02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8"
|
||||
],
|
||||
"sorted_pubkeys": [
|
||||
"023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
|
||||
"02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
|
||||
"02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,54 +1,69 @@
|
||||
{
|
||||
"pnonces": [
|
||||
"020151C80F435648DF67A22B749CD798CE54E0321D034B92B709B567D60A42E66603BA47FBC1834437B3212E89A84D8425E7BF12E0245D98262268EBDCB385D50641",
|
||||
"03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B833",
|
||||
"020151C80F435648DF67A22B749CD798CE54E0321D034B92B709B567D60A42E6660279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
|
||||
"03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60379BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
|
||||
"04FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B833",
|
||||
"03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B831",
|
||||
"03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A602FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30"
|
||||
],
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"pnonce_indices": [0, 1],
|
||||
"expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B024725377345BDE0E9C33AF3C43C0A29A9249F2F2956FA8CFEB55C8573D0262DC8"
|
||||
},
|
||||
{
|
||||
"pnonce_indices": [2, 3],
|
||||
"expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"comment": "Sum of second points encoded in the nonces is point at infinity which is serialized as 33 zero bytes"
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"pnonce_indices": [0, 4],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1,
|
||||
"contrib": "pubnonce"
|
||||
},
|
||||
"comment": "Public nonce from signer 1 is invalid due wrong tag, 0x04, in the first half",
|
||||
"btcec_err": "invalid public key: unsupported format: 4"
|
||||
},
|
||||
{
|
||||
"pnonce_indices": [5, 1],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubnonce"
|
||||
},
|
||||
"comment": "Public nonce from signer 0 is invalid because the second half does not correspond to an X coordinate",
|
||||
"btcec_err": "invalid public key: x coordinate 48c264cdd57d3c24d79990b0f865674eb62a0f9018277a95011b41bfc193b831 is not on the secp256k1 curve"
|
||||
},
|
||||
{
|
||||
"pnonce_indices": [6, 1],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubnonce"
|
||||
},
|
||||
"comment": "Public nonce from signer 0 is invalid because second half exceeds field size",
|
||||
"btcec_err": "invalid public key: x >= field prime"
|
||||
}
|
||||
]
|
||||
"pnonces": [
|
||||
"020151C80F435648DF67A22B749CD798CE54E0321D034B92B709B567D60A42E66603BA47FBC1834437B3212E89A84D8425E7BF12E0245D98262268EBDCB385D50641",
|
||||
"03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B833",
|
||||
"020151C80F435648DF67A22B749CD798CE54E0321D034B92B709B567D60A42E6660279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
|
||||
"03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60379BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
|
||||
"04FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B833",
|
||||
"03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B831",
|
||||
"03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A602FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30"
|
||||
],
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"pnonce_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B024725377345BDE0E9C33AF3C43C0A29A9249F2F2956FA8CFEB55C8573D0262DC8"
|
||||
},
|
||||
{
|
||||
"pnonce_indices": [
|
||||
2,
|
||||
3
|
||||
],
|
||||
"expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"comment": "Sum of second points encoded in the nonces is point at infinity which is serialized as 33 zero bytes"
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"pnonce_indices": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1,
|
||||
"contrib": "pubnonce"
|
||||
},
|
||||
"comment": "Public nonce from signer 1 is invalid due wrong tag, 0x04, in the first half",
|
||||
"btcec_err": "invalid public key: unsupported format: 4"
|
||||
},
|
||||
{
|
||||
"pnonce_indices": [
|
||||
5,
|
||||
1
|
||||
],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubnonce"
|
||||
},
|
||||
"comment": "Public nonce from signer 0 is invalid because the second half does not correspond to an X coordinate",
|
||||
"btcec_err": "invalid public key: x coordinate 48c264cdd57d3c24d79990b0f865674eb62a0f9018277a95011b41bfc193b831 is not on the secp256k1 curve"
|
||||
},
|
||||
{
|
||||
"pnonce_indices": [
|
||||
6,
|
||||
1
|
||||
],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubnonce"
|
||||
},
|
||||
"comment": "Public nonce from signer 0 is invalid because second half exceeds field size",
|
||||
"btcec_err": "invalid public key: x >= field prime"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,40 +1,40 @@
|
||||
{
|
||||
"test_cases": [
|
||||
{
|
||||
"rand_": "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sk": "0202020202020202020202020202020202020202020202020202020202020202",
|
||||
"pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
|
||||
"aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
|
||||
"msg": "0101010101010101010101010101010101010101010101010101010101010101",
|
||||
"extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
|
||||
"expected": "227243DCB40EF2A13A981DB188FA433717B506BDFA14B1AE47D5DC027C9C3B9EF2370B2AD206E724243215137C86365699361126991E6FEC816845F837BDDAC3024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766"
|
||||
},
|
||||
{
|
||||
"rand_": "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sk": "0202020202020202020202020202020202020202020202020202020202020202",
|
||||
"pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
|
||||
"aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
|
||||
"msg": "",
|
||||
"extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
|
||||
"expected": "CD0F47FE471D6788FF3243F47345EA0A179AEF69476BE8348322EF39C2723318870C2065AFB52DEDF02BF4FDBF6D2F442E608692F50C2374C08FFFE57042A61C024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766"
|
||||
},
|
||||
{
|
||||
"rand_": "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sk": "0202020202020202020202020202020202020202020202020202020202020202",
|
||||
"pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
|
||||
"aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
|
||||
"msg": "2626262626262626262626262626262626262626262626262626262626262626262626262626",
|
||||
"extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
|
||||
"expected": "011F8BC60EF061DEEF4D72A0A87200D9994B3F0CD9867910085C38D5366E3E6B9FF03BC0124E56B24069E91EC3F162378983F194E8BD0ED89BE3059649EAE262024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766"
|
||||
},
|
||||
{
|
||||
"rand_": "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sk": null,
|
||||
"pk": "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"aggpk": null,
|
||||
"msg": null,
|
||||
"extra_in": null,
|
||||
"expected": "890E83616A3BC4640AB9B6374F21C81FF89CDDDBAFAA7475AE2A102A92E3EDB29FD7E874E23342813A60D9646948242646B7951CA046B4B36D7D6078506D3C9402F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9"
|
||||
}
|
||||
]
|
||||
"test_cases": [
|
||||
{
|
||||
"rand_": "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sk": "0202020202020202020202020202020202020202020202020202020202020202",
|
||||
"pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
|
||||
"aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
|
||||
"msg": "0101010101010101010101010101010101010101010101010101010101010101",
|
||||
"extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
|
||||
"expected": "227243DCB40EF2A13A981DB188FA433717B506BDFA14B1AE47D5DC027C9C3B9EF2370B2AD206E724243215137C86365699361126991E6FEC816845F837BDDAC3024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766"
|
||||
},
|
||||
{
|
||||
"rand_": "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sk": "0202020202020202020202020202020202020202020202020202020202020202",
|
||||
"pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
|
||||
"aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
|
||||
"msg": "",
|
||||
"extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
|
||||
"expected": "CD0F47FE471D6788FF3243F47345EA0A179AEF69476BE8348322EF39C2723318870C2065AFB52DEDF02BF4FDBF6D2F442E608692F50C2374C08FFFE57042A61C024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766"
|
||||
},
|
||||
{
|
||||
"rand_": "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sk": "0202020202020202020202020202020202020202020202020202020202020202",
|
||||
"pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
|
||||
"aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
|
||||
"msg": "2626262626262626262626262626262626262626262626262626262626262626262626262626",
|
||||
"extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
|
||||
"expected": "011F8BC60EF061DEEF4D72A0A87200D9994B3F0CD9867910085C38D5366E3E6B9FF03BC0124E56B24069E91EC3F162378983F194E8BD0ED89BE3059649EAE262024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766"
|
||||
},
|
||||
{
|
||||
"rand_": "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sk": null,
|
||||
"pk": "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"aggpk": null,
|
||||
"msg": null,
|
||||
"extra_in": null,
|
||||
"expected": "890E83616A3BC4640AB9B6374F21C81FF89CDDDBAFAA7475AE2A102A92E3EDB29FD7E874E23342813A60D9646948242646B7951CA046B4B36D7D6078506D3C9402F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,151 +1,151 @@
|
||||
{
|
||||
"pubkeys": [
|
||||
"03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"02D2DC6F5DF7C56ACF38C7FA0AE7A759AE30E19B37359DFDE015872324C7EF6E05",
|
||||
"03C7FB101D97FF930ACD0C6760852EF64E69083DE0B06AC6335724754BB4B0522C",
|
||||
"02352433B21E7E05D3B452B81CAE566E06D2E003ECE16D1074AABA4289E0E3D581"
|
||||
],
|
||||
"pnonces": [
|
||||
"036E5EE6E28824029FEA3E8A9DDD2C8483F5AF98F7177C3AF3CB6F47CAF8D94AE902DBA67E4A1F3680826172DA15AFB1A8CA85C7C5CC88900905C8DC8C328511B53E",
|
||||
"03E4F798DA48A76EEC1C9CC5AB7A880FFBA201A5F064E627EC9CB0031D1D58FC5103E06180315C5A522B7EC7C08B69DCD721C313C940819296D0A7AB8E8795AC1F00",
|
||||
"02C0068FD25523A31578B8077F24F78F5BD5F2422AFF47C1FADA0F36B3CEB6C7D202098A55D1736AA5FCC21CF0729CCE852575C06C081125144763C2C4C4A05C09B6",
|
||||
"031F5C87DCFBFCF330DEE4311D85E8F1DEA01D87A6F1C14CDFC7E4F1D8C441CFA40277BF176E9F747C34F81B0D9F072B1B404A86F402C2D86CF9EA9E9C69876EA3B9",
|
||||
"023F7042046E0397822C4144A17F8B63D78748696A46C3B9F0A901D296EC3406C302022B0B464292CF9751D699F10980AC764E6F671EFCA15069BBE62B0D1C62522A",
|
||||
"02D97DDA5988461DF58C5897444F116A7C74E5711BF77A9446E27806563F3B6C47020CBAD9C363A7737F99FA06B6BE093CEAFF5397316C5AC46915C43767AE867C00"
|
||||
],
|
||||
"tweaks": [
|
||||
"B511DA492182A91B0FFB9A98020D55F260AE86D7ECBD0399C7383D59A5F2AF7C",
|
||||
"A815FE049EE3C5AAB66310477FBC8BCCCAC2F3395F59F921C364ACD78A2F48DC",
|
||||
"75448A87274B056468B977BE06EB1E9F657577B7320B0A3376EA51FD420D18A8"
|
||||
],
|
||||
"psigs": [
|
||||
"B15D2CD3C3D22B04DAE438CE653F6B4ECF042F42CFDED7C41B64AAF9B4AF53FB",
|
||||
"6193D6AC61B354E9105BBDC8937A3454A6D705B6D57322A5A472A02CE99FCB64",
|
||||
"9A87D3B79EC67228CB97878B76049B15DBD05B8158D17B5B9114D3C226887505",
|
||||
"66F82EA90923689B855D36C6B7E032FB9970301481B99E01CDB4D6AC7C347A15",
|
||||
"4F5AEE41510848A6447DCD1BBC78457EF69024944C87F40250D3EF2C25D33EFE",
|
||||
"DDEF427BBB847CC027BEFF4EDB01038148917832253EBC355FC33F4A8E2FCCE4",
|
||||
"97B890A26C981DA8102D3BC294159D171D72810FDF7C6A691DEF02F0F7AF3FDC",
|
||||
"53FA9E08BA5243CBCB0D797C5EE83BC6728E539EB76C2D0BF0F971EE4E909971",
|
||||
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"
|
||||
],
|
||||
"msg": "599C67EA410D005B9DA90817CF03ED3B1C868E4DA4EDF00A5880B0082C237869",
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"aggnonce": "0341432722C5CD0268D829C702CF0D1CBCE57033EED201FD335191385227C3210C03D377F2D258B64AADC0E16F26462323D701D286046A2EA93365656AFD9875982B",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"psig_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"expected": "041DA22223CE65C92C9A0D6C2CAC828AAF1EEE56304FEC371DDF91EBB2B9EF0912F1038025857FEDEB3FF696F8B99FA4BB2C5812F6095A2E0004EC99CE18DE1E"
|
||||
},
|
||||
{
|
||||
"aggnonce": "0224AFD36C902084058B51B5D36676BBA4DC97C775873768E58822F87FE437D792028CB15929099EEE2F5DAE404CD39357591BA32E9AF4E162B8D3E7CB5EFE31CB20",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
2
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
2
|
||||
],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"psig_indices": [
|
||||
2,
|
||||
3
|
||||
],
|
||||
"expected": "1069B67EC3D2F3C7C08291ACCB17A9C9B8F2819A52EB5DF8726E17E7D6B52E9F01800260A7E9DAC450F4BE522DE4CE12BA91AEAF2B4279219EF74BE1D286ADD9"
|
||||
},
|
||||
{
|
||||
"aggnonce": "0208C5C438C710F4F96A61E9FF3C37758814B8C3AE12BFEA0ED2C87FF6954FF186020B1816EA104B4FCA2D304D733E0E19CEAD51303FF6420BFD222335CAA402916D",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
2
|
||||
],
|
||||
"tweak_indices": [
|
||||
0
|
||||
],
|
||||
"is_xonly": [
|
||||
false
|
||||
],
|
||||
"psig_indices": [
|
||||
4,
|
||||
5
|
||||
],
|
||||
"expected": "5C558E1DCADE86DA0B2F02626A512E30A22CF5255CAEA7EE32C38E9A71A0E9148BA6C0E6EC7683B64220F0298696F1B878CD47B107B81F7188812D593971E0CC"
|
||||
},
|
||||
{
|
||||
"aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"is_xonly": [
|
||||
true,
|
||||
false,
|
||||
true
|
||||
],
|
||||
"psig_indices": [
|
||||
6,
|
||||
7
|
||||
],
|
||||
"expected": "839B08820B681DBA8DAF4CC7B104E8F2638F9388F8D7A555DC17B6E6971D7426CE07BF6AB01F1DB50E4E33719295F4094572B79868E440FB3DEFD3FAC1DB589E"
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"is_xonly": [
|
||||
true,
|
||||
false,
|
||||
true
|
||||
],
|
||||
"psig_indices": [
|
||||
7,
|
||||
8
|
||||
],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1
|
||||
},
|
||||
"comment": "Partial signature is invalid because it exceeds group size"
|
||||
}
|
||||
]
|
||||
"pubkeys": [
|
||||
"03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"02D2DC6F5DF7C56ACF38C7FA0AE7A759AE30E19B37359DFDE015872324C7EF6E05",
|
||||
"03C7FB101D97FF930ACD0C6760852EF64E69083DE0B06AC6335724754BB4B0522C",
|
||||
"02352433B21E7E05D3B452B81CAE566E06D2E003ECE16D1074AABA4289E0E3D581"
|
||||
],
|
||||
"pnonces": [
|
||||
"036E5EE6E28824029FEA3E8A9DDD2C8483F5AF98F7177C3AF3CB6F47CAF8D94AE902DBA67E4A1F3680826172DA15AFB1A8CA85C7C5CC88900905C8DC8C328511B53E",
|
||||
"03E4F798DA48A76EEC1C9CC5AB7A880FFBA201A5F064E627EC9CB0031D1D58FC5103E06180315C5A522B7EC7C08B69DCD721C313C940819296D0A7AB8E8795AC1F00",
|
||||
"02C0068FD25523A31578B8077F24F78F5BD5F2422AFF47C1FADA0F36B3CEB6C7D202098A55D1736AA5FCC21CF0729CCE852575C06C081125144763C2C4C4A05C09B6",
|
||||
"031F5C87DCFBFCF330DEE4311D85E8F1DEA01D87A6F1C14CDFC7E4F1D8C441CFA40277BF176E9F747C34F81B0D9F072B1B404A86F402C2D86CF9EA9E9C69876EA3B9",
|
||||
"023F7042046E0397822C4144A17F8B63D78748696A46C3B9F0A901D296EC3406C302022B0B464292CF9751D699F10980AC764E6F671EFCA15069BBE62B0D1C62522A",
|
||||
"02D97DDA5988461DF58C5897444F116A7C74E5711BF77A9446E27806563F3B6C47020CBAD9C363A7737F99FA06B6BE093CEAFF5397316C5AC46915C43767AE867C00"
|
||||
],
|
||||
"tweaks": [
|
||||
"B511DA492182A91B0FFB9A98020D55F260AE86D7ECBD0399C7383D59A5F2AF7C",
|
||||
"A815FE049EE3C5AAB66310477FBC8BCCCAC2F3395F59F921C364ACD78A2F48DC",
|
||||
"75448A87274B056468B977BE06EB1E9F657577B7320B0A3376EA51FD420D18A8"
|
||||
],
|
||||
"psigs": [
|
||||
"B15D2CD3C3D22B04DAE438CE653F6B4ECF042F42CFDED7C41B64AAF9B4AF53FB",
|
||||
"6193D6AC61B354E9105BBDC8937A3454A6D705B6D57322A5A472A02CE99FCB64",
|
||||
"9A87D3B79EC67228CB97878B76049B15DBD05B8158D17B5B9114D3C226887505",
|
||||
"66F82EA90923689B855D36C6B7E032FB9970301481B99E01CDB4D6AC7C347A15",
|
||||
"4F5AEE41510848A6447DCD1BBC78457EF69024944C87F40250D3EF2C25D33EFE",
|
||||
"DDEF427BBB847CC027BEFF4EDB01038148917832253EBC355FC33F4A8E2FCCE4",
|
||||
"97B890A26C981DA8102D3BC294159D171D72810FDF7C6A691DEF02F0F7AF3FDC",
|
||||
"53FA9E08BA5243CBCB0D797C5EE83BC6728E539EB76C2D0BF0F971EE4E909971",
|
||||
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"
|
||||
],
|
||||
"msg": "599C67EA410D005B9DA90817CF03ED3B1C868E4DA4EDF00A5880B0082C237869",
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"aggnonce": "0341432722C5CD0268D829C702CF0D1CBCE57033EED201FD335191385227C3210C03D377F2D258B64AADC0E16F26462323D701D286046A2EA93365656AFD9875982B",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"psig_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"expected": "041DA22223CE65C92C9A0D6C2CAC828AAF1EEE56304FEC371DDF91EBB2B9EF0912F1038025857FEDEB3FF696F8B99FA4BB2C5812F6095A2E0004EC99CE18DE1E"
|
||||
},
|
||||
{
|
||||
"aggnonce": "0224AFD36C902084058B51B5D36676BBA4DC97C775873768E58822F87FE437D792028CB15929099EEE2F5DAE404CD39357591BA32E9AF4E162B8D3E7CB5EFE31CB20",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
2
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
2
|
||||
],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"psig_indices": [
|
||||
2,
|
||||
3
|
||||
],
|
||||
"expected": "1069B67EC3D2F3C7C08291ACCB17A9C9B8F2819A52EB5DF8726E17E7D6B52E9F01800260A7E9DAC450F4BE522DE4CE12BA91AEAF2B4279219EF74BE1D286ADD9"
|
||||
},
|
||||
{
|
||||
"aggnonce": "0208C5C438C710F4F96A61E9FF3C37758814B8C3AE12BFEA0ED2C87FF6954FF186020B1816EA104B4FCA2D304D733E0E19CEAD51303FF6420BFD222335CAA402916D",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
2
|
||||
],
|
||||
"tweak_indices": [
|
||||
0
|
||||
],
|
||||
"is_xonly": [
|
||||
false
|
||||
],
|
||||
"psig_indices": [
|
||||
4,
|
||||
5
|
||||
],
|
||||
"expected": "5C558E1DCADE86DA0B2F02626A512E30A22CF5255CAEA7EE32C38E9A71A0E9148BA6C0E6EC7683B64220F0298696F1B878CD47B107B81F7188812D593971E0CC"
|
||||
},
|
||||
{
|
||||
"aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"is_xonly": [
|
||||
true,
|
||||
false,
|
||||
true
|
||||
],
|
||||
"psig_indices": [
|
||||
6,
|
||||
7
|
||||
],
|
||||
"expected": "839B08820B681DBA8DAF4CC7B104E8F2638F9388F8D7A555DC17B6E6971D7426CE07BF6AB01F1DB50E4E33719295F4094572B79868E440FB3DEFD3FAC1DB589E"
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"is_xonly": [
|
||||
true,
|
||||
false,
|
||||
true
|
||||
],
|
||||
"psig_indices": [
|
||||
7,
|
||||
8
|
||||
],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1
|
||||
},
|
||||
"comment": "Partial signature is invalid because it exceeds group size"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,194 +1,287 @@
|
||||
{
|
||||
"sk": "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
|
||||
"pubkeys": [
|
||||
"03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA661",
|
||||
"020000000000000000000000000000000000000000000000000000000000000007"
|
||||
],
|
||||
"secnonces": [
|
||||
"508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F703935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9"
|
||||
],
|
||||
"pnonces": [
|
||||
"0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
|
||||
"0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
|
||||
"032DE2662628C90B03F5E720284EB52FF7D71F4284F627B68A853D78C78E1FFE9303E4C5524E83FFE1493B9077CF1CA6BEB2090C93D930321071AD40B2F44E599046",
|
||||
"0237C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0387BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
|
||||
"020000000000000000000000000000000000000000000000000000000000000009"
|
||||
],
|
||||
"aggnonces": [
|
||||
"028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
|
||||
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"048465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
|
||||
"028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61020000000000000000000000000000000000000000000000000000000000000009",
|
||||
"028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD6102FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30"
|
||||
],
|
||||
"msgs": [
|
||||
"F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
|
||||
"",
|
||||
"2626262626262626262626262626262626262626262626262626262626262626262626262626"
|
||||
],
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"expected": "012ABBCB52B3016AC03AD82395A1A415C48B93DEF78718E62A7A90052FE224FB"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 0, 2],
|
||||
"nonce_indices": [1, 0, 2],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 1,
|
||||
"expected": "9FF2F7AAA856150CC8819254218D3ADEEB0535269051897724F9DB3789513A52"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 2,
|
||||
"expected": "FA23C359F6FAC4E7796BB93BC9F0532A95468C539BA20FF86D7C76ED92227900"
|
||||
},
|
||||
{
|
||||
"key_indices": [0, 1],
|
||||
"nonce_indices": [0, 3],
|
||||
"aggnonce_index": 1,
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"expected": "AE386064B26105404798F75DE2EB9AF5EDA5387B064B83D049CB7C5E08879531",
|
||||
"comment": "Both halves of aggregate nonce correspond to point at infinity"
|
||||
}
|
||||
],
|
||||
"sign_error_test_cases": [
|
||||
{
|
||||
"key_indices": [1, 2],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The signer's pubkey must be included in the list of pubkeys."
|
||||
},
|
||||
"comment": "The signers pubkey is not in the list of pubkeys"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 0, 3],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 2,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "Signer 2 provided an invalid public key"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"aggnonce_index": 2,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": null,
|
||||
"contrib": "aggnonce"
|
||||
},
|
||||
"comment": "Aggregate nonce is invalid due wrong tag, 0x04, in the first half"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"aggnonce_index": 3,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": null,
|
||||
"contrib": "aggnonce"
|
||||
},
|
||||
"comment": "Aggregate nonce is invalid because the second half does not correspond to an X coordinate"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"aggnonce_index": 4,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": null,
|
||||
"contrib": "aggnonce"
|
||||
},
|
||||
"comment": "Aggregate nonce is invalid because second half exceeds field size"
|
||||
},
|
||||
{
|
||||
"key_indices": [0, 1, 2],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"secnonce_index": 1,
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "first secnonce value is out of range."
|
||||
},
|
||||
"comment": "Secnonce is invalid which may indicate nonce reuse"
|
||||
}
|
||||
],
|
||||
"verify_fail_test_cases": [
|
||||
{
|
||||
"sig": "97AC833ADCB1AFA42EBF9E0725616F3C9A0D5B614F6FE283CEAAA37A8FFAF406",
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"comment": "Wrong signature (which is equal to the negation of valid signature)"
|
||||
},
|
||||
{
|
||||
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 1,
|
||||
"comment": "Wrong signer"
|
||||
},
|
||||
{
|
||||
"sig": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"comment": "Signature exceeds group size"
|
||||
}
|
||||
],
|
||||
"verify_error_test_cases": [
|
||||
{
|
||||
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [4, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubnonce"
|
||||
},
|
||||
"comment": "Invalid pubnonce"
|
||||
},
|
||||
{
|
||||
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
|
||||
"key_indices": [3, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "Invalid pubkey"
|
||||
}
|
||||
]
|
||||
"sk": "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
|
||||
"pubkeys": [
|
||||
"03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA661",
|
||||
"020000000000000000000000000000000000000000000000000000000000000007"
|
||||
],
|
||||
"secnonces": [
|
||||
"508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F703935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9"
|
||||
],
|
||||
"pnonces": [
|
||||
"0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
|
||||
"0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
|
||||
"032DE2662628C90B03F5E720284EB52FF7D71F4284F627B68A853D78C78E1FFE9303E4C5524E83FFE1493B9077CF1CA6BEB2090C93D930321071AD40B2F44E599046",
|
||||
"0237C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0387BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
|
||||
"020000000000000000000000000000000000000000000000000000000000000009"
|
||||
],
|
||||
"aggnonces": [
|
||||
"028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
|
||||
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"048465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
|
||||
"028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61020000000000000000000000000000000000000000000000000000000000000009",
|
||||
"028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD6102FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30"
|
||||
],
|
||||
"msgs": [
|
||||
"F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
|
||||
"",
|
||||
"2626262626262626262626262626262626262626262626262626262626262626262626262626"
|
||||
],
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"expected": "012ABBCB52B3016AC03AD82395A1A415C48B93DEF78718E62A7A90052FE224FB"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
0,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
0,
|
||||
2
|
||||
],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 1,
|
||||
"expected": "9FF2F7AAA856150CC8819254218D3ADEEB0535269051897724F9DB3789513A52"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 2,
|
||||
"expected": "FA23C359F6FAC4E7796BB93BC9F0532A95468C539BA20FF86D7C76ED92227900"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"nonce_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"aggnonce_index": 1,
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"expected": "AE386064B26105404798F75DE2EB9AF5EDA5387B064B83D049CB7C5E08879531",
|
||||
"comment": "Both halves of aggregate nonce correspond to point at infinity"
|
||||
}
|
||||
],
|
||||
"sign_error_test_cases": [
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2
|
||||
],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The signer's pubkey must be included in the list of pubkeys."
|
||||
},
|
||||
"comment": "The signers pubkey is not in the list of pubkeys"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
0,
|
||||
3
|
||||
],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 2,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "Signer 2 provided an invalid public key"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"aggnonce_index": 2,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": null,
|
||||
"contrib": "aggnonce"
|
||||
},
|
||||
"comment": "Aggregate nonce is invalid due wrong tag, 0x04, in the first half"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"aggnonce_index": 3,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": null,
|
||||
"contrib": "aggnonce"
|
||||
},
|
||||
"comment": "Aggregate nonce is invalid because the second half does not correspond to an X coordinate"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"aggnonce_index": 4,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": null,
|
||||
"contrib": "aggnonce"
|
||||
},
|
||||
"comment": "Aggregate nonce is invalid because second half exceeds field size"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"secnonce_index": 1,
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "first secnonce value is out of range."
|
||||
},
|
||||
"comment": "Secnonce is invalid which may indicate nonce reuse"
|
||||
}
|
||||
],
|
||||
"verify_fail_test_cases": [
|
||||
{
|
||||
"sig": "97AC833ADCB1AFA42EBF9E0725616F3C9A0D5B614F6FE283CEAAA37A8FFAF406",
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"comment": "Wrong signature (which is equal to the negation of valid signature)"
|
||||
},
|
||||
{
|
||||
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"msg_index": 0,
|
||||
"signer_index": 1,
|
||||
"comment": "Wrong signer"
|
||||
},
|
||||
{
|
||||
"sig": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"comment": "Signature exceeds group size"
|
||||
}
|
||||
],
|
||||
"verify_error_test_cases": [
|
||||
{
|
||||
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
4,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubnonce"
|
||||
},
|
||||
"comment": "Invalid pubnonce"
|
||||
},
|
||||
{
|
||||
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
|
||||
"key_indices": [
|
||||
3,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "Invalid pubkey"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,84 +1,170 @@
|
||||
{
|
||||
"sk": "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
|
||||
"pubkeys": [
|
||||
"03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"
|
||||
],
|
||||
"secnonce": "508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F703935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"pnonces": [
|
||||
"0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
|
||||
"0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
|
||||
"032DE2662628C90B03F5E720284EB52FF7D71F4284F627B68A853D78C78E1FFE9303E4C5524E83FFE1493B9077CF1CA6BEB2090C93D930321071AD40B2F44E599046"
|
||||
],
|
||||
"aggnonce": "028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
|
||||
"tweaks": [
|
||||
"E8F791FF9225A2AF0102AFFF4A9A723D9612A682A25EBE79802B263CDFCD83BB",
|
||||
"AE2EA797CC0FE72AC5B97B97F3C6957D7E4199A167A58EB08BCAFFDA70AC0455",
|
||||
"F52ECBC565B3D8BEA2DFD5B75A4F457E54369809322E4120831626F290FA87E0",
|
||||
"1969AD73CC177FA0B4FCED6DF1F7BF9907E665FDE9BA196A74FED0A3CF5AEF9D",
|
||||
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"
|
||||
],
|
||||
"msg": "F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0],
|
||||
"is_xonly": [true],
|
||||
"signer_index": 2,
|
||||
"expected": "E28A5C66E61E178C2BA19DB77B6CF9F7E2F0F56C17918CD13135E60CC848FE91",
|
||||
"comment": "A single x-only tweak"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0],
|
||||
"is_xonly": [false],
|
||||
"signer_index": 2,
|
||||
"expected": "38B0767798252F21BF5702C48028B095428320F73A4B14DB1E25DE58543D2D2D",
|
||||
"comment": "A single plain tweak"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0, 1],
|
||||
"is_xonly": [false, true],
|
||||
"signer_index": 2,
|
||||
"expected": "408A0A21C4A0F5DACAF9646AD6EB6FECD7F7A11F03ED1F48DFFF2185BC2C2408",
|
||||
"comment": "A plain tweak followed by an x-only tweak"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0, 1, 2, 3],
|
||||
"is_xonly": [false, false, true, true],
|
||||
"signer_index": 2,
|
||||
"expected": "45ABD206E61E3DF2EC9E264A6FEC8292141A633C28586388235541F9ADE75435",
|
||||
"comment": "Four tweaks: plain, plain, x-only, x-only."
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0, 1, 2, 3],
|
||||
"is_xonly": [true, false, true, false],
|
||||
"signer_index": 2,
|
||||
"expected": "B255FDCAC27B40C7CE7848E2D3B7BF5EA0ED756DA81565AC804CCCA3E1D5D239",
|
||||
"comment": "Four tweaks: x-only, plain, x-only, plain. If an implementation prohibits applying plain tweaks after x-only tweaks, it can skip this test vector or return an error."
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [4],
|
||||
"is_xonly": [false],
|
||||
"signer_index": 2,
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The tweak must be less than n."
|
||||
},
|
||||
"comment": "Tweak is invalid because it exceeds group size"
|
||||
}
|
||||
]
|
||||
"sk": "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
|
||||
"pubkeys": [
|
||||
"03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"
|
||||
],
|
||||
"secnonce": "508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F703935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"pnonces": [
|
||||
"0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
|
||||
"0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
|
||||
"032DE2662628C90B03F5E720284EB52FF7D71F4284F627B68A853D78C78E1FFE9303E4C5524E83FFE1493B9077CF1CA6BEB2090C93D930321071AD40B2F44E599046"
|
||||
],
|
||||
"aggnonce": "028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
|
||||
"tweaks": [
|
||||
"E8F791FF9225A2AF0102AFFF4A9A723D9612A682A25EBE79802B263CDFCD83BB",
|
||||
"AE2EA797CC0FE72AC5B97B97F3C6957D7E4199A167A58EB08BCAFFDA70AC0455",
|
||||
"F52ECBC565B3D8BEA2DFD5B75A4F457E54369809322E4120831626F290FA87E0",
|
||||
"1969AD73CC177FA0B4FCED6DF1F7BF9907E665FDE9BA196A74FED0A3CF5AEF9D",
|
||||
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"
|
||||
],
|
||||
"msg": "F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [
|
||||
0
|
||||
],
|
||||
"is_xonly": [
|
||||
true
|
||||
],
|
||||
"signer_index": 2,
|
||||
"expected": "E28A5C66E61E178C2BA19DB77B6CF9F7E2F0F56C17918CD13135E60CC848FE91",
|
||||
"comment": "A single x-only tweak"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [
|
||||
0
|
||||
],
|
||||
"is_xonly": [
|
||||
false
|
||||
],
|
||||
"signer_index": 2,
|
||||
"expected": "38B0767798252F21BF5702C48028B095428320F73A4B14DB1E25DE58543D2D2D",
|
||||
"comment": "A single plain tweak"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"is_xonly": [
|
||||
false,
|
||||
true
|
||||
],
|
||||
"signer_index": 2,
|
||||
"expected": "408A0A21C4A0F5DACAF9646AD6EB6FECD7F7A11F03ED1F48DFFF2185BC2C2408",
|
||||
"comment": "A plain tweak followed by an x-only tweak"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
3
|
||||
],
|
||||
"is_xonly": [
|
||||
false,
|
||||
false,
|
||||
true,
|
||||
true
|
||||
],
|
||||
"signer_index": 2,
|
||||
"expected": "45ABD206E61E3DF2EC9E264A6FEC8292141A633C28586388235541F9ADE75435",
|
||||
"comment": "Four tweaks: plain, plain, x-only, x-only."
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
3
|
||||
],
|
||||
"is_xonly": [
|
||||
true,
|
||||
false,
|
||||
true,
|
||||
false
|
||||
],
|
||||
"signer_index": 2,
|
||||
"expected": "B255FDCAC27B40C7CE7848E2D3B7BF5EA0ED756DA81565AC804CCCA3E1D5D239",
|
||||
"comment": "Four tweaks: x-only, plain, x-only, plain. If an implementation prohibits applying plain tweaks after x-only tweaks, it can skip this test vector or return an error."
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [
|
||||
4
|
||||
],
|
||||
"is_xonly": [
|
||||
false
|
||||
],
|
||||
"signer_index": 2,
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The tweak must be less than n."
|
||||
},
|
||||
"comment": "Tweak is invalid because it exceeds group size"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -5,11 +5,12 @@ package musig2
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"orly.dev/pkg/crypto/ec"
|
||||
"orly.dev/pkg/crypto/ec/chainhash"
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"sort"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -224,7 +225,7 @@ func defaultKeyAggOptions() *keyAggOption { return &keyAggOption{} }
|
||||
// point has an even y coordinate.
|
||||
//
|
||||
// TODO(roasbeef): double check, can just check the y coord even not jacobian?
|
||||
func hasEvenY(pJ btcec.btcec) bool {
|
||||
func hasEvenY(pJ btcec.JacobianPoint) bool {
|
||||
pJ.ToAffine()
|
||||
p := btcec.NewPublicKey(&pJ.X, &pJ.Y)
|
||||
keyBytes := p.SerializeCompressed()
|
||||
@@ -237,7 +238,7 @@ func hasEvenY(pJ btcec.btcec) bool {
|
||||
// by the parity factor. The xOnly bool specifies if this is to be an x-only
|
||||
// tweak or not.
|
||||
func tweakKey(
|
||||
keyJ btcec.btcec, parityAcc btcec.ModNScalar,
|
||||
keyJ btcec.JacobianPoint, parityAcc btcec.ModNScalar,
|
||||
tweak [32]byte,
|
||||
tweakAcc btcec.ModNScalar,
|
||||
xOnly bool,
|
||||
|
||||
@@ -5,15 +5,16 @@ package musig2
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"orly.dev/pkg/crypto/ec"
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/crypto/ec"
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -39,9 +40,9 @@ func TestMusig2KeySort(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
var testCase keySortTestVector
|
||||
require.NoError(t, json.Unmarshal(testVectorBytes, &testCase))
|
||||
keys := make([]*btcec.btcec, len(testCase.PubKeys))
|
||||
keys := make([]*btcec.PublicKey, len(testCase.PubKeys))
|
||||
for i, keyStr := range testCase.PubKeys {
|
||||
pubKey, err := btcec.btcec.ParsePubKey(mustParseHex(keyStr))
|
||||
pubKey, err := btcec.ParsePubKey(mustParseHex(keyStr))
|
||||
require.NoError(t, err)
|
||||
keys[i] = pubKey
|
||||
}
|
||||
|
||||
@@ -5,11 +5,12 @@ package musig2
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/crypto/ec"
|
||||
"orly.dev/pkg/crypto/sha256"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -26,14 +27,14 @@ func mustParseHex(str string) []byte {
|
||||
|
||||
type signer struct {
|
||||
privKey *btcec.SecretKey
|
||||
pubKey *btcec.btcec
|
||||
pubKey *btcec.PublicKey
|
||||
nonces *Nonces
|
||||
partialSig *PartialSignature
|
||||
}
|
||||
|
||||
type signerSet []signer
|
||||
|
||||
func (s signerSet) keys() []*btcec.btcec {
|
||||
func (s signerSet) keys() []*btcec.PublicKey {
|
||||
keys := make([]*btcec.PublicKey, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
keys[i] = s[i].pubKey
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"orly.dev/pkg/crypto/ec"
|
||||
"orly.dev/pkg/crypto/ec/chainhash"
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
@@ -59,8 +60,8 @@ func secNonceToPubNonce(secNonce [SecNonceSize]byte) [PubNonceSize]byte {
|
||||
var k1Mod, k2Mod btcec.ModNScalar
|
||||
k1Mod.SetByteSlice(secNonce[:btcec.SecKeyBytesLen])
|
||||
k2Mod.SetByteSlice(secNonce[btcec.SecKeyBytesLen:])
|
||||
var r1, r2 btcec.btcec
|
||||
btcec.btcec.ScalarBaseMultNonConst(&k1Mod, &r1)
|
||||
var r1, r2 btcec.JacobianPoint
|
||||
btcec.ScalarBaseMultNonConst(&k1Mod, &r1)
|
||||
btcec.ScalarBaseMultNonConst(&k2Mod, &r2)
|
||||
// Next, we'll convert the key in jacobian format to a normal public
|
||||
// key expressed in affine coordinates.
|
||||
|
||||
@@ -6,11 +6,12 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -87,9 +88,9 @@ type nonceAggValidCase struct {
|
||||
}
|
||||
|
||||
type nonceAggInvalidCase struct {
|
||||
Indices []int `json:"pnonce_indices"`
|
||||
Error nonceAggError `json:"error"`
|
||||
Comment string `json:"comment"`
|
||||
Indices []int `json:"pnonce_indices"`
|
||||
Error nonceAggError `json:"error"`
|
||||
Comment string `json:"comment"`
|
||||
ExpectedErr string `json:"btcec_err"`
|
||||
}
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"orly.dev/pkg/crypto/ec"
|
||||
"orly.dev/pkg/crypto/ec/chainhash"
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
@@ -53,7 +54,7 @@ var (
|
||||
)
|
||||
|
||||
// infinityPoint is the jacobian representation of the point at infinity.
|
||||
var infinityPoint btcec.btcec
|
||||
var infinityPoint btcec.JacobianPoint
|
||||
|
||||
// PartialSignature reprints a partial (s-only) musig2 multi-signature. This
|
||||
// isn't a valid schnorr signature by itself, as it needs to be aggregated
|
||||
@@ -205,7 +206,7 @@ func computeSigningNonce(
|
||||
combinedNonce [PubNonceSize]byte,
|
||||
combinedKey *btcec.PublicKey, msg [32]byte,
|
||||
) (
|
||||
*btcec.btcec, *btcec.ModNScalar, error,
|
||||
*btcec.JacobianPoint, *btcec.ModNScalar, error,
|
||||
) {
|
||||
|
||||
// Next we'll compute the value b, that blinds our second public
|
||||
|
||||
@@ -6,14 +6,15 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"orly.dev/pkg/crypto/ec"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/crypto/ec"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -80,7 +81,7 @@ func TestMusig2SignVerify(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
var testCases signVerifyTestVectors
|
||||
require.NoError(t, json.Unmarshal(testVectorBytes, &testCases))
|
||||
privKey, _ := btcec.btcec.SecKeyFromBytes(mustParseHex(testCases.SecKey))
|
||||
privKey, _ := btcec.SecKeyFromBytes(mustParseHex(testCases.SecKey))
|
||||
for i, testCase := range testCases.ValidCases {
|
||||
testCase := testCase
|
||||
testName := fmt.Sprintf("valid_case_%v", i)
|
||||
@@ -312,7 +313,7 @@ func TestMusig2SignCombine(t *testing.T) {
|
||||
combinedNonce, combinedKey.FinalKey, msg,
|
||||
)
|
||||
finalNonceJ.ToAffine()
|
||||
finalNonce := btcec.btcec.NewPublicKey(
|
||||
finalNonce := btcec.NewPublicKey(
|
||||
&finalNonceJ.X, &finalNonceJ.Y,
|
||||
)
|
||||
combinedSig := CombineSigs(
|
||||
|
||||
@@ -7,11 +7,12 @@ package schnorr
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/crypto/ec"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"orly.dev/pkg/crypto/sha256"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// hexToBytes converts the passed hex string into bytes and will panic if there
|
||||
@@ -48,7 +49,7 @@ func hexToModNScalar(s string) *btcec.ModNScalar {
|
||||
// if there is an error. This is only provided for the hard-coded constants, so
|
||||
// errors in the source code can be detected. It will only (and must only) be
|
||||
// called with hard-coded values.
|
||||
func hexToFieldVal(s string) *btcec.btcec {
|
||||
func hexToFieldVal(s string) *btcec.FieldVal {
|
||||
b, err := hex.Dec(s)
|
||||
if err != nil {
|
||||
panic("invalid hex in source file: " + s)
|
||||
|
||||
@@ -7,13 +7,14 @@ package schnorr
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
"orly.dev/pkg/crypto/ec"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"strings"
|
||||
"testing"
|
||||
"testing/quick"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
@@ -207,7 +208,7 @@ func TestSchnorrSign(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
d := decodeHex(test.secretKey)
|
||||
privKey, _ := btcec.btcec.SecKeyFromBytes(d)
|
||||
privKey, _ := btcec.SecKeyFromBytes(d)
|
||||
var auxBytes [32]byte
|
||||
aux := decodeHex(test.auxRand)
|
||||
copy(auxBytes[:], aux)
|
||||
|
||||
@@ -5,42 +5,16 @@ import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"encoding/base64"
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"lukechampine.com/frand"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/errorf"
|
||||
"strings"
|
||||
|
||||
"lukechampine.com/frand"
|
||||
)
|
||||
|
||||
// ComputeSharedSecret returns a shared secret key used to encrypt messages. The private and public keys should be hex
|
||||
// encoded. Uses the Diffie-Hellman key exchange (ECDH) (RFC 4753).
|
||||
func ComputeSharedSecret(pkh, skh string) (sharedSecret []byte, err error) {
|
||||
var skb, pkb []byte
|
||||
if skb, err = hex.Dec(skh); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if pkb, err = hex.Dec(pkh); chk.E(err) {
|
||||
return
|
||||
}
|
||||
signer := new(p256k.Signer)
|
||||
if err = signer.InitSec(skb); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if sharedSecret, err = signer.ECDH(pkb); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EncryptNip4 encrypts message with key using aes-256-cbc. key should be the shared secret generated by
|
||||
// ComputeSharedSecret.
|
||||
//
|
||||
// Returns: base64(encrypted_bytes) + "?iv=" + base64(initialization_vector).
|
||||
//
|
||||
// Deprecated: upgrade to using Decrypt with the NIP-44 algorithm.
|
||||
func EncryptNip4(msg string, key []byte) (ct []byte, err error) {
|
||||
func EncryptNip4(msg, key []byte) (ct []byte, err error) {
|
||||
// block size is 16 bytes
|
||||
iv := make([]byte, 16)
|
||||
if _, err = frand.Read(iv); chk.E(err) {
|
||||
@@ -71,22 +45,20 @@ func EncryptNip4(msg string, key []byte) (ct []byte, err error) {
|
||||
|
||||
// DecryptNip4 decrypts a content string using the shared secret key. The inverse operation to message ->
|
||||
// EncryptNip4(message, key).
|
||||
//
|
||||
// Deprecated: upgrade to using Decrypt with the NIP-44 algorithm.
|
||||
func DecryptNip4(content string, key []byte) (msg []byte, err error) {
|
||||
parts := strings.Split(content, "?iv=")
|
||||
func DecryptNip4(content, key []byte) (msg []byte, err error) {
|
||||
parts := bytes.Split(content, []byte("?iv="))
|
||||
if len(parts) < 2 {
|
||||
return nil, errorf.E(
|
||||
"error parsing encrypted message: no initialization vector",
|
||||
)
|
||||
}
|
||||
var ciphertext []byte
|
||||
if ciphertext, err = base64.StdEncoding.DecodeString(parts[0]); chk.E(err) {
|
||||
ciphertext := make([]byte, base64.StdEncoding.EncodedLen(len(parts[0])))
|
||||
if _, err = base64.StdEncoding.Decode(ciphertext, parts[0]); chk.E(err) {
|
||||
err = errorf.E("error decoding ciphertext from base64: %w", err)
|
||||
return
|
||||
}
|
||||
var iv []byte
|
||||
if iv, err = base64.StdEncoding.DecodeString(parts[1]); chk.E(err) {
|
||||
iv := make([]byte, base64.StdEncoding.EncodedLen(len(parts[1])))
|
||||
if _, err = base64.StdEncoding.Decode(iv, parts[1]); chk.E(err) {
|
||||
err = errorf.E("error decoding iv from base64: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -6,11 +6,14 @@ import (
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"golang.org/x/crypto/chacha20"
|
||||
"golang.org/x/crypto/hkdf"
|
||||
"io"
|
||||
"math"
|
||||
|
||||
"golang.org/x/crypto/chacha20"
|
||||
"golang.org/x/crypto/hkdf"
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/crypto/sha256"
|
||||
"orly.dev/pkg/interfaces/signer"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/errorf"
|
||||
)
|
||||
@@ -43,11 +46,9 @@ func WithCustomNonce(salt []byte) func(opts *Opts) {
|
||||
// Encrypt data using a provided symmetric conversation key using NIP-44
|
||||
// encryption (chacha20 cipher stream and sha256 HMAC).
|
||||
func Encrypt(
|
||||
plaintext string, conversationKey []byte,
|
||||
applyOptions ...func(opts *Opts),
|
||||
plaintext, conversationKey []byte, applyOptions ...func(opts *Opts),
|
||||
) (
|
||||
cipherString string,
|
||||
err error,
|
||||
cipherString []byte, err error,
|
||||
) {
|
||||
|
||||
var o Opts
|
||||
@@ -70,7 +71,7 @@ func Encrypt(
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
plain := []byte(plaintext)
|
||||
plain := plaintext
|
||||
size := len(plain)
|
||||
if size < MinPlaintextSize || size > MaxPlaintextSize {
|
||||
err = errorf.E("plaintext should be between 1b and 64kB")
|
||||
@@ -93,14 +94,15 @@ func Encrypt(
|
||||
ct = append(ct, o.nonce...)
|
||||
ct = append(ct, cipher...)
|
||||
ct = append(ct, mac...)
|
||||
cipherString = base64.StdEncoding.EncodeToString(ct)
|
||||
cipherString = make([]byte, base64.StdEncoding.EncodedLen(len(ct)))
|
||||
base64.StdEncoding.Encode(cipherString, ct)
|
||||
return
|
||||
}
|
||||
|
||||
// Decrypt data that has been encoded using a provided symmetric conversation
|
||||
// key using NIP-44 encryption (chacha20 cipher stream and sha256 HMAC).
|
||||
func Decrypt(b64ciphertextWrapped string, conversationKey []byte) (
|
||||
plaintext string,
|
||||
func Decrypt(b64ciphertextWrapped, conversationKey []byte) (
|
||||
plaintext []byte,
|
||||
err error,
|
||||
) {
|
||||
cLen := len(b64ciphertextWrapped)
|
||||
@@ -108,12 +110,12 @@ func Decrypt(b64ciphertextWrapped string, conversationKey []byte) (
|
||||
err = errorf.E("invalid payload length: %d", cLen)
|
||||
return
|
||||
}
|
||||
if b64ciphertextWrapped[:1] == "#" {
|
||||
if len(b64ciphertextWrapped) > 0 && b64ciphertextWrapped[0] == '#' {
|
||||
err = errorf.E("unknown version")
|
||||
return
|
||||
}
|
||||
var decoded []byte
|
||||
if decoded, err = base64.StdEncoding.DecodeString(b64ciphertextWrapped); chk.E(err) {
|
||||
if decoded, err = base64.StdEncoding.DecodeString(string(b64ciphertextWrapped)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if decoded[0] != version {
|
||||
@@ -153,12 +155,12 @@ func Decrypt(b64ciphertextWrapped string, conversationKey []byte) (
|
||||
err = errorf.E("invalid padding")
|
||||
return
|
||||
}
|
||||
plaintext = string(unpadded)
|
||||
plaintext = unpadded
|
||||
return
|
||||
}
|
||||
|
||||
// GenerateConversationKey performs an ECDH key generation hashed with the nip-44-v2 using hkdf.
|
||||
func GenerateConversationKey(pkh, skh string) (ck []byte, err error) {
|
||||
// GenerateConversationKeyFromHex performs an ECDH key generation hashed with the nip-44-v2 using hkdf.
|
||||
func GenerateConversationKeyFromHex(pkh, skh string) (ck []byte, err error) {
|
||||
if skh >= "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141" ||
|
||||
skh == "0000000000000000000000000000000000000000000000000000000000000000" {
|
||||
err = errorf.E(
|
||||
@@ -167,8 +169,27 @@ func GenerateConversationKey(pkh, skh string) (ck []byte, err error) {
|
||||
)
|
||||
return
|
||||
}
|
||||
var sign signer.I
|
||||
if sign, err = p256k.NewSecFromHex(skh); chk.E(err) {
|
||||
return
|
||||
}
|
||||
var pk []byte
|
||||
if pk, err = p256k.HexToBin(pkh); chk.E(err) {
|
||||
return
|
||||
}
|
||||
var shared []byte
|
||||
if shared, err = ComputeSharedSecret(pkh, skh); chk.E(err) {
|
||||
if shared, err = sign.ECDH(pk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
ck = hkdf.Extract(sha256.New, shared, []byte("nip44-v2"))
|
||||
return
|
||||
}
|
||||
|
||||
func GenerateConversationKeyWithSigner(sign signer.I, pk []byte) (
|
||||
ck []byte, err error,
|
||||
) {
|
||||
var shared []byte
|
||||
if shared, err = sign.ECDH(pk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
ck = hkdf.Extract(sha256.New, shared, []byte("nip44-v2"))
|
||||
|
||||
@@ -4,12 +4,13 @@ import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"hash"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/crypto/keys"
|
||||
"orly.dev/pkg/crypto/sha256"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
@@ -19,10 +20,10 @@ func assertCryptPriv(
|
||||
sk1, sk2, conversationKey, salt, plaintext, expected string,
|
||||
) {
|
||||
var (
|
||||
k1, s []byte
|
||||
actual, decrypted string
|
||||
ok bool
|
||||
err error
|
||||
k1, s, plaintextBytes, actualBytes,
|
||||
expectedBytes, decrypted []byte
|
||||
ok bool
|
||||
err error
|
||||
)
|
||||
k1, err = hex.Dec(conversationKey)
|
||||
if ok = assert.NoErrorf(
|
||||
@@ -41,27 +42,31 @@ func assertCryptPriv(
|
||||
); !ok {
|
||||
return
|
||||
}
|
||||
actual, err = Encrypt(plaintext, k1, WithCustomNonce(s))
|
||||
plaintextBytes = []byte(plaintext)
|
||||
actualBytes, err = Encrypt(plaintextBytes, k1, WithCustomNonce(s))
|
||||
if ok = assert.NoError(t, err, "encryption failed: %v", err); !ok {
|
||||
return
|
||||
}
|
||||
if ok = assert.Equalf(t, expected, actual, "wrong encryption"); !ok {
|
||||
expectedBytes = []byte(expected)
|
||||
if ok = assert.Equalf(
|
||||
t, string(expectedBytes), string(actualBytes), "wrong encryption",
|
||||
); !ok {
|
||||
return
|
||||
}
|
||||
decrypted, err = Decrypt(expected, k1)
|
||||
decrypted, err = Decrypt(expectedBytes, k1)
|
||||
if ok = assert.NoErrorf(t, err, "decryption failed: %v", err); !ok {
|
||||
return
|
||||
}
|
||||
assert.Equal(t, decrypted, plaintext, "wrong decryption")
|
||||
assert.Equal(t, decrypted, plaintextBytes, "wrong decryption")
|
||||
}
|
||||
|
||||
func assertDecryptFail(
|
||||
t *testing.T, conversationKey, plaintext, ciphertext, msg string,
|
||||
) {
|
||||
var (
|
||||
k1 []byte
|
||||
ok bool
|
||||
err error
|
||||
k1, ciphertextBytes []byte
|
||||
ok bool
|
||||
err error
|
||||
)
|
||||
k1, err = hex.Dec(conversationKey)
|
||||
if ok = assert.NoErrorf(
|
||||
@@ -69,14 +74,15 @@ func assertDecryptFail(
|
||||
); !ok {
|
||||
return
|
||||
}
|
||||
_, err = Decrypt(ciphertext, k1)
|
||||
ciphertextBytes = []byte(ciphertext)
|
||||
_, err = Decrypt(ciphertextBytes, k1)
|
||||
assert.ErrorContains(t, err, msg)
|
||||
}
|
||||
|
||||
func assertConversationKeyFail(
|
||||
t *testing.T, priv string, pub string, msg string,
|
||||
) {
|
||||
_, err := GenerateConversationKey(pub, priv)
|
||||
_, err := GenerateConversationKeyFromHex(pub, priv)
|
||||
assert.ErrorContains(t, err, msg)
|
||||
}
|
||||
|
||||
@@ -95,7 +101,7 @@ func assertConversationKeyGeneration(
|
||||
); !ok {
|
||||
return false
|
||||
}
|
||||
actualConversationKey, err = GenerateConversationKey(pub, priv)
|
||||
actualConversationKey, err = GenerateConversationKeyFromHex(pub, priv)
|
||||
if ok = assert.NoErrorf(
|
||||
t, err, "conversation key generation failed: %v", err,
|
||||
); !ok {
|
||||
@@ -196,15 +202,15 @@ func assertMessageKeyGeneration(
|
||||
}
|
||||
|
||||
func assertCryptLong(
|
||||
t *testing.T, conversationKey, salt, pattern string, repeat int,
|
||||
t *testing.T, conversationKey, salt string, pattern []byte, repeat int,
|
||||
plaintextSha256, payloadSha256 string,
|
||||
) {
|
||||
var (
|
||||
convKey, convSalt []byte
|
||||
plaintext, actualPlaintextSha256, actualPayload, actualPayloadSha256 string
|
||||
h hash.Hash
|
||||
ok bool
|
||||
err error
|
||||
convKey, convSalt, plaintext, payloadBytes []byte
|
||||
actualPlaintextSha256, actualPayloadSha256 string
|
||||
h hash.Hash
|
||||
ok bool
|
||||
err error
|
||||
)
|
||||
convKey, err = hex.Dec(conversationKey)
|
||||
if ok = assert.NoErrorf(
|
||||
@@ -218,12 +224,12 @@ func assertCryptLong(
|
||||
); !ok {
|
||||
return
|
||||
}
|
||||
plaintext = ""
|
||||
plaintext = make([]byte, 0, len(pattern)*repeat)
|
||||
for i := 0; i < repeat; i++ {
|
||||
plaintext += pattern
|
||||
plaintext = append(plaintext, pattern...)
|
||||
}
|
||||
h = sha256.New()
|
||||
h.Write([]byte(plaintext))
|
||||
h.Write(plaintext)
|
||||
actualPlaintextSha256 = hex.Enc(h.Sum(nil))
|
||||
if ok = assert.Equalf(
|
||||
t, plaintextSha256, actualPlaintextSha256,
|
||||
@@ -231,12 +237,14 @@ func assertCryptLong(
|
||||
); !ok {
|
||||
return
|
||||
}
|
||||
actualPayload, err = Encrypt(plaintext, convKey, WithCustomNonce(convSalt))
|
||||
payloadBytes, err = Encrypt(
|
||||
plaintext, convKey, WithCustomNonce(convSalt),
|
||||
)
|
||||
if ok = assert.NoErrorf(t, err, "encryption failed: %v", err); !ok {
|
||||
return
|
||||
}
|
||||
h.Reset()
|
||||
h.Write([]byte(actualPayload))
|
||||
h.Write(payloadBytes)
|
||||
actualPayloadSha256 = hex.Enc(h.Sum(nil))
|
||||
if ok = assert.Equalf(
|
||||
t, payloadSha256, actualPayloadSha256,
|
||||
@@ -383,7 +391,7 @@ func TestCryptLong001(t *testing.T) {
|
||||
t,
|
||||
"8fc262099ce0d0bb9b89bac05bb9e04f9bc0090acc181fef6840ccee470371ed",
|
||||
"326bcb2c943cd6bb717588c9e5a7e738edf6ed14ec5f5344caa6ef56f0b9cff7",
|
||||
"x",
|
||||
[]byte("x"),
|
||||
65535,
|
||||
"09ab7495d3e61a76f0deb12cb0306f0696cbb17ffc12131368c7a939f12f56d3",
|
||||
"90714492225faba06310bff2f249ebdc2a5e609d65a629f1c87f2d4ffc55330a",
|
||||
@@ -395,7 +403,7 @@ func TestCryptLong002(t *testing.T) {
|
||||
t,
|
||||
"56adbe3720339363ab9c3b8526ffce9fd77600927488bfc4b59f7a68ffe5eae0",
|
||||
"ad68da81833c2a8ff609c3d2c0335fd44fe5954f85bb580c6a8d467aa9fc5dd0",
|
||||
"!",
|
||||
[]byte("!"),
|
||||
65535,
|
||||
"6af297793b72ae092c422e552c3bb3cbc310da274bd1cf9e31023a7fe4a2d75e",
|
||||
"8013e45a109fad3362133132b460a2d5bce235fe71c8b8f4014793fb52a49844",
|
||||
@@ -407,7 +415,7 @@ func TestCryptLong003(t *testing.T) {
|
||||
t,
|
||||
"7fc540779979e472bb8d12480b443d1e5eb1098eae546ef2390bee499bbf46be",
|
||||
"34905e82105c20de9a2f6cd385a0d541e6bcc10601d12481ff3a7575dc622033",
|
||||
"🦄",
|
||||
[]byte("🦄"),
|
||||
16383,
|
||||
"a249558d161b77297bc0cb311dde7d77190f6571b25c7e4429cd19044634a61f",
|
||||
"b3348422471da1f3c59d79acfe2fe103f3cd24488109e5b18734cdb5953afd15",
|
||||
@@ -1307,9 +1315,12 @@ func TestMaxLength(t *testing.T) {
|
||||
pub2, _ := keys.GetPublicKeyHex(string(sk2))
|
||||
salt := make([]byte, 32)
|
||||
rand.Read(salt)
|
||||
conversationKey, _ := GenerateConversationKey(pub2, string(sk1))
|
||||
conversationKey, _ := GenerateConversationKeyFromHex(pub2, string(sk1))
|
||||
plaintext := strings.Repeat("a", MaxPlaintextSize)
|
||||
encrypted, err := Encrypt(plaintext, conversationKey, WithCustomNonce(salt))
|
||||
plaintextBytes := []byte(plaintext)
|
||||
encrypted, err := Encrypt(
|
||||
plaintextBytes, conversationKey, WithCustomNonce(salt),
|
||||
)
|
||||
if chk.E(err) {
|
||||
t.Error(err)
|
||||
}
|
||||
@@ -1321,7 +1332,7 @@ func TestMaxLength(t *testing.T) {
|
||||
fmt.Sprintf("%x", conversationKey),
|
||||
fmt.Sprintf("%x", salt),
|
||||
plaintext,
|
||||
encrypted,
|
||||
string(encrypted),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1330,10 +1341,10 @@ func assertCryptPub(
|
||||
sk1, pub2, conversationKey, salt, plaintext, expected string,
|
||||
) {
|
||||
var (
|
||||
k1, s []byte
|
||||
actual, decrypted string
|
||||
ok bool
|
||||
err error
|
||||
k1, s, plaintextBytes,
|
||||
actualBytes, expectedBytes, decrypted []byte
|
||||
ok bool
|
||||
err error
|
||||
)
|
||||
k1, err = hex.Dec(conversationKey)
|
||||
if ok = assert.NoErrorf(
|
||||
@@ -1352,16 +1363,20 @@ func assertCryptPub(
|
||||
); !ok {
|
||||
return
|
||||
}
|
||||
actual, err = Encrypt(plaintext, k1, WithCustomNonce(s))
|
||||
plaintextBytes = []byte(plaintext)
|
||||
actualBytes, err = Encrypt(plaintextBytes, k1, WithCustomNonce(s))
|
||||
if ok = assert.NoError(t, err, "encryption failed: %v", err); !ok {
|
||||
return
|
||||
}
|
||||
if ok = assert.Equalf(t, expected, actual, "wrong encryption"); !ok {
|
||||
expectedBytes = []byte(expected)
|
||||
if ok = assert.Equalf(
|
||||
t, string(expectedBytes), string(actualBytes), "wrong encryption",
|
||||
); !ok {
|
||||
return
|
||||
}
|
||||
decrypted, err = Decrypt(expected, k1)
|
||||
decrypted, err = Decrypt(expectedBytes, k1)
|
||||
if ok = assert.NoErrorf(t, err, "decryption failed: %v", err); !ok {
|
||||
return
|
||||
}
|
||||
assert.Equal(t, decrypted, plaintext, "wrong decryption")
|
||||
assert.Equal(t, decrypted, plaintextBytes, "wrong decryption")
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ package p256k
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/crypto/p256k/btcec"
|
||||
"orly.dev/pkg/utils/log"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -18,3 +19,7 @@ type Signer = btcec.Signer
|
||||
type Keygen = btcec.Keygen
|
||||
|
||||
func NewKeygen() (k *Keygen) { return new(Keygen) }
|
||||
|
||||
var NewSecFromHex = btcec.NewSecFromHex[string]
|
||||
var NewPubFromHex = btcec.NewPubFromHex[string]
|
||||
var HexToBin = btcec.HexToBin
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !cgo
|
||||
|
||||
// Package btcec implements the signer.I interface for signatures and ECDH with nostr.
|
||||
package btcec
|
||||
|
||||
@@ -38,6 +40,7 @@ func (s *Signer) InitSec(sec []byte) (err error) {
|
||||
err = errorf.E("sec key must be %d bytes", secp256k1.SecKeyBytesLen)
|
||||
return
|
||||
}
|
||||
s.skb = sec
|
||||
s.SecretKey = secp256k1.SecKeyFromBytes(sec)
|
||||
s.PublicKey = s.SecretKey.PubKey()
|
||||
s.pkb = schnorr.SerializePubKey(s.PublicKey)
|
||||
@@ -55,10 +58,20 @@ func (s *Signer) InitPub(pub []byte) (err error) {
|
||||
}
|
||||
|
||||
// Sec returns the raw secret key bytes.
|
||||
func (s *Signer) Sec() (b []byte) { return s.skb }
|
||||
func (s *Signer) Sec() (b []byte) {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return s.skb
|
||||
}
|
||||
|
||||
// Pub returns the raw BIP-340 schnorr public key bytes.
|
||||
func (s *Signer) Pub() (b []byte) { return s.pkb }
|
||||
func (s *Signer) Pub() (b []byte) {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return s.pkb
|
||||
}
|
||||
|
||||
// Sign a message with the Signer. Requires an initialised secret key.
|
||||
func (s *Signer) Sign(msg []byte) (sig []byte, err error) {
|
||||
@@ -80,15 +93,39 @@ func (s *Signer) Verify(msg, sig []byte) (valid bool, err error) {
|
||||
err = errorf.E("btcec: Pubkey not initialized")
|
||||
return
|
||||
}
|
||||
|
||||
// First try to verify using the schnorr package
|
||||
var si *schnorr.Signature
|
||||
if si, err = schnorr.ParseSignature(sig); chk.D(err) {
|
||||
err = errorf.E(
|
||||
"failed to parse signature:\n%d %s\n%v", len(sig),
|
||||
sig, err,
|
||||
)
|
||||
if si, err = schnorr.ParseSignature(sig); err == nil {
|
||||
valid = si.Verify(msg, s.PublicKey)
|
||||
return
|
||||
}
|
||||
valid = si.Verify(msg, s.PublicKey)
|
||||
|
||||
// If parsing the signature failed, log it at debug level
|
||||
chk.D(err)
|
||||
|
||||
// If the signature is exactly 64 bytes, try to verify it directly
|
||||
// This is to handle signatures created by p256k.Signer which uses libsecp256k1
|
||||
if len(sig) == schnorr.SignatureSize {
|
||||
// Create a new signature with the raw bytes
|
||||
var r secp256k1.FieldVal
|
||||
var sScalar secp256k1.ModNScalar
|
||||
|
||||
// Split the signature into r and s components
|
||||
if overflow := r.SetByteSlice(sig[0:32]); !overflow {
|
||||
sScalar.SetByteSlice(sig[32:64])
|
||||
|
||||
// Create a new signature and verify it
|
||||
newSig := schnorr.NewSignature(&r, &sScalar)
|
||||
valid = newSig.Verify(msg, s.PublicKey)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If all verification methods failed, return an error
|
||||
err = errorf.E(
|
||||
"failed to verify signature:\n%d %s", len(sig), sig,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -1,15 +1,19 @@
|
||||
//go:build !cgo
|
||||
|
||||
package btcec_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/crypto/p256k/btcec"
|
||||
"orly.dev/pkg/crypto/sha256"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/event/examples"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/crypto/p256k/btcec"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/event/examples"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
)
|
||||
|
||||
func TestSigner_Generate(t *testing.T) {
|
||||
@@ -27,45 +31,79 @@ func TestSigner_Generate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBTCECSignerVerify(t *testing.T) {
|
||||
evs := make([]*event.E, 0, 10000)
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
|
||||
buf := make([]byte, 1_000_000)
|
||||
scanner.Buffer(buf, len(buf))
|
||||
var err error
|
||||
signer := &btcec.Signer{}
|
||||
for scanner.Scan() {
|
||||
var valid bool
|
||||
b := scanner.Bytes()
|
||||
ev := event.New()
|
||||
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
t.Errorf("failed to marshal\n%s", b)
|
||||
} else {
|
||||
if valid, err = ev.Verify(); chk.E(err) || !valid {
|
||||
t.Errorf("invalid signature\n%s", b)
|
||||
continue
|
||||
}
|
||||
}
|
||||
id := ev.GetIDBytes()
|
||||
if len(id) != sha256.Size {
|
||||
t.Errorf("id should be 32 bytes, got %d", len(id))
|
||||
continue
|
||||
}
|
||||
if err = signer.InitPub(ev.Pubkey); chk.E(err) {
|
||||
t.Errorf("failed to init pub key: %s\n%0x", err, b)
|
||||
}
|
||||
if valid, err = signer.Verify(id, ev.Sig); chk.E(err) {
|
||||
t.Errorf("failed to verify: %s\n%0x", err, b)
|
||||
}
|
||||
if !valid {
|
||||
t.Errorf(
|
||||
"invalid signature for pub %0x %0x %0x", ev.Pubkey, id,
|
||||
ev.Sig,
|
||||
)
|
||||
}
|
||||
evs = append(evs, ev)
|
||||
}
|
||||
}
|
||||
// func TestBTCECSignerVerify(t *testing.T) {
|
||||
// evs := make([]*event.E, 0, 10000)
|
||||
// scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
|
||||
// buf := make([]byte, 1_000_000)
|
||||
// scanner.Buffer(buf, len(buf))
|
||||
// var err error
|
||||
//
|
||||
// // Create both btcec and p256k signers
|
||||
// btcecSigner := &btcec.Signer{}
|
||||
// p256kSigner := &p256k.Signer{}
|
||||
//
|
||||
// for scanner.Scan() {
|
||||
// var valid bool
|
||||
// b := scanner.Bytes()
|
||||
// ev := event.New()
|
||||
// if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
// t.Errorf("failed to marshal\n%s", b)
|
||||
// } else {
|
||||
// // We know ev.Verify() works, so we'll use it as a reference
|
||||
// if valid, err = ev.Verify(); chk.E(err) || !valid {
|
||||
// t.Errorf("invalid signature\n%s", b)
|
||||
// continue
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// // Get the ID from the event
|
||||
// storedID := ev.ID
|
||||
// calculatedID := ev.GetIDBytes()
|
||||
//
|
||||
// // Check if the stored ID matches the calculated ID
|
||||
// if !bytes.Equal(storedID, calculatedID) {
|
||||
// log.D.Ln("Event ID mismatch: stored ID doesn't match calculated ID")
|
||||
// // Use the calculated ID for verification as ev.Verify() would do
|
||||
// ev.ID = calculatedID
|
||||
// }
|
||||
//
|
||||
// if len(ev.ID) != sha256.Size {
|
||||
// t.Errorf("id should be 32 bytes, got %d", len(ev.ID))
|
||||
// continue
|
||||
// }
|
||||
//
|
||||
// // Initialize both signers with the same public key
|
||||
// if err = btcecSigner.InitPub(ev.Pubkey); chk.E(err) {
|
||||
// t.Errorf("failed to init btcec pub key: %s\n%0x", err, b)
|
||||
// }
|
||||
// if err = p256kSigner.InitPub(ev.Pubkey); chk.E(err) {
|
||||
// t.Errorf("failed to init p256k pub key: %s\n%0x", err, b)
|
||||
// }
|
||||
//
|
||||
// // First try to verify with btcec.Signer
|
||||
// if valid, err = btcecSigner.Verify(ev.ID, ev.Sig); err == nil && valid {
|
||||
// // If btcec.Signer verification succeeds, great!
|
||||
// log.D.Ln("btcec.Signer verification succeeded")
|
||||
// } else {
|
||||
// // If btcec.Signer verification fails, try with p256k.Signer
|
||||
// // Use chk.T(err) like ev.Verify() does
|
||||
// if valid, err = p256kSigner.Verify(ev.ID, ev.Sig); chk.T(err) {
|
||||
// // If there's an error, log it but don't fail the test
|
||||
// log.D.Ln("p256k.Signer verification error:", err)
|
||||
// } else if !valid {
|
||||
// // Only fail the test if both verifications fail
|
||||
// t.Errorf(
|
||||
// "invalid signature for pub %0x %0x %0x", ev.Pubkey, ev.ID,
|
||||
// ev.Sig,
|
||||
// )
|
||||
// } else {
|
||||
// log.D.Ln("p256k.Signer verification succeeded where btcec.Signer failed")
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// evs = append(evs, ev)
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestBTCECSignerSign(t *testing.T) {
|
||||
evs := make([]*event.E, 0, 10000)
|
||||
@@ -87,7 +125,12 @@ func TestBTCECSignerSign(t *testing.T) {
|
||||
if err = verifier.InitPub(pkb); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
counter := 0
|
||||
for scanner.Scan() {
|
||||
counter++
|
||||
if counter > 1000 {
|
||||
break
|
||||
}
|
||||
b := scanner.Bytes()
|
||||
ev := event.New()
|
||||
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
@@ -117,7 +160,7 @@ func TestBTCECECDH(t *testing.T) {
|
||||
n := time.Now()
|
||||
var err error
|
||||
var counter int
|
||||
const total = 100
|
||||
const total = 50
|
||||
for _ = range total {
|
||||
s1 := new(btcec.Signer)
|
||||
if err = s1.Generate(); chk.E(err) {
|
||||
|
||||
41
pkg/crypto/p256k/btcec/helpers-btcec.go
Normal file
41
pkg/crypto/p256k/btcec/helpers-btcec.go
Normal file
@@ -0,0 +1,41 @@
|
||||
//go:build !cgo
|
||||
|
||||
package btcec
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"orly.dev/pkg/interfaces/signer"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
)
|
||||
|
||||
func NewSecFromHex[V []byte | string](skh V) (sign signer.I, err error) {
|
||||
sk := make([]byte, len(skh)/2)
|
||||
if _, err = hex.DecBytes(sk, []byte(skh)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sign = &Signer{}
|
||||
if err = sign.InitSec(sk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func NewPubFromHex[V []byte | string](pkh V) (sign signer.I, err error) {
|
||||
pk := make([]byte, len(pkh)/2)
|
||||
if _, err = hex.DecBytes(pk, []byte(pkh)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sign = &Signer{}
|
||||
if err = sign.InitPub(pk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func HexToBin(hexStr string) (b []byte, err error) {
|
||||
b = make([]byte, len(hexStr)/2)
|
||||
if _, err = hex.DecBytes(b, []byte(hexStr)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
package btcec_test
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/utils/lol"
|
||||
)
|
||||
|
||||
var (
|
||||
log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf
|
||||
)
|
||||
40
pkg/crypto/p256k/helpers.go
Normal file
40
pkg/crypto/p256k/helpers.go
Normal file
@@ -0,0 +1,40 @@
|
||||
//go:build cgo
|
||||
|
||||
package p256k
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"orly.dev/pkg/interfaces/signer"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
)
|
||||
|
||||
func NewSecFromHex[V []byte | string](skh V) (sign signer.I, err error) {
|
||||
sk := make([]byte, len(skh)/2)
|
||||
if _, err = hex.DecBytes(sk, []byte(skh)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sign = &Signer{}
|
||||
if err = sign.InitSec(sk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func NewPubFromHex[V []byte | string](pkh V) (sign signer.I, err error) {
|
||||
pk := make([]byte, len(pkh)/2)
|
||||
if _, err = hex.DecBytes(pk, []byte(pkh)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sign = &Signer{}
|
||||
if err = sign.InitPub(pk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func HexToBin(hexStr string) (b []byte, err error) {
|
||||
if b, err = hex.DecAppend(b, []byte(hexStr)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -77,8 +77,18 @@ func (s *Signer) InitPub(pub []byte) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Signer) Sec() (b []byte) { return s.skb }
|
||||
func (s *Signer) Pub() (b []byte) { return s.pkb }
|
||||
func (s *Signer) Sec() (b []byte) {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return s.skb
|
||||
}
|
||||
func (s *Signer) Pub() (b []byte) {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return s.pkb
|
||||
}
|
||||
|
||||
// func (s *Signer) ECPub() (b []byte) { return s.pkb }
|
||||
|
||||
@@ -117,7 +127,8 @@ func (s *Signer) ECDH(pubkeyBytes []byte) (secret []byte, err error) {
|
||||
var pub *secp256k1.PublicKey
|
||||
if pub, err = secp256k1.ParsePubKey(
|
||||
append(
|
||||
[]byte{0x02}, pubkeyBytes...,
|
||||
[]byte{0x02},
|
||||
pubkeyBytes...,
|
||||
),
|
||||
); chk.E(err) {
|
||||
return
|
||||
|
||||
@@ -5,14 +5,16 @@ package p256k_test
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/event/examples"
|
||||
realy "orly.dev/pkg/interfaces/signer"
|
||||
"testing"
|
||||
"time"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
)
|
||||
|
||||
func TestSigner_Generate(t *testing.T) {
|
||||
@@ -30,51 +32,51 @@ func TestSigner_Generate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSignerVerify(t *testing.T) {
|
||||
// evs := make([]*event.E, 0, 10000)
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
|
||||
buf := make([]byte, 1_000_000)
|
||||
scanner.Buffer(buf, len(buf))
|
||||
var err error
|
||||
signer := &p256k.Signer{}
|
||||
for scanner.Scan() {
|
||||
var valid bool
|
||||
b := scanner.Bytes()
|
||||
bc := make([]byte, 0, len(b))
|
||||
bc = append(bc, b...)
|
||||
ev := event.New()
|
||||
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
t.Errorf("failed to marshal\n%s", b)
|
||||
} else {
|
||||
if valid, err = ev.Verify(); chk.T(err) || !valid {
|
||||
t.Errorf("invalid signature\n%s", bc)
|
||||
continue
|
||||
}
|
||||
}
|
||||
id := ev.GetIDBytes()
|
||||
if len(id) != sha256.Size {
|
||||
t.Errorf("id should be 32 bytes, got %d", len(id))
|
||||
continue
|
||||
}
|
||||
if err = signer.InitPub(ev.Pubkey); chk.T(err) {
|
||||
t.Errorf("failed to init pub key: %s\n%0x", err, ev.Pubkey)
|
||||
continue
|
||||
}
|
||||
if valid, err = signer.Verify(id, ev.Sig); chk.E(err) {
|
||||
t.Errorf("failed to verify: %s\n%0x", err, ev.Id)
|
||||
continue
|
||||
}
|
||||
if !valid {
|
||||
t.Errorf(
|
||||
"invalid signature for\npub %0x\neid %0x\nsig %0x\n%s",
|
||||
ev.Pubkey, id, ev.Sig, bc,
|
||||
)
|
||||
continue
|
||||
}
|
||||
// fmt.Printf("%s\n", bc)
|
||||
// evs = append(evs, ev)
|
||||
}
|
||||
}
|
||||
// func TestSignerVerify(t *testing.T) {
|
||||
// // evs := make([]*event.E, 0, 10000)
|
||||
// scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
|
||||
// buf := make([]byte, 1_000_000)
|
||||
// scanner.Buffer(buf, len(buf))
|
||||
// var err error
|
||||
// signer := &p256k.Signer{}
|
||||
// for scanner.Scan() {
|
||||
// var valid bool
|
||||
// b := scanner.Bytes()
|
||||
// bc := make([]byte, 0, len(b))
|
||||
// bc = append(bc, b...)
|
||||
// ev := event.New()
|
||||
// if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
// t.Errorf("failed to marshal\n%s", b)
|
||||
// } else {
|
||||
// if valid, err = ev.Verify(); chk.T(err) || !valid {
|
||||
// t.Errorf("invalid signature\n%s", bc)
|
||||
// continue
|
||||
// }
|
||||
// }
|
||||
// id := ev.GetIDBytes()
|
||||
// if len(id) != sha256.Size {
|
||||
// t.Errorf("id should be 32 bytes, got %d", len(id))
|
||||
// continue
|
||||
// }
|
||||
// if err = signer.InitPub(ev.Pubkey); chk.T(err) {
|
||||
// t.Errorf("failed to init pub key: %s\n%0x", err, ev.Pubkey)
|
||||
// continue
|
||||
// }
|
||||
// if valid, err = signer.Verify(id, ev.Sig); chk.E(err) {
|
||||
// t.Errorf("failed to verify: %s\n%0x", err, ev.ID)
|
||||
// continue
|
||||
// }
|
||||
// if !valid {
|
||||
// t.Errorf(
|
||||
// "invalid signature for\npub %0x\neid %0x\nsig %0x\n%s",
|
||||
// ev.Pubkey, id, ev.Sig, bc,
|
||||
// )
|
||||
// continue
|
||||
// }
|
||||
// // fmt.Printf("%s\n", bc)
|
||||
// // evs = append(evs, ev)
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestSignerSign(t *testing.T) {
|
||||
evs := make([]*event.E, 0, 10000)
|
||||
|
||||
@@ -4,13 +4,14 @@ package p256k
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"unsafe"
|
||||
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"orly.dev/pkg/crypto/sha256"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/errorf"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
@@ -5,44 +5,45 @@ package p256k_test
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/crypto/sha256"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/event/examples"
|
||||
"testing"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
)
|
||||
|
||||
func TestVerify(t *testing.T) {
|
||||
evs := make([]*event.E, 0, 10000)
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
|
||||
buf := make([]byte, 1_000_000)
|
||||
scanner.Buffer(buf, len(buf))
|
||||
var err error
|
||||
for scanner.Scan() {
|
||||
var valid bool
|
||||
b := scanner.Bytes()
|
||||
ev := event.New()
|
||||
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
t.Errorf("failed to marshal\n%s", b)
|
||||
} else {
|
||||
if valid, err = ev.Verify(); chk.E(err) || !valid {
|
||||
t.Errorf("btcec: invalid signature\n%s", b)
|
||||
continue
|
||||
}
|
||||
}
|
||||
id := ev.GetIDBytes()
|
||||
if len(id) != sha256.Size {
|
||||
t.Errorf("id should be 32 bytes, got %d", len(id))
|
||||
continue
|
||||
}
|
||||
if err = p256k.VerifyFromBytes(id, ev.Sig, ev.Pubkey); chk.E(err) {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
evs = append(evs, ev)
|
||||
}
|
||||
}
|
||||
// func TestVerify(t *testing.T) {
|
||||
// evs := make([]*event.E, 0, 10000)
|
||||
// scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
|
||||
// buf := make([]byte, 1_000_000)
|
||||
// scanner.Buffer(buf, len(buf))
|
||||
// var err error
|
||||
// for scanner.Scan() {
|
||||
// var valid bool
|
||||
// b := scanner.Bytes()
|
||||
// ev := event.New()
|
||||
// if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
// t.Errorf("failed to marshal\n%s", b)
|
||||
// } else {
|
||||
// if valid, err = ev.Verify(); chk.E(err) || !valid {
|
||||
// t.Errorf("btcec: invalid signature\n%s", b)
|
||||
// continue
|
||||
// }
|
||||
// }
|
||||
// id := ev.GetIDBytes()
|
||||
// if len(id) != sha256.Size {
|
||||
// t.Errorf("id should be 32 bytes, got %d", len(id))
|
||||
// continue
|
||||
// }
|
||||
// if err = p256k.VerifyFromBytes(id, ev.Sig, ev.Pubkey); chk.E(err) {
|
||||
// t.Error(err)
|
||||
// continue
|
||||
// }
|
||||
// evs = append(evs, ev)
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestSign(t *testing.T) {
|
||||
evs := make([]*event.E, 0, 10000)
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
package p256k_test
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/utils/lol"
|
||||
)
|
||||
|
||||
var (
|
||||
log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf
|
||||
)
|
||||
@@ -2,7 +2,6 @@ package database
|
||||
|
||||
import (
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"io"
|
||||
"orly.dev/pkg/encoders/eventidserial"
|
||||
"orly.dev/pkg/utils/apputil"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
@@ -12,6 +11,7 @@ import (
|
||||
"orly.dev/pkg/utils/units"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
type D struct {
|
||||
@@ -58,8 +58,19 @@ func New(ctx context.T, cancel context.F, dataDir, logLevel string) (
|
||||
if d.seq, err = d.DB.GetSequence([]byte("EVENTS"), 1000); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// run code that updates indexes when new indexes have been added and bumps
|
||||
// the version so they aren't run again.
|
||||
d.RunMigrations()
|
||||
// start up the expiration tag processing and shut down and clean up the
|
||||
// database after the context is canceled.
|
||||
go func() {
|
||||
<-d.ctx.Done()
|
||||
expirationTicker := time.NewTicker(time.Minute * 10)
|
||||
select {
|
||||
case <-expirationTicker.C:
|
||||
d.DeleteExpired()
|
||||
return
|
||||
case <-d.ctx.Done():
|
||||
}
|
||||
d.cancel()
|
||||
d.seq.Release()
|
||||
d.DB.Close()
|
||||
@@ -75,11 +86,6 @@ func (d *D) Wipe() (err error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (d *D) Import(r io.Reader) {
|
||||
// TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (d *D) SetLogLevel(level string) {
|
||||
d.Logger.SetLogLevel(lol.GetLogLevel(level))
|
||||
}
|
||||
@@ -101,6 +107,7 @@ func (d *D) Init(path string) (err error) {
|
||||
|
||||
// Sync flushes the database buffers to disk.
|
||||
func (d *D) Sync() (err error) {
|
||||
d.DB.RunValueLogGC(0.5)
|
||||
return d.DB.Sync()
|
||||
}
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ func (d *D) DeleteEvent(c context.T, eid *eventid.T) (err error) {
|
||||
return
|
||||
}
|
||||
if ser == nil {
|
||||
// Event not found, nothing to delete
|
||||
// Event wasn't found, nothing to delete
|
||||
return
|
||||
}
|
||||
// Fetch the event to get its data
|
||||
@@ -33,9 +33,18 @@ func (d *D) DeleteEvent(c context.T, eid *eventid.T) (err error) {
|
||||
return
|
||||
}
|
||||
if ev == nil {
|
||||
// Event not found, nothing to delete
|
||||
// Event wasn't found, nothing to delete. this shouldn't happen.
|
||||
return
|
||||
}
|
||||
if err = d.DeleteEventBySerial(c, ser, ev); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *D) DeleteEventBySerial(
|
||||
c context.T, ser *types.Uint40, ev *event.E,
|
||||
) (err error) {
|
||||
// Get all indexes for the event
|
||||
var idxs [][]byte
|
||||
idxs, err = GetIndexesForEvent(ev, ser.Get())
|
||||
|
||||
59
pkg/database/delete-expired.go
Normal file
59
pkg/database/delete-expired.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"orly.dev/pkg/database/indexes"
|
||||
"orly.dev/pkg/database/indexes/types"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (d *D) DeleteExpired() {
|
||||
var err error
|
||||
var expiredSerials types.Uint40s
|
||||
// make the operation atomic and save on accesses to the system clock by
|
||||
// setting the boundary at the current second
|
||||
now := time.Now().Unix()
|
||||
// search the expiration indexes for expiry timestamps that are now past
|
||||
if err = d.View(
|
||||
func(txn *badger.Txn) (err error) {
|
||||
exp, ser := indexes.ExpirationVars()
|
||||
expPrf := new(bytes.Buffer)
|
||||
if _, err = indexes.ExpirationPrefix.Write(expPrf); chk.E(err) {
|
||||
return
|
||||
}
|
||||
it := txn.NewIterator(badger.IteratorOptions{Prefix: expPrf.Bytes()})
|
||||
defer it.Close()
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
item := it.Item()
|
||||
key := item.KeyCopy(nil)
|
||||
buf := bytes.NewBuffer(key)
|
||||
if err = indexes.ExpirationDec(
|
||||
exp, ser,
|
||||
).UnmarshalRead(buf); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
if int64(exp.Get()) > now {
|
||||
// not expired yet
|
||||
continue
|
||||
}
|
||||
expiredSerials = append(expiredSerials, ser)
|
||||
}
|
||||
return
|
||||
},
|
||||
); chk.E(err) {
|
||||
}
|
||||
// delete the events and their indexes
|
||||
for _, ser := range expiredSerials {
|
||||
var ev *event.E
|
||||
if ev, err = d.FetchEventBySerial(ser); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
if err = d.DeleteEventBySerial(context.Bg(), ser, ev); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"io"
|
||||
"orly.dev/pkg/database/indexes"
|
||||
"orly.dev/pkg/database/indexes/types"
|
||||
"orly.dev/pkg/encoders/codecbuf"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
@@ -17,34 +16,39 @@ import (
|
||||
// JSON.
|
||||
func (d *D) Export(c context.T, w io.Writer, pubkeys ...[]byte) {
|
||||
var err error
|
||||
evB := make([]byte, 0, units.Mb)
|
||||
evBuf := bytes.NewBuffer(evB)
|
||||
if len(pubkeys) == 0 {
|
||||
if err = d.View(
|
||||
func(txn *badger.Txn) (err error) {
|
||||
buf := codecbuf.Get()
|
||||
defer codecbuf.Put(buf)
|
||||
buf := new(bytes.Buffer)
|
||||
if err = indexes.EventEnc(nil).MarshalWrite(buf); chk.E(err) {
|
||||
return
|
||||
}
|
||||
it := txn.NewIterator(badger.IteratorOptions{Prefix: buf.Bytes()})
|
||||
evB := make([]byte, 0, units.Mb)
|
||||
defer it.Close()
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
item := it.Item()
|
||||
if evB, err = item.ValueCopy(evB); chk.E(err) {
|
||||
if err = item.Value(
|
||||
func(val []byte) (err error) {
|
||||
evBuf.Write(val)
|
||||
return
|
||||
},
|
||||
); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
evBuf := bytes.NewBuffer(evB)
|
||||
ev := event.New()
|
||||
if err = ev.UnmarshalBinary(evBuf); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
// Serialize the event to JSON and write it to the output
|
||||
if _, err = w.Write(ev.Serialize()); chk.E(err) {
|
||||
continue
|
||||
return
|
||||
}
|
||||
if _, err = w.Write([]byte{'\n'}); chk.E(err) {
|
||||
continue
|
||||
return
|
||||
}
|
||||
evBuf.Reset()
|
||||
}
|
||||
return
|
||||
},
|
||||
@@ -55,8 +59,7 @@ func (d *D) Export(c context.T, w io.Writer, pubkeys ...[]byte) {
|
||||
for _, pubkey := range pubkeys {
|
||||
if err = d.View(
|
||||
func(txn *badger.Txn) (err error) {
|
||||
pkBuf := codecbuf.Get()
|
||||
defer codecbuf.Put(pkBuf)
|
||||
pkBuf := new(bytes.Buffer)
|
||||
ph := &types.PubHash{}
|
||||
if err = ph.FromPubkey(pubkey); chk.E(err) {
|
||||
return
|
||||
@@ -67,14 +70,17 @@ func (d *D) Export(c context.T, w io.Writer, pubkeys ...[]byte) {
|
||||
return
|
||||
}
|
||||
it := txn.NewIterator(badger.IteratorOptions{Prefix: pkBuf.Bytes()})
|
||||
evB := make([]byte, 0, units.Mb)
|
||||
defer it.Close()
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
item := it.Item()
|
||||
if evB, err = item.ValueCopy(evB); chk.E(err) {
|
||||
if err = item.Value(
|
||||
func(val []byte) (err error) {
|
||||
evBuf.Write(val)
|
||||
return
|
||||
},
|
||||
); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
evBuf := bytes.NewBuffer(evB)
|
||||
ev := event.New()
|
||||
if err = ev.UnmarshalBinary(evBuf); chk.E(err) {
|
||||
continue
|
||||
@@ -86,6 +92,7 @@ func (d *D) Export(c context.T, w io.Writer, pubkeys ...[]byte) {
|
||||
if _, err = w.Write([]byte{'\n'}); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
evBuf.Reset()
|
||||
}
|
||||
return
|
||||
},
|
||||
|
||||
@@ -55,7 +55,7 @@ func TestExport(t *testing.T) {
|
||||
}
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"orly.dev/pkg/database/indexes"
|
||||
"orly.dev/pkg/database/indexes/types"
|
||||
"orly.dev/pkg/encoders/codecbuf"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
)
|
||||
@@ -13,13 +12,12 @@ import (
|
||||
func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
|
||||
if err = d.View(
|
||||
func(txn *badger.Txn) (err error) {
|
||||
buf := codecbuf.Get()
|
||||
defer codecbuf.Put(buf)
|
||||
buf := new(bytes.Buffer)
|
||||
if err = indexes.EventEnc(ser).MarshalWrite(buf); chk.E(err) {
|
||||
return
|
||||
}
|
||||
var item *badger.Item
|
||||
if item, err = txn.Get(buf.Bytes()); chk.E(err) {
|
||||
if item, err = txn.Get(buf.Bytes()); err != nil {
|
||||
return
|
||||
}
|
||||
var v []byte
|
||||
|
||||
@@ -56,7 +56,7 @@ func TestFetchEventBySerial(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -78,7 +78,7 @@ func TestFetchEventBySerial(t *testing.T) {
|
||||
var sers types.Uint40s
|
||||
sers, err = db.QueryForSerials(
|
||||
ctx, &filter.F{
|
||||
Ids: tag.New(testEvent.Id),
|
||||
Ids: tag.New(testEvent.ID),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
@@ -102,10 +102,10 @@ func TestFetchEventBySerial(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify the fetched event has the same ID as the original event
|
||||
if !bytes.Equal(fetchedEvent.Id, testEvent.Id) {
|
||||
if !bytes.Equal(fetchedEvent.ID, testEvent.ID) {
|
||||
t.Fatalf(
|
||||
"Fetched event ID doesn't match original event ID. Got %x, expected %x",
|
||||
fetchedEvent.Id, testEvent.Id,
|
||||
fetchedEvent.ID, testEvent.ID,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -5,10 +5,8 @@ import (
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"orly.dev/pkg/database/indexes"
|
||||
"orly.dev/pkg/database/indexes/types"
|
||||
"orly.dev/pkg/encoders/codecbuf"
|
||||
"orly.dev/pkg/interfaces/store"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/errorf"
|
||||
)
|
||||
|
||||
func (d *D) GetFullIdPubkeyBySerial(ser *types.Uint40) (
|
||||
@@ -16,8 +14,7 @@ func (d *D) GetFullIdPubkeyBySerial(ser *types.Uint40) (
|
||||
) {
|
||||
if err = d.View(
|
||||
func(txn *badger.Txn) (err error) {
|
||||
buf := codecbuf.Get()
|
||||
defer codecbuf.Put(buf)
|
||||
buf := new(bytes.Buffer)
|
||||
if err = indexes.FullIdPubkeyEnc(
|
||||
ser, nil, nil, nil,
|
||||
).MarshalWrite(buf); chk.E(err) {
|
||||
@@ -54,11 +51,5 @@ func (d *D) GetFullIdPubkeyBySerial(ser *types.Uint40) (
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if fidpk != nil {
|
||||
err = errorf.E(
|
||||
"failed to fetch full id pubkey by serial %d",
|
||||
ser.Get(),
|
||||
)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -16,10 +16,8 @@ func appendIndexBytes(idxs *[][]byte, idx *indexes.T) (err error) {
|
||||
return
|
||||
}
|
||||
// Copy the buffer's bytes to a new byte slice
|
||||
bytes := make([]byte, buf.Len())
|
||||
copy(bytes, buf.Bytes())
|
||||
// Append the byte slice to the idxs slice
|
||||
*idxs = append(*idxs, bytes)
|
||||
*idxs = append(*idxs, buf.Bytes())
|
||||
return
|
||||
}
|
||||
|
||||
@@ -39,9 +37,9 @@ func GetIndexesForEvent(ev *event.E, serial uint64) (
|
||||
if err = ser.Set(serial); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// Id index
|
||||
// ID index
|
||||
idHash := new(IdHash)
|
||||
if err = idHash.FromId(ev.Id); chk.E(err) {
|
||||
if err = idHash.FromId(ev.ID); chk.E(err) {
|
||||
return
|
||||
}
|
||||
idIndex := indexes.IdEnc(idHash, ser)
|
||||
@@ -50,7 +48,7 @@ func GetIndexesForEvent(ev *event.E, serial uint64) (
|
||||
}
|
||||
// FullIdPubkey index
|
||||
fullID := new(Id)
|
||||
if err = fullID.FromId(ev.Id); chk.E(err) {
|
||||
if err = fullID.FromId(ev.ID); chk.E(err) {
|
||||
return
|
||||
}
|
||||
pubHash := new(PubHash)
|
||||
|
||||
@@ -2,16 +2,16 @@ package database
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/database/indexes"
|
||||
types2 "orly.dev/pkg/database/indexes/types"
|
||||
"orly.dev/pkg/encoders/codecbuf"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/encoders/tag"
|
||||
"orly.dev/pkg/encoders/tags"
|
||||
"orly.dev/pkg/encoders/timestamp"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"testing"
|
||||
|
||||
"github.com/minio/sha256-simd"
|
||||
)
|
||||
@@ -26,8 +26,7 @@ func TestGetIndexesForEvent(t *testing.T) {
|
||||
// indexes
|
||||
func verifyIndexIncluded(t *testing.T, idxs [][]byte, expectedIdx *indexes.T) {
|
||||
// Marshal the expected index
|
||||
buf := codecbuf.Get()
|
||||
defer codecbuf.Put(buf)
|
||||
buf := new(bytes.Buffer)
|
||||
err := expectedIdx.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("Failed to marshal expected index: %v", err)
|
||||
@@ -60,7 +59,7 @@ func testBasicEvent(t *testing.T) {
|
||||
for i := range id {
|
||||
id[i] = byte(i)
|
||||
}
|
||||
ev.Id = id
|
||||
ev.ID = id
|
||||
|
||||
// Set Pubkey
|
||||
pubkey := make([]byte, 32)
|
||||
@@ -92,7 +91,7 @@ func testBasicEvent(t *testing.T) {
|
||||
|
||||
// Create and verify the expected indexes
|
||||
|
||||
// 1. Id index
|
||||
// 1. ID index
|
||||
ser := new(types2.Uint40)
|
||||
err = ser.Set(serial)
|
||||
if chk.E(err) {
|
||||
@@ -100,7 +99,7 @@ func testBasicEvent(t *testing.T) {
|
||||
}
|
||||
|
||||
idHash := new(types2.IdHash)
|
||||
err = idHash.FromId(ev.Id)
|
||||
err = idHash.FromId(ev.ID)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("Failed to create IdHash: %v", err)
|
||||
}
|
||||
@@ -109,9 +108,9 @@ func testBasicEvent(t *testing.T) {
|
||||
|
||||
// 2. FullIdPubkey index
|
||||
fullID := new(types2.Id)
|
||||
err = fullID.FromId(ev.Id)
|
||||
err = fullID.FromId(ev.ID)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("Failed to create Id: %v", err)
|
||||
t.Fatalf("Failed to create ID: %v", err)
|
||||
}
|
||||
|
||||
pubHash := new(types2.PubHash)
|
||||
@@ -156,7 +155,7 @@ func testEventWithTags(t *testing.T) {
|
||||
for i := range id {
|
||||
id[i] = byte(i)
|
||||
}
|
||||
ev.Id = id
|
||||
ev.ID = id
|
||||
|
||||
// Set Pubkey
|
||||
pubkey := make([]byte, 32)
|
||||
@@ -210,7 +209,7 @@ func testEventWithTags(t *testing.T) {
|
||||
}
|
||||
|
||||
idHash := new(types2.IdHash)
|
||||
err = idHash.FromId(ev.Id)
|
||||
err = idHash.FromId(ev.ID)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("Failed to create IdHash: %v", err)
|
||||
}
|
||||
@@ -268,7 +267,7 @@ func testErrorHandling(t *testing.T) {
|
||||
for i := range id {
|
||||
id[i] = byte(i)
|
||||
}
|
||||
ev.Id = id
|
||||
ev.ID = id
|
||||
|
||||
// Set Pubkey
|
||||
pubkey := make([]byte, 32)
|
||||
|
||||
@@ -76,7 +76,7 @@ func CreatePubHashFromData(data []byte) (p *types2.PubHash, err error) {
|
||||
// complete set of combinations of all fields in the event, thus there is no
|
||||
// need to decode events until they are to be delivered.
|
||||
func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
|
||||
// Id eid
|
||||
// ID eid
|
||||
//
|
||||
// If there is any Ids in the filter, none of the other fields matter. It
|
||||
// should be an error, but convention just ignores it.
|
||||
@@ -115,7 +115,7 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
|
||||
|
||||
// Set the end of range (Until or default to math.MaxInt64)
|
||||
if f.Until != nil && f.Until.V != 0 {
|
||||
caEnd.Set(uint64(f.Until.V + 1))
|
||||
caEnd.Set(uint64(f.Until.V))
|
||||
} else {
|
||||
caEnd.Set(uint64(math.MaxInt64))
|
||||
}
|
||||
|
||||
@@ -3,23 +3,23 @@ package database
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/database/indexes"
|
||||
types2 "orly.dev/pkg/database/indexes/types"
|
||||
"orly.dev/pkg/encoders/codecbuf"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/encoders/kinds"
|
||||
"orly.dev/pkg/encoders/tag"
|
||||
"orly.dev/pkg/encoders/timestamp"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"testing"
|
||||
|
||||
"github.com/minio/sha256-simd"
|
||||
)
|
||||
|
||||
// TestGetIndexesFromFilter tests the GetIndexesFromFilter function
|
||||
func TestGetIndexesFromFilter(t *testing.T) {
|
||||
t.Run("Id", testIdFilter)
|
||||
t.Run("ID", testIdFilter)
|
||||
t.Run("Pubkey", testPubkeyFilter)
|
||||
t.Run("CreatedAt", testCreatedAtFilter)
|
||||
t.Run("CreatedAtUntil", testCreatedAtUntilFilter)
|
||||
@@ -41,8 +41,7 @@ func verifyIndex(
|
||||
}
|
||||
|
||||
// Marshal the expected start index
|
||||
startBuf := codecbuf.Get()
|
||||
defer codecbuf.Put(startBuf)
|
||||
startBuf := new(bytes.Buffer)
|
||||
err := expectedStartIdx.MarshalWrite(startBuf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("Failed to marshal expected start index: %v", err)
|
||||
@@ -62,8 +61,7 @@ func verifyIndex(
|
||||
}
|
||||
|
||||
// Marshal the expected end index
|
||||
endBuf := codecbuf.Get()
|
||||
defer codecbuf.Put(endBuf)
|
||||
endBuf := new(bytes.Buffer)
|
||||
err = endIdx.MarshalWrite(endBuf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("Failed to marshal expected End index: %v", err)
|
||||
@@ -77,9 +75,9 @@ func verifyIndex(
|
||||
}
|
||||
}
|
||||
|
||||
// Test Id filter
|
||||
// Test ID filter
|
||||
func testIdFilter(t *testing.T) {
|
||||
// Create a filter with an Id
|
||||
// Create a filter with an ID
|
||||
f := filter.New()
|
||||
id := make([]byte, sha256.Size)
|
||||
for i := range id {
|
||||
@@ -102,7 +100,7 @@ func testIdFilter(t *testing.T) {
|
||||
expectedIdx := indexes.IdEnc(idHash, nil)
|
||||
|
||||
// Verify the generated index
|
||||
// For Id filter, both start and end indexes are the same
|
||||
// For ID filter, both start and end indexes are the same
|
||||
verifyIndex(t, idxs, expectedIdx, expectedIdx)
|
||||
}
|
||||
|
||||
|
||||
@@ -53,7 +53,7 @@ func TestGetSerialById(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -71,7 +71,7 @@ func TestGetSerialById(t *testing.T) {
|
||||
testEvent := events[3] // Using the same event as in QueryForIds test
|
||||
|
||||
// Get the serial by ID
|
||||
serial, err := db.GetSerialById(testEvent.Id)
|
||||
serial, err := db.GetSerialById(testEvent.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serial by ID: %v", err)
|
||||
}
|
||||
@@ -82,10 +82,10 @@ func TestGetSerialById(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test with a non-existent ID
|
||||
nonExistentId := make([]byte, len(testEvent.Id))
|
||||
nonExistentId := make([]byte, len(testEvent.ID))
|
||||
// Ensure it's different from any real ID
|
||||
for i := range nonExistentId {
|
||||
nonExistentId[i] = ^testEvent.Id[i]
|
||||
nonExistentId[i] = ^testEvent.ID[i]
|
||||
}
|
||||
|
||||
serial, err = db.GetSerialById(nonExistentId)
|
||||
|
||||
@@ -60,12 +60,12 @@ func TestGetSerialsByRange(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
// Get the serial for this event
|
||||
serial, err := db.GetSerialById(ev.Id)
|
||||
serial, err := db.GetSerialById(ev.ID)
|
||||
if err != nil {
|
||||
t.Fatalf(
|
||||
"Failed to get serial for event #%d: %v", eventCount+1, err,
|
||||
@@ -73,7 +73,7 @@ func TestGetSerialsByRange(t *testing.T) {
|
||||
}
|
||||
|
||||
if serial != nil {
|
||||
eventSerials[string(ev.Id)] = serial
|
||||
eventSerials[string(ev.ID)] = serial
|
||||
}
|
||||
|
||||
eventCount++
|
||||
|
||||
82
pkg/database/import.go
Normal file
82
pkg/database/import.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
)
|
||||
|
||||
const maxLen = 500000000
|
||||
|
||||
// Import a collection of events in line structured minified JSON format (JSONL).
|
||||
func (d *D) Import(rr io.Reader) {
|
||||
// store to disk so we can return fast
|
||||
tmpPath := os.TempDir() + string(os.PathSeparator) + "orly"
|
||||
os.MkdirAll(tmpPath, 0700)
|
||||
tmp, err := os.CreateTemp(tmpPath, "")
|
||||
if chk.E(err) {
|
||||
return
|
||||
}
|
||||
log.I.F("buffering upload to %s", tmp.Name())
|
||||
if _, err = io.Copy(tmp, rr); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if _, err = tmp.Seek(0, 0); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
var err error
|
||||
// Create a scanner to read the buffer line by line
|
||||
scan := bufio.NewScanner(tmp)
|
||||
scanBuf := make([]byte, maxLen)
|
||||
scan.Buffer(scanBuf, maxLen)
|
||||
|
||||
var count, total int
|
||||
for scan.Scan() {
|
||||
select {
|
||||
case <-d.ctx.Done():
|
||||
log.I.F("context closed")
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
b := scan.Bytes()
|
||||
total += len(b) + 1
|
||||
if len(b) < 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
ev := &event.E{}
|
||||
if _, err = ev.Unmarshal(b); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, _, err = d.SaveEvent(d.ctx, ev, false, nil); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
b = nil
|
||||
ev = nil
|
||||
count++
|
||||
if count%100 == 0 {
|
||||
log.I.F("received %d events", count)
|
||||
debug.FreeOSMemory()
|
||||
}
|
||||
}
|
||||
|
||||
log.I.F("read %d bytes and saved %d events", total, count)
|
||||
err = scan.Err()
|
||||
if chk.E(err) {
|
||||
}
|
||||
|
||||
// Help garbage collection
|
||||
tmp = nil
|
||||
}()
|
||||
|
||||
return
|
||||
}
|
||||
@@ -67,6 +67,9 @@ const (
|
||||
TagKindPrefix = I("tkc") // tag, kind, created at
|
||||
TagPubkeyPrefix = I("tpc") // tag, pubkey, created at
|
||||
TagKindPubkeyPrefix = I("tkp") // tag, kind, pubkey, created at
|
||||
|
||||
ExpirationPrefix = I("exp") // timestamp of expiration
|
||||
VersionPrefix = I("ver") // database version number, for triggering reindexes when new keys are added (policy is add-only).
|
||||
)
|
||||
|
||||
// Prefix returns the three byte human-readable prefixes that go in front of
|
||||
@@ -97,6 +100,11 @@ func Prefix(prf int) (i I) {
|
||||
return TagPubkeyPrefix
|
||||
case TagKindPubkey:
|
||||
return TagKindPubkeyPrefix
|
||||
|
||||
case Expiration:
|
||||
return ExpirationPrefix
|
||||
case Version:
|
||||
return VersionPrefix
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -135,6 +143,9 @@ func Identify(r io.Reader) (i int, err error) {
|
||||
i = TagPubkey
|
||||
case TagKindPubkeyPrefix:
|
||||
i = TagKindPubkey
|
||||
|
||||
case ExpirationPrefix:
|
||||
i = Expiration
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -146,7 +157,7 @@ type Encs []codec.I
|
||||
type T struct{ Encs }
|
||||
|
||||
// New creates a new indexes.T. The helper functions below have an encode and
|
||||
// decode variant, the decode variant does not add the prefix encoder because it
|
||||
// decode variant, the decode variant doesn't add the prefix encoder because it
|
||||
// has been read by Identify or just is being read, and found because it was
|
||||
// written for the prefix in the iteration.
|
||||
func New(encoders ...codec.I) (i *T) { return &T{encoders} }
|
||||
@@ -186,7 +197,7 @@ func EventDec(ser *types.Uint40) (enc *T) { return New(NewPrefix(), ser) }
|
||||
// Id contains a truncated 8-byte hash of an event index. This is the secondary
|
||||
// key of an event, the primary key is the serial found in the Event.
|
||||
//
|
||||
// 3 prefix|8 Id hash|5 serial
|
||||
// 3 prefix|8 ID hash|5 serial
|
||||
var Id = next()
|
||||
|
||||
func IdVars() (id *types.IdHash, ser *types.Uint40) {
|
||||
@@ -202,7 +213,7 @@ func IdDec(id *types.IdHash, ser *types.Uint40) (enc *T) {
|
||||
// FullIdPubkey is an index designed to enable sorting and filtering of
|
||||
// results found via other indexes, without having to decode the event.
|
||||
//
|
||||
// 3 prefix|5 serial|32 Id|8 pubkey hash|8 timestamp
|
||||
// 3 prefix|5 serial|32 ID|8 pubkey hash|8 timestamp
|
||||
var FullIdPubkey = next()
|
||||
|
||||
func FullIdPubkeyVars() (
|
||||
@@ -359,7 +370,7 @@ func TagPubkeyDec(
|
||||
|
||||
// TagKindPubkey
|
||||
//
|
||||
// 3 prefix|1 key letter|8 value hash|2 kind|8 pubkey hash|8 bytes timestamp|5 byte serial
|
||||
// 3 prefix|1 key letter|8 value hash|2 kind|8 pubkey hash|8 bytes timestamp|5 serial
|
||||
var TagKindPubkey = next()
|
||||
|
||||
func TagKindPubkeyVars() (
|
||||
@@ -383,3 +394,45 @@ func TagKindPubkeyDec(
|
||||
) (enc *T) {
|
||||
return New(NewPrefix(), ki, p, k, v, ca, ser)
|
||||
}
|
||||
|
||||
// Expiration
|
||||
//
|
||||
// 3 prefix|8 timestamp|5 serial
|
||||
var Expiration = next()
|
||||
|
||||
func ExpirationVars() (
|
||||
exp *types.Uint64, ser *types.Uint40,
|
||||
) {
|
||||
return new(types.Uint64), new(types.Uint40)
|
||||
}
|
||||
func ExpirationEnc(
|
||||
exp *types.Uint64, ser *types.Uint40,
|
||||
) (enc *T) {
|
||||
return New(NewPrefix(Expiration), exp, ser)
|
||||
}
|
||||
func ExpirationDec(
|
||||
exp *types.Uint64, ser *types.Uint40,
|
||||
) (enc *T) {
|
||||
return New(NewPrefix(), exp, ser)
|
||||
}
|
||||
|
||||
// Version
|
||||
//
|
||||
// 3 prefix|4 version
|
||||
var Version = next()
|
||||
|
||||
func VersionVars() (
|
||||
ver *types.Uint32,
|
||||
) {
|
||||
return new(types.Uint32)
|
||||
}
|
||||
func VersionEnc(
|
||||
ver *types.Uint32,
|
||||
) (enc *T) {
|
||||
return New(NewPrefix(Version), ver)
|
||||
}
|
||||
func VersionDec(
|
||||
ver *types.Uint32,
|
||||
) (enc *T) {
|
||||
return New(NewPrefix(), ver)
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"bytes"
|
||||
"io"
|
||||
"orly.dev/pkg/database/indexes/types"
|
||||
"orly.dev/pkg/encoders/codecbuf"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"testing"
|
||||
)
|
||||
@@ -49,7 +48,7 @@ func TestPrefixMethods(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test MarshalWrite method
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err := prefix.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -83,7 +82,7 @@ func TestPrefixFunction(t *testing.T) {
|
||||
expected I
|
||||
}{
|
||||
{"Event", Event, EventPrefix},
|
||||
{"Id", Id, IdPrefix},
|
||||
{"ID", Id, IdPrefix},
|
||||
{"FullIdPubkey", FullIdPubkey, FullIdPubkeyPrefix},
|
||||
{"Pubkey", Pubkey, PubkeyPrefix},
|
||||
{"CreatedAt", CreatedAt, CreatedAtPrefix},
|
||||
@@ -122,7 +121,7 @@ func TestIdentify(t *testing.T) {
|
||||
expected int
|
||||
}{
|
||||
{"Event", EventPrefix, Event},
|
||||
{"Id", IdPrefix, Id},
|
||||
{"ID", IdPrefix, Id},
|
||||
{"FullIdPubkey", FullIdPubkeyPrefix, FullIdPubkey},
|
||||
{"Pubkey", PubkeyPrefix, Pubkey},
|
||||
{"CreatedAt", CreatedAtPrefix, CreatedAt},
|
||||
@@ -209,7 +208,7 @@ func TestTStruct(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test MarshalWrite
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err := enc.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -272,7 +271,7 @@ func TestEventFunctions(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test marshaling and unmarshaling
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err := enc.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -318,7 +317,7 @@ func TestIdFunctions(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test marshaling and unmarshaling
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err := enc.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -391,7 +390,7 @@ func TestIdPubkeyFunctions(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test marshaling and unmarshaling
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err = enc.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -452,7 +451,7 @@ func TestCreatedAtFunctions(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test marshaling and unmarshaling
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err := enc.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -516,7 +515,7 @@ func TestPubkeyFunctions(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test marshaling and unmarshaling
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err = enc.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -588,7 +587,7 @@ func TestPubkeyTagFunctions(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test marshaling and unmarshaling
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err = enc.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -660,7 +659,7 @@ func TestTagFunctions(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test marshaling and unmarshaling
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err = enc.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -724,7 +723,7 @@ func TestKindFunctions(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test marshaling and unmarshaling
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err := enc.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -789,7 +788,7 @@ func TestKindTagFunctions(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test marshaling and unmarshaling
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err = enc.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -865,7 +864,7 @@ func TestKindPubkeyFunctions(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test marshaling and unmarshaling
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err = enc.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -941,7 +940,7 @@ func TestKindPubkeyTagFunctions(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test marshaling and unmarshaling
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err = enc.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
|
||||
@@ -84,7 +84,7 @@ func testUint16Sorting(t *testing.T) {
|
||||
// Check if they sort correctly with bytes.Compare
|
||||
for i := 0; i < len(marshaledValues)-1; i++ {
|
||||
if bytes.Compare(marshaledValues[i], marshaledValues[i+1]) >= 0 {
|
||||
t.Errorf("Uint16 values don't sort correctly: %v should be less than %v",
|
||||
t.Errorf("Uint16 values don't sort correctly: %v should be less than %v",
|
||||
values[i], values[i+1])
|
||||
t.Logf("Bytes representation: %v vs %v", marshaledValues[i], marshaledValues[i+1])
|
||||
}
|
||||
@@ -115,7 +115,7 @@ func testUint24Sorting(t *testing.T) {
|
||||
// Check if they sort correctly with bytes.Compare
|
||||
for i := 0; i < len(marshaledValues)-1; i++ {
|
||||
if bytes.Compare(marshaledValues[i], marshaledValues[i+1]) >= 0 {
|
||||
t.Errorf("Uint24 values don't sort correctly: %v should be less than %v",
|
||||
t.Errorf("Uint24 values don't sort correctly: %v should be less than %v",
|
||||
values[i], values[i+1])
|
||||
t.Logf("Bytes representation: %v vs %v", marshaledValues[i], marshaledValues[i+1])
|
||||
}
|
||||
@@ -143,7 +143,7 @@ func testUint32Sorting(t *testing.T) {
|
||||
// Check if they sort correctly with bytes.Compare
|
||||
for i := 0; i < len(marshaledValues)-1; i++ {
|
||||
if bytes.Compare(marshaledValues[i], marshaledValues[i+1]) >= 0 {
|
||||
t.Errorf("Uint32 values don't sort correctly: %v should be less than %v",
|
||||
t.Errorf("Uint32 values don't sort correctly: %v should be less than %v",
|
||||
values[i], values[i+1])
|
||||
t.Logf("Bytes representation: %v vs %v", marshaledValues[i], marshaledValues[i+1])
|
||||
}
|
||||
@@ -174,7 +174,7 @@ func testUint40Sorting(t *testing.T) {
|
||||
// Check if they sort correctly with bytes.Compare
|
||||
for i := 0; i < len(marshaledValues)-1; i++ {
|
||||
if bytes.Compare(marshaledValues[i], marshaledValues[i+1]) >= 0 {
|
||||
t.Errorf("Uint40 values don't sort correctly: %v should be less than %v",
|
||||
t.Errorf("Uint40 values don't sort correctly: %v should be less than %v",
|
||||
values[i], values[i+1])
|
||||
t.Logf("Bytes representation: %v vs %v", marshaledValues[i], marshaledValues[i+1])
|
||||
}
|
||||
@@ -202,7 +202,7 @@ func testUint64Sorting(t *testing.T) {
|
||||
// Check if they sort correctly with bytes.Compare
|
||||
for i := 0; i < len(marshaledValues)-1; i++ {
|
||||
if bytes.Compare(marshaledValues[i], marshaledValues[i+1]) >= 0 {
|
||||
t.Errorf("Uint64 values don't sort correctly: %v should be less than %v",
|
||||
t.Errorf("Uint64 values don't sort correctly: %v should be less than %v",
|
||||
values[i], values[i+1])
|
||||
t.Logf("Bytes representation: %v vs %v", marshaledValues[i], marshaledValues[i+1])
|
||||
}
|
||||
@@ -233,7 +233,7 @@ func testUint16EdgeCases(t *testing.T) {
|
||||
// Check if they sort correctly with bytes.Compare
|
||||
for i := 0; i < len(marshaledValues)-1; i++ {
|
||||
if bytes.Compare(marshaledValues[i], marshaledValues[i+1]) >= 0 {
|
||||
t.Errorf("Uint16 edge case values don't sort correctly: %v should be less than %v",
|
||||
t.Errorf("Uint16 edge case values don't sort correctly: %v should be less than %v",
|
||||
values[i], values[i+1])
|
||||
t.Logf("Bytes representation: %v vs %v", marshaledValues[i], marshaledValues[i+1])
|
||||
}
|
||||
@@ -265,7 +265,7 @@ func testUint24EdgeCases(t *testing.T) {
|
||||
// Check if they sort correctly with bytes.Compare
|
||||
for i := 0; i < len(marshaledValues)-1; i++ {
|
||||
if bytes.Compare(marshaledValues[i], marshaledValues[i+1]) >= 0 {
|
||||
t.Errorf("Uint24 edge case values don't sort correctly: %v should be less than %v",
|
||||
t.Errorf("Uint24 edge case values don't sort correctly: %v should be less than %v",
|
||||
values[i], values[i+1])
|
||||
t.Logf("Bytes representation: %v vs %v", marshaledValues[i], marshaledValues[i+1])
|
||||
}
|
||||
@@ -294,7 +294,7 @@ func testUint32EdgeCases(t *testing.T) {
|
||||
// Check if they sort correctly with bytes.Compare
|
||||
for i := 0; i < len(marshaledValues)-1; i++ {
|
||||
if bytes.Compare(marshaledValues[i], marshaledValues[i+1]) >= 0 {
|
||||
t.Errorf("Uint32 edge case values don't sort correctly: %v should be less than %v",
|
||||
t.Errorf("Uint32 edge case values don't sort correctly: %v should be less than %v",
|
||||
values[i], values[i+1])
|
||||
t.Logf("Bytes representation: %v vs %v", marshaledValues[i], marshaledValues[i+1])
|
||||
}
|
||||
@@ -326,7 +326,7 @@ func testUint40EdgeCases(t *testing.T) {
|
||||
// Check if they sort correctly with bytes.Compare
|
||||
for i := 0; i < len(marshaledValues)-1; i++ {
|
||||
if bytes.Compare(marshaledValues[i], marshaledValues[i+1]) >= 0 {
|
||||
t.Errorf("Uint40 edge case values don't sort correctly: %v should be less than %v",
|
||||
t.Errorf("Uint40 edge case values don't sort correctly: %v should be less than %v",
|
||||
values[i], values[i+1])
|
||||
t.Logf("Bytes representation: %v vs %v", marshaledValues[i], marshaledValues[i+1])
|
||||
}
|
||||
@@ -355,7 +355,7 @@ func testUint64EdgeCases(t *testing.T) {
|
||||
// Check if they sort correctly with bytes.Compare
|
||||
for i := 0; i < len(marshaledValues)-1; i++ {
|
||||
if bytes.Compare(marshaledValues[i], marshaledValues[i+1]) >= 0 {
|
||||
t.Errorf("Uint64 edge case values don't sort correctly: %v should be less than %v",
|
||||
t.Errorf("Uint64 edge case values don't sort correctly: %v should be less than %v",
|
||||
values[i], values[i+1])
|
||||
t.Logf("Bytes representation: %v vs %v", marshaledValues[i], marshaledValues[i+1])
|
||||
}
|
||||
@@ -390,7 +390,7 @@ func TestEndianness(t *testing.T) {
|
||||
result := bytes.Compare(bigEndianValues[i], bigEndianValues[i+1])
|
||||
t.Logf("Compare %d with %d: result = %d", values[i], values[i+1], result)
|
||||
if result >= 0 {
|
||||
t.Errorf("BigEndian values don't sort correctly: %v should be less than %v",
|
||||
t.Errorf("BigEndian values don't sort correctly: %v should be less than %v",
|
||||
values[i], values[i+1])
|
||||
t.Logf("Bytes representation: %v vs %v", bigEndianValues[i], bigEndianValues[i+1])
|
||||
}
|
||||
@@ -404,7 +404,7 @@ func TestEndianness(t *testing.T) {
|
||||
t.Logf("Compare %d with %d: result = %d", values[i], values[i+1], result)
|
||||
if result >= 0 {
|
||||
correctOrder = false
|
||||
t.Logf("LittleEndian values don't sort correctly: %v should be less than %v",
|
||||
t.Logf("LittleEndian values don't sort correctly: %v should be less than %v",
|
||||
values[i], values[i+1])
|
||||
t.Logf("Bytes representation: %v vs %v", littleEndianValues[i], littleEndianValues[i+1])
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@ type Id struct {
|
||||
func (fi *Id) FromId(id []byte) (err error) {
|
||||
if len(id) != IdLen {
|
||||
err = errorf.E(
|
||||
"fullid.FromId: invalid Id length, got %d require %d", len(id),
|
||||
"fullid.FromId: invalid ID length, got %d require %d", len(id),
|
||||
IdLen,
|
||||
)
|
||||
return
|
||||
|
||||
@@ -2,10 +2,10 @@ package types
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"orly.dev/pkg/encoders/codecbuf"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/utils/chk"
|
||||
|
||||
"github.com/minio/sha256-simd"
|
||||
)
|
||||
|
||||
@@ -43,7 +43,7 @@ func TestFromId(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIdMarshalWriteUnmarshalRead(t *testing.T) {
|
||||
// Create a Id with a known value
|
||||
// Create a ID with a known value
|
||||
fi1 := &Id{}
|
||||
validId := make([]byte, sha256.Size)
|
||||
for i := 0; i < sha256.Size; i++ {
|
||||
@@ -55,7 +55,7 @@ func TestIdMarshalWriteUnmarshalRead(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test MarshalWrite
|
||||
buf := codecbuf.Get()
|
||||
buf := new(bytes.Buffer)
|
||||
err = fi1.MarshalWrite(buf)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("MarshalWrite failed: %v", err)
|
||||
@@ -80,7 +80,7 @@ func TestIdMarshalWriteUnmarshalRead(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIdUnmarshalReadWithCorruptedData(t *testing.T) {
|
||||
// Create a Id with a known value
|
||||
// Create a ID with a known value
|
||||
fi1 := &Id{}
|
||||
validId := make([]byte, sha256.Size)
|
||||
for i := 0; i < sha256.Size; i++ {
|
||||
@@ -91,7 +91,7 @@ func TestIdUnmarshalReadWithCorruptedData(t *testing.T) {
|
||||
t.Fatalf("FromId failed: %v", err)
|
||||
}
|
||||
|
||||
// Create a second Id with a different value
|
||||
// Create a second ID with a different value
|
||||
fi2 := &Id{}
|
||||
differentId := make([]byte, sha256.Size)
|
||||
for i := 0; i < sha256.Size; i++ {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user