Compare commits
88 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
7ec8698b62
|
|||
|
2514f875e6
|
|||
|
a6350c8e80
|
|||
|
6c3d22cb38
|
|||
|
8adb129fbe
|
|||
|
fd698af1ca
|
|||
|
ac4fd506e5
|
|||
|
8898b20d4b
|
|||
|
b351d0fb78
|
|||
|
9c8ff2976d
|
|||
|
a7dd958585
|
|||
|
8eb5b839b0
|
|||
|
e57169eeae
|
|||
|
109326dfa3
|
|||
| 52911354a7 | |||
|
b74f4757e7
|
|||
| 2d0ebfe032 | |||
| fff61ceca1 | |||
| b7b7dc7353 | |||
| 996fb3aeb7 | |||
| b9a713d81d | |||
|
1e6ce84e26
|
|||
|
0361f3843a
|
|||
|
4317e8ba4a
|
|||
|
9094f36d6e
|
|||
|
9314467f55
|
|||
|
19e6520587
|
|||
|
9e59a6c315
|
|||
|
9449435c65
|
|||
|
df8e66d9a7
|
|||
|
96eab2270d
|
|||
|
c0bd7d8da3
|
|||
|
1ffb7afb01
|
|||
|
ffa9d85ba5
|
|||
|
1223b1b20e
|
|||
|
deb56664e2
|
|||
|
1641d18993
|
|||
|
eab5d236db
|
|||
|
f3e7188816
|
|||
|
39957c2ebf
|
|||
|
4528d44fc7
|
|||
|
7b19db5806
|
|||
|
14d4417aec
|
|||
|
bdda37732c
|
|||
|
0024611179
|
|||
|
699ba0554e
|
|||
|
c62d685fa4
|
|||
|
6935575654
|
|||
|
80043b46b3
|
|||
|
c68654dccc
|
|||
|
72c6d16739
|
|||
|
366d35ec28
|
|||
|
c36cec44c4
|
|||
|
c91a283520
|
|||
|
bb0693f455
|
|||
|
0d7943be89
|
|||
|
978d9b88cd
|
|||
|
bbfb9b7300
|
|||
|
5b06906673
|
|||
|
f5c3da9bc3
|
|||
|
c608e1075b
|
|||
|
5237fb1a1f
|
|||
|
6901950059
|
|||
|
251fc17933
|
|||
|
fdb9e18b03
|
|||
|
67552edf04
|
|||
|
f25b760d84
|
|||
|
bfa38822e0
|
|||
|
eac5e05e77
|
|||
|
b72f2dd51e
|
|||
|
cc32703be0
|
|||
|
994d26bb09
|
|||
|
ea2d833e66
|
|||
|
af04f89df8
|
|||
|
fab2f104ff
|
|||
|
06940efcec
|
|||
|
0ba36a3f67
|
|||
|
d4bee83992
|
|||
|
aabb536d13
|
|||
|
498073460c
|
|||
|
11d378bfc3
|
|||
|
9b7e8d28de
|
|||
|
c16ee76638
|
|||
|
132fdc9f36
|
|||
|
4f1d48c247
|
|||
|
651791aec1
|
|||
|
53d649c64e
|
|||
|
4dafab3fd6
|
60
.github/workflows/test-and-release.yml
vendored
Normal file
60
.github/workflows/test-and-release.yml
vendored
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
name: Test and Release

on:
  push:
    tags:
      - 'v*.*.*' # Triggers on tags like v1.2.3
  pull_request:
    branches: [ main ]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          # Quoted on purpose: an unquoted go-version is parsed as a YAML
          # float, which mangles versions ending in zero (1.20 -> 1.2).
          go-version: '1.22'
      - name: Cache Go modules
        uses: actions/cache@v4
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
      - name: Install dependencies
        run: go mod download
      - name: Run tests
        run: go test -v ./...

  release:
    if: startsWith(github.ref, 'refs/tags/v')
    runs-on: ubuntu-latest
    needs: test
    steps:
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.22'
      - name: Build binaries
        run: |
          mkdir -p dist
          GOOS=linux GOARCH=amd64 go build -o dist/app-linux-amd64
          GOOS=darwin GOARCH=amd64 go build -o dist/app-darwin-amd64
          GOOS=windows GOARCH=amd64 go build -o dist/app-windows-amd64.exe
      - name: Create Release and Upload Assets
        # One action-gh-release invocation both creates the release for the
        # tag and attaches the built binaries. The previous workflow called
        # the action twice (create, then upload), which is redundant and can
        # race against itself on the same tag.
        uses: softprops/action-gh-release@v2
        with:
          tag_name: ${{ github.ref_name }}
          files: dist/*
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
4
.gitignore
vendored
4
.gitignore
vendored
@@ -85,6 +85,7 @@ node_modules/**
|
|||||||
!.name
|
!.name
|
||||||
!.gitignore
|
!.gitignore
|
||||||
!version
|
!version
|
||||||
|
!out.jsonl
|
||||||
# ...even if they are in subdirectories
|
# ...even if they are in subdirectories
|
||||||
!*/
|
!*/
|
||||||
/blocklist.json
|
/blocklist.json
|
||||||
@@ -102,3 +103,6 @@ pkg/database/testrealy
|
|||||||
/.idea/codeStyles/codeStyleConfig.xml
|
/.idea/codeStyles/codeStyleConfig.xml
|
||||||
/.idea/material_theme_project_new.xml
|
/.idea/material_theme_project_new.xml
|
||||||
/.idea/orly.iml
|
/.idea/orly.iml
|
||||||
|
/.idea/go.imports.xml
|
||||||
|
/.idea/inspectionProfiles/Project_Default.xml
|
||||||
|
/.idea/.name
|
||||||
|
|||||||
173
cmd/benchmark/BENCHMARK_RESULTS.md
Normal file
173
cmd/benchmark/BENCHMARK_RESULTS.md
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
# Orly Relay Benchmark Results
|
||||||
|
|
||||||
|
## Test Environment
|
||||||
|
|
||||||
|
- **Date**: August 5, 2025
|
||||||
|
- **Relay**: Orly v0.4.14
|
||||||
|
- **Port**: 3334 (WebSocket)
|
||||||
|
- **System**: Linux 5.15.0-151-generic
|
||||||
|
- **Storage**: BadgerDB v4
|
||||||
|
|
||||||
|
## Benchmark Test Results
|
||||||
|
|
||||||
|
### Test 1: Basic Performance (1,000 events, 1KB each)
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- Events: 1,000
|
||||||
|
- Event size: 1,024 bytes
|
||||||
|
- Concurrent publishers: 5
|
||||||
|
- Queries: 50
|
||||||
|
|
||||||
|
**Results:**
|
||||||
|
```
|
||||||
|
Publish Performance:
|
||||||
|
Events Published: 1,000
|
||||||
|
Total Data: 4.01 MB
|
||||||
|
Duration: 1.769s
|
||||||
|
Rate: 565.42 events/second
|
||||||
|
Bandwidth: 2.26 MB/second
|
||||||
|
|
||||||
|
Query Performance:
|
||||||
|
Queries Executed: 50
|
||||||
|
Events Returned: 2,000
|
||||||
|
Duration: 3.058s
|
||||||
|
Rate: 16.35 queries/second
|
||||||
|
Avg Events/Query: 40.00
|
||||||
|
```
|
||||||
|
|
||||||
|
### Test 2: Medium Load (10,000 events, 2KB each)
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- Events: 10,000
|
||||||
|
- Event size: 2,048 bytes
|
||||||
|
- Concurrent publishers: 10
|
||||||
|
- Queries: 100
|
||||||
|
|
||||||
|
**Results:**
|
||||||
|
```
|
||||||
|
Publish Performance:
|
||||||
|
Events Published: 10,000
|
||||||
|
Total Data: 76.81 MB
|
||||||
|
Duration: 598.301ms
|
||||||
|
Rate: 16,714.00 events/second
|
||||||
|
Bandwidth: 128.38 MB/second
|
||||||
|
|
||||||
|
Query Performance:
|
||||||
|
Queries Executed: 100
|
||||||
|
Events Returned: 4,000
|
||||||
|
Duration: 8.923s
|
||||||
|
Rate: 11.21 queries/second
|
||||||
|
Avg Events/Query: 40.00
|
||||||
|
```
|
||||||
|
|
||||||
|
### Test 3: High Concurrency (50,000 events, 512 bytes each)
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- Events: 50,000
|
||||||
|
- Event size: 512 bytes
|
||||||
|
- Concurrent publishers: 50
|
||||||
|
- Queries: 200
|
||||||
|
|
||||||
|
**Results:**
|
||||||
|
```
|
||||||
|
Publish Performance:
|
||||||
|
Events Published: 50,000
|
||||||
|
Total Data: 108.63 MB
|
||||||
|
Duration: 2.368s
|
||||||
|
Rate: 21,118.66 events/second
|
||||||
|
Bandwidth: 45.88 MB/second
|
||||||
|
|
||||||
|
Query Performance:
|
||||||
|
Queries Executed: 200
|
||||||
|
Events Returned: 8,000
|
||||||
|
Duration: 36.146s
|
||||||
|
Rate: 5.53 queries/second
|
||||||
|
Avg Events/Query: 40.00
|
||||||
|
```
|
||||||
|
|
||||||
|
### Test 4: Large Events (5,000 events, 10KB each)
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- Events: 5,000
|
||||||
|
- Event size: 10,240 bytes
|
||||||
|
- Concurrent publishers: 10
|
||||||
|
- Queries: 50
|
||||||
|
|
||||||
|
**Results:**
|
||||||
|
```
|
||||||
|
Publish Performance:
|
||||||
|
Events Published: 5,000
|
||||||
|
Total Data: 185.26 MB
|
||||||
|
Duration: 934.328ms
|
||||||
|
Rate: 5,351.44 events/second
|
||||||
|
Bandwidth: 198.28 MB/second
|
||||||
|
|
||||||
|
Query Performance:
|
||||||
|
Queries Executed: 50
|
||||||
|
Events Returned: 2,000
|
||||||
|
Duration: 9.982s
|
||||||
|
Rate: 5.01 queries/second
|
||||||
|
Avg Events/Query: 40.00
|
||||||
|
```
|
||||||
|
|
||||||
|
### Test 5: Query-Only Performance (500 queries)
|
||||||
|
|
||||||
|
**Parameters:**
|
||||||
|
- Skip publishing phase
|
||||||
|
- Queries: 500
|
||||||
|
- Query limit: 100
|
||||||
|
|
||||||
|
**Results:**
|
||||||
|
```
|
||||||
|
Query Performance:
|
||||||
|
Queries Executed: 500
|
||||||
|
Events Returned: 20,000
|
||||||
|
Duration: 1m14.384s
|
||||||
|
Rate: 6.72 queries/second
|
||||||
|
Avg Events/Query: 40.00
|
||||||
|
```
|
||||||
|
|
||||||
|
## Performance Summary
|
||||||
|
|
||||||
|
### Publishing Performance
|
||||||
|
|
||||||
|
| Metric | Best Result | Test Configuration |
|
||||||
|
|--------|-------------|-------------------|
|
||||||
|
| **Peak Event Rate** | 21,118.66 events/sec | 50 concurrent publishers, 512-byte events |
|
||||||
|
| **Peak Bandwidth** | 198.28 MB/sec | 10 concurrent publishers, 10KB events |
|
||||||
|
| **Optimal Balance** | 16,714.00 events/sec @ 128.38 MB/sec | 10 concurrent publishers, 2KB events |
|
||||||
|
|
||||||
|
### Query Performance
|
||||||
|
|
||||||
|
| Query Type | Avg Rate | Notes |
|
||||||
|
|------------|----------|--------|
|
||||||
|
| **Light Load** | 16.35 queries/sec | 50 queries after 1K events |
|
||||||
|
| **Medium Load** | 11.21 queries/sec | 100 queries after 10K events |
|
||||||
|
| **Heavy Load** | 5.53 queries/sec | 200 queries after 50K events |
|
||||||
|
| **Sustained** | 6.72 queries/sec | 500 continuous queries |
|
||||||
|
|
||||||
|
## Key Findings
|
||||||
|
|
||||||
|
1. **Optimal Concurrency**: The relay performs best with 10-50 concurrent publishers, achieving rates of 16,000-21,000 events/second.
|
||||||
|
|
||||||
|
2. **Event Size Impact**:
|
||||||
|
- Smaller events (512B-2KB) achieve higher event rates
|
||||||
|
- Larger events (10KB) achieve higher bandwidth utilization but lower event rates
|
||||||
|
|
||||||
|
3. **Query Performance**: Query performance varies with database size:
|
||||||
|
- Fresh database: ~16 queries/second
|
||||||
|
- After 50K events: ~6 queries/second
|
||||||
|
|
||||||
|
4. **Scalability**: The relay maintains consistent performance up to 50 concurrent connections and can sustain 21,000+ events/second under optimal conditions.
|
||||||
|
|
||||||
|
## Query Filter Distribution
|
||||||
|
|
||||||
|
The benchmark tested 5 different query patterns in rotation:
|
||||||
|
1. Query by kind (20%)
|
||||||
|
2. Query by time range (20%)
|
||||||
|
3. Query by tag (20%)
|
||||||
|
4. Query by author (20%)
|
||||||
|
5. Complex queries with multiple conditions (20%)
|
||||||
|
|
||||||
|
All query types showed similar performance characteristics, indicating well-balanced indexing.
|
||||||
|
|
||||||
112
cmd/benchmark/README.md
Normal file
112
cmd/benchmark/README.md
Normal file
@@ -0,0 +1,112 @@
|
|||||||
|
# Orly Relay Benchmark Tool
|
||||||
|
|
||||||
|
A performance benchmarking tool for Nostr relays that tests both event ingestion speed and query performance.
|
||||||
|
|
||||||
|
## Quick Start (Simple Version)
|
||||||
|
|
||||||
|
The repository includes a simple standalone benchmark tool that doesn't require the full Orly dependencies:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build the simple benchmark
|
||||||
|
go build -o benchmark-simple ./benchmark_simple.go
|
||||||
|
|
||||||
|
# Run with default settings
|
||||||
|
./benchmark-simple
|
||||||
|
|
||||||
|
# Or use the convenience script
|
||||||
|
chmod +x run_benchmark.sh
|
||||||
|
./run_benchmark.sh --relay ws://localhost:7447 --events 10000
|
||||||
|
```
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- **Event Publishing Benchmark**: Tests how fast a relay can accept and store events
|
||||||
|
- **Query Performance Benchmark**: Tests various filter types and query speeds
|
||||||
|
- **Concurrent Publishing**: Supports multiple concurrent publishers to stress test the relay
|
||||||
|
- **Detailed Metrics**: Reports events/second, bandwidth usage, and query performance
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build the tool
|
||||||
|
go build -o benchmark ./cmd/benchmark
|
||||||
|
|
||||||
|
# Run a full benchmark (publish and query)
|
||||||
|
./benchmark -relay ws://localhost:7447 -events 10000 -queries 100
|
||||||
|
|
||||||
|
# Benchmark only publishing
|
||||||
|
./benchmark -relay ws://localhost:7447 -events 50000 -concurrency 20 -skip-query
|
||||||
|
|
||||||
|
# Benchmark only querying
|
||||||
|
./benchmark -relay ws://localhost:7447 -queries 500 -skip-publish
|
||||||
|
|
||||||
|
# Use custom event sizes
|
||||||
|
./benchmark -relay ws://localhost:7447 -events 10000 -size 2048
|
||||||
|
```
|
||||||
|
|
||||||
|
## Options
|
||||||
|
|
||||||
|
- `-relay`: Relay URL to benchmark (default: ws://localhost:7447)
|
||||||
|
- `-events`: Number of events to publish (default: 10000)
|
||||||
|
- `-size`: Average size of event content in bytes (default: 1024)
|
||||||
|
- `-concurrency`: Number of concurrent publishers (default: 10)
|
||||||
|
- `-queries`: Number of queries to execute (default: 100)
|
||||||
|
- `-query-limit`: Limit for each query (default: 100)
|
||||||
|
- `-skip-publish`: Skip the publishing phase
|
||||||
|
- `-skip-query`: Skip the query phase
|
||||||
|
- `-v`: Enable verbose output
|
||||||
|
|
||||||
|
## Query Types Tested
|
||||||
|
|
||||||
|
The benchmark tests various query patterns:
|
||||||
|
1. Query by kind
|
||||||
|
2. Query by time range (last hour)
|
||||||
|
3. Query by tag (p tags)
|
||||||
|
4. Query by author
|
||||||
|
5. Complex queries with multiple conditions
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
The tool provides detailed metrics including:
|
||||||
|
|
||||||
|
**Publish Performance:**
|
||||||
|
- Total events published
|
||||||
|
- Total data transferred
|
||||||
|
- Publishing rate (events/second)
|
||||||
|
- Bandwidth usage (MB/second)
|
||||||
|
|
||||||
|
**Query Performance:**
|
||||||
|
- Total queries executed
|
||||||
|
- Total events returned
|
||||||
|
- Query rate (queries/second)
|
||||||
|
- Average events per query
|
||||||
|
|
||||||
|
## Example Output
|
||||||
|
|
||||||
|
```
|
||||||
|
Publishing 10000 events to ws://localhost:7447...
|
||||||
|
Published 1000 events...
|
||||||
|
Published 2000 events...
|
||||||
|
...
|
||||||
|
|
||||||
|
Querying events from ws://localhost:7447...
|
||||||
|
Executed 20 queries...
|
||||||
|
Executed 40 queries...
|
||||||
|
...
|
||||||
|
|
||||||
|
=== Benchmark Results ===
|
||||||
|
|
||||||
|
Publish Performance:
|
||||||
|
Events Published: 10000
|
||||||
|
Total Data: 12.34 MB
|
||||||
|
Duration: 5.2s
|
||||||
|
Rate: 1923.08 events/second
|
||||||
|
Bandwidth: 2.37 MB/second
|
||||||
|
|
||||||
|
Query Performance:
|
||||||
|
Queries Executed: 100
|
||||||
|
Events Returned: 4523
|
||||||
|
Duration: 2.1s
|
||||||
|
Rate: 47.62 queries/second
|
||||||
|
Avg Events/Query: 45.23
|
||||||
|
```
|
||||||
304
cmd/benchmark/benchmark_simple.go
Normal file
304
cmd/benchmark/benchmark_simple.go
Normal file
@@ -0,0 +1,304 @@
|
|||||||
|
// +build ignore
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"math/rand"
|
||||||
|
"net/url"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/gobwas/ws"
|
||||||
|
"github.com/gobwas/ws/wsutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Event is a minimal Nostr event structure used only for benchmarking.
// Fields mirror the NIP-01 wire format; the pubkey and sig values are
// random placeholders rather than real keys/signatures.
type Event struct {
	ID        string     `json:"id"`
	Pubkey    string     `json:"pubkey"`
	CreatedAt int64      `json:"created_at"`
	Kind      int        `json:"kind"`
	Tags      [][]string `json:"tags"`
	Content   string     `json:"content"`
	Sig       string     `json:"sig"`
}

// generateTestEvent builds a pseudo-random kind-1 event whose content is
// exactly size bytes long. The ID is computed per NIP-01 (sha256 of the
// canonical serialization); pubkey and sig are random hex and will not
// verify — acceptable for relays that skip signature checks while
// benchmarking.
func generateTestEvent(size int) *Event {
	// Fill the content with printable ASCII. Raw random bytes are not
	// valid UTF-8, so json.Marshal would substitute U+FFFD replacement
	// characters and inflate the payload beyond the requested size.
	const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 "
	content := make([]byte, size)
	for i := range content {
		content[i] = charset[rand.Intn(len(charset))]
	}

	// Random placeholder pubkey (32 bytes) and signature (64 bytes).
	pubkey := make([]byte, 32)
	sig := make([]byte, 64)
	rand.Read(pubkey)
	rand.Read(sig)

	ev := &Event{
		Pubkey:    hex.EncodeToString(pubkey),
		CreatedAt: time.Now().Unix(),
		Kind:      1,
		Tags:      [][]string{},
		Content:   string(content),
		Sig:       hex.EncodeToString(sig),
	}

	// NIP-01 event ID: sha256 over the JSON array
	// [0, pubkey, created_at, kind, tags, content].
	serialized, _ := json.Marshal([]interface{}{
		0,
		ev.Pubkey,
		ev.CreatedAt,
		ev.Kind,
		ev.Tags,
		ev.Content,
	})
	hash := sha256.Sum256(serialized)
	ev.ID = hex.EncodeToString(hash[:])

	return ev
}
|
||||||
|
|
||||||
|
func publishEvents(relayURL string, count int, size int, concurrency int) (int64, int64, time.Duration, error) {
|
||||||
|
u, err := url.Parse(relayURL)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var publishedEvents atomic.Int64
|
||||||
|
var publishedBytes atomic.Int64
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
eventsPerWorker := count / concurrency
|
||||||
|
extraEvents := count % concurrency
|
||||||
|
|
||||||
|
start := time.Now()
|
||||||
|
|
||||||
|
for i := 0; i < concurrency; i++ {
|
||||||
|
wg.Add(1)
|
||||||
|
eventsToPublish := eventsPerWorker
|
||||||
|
if i < extraEvents {
|
||||||
|
eventsToPublish++
|
||||||
|
}
|
||||||
|
|
||||||
|
go func(workerID int, eventCount int) {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
// Connect to relay
|
||||||
|
ctx := context.Background()
|
||||||
|
conn, _, _, err := ws.Dial(ctx, u.String())
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Worker %d: connection error: %v", workerID, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
// Publish events
|
||||||
|
for j := 0; j < eventCount; j++ {
|
||||||
|
ev := generateTestEvent(size)
|
||||||
|
|
||||||
|
// Create EVENT message
|
||||||
|
msg, _ := json.Marshal([]interface{}{"EVENT", ev})
|
||||||
|
|
||||||
|
err := wsutil.WriteClientMessage(conn, ws.OpText, msg)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Worker %d: write error: %v", workerID, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
publishedEvents.Add(1)
|
||||||
|
publishedBytes.Add(int64(len(msg)))
|
||||||
|
|
||||||
|
// Read response (OK or error)
|
||||||
|
_, _, err = wsutil.ReadServerData(conn)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Worker %d: read error: %v", workerID, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}(i, eventsToPublish)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
duration := time.Since(start)
|
||||||
|
|
||||||
|
return publishedEvents.Load(), publishedBytes.Load(), duration, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// queryEvents opens a single websocket connection to relayURL and runs
// `queries` sequential REQ subscriptions against it, rotating through five
// filter shapes (kind, time range, tag, author, combined). Each query reads
// EVENT messages until EOSE, then sends CLOSE. It returns the number of
// queries executed, the total events received, and the elapsed time.
// Read errors abort only the current query, not the whole run.
func queryEvents(relayURL string, queries int, limit int) (int64, int64, time.Duration, error) {
	u, err := url.Parse(relayURL)
	if err != nil {
		return 0, 0, 0, err
	}

	ctx := context.Background()
	conn, _, _, err := ws.Dial(ctx, u.String())
	if err != nil {
		return 0, 0, 0, err
	}
	defer conn.Close()

	var totalQueries int64
	var totalEvents int64

	start := time.Now()

	for i := 0; i < queries; i++ {
		// Generate various filter types
		var filter map[string]interface{}

		// The filter shape cycles with i%5 so a run of N queries exercises
		// all five patterns roughly equally (matches the README's 20% split).
		switch i % 5 {
		case 0:
			// Query by kind
			filter = map[string]interface{}{
				"kinds": []int{1},
				"limit": limit,
			}
		case 1:
			// Query by time range
			now := time.Now().Unix()
			filter = map[string]interface{}{
				"since": now - 3600,
				"until": now,
				"limit": limit,
			}
		case 2:
			// Query by tag
			// Random pubkey, so this tag almost certainly matches nothing;
			// it measures filter-evaluation cost rather than result transfer.
			filter = map[string]interface{}{
				"#p":    []string{hex.EncodeToString(randBytes(32))},
				"limit": limit,
			}
		case 3:
			// Query by author
			filter = map[string]interface{}{
				"authors": []string{hex.EncodeToString(randBytes(32))},
				"limit":   limit,
			}
		case 4:
			// Complex query
			now := time.Now().Unix()
			filter = map[string]interface{}{
				"kinds":   []int{1, 6},
				"authors": []string{hex.EncodeToString(randBytes(32))},
				"since":   now - 7200,
				"limit":   limit,
			}
		}

		// Send REQ
		subID := fmt.Sprintf("bench-%d", i)
		msg, _ := json.Marshal([]interface{}{"REQ", subID, filter})

		err := wsutil.WriteClientMessage(conn, ws.OpText, msg)
		if err != nil {
			log.Printf("Query %d: write error: %v", i, err)
			continue
		}

		// Read events until EOSE
		eventCount := 0
		for {
			data, err := wsutil.ReadServerText(conn)
			if err != nil {
				log.Printf("Query %d: read error: %v", i, err)
				break
			}

			// NOTE: this msg deliberately shadows the marshaled REQ above;
			// the outer msg is no longer needed at this point.
			var msg []interface{}
			if err := json.Unmarshal(data, &msg); err != nil {
				// Unparseable frames are skipped, not counted as errors.
				continue
			}

			if len(msg) < 2 {
				continue
			}

			msgType, ok := msg[0].(string)
			if !ok {
				continue
			}

			switch msgType {
			case "EVENT":
				eventCount++
			case "EOSE":
				// End of stored events: leave the read loop for this query.
				goto done
			}
		}
	done:

		// Send CLOSE
		// Best-effort; a failed CLOSE is ignored since a new subID is used
		// for the next query anyway.
		closeMsg, _ := json.Marshal([]interface{}{"CLOSE", subID})
		wsutil.WriteClientMessage(conn, ws.OpText, closeMsg)

		totalQueries++
		totalEvents += int64(eventCount)

		if totalQueries%20 == 0 {
			fmt.Printf(" Executed %d queries...\n", totalQueries)
		}
	}

	duration := time.Since(start)
	return totalQueries, totalEvents, duration, nil
}
|
||||||
|
|
||||||
|
// randBytes returns n pseudo-random bytes drawn from the package-level
// math/rand source. Not cryptographically secure; benchmark use only.
func randBytes(n int) []byte {
	buf := make([]byte, n)
	_, _ = rand.Read(buf) // math/rand.Read never returns an error
	return buf
}
|
||||||
|
|
||||||
|
// main parses the benchmark flags, then runs up to two phases against the
// target relay: a concurrent publish phase and a sequential query phase.
// Either phase can be skipped via -skip-publish / -skip-query. Results are
// printed to stdout; a failed phase aborts the process via log.Fatalf.
func main() {
	var (
		relayURL    = flag.String("relay", "ws://localhost:7447", "Relay URL to benchmark")
		eventCount  = flag.Int("events", 10000, "Number of events to publish")
		eventSize   = flag.Int("size", 1024, "Average size of event content in bytes")
		concurrency = flag.Int("concurrency", 10, "Number of concurrent publishers")
		queryCount  = flag.Int("queries", 100, "Number of queries to execute")
		queryLimit  = flag.Int("query-limit", 100, "Limit for each query")
		skipPublish = flag.Bool("skip-publish", false, "Skip publishing phase")
		skipQuery   = flag.Bool("skip-query", false, "Skip query phase")
	)
	flag.Parse()

	fmt.Printf("=== Nostr Relay Benchmark ===\n\n")

	// Phase 1: Publish events
	if !*skipPublish {
		fmt.Printf("Publishing %d events to %s...\n", *eventCount, *relayURL)
		published, bytes, duration, err := publishEvents(*relayURL, *eventCount, *eventSize, *concurrency)
		if err != nil {
			log.Fatalf("Publishing failed: %v", err)
		}

		fmt.Printf("\nPublish Performance:\n")
		fmt.Printf("  Events Published: %d\n", published)
		fmt.Printf("  Total Data: %.2f MB\n", float64(bytes)/1024/1024)
		fmt.Printf("  Duration: %s\n", duration)
		fmt.Printf("  Rate: %.2f events/second\n", float64(published)/duration.Seconds())
		fmt.Printf("  Bandwidth: %.2f MB/second\n", float64(bytes)/duration.Seconds()/1024/1024)
	}

	// Phase 2: Query events
	if !*skipQuery {
		fmt.Printf("\nQuerying events from %s...\n", *relayURL)
		queries, events, duration, err := queryEvents(*relayURL, *queryCount, *queryLimit)
		if err != nil {
			log.Fatalf("Querying failed: %v", err)
		}

		fmt.Printf("\nQuery Performance:\n")
		fmt.Printf("  Queries Executed: %d\n", queries)
		fmt.Printf("  Events Returned: %d\n", events)
		fmt.Printf("  Duration: %s\n", duration)
		fmt.Printf("  Rate: %.2f queries/second\n", float64(queries)/duration.Seconds())
		// NOTE(review): divides by queries without a zero guard; with
		// -queries 0 and -skip-query unset this prints NaN — confirm intent.
		fmt.Printf("  Avg Events/Query: %.2f\n", float64(events)/float64(queries))
	}
}
|
||||||
320
cmd/benchmark/main.go
Normal file
320
cmd/benchmark/main.go
Normal file
@@ -0,0 +1,320 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"lukechampine.com/frand"
|
||||||
|
"orly.dev/pkg/encoders/event"
|
||||||
|
"orly.dev/pkg/encoders/filter"
|
||||||
|
"orly.dev/pkg/encoders/kind"
|
||||||
|
"orly.dev/pkg/encoders/kinds"
|
||||||
|
"orly.dev/pkg/encoders/tag"
|
||||||
|
"orly.dev/pkg/encoders/tags"
|
||||||
|
"orly.dev/pkg/encoders/text"
|
||||||
|
"orly.dev/pkg/encoders/timestamp"
|
||||||
|
"orly.dev/pkg/protocol/ws"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/context"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"orly.dev/pkg/utils/lol"
|
||||||
|
"os"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BenchmarkResults aggregates the metrics gathered by the publish and
// query phases; main fills it in and printResults renders it.
type BenchmarkResults struct {
	// Publish phase.
	EventsPublished      int64         // events accepted by the relay
	EventsPublishedBytes int64         // total serialized event bytes sent
	PublishDuration      time.Duration // wall-clock time for the whole phase
	PublishRate          float64       // events per second
	PublishBandwidth     float64       // MB per second

	// Query phase.
	QueriesExecuted int64         // queries that completed without error
	QueryDuration   time.Duration // wall-clock time for the whole phase
	QueryRate       float64       // queries per second
	EventsReturned  int64         // total events received across all queries
}
|
||||||
|
|
||||||
|
// main parses flags, optionally raises the log level, and runs the publish
// and/or query benchmark phases against the configured relay, accumulating
// metrics into a shared BenchmarkResults that is printed at the end.
// A failing phase reports to stderr and exits with status 1.
func main() {
	var (
		relayURL    = flag.String("relay", "ws://localhost:7447", "Relay URL to benchmark")
		eventCount  = flag.Int("events", 10000, "Number of events to publish")
		eventSize   = flag.Int("size", 1024, "Average size of event content in bytes")
		concurrency = flag.Int("concurrency", 10, "Number of concurrent publishers")
		queryCount  = flag.Int("queries", 100, "Number of queries to execute")
		queryLimit  = flag.Int("query-limit", 100, "Limit for each query")
		skipPublish = flag.Bool("skip-publish", false, "Skip publishing phase")
		skipQuery   = flag.Bool("skip-query", false, "Skip query phase")
		verbose     = flag.Bool("v", false, "Verbose output")
	)
	flag.Parse()

	if *verbose {
		// Raise the project logger to trace level for verbose runs.
		lol.SetLogLevel("trace")
	}

	// c is the project's context type; presumably a background context —
	// see orly.dev/pkg/utils/context.
	c := context.Bg()
	results := &BenchmarkResults{}

	// Phase 1: Publish events
	if !*skipPublish {
		fmt.Printf("Publishing %d events to %s...\n", *eventCount, *relayURL)
		if err := benchmarkPublish(c, *relayURL, *eventCount, *eventSize, *concurrency, results); chk.E(err) {
			fmt.Fprintf(os.Stderr, "Error during publish benchmark: %v\n", err)
			os.Exit(1)
		}
	}

	// Phase 2: Query events
	if !*skipQuery {
		fmt.Printf("\nQuerying events from %s...\n", *relayURL)
		if err := benchmarkQuery(c, *relayURL, *queryCount, *queryLimit, results); chk.E(err) {
			fmt.Fprintf(os.Stderr, "Error during query benchmark: %v\n", err)
			os.Exit(1)
		}
	}

	// Print results
	printResults(results)
}
|
||||||
|
|
||||||
|
// benchmarkPublish runs the publish phase: `concurrency` goroutines each
// open their own relay connection and publish their share of eventCount
// events (of roughly eventSize bytes), signed with a per-publisher signer.
// Aggregated counts, duration, rate, and bandwidth are written into
// results. Individual connect/publish failures are logged and counted but
// never fail the phase — this function always returns nil.
func benchmarkPublish(c context.T, relayURL string, eventCount, eventSize, concurrency int, results *BenchmarkResults) error {
	// Generate signers for each concurrent publisher
	signers := make([]*testSigner, concurrency)
	for i := range signers {
		signers[i] = newTestSigner()
	}

	// Track published events
	var publishedEvents atomic.Int64
	var publishedBytes atomic.Int64
	var errors atomic.Int64

	// Create wait group for concurrent publishers
	var wg sync.WaitGroup
	// Even split; the first (eventCount % concurrency) publishers take
	// one extra event each.
	eventsPerPublisher := eventCount / concurrency
	extraEvents := eventCount % concurrency

	startTime := time.Now()

	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func(publisherID int) {
			defer wg.Done()

			// Connect to relay
			relay, err := ws.RelayConnect(c, relayURL)
			if err != nil {
				log.E.F("Publisher %d failed to connect: %v", publisherID, err)
				errors.Add(1)
				return
			}
			defer relay.Close()

			// Calculate events for this publisher
			eventsToPublish := eventsPerPublisher
			if publisherID < extraEvents {
				eventsToPublish++
			}

			signer := signers[publisherID]

			// Publish events
			for j := 0; j < eventsToPublish; j++ {
				ev := generateEvent(signer, eventSize)

				if err := relay.Publish(c, ev); err != nil {
					log.E.F("Publisher %d failed to publish event: %v", publisherID, err)
					errors.Add(1)
					continue
				}

				// Re-serialize to count the bytes actually represented by
				// the event; presumably Marshal(nil) appends to a fresh
				// buffer — see orly.dev/pkg/encoders/event.
				evBytes := ev.Marshal(nil)
				publishedEvents.Add(1)
				publishedBytes.Add(int64(len(evBytes)))

				// Progress line every 1000 events (approximate across
				// goroutines, since the counter is shared).
				if publishedEvents.Load()%1000 == 0 {
					fmt.Printf(" Published %d events...\n", publishedEvents.Load())
				}
			}
		}(i)
	}

	wg.Wait()
	duration := time.Since(startTime)

	results.EventsPublished = publishedEvents.Load()
	results.EventsPublishedBytes = publishedBytes.Load()
	results.PublishDuration = duration
	results.PublishRate = float64(results.EventsPublished) / duration.Seconds()
	results.PublishBandwidth = float64(results.EventsPublishedBytes) / duration.Seconds() / 1024 / 1024 // MB/s

	if errors.Load() > 0 {
		fmt.Printf(" Warning: %d errors occurred during publishing\n", errors.Load())
	}

	return nil
}
|
||||||
|
|
||||||
|
// benchmarkQuery runs the query phase over a single relay connection:
// queryCount synchronous queries rotating through five filter shapes
// (kind, time range, tag, author, combined), each capped at queryLimit
// results. Totals are written into results. Individual query failures are
// logged and skipped; only a failed initial connection returns an error.
func benchmarkQuery(c context.T, relayURL string, queryCount, queryLimit int, results *BenchmarkResults) error {
	relay, err := ws.RelayConnect(c, relayURL)
	if err != nil {
		return fmt.Errorf("failed to connect to relay: %w", err)
	}
	defer relay.Close()

	var totalEvents atomic.Int64
	var totalQueries atomic.Int64

	startTime := time.Now()

	for i := 0; i < queryCount; i++ {
		// Generate various filter types
		// i%5 cycles through the five patterns so each gets ~20% of queries.
		var f *filter.F
		switch i % 5 {
		case 0:
			// Query by kind
			limit := uint(queryLimit)
			f = &filter.F{
				Kinds: kinds.New(kind.TextNote),
				Limit: &limit,
			}
		case 1:
			// Query by time range
			now := timestamp.Now()
			since := timestamp.New(now.I64() - 3600) // last hour
			limit := uint(queryLimit)
			f = &filter.F{
				Since: since,
				Until: now,
				Limit: &limit,
			}
		case 2:
			// Query by tag
			// Random pubkey: almost certainly matches no stored events, so
			// this measures filter evaluation cost rather than data transfer.
			limit := uint(queryLimit)
			f = &filter.F{
				Tags:  tags.New(tag.New([]byte("p"), generateRandomPubkey())),
				Limit: &limit,
			}
		case 3:
			// Query by author
			limit := uint(queryLimit)
			f = &filter.F{
				Authors: tag.New(generateRandomPubkey()),
				Limit:   &limit,
			}
		case 4:
			// Complex query with multiple conditions
			now := timestamp.Now()
			since := timestamp.New(now.I64() - 7200)
			limit := uint(queryLimit)
			f = &filter.F{
				Kinds:   kinds.New(kind.TextNote, kind.Repost),
				Authors: tag.New(generateRandomPubkey()),
				Since:   since,
				Limit:   &limit,
			}
		}

		// Execute query
		// QuerySync blocks until the relay finishes the subscription;
		// presumably WithLabel tags the subscription for logging — see
		// orly.dev/pkg/protocol/ws.
		events, err := relay.QuerySync(c, f, ws.WithLabel("benchmark"))
		if err != nil {
			log.E.F("Query %d failed: %v", i, err)
			continue
		}

		totalEvents.Add(int64(len(events)))
		totalQueries.Add(1)

		if totalQueries.Load()%20 == 0 {
			fmt.Printf(" Executed %d queries...\n", totalQueries.Load())
		}
	}

	duration := time.Since(startTime)

	results.QueriesExecuted = totalQueries.Load()
	results.QueryDuration = duration
	results.QueryRate = float64(results.QueriesExecuted) / duration.Seconds()
	results.EventsReturned = totalEvents.Load()

	return nil
}
|
||||||
|
|
||||||
|
func generateEvent(signer *testSigner, contentSize int) *event.E {
|
||||||
|
// Generate content with some variation
|
||||||
|
size := contentSize + frand.Intn(contentSize/2) - contentSize/4
|
||||||
|
if size < 10 {
|
||||||
|
size = 10
|
||||||
|
}
|
||||||
|
|
||||||
|
content := text.NostrEscape(nil, frand.Bytes(size))
|
||||||
|
|
||||||
|
ev := &event.E{
|
||||||
|
Pubkey: signer.Pub(),
|
||||||
|
Kind: kind.TextNote,
|
||||||
|
CreatedAt: timestamp.Now(),
|
||||||
|
Content: content,
|
||||||
|
Tags: generateRandomTags(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := ev.Sign(signer); chk.E(err) {
|
||||||
|
panic(fmt.Sprintf("failed to sign event: %v", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
return ev
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateRandomTags() *tags.T {
|
||||||
|
t := tags.New()
|
||||||
|
|
||||||
|
// Add some random tags
|
||||||
|
numTags := frand.Intn(5)
|
||||||
|
for i := 0; i < numTags; i++ {
|
||||||
|
switch frand.Intn(3) {
|
||||||
|
case 0:
|
||||||
|
// p tag
|
||||||
|
t.AppendUnique(tag.New([]byte("p"), generateRandomPubkey()))
|
||||||
|
case 1:
|
||||||
|
// e tag
|
||||||
|
t.AppendUnique(tag.New([]byte("e"), generateRandomEventID()))
|
||||||
|
case 2:
|
||||||
|
// t tag
|
||||||
|
t.AppendUnique(tag.New([]byte("t"), []byte(fmt.Sprintf("topic%d", frand.Intn(100)))))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return t
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateRandomPubkey returns 32 random bytes standing in for a nostr
// public key; the benchmark does not need keys derived from real secret
// keys.
func generateRandomPubkey() []byte {
	return frand.Bytes(32)
}
|
||||||
|
|
||||||
|
// generateRandomEventID returns 32 random bytes standing in for a nostr
// event id (a 32-byte hash); the id does not correspond to any real event.
func generateRandomEventID() []byte {
	return frand.Bytes(32)
}
|
||||||
|
|
||||||
|
func printResults(results *BenchmarkResults) {
|
||||||
|
fmt.Println("\n=== Benchmark Results ===")
|
||||||
|
|
||||||
|
if results.EventsPublished > 0 {
|
||||||
|
fmt.Println("\nPublish Performance:")
|
||||||
|
fmt.Printf(" Events Published: %d\n", results.EventsPublished)
|
||||||
|
fmt.Printf(" Total Data: %.2f MB\n", float64(results.EventsPublishedBytes)/1024/1024)
|
||||||
|
fmt.Printf(" Duration: %s\n", results.PublishDuration)
|
||||||
|
fmt.Printf(" Rate: %.2f events/second\n", results.PublishRate)
|
||||||
|
fmt.Printf(" Bandwidth: %.2f MB/second\n", results.PublishBandwidth)
|
||||||
|
}
|
||||||
|
|
||||||
|
if results.QueriesExecuted > 0 {
|
||||||
|
fmt.Println("\nQuery Performance:")
|
||||||
|
fmt.Printf(" Queries Executed: %d\n", results.QueriesExecuted)
|
||||||
|
fmt.Printf(" Events Returned: %d\n", results.EventsReturned)
|
||||||
|
fmt.Printf(" Duration: %s\n", results.QueryDuration)
|
||||||
|
fmt.Printf(" Rate: %.2f queries/second\n", results.QueryRate)
|
||||||
|
avgEventsPerQuery := float64(results.EventsReturned) / float64(results.QueriesExecuted)
|
||||||
|
fmt.Printf(" Avg Events/Query: %.2f\n", avgEventsPerQuery)
|
||||||
|
}
|
||||||
|
}
|
||||||
82
cmd/benchmark/run_benchmark.sh
Executable file
82
cmd/benchmark/run_benchmark.sh
Executable file
@@ -0,0 +1,82 @@
|
|||||||
|
#!/bin/bash
#
# Simple Nostr Relay Benchmark Script.
#
# Builds the benchmark-simple tool if it is not already present, then runs
# it against the configured relay.
#
# Usage: run_benchmark.sh [--relay URL] [--events N] [--size N]
#        [--concurrency N] [--queries N] [--query-limit N]
#        [--skip-publish] [--skip-query]

# Fail fast on errors, unset variables, and pipeline failures.
set -euo pipefail

# Default values
RELAY_URL="ws://localhost:7447"
EVENTS=10000
SIZE=1024
CONCURRENCY=10
QUERIES=100
QUERY_LIMIT=100
# Optional pass-through flags, collected in an array so that when neither is
# given no stray empty argument is passed to the tool (and "set -u" does not
# trip on an unset variable).
EXTRA_FLAGS=()

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --relay)
            RELAY_URL="$2"
            shift 2
            ;;
        --events)
            EVENTS="$2"
            shift 2
            ;;
        --size)
            SIZE="$2"
            shift 2
            ;;
        --concurrency)
            CONCURRENCY="$2"
            shift 2
            ;;
        --queries)
            QUERIES="$2"
            shift 2
            ;;
        --query-limit)
            QUERY_LIMIT="$2"
            shift 2
            ;;
        --skip-publish)
            EXTRA_FLAGS+=("-skip-publish")
            shift
            ;;
        --skip-query)
            EXTRA_FLAGS+=("-skip-query")
            shift
            ;;
        *)
            echo "Unknown option: $1"
            echo "Usage: $0 [--relay URL] [--events N] [--size N] [--concurrency N] [--queries N] [--query-limit N] [--skip-publish] [--skip-query]"
            exit 1
            ;;
    esac
done

# Build the benchmark tool if it doesn't exist. The previous "$?" check was
# dead code under "set -e"; test the build command directly instead.
if [ ! -f benchmark-simple ]; then
    echo "Building benchmark tool..."
    if ! go build -o benchmark-simple ./benchmark_simple.go; then
        echo "Failed to build benchmark tool"
        exit 1
    fi
fi

# Run the benchmark
echo "Running Nostr relay benchmark..."
echo "Relay: $RELAY_URL"
echo "Events: $EVENTS (size: $SIZE bytes)"
echo "Concurrency: $CONCURRENCY"
echo "Queries: $QUERIES (limit: $QUERY_LIMIT)"
echo ""

# All expansions quoted; the conditional array expansion keeps older bash
# versions (< 4.4) from treating an empty EXTRA_FLAGS as unbound under -u.
./benchmark-simple \
    -relay "$RELAY_URL" \
    -events "$EVENTS" \
    -size "$SIZE" \
    -concurrency "$CONCURRENCY" \
    -queries "$QUERIES" \
    -query-limit "$QUERY_LIMIT" \
    ${EXTRA_FLAGS[@]+"${EXTRA_FLAGS[@]}"}
||||||
63
cmd/benchmark/test_signer.go
Normal file
63
cmd/benchmark/test_signer.go
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
package main

import (
	"lukechampine.com/frand"
	"orly.dev/pkg/interfaces/signer"
)

// testSigner is a simple signer implementation for benchmarking. It does
// not perform real cryptography: keys are random bytes, Sign returns a
// random 64-byte blob, and Verify always succeeds. This keeps benchmark
// runs fast while still exercising the signing call sites.
type testSigner struct {
	pub []byte // 32 random bytes standing in for a public key
	sec []byte // 32 random bytes standing in for a secret key
}

// newTestSigner returns a testSigner with freshly generated random key
// material.
func newTestSigner() *testSigner {
	return &testSigner{
		pub: frand.Bytes(32),
		sec: frand.Bytes(32),
	}
}

// Pub returns the (random) public key bytes.
func (s *testSigner) Pub() []byte {
	return s.pub
}

// Sec returns the (random) secret key bytes.
func (s *testSigner) Sec() []byte {
	return s.sec
}

// Sign ignores msg and returns 64 random bytes; signatures produced here
// are not verifiable by real implementations.
func (s *testSigner) Sign(msg []byte) ([]byte, error) {
	return frand.Bytes(64), nil
}

// Verify unconditionally reports success, regardless of msg or sig.
func (s *testSigner) Verify(msg, sig []byte) (bool, error) {
	return true, nil
}

// InitSec stores sec as the secret key and generates a fresh random public
// key. NOTE(review): the pubkey is random rather than derived from sec,
// which is fine for a benchmark mock but would be wrong for real use.
func (s *testSigner) InitSec(sec []byte) error {
	s.sec = sec
	s.pub = frand.Bytes(32)
	return nil
}

// InitPub stores pub as the public key.
func (s *testSigner) InitPub(pub []byte) error {
	s.pub = pub
	return nil
}

// Zero wipes the secret key bytes in place.
func (s *testSigner) Zero() {
	for i := range s.sec {
		s.sec[i] = 0
	}
}

// ECDH returns 32 random bytes instead of a real shared secret.
func (s *testSigner) ECDH(pubkey []byte) ([]byte, error) {
	return frand.Bytes(32), nil
}

// Generate is a no-op; key material is created in newTestSigner.
func (s *testSigner) Generate() error {
	return nil
}

// Compile-time check that testSigner satisfies the signer.I interface.
var _ signer.I = (*testSigner)(nil)
|
||||||
@@ -56,12 +56,12 @@ as:
|
|||||||
extensions and become active in place of the LetsEncrypt certificates
|
extensions and become active in place of the LetsEncrypt certificates
|
||||||
|
|
||||||
> Note that the match is greedy, so you can explicitly separately give a subdomain
|
> Note that the match is greedy, so you can explicitly separately give a subdomain
|
||||||
certificate and it will be selected even if there is a wildcard that also matches.
|
certificate, and it will be selected even if there is a wildcard that also matches.
|
||||||
|
|
||||||
# IMPORTANT
|
# IMPORTANT
|
||||||
|
|
||||||
With Comodo SSL (sectigo RSA) certificates you also need to append the intermediate certificate
|
With Comodo SSL (sectigo RSA) certificates you also need to append the intermediate certificate
|
||||||
to the `.crt` file in order to get it to work properly with openssl library based tools like
|
to the `.crt` file to get it to work properly with openssl library based tools like
|
||||||
wget, curl and the go tool, which is quite important if you want to do subdomains on a wildcard
|
wget, curl and the go tool, which is quite important if you want to do subdomains on a wildcard
|
||||||
certificate.
|
certificate.
|
||||||
|
|
||||||
|
|||||||
104
cmd/lerproxy/app/app.go
Normal file
104
cmd/lerproxy/app/app.go
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/context"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RunArgs holds the command-line configuration for the lerproxy server.
// Flag names, defaults, and help text live in the go-arg struct tags.
type RunArgs struct {
	// Addr is the address the TLS server listens on.
	Addr string `arg:"-l,--listen" default:":https" help:"address to listen at"`
	// Conf is the path to the host-to-backend mapping file.
	Conf string `arg:"-m,--map" default:"mapping.txt" help:"file with host/backend mapping"`
	// Cache is the directory where LetsEncrypt keys/certificates are cached.
	Cache string `arg:"-c,--cachedir" default:"/var/cache/letsencrypt" help:"path to directory to cache key and certificates"`
	// HSTS enables the Strict-Transport-Security response header.
	HSTS bool `arg:"-h,--hsts" help:"add Strict-Transport-Security header"`
	// Email is the contact address presented to the LetsEncrypt CA.
	Email string `arg:"-e,--email" help:"contact email address presented to letsencrypt CA"`
	// HTTP is the plain-HTTP address for redirects and ACME challenges.
	HTTP string `arg:"--http" default:":http" help:"optional address to serve http-to-https redirects and ACME http-01 challenge responses"`
	// RTO is the request read timeout.
	RTO time.Duration `arg:"-r,--rto" default:"1m" help:"maximum duration before timing out read of the request"`
	// WTO is the response write timeout.
	WTO time.Duration `arg:"-w,--wto" default:"5m" help:"maximum duration before timing out write of the response"`
	// Idle is the idle-connection keepalive window; only used when RTO and
	// WTO are both zero (see Run).
	Idle time.Duration `arg:"-i,--idle" help:"how long idle connection is kept before closing (set rto, wto to 0 to use this)"`
	// Certs lists manually supplied domain:path certificate pairs; each
	// path is expected to have matching .key and .crt PEM files.
	Certs []string `arg:"--cert,separate" help:"certificates and the domain they match: eg: orly.dev:/path/to/cert - this will indicate to load two, one with extension .key and one with .crt, each expected to be PEM encoded TLS private and public keys, respectively"`
	// Rewrites string `arg:"-r,--rewrites" default:"rewrites.txt"`
}
|
||||||
|
|
||||||
|
// Run starts the lerproxy server described by args and blocks until all of
// its goroutines exit. It launches (via an errgroup tied to c): an optional
// plain-HTTP server for redirects/ACME challenges, the main TLS server, and
// shutdown watchers that drain both servers when the context is cancelled.
//
// Returns an error if the cache directory is unset, server setup fails, or
// any group goroutine returns a non-nil error.
func Run(c context.T, args RunArgs) (err error) {
	if args.Cache == "" {
		err = log.E.Err("no cache specified")
		return
	}
	var srv *http.Server
	var httpHandler http.Handler
	if srv, httpHandler, err = SetupServer(args); chk.E(err) {
		return
	}
	srv.ReadHeaderTimeout = 5 * time.Second
	// Only override the timeouts when explicitly configured.
	if args.RTO > 0 {
		srv.ReadTimeout = args.RTO
	}
	if args.WTO > 0 {
		srv.WriteTimeout = args.WTO
	}
	group, ctx := errgroup.WithContext(c)
	if args.HTTP != "" {
		// Plain-HTTP listener for http-to-https redirects and ACME
		// http-01 challenge responses.
		httpServer := http.Server{
			Addr:         args.HTTP,
			Handler:      httpHandler,
			ReadTimeout:  10 * time.Second,
			WriteTimeout: 10 * time.Second,
		}
		group.Go(
			func() (err error) {
				// NOTE(review): the listen error is logged by chk.E but a
				// nil error is returned, so a failed HTTP listener does
				// not cancel the group.
				chk.E(httpServer.ListenAndServe())
				return
			},
		)
		group.Go(
			func() error {
				// Graceful shutdown of the HTTP server once the group
				// context is cancelled; ctx here shadows the outer one
				// deliberately with a fresh 1s deadline.
				<-ctx.Done()
				ctx, cancel := context.Timeout(
					context.Bg(),
					time.Second,
				)
				defer cancel()
				return httpServer.Shutdown(ctx)
			},
		)
	}
	// With any read/write timeout set (or no idle window configured) the
	// stock listener suffices; otherwise wrap the TCP listener so idle
	// connections are closed after args.Idle.
	if srv.ReadTimeout != 0 || srv.WriteTimeout != 0 || args.Idle == 0 {
		group.Go(
			func() (err error) {
				// Certificates come from the server's TLS config
				// (autocert), hence the empty cert/key paths.
				chk.E(srv.ListenAndServeTLS("", ""))
				return
			},
		)
	} else {
		group.Go(
			func() (err error) {
				var ln net.Listener
				if ln, err = net.Listen("tcp", srv.Addr); chk.E(err) {
					return
				}
				defer ln.Close()
				// Wrap with the idle-timeout listener so dead
				// connections are eventually closed.
				ln = Listener{
					Duration:    args.Idle,
					TCPListener: ln.(*net.TCPListener),
				}
				err = srv.ServeTLS(ln, "", "")
				chk.E(err)
				return
			},
		)
	}
	group.Go(
		func() error {
			// Graceful shutdown of the TLS server on context cancel.
			<-ctx.Done()
			ctx, cancel := context.Timeout(context.Bg(), time.Second)
			defer cancel()
			return srv.Shutdown(ctx)
		},
	)
	return group.Wait()
}
|
||||||
@@ -1,6 +1,4 @@
|
|||||||
// Package timeout provides a simple extension of a net.TCPConn with a
|
package app
|
||||||
// configurable read/write deadline.
|
|
||||||
package timeout
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"net"
|
"net"
|
||||||
63
cmd/lerproxy/app/go-vanity.go
Normal file
63
cmd/lerproxy/app/go-vanity.go
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GoVanity configures an HTTP handler for redirecting requests to vanity URLs
|
||||||
|
// based on the provided hostname and backend address.
|
||||||
|
//
|
||||||
|
// # Parameters
|
||||||
|
//
|
||||||
|
// - hn (string): The hostname associated with the vanity URL.
|
||||||
|
//
|
||||||
|
// - ba (string): The backend address, expected to be in the format
|
||||||
|
// "git+<repository-path>".
|
||||||
|
//
|
||||||
|
// - mux (*http.ServeMux): The HTTP serve multiplexer where the handler will be
|
||||||
|
// registered.
|
||||||
|
//
|
||||||
|
// # Expected behaviour
|
||||||
|
//
|
||||||
|
// - Splits the backend address to extract the repository path from the "git+" prefix.
|
||||||
|
//
|
||||||
|
// - If the split fails, logs an error and returns without registering a handler.
|
||||||
|
//
|
||||||
|
// - Generates an HTML redirect page containing metadata for Go import and
|
||||||
|
// redirects to the extracted repository path.
|
||||||
|
//
|
||||||
|
// - Registers a handler on the provided ServeMux that serves this redirect page
|
||||||
|
// when requests are made to the specified hostname.
|
||||||
|
func GoVanity(hn, ba string, mux *http.ServeMux) {
|
||||||
|
split := strings.Split(ba, "git+")
|
||||||
|
if len(split) != 2 {
|
||||||
|
log.E.Ln("invalid go vanity redirect: %s: %s", hn, ba)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
redirector := fmt.Sprintf(
|
||||||
|
`<html><head><meta name="go-import" content="%s git %s"/><meta http-equiv = "refresh" content = " 3 ; url = %s"/></head><body>redirecting to <a href="%s">%s</a></body></html>`,
|
||||||
|
hn, split[1], split[1], split[1], split[1],
|
||||||
|
)
|
||||||
|
mux.HandleFunc(
|
||||||
|
hn+"/",
|
||||||
|
func(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
writer.Header().Set(
|
||||||
|
"Access-Control-Allow-Methods",
|
||||||
|
"GET,HEAD,PUT,PATCH,POST,DELETE",
|
||||||
|
)
|
||||||
|
writer.Header().Set("Access-Control-Allow-Origin", "*")
|
||||||
|
writer.Header().Set("Content-Type", "text/html")
|
||||||
|
writer.Header().Set(
|
||||||
|
"Content-Length", fmt.Sprint(len(redirector)),
|
||||||
|
)
|
||||||
|
writer.Header().Set(
|
||||||
|
"strict-transport-security",
|
||||||
|
"max-age=0; includeSubDomains",
|
||||||
|
)
|
||||||
|
fmt.Fprint(writer, redirector)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
@@ -1,20 +1,17 @@
|
|||||||
// Package tcpkeepalive implements a net.TCPListener with a singleton set period
|
package app
|
||||||
// for a default 3 minute keep-aline.
|
|
||||||
package tcpkeepalive
|
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"net"
|
"net"
|
||||||
"orly.dev/cmd/lerproxy/timeout"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Period can be changed prior to opening a Listener to alter its'
|
// Period can be changed before opening a Listener to alter its
|
||||||
// KeepAlivePeriod.
|
// KeepAlivePeriod.
|
||||||
var Period = 3 * time.Minute
|
var Period = 3 * time.Minute
|
||||||
|
|
||||||
// Listener sets TCP keep-alive timeouts on accepted connections.
|
// Listener sets TCP keep-alive timeouts on accepted connections.
|
||||||
// It's used by ListenAndServe and ListenAndServeTLS so dead TCP connections
|
// It is used by ListenAndServe and ListenAndServeTLS so dead TCP connections
|
||||||
// (e.g. closing laptop mid-download) eventually go away.
|
// (e.g. closing laptop mid-download) eventually go away.
|
||||||
type Listener struct {
|
type Listener struct {
|
||||||
time.Duration
|
time.Duration
|
||||||
@@ -33,7 +30,7 @@ func (ln Listener) Accept() (conn net.Conn, e error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
if ln.Duration != 0 {
|
if ln.Duration != 0 {
|
||||||
return timeout.Conn{Duration: ln.Duration, TCPConn: tc}, nil
|
return Conn{Duration: ln.Duration, TCPConn: tc}, nil
|
||||||
}
|
}
|
||||||
return tc, nil
|
return tc, nil
|
||||||
}
|
}
|
||||||
80
cmd/lerproxy/app/nostr-dns.go
Normal file
80
cmd/lerproxy/app/nostr-dns.go
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NostrJSON models the document served at /.well-known/nostr.json: a map
// of names to values under "names" and a map of keys to relay-URL lists
// under "relays". (Presumably names map to pubkeys per NIP-05 — confirm
// against the files this proxy is configured with.)
type NostrJSON struct {
	Names  map[string]string   `json:"names"`
	Relays map[string][]string `json:"relays"`
}
||||||
|
|
||||||
|
// NostrDNS handles the configuration and registration of a Nostr DNS endpoint
|
||||||
|
// for a given hostname and backend address.
|
||||||
|
//
|
||||||
|
// # Parameters
|
||||||
|
//
|
||||||
|
// - hn (string): The hostname for which the Nostr DNS entry is being configured.
|
||||||
|
//
|
||||||
|
// - ba (string): The path to the JSON file containing the Nostr DNS data.
|
||||||
|
//
|
||||||
|
// - mux (*http.ServeMux): The HTTP serve multiplexer to which the Nostr DNS
|
||||||
|
// handler will be registered.
|
||||||
|
//
|
||||||
|
// # Return Values
|
||||||
|
//
|
||||||
|
// - err (error): An error if any step fails during the configuration or
|
||||||
|
// registration process.
|
||||||
|
//
|
||||||
|
// # Expected behaviour
|
||||||
|
//
|
||||||
|
// - Reads the JSON file specified by `ba` and parses its contents into a
|
||||||
|
// NostrJSON struct.
|
||||||
|
//
|
||||||
|
// - Registers a new HTTP handler on the provided `mux` for the
|
||||||
|
// `.well-known/nostr.json` endpoint under the specified hostname.
|
||||||
|
//
|
||||||
|
// - The handler serves the parsed Nostr DNS data with appropriate HTTP headers
|
||||||
|
// set for CORS and content type.
|
||||||
|
func NostrDNS(hn, ba string, mux *http.ServeMux) (err error) {
|
||||||
|
log.T.Ln(hn, ba)
|
||||||
|
var fb []byte
|
||||||
|
if fb, err = os.ReadFile(ba); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var v NostrJSON
|
||||||
|
if err = json.Unmarshal(fb, &v); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var jb []byte
|
||||||
|
if jb, err = json.Marshal(v); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
nostrJSON := string(jb)
|
||||||
|
mux.HandleFunc(
|
||||||
|
hn+"/.well-known/nostr.json",
|
||||||
|
func(writer http.ResponseWriter, request *http.Request) {
|
||||||
|
log.T.Ln("serving nostr json to", hn)
|
||||||
|
writer.Header().Set(
|
||||||
|
"Access-Control-Allow-Methods",
|
||||||
|
"GET,HEAD,PUT,PATCH,POST,DELETE",
|
||||||
|
)
|
||||||
|
writer.Header().Set("Access-Control-Allow-Origin", "*")
|
||||||
|
writer.Header().Set("Content-Type", "application/json")
|
||||||
|
writer.Header().Set(
|
||||||
|
"Content-Length", fmt.Sprint(len(nostrJSON)),
|
||||||
|
)
|
||||||
|
writer.Header().Set(
|
||||||
|
"strict-transport-security",
|
||||||
|
"max-age=0; includeSubDomains",
|
||||||
|
)
|
||||||
|
fmt.Fprint(writer, nostrJSON)
|
||||||
|
},
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
15
cmd/lerproxy/app/proxy.go
Normal file
15
cmd/lerproxy/app/proxy.go
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
package app
|
||||||
|
|
||||||
|
import "net/http"
|
||||||
|
|
||||||
|
// Proxy wraps an http.Handler and adds an HSTS (Strict-Transport-Security)
// header to every response before delegating to the wrapped handler.
type Proxy struct {
	http.Handler
}

// ServeHTTP sets the Strict-Transport-Security header (one year, including
// subdomains, preload-eligible) and forwards the request to the embedded
// handler.
func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	w.Header().Set(
		"Strict-Transport-Security",
		"max-age=31536000; includeSubDomains; preload",
	)
	p.Handler.ServeHTTP(w, r)
}
|
||||||
62
cmd/lerproxy/app/read-mapping.go
Normal file
62
cmd/lerproxy/app/read-mapping.go
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"fmt"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ReadMapping reads a mapping file and returns a map of hostnames to backend
|
||||||
|
// addresses.
|
||||||
|
//
|
||||||
|
// # Parameters
|
||||||
|
//
|
||||||
|
// - file (string): The path to the mapping file to read.
|
||||||
|
//
|
||||||
|
// # Return Values
|
||||||
|
//
|
||||||
|
// - m (map[string]string): A map containing the hostname to backend address
|
||||||
|
// mappings parsed from the file.
|
||||||
|
//
|
||||||
|
// - err (error): An error if any step during reading or parsing fails.
|
||||||
|
//
|
||||||
|
// # Expected behaviour
|
||||||
|
//
|
||||||
|
// - Opens the specified file and reads its contents line by line.
|
||||||
|
//
|
||||||
|
// - Skips lines that are empty or start with a '#'.
|
||||||
|
//
|
||||||
|
// - Splits each valid line into two parts using the first colon as the
|
||||||
|
// separator.
|
||||||
|
//
|
||||||
|
// - Trims whitespace from both parts and adds them to the map.
|
||||||
|
//
|
||||||
|
// - Returns any error encountered during file operations or parsing.
|
||||||
|
func ReadMapping(file string) (m map[string]string, err error) {
|
||||||
|
var f *os.File
|
||||||
|
if f, err = os.Open(file); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m = make(map[string]string)
|
||||||
|
sc := bufio.NewScanner(f)
|
||||||
|
for sc.Scan() {
|
||||||
|
if b := sc.Bytes(); len(b) == 0 || b[0] == '#' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
s := strings.SplitN(sc.Text(), ":", 2)
|
||||||
|
if len(s) != 2 {
|
||||||
|
err = fmt.Errorf("invalid line: %q", sc.Text())
|
||||||
|
log.E.Ln(err)
|
||||||
|
chk.E(f.Close())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m[strings.TrimSpace(s[0])] = strings.TrimSpace(s[1])
|
||||||
|
}
|
||||||
|
err = sc.Err()
|
||||||
|
chk.E(err)
|
||||||
|
chk.E(f.Close())
|
||||||
|
return
|
||||||
|
}
|
||||||
63
cmd/lerproxy/app/reverse.go
Normal file
63
cmd/lerproxy/app/reverse.go
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/http/httputil"
|
||||||
|
"net/url"
|
||||||
|
"orly.dev/cmd/lerproxy/utils"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewSingleHostReverseProxy is a copy of httputil.NewSingleHostReverseProxy
|
||||||
|
// with the addition of forwarding headers:
|
||||||
|
//
|
||||||
|
// - Legacy X-Forwarded-* headers (X-Forwarded-Proto, X-Forwarded-For,
|
||||||
|
// X-Forwarded-Host)
|
||||||
|
//
|
||||||
|
// - Standardized Forwarded header according to RFC 7239
|
||||||
|
// (https://developer.mozilla.org/en-US/docs/Web/HTTP/Reference/Headers/Forwarded)
|
||||||
|
func NewSingleHostReverseProxy(target *url.URL) (rp *httputil.ReverseProxy) {
|
||||||
|
targetQuery := target.RawQuery
|
||||||
|
director := func(req *http.Request) {
|
||||||
|
log.D.S(req)
|
||||||
|
req.URL.Scheme = target.Scheme
|
||||||
|
req.URL.Host = target.Host
|
||||||
|
req.URL.Path = utils.SingleJoiningSlash(target.Path, req.URL.Path)
|
||||||
|
if targetQuery == "" || req.URL.RawQuery == "" {
|
||||||
|
req.URL.RawQuery = targetQuery + req.URL.RawQuery
|
||||||
|
} else {
|
||||||
|
req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
|
||||||
|
}
|
||||||
|
if _, ok := req.Header["User-Agent"]; !ok {
|
||||||
|
req.Header.Set("User-Agent", "")
|
||||||
|
}
|
||||||
|
// Set X-Forwarded-* headers for backward compatibility
|
||||||
|
req.Header.Set("X-Forwarded-Proto", "https")
|
||||||
|
// Get client IP address
|
||||||
|
clientIP := req.RemoteAddr
|
||||||
|
if fwdFor := req.Header.Get("X-Forwarded-For"); fwdFor != "" {
|
||||||
|
clientIP = fwdFor + ", " + clientIP
|
||||||
|
}
|
||||||
|
req.Header.Set("X-Forwarded-For", clientIP)
|
||||||
|
// Set X-Forwarded-Host if not already set
|
||||||
|
if _, exists := req.Header["X-Forwarded-Host"]; !exists {
|
||||||
|
req.Header.Set("X-Forwarded-Host", req.Host)
|
||||||
|
}
|
||||||
|
// Set standardized Forwarded header according to RFC 7239
|
||||||
|
// Format: Forwarded: by=<identifier>;for=<identifier>;host=<host>;proto=<http|https>
|
||||||
|
forwardedProto := "https"
|
||||||
|
forwardedHost := req.Host
|
||||||
|
forwardedFor := clientIP
|
||||||
|
// Build the Forwarded header value
|
||||||
|
forwardedHeader := "proto=" + forwardedProto
|
||||||
|
if forwardedFor != "" {
|
||||||
|
forwardedHeader += ";for=" + forwardedFor
|
||||||
|
}
|
||||||
|
if forwardedHost != "" {
|
||||||
|
forwardedHeader += ";host=" + forwardedHost
|
||||||
|
}
|
||||||
|
req.Header.Set("Forwarded", forwardedHeader)
|
||||||
|
}
|
||||||
|
rp = &httputil.ReverseProxy{Director: director}
|
||||||
|
return
|
||||||
|
}
|
||||||
124
cmd/lerproxy/app/set-proxy.go
Normal file
124
cmd/lerproxy/app/set-proxy.go
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
log2 "log"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httputil"
|
||||||
|
"net/url"
|
||||||
|
"orly.dev/pkg/utils/context"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetProxy creates an HTTP handler that routes incoming requests to
// specified backend addresses based on hostname mappings.
//
// # Parameters
//
//   - mapping (map[string]string): Keys are hostnames, values are backend
//     addresses.
//
// # Return Values
//
//   - h (http.Handler): The configured routing handler.
//
//   - err (error): Non-nil if the mapping is empty or a hostname is
//     invalid.
//
// # Expected behaviour
//
// Each mapping entry is dispatched on the shape of its backend address:
//
//   - "@..." on Linux: abstract unix socket backend.
//
//   - "git+...": Go vanity-import redirect (GoVanity).
//
//   - absolute path ending in the path separator: static file server.
//
//   - absolute path ending in "nostr.json": NostrDNS endpoint.
//
//   - other absolute path: unix socket backend.
//
//   - "http(s)://..." URL: reverse proxy with CORS response headers.
//
//   - anything else: plain TCP reverse proxy to the address.
func SetProxy(mapping map[string]string) (h http.Handler, err error) {
	if len(mapping) == 0 {
		return nil, fmt.Errorf("empty mapping")
	}
	mux := http.NewServeMux()
	for hostname, backendAddr := range mapping {
		// Per-iteration copies captured by the closures below.
		hn, ba := hostname, backendAddr
		if strings.ContainsRune(hn, os.PathSeparator) {
			err = log.E.Err("invalid hostname: %q", hn)
			return
		}
		network := "tcp"
		if ba != "" && ba[0] == '@' && runtime.GOOS == "linux" {
			// append \0 to address so addrlen for connect(2) is calculated in a
			// way compatible with some other implementations (i.e. uwsgi)
			network, ba = "unix", ba+string(byte(0))
		} else if strings.HasPrefix(ba, "git+") {
			GoVanity(hn, ba, mux)
			continue
		} else if filepath.IsAbs(ba) {
			network = "unix"
			switch {
			case strings.HasSuffix(ba, string(os.PathSeparator)):
				// path specified as directory with explicit trailing slash; add
				// this path as static site
				fs := http.FileServer(http.Dir(ba))
				mux.Handle(hn+"/", fs)
				continue
			case strings.HasSuffix(ba, "nostr.json"):
				// NOTE(review): both branches continue, so a NostrDNS
				// failure is assigned to the named return err but never
				// returned (the final return overwrites it with nil);
				// the entry is simply skipped.
				if err = NostrDNS(hn, ba, mux); err != nil {
					continue
				}
				continue
			}
		} else if u, err := url.Parse(ba); err == nil {
			// NOTE(review): err here deliberately(?) shadows the named
			// return; a parse failure falls through to the raw TCP proxy
			// below rather than aborting.
			switch u.Scheme {
			case "http", "https":
				rp := NewSingleHostReverseProxy(u)
				// Force permissive CORS headers onto backend responses.
				modifyCORSResponse := func(res *http.Response) error {
					res.Header.Set(
						"Access-Control-Allow-Methods",
						"GET,HEAD,PUT,PATCH,POST,DELETE",
					)
					// res.Header.Set("Access-Control-Allow-Credentials", "true")
					res.Header.Set("Access-Control-Allow-Origin", "*")
					return nil
				}
				rp.ModifyResponse = modifyCORSResponse
				rp.ErrorLog = log2.New(
					os.Stderr, "lerproxy", log2.Llongfile,
				)
				rp.BufferPool = Pool{}
				mux.Handle(hn+"/", rp)
				continue
			}
		}
		// Fallback: reverse-proxy over `network` (tcp or unix) to ba.
		rp := &httputil.ReverseProxy{
			Director: func(req *http.Request) {
				req.URL.Scheme = "http"
				req.URL.Host = req.Host
				req.Header.Set("X-Forwarded-Proto", "https")
				req.Header.Set("X-Forwarded-For", req.RemoteAddr)
				req.Header.Set(
					"Access-Control-Allow-Methods",
					"GET,HEAD,PUT,PATCH,POST,DELETE",
				)
				req.Header.Set("Access-Control-Allow-Origin", "*")
				log.D.Ln(req.URL, req.RemoteAddr)
			},
			Transport: &http.Transport{
				// Dial the configured backend regardless of the request's
				// own host; 5s connect timeout.
				DialContext: func(c context.T, n, addr string) (
					net.Conn, error,
				) {
					return net.DialTimeout(network, ba, 5*time.Second)
				},
			},
			// Transport-level errors are intentionally discarded here.
			ErrorLog:   log2.New(io.Discard, "", 0),
			BufferPool: Pool{},
		}
		mux.Handle(hn+"/", rp)
	}
	return mux, nil
}
|
||||||
81
cmd/lerproxy/app/setup-server.go
Normal file
81
cmd/lerproxy/app/setup-server.go
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"golang.org/x/crypto/acme/autocert"
|
||||||
|
"net/http"
|
||||||
|
"orly.dev/cmd/lerproxy/utils"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetupServer configures and returns an HTTP server instance with proxy
|
||||||
|
// handling and automatic certificate management based on the provided RunArgs
|
||||||
|
// configuration.
|
||||||
|
//
|
||||||
|
// # Parameters
|
||||||
|
//
|
||||||
|
// - a (RunArgs): The configuration arguments containing settings for the server
|
||||||
|
// address, cache directory, mapping file, HSTS header, email, and certificates.
|
||||||
|
//
|
||||||
|
// # Return Values
|
||||||
|
//
|
||||||
|
// - s (*http.Server): The configured HTTP server instance.
|
||||||
|
//
|
||||||
|
// - h (http.Handler): The HTTP handler used for proxying requests and managing
|
||||||
|
// automatic certificate challenges.
|
||||||
|
//
|
||||||
|
// - err (error): An error if any step during setup fails.
|
||||||
|
//
|
||||||
|
// # Expected behaviour
|
||||||
|
//
|
||||||
|
// - Reads the hostname to backend address mapping from the specified
|
||||||
|
// configuration file.
|
||||||
|
//
|
||||||
|
// - Sets up a proxy handler that routes incoming requests based on the defined
|
||||||
|
// mappings.
|
||||||
|
//
|
||||||
|
// - Enables HSTS header support if enabled in the RunArgs.
|
||||||
|
//
|
||||||
|
// - Creates the cache directory for storing certificates and keys if it does not
|
||||||
|
// already exist.
|
||||||
|
//
|
||||||
|
// - Configures an autocert.Manager to handle automatic certificate management,
|
||||||
|
// including hostname whitelisting, email contact, and cache storage.
|
||||||
|
//
|
||||||
|
// - Initializes the HTTP server with proxy handler, address, and TLS
|
||||||
|
// configuration.
|
||||||
|
func SetupServer(a RunArgs) (s *http.Server, h http.Handler, err error) {
|
||||||
|
var mapping map[string]string
|
||||||
|
if mapping, err = ReadMapping(a.Conf); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var proxy http.Handler
|
||||||
|
if proxy, err = SetProxy(mapping); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if a.HSTS {
|
||||||
|
proxy = &Proxy{Handler: proxy}
|
||||||
|
}
|
||||||
|
if err = os.MkdirAll(a.Cache, 0700); chk.E(err) {
|
||||||
|
err = fmt.Errorf(
|
||||||
|
"cannot create cache directory %q: %v",
|
||||||
|
a.Cache, err,
|
||||||
|
)
|
||||||
|
chk.E(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
m := autocert.Manager{
|
||||||
|
Prompt: autocert.AcceptTOS,
|
||||||
|
Cache: autocert.DirCache(a.Cache),
|
||||||
|
HostPolicy: autocert.HostWhitelist(utils.GetKeys(mapping)...),
|
||||||
|
Email: a.Email,
|
||||||
|
}
|
||||||
|
s = &http.Server{
|
||||||
|
Handler: proxy,
|
||||||
|
Addr: a.Addr,
|
||||||
|
TLSConfig: TLSConfig(&m, a.Certs...),
|
||||||
|
}
|
||||||
|
h = m.HTTPHandler(nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
87
cmd/lerproxy/app/tls-config.go
Normal file
87
cmd/lerproxy/app/tls-config.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
package app
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/tls"
|
||||||
|
"golang.org/x/crypto/acme/autocert"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TLSConfig creates a custom TLS configuration that combines automatic
|
||||||
|
// certificate management with explicitly provided certificates.
|
||||||
|
//
|
||||||
|
// # Parameters
|
||||||
|
//
|
||||||
|
// - m (*autocert.Manager): The autocert manager used for managing automatic
|
||||||
|
// certificate generation and retrieval.
|
||||||
|
//
|
||||||
|
// - certs (...string): A variadic list of certificate definitions in the format
|
||||||
|
// "domain:/path/to/cert", where each domain maps to a certificate file. The
|
||||||
|
// corresponding key file is expected to be at "/path/to/cert.key".
|
||||||
|
//
|
||||||
|
// # Return Values
|
||||||
|
//
|
||||||
|
// - tc (*tls.Config): A new TLS configuration that prioritises explicitly
|
||||||
|
// provided certificates over automatically generated ones.
|
||||||
|
//
|
||||||
|
// # Expected behaviour
|
||||||
|
//
|
||||||
|
// - Loads all explicitly provided certificates and maps them to their
|
||||||
|
// respective domains.
|
||||||
|
//
|
||||||
|
// - Creates a custom GetCertificate function that checks if the requested
|
||||||
|
// domain matches any of the explicitly provided certificates, returning those
|
||||||
|
// first.
|
||||||
|
//
|
||||||
|
// - Falls back to the autocert manager's GetCertificate method if no explicit
|
||||||
|
// certificate is found for the requested domain.
|
||||||
|
func TLSConfig(m *autocert.Manager, certs ...string) (tc *tls.Config) {
|
||||||
|
certMap := make(map[string]*tls.Certificate)
|
||||||
|
var mx sync.Mutex
|
||||||
|
for _, cert := range certs {
|
||||||
|
split := strings.Split(cert, ":")
|
||||||
|
if len(split) != 2 {
|
||||||
|
log.E.F("invalid certificate parameter format: `%s`", cert)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
var c tls.Certificate
|
||||||
|
if c, err = tls.LoadX509KeyPair(
|
||||||
|
split[1]+".crt", split[1]+".key",
|
||||||
|
); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
certMap[split[0]] = &c
|
||||||
|
}
|
||||||
|
tc = m.TLSConfig()
|
||||||
|
tc.GetCertificate = func(helo *tls.ClientHelloInfo) (
|
||||||
|
cert *tls.Certificate, err error,
|
||||||
|
) {
|
||||||
|
mx.Lock()
|
||||||
|
var own string
|
||||||
|
for i := range certMap {
|
||||||
|
// to also handle explicit subdomain certs, prioritize over a root
|
||||||
|
// wildcard.
|
||||||
|
if helo.ServerName == i {
|
||||||
|
own = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// if it got to us and ends in the same-name dot tld assume the
|
||||||
|
// subdomain was redirected, or it is a wildcard certificate; thus
|
||||||
|
// only the ending needs to match.
|
||||||
|
if strings.HasSuffix(helo.ServerName, i) {
|
||||||
|
own = i
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if own != "" {
|
||||||
|
defer mx.Unlock()
|
||||||
|
return certMap[own], nil
|
||||||
|
}
|
||||||
|
mx.Unlock()
|
||||||
|
return m.GetCertificate(helo)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
// Package buf implements a simple concurrent safe buffer pool for raw bytes.
|
|
||||||
package buf
|
|
||||||
|
|
||||||
import "sync"
|
|
||||||
|
|
||||||
var bufferPool = &sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
buf := make([]byte, 32*1024)
|
|
||||||
return &buf
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
type Pool struct{}
|
|
||||||
|
|
||||||
func (bp Pool) Get() []byte { return *(bufferPool.Get().(*[]byte)) }
|
|
||||||
func (bp Pool) Put(b []byte) { bufferPool.Put(&b) }
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
// Package hsts implements a HTTP handler that enforces HSTS.
|
|
||||||
package hsts
|
|
||||||
|
|
||||||
import "net/http"
|
|
||||||
|
|
||||||
type Proxy struct {
|
|
||||||
http.Handler
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|
||||||
w.Header().
|
|
||||||
Set("Strict-Transport-Security",
|
|
||||||
"max-age=31536000; includeSubDomains; preload")
|
|
||||||
p.ServeHTTP(w, r)
|
|
||||||
}
|
|
||||||
16
cmd/lerproxy/lerproxy.service
Normal file
16
cmd/lerproxy/lerproxy.service
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
# systemd unit to run lerproxy as a service
|
||||||
|
[Unit]
|
||||||
|
Description=lerproxy
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
User=mleku
|
||||||
|
ExecStart=/home/mleku/.local/bin/lerproxy -m /home/mleku/mapping.txt
|
||||||
|
Restart=always
|
||||||
|
Wants=network-online.target
|
||||||
|
# waits for wireguard service to come up before starting, remove the wg-quick@wg0 section if running it directly on an
|
||||||
|
# internet routeable connection
|
||||||
|
After=network.target network-online.target wg-quick@wg0.service
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
@@ -1,402 +1,23 @@
|
|||||||
// Command lerproxy implements https reverse proxy with automatic LetsEncrypt
|
|
||||||
// usage for multiple hostnames/backends,your own SSL certificates, nostr NIP-05
|
|
||||||
// DNS verification hosting and Go vanity redirects.
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"orly.dev/cmd/lerproxy/app"
|
||||||
"crypto/tls"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
stdLog "log"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httputil"
|
|
||||||
"net/url"
|
|
||||||
"orly.dev/cmd/lerproxy/buf"
|
|
||||||
"orly.dev/cmd/lerproxy/hsts"
|
|
||||||
"orly.dev/cmd/lerproxy/reverse"
|
|
||||||
"orly.dev/cmd/lerproxy/tcpkeepalive"
|
|
||||||
"orly.dev/cmd/lerproxy/util"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"orly.dev/pkg/utils/context"
|
"orly.dev/pkg/utils/context"
|
||||||
"orly.dev/pkg/utils/log"
|
"orly.dev/pkg/utils/log"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/alexflint/go-arg"
|
"github.com/alexflint/go-arg"
|
||||||
"golang.org/x/crypto/acme/autocert"
|
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type runArgs struct {
|
var args app.RunArgs
|
||||||
Addr string `arg:"-l,--listen" default:":https" help:"address to listen at"`
|
|
||||||
Conf string `arg:"-m,--map" default:"mapping.txt" help:"file with host/backend mapping"`
|
|
||||||
Cache string `arg:"-c,--cachedir" default:"/var/cache/letsencrypt" help:"path to directory to cache key and certificates"`
|
|
||||||
HSTS bool `arg:"-h,--hsts" help:"add Strict-Transport-Security header"`
|
|
||||||
Email string `arg:"-e,--email" help:"contact email address presented to letsencrypt CA"`
|
|
||||||
HTTP string `arg:"--http" default:":http" help:"optional address to serve http-to-https redirects and ACME http-01 challenge responses"`
|
|
||||||
RTO time.Duration `arg:"-r,--rto" default:"1m" help:"maximum duration before timing out read of the request"`
|
|
||||||
WTO time.Duration `arg:"-w,--wto" default:"5m" help:"maximum duration before timing out write of the response"`
|
|
||||||
Idle time.Duration `arg:"-i,--idle" help:"how long idle connection is kept before closing (set rto, wto to 0 to use this)"`
|
|
||||||
Certs []string `arg:"--cert,separate" help:"certificates and the domain they match: eg: orly.dev:/path/to/cert - this will indicate to load two, one with extension .key and one with .crt, each expected to be PEM encoded TLS private and public keys, respectively"`
|
|
||||||
// Rewrites string `arg:"-r,--rewrites" default:"rewrites.txt"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var args runArgs
|
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
arg.MustParse(&args)
|
arg.MustParse(&args)
|
||||||
ctx, cancel := signal.NotifyContext(context.Bg(), os.Interrupt)
|
ctx, cancel := signal.NotifyContext(context.Bg(), os.Interrupt)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
if err := run(ctx, args); chk.T(err) {
|
if err := app.Run(ctx, args); chk.T(err) {
|
||||||
log.F.Ln(err)
|
log.F.Ln(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func run(c context.T, args runArgs) (err error) {
|
|
||||||
|
|
||||||
if args.Cache == "" {
|
|
||||||
err = log.E.Err("no cache specified")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var srv *http.Server
|
|
||||||
var httpHandler http.Handler
|
|
||||||
if srv, httpHandler, err = setupServer(args); chk.E(err) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
srv.ReadHeaderTimeout = 5 * time.Second
|
|
||||||
if args.RTO > 0 {
|
|
||||||
srv.ReadTimeout = args.RTO
|
|
||||||
}
|
|
||||||
if args.WTO > 0 {
|
|
||||||
srv.WriteTimeout = args.WTO
|
|
||||||
}
|
|
||||||
group, ctx := errgroup.WithContext(c)
|
|
||||||
if args.HTTP != "" {
|
|
||||||
httpServer := http.Server{
|
|
||||||
Addr: args.HTTP,
|
|
||||||
Handler: httpHandler,
|
|
||||||
ReadTimeout: 10 * time.Second,
|
|
||||||
WriteTimeout: 10 * time.Second,
|
|
||||||
}
|
|
||||||
group.Go(
|
|
||||||
func() (err error) {
|
|
||||||
chk.E(httpServer.ListenAndServe())
|
|
||||||
return
|
|
||||||
},
|
|
||||||
)
|
|
||||||
group.Go(
|
|
||||||
func() error {
|
|
||||||
<-ctx.Done()
|
|
||||||
ctx, cancel := context.Timeout(
|
|
||||||
context.Bg(),
|
|
||||||
time.Second,
|
|
||||||
)
|
|
||||||
defer cancel()
|
|
||||||
return httpServer.Shutdown(ctx)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
if srv.ReadTimeout != 0 || srv.WriteTimeout != 0 || args.Idle == 0 {
|
|
||||||
group.Go(
|
|
||||||
func() (err error) {
|
|
||||||
chk.E(srv.ListenAndServeTLS("", ""))
|
|
||||||
return
|
|
||||||
},
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
group.Go(
|
|
||||||
func() (err error) {
|
|
||||||
var ln net.Listener
|
|
||||||
if ln, err = net.Listen("tcp", srv.Addr); chk.E(err) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
defer ln.Close()
|
|
||||||
ln = tcpkeepalive.Listener{
|
|
||||||
Duration: args.Idle,
|
|
||||||
TCPListener: ln.(*net.TCPListener),
|
|
||||||
}
|
|
||||||
err = srv.ServeTLS(ln, "", "")
|
|
||||||
chk.E(err)
|
|
||||||
return
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
group.Go(
|
|
||||||
func() error {
|
|
||||||
<-ctx.Done()
|
|
||||||
ctx, cancel := context.Timeout(context.Bg(), time.Second)
|
|
||||||
defer cancel()
|
|
||||||
return srv.Shutdown(ctx)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
return group.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
// TLSConfig returns a TLSConfig that works with a LetsEncrypt automatic SSL cert issuer as well
|
|
||||||
// as any provided .pem certificates from providers.
|
|
||||||
//
|
|
||||||
// The certs are provided in the form "example.com:/path/to/cert.pem"
|
|
||||||
func TLSConfig(m *autocert.Manager, certs ...string) (tc *tls.Config) {
|
|
||||||
certMap := make(map[string]*tls.Certificate)
|
|
||||||
var mx sync.Mutex
|
|
||||||
for _, cert := range certs {
|
|
||||||
split := strings.Split(cert, ":")
|
|
||||||
if len(split) != 2 {
|
|
||||||
log.E.F("invalid certificate parameter format: `%s`", cert)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var err error
|
|
||||||
var c tls.Certificate
|
|
||||||
if c, err = tls.LoadX509KeyPair(
|
|
||||||
split[1]+".crt", split[1]+".key",
|
|
||||||
); chk.E(err) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
certMap[split[0]] = &c
|
|
||||||
}
|
|
||||||
tc = m.TLSConfig()
|
|
||||||
tc.GetCertificate = func(helo *tls.ClientHelloInfo) (
|
|
||||||
cert *tls.Certificate, err error,
|
|
||||||
) {
|
|
||||||
mx.Lock()
|
|
||||||
var own string
|
|
||||||
for i := range certMap {
|
|
||||||
// to also handle explicit subdomain certs, prioritize over a root wildcard.
|
|
||||||
if helo.ServerName == i {
|
|
||||||
own = i
|
|
||||||
break
|
|
||||||
}
|
|
||||||
// if it got to us and ends in the same name dot tld assume the subdomain was
|
|
||||||
// redirected or it's a wildcard certificate, thus only the ending needs to match.
|
|
||||||
if strings.HasSuffix(helo.ServerName, i) {
|
|
||||||
own = i
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if own != "" {
|
|
||||||
defer mx.Unlock()
|
|
||||||
return certMap[own], nil
|
|
||||||
}
|
|
||||||
mx.Unlock()
|
|
||||||
return m.GetCertificate(helo)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func setupServer(a runArgs) (s *http.Server, h http.Handler, err error) {
|
|
||||||
var mapping map[string]string
|
|
||||||
if mapping, err = readMapping(a.Conf); chk.E(err) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var proxy http.Handler
|
|
||||||
if proxy, err = setProxy(mapping); chk.E(err) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if a.HSTS {
|
|
||||||
proxy = &hsts.Proxy{Handler: proxy}
|
|
||||||
}
|
|
||||||
if err = os.MkdirAll(a.Cache, 0700); chk.E(err) {
|
|
||||||
err = fmt.Errorf(
|
|
||||||
"cannot create cache directory %q: %v",
|
|
||||||
a.Cache, err,
|
|
||||||
)
|
|
||||||
chk.E(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
m := autocert.Manager{
|
|
||||||
Prompt: autocert.AcceptTOS,
|
|
||||||
Cache: autocert.DirCache(a.Cache),
|
|
||||||
HostPolicy: autocert.HostWhitelist(util.GetKeys(mapping)...),
|
|
||||||
Email: a.Email,
|
|
||||||
}
|
|
||||||
s = &http.Server{
|
|
||||||
Handler: proxy,
|
|
||||||
Addr: a.Addr,
|
|
||||||
TLSConfig: TLSConfig(&m, a.Certs...),
|
|
||||||
}
|
|
||||||
h = m.HTTPHandler(nil)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
type NostrJSON struct {
|
|
||||||
Names map[string]string `json:"names"`
|
|
||||||
Relays map[string][]string `json:"relays"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func setProxy(mapping map[string]string) (h http.Handler, err error) {
|
|
||||||
if len(mapping) == 0 {
|
|
||||||
return nil, fmt.Errorf("empty mapping")
|
|
||||||
}
|
|
||||||
mux := http.NewServeMux()
|
|
||||||
for hostname, backendAddr := range mapping {
|
|
||||||
hn, ba := hostname, backendAddr
|
|
||||||
if strings.ContainsRune(hn, os.PathSeparator) {
|
|
||||||
err = log.E.Err("invalid hostname: %q", hn)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
network := "tcp"
|
|
||||||
if ba != "" && ba[0] == '@' && runtime.GOOS == "linux" {
|
|
||||||
// append \0 to address so addrlen for connect(2) is calculated in a
|
|
||||||
// way compatible with some other implementations (i.e. uwsgi)
|
|
||||||
network, ba = "unix", ba+string(byte(0))
|
|
||||||
} else if strings.HasPrefix(ba, "git+") {
|
|
||||||
split := strings.Split(ba, "git+")
|
|
||||||
if len(split) != 2 {
|
|
||||||
log.E.Ln("invalid go vanity redirect: %s: %s", hn, ba)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
redirector := fmt.Sprintf(
|
|
||||||
`<html><head><meta name="go-import" content="%s git %s"/><meta http-equiv = "refresh" content = " 3 ; url = %s"/></head><body>redirecting to <a href="%s">%s</a></body></html>`,
|
|
||||||
hn, split[1], split[1], split[1], split[1],
|
|
||||||
)
|
|
||||||
mux.HandleFunc(
|
|
||||||
hn+"/",
|
|
||||||
func(writer http.ResponseWriter, request *http.Request) {
|
|
||||||
writer.Header().Set(
|
|
||||||
"Access-Control-Allow-Methods",
|
|
||||||
"GET,HEAD,PUT,PATCH,POST,DELETE",
|
|
||||||
)
|
|
||||||
writer.Header().Set("Access-Control-Allow-Origin", "*")
|
|
||||||
writer.Header().Set("Content-Type", "text/html")
|
|
||||||
writer.Header().Set(
|
|
||||||
"Content-Length", fmt.Sprint(len(redirector)),
|
|
||||||
)
|
|
||||||
writer.Header().Set(
|
|
||||||
"strict-transport-security",
|
|
||||||
"max-age=0; includeSubDomains",
|
|
||||||
)
|
|
||||||
fmt.Fprint(writer, redirector)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
} else if filepath.IsAbs(ba) {
|
|
||||||
network = "unix"
|
|
||||||
switch {
|
|
||||||
case strings.HasSuffix(ba, string(os.PathSeparator)):
|
|
||||||
// path specified as directory with explicit trailing slash; add
|
|
||||||
// this path as static site
|
|
||||||
fs := http.FileServer(http.Dir(ba))
|
|
||||||
mux.Handle(hn+"/", fs)
|
|
||||||
continue
|
|
||||||
case strings.HasSuffix(ba, "nostr.json"):
|
|
||||||
log.I.Ln(hn, ba)
|
|
||||||
var fb []byte
|
|
||||||
if fb, err = os.ReadFile(ba); chk.E(err) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var v NostrJSON
|
|
||||||
if err = json.Unmarshal(fb, &v); chk.E(err) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var jb []byte
|
|
||||||
if jb, err = json.Marshal(v); chk.E(err) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
nostrJSON := string(jb)
|
|
||||||
mux.HandleFunc(
|
|
||||||
hn+"/.well-known/nostr.json",
|
|
||||||
func(writer http.ResponseWriter, request *http.Request) {
|
|
||||||
log.I.Ln("serving nostr json to", hn)
|
|
||||||
writer.Header().Set(
|
|
||||||
"Access-Control-Allow-Methods",
|
|
||||||
"GET,HEAD,PUT,PATCH,POST,DELETE",
|
|
||||||
)
|
|
||||||
writer.Header().Set("Access-Control-Allow-Origin", "*")
|
|
||||||
writer.Header().Set("Content-Type", "application/json")
|
|
||||||
writer.Header().Set(
|
|
||||||
"Content-Length", fmt.Sprint(len(nostrJSON)),
|
|
||||||
)
|
|
||||||
writer.Header().Set(
|
|
||||||
"strict-transport-security",
|
|
||||||
"max-age=0; includeSubDomains",
|
|
||||||
)
|
|
||||||
fmt.Fprint(writer, nostrJSON)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
} else if u, err := url.Parse(ba); err == nil {
|
|
||||||
switch u.Scheme {
|
|
||||||
case "http", "https":
|
|
||||||
rp := reverse.NewSingleHostReverseProxy(u)
|
|
||||||
modifyCORSResponse := func(res *http.Response) error {
|
|
||||||
res.Header.Set(
|
|
||||||
"Access-Control-Allow-Methods",
|
|
||||||
"GET,HEAD,PUT,PATCH,POST,DELETE",
|
|
||||||
)
|
|
||||||
// res.Header.Set("Access-Control-Allow-Credentials", "true")
|
|
||||||
res.Header.Set("Access-Control-Allow-Origin", "*")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
rp.ModifyResponse = modifyCORSResponse
|
|
||||||
rp.ErrorLog = stdLog.New(
|
|
||||||
os.Stderr, "lerproxy", stdLog.Llongfile,
|
|
||||||
)
|
|
||||||
rp.BufferPool = buf.Pool{}
|
|
||||||
mux.Handle(hn+"/", rp)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rp := &httputil.ReverseProxy{
|
|
||||||
Director: func(req *http.Request) {
|
|
||||||
req.URL.Scheme = "http"
|
|
||||||
req.URL.Host = req.Host
|
|
||||||
req.Header.Set("X-Forwarded-Proto", "https")
|
|
||||||
req.Header.Set("X-Forwarded-For", req.RemoteAddr)
|
|
||||||
req.Header.Set(
|
|
||||||
"Access-Control-Allow-Methods",
|
|
||||||
"GET,HEAD,PUT,PATCH,POST,DELETE",
|
|
||||||
)
|
|
||||||
// req.Header.Set("Access-Control-Allow-Credentials", "true")
|
|
||||||
req.Header.Set("Access-Control-Allow-Origin", "*")
|
|
||||||
log.D.Ln(req.URL, req.RemoteAddr)
|
|
||||||
},
|
|
||||||
Transport: &http.Transport{
|
|
||||||
DialContext: func(c context.T, n, addr string) (
|
|
||||||
net.Conn, error,
|
|
||||||
) {
|
|
||||||
return net.DialTimeout(network, ba, 5*time.Second)
|
|
||||||
},
|
|
||||||
},
|
|
||||||
ErrorLog: stdLog.New(io.Discard, "", 0),
|
|
||||||
BufferPool: buf.Pool{},
|
|
||||||
}
|
|
||||||
mux.Handle(hn+"/", rp)
|
|
||||||
}
|
|
||||||
return mux, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func readMapping(file string) (m map[string]string, err error) {
|
|
||||||
var f *os.File
|
|
||||||
if f, err = os.Open(file); chk.E(err) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
m = make(map[string]string)
|
|
||||||
sc := bufio.NewScanner(f)
|
|
||||||
for sc.Scan() {
|
|
||||||
if b := sc.Bytes(); len(b) == 0 || b[0] == '#' {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
s := strings.SplitN(sc.Text(), ":", 2)
|
|
||||||
if len(s) != 2 {
|
|
||||||
err = fmt.Errorf("invalid line: %q", sc.Text())
|
|
||||||
log.E.Ln(err)
|
|
||||||
chk.E(f.Close())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
m[strings.TrimSpace(s[0])] = strings.TrimSpace(s[1])
|
|
||||||
}
|
|
||||||
err = sc.Err()
|
|
||||||
chk.E(err)
|
|
||||||
chk.E(f.Close())
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,34 +0,0 @@
|
|||||||
// Package reverse is a copy of httputil.NewSingleHostReverseProxy with addition
|
|
||||||
// of "X-Forwarded-Proto" header.
|
|
||||||
package reverse
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"net/http/httputil"
|
|
||||||
"net/url"
|
|
||||||
"orly.dev/cmd/lerproxy/util"
|
|
||||||
"orly.dev/pkg/utils/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewSingleHostReverseProxy is a copy of httputil.NewSingleHostReverseProxy
|
|
||||||
// with addition of "X-Forwarded-Proto" header.
|
|
||||||
func NewSingleHostReverseProxy(target *url.URL) (rp *httputil.ReverseProxy) {
|
|
||||||
targetQuery := target.RawQuery
|
|
||||||
director := func(req *http.Request) {
|
|
||||||
log.D.S(req)
|
|
||||||
req.URL.Scheme = target.Scheme
|
|
||||||
req.URL.Host = target.Host
|
|
||||||
req.URL.Path = util.SingleJoiningSlash(target.Path, req.URL.Path)
|
|
||||||
if targetQuery == "" || req.URL.RawQuery == "" {
|
|
||||||
req.URL.RawQuery = targetQuery + req.URL.RawQuery
|
|
||||||
} else {
|
|
||||||
req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
|
|
||||||
}
|
|
||||||
if _, ok := req.Header["User-Agent"]; !ok {
|
|
||||||
req.Header.Set("User-Agent", "")
|
|
||||||
}
|
|
||||||
req.Header.Set("X-Forwarded-Proto", "https")
|
|
||||||
}
|
|
||||||
rp = &httputil.ReverseProxy{Director: director}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
// Package util provides some helpers for lerproxy, a tool to convert maps of
|
|
||||||
// strings to slices of the same strings, and a helper to avoid putting two / in
|
|
||||||
// a URL.
|
|
||||||
package util
|
|
||||||
|
|
||||||
import "strings"
|
|
||||||
|
|
||||||
func GetKeys(m map[string]string) []string {
|
|
||||||
out := make([]string, 0, len(m))
|
|
||||||
for k := range m {
|
|
||||||
out = append(out, k)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func SingleJoiningSlash(a, b string) string {
|
|
||||||
suffixSlash := strings.HasSuffix(a, "/")
|
|
||||||
prefixSlash := strings.HasPrefix(b, "/")
|
|
||||||
switch {
|
|
||||||
case suffixSlash && prefixSlash:
|
|
||||||
return a + b[1:]
|
|
||||||
case !suffixSlash && !prefixSlash:
|
|
||||||
return a + "/" + b
|
|
||||||
}
|
|
||||||
return a + b
|
|
||||||
}
|
|
||||||
62
cmd/lerproxy/utils/utils.go
Normal file
62
cmd/lerproxy/utils/utils.go
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
package utils
|
||||||
|
|
||||||
|
import "strings"
|
||||||
|
|
||||||
|
// GetKeys returns a slice containing all the keys from the provided map.
|
||||||
|
//
|
||||||
|
// # Parameters
|
||||||
|
//
|
||||||
|
// - m (map[string]string): The input map from which to extract keys.
|
||||||
|
//
|
||||||
|
// # Return Values
|
||||||
|
//
|
||||||
|
// - []string: A slice of strings representing the keys in the map.
|
||||||
|
//
|
||||||
|
// # Expected behaviour
|
||||||
|
//
|
||||||
|
// - Iterates over each key in the map and appends it to a new slice.
|
||||||
|
//
|
||||||
|
// - Returns the slice containing all the keys.
|
||||||
|
func GetKeys(m map[string]string) []string {
|
||||||
|
out := make([]string, 0, len(m))
|
||||||
|
for k := range m {
|
||||||
|
out = append(out, k)
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// SingleJoiningSlash joins two strings with a single slash between them,
|
||||||
|
// ensuring that the resulting path doesn't contain multiple consecutive
|
||||||
|
// slashes.
|
||||||
|
//
|
||||||
|
// # Parameters
|
||||||
|
//
|
||||||
|
// - a (string): The first string to join.
|
||||||
|
//
|
||||||
|
// - b (string): The second string to join.
|
||||||
|
//
|
||||||
|
// # Return Values
|
||||||
|
//
|
||||||
|
// - result (string): The joined string with a single slash between them if
|
||||||
|
// needed.
|
||||||
|
//
|
||||||
|
// # Expected behaviour
|
||||||
|
//
|
||||||
|
// - If both a and b start and end with a slash, the resulting string will have
|
||||||
|
// only one slash between them.
|
||||||
|
//
|
||||||
|
// - If neither a nor b starts or ends with a slash, the strings will be joined
|
||||||
|
// with a single slash in between.
|
||||||
|
//
|
||||||
|
// - Otherwise, the two strings are simply concatenated.
|
||||||
|
func SingleJoiningSlash(a, b string) string {
|
||||||
|
suffixSlash := strings.HasSuffix(a, "/")
|
||||||
|
prefixSlash := strings.HasPrefix(b, "/")
|
||||||
|
switch {
|
||||||
|
case suffixSlash && prefixSlash:
|
||||||
|
return a + b[1:]
|
||||||
|
case !suffixSlash && !prefixSlash:
|
||||||
|
return a + "/" + b
|
||||||
|
}
|
||||||
|
return a + b
|
||||||
|
}
|
||||||
@@ -8,6 +8,8 @@ import (
|
|||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/url"
|
"net/url"
|
||||||
|
"os"
|
||||||
|
|
||||||
"orly.dev/pkg/crypto/p256k"
|
"orly.dev/pkg/crypto/p256k"
|
||||||
"orly.dev/pkg/crypto/sha256"
|
"orly.dev/pkg/crypto/sha256"
|
||||||
"orly.dev/pkg/encoders/bech32encoding"
|
"orly.dev/pkg/encoders/bech32encoding"
|
||||||
@@ -18,7 +20,6 @@ import (
|
|||||||
"orly.dev/pkg/utils/errorf"
|
"orly.dev/pkg/utils/errorf"
|
||||||
"orly.dev/pkg/utils/log"
|
"orly.dev/pkg/utils/log"
|
||||||
realy_lol "orly.dev/pkg/version"
|
realy_lol "orly.dev/pkg/version"
|
||||||
"os"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const secEnv = "NOSTR_SECRET_KEY"
|
const secEnv = "NOSTR_SECRET_KEY"
|
||||||
@@ -190,6 +191,5 @@ func Post(f string, ur *url.URL, sign signer.I) (err error) {
|
|||||||
if io.Copy(os.Stdout, res.Body); chk.E(err) {
|
if io.Copy(os.Stdout, res.Body); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
fmt.Println()
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,13 +7,14 @@ import (
|
|||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"fmt"
|
"fmt"
|
||||||
"orly.dev/pkg/crypto/ec/bech32"
|
"orly.dev/pkg/crypto/ec/bech32"
|
||||||
"orly.dev/pkg/crypto/ec/schnorr"
|
|
||||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
|
"orly.dev/pkg/crypto/p256k"
|
||||||
"orly.dev/pkg/encoders/bech32encoding"
|
"orly.dev/pkg/encoders/bech32encoding"
|
||||||
"orly.dev/pkg/utils/atomic"
|
"orly.dev/pkg/utils/atomic"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"orly.dev/pkg/utils/interrupt"
|
"orly.dev/pkg/utils/interrupt"
|
||||||
"orly.dev/pkg/utils/log"
|
"orly.dev/pkg/utils/log"
|
||||||
|
"orly.dev/pkg/utils/lol"
|
||||||
"orly.dev/pkg/utils/qu"
|
"orly.dev/pkg/utils/qu"
|
||||||
"os"
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
@@ -33,9 +34,9 @@ const (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Result struct {
|
type Result struct {
|
||||||
sec *secp256k1.SecretKey
|
sec []byte
|
||||||
npub []byte
|
npub []byte
|
||||||
pub *secp256k1.PublicKey
|
pub []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
var args struct {
|
var args struct {
|
||||||
@@ -45,6 +46,7 @@ var args struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
lol.SetLogLevel("info")
|
||||||
arg.MustParse(&args)
|
arg.MustParse(&args)
|
||||||
if args.String == "" {
|
if args.String == "" {
|
||||||
_, _ = fmt.Fprintln(
|
_, _ = fmt.Fprintln(
|
||||||
@@ -79,7 +81,7 @@ Options:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func Vanity(str string, where int, threads int) (e error) {
|
func Vanity(str string, where int, threads int) (err error) {
|
||||||
|
|
||||||
// check the string has valid bech32 ciphers
|
// check the string has valid bech32 ciphers
|
||||||
for i := range str {
|
for i := range str {
|
||||||
@@ -122,7 +124,7 @@ out:
|
|||||||
wm := workingFor % time.Second
|
wm := workingFor % time.Second
|
||||||
workingFor -= wm
|
workingFor -= wm
|
||||||
fmt.Printf(
|
fmt.Printf(
|
||||||
"working for %v, attempts %d\n",
|
" working for %v, attempts %d",
|
||||||
workingFor, counter.Load(),
|
workingFor, counter.Load(),
|
||||||
)
|
)
|
||||||
case r := <-resC:
|
case r := <-resC:
|
||||||
@@ -142,20 +144,16 @@ out:
|
|||||||
wg.Wait()
|
wg.Wait()
|
||||||
|
|
||||||
fmt.Printf(
|
fmt.Printf(
|
||||||
"generated in %d attempts using %d threads, taking %v\n",
|
"\r# generated in %d attempts using %d threads, taking %v ",
|
||||||
counter.Load(), args.Threads, time.Now().Sub(started),
|
counter.Load(), args.Threads, time.Now().Sub(started),
|
||||||
)
|
)
|
||||||
secBytes := res.sec.Serialize()
|
fmt.Printf(
|
||||||
log.D.Ln(
|
"\nHSEC = %s\nHPUB = %s\n",
|
||||||
"generated key pair:\n"+
|
hex.EncodeToString(res.sec),
|
||||||
"\nhex:\n"+
|
hex.EncodeToString(res.pub),
|
||||||
"\tsecret: %s\n"+
|
|
||||||
"\tpublic: %s\n\n",
|
|
||||||
hex.EncodeToString(secBytes),
|
|
||||||
hex.EncodeToString(schnorr.SerializePubKey(res.pub)),
|
|
||||||
)
|
)
|
||||||
nsec, _ := bech32encoding.SecretKeyToNsec(res.sec)
|
nsec, _ := bech32encoding.BinToNsec(res.sec)
|
||||||
fmt.Printf("\nNSEC = %s\nNPUB = %s\n\n", nsec, res.npub)
|
fmt.Printf("NSEC = %s\nNPUB = %s\n", nsec, res.npub)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -185,16 +183,19 @@ out:
|
|||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
counter.Inc()
|
counter.Inc()
|
||||||
r.sec, r.pub, e = GenKeyPair()
|
// r.sec, r.pub, e = GenKeyPair()
|
||||||
|
r.sec, r.pub, e = Gen()
|
||||||
if e != nil {
|
if e != nil {
|
||||||
log.E.Ln("error generating key: '%v' worker stopping", e)
|
log.E.Ln("error generating key: '%v' worker stopping", e)
|
||||||
break out
|
break out
|
||||||
}
|
}
|
||||||
r.npub, e = bech32encoding.PublicKeyToNpub(r.pub)
|
// r.npub, e = bech32encoding.PublicKeyToNpub(r.pub)
|
||||||
if e != nil {
|
if r.npub, e = bech32encoding.BinToNpub(r.pub); e != nil {
|
||||||
log.E.Ln("fatal error generating npub: %s\n", e)
|
log.E.Ln("fatal error generating npub: %s\n", e)
|
||||||
break out
|
break out
|
||||||
}
|
}
|
||||||
|
fmt.Printf("\rgenerating key: %s", r.npub)
|
||||||
|
// log.I.F("%s", r.npub)
|
||||||
switch where {
|
switch where {
|
||||||
case PositionBeginning:
|
case PositionBeginning:
|
||||||
if bytes.HasPrefix(r.npub, append(prefix, []byte(str)...)) {
|
if bytes.HasPrefix(r.npub, append(prefix, []byte(str)...)) {
|
||||||
@@ -215,6 +216,11 @@ out:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func Gen() (skb, pkb []byte, err error) {
|
||||||
|
skb, pkb, _, _, err = p256k.Generate()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// GenKeyPair creates a fresh new key pair using the entropy source used by
|
// GenKeyPair creates a fresh new key pair using the entropy source used by
|
||||||
// crypto/rand (ie, /dev/random on posix systems).
|
// crypto/rand (ie, /dev/random on posix systems).
|
||||||
func GenKeyPair() (
|
func GenKeyPair() (
|
||||||
|
|||||||
162
cmd/walletcli/README.md
Normal file
162
cmd/walletcli/README.md
Normal file
@@ -0,0 +1,162 @@
|
|||||||
|
# NWC Client CLI Tool
|
||||||
|
|
||||||
|
A command-line interface tool for making calls to Nostr Wallet Connect (NWC) services.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
This CLI tool allows you to interact with NWC wallet services using the methods defined in the NIP-47 specification. It provides a simple interface for executing wallet operations and displays the JSON response from the wallet service.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> <method> [parameters...]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Connection URL
|
||||||
|
|
||||||
|
The connection URL should be in the Nostr Wallet Connect format:
|
||||||
|
|
||||||
|
```
|
||||||
|
nostr+walletconnect://<wallet_pubkey>?relay=<relay_url>&secret=<secret>
|
||||||
|
```
|
||||||
|
|
||||||
|
### Supported Methods
|
||||||
|
|
||||||
|
The following methods are supported by this CLI tool:
|
||||||
|
|
||||||
|
- `get_info` - Get wallet information
|
||||||
|
- `get_balance` - Get wallet balance
|
||||||
|
- `get_budget` - Get wallet budget
|
||||||
|
- `make_invoice` - Create an invoice
|
||||||
|
- `pay_invoice` - Pay an invoice
|
||||||
|
- `pay_keysend` - Send a keysend payment
|
||||||
|
- `lookup_invoice` - Look up an invoice
|
||||||
|
- `list_transactions` - List transactions
|
||||||
|
- `sign_message` - Sign a message
|
||||||
|
|
||||||
|
### Unsupported Methods
|
||||||
|
|
||||||
|
The following methods are defined in the NIP-47 specification but are not directly supported by this CLI tool due to limitations in the underlying nwc package:
|
||||||
|
|
||||||
|
- `create_connection` - Create a connection
|
||||||
|
- `make_hold_invoice` - Create a hold invoice
|
||||||
|
- `settle_hold_invoice` - Settle a hold invoice
|
||||||
|
- `cancel_hold_invoice` - Cancel a hold invoice
|
||||||
|
- `multi_pay_invoice` - Pay multiple invoices
|
||||||
|
- `multi_pay_keysend` - Send multiple keysend payments
|
||||||
|
|
||||||
|
## Method Parameters
|
||||||
|
|
||||||
|
### Methods with No Parameters
|
||||||
|
|
||||||
|
- `get_info`
|
||||||
|
- `get_balance`
|
||||||
|
- `get_budget`
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> get_info
|
||||||
|
```
|
||||||
|
|
||||||
|
### Methods with Parameters
|
||||||
|
|
||||||
|
#### make_invoice
|
||||||
|
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> make_invoice <amount> <description> [description_hash] [expiry]
|
||||||
|
```
|
||||||
|
|
||||||
|
- `amount` - Amount in millisatoshis (msats)
|
||||||
|
- `description` - Invoice description
|
||||||
|
- `description_hash` (optional) - Hash of the description
|
||||||
|
- `expiry` (optional) - Expiry time in seconds
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> make_invoice 1000000 "Test invoice" "" 3600
|
||||||
|
```
|
||||||
|
|
||||||
|
#### pay_invoice
|
||||||
|
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> pay_invoice <invoice> [amount]
|
||||||
|
```
|
||||||
|
|
||||||
|
- `invoice` - BOLT11 invoice
|
||||||
|
- `amount` (optional) - Amount in millisatoshis (msats)
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> pay_invoice lnbc1...
|
||||||
|
```
|
||||||
|
|
||||||
|
#### pay_keysend
|
||||||
|
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> pay_keysend <amount> <pubkey> [preimage]
|
||||||
|
```
|
||||||
|
|
||||||
|
- `amount` - Amount in millisatoshis (msats)
|
||||||
|
- `pubkey` - Recipient's public key
|
||||||
|
- `preimage` (optional) - Payment preimage
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> pay_keysend 1000000 03...
|
||||||
|
```
|
||||||
|
|
||||||
|
#### lookup_invoice
|
||||||
|
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> lookup_invoice <payment_hash_or_invoice>
|
||||||
|
```
|
||||||
|
|
||||||
|
- `payment_hash_or_invoice` - Payment hash or BOLT11 invoice
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> lookup_invoice 3d...
|
||||||
|
```
|
||||||
|
|
||||||
|
#### list_transactions
|
||||||
|
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> list_transactions [from <timestamp>] [until <timestamp>] [limit <count>] [offset <count>] [unpaid <true|false>] [type <incoming|outgoing>]
|
||||||
|
```
|
||||||
|
|
||||||
|
Parameters are specified as name-value pairs:
|
||||||
|
|
||||||
|
- `from` - Start timestamp
|
||||||
|
- `until` - End timestamp
|
||||||
|
- `limit` - Maximum number of transactions to return
|
||||||
|
- `offset` - Number of transactions to skip
|
||||||
|
- `unpaid` - Whether to include unpaid transactions
|
||||||
|
- `type` - Transaction type (incoming or outgoing)
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> list_transactions limit 10 type incoming
|
||||||
|
```
|
||||||
|
|
||||||
|
#### sign_message
|
||||||
|
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> sign_message <message>
|
||||||
|
```
|
||||||
|
|
||||||
|
- `message` - Message to sign
|
||||||
|
|
||||||
|
Example:
|
||||||
|
```
|
||||||
|
nwcclient <connection URL> sign_message "Hello, world!"
|
||||||
|
```
|
||||||
|
|
||||||
|
## Output
|
||||||
|
|
||||||
|
The tool prints the JSON response from the wallet service to stdout. If an error occurs, an error message is printed to stderr.
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
|
||||||
|
- The tool only supports methods that have direct client methods in the nwc package.
|
||||||
|
- Complex parameters like metadata are not supported.
|
||||||
|
- The tool does not support interactive authentication or authorization.
|
||||||
417
cmd/walletcli/main.go
Normal file
417
cmd/walletcli/main.go
Normal file
@@ -0,0 +1,417 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"orly.dev/pkg/protocol/nwc"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
func printUsage() {
|
||||||
|
fmt.Println("Usage: walletcli \"<NWC connection URL>\" <method> [<args...>]")
|
||||||
|
fmt.Println("\nAvailable methods:")
|
||||||
|
fmt.Println(" get_wallet_service_info - Get wallet service information")
|
||||||
|
fmt.Println(" get_info - Get wallet information")
|
||||||
|
fmt.Println(" get_balance - Get wallet balance")
|
||||||
|
fmt.Println(" get_budget - Get wallet budget")
|
||||||
|
fmt.Println(" make_invoice - Create an invoice")
|
||||||
|
fmt.Println(" Args: <amount> [<description>] [<description_hash>] [<expiry>]")
|
||||||
|
fmt.Println(" pay_invoice - Pay an invoice")
|
||||||
|
fmt.Println(" Args: <invoice> [<amount>] [<comment>]")
|
||||||
|
fmt.Println(" pay_keysend - Pay to a node using keysend")
|
||||||
|
fmt.Println(" Args: <pubkey> <amount> [<preimage>] [<tlv_type> <tlv_value>...]")
|
||||||
|
fmt.Println(" lookup_invoice - Look up an invoice")
|
||||||
|
fmt.Println(" Args: <payment_hash or invoice>")
|
||||||
|
fmt.Println(" list_transactions - List transactions")
|
||||||
|
fmt.Println(" Args: [<limit>] [<offset>] [<from>] [<until>]")
|
||||||
|
fmt.Println(" make_hold_invoice - Create a hold invoice")
|
||||||
|
fmt.Println(" Args: <amount> <payment_hash> [<description>] [<description_hash>] [<expiry>]")
|
||||||
|
fmt.Println(" settle_hold_invoice - Settle a hold invoice")
|
||||||
|
fmt.Println(" Args: <preimage>")
|
||||||
|
fmt.Println(" cancel_hold_invoice - Cancel a hold invoice")
|
||||||
|
fmt.Println(" Args: <payment_hash>")
|
||||||
|
fmt.Println(" sign_message - Sign a message")
|
||||||
|
fmt.Println(" Args: <message>")
|
||||||
|
fmt.Println(" create_connection - Create a connection")
|
||||||
|
fmt.Println(" Args: <pubkey> <name> <methods> [<notification_types>] [<max_amount>] [<budget_renewal>] [<expires_at>]")
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if len(os.Args) < 3 {
|
||||||
|
printUsage()
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
connectionURL := os.Args[1]
|
||||||
|
method := os.Args[2]
|
||||||
|
args := os.Args[3:]
|
||||||
|
// Create context
|
||||||
|
// ctx, cancel := context.Cancel(context.Bg())
|
||||||
|
ctx := context.Bg()
|
||||||
|
// defer cancel()
|
||||||
|
// Create NWC client
|
||||||
|
client, err := nwc.NewClient(ctx, connectionURL)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error creating client: %v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
// Execute the requested method
|
||||||
|
switch method {
|
||||||
|
case "get_wallet_service_info":
|
||||||
|
handleGetWalletServiceInfo(ctx, client)
|
||||||
|
case "get_info":
|
||||||
|
handleGetInfo(ctx, client)
|
||||||
|
case "get_balance":
|
||||||
|
handleGetBalance(ctx, client)
|
||||||
|
case "get_budget":
|
||||||
|
handleGetBudget(ctx, client)
|
||||||
|
case "make_invoice":
|
||||||
|
handleMakeInvoice(ctx, client, args)
|
||||||
|
case "pay_invoice":
|
||||||
|
handlePayInvoice(ctx, client, args)
|
||||||
|
case "pay_keysend":
|
||||||
|
handlePayKeysend(ctx, client, args)
|
||||||
|
case "lookup_invoice":
|
||||||
|
handleLookupInvoice(ctx, client, args)
|
||||||
|
case "list_transactions":
|
||||||
|
handleListTransactions(ctx, client, args)
|
||||||
|
case "make_hold_invoice":
|
||||||
|
handleMakeHoldInvoice(ctx, client, args)
|
||||||
|
case "settle_hold_invoice":
|
||||||
|
handleSettleHoldInvoice(ctx, client, args)
|
||||||
|
case "cancel_hold_invoice":
|
||||||
|
handleCancelHoldInvoice(ctx, client, args)
|
||||||
|
case "sign_message":
|
||||||
|
handleSignMessage(ctx, client, args)
|
||||||
|
case "create_connection":
|
||||||
|
handleCreateConnection(ctx, client, args)
|
||||||
|
default:
|
||||||
|
fmt.Printf("Unknown method: %s\n", method)
|
||||||
|
printUsage()
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleGetWalletServiceInfo(ctx context.T, client *nwc.Client) {
|
||||||
|
if _, raw, err := client.GetWalletServiceInfo(ctx, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleGetInfo(ctx context.T, client *nwc.Client) {
|
||||||
|
if _, raw, err := client.GetInfo(ctx, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleGetBalance(ctx context.T, client *nwc.Client) {
|
||||||
|
if _, raw, err := client.GetBalance(ctx, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleGetBudget(ctx context.T, client *nwc.Client) {
|
||||||
|
if _, raw, err := client.GetBudget(ctx, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleMakeInvoice(ctx context.T, client *nwc.Client, args []string) {
|
||||||
|
if len(args) < 1 {
|
||||||
|
fmt.Println("Error: Missing required arguments")
|
||||||
|
fmt.Println("Usage: walletcli <NWC connection URL> make_invoice <amount> [<description>] [<description_hash>] [<expiry>]")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
amount, err := strconv.ParseUint(args[0], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing amount: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params := &nwc.MakeInvoiceParams{
|
||||||
|
Amount: amount,
|
||||||
|
}
|
||||||
|
if len(args) > 1 {
|
||||||
|
params.Description = args[1]
|
||||||
|
}
|
||||||
|
if len(args) > 2 {
|
||||||
|
params.DescriptionHash = args[2]
|
||||||
|
}
|
||||||
|
if len(args) > 3 {
|
||||||
|
expiry, err := strconv.ParseInt(args[3], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing expiry: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params.Expiry = &expiry
|
||||||
|
}
|
||||||
|
var raw []byte
|
||||||
|
if _, raw, err = client.MakeInvoice(ctx, params, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handlePayInvoice(ctx context.T, client *nwc.Client, args []string) {
|
||||||
|
if len(args) < 1 {
|
||||||
|
fmt.Println("Error: Missing required arguments")
|
||||||
|
fmt.Println("Usage: walletcli <NWC connection URL> pay_invoice <invoice> [<amount>] [<comment>]")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params := &nwc.PayInvoiceParams{
|
||||||
|
Invoice: args[0],
|
||||||
|
}
|
||||||
|
if len(args) > 1 {
|
||||||
|
amount, err := strconv.ParseUint(args[1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing amount: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params.Amount = &amount
|
||||||
|
}
|
||||||
|
if len(args) > 2 {
|
||||||
|
comment := args[2]
|
||||||
|
params.Metadata = &nwc.PayInvoiceMetadata{
|
||||||
|
Comment: &comment,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, raw, err := client.PayInvoice(ctx, params, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleLookupInvoice(ctx context.T, client *nwc.Client, args []string) {
|
||||||
|
if len(args) < 1 {
|
||||||
|
fmt.Println("Error: Missing required arguments")
|
||||||
|
fmt.Println("Usage: walletcli <NWC connection URL> lookup_invoice <payment_hash or invoice>")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params := &nwc.LookupInvoiceParams{}
|
||||||
|
// Determine if the argument is a payment hash or an invoice
|
||||||
|
if strings.HasPrefix(args[0], "ln") {
|
||||||
|
invoice := args[0]
|
||||||
|
params.Invoice = &invoice
|
||||||
|
} else {
|
||||||
|
paymentHash := args[0]
|
||||||
|
params.PaymentHash = &paymentHash
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
var raw []byte
|
||||||
|
if _, raw, err = client.LookupInvoice(ctx, params, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleListTransactions(ctx context.T, client *nwc.Client, args []string) {
|
||||||
|
params := &nwc.ListTransactionsParams{}
|
||||||
|
if len(args) > 0 {
|
||||||
|
limit, err := strconv.ParseUint(args[0], 10, 16)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing limit: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
limitUint16 := uint16(limit)
|
||||||
|
params.Limit = &limitUint16
|
||||||
|
}
|
||||||
|
if len(args) > 1 {
|
||||||
|
offset, err := strconv.ParseUint(args[1], 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing offset: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
offsetUint32 := uint32(offset)
|
||||||
|
params.Offset = &offsetUint32
|
||||||
|
}
|
||||||
|
if len(args) > 2 {
|
||||||
|
from, err := strconv.ParseInt(args[2], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing from: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params.From = &from
|
||||||
|
}
|
||||||
|
if len(args) > 3 {
|
||||||
|
until, err := strconv.ParseInt(args[3], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing until: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params.Until = &until
|
||||||
|
}
|
||||||
|
var raw []byte
|
||||||
|
var err error
|
||||||
|
if _, raw, err = client.ListTransactions(ctx, params, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleMakeHoldInvoice(ctx context.T, client *nwc.Client, args []string) {
|
||||||
|
if len(args) < 2 {
|
||||||
|
fmt.Println("Error: Missing required arguments")
|
||||||
|
fmt.Println("Usage: walletcli <NWC connection URL> make_hold_invoice <amount> <payment_hash> [<description>] [<description_hash>] [<expiry>]")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
amount, err := strconv.ParseUint(args[0], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing amount: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params := &nwc.MakeHoldInvoiceParams{
|
||||||
|
Amount: amount,
|
||||||
|
PaymentHash: args[1],
|
||||||
|
}
|
||||||
|
if len(args) > 2 {
|
||||||
|
params.Description = args[2]
|
||||||
|
}
|
||||||
|
if len(args) > 3 {
|
||||||
|
params.DescriptionHash = args[3]
|
||||||
|
}
|
||||||
|
if len(args) > 4 {
|
||||||
|
expiry, err := strconv.ParseInt(args[4], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing expiry: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params.Expiry = &expiry
|
||||||
|
}
|
||||||
|
var raw []byte
|
||||||
|
if _, raw, err = client.MakeHoldInvoice(ctx, params, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleSettleHoldInvoice(ctx context.T, client *nwc.Client, args []string) {
|
||||||
|
if len(args) < 1 {
|
||||||
|
fmt.Println("Error: Missing required arguments")
|
||||||
|
fmt.Println("Usage: walletcli <NWC connection URL> settle_hold_invoice <preimage>")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params := &nwc.SettleHoldInvoiceParams{
|
||||||
|
Preimage: args[0],
|
||||||
|
}
|
||||||
|
var raw []byte
|
||||||
|
var err error
|
||||||
|
if raw, err = client.SettleHoldInvoice(ctx, params, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleCancelHoldInvoice(ctx context.T, client *nwc.Client, args []string) {
|
||||||
|
if len(args) < 1 {
|
||||||
|
fmt.Println("Error: Missing required arguments")
|
||||||
|
fmt.Println("Usage: walletcli <NWC connection URL> cancel_hold_invoice <payment_hash>")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
params := &nwc.CancelHoldInvoiceParams{
|
||||||
|
PaymentHash: args[0],
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
var raw []byte
|
||||||
|
if raw, err = client.CancelHoldInvoice(ctx, params, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleSignMessage(ctx context.T, client *nwc.Client, args []string) {
|
||||||
|
if len(args) < 1 {
|
||||||
|
fmt.Println("Error: Missing required arguments")
|
||||||
|
fmt.Println("Usage: walletcli <NWC connection URL> sign_message <message>")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
params := &nwc.SignMessageParams{
|
||||||
|
Message: args[0],
|
||||||
|
}
|
||||||
|
var raw []byte
|
||||||
|
var err error
|
||||||
|
if _, raw, err = client.SignMessage(ctx, params, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handlePayKeysend(ctx context.T, client *nwc.Client, args []string) {
|
||||||
|
if len(args) < 2 {
|
||||||
|
fmt.Println("Error: Missing required arguments")
|
||||||
|
fmt.Println("Usage: walletcli <NWC connection URL> pay_keysend <pubkey> <amount> [<preimage>] [<tlv_type> <tlv_value>...]")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
pubkey := args[0]
|
||||||
|
amount, err := strconv.ParseUint(args[1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing amount: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params := &nwc.PayKeysendParams{
|
||||||
|
Pubkey: pubkey,
|
||||||
|
Amount: amount,
|
||||||
|
}
|
||||||
|
// Optional preimage
|
||||||
|
if len(args) > 2 {
|
||||||
|
preimage := args[2]
|
||||||
|
params.Preimage = &preimage
|
||||||
|
}
|
||||||
|
// Optional TLV records (must come in pairs)
|
||||||
|
if len(args) > 3 {
|
||||||
|
// Start from index 3 and process pairs of arguments
|
||||||
|
for i := 3; i < len(args)-1; i += 2 {
|
||||||
|
tlvType, err := strconv.ParseUint(args[i], 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing TLV type: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
tlvValue := args[i+1]
|
||||||
|
params.TLVRecords = append(
|
||||||
|
params.TLVRecords, nwc.PayKeysendTLVRecord{
|
||||||
|
Type: uint32(tlvType),
|
||||||
|
Value: tlvValue,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var raw []byte
|
||||||
|
if _, raw, err = client.PayKeysend(ctx, params, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleCreateConnection(ctx context.T, client *nwc.Client, args []string) {
|
||||||
|
if len(args) < 3 {
|
||||||
|
fmt.Println("Error: Missing required arguments")
|
||||||
|
fmt.Println("Usage: walletcli <NWC connection URL> create_connection <pubkey> <name> <methods> [<notification_types>] [<max_amount>] [<budget_renewal>] [<expires_at>]")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params := &nwc.CreateConnectionParams{
|
||||||
|
Pubkey: args[0],
|
||||||
|
Name: args[1],
|
||||||
|
RequestMethods: strings.Split(args[2], ","),
|
||||||
|
}
|
||||||
|
if len(args) > 3 {
|
||||||
|
params.NotificationTypes = strings.Split(args[3], ",")
|
||||||
|
}
|
||||||
|
if len(args) > 4 {
|
||||||
|
maxAmount, err := strconv.ParseUint(args[4], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing max_amount: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params.MaxAmount = &maxAmount
|
||||||
|
}
|
||||||
|
if len(args) > 5 {
|
||||||
|
params.BudgetRenewal = &args[5]
|
||||||
|
}
|
||||||
|
if len(args) > 6 {
|
||||||
|
expiresAt, err := strconv.ParseInt(args[6], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
fmt.Printf("Error parsing expires_at: %v\n", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
params.ExpiresAt = &expiresAt
|
||||||
|
}
|
||||||
|
var raw []byte
|
||||||
|
var err error
|
||||||
|
if raw, err = client.CreateConnection(ctx, params, true); !chk.E(err) {
|
||||||
|
fmt.Println(string(raw))
|
||||||
|
}
|
||||||
|
}
|
||||||
4
go.mod
4
go.mod
@@ -5,13 +5,12 @@ go 1.24.2
|
|||||||
require (
|
require (
|
||||||
github.com/adrg/xdg v0.5.3
|
github.com/adrg/xdg v0.5.3
|
||||||
github.com/alexflint/go-arg v1.6.0
|
github.com/alexflint/go-arg v1.6.0
|
||||||
|
github.com/coder/websocket v1.8.13
|
||||||
github.com/danielgtaylor/huma/v2 v2.34.1
|
github.com/danielgtaylor/huma/v2 v2.34.1
|
||||||
github.com/davecgh/go-spew v1.1.1
|
github.com/davecgh/go-spew v1.1.1
|
||||||
github.com/dgraph-io/badger/v4 v4.7.0
|
github.com/dgraph-io/badger/v4 v4.7.0
|
||||||
github.com/fasthttp/websocket v1.5.12
|
github.com/fasthttp/websocket v1.5.12
|
||||||
github.com/fatih/color v1.18.0
|
github.com/fatih/color v1.18.0
|
||||||
github.com/gobwas/httphead v0.1.0
|
|
||||||
github.com/gobwas/ws v1.4.0
|
|
||||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
|
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
|
||||||
github.com/klauspost/cpuid/v2 v2.2.11
|
github.com/klauspost/cpuid/v2 v2.2.11
|
||||||
github.com/minio/sha256-simd v1.0.1
|
github.com/minio/sha256-simd v1.0.1
|
||||||
@@ -41,7 +40,6 @@ require (
|
|||||||
github.com/felixge/fgprof v0.9.5 // indirect
|
github.com/felixge/fgprof v0.9.5 // indirect
|
||||||
github.com/go-logr/logr v1.4.3 // indirect
|
github.com/go-logr/logr v1.4.3 // indirect
|
||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
github.com/gobwas/pool v0.2.1 // indirect
|
|
||||||
github.com/google/flatbuffers v25.2.10+incompatible // indirect
|
github.com/google/flatbuffers v25.2.10+incompatible // indirect
|
||||||
github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect
|
github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect
|
||||||
github.com/klauspost/compress v1.18.0 // indirect
|
github.com/klauspost/compress v1.18.0 // indirect
|
||||||
|
|||||||
6
go.sum
6
go.sum
@@ -19,6 +19,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
|||||||
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
|
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
|
||||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||||
|
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
|
||||||
|
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
|
||||||
github.com/danielgtaylor/huma/v2 v2.34.1 h1:EmOJAbzEGfy0wAq/QMQ1YKfEMBEfE94xdBRLPBP0gwQ=
|
github.com/danielgtaylor/huma/v2 v2.34.1 h1:EmOJAbzEGfy0wAq/QMQ1YKfEMBEfE94xdBRLPBP0gwQ=
|
||||||
github.com/danielgtaylor/huma/v2 v2.34.1/go.mod h1:ynwJgLk8iGVgoaipi5tgwIQ5yoFNmiu+QdhU7CEEmhk=
|
github.com/danielgtaylor/huma/v2 v2.34.1/go.mod h1:ynwJgLk8iGVgoaipi5tgwIQ5yoFNmiu+QdhU7CEEmhk=
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
@@ -44,13 +46,9 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
|||||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||||
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
|
|
||||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||||
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
|
|
||||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||||
github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs=
|
|
||||||
github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc=
|
|
||||||
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
|
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
|
||||||
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||||
|
|||||||
46
main.go
46
main.go
@@ -1,26 +1,26 @@
|
|||||||
// Package main is a nostr relay with a simple follow/mute list authentication
|
// Package main is a nostr relay with a simple follow/mute list authentication
|
||||||
// scheme and the new HTTP REST based protocol. Configuration is via environment
|
// scheme and the new HTTP REST-based protocol. Configuration is via environment
|
||||||
// variables or an optional .env file.
|
// variables or an optional .env file.
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
|
||||||
_ "net/http/pprof"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/pkg/profile"
|
"github.com/pkg/profile"
|
||||||
|
_ "net/http/pprof"
|
||||||
app2 "orly.dev/pkg/app"
|
app2 "orly.dev/pkg/app"
|
||||||
"orly.dev/pkg/app/config"
|
"orly.dev/pkg/app/config"
|
||||||
"orly.dev/pkg/app/relay"
|
"orly.dev/pkg/app/relay"
|
||||||
"orly.dev/pkg/app/relay/options"
|
"orly.dev/pkg/app/relay/options"
|
||||||
"orly.dev/pkg/database"
|
"orly.dev/pkg/database"
|
||||||
|
"orly.dev/pkg/protocol/openapi"
|
||||||
|
"orly.dev/pkg/protocol/servemux"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"orly.dev/pkg/utils/context"
|
"orly.dev/pkg/utils/context"
|
||||||
"orly.dev/pkg/utils/interrupt"
|
"orly.dev/pkg/utils/interrupt"
|
||||||
"orly.dev/pkg/utils/log"
|
"orly.dev/pkg/utils/log"
|
||||||
"orly.dev/pkg/utils/lol"
|
"orly.dev/pkg/utils/lol"
|
||||||
"orly.dev/pkg/version"
|
"orly.dev/pkg/version"
|
||||||
|
"os"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
@@ -43,15 +43,24 @@ func main() {
|
|||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
lol.SetLogLevel(cfg.LogLevel)
|
lol.SetLogLevel(cfg.LogLevel)
|
||||||
if cfg.Pprof {
|
if cfg.Pprof != "" {
|
||||||
defer profile.Start(profile.MemProfile).Stop()
|
switch cfg.Pprof {
|
||||||
go func() {
|
case "cpu":
|
||||||
chk.E(http.ListenAndServe("127.0.0.1:6060", nil))
|
prof := profile.Start(profile.CPUProfile)
|
||||||
}()
|
defer prof.Stop()
|
||||||
|
case "memory":
|
||||||
|
prof := profile.Start(profile.MemProfile)
|
||||||
|
defer prof.Stop()
|
||||||
|
case "allocation":
|
||||||
|
prof := profile.Start(profile.MemProfileAllocs)
|
||||||
|
defer prof.Stop()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
c, cancel := context.Cancel(context.Bg())
|
c, cancel := context.Cancel(context.Bg())
|
||||||
storage, err := database.New(c, cancel, cfg.DataDir, cfg.DbLogLevel)
|
var storage *database.D
|
||||||
if chk.E(err) {
|
if storage, err = database.New(
|
||||||
|
c, cancel, cfg.DataDir, cfg.DbLogLevel,
|
||||||
|
); chk.E(err) {
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
r := &app2.Relay{C: cfg, Store: storage}
|
r := &app2.Relay{C: cfg, Store: storage}
|
||||||
@@ -66,9 +75,20 @@ func main() {
|
|||||||
C: cfg,
|
C: cfg,
|
||||||
}
|
}
|
||||||
var opts []options.O
|
var opts []options.O
|
||||||
if server, err = relay.NewServer(serverParams, opts...); chk.E(err) {
|
serveMux := servemux.NewServeMux()
|
||||||
|
if server, err = relay.NewServer(
|
||||||
|
serverParams, serveMux, opts...,
|
||||||
|
); chk.E(err) {
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
openapi.New(
|
||||||
|
server,
|
||||||
|
cfg.AppName,
|
||||||
|
version.V,
|
||||||
|
version.Description,
|
||||||
|
"/api",
|
||||||
|
serveMux,
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.F.F("failed to create server: %v", err)
|
log.F.F("failed to create server: %v", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,12 +5,6 @@ package config
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"orly.dev/pkg/utils/apputil"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
|
||||||
env2 "orly.dev/pkg/utils/env"
|
|
||||||
"orly.dev/pkg/utils/log"
|
|
||||||
"orly.dev/pkg/utils/lol"
|
|
||||||
"orly.dev/pkg/version"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"reflect"
|
"reflect"
|
||||||
@@ -18,6 +12,13 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"orly.dev/pkg/utils/apputil"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
env2 "orly.dev/pkg/utils/env"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"orly.dev/pkg/utils/lol"
|
||||||
|
"orly.dev/pkg/version"
|
||||||
|
|
||||||
"github.com/adrg/xdg"
|
"github.com/adrg/xdg"
|
||||||
"go-simpler.org/env"
|
"go-simpler.org/env"
|
||||||
)
|
)
|
||||||
@@ -26,7 +27,7 @@ import (
|
|||||||
// and default values. It defines parameters for app behaviour, storage
|
// and default values. It defines parameters for app behaviour, storage
|
||||||
// locations, logging, and network settings used across the relay service.
|
// locations, logging, and network settings used across the relay service.
|
||||||
type C struct {
|
type C struct {
|
||||||
AppName string `env:"ORLY_APP_NAME" default:"orly"`
|
AppName string `env:"ORLY_APP_NAME" default:"ORLY"`
|
||||||
Config string `env:"ORLY_CONFIG_DIR" usage:"location for configuration file, which has the name '.env' to make it harder to delete, and is a standard environment KEY=value<newline>... style" default:"~/.config/orly"`
|
Config string `env:"ORLY_CONFIG_DIR" usage:"location for configuration file, which has the name '.env' to make it harder to delete, and is a standard environment KEY=value<newline>... style" default:"~/.config/orly"`
|
||||||
State string `env:"ORLY_STATE_DATA_DIR" usage:"storage location for state data affected by dynamic interactive interfaces" default:"~/.local/state/orly"`
|
State string `env:"ORLY_STATE_DATA_DIR" usage:"storage location for state data affected by dynamic interactive interfaces" default:"~/.local/state/orly"`
|
||||||
DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/cache/orly"`
|
DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/cache/orly"`
|
||||||
@@ -34,12 +35,19 @@ type C struct {
|
|||||||
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
|
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
|
||||||
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
|
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
|
||||||
DbLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
|
DbLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
|
||||||
Pprof bool `env:"ORLY_PPROF" default:"false" usage:"enable pprof on 127.0.0.1:6060"`
|
Pprof string `env:"ORLY_PPROF" usage:"enable pprof on 127.0.0.1:6060" enum:"cpu,memory,allocation"`
|
||||||
AuthRequired bool `env:"ORLY_AUTH_REQUIRED" default:"false" usage:"require authentication for all requests"`
|
AuthRequired bool `env:"ORLY_AUTH_REQUIRED" default:"false" usage:"require authentication for all requests"`
|
||||||
PublicReadable bool `env:"ORLY_PUBLIC_READABLE" default:"true" usage:"allow public read access to regardless of whether the client is authed"`
|
PublicReadable bool `env:"ORLY_PUBLIC_READABLE" default:"true" usage:"allow public read access to regardless of whether the client is authed"`
|
||||||
SpiderSeeds []string `env:"ORLY_SPIDER_SEEDS" usage:"seeds to use for the spider (relays that are looked up initially to find owner relay lists) (comma separated)" default:"wss://relay.nostr.band/,wss://relay.damus.io/,wss://nostr.wine/,wss://nostr.land/,wss://theforest.nostr1.com/"`
|
SpiderSeeds []string `env:"ORLY_SPIDER_SEEDS" usage:"seeds to use for the spider (relays that are looked up initially to find owner relay lists) (comma separated)" default:"wss://profiles.nostr1.com/,wss://relay.nostr.band/,wss://relay.damus.io/,wss://nostr.wine/,wss://nostr.land/,wss://theforest.nostr1.com/,wss://profiles.nostr1.com/"`
|
||||||
|
SpiderType string `env:"ORLY_SPIDER_TYPE" usage:"whether to spider, and what degree of spidering: none, directory, follows (follows means to the second degree of the follow graph)" default:"directory"`
|
||||||
|
SpiderTime time.Duration `env:"ORLY_SPIDER_FREQUENCY" usage:"how often to run the spider, uses notation 0h0m0s" default:"1h"`
|
||||||
|
SpiderSecondDegree bool `env:"ORLY_SPIDER_SECOND_DEGREE" default:"true" usage:"whether to enable spidering the second degree of follows for non-directory events if ORLY_SPIDER_TYPE is set to 'follows'"`
|
||||||
Owners []string `env:"ORLY_OWNERS" usage:"list of users whose follow lists designate whitelisted users who can publish events, and who can read if public readable is false (comma separated)"`
|
Owners []string `env:"ORLY_OWNERS" usage:"list of users whose follow lists designate whitelisted users who can publish events, and who can read if public readable is false (comma separated)"`
|
||||||
Private bool `env:"ORLY_PRIVATE" usage:"do not spider for user metadata because the relay is private and this would leak relay memberships" default:"false"`
|
Private bool `env:"ORLY_PRIVATE" usage:"do not spider for user metadata because the relay is private and this would leak relay memberships" default:"false"`
|
||||||
|
Whitelist []string `env:"ORLY_WHITELIST" usage:"only allow connections from this list of IP addresses"`
|
||||||
|
Blacklist []string `env:"ORLY_BLACKLIST" usage:"list of pubkeys to block when auth is not required (comma separated)"`
|
||||||
|
RelaySecret string `env:"ORLY_SECRET_KEY" usage:"secret key for relay cluster replication authentication"`
|
||||||
|
PeerRelays []string `env:"ORLY_PEER_RELAYS" usage:"list of peer relays URLs that new events are pushed to in format <pubkey>|<url>"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// New creates and initializes a new configuration object for the relay
|
// New creates and initializes a new configuration object for the relay
|
||||||
@@ -73,6 +81,9 @@ func New() (cfg *C, err error) {
|
|||||||
if cfg.State == "" || strings.Contains(cfg.State, "~") {
|
if cfg.State == "" || strings.Contains(cfg.State, "~") {
|
||||||
cfg.State = filepath.Join(xdg.StateHome, cfg.AppName)
|
cfg.State = filepath.Join(xdg.StateHome, cfg.AppName)
|
||||||
}
|
}
|
||||||
|
if len(cfg.Owners) > 0 {
|
||||||
|
cfg.AuthRequired = true
|
||||||
|
}
|
||||||
envPath := filepath.Join(cfg.Config, ".env")
|
envPath := filepath.Join(cfg.Config, ".env")
|
||||||
if apputil.FileExists(envPath) {
|
if apputil.FileExists(envPath) {
|
||||||
var e env2.Env
|
var e env2.Env
|
||||||
@@ -87,6 +98,17 @@ func New() (cfg *C, err error) {
|
|||||||
lol.SetLogLevel(cfg.LogLevel)
|
lol.SetLogLevel(cfg.LogLevel)
|
||||||
log.I.F("loaded configuration from %s", envPath)
|
log.I.F("loaded configuration from %s", envPath)
|
||||||
}
|
}
|
||||||
|
// if spider seeds has no elements, there still is a single entry with an
|
||||||
|
// empty string; and also if any of the fields are empty strings, they need
|
||||||
|
// to be removed.
|
||||||
|
var seeds []string
|
||||||
|
for _, u := range cfg.SpiderSeeds {
|
||||||
|
if u == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seeds = append(seeds, u)
|
||||||
|
}
|
||||||
|
cfg.SpiderSeeds = seeds
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -41,18 +41,36 @@ func (s *Server) AcceptEvent(
|
|||||||
c context.T, ev *event.E, hr *http.Request, authedPubkey []byte,
|
c context.T, ev *event.E, hr *http.Request, authedPubkey []byte,
|
||||||
remote string,
|
remote string,
|
||||||
) (accept bool, notice string, afterSave func()) {
|
) (accept bool, notice string, afterSave func()) {
|
||||||
// if auth is required and the user is not authed, reject
|
if !s.AuthRequired() {
|
||||||
if s.AuthRequired() && len(authedPubkey) == 0 {
|
// Check blacklist for public relay mode
|
||||||
|
if len(s.blacklistPubkeys) > 0 {
|
||||||
|
for _, blockedPubkey := range s.blacklistPubkeys {
|
||||||
|
if bytes.Equal(blockedPubkey, ev.Pubkey) {
|
||||||
|
notice = "event author is blacklisted"
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// if auth is required and the user is not authed, reject
|
||||||
|
if len(authedPubkey) == 0 {
|
||||||
|
notice = "client isn't authed"
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, u := range s.OwnersMuted() {
|
||||||
|
if bytes.Equal(u, authedPubkey) {
|
||||||
|
notice = "event author is banned from this relay"
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
// check if the authed user is on the lists
|
// check if the authed user is on the lists
|
||||||
list := append(s.OwnersFollowed(), s.FollowedFollows()...)
|
list := append(s.OwnersFollowed(), s.FollowedFollows()...)
|
||||||
for _, u := range list {
|
for _, u := range list {
|
||||||
if bytes.Equal(u, authedPubkey) {
|
if bytes.Equal(u, authedPubkey) {
|
||||||
accept = true
|
accept = true
|
||||||
break
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// todo: check if event author is on owners' mute lists or block list
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -203,8 +203,8 @@ func TestAcceptEventWithRealServer(t *testing.T) {
|
|||||||
if accept {
|
if accept {
|
||||||
t.Error("AcceptEvent() accept = true, want false")
|
t.Error("AcceptEvent() accept = true, want false")
|
||||||
}
|
}
|
||||||
if notice != "" {
|
if notice != "client isn't authed" {
|
||||||
t.Errorf("AcceptEvent() notice = %v, want empty string", notice)
|
t.Errorf("AcceptEvent() notice = %v, want 'client isn't authed'", notice)
|
||||||
}
|
}
|
||||||
if afterSave != nil {
|
if afterSave != nil {
|
||||||
t.Error("AcceptEvent() afterSave is not nil, but should be nil")
|
t.Error("AcceptEvent() afterSave is not nil, but should be nil")
|
||||||
@@ -234,4 +234,81 @@ func TestAcceptEventWithRealServer(t *testing.T) {
|
|||||||
if !accept {
|
if !accept {
|
||||||
t.Error("AcceptEvent() accept = false, want true")
|
t.Error("AcceptEvent() accept = false, want true")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Test with muted user
|
||||||
|
s.SetOwnersMuted([][]byte{[]byte("test-pubkey")})
|
||||||
|
accept, notice, afterSave = s.AcceptEvent(ctx, testEvent, req, []byte("test-pubkey"), "127.0.0.1")
|
||||||
|
if accept {
|
||||||
|
t.Error("AcceptEvent() accept = true, want false")
|
||||||
|
}
|
||||||
|
if notice != "event author is banned from this relay" {
|
||||||
|
t.Errorf("AcceptEvent() notice = %v, want 'event author is banned from this relay'", notice)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestAcceptEventWithBlacklist tests the blacklist functionality when auth is not required
|
||||||
|
func TestAcceptEventWithBlacklist(t *testing.T) {
|
||||||
|
// Create a context and HTTP request for testing
|
||||||
|
ctx := context.Bg()
|
||||||
|
req, _ := http.NewRequest("GET", "http://example.com", nil)
|
||||||
|
|
||||||
|
// Test pubkey bytes
|
||||||
|
testPubkey := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20}
|
||||||
|
blockedPubkey := []byte{0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30}
|
||||||
|
|
||||||
|
// Test with public relay mode (auth not required) and no blacklist
|
||||||
|
s := &Server{
|
||||||
|
C: &config.C{
|
||||||
|
AuthRequired: false,
|
||||||
|
},
|
||||||
|
Lists: new(Lists),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create event with test pubkey
|
||||||
|
testEvent := &event.E{}
|
||||||
|
testEvent.Pubkey = testPubkey
|
||||||
|
|
||||||
|
// Should accept when no blacklist
|
||||||
|
accept, notice, _ := s.AcceptEvent(ctx, testEvent, req, nil, "127.0.0.1")
|
||||||
|
if !accept {
|
||||||
|
t.Error("AcceptEvent() accept = false, want true")
|
||||||
|
}
|
||||||
|
if notice != "" {
|
||||||
|
t.Errorf("AcceptEvent() notice = %v, want empty string", notice)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add blacklist with different pubkey
|
||||||
|
s.blacklistPubkeys = [][]byte{blockedPubkey}
|
||||||
|
|
||||||
|
// Should still accept when author not in blacklist
|
||||||
|
accept, notice, _ = s.AcceptEvent(ctx, testEvent, req, nil, "127.0.0.1")
|
||||||
|
if !accept {
|
||||||
|
t.Error("AcceptEvent() accept = false, want true")
|
||||||
|
}
|
||||||
|
if notice != "" {
|
||||||
|
t.Errorf("AcceptEvent() notice = %v, want empty string", notice)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create event with blocked pubkey
|
||||||
|
blockedEvent := &event.E{}
|
||||||
|
blockedEvent.Pubkey = blockedPubkey
|
||||||
|
|
||||||
|
// Should reject when author is in blacklist
|
||||||
|
accept, notice, _ = s.AcceptEvent(ctx, blockedEvent, req, nil, "127.0.0.1")
|
||||||
|
if accept {
|
||||||
|
t.Error("AcceptEvent() accept = true, want false")
|
||||||
|
}
|
||||||
|
if notice != "event author is blacklisted" {
|
||||||
|
t.Errorf("AcceptEvent() notice = %v, want 'event author is blacklisted'", notice)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test with auth required - blacklist should not apply
|
||||||
|
s.C.AuthRequired = true
|
||||||
|
accept, notice, _ = s.AcceptEvent(ctx, blockedEvent, req, nil, "127.0.0.1")
|
||||||
|
if accept {
|
||||||
|
t.Error("AcceptEvent() accept = true, want false")
|
||||||
|
}
|
||||||
|
if notice != "client isn't authed" {
|
||||||
|
t.Errorf("AcceptEvent() notice = %v, want 'client isn't authed'", notice)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,8 +1,18 @@
|
|||||||
package relay
|
package relay
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||||
|
"orly.dev/pkg/encoders/hex"
|
||||||
|
"orly.dev/pkg/protocol/httpauth"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
realy_lol "orly.dev/pkg/version"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
@@ -17,6 +27,21 @@ var (
|
|||||||
NIP20prefixmatcher = regexp.MustCompile(`^\w+: `)
|
NIP20prefixmatcher = regexp.MustCompile(`^\w+: `)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var userAgent = fmt.Sprintf("orly/%s", realy_lol.V)
|
||||||
|
|
||||||
|
type WriteCloser struct {
|
||||||
|
*bytes.Buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WriteCloser) Close() error {
|
||||||
|
w.Buffer.Reset()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewWriteCloser(w []byte) *WriteCloser {
|
||||||
|
return &WriteCloser{bytes.NewBuffer(w)}
|
||||||
|
}
|
||||||
|
|
||||||
// AddEvent processes an incoming event, saves it if valid, and delivers it to
|
// AddEvent processes an incoming event, saves it if valid, and delivers it to
|
||||||
// subscribers.
|
// subscribers.
|
||||||
//
|
//
|
||||||
@@ -55,6 +80,7 @@ var (
|
|||||||
// relevant message.
|
// relevant message.
|
||||||
func (s *Server) AddEvent(
|
func (s *Server) AddEvent(
|
||||||
c context.T, rl relay.I, ev *event.E, hr *http.Request, origin string,
|
c context.T, rl relay.I, ev *event.E, hr *http.Request, origin string,
|
||||||
|
pubkeys [][]byte,
|
||||||
) (accepted bool, message []byte) {
|
) (accepted bool, message []byte) {
|
||||||
|
|
||||||
if ev == nil {
|
if ev == nil {
|
||||||
@@ -85,6 +111,77 @@ func (s *Server) AddEvent(
|
|||||||
}
|
}
|
||||||
// notify subscribers
|
// notify subscribers
|
||||||
s.listeners.Deliver(ev)
|
s.listeners.Deliver(ev)
|
||||||
|
// push the new event to replicas if replicas are configured, and the relay
|
||||||
|
// has an identity key.
|
||||||
|
var err error
|
||||||
|
if len(s.Peers.Addresses) > 0 &&
|
||||||
|
len(s.Peers.I.Sec()) == secp256k1.SecKeyBytesLen {
|
||||||
|
evb := ev.Marshal(nil)
|
||||||
|
var payload io.ReadCloser
|
||||||
|
payload = NewWriteCloser(evb)
|
||||||
|
replica:
|
||||||
|
for i, a := range s.Peers.Addresses {
|
||||||
|
// the peer address index is the same as the list of pubkeys
|
||||||
|
// (they're unpacked from a string containing both, appended at the
|
||||||
|
// same time), so if the pubkeys from the http event endpoint sent
|
||||||
|
// us here matches the index of this address, we can skip it.
|
||||||
|
for _, pk := range pubkeys {
|
||||||
|
if bytes.Equal(s.Peers.Pubkeys[i], pk) {
|
||||||
|
log.I.F(
|
||||||
|
"not sending back to replica that just sent us this event %0x %s",
|
||||||
|
ev.ID, a,
|
||||||
|
)
|
||||||
|
continue replica
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var ur *url.URL
|
||||||
|
if ur, err = url.Parse(a + "/api/event"); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var r *http.Request
|
||||||
|
r = &http.Request{
|
||||||
|
Method: "POST",
|
||||||
|
URL: ur,
|
||||||
|
Proto: "HTTP/1.1",
|
||||||
|
ProtoMajor: 1,
|
||||||
|
ProtoMinor: 1,
|
||||||
|
Header: make(http.Header),
|
||||||
|
Body: payload,
|
||||||
|
ContentLength: int64(len(evb)),
|
||||||
|
Host: ur.Host,
|
||||||
|
}
|
||||||
|
r.Header.Add("User-Agent", userAgent)
|
||||||
|
if err = httpauth.AddNIP98Header(
|
||||||
|
r, ur, "POST", "", s.Peers.I, 0,
|
||||||
|
); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// add this replica's pubkey to the list to prevent re-sending to
|
||||||
|
// other replicas more than twice
|
||||||
|
pubkeys = append(pubkeys, s.Peers.Pub())
|
||||||
|
var pubkeysHeader []byte
|
||||||
|
for j, pk := range pubkeys {
|
||||||
|
pubkeysHeader = hex.EncAppend(pubkeysHeader, pk)
|
||||||
|
if j < len(pubkeys)-1 {
|
||||||
|
pubkeysHeader = append(pubkeysHeader, ':')
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.Header.Add("X-Pubkeys", string(pubkeysHeader))
|
||||||
|
r.GetBody = func() (rc io.ReadCloser, err error) {
|
||||||
|
rc = payload
|
||||||
|
return
|
||||||
|
}
|
||||||
|
client := &http.Client{}
|
||||||
|
if _, err = client.Do(r); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
log.I.F(
|
||||||
|
"event pushed to replica %s\n%s",
|
||||||
|
ur.String(), evb,
|
||||||
|
)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
accepted = true
|
accepted = true
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
39
pkg/app/relay/admin-auth.go
Normal file
39
pkg/app/relay/admin-auth.go
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
package relay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"net/http"
|
||||||
|
"orly.dev/pkg/protocol/httpauth"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Server) AdminAuth(
|
||||||
|
r *http.Request, remote string,
|
||||||
|
tolerance ...time.Duration,
|
||||||
|
) (authed bool, pubkey []byte) {
|
||||||
|
var valid bool
|
||||||
|
var err error
|
||||||
|
var tolerate time.Duration
|
||||||
|
if len(tolerance) > 0 {
|
||||||
|
tolerate = tolerance[0]
|
||||||
|
}
|
||||||
|
if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !valid {
|
||||||
|
log.E.F(
|
||||||
|
"invalid auth %s from %s",
|
||||||
|
r.Header.Get("Authorization"), remote,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, pk := range s.ownersPubkeys {
|
||||||
|
if bytes.Equal(pk, pubkey) {
|
||||||
|
authed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
@@ -7,7 +7,6 @@ import (
|
|||||||
|
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"orly.dev/pkg/utils/log"
|
"orly.dev/pkg/utils/log"
|
||||||
"orly.dev/pkg/utils/lol"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ServiceURL constructs the service URL based on the incoming HTTP request. It
|
// ServiceURL constructs the service URL based on the incoming HTTP request. It
|
||||||
@@ -34,8 +33,6 @@ import (
|
|||||||
//
|
//
|
||||||
// - Returns the constructed URL string.
|
// - Returns the constructed URL string.
|
||||||
func (s *Server) ServiceURL(req *http.Request) (st string) {
|
func (s *Server) ServiceURL(req *http.Request) (st string) {
|
||||||
lol.Tracer("ServiceURL")
|
|
||||||
defer func() { lol.Tracer("end ServiceURL", st) }()
|
|
||||||
if !s.AuthRequired() {
|
if !s.AuthRequired() {
|
||||||
log.T.F("auth not required")
|
log.T.F("auth not required")
|
||||||
return
|
return
|
||||||
|
|||||||
10
pkg/app/relay/config.go
Normal file
10
pkg/app/relay/config.go
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
package relay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"orly.dev/pkg/app/config"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Server) Config() (c *config.C) {
|
||||||
|
c = s.C
|
||||||
|
return
|
||||||
|
}
|
||||||
@@ -3,12 +3,13 @@ package relay
|
|||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"sort"
|
||||||
|
|
||||||
"orly.dev/pkg/interfaces/relay"
|
"orly.dev/pkg/interfaces/relay"
|
||||||
"orly.dev/pkg/protocol/relayinfo"
|
"orly.dev/pkg/protocol/relayinfo"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"orly.dev/pkg/utils/log"
|
"orly.dev/pkg/utils/log"
|
||||||
"orly.dev/pkg/version"
|
"orly.dev/pkg/version"
|
||||||
"sort"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// HandleRelayInfo generates and returns a relay information document in JSON
|
// HandleRelayInfo generates and returns a relay information document in JSON
|
||||||
@@ -44,7 +45,7 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
|
|||||||
// relayinfo.CommandResults,
|
// relayinfo.CommandResults,
|
||||||
relayinfo.ParameterizedReplaceableEvents,
|
relayinfo.ParameterizedReplaceableEvents,
|
||||||
// relayinfo.ExpirationTimestamp,
|
// relayinfo.ExpirationTimestamp,
|
||||||
// relayinfo.ProtectedEvents,
|
relayinfo.ProtectedEvents,
|
||||||
// relayinfo.RelayListMetadata,
|
// relayinfo.RelayListMetadata,
|
||||||
)
|
)
|
||||||
sort.Sort(supportedNIPs)
|
sort.Sort(supportedNIPs)
|
||||||
@@ -52,10 +53,12 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
|
|||||||
info = &relayinfo.T{
|
info = &relayinfo.T{
|
||||||
Name: s.relay.Name(),
|
Name: s.relay.Name(),
|
||||||
Description: version.Description,
|
Description: version.Description,
|
||||||
Nips: supportedNIPs, Software: version.URL,
|
Nips: supportedNIPs,
|
||||||
|
Software: version.URL,
|
||||||
Version: version.V,
|
Version: version.V,
|
||||||
Limitation: relayinfo.Limits{
|
Limitation: relayinfo.Limits{
|
||||||
AuthRequired: s.C.AuthRequired,
|
AuthRequired: s.C.AuthRequired,
|
||||||
|
RestrictedWrites: s.C.AuthRequired,
|
||||||
},
|
},
|
||||||
Icon: "https://cdn.satellite.earth/ac9778868fbf23b63c47c769a74e163377e6ea94d3f0f31711931663d035c4f6.png",
|
Icon: "https://cdn.satellite.earth/ac9778868fbf23b63c47c769a74e163377e6ea94d3f0f31711931663d035c4f6.png",
|
||||||
}
|
}
|
||||||
|
|||||||
39
pkg/app/relay/owners-followed-auth.go
Normal file
39
pkg/app/relay/owners-followed-auth.go
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
package relay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"net/http"
|
||||||
|
"orly.dev/pkg/protocol/httpauth"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Server) OwnersFollowedAuth(
|
||||||
|
r *http.Request, remote string,
|
||||||
|
tolerance ...time.Duration,
|
||||||
|
) (authed bool, pubkey []byte) {
|
||||||
|
var valid bool
|
||||||
|
var err error
|
||||||
|
var tolerate time.Duration
|
||||||
|
if len(tolerance) > 0 {
|
||||||
|
tolerate = tolerance[0]
|
||||||
|
}
|
||||||
|
if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !valid {
|
||||||
|
log.E.F(
|
||||||
|
"invalid auth %s from %s",
|
||||||
|
r.Header.Get("Authorization"), remote,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, pk := range s.ownersFollowed {
|
||||||
|
if bytes.Equal(pk, pubkey) {
|
||||||
|
authed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
6
pkg/app/relay/owners-pubkeys.go
Normal file
6
pkg/app/relay/owners-pubkeys.go
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
package relay
|
||||||
|
|
||||||
|
func (s *Server) OwnersPubkeys() (pks [][]byte) {
|
||||||
|
pks = s.ownersPubkeys
|
||||||
|
return
|
||||||
|
}
|
||||||
72
pkg/app/relay/peers.go
Normal file
72
pkg/app/relay/peers.go
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
package relay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"orly.dev/pkg/crypto/p256k"
|
||||||
|
"orly.dev/pkg/encoders/bech32encoding"
|
||||||
|
"orly.dev/pkg/interfaces/signer"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/keys"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Peers is a structure that keeps the information required when peer
|
||||||
|
// replication is enabled.
|
||||||
|
//
|
||||||
|
// - Addresses are the relay addresses that will be pushed new events when
|
||||||
|
// accepted. From ORLY_PEER_RELAYS first field after the |.
|
||||||
|
//
|
||||||
|
// - Pubkeys are the relay peer public keys that we will send any event to
|
||||||
|
// including privileged type. From ORLY_PEER_RELAYS before the |.
|
||||||
|
//
|
||||||
|
// - I - the signer of this relay, generated from the nsec in
|
||||||
|
// ORLY_SECRET_KEY.
|
||||||
|
type Peers struct {
|
||||||
|
Addresses []string
|
||||||
|
Pubkeys [][]byte
|
||||||
|
signer.I
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init accepts the lists which will come from config.C for peer relay settings
|
||||||
|
// and populate the Peers with this data after decoding it.
|
||||||
|
func (p *Peers) Init(
|
||||||
|
addresses []string, sec string,
|
||||||
|
) (err error) {
|
||||||
|
for _, address := range addresses {
|
||||||
|
if len(address) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
split := strings.Split(address, "@")
|
||||||
|
if len(split) != 2 {
|
||||||
|
log.E.F("invalid peer address: %s", address)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p.Addresses = append(p.Addresses, split[1])
|
||||||
|
var pk []byte
|
||||||
|
if pk, err = keys.DecodeNpubOrHex(split[0]); chk.D(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p.Pubkeys = append(p.Pubkeys, pk)
|
||||||
|
log.I.F("peer %s added; pubkey: %0x", split[1], pk)
|
||||||
|
}
|
||||||
|
if sec == "" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
p.I = &p256k.Signer{}
|
||||||
|
var s []byte
|
||||||
|
if s, err = keys.DecodeNsecOrHex(sec); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = p.I.InitSec(s); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var npub []byte
|
||||||
|
if npub, err = bech32encoding.BinToNpub(p.I.Pub()); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.I.F(
|
||||||
|
"relay peer initialized, relay's npub: %s",
|
||||||
|
npub,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
@@ -1,11 +1,9 @@
|
|||||||
// Package publisher is a singleton package that keeps track of subscriptions in
|
|
||||||
// both websockets and http SSE, including managing the authentication state of
|
|
||||||
// a connection.
|
|
||||||
package publish
|
package publish
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"orly.dev/pkg/encoders/event"
|
"orly.dev/pkg/encoders/event"
|
||||||
"orly.dev/pkg/interfaces/publisher"
|
"orly.dev/pkg/interfaces/publisher"
|
||||||
|
"orly.dev/pkg/interfaces/typer"
|
||||||
)
|
)
|
||||||
|
|
||||||
// S is the control structure for the subscription management scheme.
|
// S is the control structure for the subscription management scheme.
|
||||||
@@ -26,11 +24,10 @@ func (s *S) Type() string { return "publish" }
|
|||||||
func (s *S) Deliver(ev *event.E) {
|
func (s *S) Deliver(ev *event.E) {
|
||||||
for _, p := range s.Publishers {
|
for _, p := range s.Publishers {
|
||||||
p.Deliver(ev)
|
p.Deliver(ev)
|
||||||
return
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *S) Receive(msg publisher.Message) {
|
func (s *S) Receive(msg typer.T) {
|
||||||
t := msg.Type()
|
t := msg.Type()
|
||||||
for _, p := range s.Publishers {
|
for _, p := range s.Publishers {
|
||||||
if p.Type() == t {
|
if p.Type() == t {
|
||||||
|
|||||||
@@ -18,7 +18,9 @@ import (
|
|||||||
"orly.dev/pkg/utils/normalize"
|
"orly.dev/pkg/utils/normalize"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Publish processes and stores an event in the server's storage. It handles different types of events: ephemeral, replaceable, and parameterized replaceable.
|
// Publish processes and stores an event in the server's storage. It handles
|
||||||
|
// different types of events: ephemeral, replaceable, and parameterized
|
||||||
|
// replaceable.
|
||||||
//
|
//
|
||||||
// # Parameters
|
// # Parameters
|
||||||
//
|
//
|
||||||
@@ -60,8 +62,14 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
|||||||
log.T.F("found %d possible duplicate events", len(evs))
|
log.T.F("found %d possible duplicate events", len(evs))
|
||||||
for _, ev := range evs {
|
for _, ev := range evs {
|
||||||
del := true
|
del := true
|
||||||
if bytes.Equal(ev.Id, evt.Id) {
|
if bytes.Equal(ev.ID, evt.ID) {
|
||||||
continue
|
return errorf.W(
|
||||||
|
string(
|
||||||
|
normalize.Duplicate.F(
|
||||||
|
"event already in relay database",
|
||||||
|
),
|
||||||
|
),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
log.I.F(
|
log.I.F(
|
||||||
"maybe replace %s with %s", ev.Serialize(), evt.Serialize(),
|
"maybe replace %s with %s", ev.Serialize(), evt.Serialize(),
|
||||||
@@ -75,6 +83,12 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
|||||||
),
|
),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
// not deleting these events because some clients are retarded
|
||||||
|
// and the query will pull the new one, but a backup can recover
|
||||||
|
// the data of old ones
|
||||||
|
if ev.Kind.IsDirectoryEvent() {
|
||||||
|
del = false
|
||||||
|
}
|
||||||
if evt.Kind.Equal(kind.FollowList) {
|
if evt.Kind.Equal(kind.FollowList) {
|
||||||
// if the event is from someone on ownersFollowed or
|
// if the event is from someone on ownersFollowed or
|
||||||
// followedFollows, for now add to this list so they're
|
// followedFollows, for now add to this list so they're
|
||||||
@@ -88,7 +102,7 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
|||||||
}
|
}
|
||||||
if isFollowed {
|
if isFollowed {
|
||||||
if _, _, err = sto.SaveEvent(
|
if _, _, err = sto.SaveEvent(
|
||||||
c, evt,
|
c, evt, false, nil,
|
||||||
); err != nil && !errors.Is(
|
); err != nil && !errors.Is(
|
||||||
err, store.ErrDupEvent,
|
err, store.ErrDupEvent,
|
||||||
) {
|
) {
|
||||||
@@ -99,7 +113,7 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
|||||||
err = nil
|
err = nil
|
||||||
}
|
}
|
||||||
// event has been saved and lists updated.
|
// event has been saved and lists updated.
|
||||||
return
|
// return
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -110,7 +124,7 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
|||||||
for _, pk := range owners {
|
for _, pk := range owners {
|
||||||
if bytes.Equal(evt.Pubkey, pk) {
|
if bytes.Equal(evt.Pubkey, pk) {
|
||||||
if _, _, err = sto.SaveEvent(
|
if _, _, err = sto.SaveEvent(
|
||||||
c, evt,
|
c, evt, false, nil,
|
||||||
); err != nil && !errors.Is(
|
); err != nil && !errors.Is(
|
||||||
err, store.ErrDupEvent,
|
err, store.ErrDupEvent,
|
||||||
) {
|
) {
|
||||||
@@ -121,7 +135,7 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
|||||||
err = nil
|
err = nil
|
||||||
}
|
}
|
||||||
// event has been saved and lists updated.
|
// event has been saved and lists updated.
|
||||||
return
|
// return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -222,10 +236,17 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if _, _, err = sto.SaveEvent(c, evt); err != nil && !errors.Is(
|
if _, _, err = sto.SaveEvent(
|
||||||
|
c, evt, false, append(s.Peers.Pubkeys, s.ownersPubkeys...),
|
||||||
|
); err != nil && !errors.Is(
|
||||||
err, store.ErrDupEvent,
|
err, store.ErrDupEvent,
|
||||||
) {
|
) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
log.T.C(
|
||||||
|
func() string {
|
||||||
|
return fmt.Sprintf("saved event:\n%s", evt.Serialize())
|
||||||
|
},
|
||||||
|
)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,10 +6,13 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"orly.dev/pkg/protocol/socketapi"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"orly.dev/pkg/protocol/openapi"
|
||||||
|
"orly.dev/pkg/protocol/socketapi"
|
||||||
|
|
||||||
"orly.dev/pkg/app/config"
|
"orly.dev/pkg/app/config"
|
||||||
"orly.dev/pkg/app/relay/helpers"
|
"orly.dev/pkg/app/relay/helpers"
|
||||||
"orly.dev/pkg/app/relay/options"
|
"orly.dev/pkg/app/relay/options"
|
||||||
@@ -18,6 +21,7 @@ import (
|
|||||||
"orly.dev/pkg/protocol/servemux"
|
"orly.dev/pkg/protocol/servemux"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"orly.dev/pkg/utils/context"
|
"orly.dev/pkg/utils/context"
|
||||||
|
"orly.dev/pkg/utils/keys"
|
||||||
"orly.dev/pkg/utils/log"
|
"orly.dev/pkg/utils/log"
|
||||||
|
|
||||||
"github.com/rs/cors"
|
"github.com/rs/cors"
|
||||||
@@ -35,8 +39,11 @@ type Server struct {
|
|||||||
mux *servemux.S
|
mux *servemux.S
|
||||||
httpServer *http.Server
|
httpServer *http.Server
|
||||||
listeners *publish.S
|
listeners *publish.S
|
||||||
|
blacklistPubkeys [][]byte
|
||||||
*config.C
|
*config.C
|
||||||
*Lists
|
*Lists
|
||||||
|
*Peers
|
||||||
|
Mux *servemux.S
|
||||||
}
|
}
|
||||||
|
|
||||||
// ServerParams represents the configuration parameters for initializing a
|
// ServerParams represents the configuration parameters for initializing a
|
||||||
@@ -48,6 +55,7 @@ type ServerParams struct {
|
|||||||
Rl relay.I
|
Rl relay.I
|
||||||
DbPath string
|
DbPath string
|
||||||
MaxLimit int
|
MaxLimit int
|
||||||
|
Mux *servemux.S
|
||||||
*config.C
|
*config.C
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -78,7 +86,9 @@ type ServerParams struct {
|
|||||||
// - Sets up a ServeMux for handling HTTP requests.
|
// - Sets up a ServeMux for handling HTTP requests.
|
||||||
//
|
//
|
||||||
// - Initializes the relay, starting its operation in a separate goroutine.
|
// - Initializes the relay, starting its operation in a separate goroutine.
|
||||||
func NewServer(sp *ServerParams, opts ...options.O) (s *Server, err error) {
|
func NewServer(
|
||||||
|
sp *ServerParams, serveMux *servemux.S, opts ...options.O,
|
||||||
|
) (s *Server, err error) {
|
||||||
op := options.Default()
|
op := options.Default()
|
||||||
for _, opt := range opts {
|
for _, opt := range opts {
|
||||||
opt(op)
|
opt(op)
|
||||||
@@ -88,7 +98,6 @@ func NewServer(sp *ServerParams, opts ...options.O) (s *Server, err error) {
|
|||||||
return nil, fmt.Errorf("storage init: %w", err)
|
return nil, fmt.Errorf("storage init: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
serveMux := servemux.NewServeMux()
|
|
||||||
s = &Server{
|
s = &Server{
|
||||||
Ctx: sp.Ctx,
|
Ctx: sp.Ctx,
|
||||||
Cancel: sp.Cancel,
|
Cancel: sp.Cancel,
|
||||||
@@ -97,8 +106,23 @@ func NewServer(sp *ServerParams, opts ...options.O) (s *Server, err error) {
|
|||||||
options: op,
|
options: op,
|
||||||
C: sp.C,
|
C: sp.C,
|
||||||
Lists: new(Lists),
|
Lists: new(Lists),
|
||||||
|
Peers: new(Peers),
|
||||||
}
|
}
|
||||||
s.listeners = publish.New(socketapi.New(s))
|
// Parse blacklist pubkeys
|
||||||
|
for _, v := range s.C.Blacklist {
|
||||||
|
if len(v) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
var pk []byte
|
||||||
|
if pk, err = keys.DecodeNpubOrHex(v); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
s.blacklistPubkeys = append(s.blacklistPubkeys, pk)
|
||||||
|
}
|
||||||
|
chk.E(
|
||||||
|
s.Peers.Init(sp.C.PeerRelays, sp.C.RelaySecret),
|
||||||
|
)
|
||||||
|
s.listeners = publish.New(socketapi.New(s), openapi.NewPublisher(s))
|
||||||
go func() {
|
go func() {
|
||||||
if err := s.relay.Init(); chk.E(err) {
|
if err := s.relay.Init(); chk.E(err) {
|
||||||
s.Shutdown()
|
s.Shutdown()
|
||||||
@@ -130,6 +154,21 @@ func NewServer(sp *ServerParams, opts ...options.O) (s *Server, err error) {
|
|||||||
//
|
//
|
||||||
// - For all other paths, delegates to the internal mux's ServeHTTP method.
|
// - For all other paths, delegates to the internal mux's ServeHTTP method.
|
||||||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
|
c := s.Config()
|
||||||
|
remote := helpers.GetRemoteFromReq(r)
|
||||||
|
var whitelisted bool
|
||||||
|
if len(c.Whitelist) > 0 {
|
||||||
|
for _, addr := range c.Whitelist {
|
||||||
|
if strings.HasPrefix(remote, addr) {
|
||||||
|
whitelisted = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
whitelisted = true
|
||||||
|
}
|
||||||
|
if !whitelisted {
|
||||||
|
return
|
||||||
|
}
|
||||||
// standard nostr protocol only governs the "root" path of the relay and
|
// standard nostr protocol only governs the "root" path of the relay and
|
||||||
// websockets
|
// websockets
|
||||||
if r.URL.Path == "/" {
|
if r.URL.Path == "/" {
|
||||||
@@ -182,6 +221,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||||||
func (s *Server) Start(
|
func (s *Server) Start(
|
||||||
host string, port int, started ...chan bool,
|
host string, port int, started ...chan bool,
|
||||||
) (err error) {
|
) (err error) {
|
||||||
|
log.I.F("running spider every %v", s.C.SpiderTime)
|
||||||
if len(s.C.Owners) > 0 {
|
if len(s.C.Owners) > 0 {
|
||||||
// start up spider
|
// start up spider
|
||||||
if err = s.Spider(s.C.Private); chk.E(err) {
|
if err = s.Spider(s.C.Private); chk.E(err) {
|
||||||
@@ -191,7 +231,7 @@ func (s *Server) Start(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
// start up a spider run to trigger every 30 minutes
|
// start up a spider run to trigger every 30 minutes
|
||||||
ticker := time.NewTicker(time.Hour)
|
ticker := time.NewTicker(s.C.SpiderTime)
|
||||||
go func() {
|
go func() {
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
@@ -209,8 +249,8 @@ func (s *Server) Start(
|
|||||||
}()
|
}()
|
||||||
addr := net.JoinHostPort(host, strconv.Itoa(port))
|
addr := net.JoinHostPort(host, strconv.Itoa(port))
|
||||||
log.I.F("starting relay listener at %s", addr)
|
log.I.F("starting relay listener at %s", addr)
|
||||||
ln, err := net.Listen("tcp", addr)
|
var ln net.Listener
|
||||||
if err != nil {
|
if ln, err = net.Listen("tcp", addr); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
s.httpServer = &http.Server{
|
s.httpServer = &http.Server{
|
||||||
|
|||||||
@@ -1,61 +1,140 @@
|
|||||||
package relay
|
package relay
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"runtime/debug"
|
||||||
|
"time"
|
||||||
|
|
||||||
"orly.dev/pkg/crypto/ec/schnorr"
|
"orly.dev/pkg/crypto/ec/schnorr"
|
||||||
|
"orly.dev/pkg/database/indexes/types"
|
||||||
"orly.dev/pkg/encoders/event"
|
"orly.dev/pkg/encoders/event"
|
||||||
"orly.dev/pkg/encoders/filter"
|
"orly.dev/pkg/encoders/filter"
|
||||||
"orly.dev/pkg/encoders/hex"
|
"orly.dev/pkg/encoders/hex"
|
||||||
"orly.dev/pkg/encoders/kind"
|
|
||||||
"orly.dev/pkg/encoders/kinds"
|
"orly.dev/pkg/encoders/kinds"
|
||||||
"orly.dev/pkg/encoders/tag"
|
"orly.dev/pkg/encoders/tag"
|
||||||
|
"orly.dev/pkg/encoders/timestamp"
|
||||||
"orly.dev/pkg/protocol/ws"
|
"orly.dev/pkg/protocol/ws"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"orly.dev/pkg/utils/context"
|
"orly.dev/pkg/utils/context"
|
||||||
|
"orly.dev/pkg/utils/errorf"
|
||||||
"orly.dev/pkg/utils/log"
|
"orly.dev/pkg/utils/log"
|
||||||
"sort"
|
"orly.dev/pkg/utils/values"
|
||||||
"sync"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// IdPkTs is a map of event IDs to their id, pubkey, kind, and timestamp
|
||||||
|
// This is used to reduce memory usage by storing only the essential information
|
||||||
|
// instead of the full events
|
||||||
|
type IdPkTs struct {
|
||||||
|
Id []byte
|
||||||
|
Pubkey []byte
|
||||||
|
Kind uint16
|
||||||
|
Timestamp int64
|
||||||
|
}
|
||||||
|
|
||||||
func (s *Server) SpiderFetch(
|
func (s *Server) SpiderFetch(
|
||||||
k *kind.T, noFetch bool, pubkeys ...[]byte,
|
k *kinds.T, noFetch, noExtract bool, pubkeys ...[]byte,
|
||||||
) (pks [][]byte, err error) {
|
) (pks [][]byte, err error) {
|
||||||
|
// Map to store id, pubkey, kind, and timestamp for each event
|
||||||
|
// Key is a combination of pubkey and kind for deduplication
|
||||||
|
pkKindMap := make(map[string]*IdPkTs)
|
||||||
|
// Map to collect pubkeys from p tags
|
||||||
|
pkMap := make(map[string]struct{})
|
||||||
|
|
||||||
// first search the local database
|
// first search the local database
|
||||||
pkList := tag.New(pubkeys...)
|
pkList := tag.New(pubkeys...)
|
||||||
f := &filter.F{
|
f := &filter.F{
|
||||||
Kinds: kinds.New(k),
|
Kinds: k,
|
||||||
Authors: pkList,
|
Authors: pkList,
|
||||||
}
|
}
|
||||||
var evs event.S
|
|
||||||
if evs, err = s.Storage().QueryEvents(s.Ctx, f); chk.E(err) {
|
var kindsList string
|
||||||
|
if k != nil {
|
||||||
|
for i, kk := range k.K {
|
||||||
|
if i > 0 {
|
||||||
|
kindsList += ","
|
||||||
|
}
|
||||||
|
kindsList += kk.Name()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
kindsList = "*"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query local database
|
||||||
|
var localEvents event.S
|
||||||
|
if localEvents, err = s.Storage().QueryEvents(s.Ctx, f); chk.E(err) {
|
||||||
// none were found, so we need to scan the spiders
|
// none were found, so we need to scan the spiders
|
||||||
err = nil
|
err = nil
|
||||||
}
|
}
|
||||||
if len(evs) < len(pubkeys) && !noFetch {
|
|
||||||
|
// Process local events
|
||||||
|
for _, ev := range localEvents {
|
||||||
|
// Create a key based on pubkey and kind for deduplication
|
||||||
|
pkKindKey := string(ev.Pubkey) + string(ev.Kind.Marshal(nil))
|
||||||
|
|
||||||
|
// Check if we already have an event with this pubkey and kind
|
||||||
|
existing, exists := pkKindMap[pkKindKey]
|
||||||
|
|
||||||
|
// If it doesn't exist or the new event is newer, store it
|
||||||
|
if !exists || ev.CreatedAtInt64() > existing.Timestamp {
|
||||||
|
pkKindMap[pkKindKey] = &IdPkTs{
|
||||||
|
Id: ev.ID,
|
||||||
|
Pubkey: ev.Pubkey,
|
||||||
|
Kind: ev.Kind.ToU16(),
|
||||||
|
Timestamp: ev.CreatedAtInt64(),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extract p tags if not in noExtract mode
|
||||||
|
if !noExtract {
|
||||||
|
t := ev.Tags.GetAll(tag.New("p"))
|
||||||
|
for _, tt := range t.ToSliceOfTags() {
|
||||||
|
pkh := tt.Value()
|
||||||
|
if len(pkh) != 2*schnorr.PubKeyBytesLen {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
pk := make([]byte, schnorr.PubKeyBytesLen)
|
||||||
|
if _, err = hex.DecBytes(pk, pkh); err != nil {
|
||||||
|
err = nil
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
pkMap[string(pk)] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Nil the event to free memory
|
||||||
|
ev = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
log.I.F("%d events found of type %s", len(pkKindMap), kindsList)
|
||||||
|
|
||||||
|
if !noFetch && len(s.C.SpiderSeeds) > 0 {
|
||||||
// we need to search the spider seeds.
|
// we need to search the spider seeds.
|
||||||
// Break up pubkeys into batches of 512
|
// Break up pubkeys into batches of 128
|
||||||
for i := 0; i < len(pubkeys); i += 512 {
|
for i := 0; i < len(pubkeys); i += 128 {
|
||||||
end := i + 512
|
end := i + 128
|
||||||
if end > len(pubkeys) {
|
if end > len(pubkeys) {
|
||||||
end = len(pubkeys)
|
end = len(pubkeys)
|
||||||
}
|
}
|
||||||
batchPubkeys := pubkeys[i:end]
|
batchPubkeys := pubkeys[i:end]
|
||||||
log.I.F(
|
log.I.F(
|
||||||
"processing batch %d to %d of %d for kind %s",
|
"processing batch %d to %d of %d for kind %s",
|
||||||
i, end, len(pubkeys), k.Name(),
|
i, end, len(pubkeys), kindsList,
|
||||||
)
|
)
|
||||||
batchPkList := tag.New(batchPubkeys...)
|
batchPkList := tag.New(batchPubkeys...)
|
||||||
batchFilter := &filter.F{
|
lim := uint(batchPkList.Len())
|
||||||
Kinds: kinds.New(k),
|
l := &lim
|
||||||
Authors: batchPkList,
|
var since *timestamp.T
|
||||||
|
if k == nil {
|
||||||
|
since = timestamp.FromTime(time.Now().Add(-1 * s.C.SpiderTime * 3 / 2))
|
||||||
|
} else {
|
||||||
|
l = values.ToUintPointer(512)
|
||||||
|
}
|
||||||
|
batchFilter := &filter.F{
|
||||||
|
Kinds: k,
|
||||||
|
Authors: batchPkList,
|
||||||
|
Since: since,
|
||||||
|
Limit: l,
|
||||||
}
|
}
|
||||||
|
|
||||||
var mx sync.Mutex
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
|
|
||||||
for _, seed := range s.C.SpiderSeeds {
|
for _, seed := range s.C.SpiderSeeds {
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
defer wg.Done()
|
|
||||||
select {
|
select {
|
||||||
case <-s.Ctx.Done():
|
case <-s.Ctx.Done():
|
||||||
return
|
return
|
||||||
@@ -67,7 +146,7 @@ func (s *Server) SpiderFetch(
|
|||||||
context.Bg(), seed,
|
context.Bg(), seed,
|
||||||
); chk.E(err) {
|
); chk.E(err) {
|
||||||
err = nil
|
err = nil
|
||||||
return
|
continue
|
||||||
}
|
}
|
||||||
if evss, err = cli.QuerySync(
|
if evss, err = cli.QuerySync(
|
||||||
context.Bg(), batchFilter,
|
context.Bg(), batchFilter,
|
||||||
@@ -75,42 +154,42 @@ func (s *Server) SpiderFetch(
|
|||||||
err = nil
|
err = nil
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
mx.Lock()
|
// Process each event immediately
|
||||||
for _, ev := range evss {
|
for i, ev := range evss {
|
||||||
evs = append(evs, ev)
|
// log.I.S(ev)
|
||||||
|
// Create a key based on pubkey and kind for deduplication
|
||||||
|
pkKindKey := string(ev.Pubkey) + string(ev.Kind.Marshal(nil))
|
||||||
|
// Check if we already have an event with this pubkey and kind
|
||||||
|
existing, exists := pkKindMap[pkKindKey]
|
||||||
|
// If it doesn't exist or the new event is newer, store it and save to database
|
||||||
|
if !exists || ev.CreatedAtInt64() > existing.Timestamp {
|
||||||
|
var ser *types.Uint40
|
||||||
|
if ser, err = s.Storage().GetSerialById(ev.ID); err == nil && ser != nil {
|
||||||
|
err = errorf.E("event already exists: %0x", ev.ID)
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
// verify the signature
|
||||||
|
var valid bool
|
||||||
|
if valid, err = ev.Verify(); chk.E(err) || !valid {
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
mx.Unlock()
|
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
wg.Wait()
|
// Save the event to the database
|
||||||
}
|
if _, _, err = s.Storage().SaveEvent(
|
||||||
// save the events to the database
|
s.Ctx, ev, true, nil,
|
||||||
for _, ev := range evs {
|
); chk.E(err) {
|
||||||
if _, _, err = s.Storage().SaveEvent(s.Ctx, ev); chk.E(err) {
|
|
||||||
err = nil
|
err = nil
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
// Store the essential information
|
||||||
|
pkKindMap[pkKindKey] = &IdPkTs{
|
||||||
|
Id: ev.ID,
|
||||||
|
Pubkey: ev.Pubkey,
|
||||||
|
Kind: ev.Kind.ToU16(),
|
||||||
|
Timestamp: ev.CreatedAtInt64(),
|
||||||
}
|
}
|
||||||
}
|
// Extract p tags if not in noExtract mode
|
||||||
// deduplicate and take the newest
|
if !noExtract {
|
||||||
var tmp event.S
|
|
||||||
evMap := make(map[string]event.S)
|
|
||||||
for _, ev := range evs {
|
|
||||||
evMap[ev.PubKeyString()] = append(evMap[ev.PubKeyString()], ev)
|
|
||||||
}
|
|
||||||
for _, evm := range evMap {
|
|
||||||
if len(evm) < 1 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if len(evm) > 1 {
|
|
||||||
sort.Sort(evm)
|
|
||||||
}
|
|
||||||
tmp = append(tmp, evm[0])
|
|
||||||
}
|
|
||||||
evs = tmp
|
|
||||||
// we have all we're going to get now
|
|
||||||
pkMap := make(map[string]struct{})
|
|
||||||
for _, ev := range evs {
|
|
||||||
t := ev.Tags.GetAll(tag.New("p"))
|
t := ev.Tags.GetAll(tag.New("p"))
|
||||||
for _, tt := range t.ToSliceOfTags() {
|
for _, tt := range t.ToSliceOfTags() {
|
||||||
pkh := tt.Value()
|
pkh := tt.Value()
|
||||||
@@ -118,13 +197,27 @@ func (s *Server) SpiderFetch(
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
pk := make([]byte, schnorr.PubKeyBytesLen)
|
pk := make([]byte, schnorr.PubKeyBytesLen)
|
||||||
if _, err = hex.DecBytes(pk, pkh); chk.E(err) {
|
if _, err = hex.DecBytes(pk, pkh); err != nil {
|
||||||
err = nil
|
err = nil
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
pkMap[string(pk)] = struct{}{}
|
pkMap[string(pk)] = struct{}{}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
// Nil the event in the slice to free memory
|
||||||
|
evss[i] = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
chk.E(s.Storage().Sync())
|
||||||
|
debug.FreeOSMemory()
|
||||||
|
// If we're in noExtract mode, just return
|
||||||
|
if noExtract {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Convert the collected pubkeys to the return format
|
||||||
for pk := range pkMap {
|
for pk := range pkMap {
|
||||||
pks = append(pks, []byte(pk))
|
pks = append(pks, []byte(pk))
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,41 +2,20 @@ package relay
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"orly.dev/pkg/crypto/ec/bech32"
|
|
||||||
"orly.dev/pkg/encoders/bech32encoding"
|
|
||||||
"orly.dev/pkg/encoders/hex"
|
|
||||||
"orly.dev/pkg/encoders/kind"
|
"orly.dev/pkg/encoders/kind"
|
||||||
|
"orly.dev/pkg/encoders/kinds"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/keys"
|
||||||
"orly.dev/pkg/utils/log"
|
"orly.dev/pkg/utils/log"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (s *Server) Spider(noFetch ...bool) (err error) {
|
func (s *Server) Spider(noFetch ...bool) (err error) {
|
||||||
var ownersPubkeys [][]byte
|
var ownersPubkeys [][]byte
|
||||||
for _, v := range s.C.Owners {
|
for _, v := range s.C.Owners {
|
||||||
var prf []byte
|
|
||||||
var pk []byte
|
var pk []byte
|
||||||
var bits5 []byte
|
if pk, err = keys.DecodeNpubOrHex(v); chk.E(err) {
|
||||||
if prf, bits5, err = bech32.DecodeNoLimit([]byte(v)); chk.D(err) {
|
|
||||||
// try hex then
|
|
||||||
if _, err = hex.DecBytes(pk, []byte(v)); chk.E(err) {
|
|
||||||
log.W.F(
|
|
||||||
"owner key %s is neither bech32 npub nor hex",
|
|
||||||
v,
|
|
||||||
)
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
if !bytes.Equal(prf, bech32encoding.NpubHRP) {
|
|
||||||
log.W.F(
|
|
||||||
"owner key %s is neither bech32 npub nor hex",
|
|
||||||
v,
|
|
||||||
)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if pk, err = bech32.ConvertBits(bits5, 5, 8, false); chk.E(err) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// owners themselves are on the OwnersFollowed list as first level
|
// owners themselves are on the OwnersFollowed list as first level
|
||||||
ownersPubkeys = append(ownersPubkeys, pk)
|
ownersPubkeys = append(ownersPubkeys, pk)
|
||||||
}
|
}
|
||||||
@@ -52,21 +31,22 @@ func (s *Server) Spider(noFetch ...bool) (err error) {
|
|||||||
log.I.F("getting ownersFollowed")
|
log.I.F("getting ownersFollowed")
|
||||||
var ownersFollowed [][]byte
|
var ownersFollowed [][]byte
|
||||||
if ownersFollowed, err = s.SpiderFetch(
|
if ownersFollowed, err = s.SpiderFetch(
|
||||||
kind.FollowList, dontFetch, ownersPubkeys...,
|
kinds.New(kind.FollowList), dontFetch, false, ownersPubkeys...,
|
||||||
); chk.E(err) {
|
); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
// log.I.S(ownersFollowed)
|
||||||
log.I.F("getting followedFollows")
|
log.I.F("getting followedFollows")
|
||||||
var followedFollows [][]byte
|
var followedFollows [][]byte
|
||||||
if followedFollows, err = s.SpiderFetch(
|
if followedFollows, err = s.SpiderFetch(
|
||||||
kind.FollowList, dontFetch, ownersFollowed...,
|
kinds.New(kind.FollowList), dontFetch, false, ownersFollowed...,
|
||||||
); chk.E(err) {
|
); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
log.I.F("getting ownersMuted")
|
log.I.F("getting ownersMuted")
|
||||||
var ownersMuted [][]byte
|
var ownersMuted [][]byte
|
||||||
if ownersMuted, err = s.SpiderFetch(
|
if ownersMuted, err = s.SpiderFetch(
|
||||||
kind.MuteList, dontFetch, ownersPubkeys...,
|
kinds.New(kind.MuteList), dontFetch, false, ownersPubkeys...,
|
||||||
); chk.E(err) {
|
); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -74,23 +54,18 @@ func (s *Server) Spider(noFetch ...bool) (err error) {
|
|||||||
// list
|
// list
|
||||||
filteredFollows := make([][]byte, 0, len(followedFollows))
|
filteredFollows := make([][]byte, 0, len(followedFollows))
|
||||||
for _, follow := range followedFollows {
|
for _, follow := range followedFollows {
|
||||||
found := false
|
|
||||||
for _, owner := range ownersFollowed {
|
for _, owner := range ownersFollowed {
|
||||||
if bytes.Equal(follow, owner) {
|
if bytes.Equal(follow, owner) {
|
||||||
found = true
|
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for _, owner := range ownersMuted {
|
for _, owner := range ownersMuted {
|
||||||
if bytes.Equal(follow, owner) {
|
if bytes.Equal(follow, owner) {
|
||||||
found = true
|
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !found {
|
|
||||||
filteredFollows = append(filteredFollows, follow)
|
filteredFollows = append(filteredFollows, follow)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
followedFollows = filteredFollows
|
followedFollows = filteredFollows
|
||||||
own := "owner"
|
own := "owner"
|
||||||
if len(ownersPubkeys) > 1 {
|
if len(ownersPubkeys) > 1 {
|
||||||
@@ -115,19 +90,45 @@ func (s *Server) Spider(noFetch ...bool) (err error) {
|
|||||||
len(followedFollows), folfol,
|
len(followedFollows), folfol,
|
||||||
len(ownersMuted), mut,
|
len(ownersMuted), mut,
|
||||||
)
|
)
|
||||||
// add the owners
|
// add the owners to the ownersFollowed
|
||||||
ownersFollowed = append(ownersFollowed, ownersPubkeys...)
|
ownersFollowed = append(ownersFollowed, ownersPubkeys...)
|
||||||
s.SetOwnersPubkeys(ownersPubkeys)
|
s.SetOwnersPubkeys(ownersPubkeys)
|
||||||
s.SetOwnersFollowed(ownersFollowed)
|
s.SetOwnersFollowed(ownersFollowed)
|
||||||
s.SetFollowedFollows(followedFollows)
|
s.SetFollowedFollows(followedFollows)
|
||||||
s.SetOwnersMuted(ownersMuted)
|
s.SetOwnersMuted(ownersMuted)
|
||||||
// lastly, update users profile metadata and relay lists in the background
|
// lastly, update all followed users new events in the background
|
||||||
if !dontFetch {
|
if !dontFetch && s.C.SpiderType != "none" {
|
||||||
go func() {
|
go func() {
|
||||||
everyone := append(ownersFollowed, followedFollows...)
|
var k *kinds.T
|
||||||
s.SpiderFetch(kind.ProfileMetadata, false, everyone...)
|
if s.C.SpiderType == "directory" {
|
||||||
s.SpiderFetch(kind.RelayListMetadata, false, everyone...)
|
k = kinds.New(
|
||||||
s.SpiderFetch(kind.DMRelaysList, false, everyone...)
|
kind.ProfileMetadata, kind.RelayListMetadata,
|
||||||
|
kind.DMRelaysList, kind.MuteList,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
everyone := ownersFollowed
|
||||||
|
if s.C.SpiderSecondDegree &&
|
||||||
|
(s.C.SpiderType == "follows" ||
|
||||||
|
s.C.SpiderType == "directory") {
|
||||||
|
everyone = append(ownersFollowed, followedFollows...)
|
||||||
|
}
|
||||||
|
_, _ = s.SpiderFetch(
|
||||||
|
k, false, true, everyone...,
|
||||||
|
)
|
||||||
|
// get the directory events also for second degree if spider
|
||||||
|
// type is directory but second degree is disabled, so all
|
||||||
|
// directory data is available for all whitelisted users.
|
||||||
|
if !s.C.SpiderSecondDegree && s.C.SpiderType == "directory" {
|
||||||
|
k = kinds.New(
|
||||||
|
kind.ProfileMetadata, kind.RelayListMetadata,
|
||||||
|
kind.DMRelaysList, kind.MuteList,
|
||||||
|
)
|
||||||
|
everyone = append(ownersFollowed, followedFollows...)
|
||||||
|
_, _ = s.SpiderFetch(
|
||||||
|
k, false, true, everyone...,
|
||||||
|
)
|
||||||
|
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"orly.dev/pkg/encoders/eventid"
|
"orly.dev/pkg/encoders/eventid"
|
||||||
"orly.dev/pkg/encoders/filter"
|
"orly.dev/pkg/encoders/filter"
|
||||||
"orly.dev/pkg/interfaces/store"
|
"orly.dev/pkg/interfaces/store"
|
||||||
|
"orly.dev/pkg/protocol/servemux"
|
||||||
"orly.dev/pkg/utils/context"
|
"orly.dev/pkg/utils/context"
|
||||||
"orly.dev/pkg/utils/units"
|
"orly.dev/pkg/utils/units"
|
||||||
"testing"
|
"testing"
|
||||||
@@ -14,6 +15,7 @@ import (
|
|||||||
|
|
||||||
func startTestRelay(c context.T, t *testing.T, tr *testRelay) *Server {
|
func startTestRelay(c context.T, t *testing.T, tr *testRelay) *Server {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
serveMux := servemux.NewServeMux()
|
||||||
srv, _ := NewServer(
|
srv, _ := NewServer(
|
||||||
&ServerParams{
|
&ServerParams{
|
||||||
Ctx: c,
|
Ctx: c,
|
||||||
@@ -21,6 +23,7 @@ func startTestRelay(c context.T, t *testing.T, tr *testRelay) *Server {
|
|||||||
Rl: tr,
|
Rl: tr,
|
||||||
MaxLimit: 500 * units.Kb,
|
MaxLimit: 500 * units.Kb,
|
||||||
},
|
},
|
||||||
|
serveMux,
|
||||||
)
|
)
|
||||||
started := make(chan bool)
|
started := make(chan bool)
|
||||||
go srv.Start("127.0.0.1", 0, started)
|
go srv.Start("127.0.0.1", 0, started)
|
||||||
|
|||||||
50
pkg/app/relay/user-auth.go
Normal file
50
pkg/app/relay/user-auth.go
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
package relay
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"net/http"
|
||||||
|
"orly.dev/pkg/protocol/httpauth"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func (s *Server) UserAuth(
|
||||||
|
r *http.Request, remote string, tolerance ...time.Duration,
|
||||||
|
) (authed bool, pubkey []byte, super bool) {
|
||||||
|
var valid bool
|
||||||
|
var err error
|
||||||
|
var tolerate time.Duration
|
||||||
|
if len(tolerance) > 0 {
|
||||||
|
tolerate = tolerance[0]
|
||||||
|
}
|
||||||
|
if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !valid {
|
||||||
|
log.E.F(
|
||||||
|
"invalid auth %s from %s",
|
||||||
|
r.Header.Get("Authorization"), remote,
|
||||||
|
)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, pk := range append(s.ownersFollowed, s.followedFollows...) {
|
||||||
|
if bytes.Equal(pk, pubkey) {
|
||||||
|
authed = true
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// if the client is one of the relay cluster replicas, also set the super
|
||||||
|
// flag to indicate that privilege checks can be bypassed.
|
||||||
|
if len(s.Peers.Pubkeys) > 0 {
|
||||||
|
for _, pk := range s.Peers.Pubkeys {
|
||||||
|
if bytes.Equal(pk, pubkey) {
|
||||||
|
authed = true
|
||||||
|
super = true
|
||||||
|
pubkey = pk
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
@@ -5,42 +5,16 @@ import (
|
|||||||
"crypto/aes"
|
"crypto/aes"
|
||||||
"crypto/cipher"
|
"crypto/cipher"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"orly.dev/pkg/crypto/p256k"
|
"lukechampine.com/frand"
|
||||||
"orly.dev/pkg/encoders/hex"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"orly.dev/pkg/utils/errorf"
|
"orly.dev/pkg/utils/errorf"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"lukechampine.com/frand"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ComputeSharedSecret returns a shared secret key used to encrypt messages. The private and public keys should be hex
|
|
||||||
// encoded. Uses the Diffie-Hellman key exchange (ECDH) (RFC 4753).
|
|
||||||
func ComputeSharedSecret(pkh, skh string) (sharedSecret []byte, err error) {
|
|
||||||
var skb, pkb []byte
|
|
||||||
if skb, err = hex.Dec(skh); chk.E(err) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if pkb, err = hex.Dec(pkh); chk.E(err) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
signer := new(p256k.Signer)
|
|
||||||
if err = signer.InitSec(skb); chk.E(err) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if sharedSecret, err = signer.ECDH(pkb); chk.E(err) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// EncryptNip4 encrypts message with key using aes-256-cbc. key should be the shared secret generated by
|
// EncryptNip4 encrypts message with key using aes-256-cbc. key should be the shared secret generated by
|
||||||
// ComputeSharedSecret.
|
// ComputeSharedSecret.
|
||||||
//
|
//
|
||||||
// Returns: base64(encrypted_bytes) + "?iv=" + base64(initialization_vector).
|
// Returns: base64(encrypted_bytes) + "?iv=" + base64(initialization_vector).
|
||||||
//
|
func EncryptNip4(msg, key []byte) (ct []byte, err error) {
|
||||||
// Deprecated: upgrade to using Decrypt with the NIP-44 algorithm.
|
|
||||||
func EncryptNip4(msg string, key []byte) (ct []byte, err error) {
|
|
||||||
// block size is 16 bytes
|
// block size is 16 bytes
|
||||||
iv := make([]byte, 16)
|
iv := make([]byte, 16)
|
||||||
if _, err = frand.Read(iv); chk.E(err) {
|
if _, err = frand.Read(iv); chk.E(err) {
|
||||||
@@ -71,22 +45,20 @@ func EncryptNip4(msg string, key []byte) (ct []byte, err error) {
|
|||||||
|
|
||||||
// DecryptNip4 decrypts a content string using the shared secret key. The inverse operation to message ->
|
// DecryptNip4 decrypts a content string using the shared secret key. The inverse operation to message ->
|
||||||
// EncryptNip4(message, key).
|
// EncryptNip4(message, key).
|
||||||
//
|
func DecryptNip4(content, key []byte) (msg []byte, err error) {
|
||||||
// Deprecated: upgrade to using Decrypt with the NIP-44 algorithm.
|
parts := bytes.Split(content, []byte("?iv="))
|
||||||
func DecryptNip4(content string, key []byte) (msg []byte, err error) {
|
|
||||||
parts := strings.Split(content, "?iv=")
|
|
||||||
if len(parts) < 2 {
|
if len(parts) < 2 {
|
||||||
return nil, errorf.E(
|
return nil, errorf.E(
|
||||||
"error parsing encrypted message: no initialization vector",
|
"error parsing encrypted message: no initialization vector",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
var ciphertext []byte
|
ciphertext := make([]byte, base64.StdEncoding.EncodedLen(len(parts[0])))
|
||||||
if ciphertext, err = base64.StdEncoding.DecodeString(parts[0]); chk.E(err) {
|
if _, err = base64.StdEncoding.Decode(ciphertext, parts[0]); chk.E(err) {
|
||||||
err = errorf.E("error decoding ciphertext from base64: %w", err)
|
err = errorf.E("error decoding ciphertext from base64: %w", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
var iv []byte
|
iv := make([]byte, base64.StdEncoding.EncodedLen(len(parts[1])))
|
||||||
if iv, err = base64.StdEncoding.DecodeString(parts[1]); chk.E(err) {
|
if _, err = base64.StdEncoding.Decode(iv, parts[1]); chk.E(err) {
|
||||||
err = errorf.E("error decoding iv from base64: %w", err)
|
err = errorf.E("error decoding iv from base64: %w", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,7 +10,9 @@ import (
|
|||||||
"golang.org/x/crypto/hkdf"
|
"golang.org/x/crypto/hkdf"
|
||||||
"io"
|
"io"
|
||||||
"math"
|
"math"
|
||||||
|
"orly.dev/pkg/crypto/p256k"
|
||||||
"orly.dev/pkg/crypto/sha256"
|
"orly.dev/pkg/crypto/sha256"
|
||||||
|
"orly.dev/pkg/interfaces/signer"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"orly.dev/pkg/utils/errorf"
|
"orly.dev/pkg/utils/errorf"
|
||||||
)
|
)
|
||||||
@@ -43,11 +45,9 @@ func WithCustomNonce(salt []byte) func(opts *Opts) {
|
|||||||
// Encrypt data using a provided symmetric conversation key using NIP-44
|
// Encrypt data using a provided symmetric conversation key using NIP-44
|
||||||
// encryption (chacha20 cipher stream and sha256 HMAC).
|
// encryption (chacha20 cipher stream and sha256 HMAC).
|
||||||
func Encrypt(
|
func Encrypt(
|
||||||
plaintext string, conversationKey []byte,
|
plaintext, conversationKey []byte, applyOptions ...func(opts *Opts),
|
||||||
applyOptions ...func(opts *Opts),
|
|
||||||
) (
|
) (
|
||||||
cipherString string,
|
cipherString []byte, err error,
|
||||||
err error,
|
|
||||||
) {
|
) {
|
||||||
|
|
||||||
var o Opts
|
var o Opts
|
||||||
@@ -70,7 +70,7 @@ func Encrypt(
|
|||||||
); chk.E(err) {
|
); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
plain := []byte(plaintext)
|
plain := plaintext
|
||||||
size := len(plain)
|
size := len(plain)
|
||||||
if size < MinPlaintextSize || size > MaxPlaintextSize {
|
if size < MinPlaintextSize || size > MaxPlaintextSize {
|
||||||
err = errorf.E("plaintext should be between 1b and 64kB")
|
err = errorf.E("plaintext should be between 1b and 64kB")
|
||||||
@@ -93,14 +93,15 @@ func Encrypt(
|
|||||||
ct = append(ct, o.nonce...)
|
ct = append(ct, o.nonce...)
|
||||||
ct = append(ct, cipher...)
|
ct = append(ct, cipher...)
|
||||||
ct = append(ct, mac...)
|
ct = append(ct, mac...)
|
||||||
cipherString = base64.StdEncoding.EncodeToString(ct)
|
cipherString = make([]byte, base64.StdEncoding.EncodedLen(len(ct)))
|
||||||
|
base64.StdEncoding.Encode(cipherString, ct)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decrypt data that has been encoded using a provided symmetric conversation
|
// Decrypt data that has been encoded using a provided symmetric conversation
|
||||||
// key using NIP-44 encryption (chacha20 cipher stream and sha256 HMAC).
|
// key using NIP-44 encryption (chacha20 cipher stream and sha256 HMAC).
|
||||||
func Decrypt(b64ciphertextWrapped string, conversationKey []byte) (
|
func Decrypt(b64ciphertextWrapped, conversationKey []byte) (
|
||||||
plaintext string,
|
plaintext []byte,
|
||||||
err error,
|
err error,
|
||||||
) {
|
) {
|
||||||
cLen := len(b64ciphertextWrapped)
|
cLen := len(b64ciphertextWrapped)
|
||||||
@@ -108,12 +109,12 @@ func Decrypt(b64ciphertextWrapped string, conversationKey []byte) (
|
|||||||
err = errorf.E("invalid payload length: %d", cLen)
|
err = errorf.E("invalid payload length: %d", cLen)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if b64ciphertextWrapped[:1] == "#" {
|
if len(b64ciphertextWrapped) > 0 && b64ciphertextWrapped[0] == '#' {
|
||||||
err = errorf.E("unknown version")
|
err = errorf.E("unknown version")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
var decoded []byte
|
var decoded []byte
|
||||||
if decoded, err = base64.StdEncoding.DecodeString(b64ciphertextWrapped); chk.E(err) {
|
if decoded, err = base64.StdEncoding.DecodeString(string(b64ciphertextWrapped)); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if decoded[0] != version {
|
if decoded[0] != version {
|
||||||
@@ -153,12 +154,12 @@ func Decrypt(b64ciphertextWrapped string, conversationKey []byte) (
|
|||||||
err = errorf.E("invalid padding")
|
err = errorf.E("invalid padding")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
plaintext = string(unpadded)
|
plaintext = unpadded
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// GenerateConversationKey performs an ECDH key generation hashed with the nip-44-v2 using hkdf.
|
// GenerateConversationKeyFromHex performs an ECDH key generation hashed with the nip-44-v2 using hkdf.
|
||||||
func GenerateConversationKey(pkh, skh string) (ck []byte, err error) {
|
func GenerateConversationKeyFromHex(pkh, skh string) (ck []byte, err error) {
|
||||||
if skh >= "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141" ||
|
if skh >= "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141" ||
|
||||||
skh == "0000000000000000000000000000000000000000000000000000000000000000" {
|
skh == "0000000000000000000000000000000000000000000000000000000000000000" {
|
||||||
err = errorf.E(
|
err = errorf.E(
|
||||||
@@ -167,8 +168,27 @@ func GenerateConversationKey(pkh, skh string) (ck []byte, err error) {
|
|||||||
)
|
)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
var sign signer.I
|
||||||
|
if sign, err = p256k.NewSecFromHex(skh); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var pk []byte
|
||||||
|
if pk, err = p256k.HexToBin(pkh); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
var shared []byte
|
var shared []byte
|
||||||
if shared, err = ComputeSharedSecret(pkh, skh); chk.E(err) {
|
if shared, err = sign.ECDH(pk); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
ck = hkdf.Extract(sha256.New, shared, []byte("nip44-v2"))
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func GenerateConversationKeyWithSigner(sign signer.I, pk []byte) (
|
||||||
|
ck []byte, err error,
|
||||||
|
) {
|
||||||
|
var shared []byte
|
||||||
|
if shared, err = sign.ECDH(pk); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
ck = hkdf.Extract(sha256.New, shared, []byte("nip44-v2"))
|
ck = hkdf.Extract(sha256.New, shared, []byte("nip44-v2"))
|
||||||
|
|||||||
@@ -19,8 +19,8 @@ func assertCryptPriv(
|
|||||||
sk1, sk2, conversationKey, salt, plaintext, expected string,
|
sk1, sk2, conversationKey, salt, plaintext, expected string,
|
||||||
) {
|
) {
|
||||||
var (
|
var (
|
||||||
k1, s []byte
|
k1, s, plaintextBytes, actualBytes,
|
||||||
actual, decrypted string
|
expectedBytes, decrypted []byte
|
||||||
ok bool
|
ok bool
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
@@ -41,25 +41,29 @@ func assertCryptPriv(
|
|||||||
); !ok {
|
); !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
actual, err = Encrypt(plaintext, k1, WithCustomNonce(s))
|
plaintextBytes = []byte(plaintext)
|
||||||
|
actualBytes, err = Encrypt(plaintextBytes, k1, WithCustomNonce(s))
|
||||||
if ok = assert.NoError(t, err, "encryption failed: %v", err); !ok {
|
if ok = assert.NoError(t, err, "encryption failed: %v", err); !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if ok = assert.Equalf(t, expected, actual, "wrong encryption"); !ok {
|
expectedBytes = []byte(expected)
|
||||||
|
if ok = assert.Equalf(
|
||||||
|
t, string(expectedBytes), string(actualBytes), "wrong encryption",
|
||||||
|
); !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
decrypted, err = Decrypt(expected, k1)
|
decrypted, err = Decrypt(expectedBytes, k1)
|
||||||
if ok = assert.NoErrorf(t, err, "decryption failed: %v", err); !ok {
|
if ok = assert.NoErrorf(t, err, "decryption failed: %v", err); !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
assert.Equal(t, decrypted, plaintext, "wrong decryption")
|
assert.Equal(t, decrypted, plaintextBytes, "wrong decryption")
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertDecryptFail(
|
func assertDecryptFail(
|
||||||
t *testing.T, conversationKey, plaintext, ciphertext, msg string,
|
t *testing.T, conversationKey, plaintext, ciphertext, msg string,
|
||||||
) {
|
) {
|
||||||
var (
|
var (
|
||||||
k1 []byte
|
k1, ciphertextBytes []byte
|
||||||
ok bool
|
ok bool
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
@@ -69,14 +73,15 @@ func assertDecryptFail(
|
|||||||
); !ok {
|
); !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
_, err = Decrypt(ciphertext, k1)
|
ciphertextBytes = []byte(ciphertext)
|
||||||
|
_, err = Decrypt(ciphertextBytes, k1)
|
||||||
assert.ErrorContains(t, err, msg)
|
assert.ErrorContains(t, err, msg)
|
||||||
}
|
}
|
||||||
|
|
||||||
func assertConversationKeyFail(
|
func assertConversationKeyFail(
|
||||||
t *testing.T, priv string, pub string, msg string,
|
t *testing.T, priv string, pub string, msg string,
|
||||||
) {
|
) {
|
||||||
_, err := GenerateConversationKey(pub, priv)
|
_, err := GenerateConversationKeyFromHex(pub, priv)
|
||||||
assert.ErrorContains(t, err, msg)
|
assert.ErrorContains(t, err, msg)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -95,7 +100,7 @@ func assertConversationKeyGeneration(
|
|||||||
); !ok {
|
); !ok {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
actualConversationKey, err = GenerateConversationKey(pub, priv)
|
actualConversationKey, err = GenerateConversationKeyFromHex(pub, priv)
|
||||||
if ok = assert.NoErrorf(
|
if ok = assert.NoErrorf(
|
||||||
t, err, "conversation key generation failed: %v", err,
|
t, err, "conversation key generation failed: %v", err,
|
||||||
); !ok {
|
); !ok {
|
||||||
@@ -196,12 +201,12 @@ func assertMessageKeyGeneration(
|
|||||||
}
|
}
|
||||||
|
|
||||||
func assertCryptLong(
|
func assertCryptLong(
|
||||||
t *testing.T, conversationKey, salt, pattern string, repeat int,
|
t *testing.T, conversationKey, salt string, pattern []byte, repeat int,
|
||||||
plaintextSha256, payloadSha256 string,
|
plaintextSha256, payloadSha256 string,
|
||||||
) {
|
) {
|
||||||
var (
|
var (
|
||||||
convKey, convSalt []byte
|
convKey, convSalt, plaintext, payloadBytes []byte
|
||||||
plaintext, actualPlaintextSha256, actualPayload, actualPayloadSha256 string
|
actualPlaintextSha256, actualPayloadSha256 string
|
||||||
h hash.Hash
|
h hash.Hash
|
||||||
ok bool
|
ok bool
|
||||||
err error
|
err error
|
||||||
@@ -218,12 +223,12 @@ func assertCryptLong(
|
|||||||
); !ok {
|
); !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
plaintext = ""
|
plaintext = make([]byte, 0, len(pattern)*repeat)
|
||||||
for i := 0; i < repeat; i++ {
|
for i := 0; i < repeat; i++ {
|
||||||
plaintext += pattern
|
plaintext = append(plaintext, pattern...)
|
||||||
}
|
}
|
||||||
h = sha256.New()
|
h = sha256.New()
|
||||||
h.Write([]byte(plaintext))
|
h.Write(plaintext)
|
||||||
actualPlaintextSha256 = hex.Enc(h.Sum(nil))
|
actualPlaintextSha256 = hex.Enc(h.Sum(nil))
|
||||||
if ok = assert.Equalf(
|
if ok = assert.Equalf(
|
||||||
t, plaintextSha256, actualPlaintextSha256,
|
t, plaintextSha256, actualPlaintextSha256,
|
||||||
@@ -231,12 +236,14 @@ func assertCryptLong(
|
|||||||
); !ok {
|
); !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
actualPayload, err = Encrypt(plaintext, convKey, WithCustomNonce(convSalt))
|
payloadBytes, err = Encrypt(
|
||||||
|
plaintext, convKey, WithCustomNonce(convSalt),
|
||||||
|
)
|
||||||
if ok = assert.NoErrorf(t, err, "encryption failed: %v", err); !ok {
|
if ok = assert.NoErrorf(t, err, "encryption failed: %v", err); !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
h.Reset()
|
h.Reset()
|
||||||
h.Write([]byte(actualPayload))
|
h.Write(payloadBytes)
|
||||||
actualPayloadSha256 = hex.Enc(h.Sum(nil))
|
actualPayloadSha256 = hex.Enc(h.Sum(nil))
|
||||||
if ok = assert.Equalf(
|
if ok = assert.Equalf(
|
||||||
t, payloadSha256, actualPayloadSha256,
|
t, payloadSha256, actualPayloadSha256,
|
||||||
@@ -383,7 +390,7 @@ func TestCryptLong001(t *testing.T) {
|
|||||||
t,
|
t,
|
||||||
"8fc262099ce0d0bb9b89bac05bb9e04f9bc0090acc181fef6840ccee470371ed",
|
"8fc262099ce0d0bb9b89bac05bb9e04f9bc0090acc181fef6840ccee470371ed",
|
||||||
"326bcb2c943cd6bb717588c9e5a7e738edf6ed14ec5f5344caa6ef56f0b9cff7",
|
"326bcb2c943cd6bb717588c9e5a7e738edf6ed14ec5f5344caa6ef56f0b9cff7",
|
||||||
"x",
|
[]byte("x"),
|
||||||
65535,
|
65535,
|
||||||
"09ab7495d3e61a76f0deb12cb0306f0696cbb17ffc12131368c7a939f12f56d3",
|
"09ab7495d3e61a76f0deb12cb0306f0696cbb17ffc12131368c7a939f12f56d3",
|
||||||
"90714492225faba06310bff2f249ebdc2a5e609d65a629f1c87f2d4ffc55330a",
|
"90714492225faba06310bff2f249ebdc2a5e609d65a629f1c87f2d4ffc55330a",
|
||||||
@@ -395,7 +402,7 @@ func TestCryptLong002(t *testing.T) {
|
|||||||
t,
|
t,
|
||||||
"56adbe3720339363ab9c3b8526ffce9fd77600927488bfc4b59f7a68ffe5eae0",
|
"56adbe3720339363ab9c3b8526ffce9fd77600927488bfc4b59f7a68ffe5eae0",
|
||||||
"ad68da81833c2a8ff609c3d2c0335fd44fe5954f85bb580c6a8d467aa9fc5dd0",
|
"ad68da81833c2a8ff609c3d2c0335fd44fe5954f85bb580c6a8d467aa9fc5dd0",
|
||||||
"!",
|
[]byte("!"),
|
||||||
65535,
|
65535,
|
||||||
"6af297793b72ae092c422e552c3bb3cbc310da274bd1cf9e31023a7fe4a2d75e",
|
"6af297793b72ae092c422e552c3bb3cbc310da274bd1cf9e31023a7fe4a2d75e",
|
||||||
"8013e45a109fad3362133132b460a2d5bce235fe71c8b8f4014793fb52a49844",
|
"8013e45a109fad3362133132b460a2d5bce235fe71c8b8f4014793fb52a49844",
|
||||||
@@ -407,7 +414,7 @@ func TestCryptLong003(t *testing.T) {
|
|||||||
t,
|
t,
|
||||||
"7fc540779979e472bb8d12480b443d1e5eb1098eae546ef2390bee499bbf46be",
|
"7fc540779979e472bb8d12480b443d1e5eb1098eae546ef2390bee499bbf46be",
|
||||||
"34905e82105c20de9a2f6cd385a0d541e6bcc10601d12481ff3a7575dc622033",
|
"34905e82105c20de9a2f6cd385a0d541e6bcc10601d12481ff3a7575dc622033",
|
||||||
"🦄",
|
[]byte("🦄"),
|
||||||
16383,
|
16383,
|
||||||
"a249558d161b77297bc0cb311dde7d77190f6571b25c7e4429cd19044634a61f",
|
"a249558d161b77297bc0cb311dde7d77190f6571b25c7e4429cd19044634a61f",
|
||||||
"b3348422471da1f3c59d79acfe2fe103f3cd24488109e5b18734cdb5953afd15",
|
"b3348422471da1f3c59d79acfe2fe103f3cd24488109e5b18734cdb5953afd15",
|
||||||
@@ -1307,9 +1314,12 @@ func TestMaxLength(t *testing.T) {
|
|||||||
pub2, _ := keys.GetPublicKeyHex(string(sk2))
|
pub2, _ := keys.GetPublicKeyHex(string(sk2))
|
||||||
salt := make([]byte, 32)
|
salt := make([]byte, 32)
|
||||||
rand.Read(salt)
|
rand.Read(salt)
|
||||||
conversationKey, _ := GenerateConversationKey(pub2, string(sk1))
|
conversationKey, _ := GenerateConversationKeyFromHex(pub2, string(sk1))
|
||||||
plaintext := strings.Repeat("a", MaxPlaintextSize)
|
plaintext := strings.Repeat("a", MaxPlaintextSize)
|
||||||
encrypted, err := Encrypt(plaintext, conversationKey, WithCustomNonce(salt))
|
plaintextBytes := []byte(plaintext)
|
||||||
|
encrypted, err := Encrypt(
|
||||||
|
plaintextBytes, conversationKey, WithCustomNonce(salt),
|
||||||
|
)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
@@ -1321,7 +1331,7 @@ func TestMaxLength(t *testing.T) {
|
|||||||
fmt.Sprintf("%x", conversationKey),
|
fmt.Sprintf("%x", conversationKey),
|
||||||
fmt.Sprintf("%x", salt),
|
fmt.Sprintf("%x", salt),
|
||||||
plaintext,
|
plaintext,
|
||||||
encrypted,
|
string(encrypted),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1330,8 +1340,8 @@ func assertCryptPub(
|
|||||||
sk1, pub2, conversationKey, salt, plaintext, expected string,
|
sk1, pub2, conversationKey, salt, plaintext, expected string,
|
||||||
) {
|
) {
|
||||||
var (
|
var (
|
||||||
k1, s []byte
|
k1, s, plaintextBytes,
|
||||||
actual, decrypted string
|
actualBytes, expectedBytes, decrypted []byte
|
||||||
ok bool
|
ok bool
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
@@ -1352,16 +1362,20 @@ func assertCryptPub(
|
|||||||
); !ok {
|
); !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
actual, err = Encrypt(plaintext, k1, WithCustomNonce(s))
|
plaintextBytes = []byte(plaintext)
|
||||||
|
actualBytes, err = Encrypt(plaintextBytes, k1, WithCustomNonce(s))
|
||||||
if ok = assert.NoError(t, err, "encryption failed: %v", err); !ok {
|
if ok = assert.NoError(t, err, "encryption failed: %v", err); !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if ok = assert.Equalf(t, expected, actual, "wrong encryption"); !ok {
|
expectedBytes = []byte(expected)
|
||||||
|
if ok = assert.Equalf(
|
||||||
|
t, string(expectedBytes), string(actualBytes), "wrong encryption",
|
||||||
|
); !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
decrypted, err = Decrypt(expected, k1)
|
decrypted, err = Decrypt(expectedBytes, k1)
|
||||||
if ok = assert.NoErrorf(t, err, "decryption failed: %v", err); !ok {
|
if ok = assert.NoErrorf(t, err, "decryption failed: %v", err); !ok {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
assert.Equal(t, decrypted, plaintext, "wrong decryption")
|
assert.Equal(t, decrypted, plaintextBytes, "wrong decryption")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -18,3 +18,7 @@ type Signer = btcec.Signer
|
|||||||
type Keygen = btcec.Keygen
|
type Keygen = btcec.Keygen
|
||||||
|
|
||||||
func NewKeygen() (k *Keygen) { return new(Keygen) }
|
func NewKeygen() (k *Keygen) { return new(Keygen) }
|
||||||
|
|
||||||
|
var NewSecFromHex = btcec.NewSecFromHex
|
||||||
|
var NewPubFromHex = btcec.NewPubFromHex
|
||||||
|
var HexToBin = btcec.HexToBin
|
||||||
|
|||||||
@@ -55,10 +55,20 @@ func (s *Signer) InitPub(pub []byte) (err error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Sec returns the raw secret key bytes.
|
// Sec returns the raw secret key bytes.
|
||||||
func (s *Signer) Sec() (b []byte) { return s.skb }
|
func (s *Signer) Sec() (b []byte) {
|
||||||
|
if s == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return s.skb
|
||||||
|
}
|
||||||
|
|
||||||
// Pub returns the raw BIP-340 schnorr public key bytes.
|
// Pub returns the raw BIP-340 schnorr public key bytes.
|
||||||
func (s *Signer) Pub() (b []byte) { return s.pkb }
|
func (s *Signer) Pub() (b []byte) {
|
||||||
|
if s == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return s.pkb
|
||||||
|
}
|
||||||
|
|
||||||
// Sign a message with the Signer. Requires an initialised secret key.
|
// Sign a message with the Signer. Requires an initialised secret key.
|
||||||
func (s *Signer) Sign(msg []byte) (sig []byte, err error) {
|
func (s *Signer) Sign(msg []byte) (sig []byte, err error) {
|
||||||
|
|||||||
40
pkg/crypto/p256k/btcec/helpers-btcec.go
Normal file
40
pkg/crypto/p256k/btcec/helpers-btcec.go
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
//go:build !cgo
|
||||||
|
|
||||||
|
package btcec
|
||||||
|
|
||||||
|
import (
|
||||||
|
"orly.dev/pkg/encoders/hex"
|
||||||
|
"orly.dev/pkg/interfaces/signer"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewSecFromHex[V []byte | string](skh V) (sign signer.I, err error) {
|
||||||
|
var sk []byte
|
||||||
|
if _, err = hex.DecBytes(sk, []byte(skh)); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sign = &Signer{}
|
||||||
|
if err = sign.InitSec(sk); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPubFromHex[V []byte | string](pkh V) (sign signer.I, err error) {
|
||||||
|
var sk []byte
|
||||||
|
if _, err = hex.DecBytes(sk, []byte(pkh)); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sign = &Signer{}
|
||||||
|
if err = sign.InitPub(sk); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func HexToBin(hexStr string) (b []byte, err error) {
|
||||||
|
if _, err = hex.DecBytes(b, []byte(hexStr)); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
40
pkg/crypto/p256k/helpers.go
Normal file
40
pkg/crypto/p256k/helpers.go
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
//go:build cgo
|
||||||
|
|
||||||
|
package p256k
|
||||||
|
|
||||||
|
import (
|
||||||
|
"orly.dev/pkg/encoders/hex"
|
||||||
|
"orly.dev/pkg/interfaces/signer"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewSecFromHex[V []byte | string](skh V) (sign signer.I, err error) {
|
||||||
|
var sk []byte
|
||||||
|
if _, err = hex.DecBytes(sk, []byte(skh)); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sign = &Signer{}
|
||||||
|
if err = sign.InitSec(sk); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPubFromHex[V []byte | string](pkh V) (sign signer.I, err error) {
|
||||||
|
var sk []byte
|
||||||
|
if _, err = hex.DecBytes(sk, []byte(pkh)); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sign = &Signer{}
|
||||||
|
if err = sign.InitPub(sk); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func HexToBin(hexStr string) (b []byte, err error) {
|
||||||
|
if b, err = hex.DecAppend(b, []byte(hexStr)); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
@@ -77,8 +77,18 @@ func (s *Signer) InitPub(pub []byte) (err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Signer) Sec() (b []byte) { return s.skb }
|
func (s *Signer) Sec() (b []byte) {
|
||||||
func (s *Signer) Pub() (b []byte) { return s.pkb }
|
if s == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return s.skb
|
||||||
|
}
|
||||||
|
func (s *Signer) Pub() (b []byte) {
|
||||||
|
if s == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return s.pkb
|
||||||
|
}
|
||||||
|
|
||||||
// func (s *Signer) ECPub() (b []byte) { return s.pkb }
|
// func (s *Signer) ECPub() (b []byte) { return s.pkb }
|
||||||
|
|
||||||
|
|||||||
@@ -61,7 +61,7 @@ func TestSignerVerify(t *testing.T) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if valid, err = signer.Verify(id, ev.Sig); chk.E(err) {
|
if valid, err = signer.Verify(id, ev.Sig); chk.E(err) {
|
||||||
t.Errorf("failed to verify: %s\n%0x", err, ev.Id)
|
t.Errorf("failed to verify: %s\n%0x", err, ev.ID)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !valid {
|
if !valid {
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ package database
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/dgraph-io/badger/v4"
|
"github.com/dgraph-io/badger/v4"
|
||||||
"io"
|
|
||||||
"orly.dev/pkg/encoders/eventidserial"
|
"orly.dev/pkg/encoders/eventidserial"
|
||||||
"orly.dev/pkg/utils/apputil"
|
"orly.dev/pkg/utils/apputil"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
@@ -75,16 +74,6 @@ func (d *D) Wipe() (err error) {
|
|||||||
panic("implement me")
|
panic("implement me")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *D) Import(r io.Reader) {
|
|
||||||
// TODO implement me
|
|
||||||
panic("implement me")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *D) Export(c context.T, w io.Writer, pubkeys ...[]byte) {
|
|
||||||
// TODO implement me
|
|
||||||
panic("implement me")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *D) SetLogLevel(level string) {
|
func (d *D) SetLogLevel(level string) {
|
||||||
d.Logger.SetLogLevel(lol.GetLogLevel(level))
|
d.Logger.SetLogLevel(lol.GetLogLevel(level))
|
||||||
}
|
}
|
||||||
@@ -106,6 +95,7 @@ func (d *D) Init(path string) (err error) {
|
|||||||
|
|
||||||
// Sync flushes the database buffers to disk.
|
// Sync flushes the database buffers to disk.
|
||||||
func (d *D) Sync() (err error) {
|
func (d *D) Sync() (err error) {
|
||||||
|
d.DB.RunValueLogGC(0.5)
|
||||||
return d.DB.Sync()
|
return d.DB.Sync()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ func (d *D) DeleteEvent(c context.T, eid *eventid.T) (err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
if ser == nil {
|
if ser == nil {
|
||||||
// Event not found, nothing to delete
|
// Event wasn't found, nothing to delete
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Fetch the event to get its data
|
// Fetch the event to get its data
|
||||||
@@ -33,7 +33,7 @@ func (d *D) DeleteEvent(c context.T, eid *eventid.T) (err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
if ev == nil {
|
if ev == nil {
|
||||||
// Event not found, nothing to delete
|
// Event wasn't found, nothing to delete
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Get all indexes for the event
|
// Get all indexes for the event
|
||||||
|
|||||||
105
pkg/database/export.go
Normal file
105
pkg/database/export.go
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"github.com/dgraph-io/badger/v4"
|
||||||
|
"io"
|
||||||
|
"orly.dev/pkg/database/indexes"
|
||||||
|
"orly.dev/pkg/database/indexes/types"
|
||||||
|
"orly.dev/pkg/encoders/event"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/context"
|
||||||
|
"orly.dev/pkg/utils/units"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Export the complete database of stored events to an io.Writer in line structured minified
|
||||||
|
// JSON.
|
||||||
|
func (d *D) Export(c context.T, w io.Writer, pubkeys ...[]byte) {
|
||||||
|
var err error
|
||||||
|
evB := make([]byte, 0, units.Mb)
|
||||||
|
evBuf := bytes.NewBuffer(evB)
|
||||||
|
if len(pubkeys) == 0 {
|
||||||
|
if err = d.View(
|
||||||
|
func(txn *badger.Txn) (err error) {
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
if err = indexes.EventEnc(nil).MarshalWrite(buf); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
it := txn.NewIterator(badger.IteratorOptions{Prefix: buf.Bytes()})
|
||||||
|
defer it.Close()
|
||||||
|
for it.Rewind(); it.Valid(); it.Next() {
|
||||||
|
item := it.Item()
|
||||||
|
if err = item.Value(
|
||||||
|
func(val []byte) (err error) {
|
||||||
|
evBuf.Write(val)
|
||||||
|
return
|
||||||
|
},
|
||||||
|
); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ev := event.New()
|
||||||
|
if err = ev.UnmarshalBinary(evBuf); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Serialize the event to JSON and write it to the output
|
||||||
|
if _, err = w.Write(ev.Serialize()); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err = w.Write([]byte{'\n'}); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
evBuf.Reset()
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
|
); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for _, pubkey := range pubkeys {
|
||||||
|
if err = d.View(
|
||||||
|
func(txn *badger.Txn) (err error) {
|
||||||
|
pkBuf := new(bytes.Buffer)
|
||||||
|
ph := &types.PubHash{}
|
||||||
|
if err = ph.FromPubkey(pubkey); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err = indexes.PubkeyEnc(
|
||||||
|
ph, nil, nil,
|
||||||
|
).MarshalWrite(pkBuf); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
it := txn.NewIterator(badger.IteratorOptions{Prefix: pkBuf.Bytes()})
|
||||||
|
defer it.Close()
|
||||||
|
for it.Rewind(); it.Valid(); it.Next() {
|
||||||
|
item := it.Item()
|
||||||
|
if err = item.Value(
|
||||||
|
func(val []byte) (err error) {
|
||||||
|
evBuf.Write(val)
|
||||||
|
return
|
||||||
|
},
|
||||||
|
); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ev := event.New()
|
||||||
|
if err = ev.UnmarshalBinary(evBuf); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Serialize the event to JSON and write it to the output
|
||||||
|
if _, err = w.Write(ev.Serialize()); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, err = w.Write([]byte{'\n'}); chk.E(err) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
evBuf.Reset()
|
||||||
|
}
|
||||||
|
return
|
||||||
|
},
|
||||||
|
); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
111
pkg/database/export_test.go
Normal file
111
pkg/database/export_test.go
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"orly.dev/pkg/encoders/event"
|
||||||
|
"orly.dev/pkg/encoders/event/examples"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/context"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestExport tests the Export function by:
|
||||||
|
// 1. Creating a new database with events from examples.Cache
|
||||||
|
// 2. Checking that all event IDs in the cache are found in the export
|
||||||
|
// 3. Verifying this also works when only a few pubkeys are requested
|
||||||
|
func TestExport(t *testing.T) {
|
||||||
|
// Create a temporary directory for the database
|
||||||
|
tempDir, err := os.MkdirTemp("", "test-db-*")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create temporary directory: %v", err)
|
||||||
|
}
|
||||||
|
defer os.RemoveAll(tempDir) // Clean up after the test
|
||||||
|
|
||||||
|
// Create a context and cancel function for the database
|
||||||
|
ctx, cancel := context.Cancel(context.Bg())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Initialize the database
|
||||||
|
db, err := New(ctx, cancel, tempDir, "info")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("Failed to create database: %v", err)
|
||||||
|
}
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
// Create a scanner to read events from examples.Cache
|
||||||
|
scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
|
||||||
|
scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
|
||||||
|
|
||||||
|
// Maps to store event IDs and their associated pubkeys
|
||||||
|
eventIDs := make(map[string]bool)
|
||||||
|
pubkeyToEventIDs := make(map[string][]string)
|
||||||
|
|
||||||
|
// Process each event
|
||||||
|
for scanner.Scan() {
|
||||||
|
chk.E(scanner.Err())
|
||||||
|
b := scanner.Bytes()
|
||||||
|
ev := event.New()
|
||||||
|
|
||||||
|
// Unmarshal the event
|
||||||
|
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save the event to the database
|
||||||
|
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||||
|
t.Fatalf("Failed to save event: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store the event ID
|
||||||
|
eventID := ev.IdString()
|
||||||
|
eventIDs[eventID] = true
|
||||||
|
|
||||||
|
// Store the event ID by pubkey
|
||||||
|
pubkey := ev.PubKeyString()
|
||||||
|
pubkeyToEventIDs[pubkey] = append(pubkeyToEventIDs[pubkey], eventID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check for scanner errors
|
||||||
|
if err = scanner.Err(); err != nil {
|
||||||
|
t.Fatalf("Scanner error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("Saved %d events to the database", len(eventIDs))
|
||||||
|
|
||||||
|
// Test 1: Export all events and verify all IDs are in the export
|
||||||
|
var exportBuffer bytes.Buffer
|
||||||
|
db.Export(ctx, &exportBuffer)
|
||||||
|
|
||||||
|
// Parse the exported events and check that all IDs are present
|
||||||
|
exportedIDs := make(map[string]bool)
|
||||||
|
exportScanner := bufio.NewScanner(&exportBuffer)
|
||||||
|
exportScanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
|
||||||
|
exportCount := 0
|
||||||
|
for exportScanner.Scan() {
|
||||||
|
b := exportScanner.Bytes()
|
||||||
|
ev := event.New()
|
||||||
|
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
exportedIDs[ev.IdString()] = true
|
||||||
|
exportCount++
|
||||||
|
}
|
||||||
|
// Check for scanner errors
|
||||||
|
if err = exportScanner.Err(); err != nil {
|
||||||
|
t.Fatalf("Scanner error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("Found %d events in the export", exportCount)
|
||||||
|
|
||||||
|
// Check that all original event IDs are in the export
|
||||||
|
for id := range eventIDs {
|
||||||
|
if !exportedIDs[id] {
|
||||||
|
t.Errorf("Event ID %s not found in export", id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Logf("All %d event IDs found in export", len(eventIDs))
|
||||||
|
}
|
||||||
@@ -5,7 +5,6 @@ import (
|
|||||||
"github.com/dgraph-io/badger/v4"
|
"github.com/dgraph-io/badger/v4"
|
||||||
"orly.dev/pkg/database/indexes"
|
"orly.dev/pkg/database/indexes"
|
||||||
"orly.dev/pkg/database/indexes/types"
|
"orly.dev/pkg/database/indexes/types"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/encoders/event"
|
"orly.dev/pkg/encoders/event"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
)
|
)
|
||||||
@@ -13,8 +12,7 @@ import (
|
|||||||
func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
|
func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
|
||||||
if err = d.View(
|
if err = d.View(
|
||||||
func(txn *badger.Txn) (err error) {
|
func(txn *badger.Txn) (err error) {
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
defer codecbuf.Put(buf)
|
|
||||||
if err = indexes.EventEnc(ser).MarshalWrite(buf); chk.E(err) {
|
if err = indexes.EventEnc(ser).MarshalWrite(buf); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -56,7 +56,7 @@ func TestFetchEventBySerial(t *testing.T) {
|
|||||||
events = append(events, ev)
|
events = append(events, ev)
|
||||||
|
|
||||||
// Save the event to the database
|
// Save the event to the database
|
||||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -78,7 +78,7 @@ func TestFetchEventBySerial(t *testing.T) {
|
|||||||
var sers types.Uint40s
|
var sers types.Uint40s
|
||||||
sers, err = db.QueryForSerials(
|
sers, err = db.QueryForSerials(
|
||||||
ctx, &filter.F{
|
ctx, &filter.F{
|
||||||
Ids: tag.New(testEvent.Id),
|
Ids: tag.New(testEvent.ID),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -102,10 +102,10 @@ func TestFetchEventBySerial(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify the fetched event has the same ID as the original event
|
// Verify the fetched event has the same ID as the original event
|
||||||
if !bytes.Equal(fetchedEvent.Id, testEvent.Id) {
|
if !bytes.Equal(fetchedEvent.ID, testEvent.ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Fetched event ID doesn't match original event ID. Got %x, expected %x",
|
"Fetched event ID doesn't match original event ID. Got %x, expected %x",
|
||||||
fetchedEvent.Id, testEvent.Id,
|
fetchedEvent.ID, testEvent.ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -5,7 +5,6 @@ import (
|
|||||||
"github.com/dgraph-io/badger/v4"
|
"github.com/dgraph-io/badger/v4"
|
||||||
"orly.dev/pkg/database/indexes"
|
"orly.dev/pkg/database/indexes"
|
||||||
"orly.dev/pkg/database/indexes/types"
|
"orly.dev/pkg/database/indexes/types"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/interfaces/store"
|
"orly.dev/pkg/interfaces/store"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
)
|
)
|
||||||
@@ -15,8 +14,7 @@ func (d *D) GetFullIdPubkeyBySerial(ser *types.Uint40) (
|
|||||||
) {
|
) {
|
||||||
if err = d.View(
|
if err = d.View(
|
||||||
func(txn *badger.Txn) (err error) {
|
func(txn *badger.Txn) (err error) {
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
defer codecbuf.Put(buf)
|
|
||||||
if err = indexes.FullIdPubkeyEnc(
|
if err = indexes.FullIdPubkeyEnc(
|
||||||
ser, nil, nil, nil,
|
ser, nil, nil, nil,
|
||||||
).MarshalWrite(buf); chk.E(err) {
|
).MarshalWrite(buf); chk.E(err) {
|
||||||
|
|||||||
@@ -39,9 +39,9 @@ func GetIndexesForEvent(ev *event.E, serial uint64) (
|
|||||||
if err = ser.Set(serial); chk.E(err) {
|
if err = ser.Set(serial); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Id index
|
// ID index
|
||||||
idHash := new(IdHash)
|
idHash := new(IdHash)
|
||||||
if err = idHash.FromId(ev.Id); chk.E(err) {
|
if err = idHash.FromId(ev.ID); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
idIndex := indexes.IdEnc(idHash, ser)
|
idIndex := indexes.IdEnc(idHash, ser)
|
||||||
@@ -50,7 +50,7 @@ func GetIndexesForEvent(ev *event.E, serial uint64) (
|
|||||||
}
|
}
|
||||||
// FullIdPubkey index
|
// FullIdPubkey index
|
||||||
fullID := new(Id)
|
fullID := new(Id)
|
||||||
if err = fullID.FromId(ev.Id); chk.E(err) {
|
if err = fullID.FromId(ev.ID); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
pubHash := new(PubHash)
|
pubHash := new(PubHash)
|
||||||
|
|||||||
@@ -2,16 +2,16 @@ package database
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"testing"
|
||||||
|
|
||||||
"orly.dev/pkg/database/indexes"
|
"orly.dev/pkg/database/indexes"
|
||||||
types2 "orly.dev/pkg/database/indexes/types"
|
types2 "orly.dev/pkg/database/indexes/types"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/encoders/event"
|
"orly.dev/pkg/encoders/event"
|
||||||
"orly.dev/pkg/encoders/kind"
|
"orly.dev/pkg/encoders/kind"
|
||||||
"orly.dev/pkg/encoders/tag"
|
"orly.dev/pkg/encoders/tag"
|
||||||
"orly.dev/pkg/encoders/tags"
|
"orly.dev/pkg/encoders/tags"
|
||||||
"orly.dev/pkg/encoders/timestamp"
|
"orly.dev/pkg/encoders/timestamp"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/minio/sha256-simd"
|
"github.com/minio/sha256-simd"
|
||||||
)
|
)
|
||||||
@@ -26,8 +26,7 @@ func TestGetIndexesForEvent(t *testing.T) {
|
|||||||
// indexes
|
// indexes
|
||||||
func verifyIndexIncluded(t *testing.T, idxs [][]byte, expectedIdx *indexes.T) {
|
func verifyIndexIncluded(t *testing.T, idxs [][]byte, expectedIdx *indexes.T) {
|
||||||
// Marshal the expected index
|
// Marshal the expected index
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
defer codecbuf.Put(buf)
|
|
||||||
err := expectedIdx.MarshalWrite(buf)
|
err := expectedIdx.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("Failed to marshal expected index: %v", err)
|
t.Fatalf("Failed to marshal expected index: %v", err)
|
||||||
@@ -60,7 +59,7 @@ func testBasicEvent(t *testing.T) {
|
|||||||
for i := range id {
|
for i := range id {
|
||||||
id[i] = byte(i)
|
id[i] = byte(i)
|
||||||
}
|
}
|
||||||
ev.Id = id
|
ev.ID = id
|
||||||
|
|
||||||
// Set Pubkey
|
// Set Pubkey
|
||||||
pubkey := make([]byte, 32)
|
pubkey := make([]byte, 32)
|
||||||
@@ -92,7 +91,7 @@ func testBasicEvent(t *testing.T) {
|
|||||||
|
|
||||||
// Create and verify the expected indexes
|
// Create and verify the expected indexes
|
||||||
|
|
||||||
// 1. Id index
|
// 1. ID index
|
||||||
ser := new(types2.Uint40)
|
ser := new(types2.Uint40)
|
||||||
err = ser.Set(serial)
|
err = ser.Set(serial)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
@@ -100,7 +99,7 @@ func testBasicEvent(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
idHash := new(types2.IdHash)
|
idHash := new(types2.IdHash)
|
||||||
err = idHash.FromId(ev.Id)
|
err = idHash.FromId(ev.ID)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("Failed to create IdHash: %v", err)
|
t.Fatalf("Failed to create IdHash: %v", err)
|
||||||
}
|
}
|
||||||
@@ -109,9 +108,9 @@ func testBasicEvent(t *testing.T) {
|
|||||||
|
|
||||||
// 2. FullIdPubkey index
|
// 2. FullIdPubkey index
|
||||||
fullID := new(types2.Id)
|
fullID := new(types2.Id)
|
||||||
err = fullID.FromId(ev.Id)
|
err = fullID.FromId(ev.ID)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("Failed to create Id: %v", err)
|
t.Fatalf("Failed to create ID: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
pubHash := new(types2.PubHash)
|
pubHash := new(types2.PubHash)
|
||||||
@@ -156,7 +155,7 @@ func testEventWithTags(t *testing.T) {
|
|||||||
for i := range id {
|
for i := range id {
|
||||||
id[i] = byte(i)
|
id[i] = byte(i)
|
||||||
}
|
}
|
||||||
ev.Id = id
|
ev.ID = id
|
||||||
|
|
||||||
// Set Pubkey
|
// Set Pubkey
|
||||||
pubkey := make([]byte, 32)
|
pubkey := make([]byte, 32)
|
||||||
@@ -210,7 +209,7 @@ func testEventWithTags(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
idHash := new(types2.IdHash)
|
idHash := new(types2.IdHash)
|
||||||
err = idHash.FromId(ev.Id)
|
err = idHash.FromId(ev.ID)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("Failed to create IdHash: %v", err)
|
t.Fatalf("Failed to create IdHash: %v", err)
|
||||||
}
|
}
|
||||||
@@ -268,7 +267,7 @@ func testErrorHandling(t *testing.T) {
|
|||||||
for i := range id {
|
for i := range id {
|
||||||
id[i] = byte(i)
|
id[i] = byte(i)
|
||||||
}
|
}
|
||||||
ev.Id = id
|
ev.ID = id
|
||||||
|
|
||||||
// Set Pubkey
|
// Set Pubkey
|
||||||
pubkey := make([]byte, 32)
|
pubkey := make([]byte, 32)
|
||||||
|
|||||||
@@ -76,7 +76,7 @@ func CreatePubHashFromData(data []byte) (p *types2.PubHash, err error) {
|
|||||||
// complete set of combinations of all fields in the event, thus there is no
|
// complete set of combinations of all fields in the event, thus there is no
|
||||||
// need to decode events until they are to be delivered.
|
// need to decode events until they are to be delivered.
|
||||||
func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
|
func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
|
||||||
// Id eid
|
// ID eid
|
||||||
//
|
//
|
||||||
// If there is any Ids in the filter, none of the other fields matter. It
|
// If there is any Ids in the filter, none of the other fields matter. It
|
||||||
// should be an error, but convention just ignores it.
|
// should be an error, but convention just ignores it.
|
||||||
|
|||||||
@@ -3,23 +3,23 @@ package database
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"math"
|
"math"
|
||||||
|
"testing"
|
||||||
|
|
||||||
"orly.dev/pkg/database/indexes"
|
"orly.dev/pkg/database/indexes"
|
||||||
types2 "orly.dev/pkg/database/indexes/types"
|
types2 "orly.dev/pkg/database/indexes/types"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/encoders/filter"
|
"orly.dev/pkg/encoders/filter"
|
||||||
"orly.dev/pkg/encoders/kind"
|
"orly.dev/pkg/encoders/kind"
|
||||||
"orly.dev/pkg/encoders/kinds"
|
"orly.dev/pkg/encoders/kinds"
|
||||||
"orly.dev/pkg/encoders/tag"
|
"orly.dev/pkg/encoders/tag"
|
||||||
"orly.dev/pkg/encoders/timestamp"
|
"orly.dev/pkg/encoders/timestamp"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/minio/sha256-simd"
|
"github.com/minio/sha256-simd"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TestGetIndexesFromFilter tests the GetIndexesFromFilter function
|
// TestGetIndexesFromFilter tests the GetIndexesFromFilter function
|
||||||
func TestGetIndexesFromFilter(t *testing.T) {
|
func TestGetIndexesFromFilter(t *testing.T) {
|
||||||
t.Run("Id", testIdFilter)
|
t.Run("ID", testIdFilter)
|
||||||
t.Run("Pubkey", testPubkeyFilter)
|
t.Run("Pubkey", testPubkeyFilter)
|
||||||
t.Run("CreatedAt", testCreatedAtFilter)
|
t.Run("CreatedAt", testCreatedAtFilter)
|
||||||
t.Run("CreatedAtUntil", testCreatedAtUntilFilter)
|
t.Run("CreatedAtUntil", testCreatedAtUntilFilter)
|
||||||
@@ -41,8 +41,7 @@ func verifyIndex(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Marshal the expected start index
|
// Marshal the expected start index
|
||||||
startBuf := codecbuf.Get()
|
startBuf := new(bytes.Buffer)
|
||||||
defer codecbuf.Put(startBuf)
|
|
||||||
err := expectedStartIdx.MarshalWrite(startBuf)
|
err := expectedStartIdx.MarshalWrite(startBuf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("Failed to marshal expected start index: %v", err)
|
t.Fatalf("Failed to marshal expected start index: %v", err)
|
||||||
@@ -62,8 +61,7 @@ func verifyIndex(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Marshal the expected end index
|
// Marshal the expected end index
|
||||||
endBuf := codecbuf.Get()
|
endBuf := new(bytes.Buffer)
|
||||||
defer codecbuf.Put(endBuf)
|
|
||||||
err = endIdx.MarshalWrite(endBuf)
|
err = endIdx.MarshalWrite(endBuf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("Failed to marshal expected End index: %v", err)
|
t.Fatalf("Failed to marshal expected End index: %v", err)
|
||||||
@@ -77,9 +75,9 @@ func verifyIndex(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test Id filter
|
// Test ID filter
|
||||||
func testIdFilter(t *testing.T) {
|
func testIdFilter(t *testing.T) {
|
||||||
// Create a filter with an Id
|
// Create a filter with an ID
|
||||||
f := filter.New()
|
f := filter.New()
|
||||||
id := make([]byte, sha256.Size)
|
id := make([]byte, sha256.Size)
|
||||||
for i := range id {
|
for i := range id {
|
||||||
@@ -102,7 +100,7 @@ func testIdFilter(t *testing.T) {
|
|||||||
expectedIdx := indexes.IdEnc(idHash, nil)
|
expectedIdx := indexes.IdEnc(idHash, nil)
|
||||||
|
|
||||||
// Verify the generated index
|
// Verify the generated index
|
||||||
// For Id filter, both start and end indexes are the same
|
// For ID filter, both start and end indexes are the same
|
||||||
verifyIndex(t, idxs, expectedIdx, expectedIdx)
|
verifyIndex(t, idxs, expectedIdx, expectedIdx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -53,7 +53,7 @@ func TestGetSerialById(t *testing.T) {
|
|||||||
events = append(events, ev)
|
events = append(events, ev)
|
||||||
|
|
||||||
// Save the event to the database
|
// Save the event to the database
|
||||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -71,7 +71,7 @@ func TestGetSerialById(t *testing.T) {
|
|||||||
testEvent := events[3] // Using the same event as in QueryForIds test
|
testEvent := events[3] // Using the same event as in QueryForIds test
|
||||||
|
|
||||||
// Get the serial by ID
|
// Get the serial by ID
|
||||||
serial, err := db.GetSerialById(testEvent.Id)
|
serial, err := db.GetSerialById(testEvent.ID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Failed to get serial by ID: %v", err)
|
t.Fatalf("Failed to get serial by ID: %v", err)
|
||||||
}
|
}
|
||||||
@@ -82,10 +82,10 @@ func TestGetSerialById(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test with a non-existent ID
|
// Test with a non-existent ID
|
||||||
nonExistentId := make([]byte, len(testEvent.Id))
|
nonExistentId := make([]byte, len(testEvent.ID))
|
||||||
// Ensure it's different from any real ID
|
// Ensure it's different from any real ID
|
||||||
for i := range nonExistentId {
|
for i := range nonExistentId {
|
||||||
nonExistentId[i] = ^testEvent.Id[i]
|
nonExistentId[i] = ^testEvent.ID[i]
|
||||||
}
|
}
|
||||||
|
|
||||||
serial, err = db.GetSerialById(nonExistentId)
|
serial, err = db.GetSerialById(nonExistentId)
|
||||||
|
|||||||
@@ -60,12 +60,12 @@ func TestGetSerialsByRange(t *testing.T) {
|
|||||||
events = append(events, ev)
|
events = append(events, ev)
|
||||||
|
|
||||||
// Save the event to the database
|
// Save the event to the database
|
||||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the serial for this event
|
// Get the serial for this event
|
||||||
serial, err := db.GetSerialById(ev.Id)
|
serial, err := db.GetSerialById(ev.ID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Failed to get serial for event #%d: %v", eventCount+1, err,
|
"Failed to get serial for event #%d: %v", eventCount+1, err,
|
||||||
@@ -73,7 +73,7 @@ func TestGetSerialsByRange(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if serial != nil {
|
if serial != nil {
|
||||||
eventSerials[string(ev.Id)] = serial
|
eventSerials[string(ev.ID)] = serial
|
||||||
}
|
}
|
||||||
|
|
||||||
eventCount++
|
eventCount++
|
||||||
|
|||||||
82
pkg/database/import.go
Normal file
82
pkg/database/import.go
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
package database
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"io"
|
||||||
|
"orly.dev/pkg/encoders/event"
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
"orly.dev/pkg/utils/log"
|
||||||
|
"os"
|
||||||
|
"runtime/debug"
|
||||||
|
)
|
||||||
|
|
||||||
|
const maxLen = 500000000
|
||||||
|
|
||||||
|
// Import a collection of events in line structured minified JSON format (JSONL).
|
||||||
|
func (d *D) Import(rr io.Reader) {
|
||||||
|
// store to disk so we can return fast
|
||||||
|
tmpPath := os.TempDir() + string(os.PathSeparator) + "orly"
|
||||||
|
os.MkdirAll(tmpPath, 0700)
|
||||||
|
tmp, err := os.CreateTemp(tmpPath, "")
|
||||||
|
if chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
log.I.F("buffering upload to %s", tmp.Name())
|
||||||
|
if _, err = io.Copy(tmp, rr); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err = tmp.Seek(0, 0); chk.E(err) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
var err error
|
||||||
|
// Create a scanner to read the buffer line by line
|
||||||
|
scan := bufio.NewScanner(tmp)
|
||||||
|
scanBuf := make([]byte, maxLen)
|
||||||
|
scan.Buffer(scanBuf, maxLen)
|
||||||
|
|
||||||
|
var count, total int
|
||||||
|
for scan.Scan() {
|
||||||
|
select {
|
||||||
|
case <-d.ctx.Done():
|
||||||
|
log.I.F("context closed")
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
|
b := scan.Bytes()
|
||||||
|
total += len(b) + 1
|
||||||
|
if len(b) < 1 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
ev := &event.E{}
|
||||||
|
if _, err = ev.Unmarshal(b); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, _, err = d.SaveEvent(d.ctx, ev, false, nil); err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
b = nil
|
||||||
|
ev = nil
|
||||||
|
count++
|
||||||
|
if count%100 == 0 {
|
||||||
|
log.I.F("received %d events", count)
|
||||||
|
debug.FreeOSMemory()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
log.I.F("read %d bytes and saved %d events", total, count)
|
||||||
|
err = scan.Err()
|
||||||
|
if chk.E(err) {
|
||||||
|
}
|
||||||
|
|
||||||
|
// Help garbage collection
|
||||||
|
tmp = nil
|
||||||
|
}()
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
@@ -186,7 +186,7 @@ func EventDec(ser *types.Uint40) (enc *T) { return New(NewPrefix(), ser) }
|
|||||||
// Id contains a truncated 8-byte hash of an event index. This is the secondary
|
// Id contains a truncated 8-byte hash of an event index. This is the secondary
|
||||||
// key of an event, the primary key is the serial found in the Event.
|
// key of an event, the primary key is the serial found in the Event.
|
||||||
//
|
//
|
||||||
// 3 prefix|8 Id hash|5 serial
|
// 3 prefix|8 ID hash|5 serial
|
||||||
var Id = next()
|
var Id = next()
|
||||||
|
|
||||||
func IdVars() (id *types.IdHash, ser *types.Uint40) {
|
func IdVars() (id *types.IdHash, ser *types.Uint40) {
|
||||||
@@ -202,7 +202,7 @@ func IdDec(id *types.IdHash, ser *types.Uint40) (enc *T) {
|
|||||||
// FullIdPubkey is an index designed to enable sorting and filtering of
|
// FullIdPubkey is an index designed to enable sorting and filtering of
|
||||||
// results found via other indexes, without having to decode the event.
|
// results found via other indexes, without having to decode the event.
|
||||||
//
|
//
|
||||||
// 3 prefix|5 serial|32 Id|8 pubkey hash|8 timestamp
|
// 3 prefix|5 serial|32 ID|8 pubkey hash|8 timestamp
|
||||||
var FullIdPubkey = next()
|
var FullIdPubkey = next()
|
||||||
|
|
||||||
func FullIdPubkeyVars() (
|
func FullIdPubkeyVars() (
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"io"
|
"io"
|
||||||
"orly.dev/pkg/database/indexes/types"
|
"orly.dev/pkg/database/indexes/types"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
@@ -49,7 +48,7 @@ func TestPrefixMethods(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test MarshalWrite method
|
// Test MarshalWrite method
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err := prefix.MarshalWrite(buf)
|
err := prefix.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -83,7 +82,7 @@ func TestPrefixFunction(t *testing.T) {
|
|||||||
expected I
|
expected I
|
||||||
}{
|
}{
|
||||||
{"Event", Event, EventPrefix},
|
{"Event", Event, EventPrefix},
|
||||||
{"Id", Id, IdPrefix},
|
{"ID", Id, IdPrefix},
|
||||||
{"FullIdPubkey", FullIdPubkey, FullIdPubkeyPrefix},
|
{"FullIdPubkey", FullIdPubkey, FullIdPubkeyPrefix},
|
||||||
{"Pubkey", Pubkey, PubkeyPrefix},
|
{"Pubkey", Pubkey, PubkeyPrefix},
|
||||||
{"CreatedAt", CreatedAt, CreatedAtPrefix},
|
{"CreatedAt", CreatedAt, CreatedAtPrefix},
|
||||||
@@ -122,7 +121,7 @@ func TestIdentify(t *testing.T) {
|
|||||||
expected int
|
expected int
|
||||||
}{
|
}{
|
||||||
{"Event", EventPrefix, Event},
|
{"Event", EventPrefix, Event},
|
||||||
{"Id", IdPrefix, Id},
|
{"ID", IdPrefix, Id},
|
||||||
{"FullIdPubkey", FullIdPubkeyPrefix, FullIdPubkey},
|
{"FullIdPubkey", FullIdPubkeyPrefix, FullIdPubkey},
|
||||||
{"Pubkey", PubkeyPrefix, Pubkey},
|
{"Pubkey", PubkeyPrefix, Pubkey},
|
||||||
{"CreatedAt", CreatedAtPrefix, CreatedAt},
|
{"CreatedAt", CreatedAtPrefix, CreatedAt},
|
||||||
@@ -209,7 +208,7 @@ func TestTStruct(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test MarshalWrite
|
// Test MarshalWrite
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err := enc.MarshalWrite(buf)
|
err := enc.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -272,7 +271,7 @@ func TestEventFunctions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test marshaling and unmarshaling
|
// Test marshaling and unmarshaling
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err := enc.MarshalWrite(buf)
|
err := enc.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -318,7 +317,7 @@ func TestIdFunctions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test marshaling and unmarshaling
|
// Test marshaling and unmarshaling
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err := enc.MarshalWrite(buf)
|
err := enc.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -391,7 +390,7 @@ func TestIdPubkeyFunctions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test marshaling and unmarshaling
|
// Test marshaling and unmarshaling
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err = enc.MarshalWrite(buf)
|
err = enc.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -452,7 +451,7 @@ func TestCreatedAtFunctions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test marshaling and unmarshaling
|
// Test marshaling and unmarshaling
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err := enc.MarshalWrite(buf)
|
err := enc.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -516,7 +515,7 @@ func TestPubkeyFunctions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test marshaling and unmarshaling
|
// Test marshaling and unmarshaling
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err = enc.MarshalWrite(buf)
|
err = enc.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -588,7 +587,7 @@ func TestPubkeyTagFunctions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test marshaling and unmarshaling
|
// Test marshaling and unmarshaling
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err = enc.MarshalWrite(buf)
|
err = enc.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -660,7 +659,7 @@ func TestTagFunctions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test marshaling and unmarshaling
|
// Test marshaling and unmarshaling
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err = enc.MarshalWrite(buf)
|
err = enc.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -724,7 +723,7 @@ func TestKindFunctions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test marshaling and unmarshaling
|
// Test marshaling and unmarshaling
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err := enc.MarshalWrite(buf)
|
err := enc.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -789,7 +788,7 @@ func TestKindTagFunctions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test marshaling and unmarshaling
|
// Test marshaling and unmarshaling
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err = enc.MarshalWrite(buf)
|
err = enc.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -865,7 +864,7 @@ func TestKindPubkeyFunctions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test marshaling and unmarshaling
|
// Test marshaling and unmarshaling
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err = enc.MarshalWrite(buf)
|
err = enc.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -941,7 +940,7 @@ func TestKindPubkeyTagFunctions(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test marshaling and unmarshaling
|
// Test marshaling and unmarshaling
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err = enc.MarshalWrite(buf)
|
err = enc.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
|
|||||||
@@ -15,7 +15,7 @@ type Id struct {
|
|||||||
func (fi *Id) FromId(id []byte) (err error) {
|
func (fi *Id) FromId(id []byte) (err error) {
|
||||||
if len(id) != IdLen {
|
if len(id) != IdLen {
|
||||||
err = errorf.E(
|
err = errorf.E(
|
||||||
"fullid.FromId: invalid Id length, got %d require %d", len(id),
|
"fullid.FromId: invalid ID length, got %d require %d", len(id),
|
||||||
IdLen,
|
IdLen,
|
||||||
)
|
)
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -2,10 +2,10 @@ package types
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
|
||||||
"github.com/minio/sha256-simd"
|
"github.com/minio/sha256-simd"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -43,7 +43,7 @@ func TestFromId(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestIdMarshalWriteUnmarshalRead(t *testing.T) {
|
func TestIdMarshalWriteUnmarshalRead(t *testing.T) {
|
||||||
// Create a Id with a known value
|
// Create a ID with a known value
|
||||||
fi1 := &Id{}
|
fi1 := &Id{}
|
||||||
validId := make([]byte, sha256.Size)
|
validId := make([]byte, sha256.Size)
|
||||||
for i := 0; i < sha256.Size; i++ {
|
for i := 0; i < sha256.Size; i++ {
|
||||||
@@ -55,7 +55,7 @@ func TestIdMarshalWriteUnmarshalRead(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test MarshalWrite
|
// Test MarshalWrite
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err = fi1.MarshalWrite(buf)
|
err = fi1.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -80,7 +80,7 @@ func TestIdMarshalWriteUnmarshalRead(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestIdUnmarshalReadWithCorruptedData(t *testing.T) {
|
func TestIdUnmarshalReadWithCorruptedData(t *testing.T) {
|
||||||
// Create a Id with a known value
|
// Create a ID with a known value
|
||||||
fi1 := &Id{}
|
fi1 := &Id{}
|
||||||
validId := make([]byte, sha256.Size)
|
validId := make([]byte, sha256.Size)
|
||||||
for i := 0; i < sha256.Size; i++ {
|
for i := 0; i < sha256.Size; i++ {
|
||||||
@@ -91,7 +91,7 @@ func TestIdUnmarshalReadWithCorruptedData(t *testing.T) {
|
|||||||
t.Fatalf("FromId failed: %v", err)
|
t.Fatalf("FromId failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a second Id with a different value
|
// Create a second ID with a different value
|
||||||
fi2 := &Id{}
|
fi2 := &Id{}
|
||||||
differentId := make([]byte, sha256.Size)
|
differentId := make([]byte, sha256.Size)
|
||||||
for i := 0; i < sha256.Size; i++ {
|
for i := 0; i < sha256.Size; i++ {
|
||||||
|
|||||||
@@ -2,10 +2,10 @@ package types
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
|
||||||
"github.com/minio/sha256-simd"
|
"github.com/minio/sha256-simd"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -45,7 +45,7 @@ func TestIdent_MarshalWriteUnmarshalRead(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test MarshalWrite
|
// Test MarshalWrite
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err = i1.MarshalWrite(buf)
|
err = i1.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
|
|||||||
@@ -23,7 +23,7 @@ func (i *IdHash) Set(idh []byte) {
|
|||||||
func (i *IdHash) FromId(id []byte) (err error) {
|
func (i *IdHash) FromId(id []byte) (err error) {
|
||||||
if len(id) != sha256.Size {
|
if len(id) != sha256.Size {
|
||||||
err = errorf.E(
|
err = errorf.E(
|
||||||
"FromId: invalid Id length, got %d require %d", len(id),
|
"FromId: invalid ID length, got %d require %d", len(id),
|
||||||
sha256.Size,
|
sha256.Size,
|
||||||
)
|
)
|
||||||
return
|
return
|
||||||
@@ -43,7 +43,7 @@ func (i *IdHash) FromIdBase64(idb64 string) (err error) {
|
|||||||
// Check if the decoded ID has the correct length
|
// Check if the decoded ID has the correct length
|
||||||
if len(decoded) != sha256.Size {
|
if len(decoded) != sha256.Size {
|
||||||
err = errorf.E(
|
err = errorf.E(
|
||||||
"FromIdBase64: invalid Id length, got %d require %d", len(decoded),
|
"FromIdBase64: invalid ID length, got %d require %d", len(decoded),
|
||||||
sha256.Size,
|
sha256.Size,
|
||||||
)
|
)
|
||||||
return
|
return
|
||||||
@@ -62,7 +62,7 @@ func (i *IdHash) FromIdHex(idh string) (err error) {
|
|||||||
}
|
}
|
||||||
if len(id) != sha256.Size {
|
if len(id) != sha256.Size {
|
||||||
err = errorf.E(
|
err = errorf.E(
|
||||||
"FromIdHex: invalid Id length, got %d require %d", len(id),
|
"FromIdHex: invalid ID length, got %d require %d", len(id),
|
||||||
sha256.Size,
|
sha256.Size,
|
||||||
)
|
)
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -3,10 +3,10 @@ package types
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
"testing"
|
||||||
|
|
||||||
"orly.dev/pkg/encoders/hex"
|
"orly.dev/pkg/encoders/hex"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/minio/sha256-simd"
|
"github.com/minio/sha256-simd"
|
||||||
)
|
)
|
||||||
@@ -142,7 +142,7 @@ func TestIdHashMarshalWriteUnmarshalRead(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test MarshalWrite
|
// Test MarshalWrite
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err = i1.MarshalWrite(buf)
|
err = i1.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
|
|||||||
@@ -2,9 +2,9 @@ package types
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestLetter_New(t *testing.T) {
|
func TestLetter_New(t *testing.T) {
|
||||||
@@ -53,7 +53,7 @@ func TestLetter_MarshalWriteUnmarshalRead(t *testing.T) {
|
|||||||
l1 := new(Letter)
|
l1 := new(Letter)
|
||||||
l1.Set('A')
|
l1.Set('A')
|
||||||
// Test MarshalWrite
|
// Test MarshalWrite
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err := l1.MarshalWrite(buf)
|
err := l1.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
|
|||||||
@@ -2,11 +2,11 @@ package types
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"testing"
|
||||||
|
|
||||||
"orly.dev/pkg/crypto/ec/schnorr"
|
"orly.dev/pkg/crypto/ec/schnorr"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/encoders/hex"
|
"orly.dev/pkg/encoders/hex"
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/minio/sha256-simd"
|
"github.com/minio/sha256-simd"
|
||||||
)
|
)
|
||||||
@@ -105,7 +105,7 @@ func TestPubHash_MarshalWriteUnmarshalRead(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test MarshalWrite
|
// Test MarshalWrite
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err = ph1.MarshalWrite(buf)
|
err = ph1.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ package types
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"io"
|
"io"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -29,7 +28,7 @@ func (ts *Timestamp) ToTimestamp() (timestamp int64) {
|
|||||||
func (ts *Timestamp) Bytes() (b []byte, err error) {
|
func (ts *Timestamp) Bytes() (b []byte, err error) {
|
||||||
v := new(Uint64)
|
v := new(Uint64)
|
||||||
v.Set(uint64(ts.val))
|
v.Set(uint64(ts.val))
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
if err = v.MarshalWrite(buf); chk.E(err) {
|
if err = v.MarshalWrite(buf); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,10 +2,10 @@ package types
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestTimestamp_FromInt(t *testing.T) {
|
func TestTimestamp_FromInt(t *testing.T) {
|
||||||
@@ -89,7 +89,7 @@ func TestTimestamp_FromBytes(t *testing.T) {
|
|||||||
v.Set(12345)
|
v.Set(12345)
|
||||||
|
|
||||||
// Marshal it to bytes
|
// Marshal it to bytes
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err := v.MarshalWrite(buf)
|
err := v.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -163,7 +163,7 @@ func TestTimestamp_Bytes(t *testing.T) {
|
|||||||
func TestTimestamp_MarshalWriteUnmarshalRead(t *testing.T) {
|
func TestTimestamp_MarshalWriteUnmarshalRead(t *testing.T) {
|
||||||
// Test with a positive value
|
// Test with a positive value
|
||||||
ts1 := &Timestamp{val: 12345}
|
ts1 := &Timestamp{val: 12345}
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err := ts1.MarshalWrite(buf)
|
err := ts1.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -183,7 +183,7 @@ func TestTimestamp_MarshalWriteUnmarshalRead(t *testing.T) {
|
|||||||
|
|
||||||
// Test with a negative value
|
// Test with a negative value
|
||||||
ts1 = &Timestamp{val: -12345}
|
ts1 = &Timestamp{val: -12345}
|
||||||
buf = codecbuf.Get()
|
buf = new(bytes.Buffer)
|
||||||
err = ts1.MarshalWrite(buf)
|
err = ts1.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
@@ -225,7 +225,7 @@ func TestTimestamp_WithCurrentTime(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test MarshalWrite and UnmarshalRead
|
// Test MarshalWrite and UnmarshalRead
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
err := ts.MarshalWrite(buf)
|
err := ts.MarshalWrite(buf)
|
||||||
if chk.E(err) {
|
if chk.E(err) {
|
||||||
t.Fatalf("MarshalWrite failed: %v", err)
|
t.Fatalf("MarshalWrite failed: %v", err)
|
||||||
|
|||||||
@@ -3,11 +3,11 @@ package types
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"math"
|
"math"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
|
||||||
"lukechampine.com/frand"
|
"lukechampine.com/frand"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -44,7 +44,7 @@ func TestUint16(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test encoding to []byte and decoding back
|
// Test encoding to []byte and decoding back
|
||||||
bufEnc := codecbuf.Get()
|
bufEnc := new(bytes.Buffer)
|
||||||
|
|
||||||
// MarshalWrite
|
// MarshalWrite
|
||||||
err := encodedUint16.MarshalWrite(bufEnc)
|
err := encodedUint16.MarshalWrite(bufEnc)
|
||||||
|
|||||||
@@ -1,10 +1,11 @@
|
|||||||
package types
|
package types
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
"bytes"
|
||||||
"orly.dev/pkg/utils/chk"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestUint24(t *testing.T) {
|
func TestUint24(t *testing.T) {
|
||||||
@@ -45,7 +46,7 @@ func TestUint24(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test MarshalWrite and UnmarshalRead
|
// Test MarshalWrite and UnmarshalRead
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
|
|
||||||
// MarshalWrite directly to the buffer
|
// MarshalWrite directly to the buffer
|
||||||
if err := codec.MarshalWrite(buf); chk.E(err) {
|
if err := codec.MarshalWrite(buf); chk.E(err) {
|
||||||
|
|||||||
@@ -3,11 +3,11 @@ package types
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"math"
|
"math"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
|
||||||
"lukechampine.com/frand"
|
"lukechampine.com/frand"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -43,7 +43,7 @@ func TestUint32(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test encoding to []byte and decoding back
|
// Test encoding to []byte and decoding back
|
||||||
bufEnc := codecbuf.Get()
|
bufEnc := new(bytes.Buffer)
|
||||||
|
|
||||||
// MarshalWrite
|
// MarshalWrite
|
||||||
err := codec.MarshalWrite(bufEnc)
|
err := codec.MarshalWrite(bufEnc)
|
||||||
|
|||||||
@@ -1,10 +1,11 @@
|
|||||||
package types
|
package types
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
"bytes"
|
||||||
"orly.dev/pkg/utils/chk"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestUint40(t *testing.T) {
|
func TestUint40(t *testing.T) {
|
||||||
@@ -48,7 +49,7 @@ func TestUint40(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test MarshalWrite and UnmarshalRead
|
// Test MarshalWrite and UnmarshalRead
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
|
|
||||||
// Marshal to a buffer
|
// Marshal to a buffer
|
||||||
if err = codec.MarshalWrite(buf); chk.E(err) {
|
if err = codec.MarshalWrite(buf); chk.E(err) {
|
||||||
|
|||||||
@@ -3,11 +3,11 @@ package types
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"math"
|
"math"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
"orly.dev/pkg/utils/chk"
|
||||||
|
|
||||||
"lukechampine.com/frand"
|
"lukechampine.com/frand"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -43,7 +43,7 @@ func TestUint64(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Test encoding to []byte and decoding back
|
// Test encoding to []byte and decoding back
|
||||||
bufEnc := codecbuf.Get()
|
bufEnc := new(bytes.Buffer)
|
||||||
|
|
||||||
// MarshalWrite
|
// MarshalWrite
|
||||||
err := codec.MarshalWrite(bufEnc)
|
err := codec.MarshalWrite(bufEnc)
|
||||||
|
|||||||
@@ -1,8 +1,8 @@
|
|||||||
package types
|
package types
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"io"
|
"io"
|
||||||
"orly.dev/pkg/encoders/codecbuf"
|
|
||||||
"orly.dev/pkg/utils/chk"
|
"orly.dev/pkg/utils/chk"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -35,8 +35,7 @@ func (w *Word) MarshalWrite(wr io.Writer) (err error) {
|
|||||||
|
|
||||||
// UnmarshalRead reads the word from the reader, stopping at the zero-byte marker
|
// UnmarshalRead reads the word from the reader, stopping at the zero-byte marker
|
||||||
func (w *Word) UnmarshalRead(r io.Reader) error {
|
func (w *Word) UnmarshalRead(r io.Reader) error {
|
||||||
buf := codecbuf.Get()
|
buf := new(bytes.Buffer)
|
||||||
defer codecbuf.Put(buf)
|
|
||||||
tmp := make([]byte, 1)
|
tmp := make([]byte, 1)
|
||||||
foundEndMarker := false
|
foundEndMarker := false
|
||||||
|
|
||||||
|
|||||||
@@ -45,7 +45,7 @@ func TestMultipleParameterizedReplaceableEvents(t *testing.T) {
|
|||||||
baseEvent.Sign(sign)
|
baseEvent.Sign(sign)
|
||||||
|
|
||||||
// Save the base parameterized replaceable event
|
// Save the base parameterized replaceable event
|
||||||
if _, _, err := db.SaveEvent(ctx, baseEvent); err != nil {
|
if _, _, err := db.SaveEvent(ctx, baseEvent, false, nil); err != nil {
|
||||||
t.Fatalf("Failed to save base parameterized replaceable event: %v", err)
|
t.Fatalf("Failed to save base parameterized replaceable event: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -63,7 +63,7 @@ func TestMultipleParameterizedReplaceableEvents(t *testing.T) {
|
|||||||
newerEvent.Sign(sign)
|
newerEvent.Sign(sign)
|
||||||
|
|
||||||
// Save the newer parameterized replaceable event
|
// Save the newer parameterized replaceable event
|
||||||
if _, _, err := db.SaveEvent(ctx, newerEvent); err != nil {
|
if _, _, err := db.SaveEvent(ctx, newerEvent, false, nil); err != nil {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Failed to save newer parameterized replaceable event: %v", err,
|
"Failed to save newer parameterized replaceable event: %v", err,
|
||||||
)
|
)
|
||||||
@@ -83,7 +83,7 @@ func TestMultipleParameterizedReplaceableEvents(t *testing.T) {
|
|||||||
newestEvent.Sign(sign)
|
newestEvent.Sign(sign)
|
||||||
|
|
||||||
// Save the newest parameterized replaceable event
|
// Save the newest parameterized replaceable event
|
||||||
if _, _, err := db.SaveEvent(ctx, newestEvent); err != nil {
|
if _, _, err := db.SaveEvent(ctx, newestEvent, false, nil); err != nil {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Failed to save newest parameterized replaceable event: %v", err,
|
"Failed to save newest parameterized replaceable event: %v", err,
|
||||||
)
|
)
|
||||||
@@ -127,10 +127,10 @@ func TestMultipleParameterizedReplaceableEvents(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify it's the newest event
|
// Verify it's the newest event
|
||||||
if !bytes.Equal(evs[0].Id, newestEvent.Id) {
|
if !bytes.Equal(evs[0].ID, newestEvent.ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Event ID doesn't match the newest event. Got %x, expected %x",
|
"Event ID doesn't match the newest event. Got %x, expected %x",
|
||||||
evs[0].Id, newestEvent.Id,
|
evs[0].ID, newestEvent.ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -145,7 +145,7 @@ func TestMultipleParameterizedReplaceableEvents(t *testing.T) {
|
|||||||
// Query for the base event by ID
|
// Query for the base event by ID
|
||||||
evs, err = db.QueryEvents(
|
evs, err = db.QueryEvents(
|
||||||
ctx, &filter.F{
|
ctx, &filter.F{
|
||||||
Ids: tag.New(baseEvent.Id),
|
Ids: tag.New(baseEvent.ID),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -161,10 +161,10 @@ func TestMultipleParameterizedReplaceableEvents(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify it's the base event
|
// Verify it's the base event
|
||||||
if !bytes.Equal(evs[0].Id, baseEvent.Id) {
|
if !bytes.Equal(evs[0].ID, baseEvent.ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Event ID doesn't match when querying for base event by ID. Got %x, expected %x",
|
"Event ID doesn't match when querying for base event by ID. Got %x, expected %x",
|
||||||
evs[0].Id, baseEvent.Id,
|
evs[0].ID, baseEvent.ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -16,11 +16,6 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
)
|
)
|
||||||
|
|
||||||
// QueryEvents retrieves events based on the provided filter. If the filter
|
|
||||||
// contains Ids, it fetches events by those Ids directly, overriding other
|
|
||||||
// filter criteria. Otherwise, it queries by other filter criteria and fetches
|
|
||||||
// matching events. Results are returned in reverse chronological order of their
|
|
||||||
// creation timestamps.
|
|
||||||
func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
|
func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
|
||||||
// if there is Ids in the query, this overrides anything else
|
// if there is Ids in the query, this overrides anything else
|
||||||
if f.Ids != nil && f.Ids.Len() > 0 {
|
if f.Ids != nil && f.Ids.Len() > 0 {
|
||||||
@@ -32,7 +27,7 @@ func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
|
|||||||
}
|
}
|
||||||
// fetch the events
|
// fetch the events
|
||||||
var ev *event.E
|
var ev *event.E
|
||||||
if ev, err = d.FetchEventBySerial(ser); chk.E(err) {
|
if ev, err = d.FetchEventBySerial(ser); err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
evs = append(evs, ev)
|
evs = append(evs, ev)
|
||||||
@@ -218,7 +213,7 @@ func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
|
|||||||
isIdInFilter := false
|
isIdInFilter := false
|
||||||
if f.Ids != nil && f.Ids.Len() > 0 {
|
if f.Ids != nil && f.Ids.Len() > 0 {
|
||||||
for i := 0; i < f.Ids.Len(); i++ {
|
for i := 0; i < f.Ids.Len(); i++ {
|
||||||
if bytes.Equal(ev.Id, f.Ids.B(i)) {
|
if bytes.Equal(ev.ID, f.Ids.B(i)) {
|
||||||
isIdInFilter = true
|
isIdInFilter = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -62,7 +62,7 @@ func setupTestDB(t *testing.T) (
|
|||||||
events = append(events, ev)
|
events = append(events, ev)
|
||||||
|
|
||||||
// Save the event to the database
|
// Save the event to the database
|
||||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -90,7 +90,7 @@ func TestQueryEventsByID(t *testing.T) {
|
|||||||
|
|
||||||
evs, err := db.QueryEvents(
|
evs, err := db.QueryEvents(
|
||||||
ctx, &filter.F{
|
ctx, &filter.F{
|
||||||
Ids: tag.New(testEvent.Id),
|
Ids: tag.New(testEvent.ID),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -103,10 +103,10 @@ func TestQueryEventsByID(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify it's the correct event
|
// Verify it's the correct event
|
||||||
if !bytes.Equal(evs[0].Id, testEvent.Id) {
|
if !bytes.Equal(evs[0].ID, testEvent.ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Event ID doesn't match. Got %x, expected %x", evs[0].Id,
|
"Event ID doesn't match. Got %x, expected %x", evs[0].ID,
|
||||||
testEvent.Id,
|
testEvent.ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -202,7 +202,9 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
|||||||
replaceableEvent.Tags = tags.New()
|
replaceableEvent.Tags = tags.New()
|
||||||
replaceableEvent.Sign(sign)
|
replaceableEvent.Sign(sign)
|
||||||
// Save the replaceable event
|
// Save the replaceable event
|
||||||
if _, _, err := db.SaveEvent(ctx, replaceableEvent); err != nil {
|
if _, _, err := db.SaveEvent(
|
||||||
|
ctx, replaceableEvent, false, nil,
|
||||||
|
); err != nil {
|
||||||
t.Fatalf("Failed to save replaceable event: %v", err)
|
t.Fatalf("Failed to save replaceable event: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -216,14 +218,14 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
|||||||
newerEvent.Tags = tags.New()
|
newerEvent.Tags = tags.New()
|
||||||
newerEvent.Sign(sign)
|
newerEvent.Sign(sign)
|
||||||
// Save the newer event
|
// Save the newer event
|
||||||
if _, _, err := db.SaveEvent(ctx, newerEvent); err != nil {
|
if _, _, err := db.SaveEvent(ctx, newerEvent, false, nil); err != nil {
|
||||||
t.Fatalf("Failed to save newer event: %v", err)
|
t.Fatalf("Failed to save newer event: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Query for the original event by ID
|
// Query for the original event by ID
|
||||||
evs, err := db.QueryEvents(
|
evs, err := db.QueryEvents(
|
||||||
ctx, &filter.F{
|
ctx, &filter.F{
|
||||||
Ids: tag.New(replaceableEvent.Id),
|
Ids: tag.New(replaceableEvent.ID),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -239,10 +241,10 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify it's the original event
|
// Verify it's the original event
|
||||||
if !bytes.Equal(evs[0].Id, replaceableEvent.Id) {
|
if !bytes.Equal(evs[0].ID, replaceableEvent.ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Event ID doesn't match when querying for replaced event. Got %x, expected %x",
|
"Event ID doesn't match when querying for replaced event. Got %x, expected %x",
|
||||||
evs[0].Id, replaceableEvent.Id,
|
evs[0].ID, replaceableEvent.ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -269,10 +271,10 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify it's the newer event
|
// Verify it's the newer event
|
||||||
if !bytes.Equal(evs[0].Id, newerEvent.Id) {
|
if !bytes.Equal(evs[0].ID, newerEvent.ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Event ID doesn't match when querying for replaceable events. Got %x, expected %x",
|
"Event ID doesn't match when querying for replaceable events. Got %x, expected %x",
|
||||||
evs[0].Id, newerEvent.Id,
|
evs[0].ID, newerEvent.ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -289,11 +291,11 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
|||||||
|
|
||||||
// Add an e-tag referencing the replaceable event
|
// Add an e-tag referencing the replaceable event
|
||||||
deletionEvent.Tags = deletionEvent.Tags.AppendTags(
|
deletionEvent.Tags = deletionEvent.Tags.AppendTags(
|
||||||
tag.New([]byte{'e'}, []byte(hex.Enc(replaceableEvent.Id))),
|
tag.New([]byte{'e'}, []byte(hex.Enc(replaceableEvent.ID))),
|
||||||
)
|
)
|
||||||
|
|
||||||
// Save the deletion event
|
// Save the deletion event
|
||||||
if _, _, err = db.SaveEvent(ctx, deletionEvent); err != nil {
|
if _, _, err = db.SaveEvent(ctx, deletionEvent, false, nil); err != nil {
|
||||||
t.Fatalf("Failed to save deletion event: %v", err)
|
t.Fatalf("Failed to save deletion event: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -319,17 +321,17 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify it's still the newer event
|
// Verify it's still the newer event
|
||||||
if !bytes.Equal(evs[0].Id, newerEvent.Id) {
|
if !bytes.Equal(evs[0].ID, newerEvent.ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Event ID doesn't match after deletion. Got %x, expected %x",
|
"Event ID doesn't match after deletion. Got %x, expected %x",
|
||||||
evs[0].Id, newerEvent.Id,
|
evs[0].ID, newerEvent.ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Query for the original event by ID
|
// Query for the original event by ID
|
||||||
evs, err = db.QueryEvents(
|
evs, err = db.QueryEvents(
|
||||||
ctx, &filter.F{
|
ctx, &filter.F{
|
||||||
Ids: tag.New(replaceableEvent.Id),
|
Ids: tag.New(replaceableEvent.ID),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -345,10 +347,10 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify it's the original event
|
// Verify it's the original event
|
||||||
if !bytes.Equal(evs[0].Id, replaceableEvent.Id) {
|
if !bytes.Equal(evs[0].ID, replaceableEvent.ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Event ID doesn't match when querying for deleted event by ID. Got %x, expected %x",
|
"Event ID doesn't match when querying for deleted event by ID. Got %x, expected %x",
|
||||||
evs[0].Id, replaceableEvent.Id,
|
evs[0].ID, replaceableEvent.ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -379,7 +381,7 @@ func TestParameterizedReplaceableEventsAndDeletion(t *testing.T) {
|
|||||||
paramEvent.Sign(sign)
|
paramEvent.Sign(sign)
|
||||||
|
|
||||||
// Save the parameterized replaceable event
|
// Save the parameterized replaceable event
|
||||||
if _, _, err := db.SaveEvent(ctx, paramEvent); err != nil {
|
if _, _, err := db.SaveEvent(ctx, paramEvent, false, nil); err != nil {
|
||||||
t.Fatalf("Failed to save parameterized replaceable event: %v", err)
|
t.Fatalf("Failed to save parameterized replaceable event: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -405,7 +407,9 @@ func TestParameterizedReplaceableEventsAndDeletion(t *testing.T) {
|
|||||||
paramDeletionEvent.Sign(sign)
|
paramDeletionEvent.Sign(sign)
|
||||||
|
|
||||||
// Save the parameterized deletion event
|
// Save the parameterized deletion event
|
||||||
if _, _, err := db.SaveEvent(ctx, paramDeletionEvent); err != nil {
|
if _, _, err := db.SaveEvent(
|
||||||
|
ctx, paramDeletionEvent, false, nil,
|
||||||
|
); err != nil {
|
||||||
t.Fatalf("Failed to save parameterized deletion event: %v", err)
|
t.Fatalf("Failed to save parameterized deletion event: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -433,12 +437,14 @@ func TestParameterizedReplaceableEventsAndDeletion(t *testing.T) {
|
|||||||
paramDeletionEvent2.Tags = tags.New()
|
paramDeletionEvent2.Tags = tags.New()
|
||||||
// Add an e-tag referencing the parameterized replaceable event
|
// Add an e-tag referencing the parameterized replaceable event
|
||||||
paramDeletionEvent2.Tags = paramDeletionEvent2.Tags.AppendTags(
|
paramDeletionEvent2.Tags = paramDeletionEvent2.Tags.AppendTags(
|
||||||
tag.New([]byte{'e'}, []byte(hex.Enc(paramEvent.Id))),
|
tag.New([]byte{'e'}, []byte(hex.Enc(paramEvent.ID))),
|
||||||
)
|
)
|
||||||
paramDeletionEvent2.Sign(sign)
|
paramDeletionEvent2.Sign(sign)
|
||||||
|
|
||||||
// Save the parameterized deletion event with e-tag
|
// Save the parameterized deletion event with e-tag
|
||||||
if _, _, err := db.SaveEvent(ctx, paramDeletionEvent2); err != nil {
|
if _, _, err := db.SaveEvent(
|
||||||
|
ctx, paramDeletionEvent2, false, nil,
|
||||||
|
); err != nil {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Failed to save parameterized deletion event with e-tag: %v", err,
|
"Failed to save parameterized deletion event with e-tag: %v", err,
|
||||||
)
|
)
|
||||||
@@ -483,7 +489,7 @@ func TestParameterizedReplaceableEventsAndDeletion(t *testing.T) {
|
|||||||
// Query for the parameterized event by ID
|
// Query for the parameterized event by ID
|
||||||
evs, err = db.QueryEvents(
|
evs, err = db.QueryEvents(
|
||||||
ctx, &filter.F{
|
ctx, &filter.F{
|
||||||
Ids: tag.New(paramEvent.Id),
|
Ids: tag.New(paramEvent.ID),
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -501,10 +507,10 @@ func TestParameterizedReplaceableEventsAndDeletion(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Verify it's the correct event
|
// Verify it's the correct event
|
||||||
if !bytes.Equal(evs[0].Id, paramEvent.Id) {
|
if !bytes.Equal(evs[0].ID, paramEvent.ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"Event ID doesn't match when querying for deleted parameterized event by ID. Got %x, expected %x",
|
"Event ID doesn't match when querying for deleted parameterized event by ID. Got %x, expected %x",
|
||||||
evs[0].Id, paramEvent.Id,
|
evs[0].ID, paramEvent.ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -57,7 +57,7 @@ func TestQueryForAuthorsTags(t *testing.T) {
|
|||||||
events = append(events, ev)
|
events = append(events, ev)
|
||||||
|
|
||||||
// Save the event to the database
|
// Save the event to the database
|
||||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -131,7 +131,7 @@ func TestQueryForAuthorsTags(t *testing.T) {
|
|||||||
// Find the event with this ID
|
// Find the event with this ID
|
||||||
var found bool
|
var found bool
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
if bytes.Equal(result.Id, ev.Id) {
|
if bytes.Equal(result.Id, ev.ID) {
|
||||||
found = true
|
found = true
|
||||||
|
|
||||||
if !bytes.Equal(ev.Pubkey, testEvent.Pubkey) {
|
if !bytes.Equal(ev.Pubkey, testEvent.Pubkey) {
|
||||||
|
|||||||
@@ -56,7 +56,7 @@ func TestQueryForCreatedAt(t *testing.T) {
|
|||||||
events = append(events, ev)
|
events = append(events, ev)
|
||||||
|
|
||||||
// Save the event to the database
|
// Save the event to the database
|
||||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -105,7 +105,7 @@ func TestQueryForCreatedAt(t *testing.T) {
|
|||||||
// Find the event with this ID
|
// Find the event with this ID
|
||||||
var found bool
|
var found bool
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
if bytes.Equal(result.Id, ev.Id) {
|
if bytes.Equal(result.Id, ev.ID) {
|
||||||
found = true
|
found = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -143,7 +143,7 @@ func TestQueryForCreatedAt(t *testing.T) {
|
|||||||
// Find the event with this ID
|
// Find the event with this ID
|
||||||
var found bool
|
var found bool
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
if bytes.Equal(result.Id, ev.Id) {
|
if bytes.Equal(result.Id, ev.ID) {
|
||||||
found = true
|
found = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@@ -181,7 +181,7 @@ func TestQueryForCreatedAt(t *testing.T) {
|
|||||||
// Find the event with this ID
|
// Find the event with this ID
|
||||||
var found bool
|
var found bool
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
if bytes.Equal(result.Id, ev.Id) {
|
if bytes.Equal(result.Id, ev.ID) {
|
||||||
found = true
|
found = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -43,6 +43,9 @@ func (d *D) QueryForIds(c context.T, f *filter.F) (
|
|||||||
if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
|
if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if fidpk == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
tagIdPkTs = append(tagIdPkTs, *fidpk)
|
tagIdPkTs = append(tagIdPkTs, *fidpk)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -59,6 +62,9 @@ func (d *D) QueryForIds(c context.T, f *filter.F) (
|
|||||||
if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
|
if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if fidpk == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
temp = append(temp, *fidpk)
|
temp = append(temp, *fidpk)
|
||||||
}
|
}
|
||||||
var intersecting []store.IdPkTs
|
var intersecting []store.IdPkTs
|
||||||
@@ -93,6 +99,9 @@ func (d *D) QueryForIds(c context.T, f *filter.F) (
|
|||||||
if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
|
if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if fidpk == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
idPkTs = append(idPkTs, *fidpk)
|
idPkTs = append(idPkTs, *fidpk)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -60,7 +60,7 @@ func TestQueryForIds(t *testing.T) {
|
|||||||
events = append(events, ev)
|
events = append(events, ev)
|
||||||
|
|
||||||
// Save the event to the database
|
// Save the event to the database
|
||||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -86,34 +86,34 @@ func TestQueryForIds(t *testing.T) {
|
|||||||
len(idTsPk),
|
len(idTsPk),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
if !bytes.Equal(idTsPk[0].Id, events[5474].Id) {
|
if !bytes.Equal(idTsPk[0].Id, events[5474].ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"failed to get expected event, got %0x, expected %0x", idTsPk[0].Id,
|
"failed to get expected event, got %0x, expected %0x", idTsPk[0].Id,
|
||||||
events[5474].Id,
|
events[5474].ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
if !bytes.Equal(idTsPk[1].Id, events[272].Id) {
|
if !bytes.Equal(idTsPk[1].Id, events[272].ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"failed to get expected event, got %0x, expected %0x", idTsPk[1].Id,
|
"failed to get expected event, got %0x, expected %0x", idTsPk[1].Id,
|
||||||
events[272].Id,
|
events[272].ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
if !bytes.Equal(idTsPk[2].Id, events[1].Id) {
|
if !bytes.Equal(idTsPk[2].Id, events[1].ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"failed to get expected event, got %0x, expected %0x", idTsPk[2].Id,
|
"failed to get expected event, got %0x, expected %0x", idTsPk[2].Id,
|
||||||
events[1].Id,
|
events[1].ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
if !bytes.Equal(idTsPk[3].Id, events[80].Id) {
|
if !bytes.Equal(idTsPk[3].Id, events[80].ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"failed to get expected event, got %0x, expected %0x", idTsPk[3].Id,
|
"failed to get expected event, got %0x, expected %0x", idTsPk[3].Id,
|
||||||
events[80].Id,
|
events[80].ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
if !bytes.Equal(idTsPk[4].Id, events[123].Id) {
|
if !bytes.Equal(idTsPk[4].Id, events[123].ID) {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
"failed to get expected event, got %0x, expected %0x", idTsPk[4].Id,
|
"failed to get expected event, got %0x, expected %0x", idTsPk[4].Id,
|
||||||
events[123].Id,
|
events[123].ID,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -141,7 +141,7 @@ func TestQueryForIds(t *testing.T) {
|
|||||||
// Find the event with this ID
|
// Find the event with this ID
|
||||||
var found bool
|
var found bool
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
if bytes.Equal(result.Id, ev.Id) {
|
if bytes.Equal(result.Id, ev.ID) {
|
||||||
found = true
|
found = true
|
||||||
if ev.Kind.K != testKind.K {
|
if ev.Kind.K != testKind.K {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
@@ -207,7 +207,7 @@ func TestQueryForIds(t *testing.T) {
|
|||||||
// Find the event with this ID
|
// Find the event with this ID
|
||||||
var found bool
|
var found bool
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
if bytes.Equal(result.Id, ev.Id) {
|
if bytes.Equal(result.Id, ev.ID) {
|
||||||
found = true
|
found = true
|
||||||
|
|
||||||
// Check if the event has the tag we're looking for
|
// Check if the event has the tag we're looking for
|
||||||
@@ -258,7 +258,7 @@ func TestQueryForIds(t *testing.T) {
|
|||||||
// Find the event with this ID
|
// Find the event with this ID
|
||||||
var found bool
|
var found bool
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
if bytes.Equal(result.Id, ev.Id) {
|
if bytes.Equal(result.Id, ev.ID) {
|
||||||
found = true
|
found = true
|
||||||
if ev.Kind.K != testKind.K {
|
if ev.Kind.K != testKind.K {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
@@ -305,7 +305,7 @@ func TestQueryForIds(t *testing.T) {
|
|||||||
// Find the event with this ID
|
// Find the event with this ID
|
||||||
var found bool
|
var found bool
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
if bytes.Equal(result.Id, ev.Id) {
|
if bytes.Equal(result.Id, ev.ID) {
|
||||||
found = true
|
found = true
|
||||||
if ev.Kind.K != testEvent.Kind.K {
|
if ev.Kind.K != testEvent.Kind.K {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
@@ -366,7 +366,7 @@ func TestQueryForIds(t *testing.T) {
|
|||||||
// Find the event with this ID
|
// Find the event with this ID
|
||||||
var found bool
|
var found bool
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
if bytes.Equal(result.Id, ev.Id) {
|
if bytes.Equal(result.Id, ev.ID) {
|
||||||
found = true
|
found = true
|
||||||
if ev.Kind.K != testEvent.Kind.K {
|
if ev.Kind.K != testEvent.Kind.K {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
@@ -433,7 +433,7 @@ func TestQueryForIds(t *testing.T) {
|
|||||||
// Find the event with this ID
|
// Find the event with this ID
|
||||||
var found bool
|
var found bool
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
if bytes.Equal(result.Id, ev.Id) {
|
if bytes.Equal(result.Id, ev.ID) {
|
||||||
found = true
|
found = true
|
||||||
|
|
||||||
if !bytes.Equal(ev.Pubkey, testEvent.Pubkey) {
|
if !bytes.Equal(ev.Pubkey, testEvent.Pubkey) {
|
||||||
@@ -506,7 +506,7 @@ func TestQueryForIds(t *testing.T) {
|
|||||||
// Find the event with this ID
|
// Find the event with this ID
|
||||||
var found bool
|
var found bool
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
if bytes.Equal(result.Id, ev.Id) {
|
if bytes.Equal(result.Id, ev.ID) {
|
||||||
found = true
|
found = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -58,7 +58,7 @@ func TestQueryForKindsAuthorsTags(t *testing.T) {
|
|||||||
events = append(events, ev)
|
events = append(events, ev)
|
||||||
|
|
||||||
// Save the event to the database
|
// Save the event to the database
|
||||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
if _, _, err = db.SaveEvent(ctx, ev, false, nil); err != nil {
|
||||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -136,7 +136,7 @@ func TestQueryForKindsAuthorsTags(t *testing.T) {
|
|||||||
// Find the event with this ID
|
// Find the event with this ID
|
||||||
var found bool
|
var found bool
|
||||||
for _, ev := range events {
|
for _, ev := range events {
|
||||||
if bytes.Equal(result.Id, ev.Id) {
|
if bytes.Equal(result.Id, ev.ID) {
|
||||||
found = true
|
found = true
|
||||||
if ev.Kind.K != testKind.K {
|
if ev.Kind.K != testKind.K {
|
||||||
t.Fatalf(
|
t.Fatalf(
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user