Compare commits

107 Commits

| SHA1 |
|---|
| 7e6adf9fba |
| 7d5ebd5ccd |
| f8a321eaee |
| 48c7fab795 |
| f6054f3c37 |
| e1da199858 |
| 45b4f82995 |
| e58eb1d3e3 |
| 72d6ddff15 |
| a50ef55d8e |
| c2d5d2a165 |
| 05b13399e3 |
| 0dea0ca791 |
| ff017b45d2 |
| 50179e44ed |
| 34a3b1ba69 |
| 093a19db29 |
| 2ba361c915 |
| 7736bb7640 |
| 804e1c9649 |
| 81a6aade4e |
| fc9600f99d |
| 199f922208 |
| 405e223aa6 |
| fc3a89a309 |
| ba8166da07 |
| 3e3af08644 |
| fbdf565bf7 |
| 14b6960070 |
| f9896e52ea |
| ad7ca69964 |
| facf03783f |
| a5b6943320 |
| 1fe0a395be |
| 92b3716a61 |
| 5c05d741d9 |
| 9a1bbbafce |
| 2fd3828010 |
| 24b742bd20 |
| 42273ab2fa |
| 6f71b95734 |
| 82665444f4 |
| effeae4495 |
| 6b38291bf9 |
| 0b69ea6d80 |
| 9c85dca598 |
| 0d8c518896 |
| 20fbce9263 |
| 4532def9f5 |
| 90f21fbcd1 |
| 81a40c04e5 |
| 58a9e83038 |
| 22cde96f3f |
| 49a172820a |
| 9d2bf173fe |
| e521b788fb |
| f5cce92bf8 |
| 2ccdc5e756 |
| 173a34784f |
| a75e0994f9 |
| 60e925d748 |
| 3d2f970f04 |
| 935eb1fb0b |
| 509aac3819 |
| a9893a0918 |
| 8290e1ae0e |
| fc546ddc0b |
| c45276ef08 |
| fefa4d202e |
| bf062a4a46 |
| 246591b60b |
| 098595717f |
| bc1527e6cf |
| 45c31795e7 |
| 3ec2f60e0b |
| 110223fc4e |
| 2dd119401b |
| 6e06905773 |
| d1316a5b7a |
| b45f0a2c51 |
| e2b7152221 |
| bf7ca1da43 |
| bb8998fef6 |
| 57ac3667e6 |
| cb54891473 |
| fdcfd863e0 |
| 4e96c9e2f7 |
| fb956ff09c |
| eac6ba1410 |
| 6b4b035f0c |
| c2c6720e01 |
| dddcc682b9 |
| ddaab70d2b |
| 61cec63ca9 |
| b063dab2a3 |
| 9e59d5f72b |
| fe3893addf |
| 5eb192f208 |
| 2385d1f752 |
| faad7ddc93 |
| c9314bdbd0 |
| 85d806b157 |
| 6207f9d426 |
| ebb5e2c0f3 |
| 9dec51cd40 |
| f570660f37 |
| 3d3a0fa520 |
@@ -89,3 +89,11 @@ A good typical example:
// - Initializes the relay, starting its operation in a separate goroutine.
```

use the source of the relay-tester to help guide what expectations the test has,
and use context7 for information about the nostr protocol, and use additional
log statements to help locate the cause of bugs

always use Go v1.25.1 for everything involving Go

always use the nips repository that is available at /nips in the root of the repository for documentation about nostr protocol
.dockerignore (new file, 18 lines)
@@ -0,0 +1,18 @@
# Exclude heavy or host-specific data from Docker build context
# Fixes: failed to solve: error from sender: open cmd/benchmark/data/postgres: permission denied

# Benchmark data and reports (mounted at runtime via volumes)
cmd/benchmark/data/
cmd/benchmark/reports/

# VCS and OS cruft
.git
.gitignore
**/.DS_Store
**/Thumbs.db

# Go build cache and binaries
**/bin/
**/dist/
**/build/
**/*.out
.gitignore (vendored, 15 changes)
@@ -29,7 +29,8 @@ node_modules/**
# and others
/go.work.sum
/secp256k1/

cmd/benchmark/external
cmd/benchmark/data
# But not these files...
!/.gitignore
!*.go
@@ -87,6 +88,17 @@ node_modules/**
!.gitignore
!version
!out.jsonl
!Dockerfile*
!strfry.conf
!config.toml
!.dockerignore
!*.jsx
!*.tsx
!app/web/dist
!/app/web/dist
!/app/web/dist/*
!/app/web/dist/**
!bun.lock
# ...even if they are in subdirectories
!*/
/blocklist.json
@@ -108,3 +120,4 @@ pkg/database/testrealy
/.idea/inspectionProfiles/Project_Default.xml
/.idea/.name
/ctxproxy.config.yml
cmd/benchmark/external/**
APACHE-PROXY-GUIDE.md (new file, 364 lines)
@@ -0,0 +1,364 @@
# Apache Reverse Proxy Guide for Docker Apps

**Complete guide for WebSocket-enabled applications - covers both Plesk and Standard Apache**
**Updated with real-world troubleshooting solutions**

## 🎯 **What This Solves**
- WebSocket connection failures (`NS_ERROR_WEBSOCKET_CONNECTION_REFUSED`)
- Nostr relay connectivity issues (`HTTP 426` instead of WebSocket upgrade)
- Docker container proxy configuration
- SSL certificate integration
- Plesk configuration conflicts and virtual host precedence issues

## 🐳 **Step 1: Deploy Your Docker Application**

### **For Stella's Orly Relay:**
```bash
# Pull and run the relay
docker run -d \
  --name stella-relay \
  --restart unless-stopped \
  -p 127.0.0.1:7777:7777 \
  -v /data/orly-relay:/data \
  -e ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx \
  -e ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z \
  silberengel/orly-relay:latest

# Test the relay
curl -I http://127.0.0.1:7777
# Should return: HTTP/1.1 426 Upgrade Required
```

### **For Web Apps (like Jumble):**
```bash
# Run with fixed port for easier proxy setup
docker run -d \
  --name jumble-app \
  --restart unless-stopped \
  -p 127.0.0.1:3000:80 \
  -e NODE_ENV=production \
  silberengel/imwald-jumble:latest

# Test the app
curl -I http://127.0.0.1:3000
```

## 🔧 **Step 2A: PLESK Configuration**

### **For Your Friend's Standard Apache Setup:**

**Tell your friend to create `/etc/apache2/sites-available/domain.conf`:**

```apache
<VirtualHost *:443>
    ServerName your-domain.com

    # SSL Configuration (Let's Encrypt)
    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem

    # Enable required modules first:
    # sudo a2enmod proxy proxy_http proxy_wstunnel rewrite headers ssl

    # Proxy settings
    ProxyPreserveHost On
    ProxyRequests Off

    # WebSocket upgrade handling - CRITICAL for apps with WebSockets
    RewriteEngine On
    RewriteCond %{HTTP:Upgrade} websocket [NC]
    RewriteCond %{HTTP:Connection} upgrade [NC]
    RewriteRule ^/?(.*) "ws://127.0.0.1:PORT/$1" [P,L]

    # Regular HTTP proxy
    ProxyPass / http://127.0.0.1:PORT/
    ProxyPassReverse / http://127.0.0.1:PORT/

    # Headers for modern web apps
    Header always set X-Forwarded-Proto "https"
    Header always set X-Forwarded-Port "443"
    Header always set X-Forwarded-For %{REMOTE_ADDR}s

    # Security headers
    Header always set Strict-Transport-Security "max-age=63072000; includeSubDomains"
    Header always set X-Content-Type-Options nosniff
    Header always set X-Frame-Options SAMEORIGIN
</VirtualHost>

# Redirect HTTP to HTTPS
<VirtualHost *:80>
    ServerName your-domain.com
    Redirect permanent / https://your-domain.com/
</VirtualHost>
```

**Then enable it:**
```bash
sudo a2ensite domain.conf
sudo systemctl reload apache2
```

### **For Plesk Users (You):**

⚠️ **Important**: Plesk often doesn't apply Apache directives correctly through the interface. If the interface method fails, use the "Direct Apache Override" method below.

#### **Method 1: Plesk Interface (Try First)**

1. **Go to Plesk** → Websites & Domains → **your-domain.com**
2. **Click "Apache & nginx Settings"**
3. **DISABLE nginx** (uncheck "Proxy mode" and "Smart static files processing")
4. **Clear HTTP section** (leave empty)
5. **In HTTPS section, add:**

**For Nostr Relay (port 7777):**
```apache
ProxyRequests Off
ProxyPreserveHost On
ProxyPass / ws://127.0.0.1:7777/
ProxyPassReverse / ws://127.0.0.1:7777/
Header always set Access-Control-Allow-Origin "*"
```

6. **Click "Apply"** and wait 60 seconds

#### **Method 2: Direct Apache Override (If Plesk Interface Fails)**

If Plesk doesn't apply your configuration (common issue), bypass it entirely:

```bash
# Create direct Apache override
sudo tee /etc/apache2/conf-available/relay-override.conf << 'EOF'
<VirtualHost YOUR_SERVER_IP:443>
    ServerName your-domain.com
    ServerAlias www.your-domain.com
    ServerAlias ipv4.your-domain.com

    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem

    DocumentRoot /var/www/relay

    # For Nostr relay - proxy everything to WebSocket
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    # CORS headers
    Header always set Access-Control-Allow-Origin "*"
    Header always set Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"

    # Logging
    ErrorLog /var/log/apache2/relay-error.log
    CustomLog /var/log/apache2/relay-access.log combined
</VirtualHost>
EOF

# Enable the override
sudo a2enconf relay-override
sudo mkdir -p /var/www/relay
sudo systemctl restart apache2

# Remove Plesk config if it conflicts
sudo rm /etc/apache2/plesk.conf.d/vhosts/your-domain.com.conf
```

#### **Method 3: Debugging Plesk Issues**

If configurations aren't being applied:

```bash
# Check if Plesk applied your config
grep -E "(ProxyPass|proxy)" /etc/apache2/plesk.conf.d/vhosts/your-domain.com.conf

# Check virtual host precedence
apache2ctl -S | grep your-domain.com

# Check Apache modules
apache2ctl -M | grep -E "(proxy|rewrite)"
```

#### **For Web Apps (port 3000 or 32768):**
```apache
ProxyPreserveHost On
ProxyRequests Off

# WebSocket upgrade handling
RewriteEngine On
RewriteCond %{HTTP:Upgrade} websocket [NC]
RewriteCond %{HTTP:Connection} upgrade [NC]
RewriteRule ^/?(.*) "ws://127.0.0.1:32768/$1" [P,L]

# Regular HTTP proxy
ProxyPass / http://127.0.0.1:32768/
ProxyPassReverse / http://127.0.0.1:32768/

# Headers
ProxyAddHeaders On
Header always set X-Forwarded-Proto "https"
Header always set X-Forwarded-Port "443"
```

### **Method B: Direct Apache Override (RECOMMENDED for Plesk)**

⚠️ **Use this if Plesk interface doesn't work** (common issue):

```bash
# Create direct Apache override with your server's IP
sudo tee /etc/apache2/conf-available/relay-override.conf << 'EOF'
<VirtualHost YOUR_SERVER_IP:443>
    ServerName your-domain.com
    ServerAlias www.your-domain.com
    ServerAlias ipv4.your-domain.com

    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem

    DocumentRoot /var/www/relay

    # For Nostr relay - proxy everything to WebSocket
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    # CORS headers
    Header always set Access-Control-Allow-Origin "*"

    # Logging
    ErrorLog /var/log/apache2/relay-error.log
    CustomLog /var/log/apache2/relay-access.log combined
</VirtualHost>
EOF

# Enable override and create directory
sudo a2enconf relay-override
sudo mkdir -p /var/www/relay
sudo systemctl restart apache2

# Remove conflicting Plesk config if needed
sudo rm /etc/apache2/plesk.conf.d/vhosts/your-domain.com.conf
```

## ⚡ **Step 3: Enable Required Modules**

In Plesk, you might need to enable modules. SSH to your server:

```bash
# Enable Apache modules
sudo a2enmod proxy
sudo a2enmod proxy_http
sudo a2enmod proxy_wstunnel
sudo a2enmod rewrite
sudo systemctl restart apache2
```

## ⚡ **Step 4: Alternative - Nginx in Plesk**

If Apache keeps giving issues, switch to Nginx in Plesk:

1. Go to Plesk → Websites & Domains → orly-relay.imwald.eu
2. Click "Apache & nginx Settings"
3. Enable "nginx" and set it to serve static files
4. In "Additional nginx directives" add:

```nginx
location / {
    proxy_pass http://127.0.0.1:7777;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
}
```

## 🧪 **Testing**

After making changes:

1. **Apply settings** in Plesk
2. **Wait 30 seconds** for changes to take effect
3. **Test WebSocket**:
```bash
# From your server
echo '["REQ","test",{}]' | websocat wss://orly-relay.imwald.eu/
```

## 🎯 **Expected Result**

- ✅ No more "websocket error" in browser console
- ✅ `wss://orly-relay.imwald.eu/` connects successfully
- ✅ Jumble app can publish notes

## 🚨 **Real-World Troubleshooting Guide**

*Based on actual deployment experience with Plesk and WebSocket issues*

### **Critical Issues & Solutions:**

#### **🔴 HTTP 503 Service Unavailable**
- **Cause**: Docker container not running
- **Check**: `docker ps | grep relay`
- **Fix**: `docker start container-name`

#### **🔴 HTTP 426 Instead of WebSocket Upgrade**
- **Cause**: Apache using `http://` proxy instead of `ws://`
- **Fix**: Use `ProxyPass / ws://127.0.0.1:7777/` (not `http://`)

#### **🔴 Plesk Configuration Not Applied**
- **Symptom**: Config not in `/etc/apache2/plesk.conf.d/vhosts/domain.conf`
- **Solution**: Use Direct Apache Override method (bypass Plesk interface)

#### **🔴 Virtual Host Conflicts**
- **Check**: `apache2ctl -S | grep domain.com`
- **Fix**: Remove Plesk config: `sudo rm /etc/apache2/plesk.conf.d/vhosts/domain.conf`

#### **🔴 Nginx Intercepting (Plesk)**
- **Symptom**: Response shows `Server: nginx`
- **Fix**: Disable nginx in Plesk settings

### **Debug Commands:**
```bash
# Essential debugging
docker ps | grep relay                # Container running?
curl -I http://127.0.0.1:7777         # Local relay (should return 426)
apache2ctl -S | grep domain.com       # Virtual host precedence
grep ProxyPass /etc/apache2/plesk.conf.d/vhosts/domain.conf  # Config applied?

# WebSocket testing
echo '["REQ","test",{}]' | websocat wss://domain.com/     # Root path
echo '["REQ","test",{}]' | websocat wss://domain.com/ws/  # /ws/ path
```

### **Working Solution (Proven):**
```apache
<VirtualHost SERVER_IP:443>
    ServerName domain.com
    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/domain.com/privkey.pem
    DocumentRoot /var/www/relay

    # Direct WebSocket proxy - this is the key!
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    Header always set Access-Control-Allow-Origin "*"
</VirtualHost>
```

---

**Key Lessons**:
1. Plesk interface often fails to apply Apache directives
2. Use `ws://` proxy for Nostr relays, not `http://`
3. Direct Apache config files are more reliable than Plesk interface
4. Always check virtual host precedence with `apache2ctl -S`
DOCKER.md (new file, 188 lines)
@@ -0,0 +1,188 @@
# Docker Deployment Guide

## Quick Start

### 1. Basic Relay Setup

```bash
# Build and start the relay
docker-compose up -d

# View logs
docker-compose logs -f stella-relay

# Stop the relay
docker-compose down
```

### 2. With Nginx Proxy (for SSL/domain setup)

```bash
# Start relay with nginx proxy
docker-compose --profile proxy up -d

# Configure SSL certificates in nginx/ssl/
# Then update nginx/nginx.conf to enable HTTPS
```

## Configuration

### Environment Variables

Copy `env.example` to `.env` and customize:

```bash
cp env.example .env
# Edit .env with your settings
```

Key settings:
- `ORLY_OWNERS`: Owner npubs (comma-separated, full control)
- `ORLY_ADMINS`: Admin npubs (comma-separated, deletion permissions)
- `ORLY_PORT`: Port to listen on (default: 7777)
- `ORLY_MAX_CONNECTIONS`: Max concurrent connections
- `ORLY_CONCURRENT_WORKERS`: CPU cores for concurrent processing (0 = auto)
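As a rough illustration of how these settings fit together, here is a minimal `.env` sketch using the variable names listed above; the npub strings and limits are placeholders, not recommended values.

```bash
# .env (illustrative values only; substitute your own keys and limits)
ORLY_OWNERS=npub1exampleownerkeyxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ORLY_ADMINS=npub1exampleadminonexxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,npub1exampleadmintwoxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
ORLY_PORT=7777
ORLY_MAX_CONNECTIONS=1000
ORLY_CONCURRENT_WORKERS=0   # 0 = use all available CPU cores
```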
### Data Persistence

Relay data is stored in the `./data` directory, which is mounted as a volume.

### Performance Tuning

Based on the v0.4.8 optimizations:
- Concurrent event publishing using all CPU cores
- Optimized BadgerDB access patterns
- Configurable batch sizes and cache settings

## Development

### Local Build

```bash
# Pull the latest image (recommended)
docker pull silberengel/orly-relay:latest

# Or build locally if needed
docker build -t silberengel/orly-relay:latest .

# Run with custom settings
docker run -p 7777:7777 -v $(pwd)/data:/data silberengel/orly-relay:latest
```

### Testing

```bash
# Test WebSocket connection
websocat ws://localhost:7777

# Run stress tests (if available in cmd/stresstest)
go run ./cmd/stresstest -relay ws://localhost:7777
```

## Production Deployment

### SSL Setup

1. Get SSL certificates (Let's Encrypt recommended)
2. Place certificates in `nginx/ssl/`
3. Update `nginx/nginx.conf` to enable HTTPS
4. Start with proxy profile: `docker-compose --profile proxy up -d`
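A minimal sketch of steps 1 and 2 with Let's Encrypt's certbot, assuming a standalone challenge and a placeholder domain; adjust the domain and destination paths to your setup.

```bash
# Obtain a certificate (stop anything else bound to port 80 first)
sudo certbot certonly --standalone -d relay.example.com

# Copy the resulting files where the nginx proxy expects them
sudo cp /etc/letsencrypt/live/relay.example.com/fullchain.pem nginx/ssl/
sudo cp /etc/letsencrypt/live/relay.example.com/privkey.pem nginx/ssl/
```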
### Monitoring

- Health checks are configured for both services
- Logs are rotated (max 10MB, 3 files)
- Resource limits are set to prevent runaway processes
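To spot-check these from the host, standard Docker inspection commands work; the container name `stella-relay` matches the one used elsewhere in this guide, and the exact fields depend on your Docker version.

```bash
# Current health status reported by the container's HEALTHCHECK
docker inspect --format '{{.State.Health.Status}}' stella-relay

# Log driver and rotation options applied to the container
docker inspect --format '{{json .HostConfig.LogConfig}}' stella-relay
```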
### Security

- Runs as non-root user (uid 1000)
- Rate limiting configured in nginx
- Configurable authentication and event size limits
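A quick, hedged way to confirm the non-root setup (assumes the same container name as above):

```bash
# Should print 1000, matching the unprivileged user baked into the image
docker exec stella-relay id -u
```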
## Troubleshooting

### Common Issues (Real-World Experience)

#### **Container Issues:**
1. **Port already in use**: Change `ORLY_PORT` in docker-compose.yml
2. **Permission denied**: Ensure `./data` directory is writable
3. **Container won't start**: Check logs with `docker logs container-name`

#### **WebSocket Issues:**
4. **HTTP 426 instead of WebSocket upgrade**:
   - Use `ws://127.0.0.1:7777` in proxy config, not `http://`
   - Ensure `proxy_wstunnel` module is enabled
5. **Connection refused in browser but works with websocat**:
   - Clear browser cache and service workers
   - Try incognito mode
   - Add CORS headers to Apache/nginx config

#### **Plesk-Specific Issues:**
6. **Plesk not applying Apache directives**:
   - Check if config appears in `/etc/apache2/plesk.conf.d/vhosts/domain.conf`
   - Use direct Apache override if Plesk interface fails
7. **Virtual host conflicts**:
   - Check precedence with `apache2ctl -S`
   - Remove conflicting Plesk configs if needed

#### **SSL Certificate Issues:**
8. **Self-signed certificate after Let's Encrypt**:
   - Plesk might not be using the correct certificate
   - Import Let's Encrypt certs into Plesk or use direct Apache config

### Debug Commands

```bash
# Container debugging
docker ps | grep relay
docker logs stella-relay
curl -I http://127.0.0.1:7777  # Should return HTTP 426

# WebSocket testing
echo '["REQ","test",{}]' | websocat wss://domain.com/
echo '["REQ","test",{}]' | websocat wss://domain.com/ws/

# Apache debugging (for reverse proxy issues)
apache2ctl -S | grep domain.com
apache2ctl -M | grep -E "(proxy|rewrite)"
grep ProxyPass /etc/apache2/plesk.conf.d/vhosts/domain.conf
```

### Logs

```bash
# View relay logs
docker-compose logs -f stella-relay

# View nginx logs (if using proxy)
docker-compose logs -f nginx

# Apache logs (for reverse proxy debugging)
sudo tail -f /var/log/apache2/error.log
sudo tail -f /var/log/apache2/domain-error.log
```

### Working Reverse Proxy Config

**For Apache (direct config file):**
```apache
<VirtualHost SERVER_IP:443>
    ServerName domain.com
    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/domain.com/privkey.pem

    # Direct WebSocket proxy for Nostr relay
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    Header always set Access-Control-Allow-Origin "*"
</VirtualHost>
```

---

*Crafted for Stella's digital forest* 🌲
Dockerfile (new file, 78 lines)
@@ -0,0 +1,78 @@
# Dockerfile for Stella's Nostr Relay (next.orly.dev)
# Owner: npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx

FROM golang:alpine AS builder

# Install build dependencies
RUN apk add --no-cache \
    git \
    build-base \
    autoconf \
    automake \
    libtool \
    pkgconfig

# Install secp256k1 library from Alpine packages
RUN apk add --no-cache libsecp256k1-dev

# Set working directory
WORKDIR /build

# Copy go modules first (for better caching)
COPY go.mod go.sum ./
RUN go mod download

# Copy source code
COPY . .

# Build the relay with optimizations from v0.4.8
RUN CGO_ENABLED=1 GOOS=linux go build -ldflags "-w -s" -o relay .

# Create non-root user for security
RUN adduser -D -u 1000 stella && \
    chown -R 1000:1000 /build

# Final stage - minimal runtime image
FROM alpine:latest

# Install only runtime dependencies
RUN apk add --no-cache \
    ca-certificates \
    curl \
    libsecp256k1 \
    libsecp256k1-dev

WORKDIR /app

# Copy binary from builder
COPY --from=builder /build/relay /app/relay

# Create runtime user and directories
RUN adduser -D -u 1000 stella && \
    mkdir -p /data /profiles /app && \
    chown -R 1000:1000 /data /profiles /app

# Expose the relay port
EXPOSE 7777

# Set environment variables for Stella's relay
ENV ORLY_DATA_DIR=/data
ENV ORLY_LISTEN=0.0.0.0
ENV ORLY_PORT=7777
ENV ORLY_LOG_LEVEL=info
ENV ORLY_MAX_CONNECTIONS=1000
ENV ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx
ENV ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z

# Health check to ensure relay is responding
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD sh -c "code=\$(curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:7777 || echo 000); echo \$code | grep -E '^(101|200|400|404|426)$' >/dev/null || exit 1"

# Create volume for persistent data
VOLUME ["/data"]

# Drop privileges and run as stella user
USER 1000:1000

# Run Stella's Nostr relay
CMD ["/app/relay"]
SERVICE-WORKER-FIX.md (new file, 101 lines)
@@ -0,0 +1,101 @@
# Service Worker Certificate Caching Fix

## 🚨 **Problem**
When accessing Jumble from the ImWald landing page, the service worker serves a cached self-signed certificate instead of the new Let's Encrypt certificate.

## ⚡ **Solutions**

### **Option 1: Force Service Worker Update**
Add this to your Jumble app's service worker or main JavaScript:

```javascript
// Force service worker update and certificate refresh
if ('serviceWorker' in navigator) {
  navigator.serviceWorker.getRegistrations().then(function(registrations) {
    for (let registration of registrations) {
      registration.update(); // Force update
    }
  });
}

// Clear all caches on certificate update
if ('caches' in window) {
  caches.keys().then(function(names) {
    for (let name of names) {
      caches.delete(name);
    }
  });
}
```

### **Option 2: Update Service Worker Cache Strategy**
In your service worker file, add cache busting for SSL-sensitive requests:

```javascript
// In your service worker
self.addEventListener('fetch', function(event) {
  // Don't cache HTTPS requests that might have certificate issues
  if (event.request.url.startsWith('https://') &&
      event.request.url.includes('imwald.eu')) {
    event.respondWith(
      fetch(event.request, { cache: 'no-store' })
    );
    return;
  }

  // Your existing fetch handling...
});
```

### **Option 3: Version Your Service Worker**
Update your service worker with a new version number:

```javascript
// At the top of your service worker
const CACHE_VERSION = 'v2.0.1'; // Increment this when certificates change
const CACHE_NAME = `jumble-cache-${CACHE_VERSION}`;

// Clear old caches
self.addEventListener('activate', function(event) {
  event.waitUntil(
    caches.keys().then(function(cacheNames) {
      return Promise.all(
        cacheNames.map(function(cacheName) {
          if (cacheName !== CACHE_NAME) {
            return caches.delete(cacheName);
          }
        })
      );
    })
  );
});
```

### **Option 4: Add Cache Headers**
In your Plesk Apache config for Jumble, add:

```apache
# Prevent service worker from caching SSL-sensitive content
Header always set Cache-Control "no-cache, no-store, must-revalidate"
Header always set Pragma "no-cache"
Header always set Expires "0"

# Only for service worker file
<Files "sw.js">
    Header always set Cache-Control "no-cache, no-store, must-revalidate"
</Files>
```

## 🧹 **Immediate User Fix**

For users experiencing the certificate issue:

1. **Clear browser data** for jumble.imwald.eu
2. **Unregister service worker**:
   - F12 → Application → Service Workers → Unregister
3. **Hard refresh**: Ctrl+Shift+R
4. **Or use incognito mode** to test

---

This will prevent the service worker from serving stale certificate data.
WEBSOCKET-DEBUG.md (new file, 109 lines)
@@ -0,0 +1,109 @@
# WebSocket Connection Debug Guide

## 🚨 **Current Issue**
`wss://orly-relay.imwald.eu/` returns `NS_ERROR_WEBSOCKET_CONNECTION_REFUSED`

## 🔍 **Debug Steps**

### **Step 1: Verify Relay is Running**
```bash
# On your server
curl -I http://127.0.0.1:7777
# Should return: HTTP/1.1 426 Upgrade Required

docker ps | grep stella
# Should show running container
```

### **Step 2: Test Apache Modules**
```bash
# Check if WebSocket modules are enabled
apache2ctl -M | grep -E "(proxy|rewrite)"

# If missing, enable them:
sudo a2enmod proxy
sudo a2enmod proxy_http
sudo a2enmod proxy_wstunnel
sudo a2enmod rewrite
sudo a2enmod headers
sudo systemctl restart apache2
```

### **Step 3: Check Apache Configuration**
```bash
# Check what Plesk generated
sudo cat /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf

# Look for proxy and rewrite rules
grep -E "(Proxy|Rewrite)" /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf
```

### **Step 4: Test Direct WebSocket Connection**
```bash
# Test if the issue is Apache or the relay itself
echo '["REQ","test",{}]' | websocat ws://127.0.0.1:7777/

# If that works, the issue is Apache proxy
# If that fails, the issue is the relay
```

### **Step 5: Check Apache Error Logs**
```bash
# Watch Apache errors in real-time
sudo tail -f /var/log/apache2/error.log

# Then try connecting to wss://orly-relay.imwald.eu/ and see what errors appear
```

## 🔧 **Specific Plesk Fix**

Based on your current status, try this **exact configuration** in Plesk:

### **Go to Apache & nginx Settings for orly-relay.imwald.eu:**

**Clear both HTTP and HTTPS sections, then add to HTTPS:**

```apache
# Enable proxy
ProxyRequests Off
ProxyPreserveHost On

# WebSocket handling - the key part
RewriteEngine On
RewriteCond %{HTTP:Upgrade} =websocket [NC]
RewriteCond %{HTTP:Connection} upgrade [NC]
RewriteRule /(.*) ws://127.0.0.1:7777/$1 [P,L]

# Fallback for regular HTTP
RewriteCond %{HTTP:Upgrade} !=websocket [NC]
RewriteRule /(.*) http://127.0.0.1:7777/$1 [P,L]

# Headers
ProxyAddHeaders On
```

### **Alternative Simpler Version:**
If the above doesn't work, try just:

```apache
ProxyPass / http://127.0.0.1:7777/
ProxyPassReverse / http://127.0.0.1:7777/
ProxyPass /ws ws://127.0.0.1:7777/
ProxyPassReverse /ws ws://127.0.0.1:7777/
```

## 🧪 **Testing Commands**

```bash
# Test the WebSocket after each change
echo '["REQ","test",{}]' | websocat wss://orly-relay.imwald.eu/

# Check what's actually being served
curl -v https://orly-relay.imwald.eu/ 2>&1 | grep -E "(HTTP|upgrade|connection)"
```

## 🎯 **Expected Fix**

The issue is likely that Apache isn't properly handling the WebSocket upgrade request. The `proxy_wstunnel` module and correct rewrite rules should fix this.

Try the **simpler ProxyPass version first** - it's often more reliable in Plesk environments.
@@ -23,17 +23,33 @@ import (
// and default values. It defines parameters for app behaviour, storage
// locations, logging, and network settings used across the relay service.
type C struct {
    AppName string `env:"ORLY_APP_NAME" usage:"set a name to display on information about the relay" default:"ORLY"`
    DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/share/ORLY"`
    Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
    Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
    LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"relay log level: fatal error warn info debug trace"`
    DBLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
    Pprof string `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation"`
    IPWhitelist []string `env:"ORLY_IP_WHITELIST" usage:"comma-separated list of IP addresses to allow access from, matches on prefixes to allow private subnets, eg 10.0.0 = 10.0.0.0/8"`
    Admins []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
    Owners []string `env:"ORLY_OWNERS" usage:"comma-separated list of owner npubs, who have full control of the relay for wipe and restart and other functions"`
    ACLMode string `env:"ORLY_ACL_MODE" usage:"ACL mode: follows,none" default:"follows"`
    AppName string `env:"ORLY_APP_NAME" usage:"set a name to display on information about the relay" default:"ORLY"`
    DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/share/ORLY"`
    Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
    Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
    HealthPort int `env:"ORLY_HEALTH_PORT" default:"0" usage:"optional health check HTTP port; 0 disables"`
    EnableShutdown bool `env:"ORLY_ENABLE_SHUTDOWN" default:"false" usage:"if true, expose /shutdown on the health port to gracefully stop the process (for profiling)"`
    LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"relay log level: fatal error warn info debug trace"`
    DBLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
    LogToStdout bool `env:"ORLY_LOG_TO_STDOUT" default:"false" usage:"log to stdout instead of stderr"`
    Pprof string `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation,heap,block,goroutine,threadcreate,mutex"`
    PprofPath string `env:"ORLY_PPROF_PATH" usage:"optional directory to write pprof profiles into (inside container); default is temporary dir"`
    PprofHTTP bool `env:"ORLY_PPROF_HTTP" default:"false" usage:"if true, expose net/http/pprof on port 6060"`
    OpenPprofWeb bool `env:"ORLY_OPEN_PPROF_WEB" default:"false" usage:"if true, automatically open the pprof web viewer when profiling is enabled"`
    IPWhitelist []string `env:"ORLY_IP_WHITELIST" usage:"comma-separated list of IP addresses to allow access from, matches on prefixes to allow private subnets, eg 10.0.0 = 10.0.0.0/8"`
    Admins []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
    Owners []string `env:"ORLY_OWNERS" usage:"comma-separated list of owner npubs, who have full control of the relay for wipe and restart and other functions"`
    ACLMode string `env:"ORLY_ACL_MODE" usage:"ACL mode: follows,none" default:"none"`
    SpiderMode string `env:"ORLY_SPIDER_MODE" usage:"spider mode: none,follow" default:"none"`
    SpiderFrequency time.Duration `env:"ORLY_SPIDER_FREQUENCY" usage:"spider frequency in seconds" default:"1h"`
    NWCUri string `env:"ORLY_NWC_URI" usage:"NWC (Nostr Wallet Connect) connection string for Lightning payments"`
    SubscriptionEnabled bool `env:"ORLY_SUBSCRIPTION_ENABLED" default:"false" usage:"enable subscription-based access control requiring payment for non-directory events"`
    MonthlyPriceSats int64 `env:"ORLY_MONTHLY_PRICE_SATS" default:"6000" usage:"price in satoshis for one month subscription (default ~$2 USD)"`
    RelayURL string `env:"ORLY_RELAY_URL" usage:"base URL for the relay dashboard (e.g., https://relay.example.com)"`

    // Web UI and dev mode settings
    WebDisableEmbedded bool `env:"ORLY_WEB_DISABLE" default:"false" usage:"disable serving the embedded web UI; useful for hot-reload during development"`
    WebDevProxyURL string `env:"ORLY_WEB_DEV_PROXY_URL" usage:"when ORLY_WEB_DISABLE is true, reverse-proxy non-API paths to this dev server URL (e.g. http://localhost:5173)"`
}

// New creates and initializes a new configuration object for the relay
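For orientation only: the struct tags above define the relay's environment variables, so the options added in this change could be exercised roughly as sketched below. Variable names and defaults come from the tags; the values are hypothetical.

```bash
# Sketch of the new configuration knobs introduced by this diff
export ORLY_HEALTH_PORT=8080           # 0 (default) disables the health endpoint
export ORLY_LOG_TO_STDOUT=true         # default false: log to stderr
export ORLY_ACL_MODE=none              # default changed from "follows" to "none"
export ORLY_SUBSCRIPTION_ENABLED=true
export ORLY_MONTHLY_PRICE_SATS=6000    # default price for one month
export ORLY_RELAY_URL=https://relay.example.com
```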
@@ -73,6 +89,9 @@ func New() (cfg *C, err error) {
        PrintHelp(cfg, os.Stderr)
        os.Exit(0)
    }
    if cfg.LogToStdout {
        lol.Writer = os.Stdout
    }
    lol.SetLogLevel(cfg.LogLevel)
    return
}
@@ -121,6 +140,21 @@ func GetEnv() (requested bool) {
    return
}

// IdentityRequested checks if the first command line argument is "identity" and returns
// whether the relay identity should be printed and the program should exit.
//
// Return Values
// - requested: true if the 'identity' subcommand was provided, false otherwise.
func IdentityRequested() (requested bool) {
    if len(os.Args) > 1 {
        switch strings.ToLower(os.Args[1]) {
        case "identity":
            requested = true
        }
    }
    return
}

// KV is a key/value pair.
type KV struct{ Key, Value string }
@@ -1,11 +1,11 @@
package app

import (
    "encoders.orly/envelopes/authenvelope"
    "encoders.orly/envelopes/okenvelope"
    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "protocol.orly/auth"
    "next.orly.dev/pkg/encoders/envelopes/authenvelope"
    "next.orly.dev/pkg/encoders/envelopes/okenvelope"
    "next.orly.dev/pkg/protocol/auth"
)

func (l *Listener) HandleAuth(b []byte) (err error) {
@@ -46,10 +46,38 @@ func (l *Listener) HandleAuth(b []byte) (err error) {
            return
        }
        log.D.F(
            "%s authed to pubkey,%0x", l.remote,
            "%s authed to pubkey %0x", l.remote,
            env.Event.Pubkey,
        )
        l.authedPubkey.Store(env.Event.Pubkey)

        // Check if this is a first-time user and create welcome note
        go l.handleFirstTimeUser(env.Event.Pubkey)
    }
    return
}

// handleFirstTimeUser checks if user is logging in for first time and creates welcome note
func (l *Listener) handleFirstTimeUser(pubkey []byte) {
    // Check if this is a first-time user
    isFirstTime, err := l.Server.D.IsFirstTimeUser(pubkey)
    if err != nil {
        log.E.F("failed to check first-time user status: %v", err)
        return
    }

    if !isFirstTime {
        return // Not a first-time user
    }

    // Get payment processor to create welcome note
    if l.Server.paymentProcessor != nil {
        // Set the dashboard URL based on the current HTTP request
        dashboardURL := l.Server.DashboardURL(l.req)
        l.Server.paymentProcessor.SetDashboardURL(dashboardURL)

        if err := l.Server.paymentProcessor.CreateWelcomeNote(pubkey); err != nil {
            log.E.F("failed to create welcome note for first-time user: %v", err)
        }
    }
}
@@ -3,9 +3,9 @@ package app
import (
    "errors"

    "encoders.orly/envelopes/closeenvelope"
    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/encoders/envelopes/closeenvelope"
)

// HandleClose processes a CLOSE envelope by unmarshalling the request,
@@ -3,18 +3,18 @@
import (
    "fmt"

    "database.orly/indexes/types"
    "encoders.orly/envelopes/eventenvelope"
    "encoders.orly/event"
    "encoders.orly/filter"
    "encoders.orly/hex"
    "encoders.orly/ints"
    "encoders.orly/kind"
    "encoders.orly/tag"
    "encoders.orly/tag/atag"
    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    utils "utils.orly"
    "next.orly.dev/pkg/database/indexes/types"
    "next.orly.dev/pkg/encoders/envelopes/eventenvelope"
    "next.orly.dev/pkg/encoders/event"
    "next.orly.dev/pkg/encoders/filter"
    "next.orly.dev/pkg/encoders/hex"
    "next.orly.dev/pkg/encoders/ints"
    "next.orly.dev/pkg/encoders/kind"
    "next.orly.dev/pkg/encoders/tag"
    "next.orly.dev/pkg/encoders/tag/atag"
    utils "next.orly.dev/pkg/utils"
)

func (l *Listener) GetSerialsFromFilter(f *filter.F) (
@@ -23,14 +23,14 @@ func (l *Listener) GetSerialsFromFilter(f *filter.F) (
    return l.D.GetSerialsFromFilter(f)
}

func (l *Listener) HandleDelete(env *eventenvelope.Submission) {
    log.I.C(
        func() string {
            return fmt.Sprintf(
                "delete event\n%s", env.E.Serialize(),
            )
        },
    )
func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
    // log.I.C(
    //     func() string {
    //         return fmt.Sprintf(
    //             "delete event\n%s", env.E.Serialize(),
    //         )
    //     },
    // )
    var ownerDelete bool
    for _, pk := range l.Admins {
        if utils.FastEqual(pk, env.E.Pubkey) {
@@ -39,15 +39,17 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) {
        }
    }
    // process the tags in the delete event
    var err error
    var deleteErr error
    var validDeletionFound bool
    for _, t := range *env.E.Tags {
        // first search for a tags, as these are the simplest to process
        if utils.FastEqual(t.Key(), []byte("a")) {
            at := new(atag.T)
            if _, err = at.Unmarshal(t.Value()); chk.E(err) {
            if _, deleteErr = at.Unmarshal(t.Value()); chk.E(deleteErr) {
                continue
            }
            if ownerDelete || utils.FastEqual(env.E.Pubkey, at.Pubkey) {
                validDeletionFound = true
                // find the event and delete it
                f := &filter.F{
                    Authors: tag.NewFromBytesSlice(at.Pubkey),
@@ -69,13 +71,43 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) {
                    if ev, err = l.FetchEventBySerial(s); chk.E(err) {
                        continue
                    }
                    if !(kind.IsReplaceable(ev.Kind) && len(at.DTag) == 0) {
                        // skip a tags with no dtag if the kind is not
                        // replaceable.
                    // Only delete events that match the a-tag criteria:
                    // - For parameterized replaceable events: must have matching d-tag
                    // - For regular replaceable events: should not have d-tag constraint
                    if kind.IsParameterizedReplaceable(ev.Kind) {
                        // For parameterized replaceable, we need a DTag to match
                        if len(at.DTag) == 0 {
                            log.I.F(
                                "HandleDelete: skipping parameterized replaceable event %s - no DTag in a-tag",
                                hex.Enc(ev.ID),
                            )
                            continue
                        }
                    } else if !kind.IsReplaceable(ev.Kind) {
                        // For non-replaceable events, a-tags don't apply
                        log.I.F(
                            "HandleDelete: skipping non-replaceable event %s - a-tags only apply to replaceable events",
                            hex.Enc(ev.ID),
                        )
                        continue
                    }

                    // Only delete events that are older than or equal to the delete event timestamp
                    if ev.CreatedAt > env.E.CreatedAt {
                        log.I.F(
                            "HandleDelete: skipping newer event %s (created_at=%d) - delete event timestamp is %d",
                            hex.Enc(ev.ID), ev.CreatedAt, env.E.CreatedAt,
                        )
                        continue
                    }

                    log.I.F(
                        "HandleDelete: deleting event %s via a-tag %d:%s:%s (event_time=%d, delete_time=%d)",
                        hex.Enc(ev.ID), at.Kind.K, hex.Enc(at.Pubkey),
                        string(at.DTag), ev.CreatedAt, env.E.CreatedAt,
                    )
                    if err = l.DeleteEventBySerial(
                        l.Ctx, s, ev,
                        l.Ctx(), s, ev,
                    ); chk.E(err) {
                        continue
                    }
@@ -87,10 +119,16 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) {
        // if e tags are found, delete them if the author is signer, or one of
        // the owners is signer
        if utils.FastEqual(t.Key(), []byte("e")) {
            var dst []byte
            if _, err = hex.DecBytes(dst, t.Value()); chk.E(err) {
            val := t.Value()
            if len(val) == 0 {
                continue
            }
            var dst []byte
            if b, e := hex.Dec(string(val)); chk.E(e) {
                continue
            } else {
                dst = b
            }
            f := &filter.F{
                Ids: tag.NewFromBytesSlice(dst),
            }
@@ -107,17 +145,25 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) {
                if ev, err = l.FetchEventBySerial(s); chk.E(err) {
                    continue
                }
                // check that the author is the same as the signer of the
                // delete, for the k tag case the author is the signer of
                // the event.
                if !utils.FastEqual(env.E.Pubkey, ev.Pubkey) {
                // allow deletion if the signer is the author OR an admin/owner
                if !(ownerDelete || utils.FastEqual(env.E.Pubkey, ev.Pubkey)) {
                    log.W.F(
                        "HandleDelete: attempted deletion of event %s by unauthorized user - delete pubkey=%s, event pubkey=%s",
                        hex.Enc(ev.ID), hex.Enc(env.E.Pubkey),
                        hex.Enc(ev.Pubkey),
                    )
                    continue
                }
                validDeletionFound = true
                // exclude delete events
                if ev.Kind == kind.EventDeletion.K {
                    continue
                }
                if err = l.DeleteEventBySerial(l.Ctx, s, ev); chk.E(err) {
                log.I.F(
                    "HandleDelete: deleting event %s by authorized user %s",
                    hex.Enc(ev.ID), hex.Enc(env.E.Pubkey),
                )
                if err = l.DeleteEventBySerial(l.Ctx(), s, ev); chk.E(err) {
                    continue
                }
            }
@@ -164,5 +210,11 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) {
            }
            continue
        }

    // If no valid deletions were found, return an error
    if !validDeletionFound {
        return fmt.Errorf("blocked: cannot delete events that belong to other users")
    }

    return
}
@@ -1,18 +1,20 @@
package app

import (
    "context"
    "fmt"
    "strings"
    "time"

    acl "acl.orly"
    "encoders.orly/envelopes/authenvelope"
    "encoders.orly/envelopes/eventenvelope"
    "encoders.orly/envelopes/okenvelope"
    "encoders.orly/kind"
    "encoders.orly/reason"
    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    utils "utils.orly"
    "next.orly.dev/pkg/acl"
    "next.orly.dev/pkg/encoders/envelopes/authenvelope"
    "next.orly.dev/pkg/encoders/envelopes/eventenvelope"
    "next.orly.dev/pkg/encoders/envelopes/okenvelope"
    "next.orly.dev/pkg/encoders/kind"
    "next.orly.dev/pkg/encoders/reason"
    "next.orly.dev/pkg/utils"
)

func (l *Listener) HandleEvent(msg []byte) (err error) {
@@ -62,7 +64,7 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
        return
    }
    // check permissions of user
    accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load())
    accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
    switch accessLevel {
    case "none":
        log.D.F(
@@ -99,11 +101,35 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
        return
    default:
        // user has write access or better, continue
        log.D.F("user has %s access", accessLevel)
        // log.D.F("user has %s access", accessLevel)
    }
    // check for protected tag (NIP-70)
    protectedTag := env.E.Tags.GetFirst([]byte("-"))
    if protectedTag != nil && acl.Registry.Active.Load() != "none" {
        // check that the pubkey of the event matches the authed pubkey
        if !utils.FastEqual(l.authedPubkey.Load(), env.E.Pubkey) {
            if err = Ok.Blocked(
                l, env,
                "protected tag may only be published by user authed to the same pubkey",
            ); chk.E(err) {
                return
            }
            return
        }
    }
    // if the event is a delete, process the delete
    if env.E.Kind == kind.EventDeletion.K {
        l.HandleDelete(env)
        if err = l.HandleDelete(env); err != nil {
            if strings.HasPrefix(err.Error(), "blocked:") {
                errStr := err.Error()[len("blocked: "):len(err.Error())]
                if err = Ok.Error(
                    l, env, errStr,
                ); chk.E(err) {
                    return
                }
                return
            }
        }
    } else {
        // check if the event was deleted
        if err = l.CheckForDeleted(env.E, l.Admins); err != nil {
@@ -117,21 +143,51 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
            }
        }
    }
    // store the event
    log.I.F("saving event %0x, %s", env.E.ID, env.E.Serialize())
    if _, _, err = l.SaveEvent(l.Ctx, env.E); chk.E(err) {
    // store the event - use a separate context to prevent cancellation issues
    saveCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()
    // log.I.F("saving event %0x, %s", env.E.ID, env.E.Serialize())
    if _, _, err = l.SaveEvent(saveCtx, env.E); err != nil {
        if strings.HasPrefix(err.Error(), "blocked:") {
            errStr := err.Error()[len("blocked: "):len(err.Error())]
            if err = Ok.Error(
                l, env, errStr,
            ); chk.E(err) {
                return
            }
            return
        }
        chk.E(err)
        return
    }
    // if a follow list was saved, reconfigure ACLs now that it is persisted
    if env.E.Kind == kind.FollowList.K {
        if err = acl.Registry.Configure(); chk.E(err) {
        }
    }
    l.publishers.Deliver(env.E)
    // Send a success response storing
    if err = Ok.Ok(l, env, ""); chk.E(err) {
        return
    }
    // Deliver the event to subscribers immediately after sending OK response
    // Clone the event to prevent corruption when the original is freed
    clonedEvent := env.E.Clone()
    go l.publishers.Deliver(clonedEvent)
    log.D.F("saved event %0x", env.E.ID)
    var isNewFromAdmin bool
    for _, admin := range l.Admins {
        if utils.FastEqual(admin, env.E.Pubkey) {
            isNewFromAdmin = true
            break
        }
    }
    if isNewFromAdmin {
        log.I.F("new event from admin %0x", env.E.Pubkey)
        // if a follow list was saved, reconfigure ACLs now that it is persisted
        if env.E.Kind == kind.FollowList.K ||
            env.E.Kind == kind.RelayListMetadata.K {
            // Run ACL reconfiguration asynchronously to prevent blocking websocket operations
            go func() {
                if err := acl.Registry.Configure(); chk.E(err) {
                    log.E.F("failed to reconfigure ACL: %v", err)
                }
            }()
        }
    }
    return
}
@@ -3,57 +3,76 @@ package app
import (
    "fmt"

    "encoders.orly/envelopes"
    "encoders.orly/envelopes/authenvelope"
    "encoders.orly/envelopes/closeenvelope"
    "encoders.orly/envelopes/eventenvelope"
    "encoders.orly/envelopes/noticeenvelope"
    "encoders.orly/envelopes/reqenvelope"
    "lol.mleku.dev/chk"
    "lol.mleku.dev/errorf"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/encoders/envelopes"
    "next.orly.dev/pkg/encoders/envelopes/authenvelope"
    "next.orly.dev/pkg/encoders/envelopes/closeenvelope"
    "next.orly.dev/pkg/encoders/envelopes/eventenvelope"
    "next.orly.dev/pkg/encoders/envelopes/noticeenvelope"
    "next.orly.dev/pkg/encoders/envelopes/reqenvelope"
)

func (l *Listener) HandleMessage(msg []byte, remote string) {
    log.D.C(
        func() string {
            return fmt.Sprintf(
                "%s received message:\n%s", remote, msg,
            )
        },
    )
    msgPreview := string(msg)
    if len(msgPreview) > 150 {
        msgPreview = msgPreview[:150] + "..."
    }
    log.D.F("%s processing message (len=%d): %s", remote, len(msg), msgPreview)

    l.msgCount++
    var err error
    var t string
    var rem []byte
    if t, rem, err = envelopes.Identify(msg); !chk.E(err) {
        switch t {
        case eventenvelope.L:
            log.D.F("eventenvelope: %s", rem)
            err = l.HandleEvent(rem)
        case reqenvelope.L:
            log.D.F("reqenvelope: %s", rem)
            err = l.HandleReq(rem)
        case closeenvelope.L:
            log.D.F("closeenvelope: %s", rem)
            err = l.HandleClose(rem)
        case authenvelope.L:
            log.D.F("authenvelope: %s", rem)
            err = l.HandleAuth(rem)
        default:
            err = errorf.E("unknown envelope type %s\n%s", t, rem)

    // Attempt to identify the envelope type
    if t, rem, err = envelopes.Identify(msg); err != nil {
        log.E.F("%s envelope identification FAILED (len=%d): %v", remote, len(msg), err)
        log.D.F("%s malformed message content: %q", remote, msgPreview)
        chk.E(err)
        // Send error notice to client
        if noticeErr := noticeenvelope.NewFrom("malformed message: " + err.Error()).Write(l); noticeErr != nil {
            log.E.F("%s failed to send malformed message notice: %v", remote, noticeErr)
        }
        return
    }

    log.D.F("%s identified envelope type: %s (payload_len=%d)", remote, t, len(rem))

    // Process the identified envelope type
    switch t {
    case eventenvelope.L:
        log.D.F("%s processing EVENT envelope", remote)
        l.eventCount++
        err = l.HandleEvent(rem)
    case reqenvelope.L:
        log.D.F("%s processing REQ envelope", remote)
        l.reqCount++
        err = l.HandleReq(rem)
    case closeenvelope.L:
        log.D.F("%s processing CLOSE envelope", remote)
        err = l.HandleClose(rem)
    case authenvelope.L:
        log.D.F("%s processing AUTH envelope", remote)
        err = l.HandleAuth(rem)
    default:
        err = fmt.Errorf("unknown envelope type %s", t)
        log.E.F("%s unknown envelope type: %s (payload: %q)", remote, t, string(rem))
    }

    // Handle any processing errors
    if err != nil {
        log.D.C(
            func() string {
                return fmt.Sprintf(
                    "notice->%s %s", remote, err,
                )
            },
        )
        if err = noticeenvelope.NewFrom(err.Error()).Write(l); chk.E(err) {
        log.E.F("%s message processing FAILED (type=%s): %v", remote, t, err)
        log.D.F("%s error context - original message: %q", remote, msgPreview)

        // Send error notice to client
        noticeMsg := fmt.Sprintf("%s: %s", t, err.Error())
        if noticeErr := noticeenvelope.NewFrom(noticeMsg).Write(l); noticeErr != nil {
            log.E.F("%s failed to send error notice after %s processing failure: %v", remote, t, noticeErr)
            return
        }
        log.D.F("%s sent error notice for %s processing failure", remote, t)
    } else {
        log.D.F("%s message processing SUCCESS (type=%s)", remote, t)
    }

}
@@ -4,11 +4,14 @@ import (
"encoding/json"
"net/http"
"sort"
"strings"

"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/crypto/p256k"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/protocol/relayinfo"
"next.orly.dev/pkg/version"
"protocol.orly/relayinfo"
)

// HandleRelayInfo generates and returns a relay information document in JSON
@@ -31,49 +34,64 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
var info *relayinfo.T
supportedNIPs := relayinfo.GetList(
relayinfo.BasicProtocol,
// relayinfo.Authentication,
// relayinfo.EncryptedDirectMessage,
// relayinfo.EventDeletion,
relayinfo.Authentication,
relayinfo.EncryptedDirectMessage,
relayinfo.EventDeletion,
relayinfo.RelayInformationDocument,
// relayinfo.GenericTagQueries,
relayinfo.GenericTagQueries,
// relayinfo.NostrMarketplace,
// relayinfo.EventTreatment,
// relayinfo.CommandResults,
// relayinfo.ParameterizedReplaceableEvents,
// relayinfo.ExpirationTimestamp,
// relayinfo.ProtectedEvents,
// relayinfo.RelayListMetadata,
relayinfo.EventTreatment,
relayinfo.CommandResults,
relayinfo.ParameterizedReplaceableEvents,
relayinfo.ExpirationTimestamp,
relayinfo.ProtectedEvents,
relayinfo.RelayListMetadata,
)
if s.Config.ACLMode != "none" {
supportedNIPs = relayinfo.GetList(
relayinfo.BasicProtocol,
relayinfo.Authentication,
// relayinfo.EncryptedDirectMessage,
// relayinfo.EventDeletion,
relayinfo.EncryptedDirectMessage,
relayinfo.EventDeletion,
relayinfo.RelayInformationDocument,
// relayinfo.GenericTagQueries,
relayinfo.GenericTagQueries,
// relayinfo.NostrMarketplace,
// relayinfo.EventTreatment,
// relayinfo.CommandResults,
// relayinfo.ParameterizedReplaceableEvents,
// relayinfo.ExpirationTimestamp,
// relayinfo.ProtectedEvents,
// relayinfo.RelayListMetadata,
relayinfo.EventTreatment,
relayinfo.CommandResults,
relayinfo.ParameterizedReplaceableEvents,
relayinfo.ExpirationTimestamp,
relayinfo.ProtectedEvents,
relayinfo.RelayListMetadata,
)
}
sort.Sort(supportedNIPs)
log.T.Ln("supported NIPs", supportedNIPs)
// Construct description with dashboard URL
dashboardURL := s.DashboardURL(r)
description := version.Description + " dashboard: " + dashboardURL

// Get relay identity pubkey as hex
var relayPubkey string
if skb, err := s.D.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
sign := new(p256k.Signer)
if err := sign.InitSec(skb); err == nil {
relayPubkey = hex.Enc(sign.Pub())
}
}

info = &relayinfo.T{
Name: s.Config.AppName,
Description: version.Description,
Description: description,
PubKey: relayPubkey,
Nips: supportedNIPs,
Software: version.URL,
Version: version.V,
Version: strings.TrimPrefix(version.V, "v"),
Limitation: relayinfo.Limits{
AuthRequired: s.Config.ACLMode != "none",
RestrictedWrites: s.Config.ACLMode != "none",
PaymentRequired: s.Config.MonthlyPriceSats > 0,
},
Icon: "https://cdn.satellite.earth/ac9778868fbf23b63c47c769a74e163377e6ea94d3f0f31711931663d035c4f6.png",
Icon: "https://i.nostr.build/6wGXAn7Zaw9mHxFg.png",
}
if err := json.NewEncoder(w).Encode(info); chk.E(err) {
}
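For reference, the document produced by HandleRelayInfo is the NIP-11 relay information document, which clients fetch over plain HTTP with the `Accept: application/nostr+json` header. A minimal client-side sketch (standard library only; the field subset and the local relay URL are assumptions for illustration, roughly matching the fields set above):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// RelayInfo mirrors a subset of the NIP-11 relay information document.
type RelayInfo struct {
	Name          string `json:"name"`
	Description   string `json:"description"`
	PubKey        string `json:"pubkey"`
	SupportedNIPs []int  `json:"supported_nips"`
	Software      string `json:"software"`
	Version       string `json:"version"`
	Icon          string `json:"icon"`
}

func fetchRelayInfo(url string) (*RelayInfo, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	// NIP-11: the info document is returned when this Accept header is present.
	req.Header.Set("Accept", "application/nostr+json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	var info RelayInfo
	if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
		return nil, err
	}
	return &info, nil
}

func main() {
	info, err := fetchRelayInfo("http://localhost:3334") // hypothetical local relay address
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Printf("%s (%s) supports NIPs %v\n", info.Name, info.Version, info.SupportedNIPs)
}
```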
@@ -1,40 +1,41 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
acl "acl.orly"
|
||||
"encoders.orly/envelopes/authenvelope"
|
||||
"encoders.orly/envelopes/closedenvelope"
|
||||
"encoders.orly/envelopes/eoseenvelope"
|
||||
"encoders.orly/envelopes/eventenvelope"
|
||||
"encoders.orly/envelopes/okenvelope"
|
||||
"encoders.orly/envelopes/reqenvelope"
|
||||
"encoders.orly/event"
|
||||
"encoders.orly/filter"
|
||||
"encoders.orly/hex"
|
||||
"encoders.orly/kind"
|
||||
"encoders.orly/reason"
|
||||
"encoders.orly/tag"
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/log"
|
||||
utils "utils.orly"
|
||||
"utils.orly/normalize"
|
||||
"utils.orly/pointers"
|
||||
"next.orly.dev/pkg/acl"
|
||||
"next.orly.dev/pkg/encoders/bech32encoding"
|
||||
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
|
||||
"next.orly.dev/pkg/encoders/envelopes/closedenvelope"
|
||||
"next.orly.dev/pkg/encoders/envelopes/eoseenvelope"
|
||||
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||
"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/filter"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/encoders/kind"
|
||||
"next.orly.dev/pkg/encoders/reason"
|
||||
"next.orly.dev/pkg/encoders/tag"
|
||||
"next.orly.dev/pkg/utils"
|
||||
"next.orly.dev/pkg/utils/normalize"
|
||||
"next.orly.dev/pkg/utils/pointers"
|
||||
)
|
||||
|
||||
func (l *Listener) HandleReq(msg []byte) (
|
||||
err error,
|
||||
) {
|
||||
var rem []byte
|
||||
func (l *Listener) HandleReq(msg []byte) (err error) {
|
||||
log.D.F("HandleReq: START processing from %s", l.remote)
|
||||
// var rem []byte
|
||||
env := reqenvelope.New()
|
||||
if rem, err = env.Unmarshal(msg); chk.E(err) {
|
||||
if _, err = env.Unmarshal(msg); chk.E(err) {
|
||||
return normalize.Error.Errorf(err.Error())
|
||||
}
|
||||
if len(rem) > 0 {
|
||||
log.I.F("extra '%s'", rem)
|
||||
}
|
||||
log.D.C(func() string { return fmt.Sprintf("REQ sub=%s filters=%d", env.Subscription, len(*env.Filters)) })
|
||||
// send a challenge to the client to auth if an ACL is active
|
||||
if acl.Registry.Active.Load() != "none" {
|
||||
if err = authenvelope.NewChallengeWith(l.challenge.Load()).
|
||||
@@ -43,11 +44,12 @@ func (l *Listener) HandleReq(msg []byte) (
|
||||
}
|
||||
}
|
||||
// check permissions of user
|
||||
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load())
|
||||
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
|
||||
switch accessLevel {
|
||||
case "none":
|
||||
if err = okenvelope.NewFrom(
|
||||
env.Subscription, false,
|
||||
// For REQ denial, send a CLOSED with auth-required reason (NIP-01)
|
||||
if err = closedenvelope.NewFrom(
|
||||
env.Subscription,
|
||||
reason.AuthRequired.F("user not authed or has no read access"),
|
||||
).Write(l); chk.E(err) {
|
||||
return
|
||||
@@ -55,36 +57,140 @@ func (l *Listener) HandleReq(msg []byte) (
|
||||
return
|
||||
default:
|
||||
// user has read access or better, continue
|
||||
log.D.F("user has %s access", accessLevel)
|
||||
}
|
||||
var events event.S
|
||||
// Create a single context for all filter queries, tied to the connection context, to prevent leaks and support timely cancellation
|
||||
queryCtx, queryCancel := context.WithTimeout(
|
||||
l.ctx, 30*time.Second,
|
||||
)
|
||||
defer queryCancel()
|
||||
|
||||
// Collect all events from all filters
|
||||
var allEvents event.S
|
||||
for _, f := range *env.Filters {
|
||||
if pointers.Present(f.Limit) {
|
||||
if f != nil {
|
||||
// Summarize filter details for diagnostics (avoid internal fields)
|
||||
var kindsLen int
|
||||
if f.Kinds != nil {
|
||||
kindsLen = f.Kinds.Len()
|
||||
}
|
||||
var authorsLen int
|
||||
if f.Authors != nil {
|
||||
authorsLen = f.Authors.Len()
|
||||
}
|
||||
var idsLen int
|
||||
if f.Ids != nil {
|
||||
idsLen = f.Ids.Len()
|
||||
}
|
||||
var dtag string
|
||||
if f.Tags != nil {
|
||||
if d := f.Tags.GetFirst([]byte("d")); d != nil {
|
||||
dtag = string(d.Value())
|
||||
}
|
||||
}
|
||||
var lim any
|
||||
if f.Limit != nil {
|
||||
lim = *f.Limit
|
||||
}
|
||||
var since any
|
||||
if f.Since != nil {
|
||||
since = f.Since.Int()
|
||||
}
|
||||
var until any
|
||||
if f.Until != nil {
|
||||
until = f.Until.Int()
|
||||
}
|
||||
log.D.C(func() string {
|
||||
return fmt.Sprintf("REQ %s filter: kinds.len=%d authors.len=%d ids.len=%d d=%q limit=%v since=%v until=%v", env.Subscription, kindsLen, authorsLen, idsLen, dtag, lim, since, until)
|
||||
})
|
||||
}
|
||||
if f != nil && pointers.Present(f.Limit) {
|
||||
if *f.Limit == 0 {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if events, err = l.QueryEvents(l.Ctx, f); chk.E(err) {
|
||||
var filterEvents event.S
|
||||
if filterEvents, err = l.QueryEvents(queryCtx, f); chk.E(err) {
|
||||
if errors.Is(err, badger.ErrDBClosed) {
|
||||
return
|
||||
}
|
||||
log.E.F("QueryEvents failed for filter: %v", err)
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
// Append events from this filter to the overall collection
|
||||
allEvents = append(allEvents, filterEvents...)
|
||||
}
|
||||
events = allEvents
|
||||
defer func() {
|
||||
for _, ev := range events {
|
||||
ev.Free()
|
||||
}
|
||||
}()
|
||||
var tmp event.S
|
||||
privCheck:
|
||||
for _, ev := range events {
|
||||
if kind.IsPrivileged(ev.Kind) &&
|
||||
accessLevel != "admin" { // admins can see all events
|
||||
log.I.F("checking privileged event %s", ev.ID)
|
||||
// Check for private tag first
|
||||
privateTags := ev.Tags.GetAll([]byte("private"))
|
||||
if len(privateTags) > 0 && accessLevel != "admin" {
|
||||
pk := l.authedPubkey.Load()
|
||||
if pk == nil {
|
||||
continue // no auth, can't access private events
|
||||
}
|
||||
|
||||
// Convert authenticated pubkey to npub for comparison
|
||||
authedNpub, err := bech32encoding.BinToNpub(pk)
|
||||
if err != nil {
|
||||
continue // couldn't convert pubkey, skip
|
||||
}
|
||||
|
||||
// Check if authenticated npub is in any private tag
|
||||
authorized := false
|
||||
for _, privateTag := range privateTags {
|
||||
authorizedNpubs := strings.Split(
|
||||
string(privateTag.Value()), ",",
|
||||
)
|
||||
for _, npub := range authorizedNpubs {
|
||||
if strings.TrimSpace(npub) == string(authedNpub) {
|
||||
authorized = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if authorized {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !authorized {
|
||||
continue // not authorized to see this private event
|
||||
}
|
||||
|
||||
tmp = append(tmp, ev)
|
||||
continue
|
||||
}
|
||||
|
||||
if l.Config.ACLMode != "none" &&
|
||||
(kind.IsPrivileged(ev.Kind) && accessLevel != "admin") &&
|
||||
l.authedPubkey.Load() != nil { // admins can see all events
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"checking privileged event %0x", ev.ID,
|
||||
)
|
||||
},
|
||||
)
|
||||
pk := l.authedPubkey.Load()
|
||||
if pk == nil {
|
||||
continue
|
||||
}
|
||||
if utils.FastEqual(ev.Pubkey, pk) {
|
||||
log.I.F(
|
||||
"privileged event %s is for logged in pubkey %0x", ev.ID,
|
||||
pk,
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"privileged event %s is for logged in pubkey %0x",
|
||||
ev.ID, pk,
|
||||
)
|
||||
},
|
||||
)
|
||||
tmp = append(tmp, ev)
|
||||
continue
|
||||
@@ -96,17 +202,25 @@ privCheck:
|
||||
continue
|
||||
}
|
||||
if utils.FastEqual(pt, pk) {
|
||||
log.I.F(
|
||||
"privileged event %s is for logged in pubkey %0x",
|
||||
ev.ID, pk,
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"privileged event %s is for logged in pubkey %0x",
|
||||
ev.ID, pk,
|
||||
)
|
||||
},
|
||||
)
|
||||
tmp = append(tmp, ev)
|
||||
continue privCheck
|
||||
}
|
||||
}
|
||||
log.W.F(
|
||||
"privileged event %s does not contain the logged in pubkey %0x",
|
||||
ev.ID, pk,
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"privileged event %s does not contain the logged in pubkey %0x",
|
||||
ev.ID, pk,
|
||||
)
|
||||
},
|
||||
)
|
||||
} else {
|
||||
tmp = append(tmp, ev)
|
||||
@@ -115,8 +229,19 @@ privCheck:
|
||||
events = tmp
|
||||
seen := make(map[string]struct{})
|
||||
for _, ev := range events {
|
||||
// track the IDs we've sent
|
||||
seen[string(ev.ID)] = struct{}{}
|
||||
log.D.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
|
||||
hex.Enc(ev.ID), ev.Kind,
|
||||
)
|
||||
},
|
||||
)
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf("event:\n%s\n", ev.Serialize())
|
||||
},
|
||||
)
|
||||
var res *eventenvelope.Result
|
||||
if res, err = eventenvelope.NewResultWith(
|
||||
env.Subscription, ev,
|
||||
@@ -126,10 +251,12 @@ privCheck:
|
||||
if err = res.Write(l); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// track the IDs we've sent (use hex encoding for stable key)
|
||||
seen[hex.Enc(ev.ID)] = struct{}{}
|
||||
}
|
||||
// write the EOSE to signal to the client that all events found have been
|
||||
// sent.
|
||||
log.T.F("sending EOSE to %s", l.remote)
|
||||
log.D.F("sending EOSE to %s", l.remote)
|
||||
if err = eoseenvelope.NewFrom(env.Subscription).
|
||||
Write(l); chk.E(err) {
|
||||
return
|
||||
@@ -137,6 +264,10 @@ privCheck:
|
||||
// if the query was for just Ids, we know there can't be any more results,
|
||||
// so cancel the subscription.
|
||||
cancel := true
|
||||
log.D.F(
|
||||
"REQ %s: computing cancel/subscription; events_sent=%d",
|
||||
env.Subscription, len(events),
|
||||
)
|
||||
var subbedFilters filter.S
|
||||
for _, f := range *env.Filters {
|
||||
if f.Ids.Len() < 1 {
|
||||
@@ -145,12 +276,16 @@ privCheck:
|
||||
} else {
|
||||
// remove the IDs that we already sent
|
||||
var notFounds [][]byte
|
||||
for _, ev := range events {
|
||||
if _, ok := seen[string(ev.ID)]; ok {
|
||||
for _, id := range f.Ids.T {
|
||||
if _, ok := seen[hex.Enc(id)]; ok {
|
||||
continue
|
||||
}
|
||||
notFounds = append(notFounds, ev.ID)
|
||||
notFounds = append(notFounds, id)
|
||||
}
|
||||
log.T.F(
|
||||
"REQ %s: ids outstanding=%d of %d", env.Subscription,
|
||||
len(notFounds), f.Ids.Len(),
|
||||
)
|
||||
// if all were found, don't add to subbedFilters
|
||||
if len(notFounds) == 0 {
|
||||
continue
|
||||
@@ -162,8 +297,8 @@ privCheck:
|
||||
}
|
||||
// also, if we received the limit number of events, the subscription is done
|
||||
if pointers.Present(f.Limit) {
|
||||
if len(events) < int(*f.Limit) {
|
||||
cancel = false
|
||||
if len(events) >= int(*f.Limit) {
|
||||
cancel = true
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -172,19 +307,17 @@ privCheck:
|
||||
if !cancel {
|
||||
l.publishers.Receive(
|
||||
&W{
|
||||
Conn: l.conn,
|
||||
remote: l.remote,
|
||||
Id: string(env.Subscription),
|
||||
Receiver: receiver,
|
||||
Filters: env.Filters,
|
||||
Conn: l.conn,
|
||||
remote: l.remote,
|
||||
Id: string(env.Subscription),
|
||||
Receiver: receiver,
|
||||
Filters: env.Filters,
|
||||
AuthedPubkey: l.authedPubkey.Load(),
|
||||
},
|
||||
)
|
||||
} else {
|
||||
if err = closedenvelope.NewFrom(
|
||||
env.Subscription, nil,
|
||||
).Write(l); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// suppress server-sent CLOSED; client will close subscription if desired
|
||||
}
|
||||
log.D.F("HandleReq: COMPLETED processing from %s", l.remote)
|
||||
return
|
||||
}
|
||||
|
||||
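HandleReq above gates privileged events on a "private" tag whose value is a comma-separated list of npubs. A standalone sketch of that membership check (the tag format follows the code above; the npub values are placeholders):

```go
package main

import (
	"fmt"
	"strings"
)

// authorizedByPrivateTag reports whether authedNpub appears in any of the
// event's "private" tag values, each of which is a comma-separated npub list.
func authorizedByPrivateTag(privateTagValues []string, authedNpub string) bool {
	for _, v := range privateTagValues {
		for _, npub := range strings.Split(v, ",") {
			if strings.TrimSpace(npub) == authedNpub {
				return true
			}
		}
	}
	return false
}

func main() {
	tags := []string{"npub1alice..., npub1relay..."} // placeholder npubs
	fmt.Println(authorizedByPrivateTag(tags, "npub1alice..."))   // true
	fmt.Println(authorizedByPrivateTag(tags, "npub1mallory...")) // false
}
```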
@@ -7,17 +7,19 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"encoders.orly/hex"
|
||||
"github.com/coder/websocket"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/log"
|
||||
"utils.orly/units"
|
||||
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/utils/units"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultWriteWait = 10 * time.Second
|
||||
DefaultPongWait = 60 * time.Second
|
||||
DefaultPingWait = DefaultPongWait / 2
|
||||
DefaultWriteTimeout = 3 * time.Second
|
||||
DefaultMaxMessageSize = 1 * units.Mb
|
||||
|
||||
// CloseMessage denotes a close control message. The optional message
|
||||
@@ -56,27 +58,59 @@ whitelist:
|
||||
if conn, err = websocket.Accept(
|
||||
w, r, &websocket.AcceptOptions{OriginPatterns: []string{"*"}},
|
||||
); chk.E(err) {
|
||||
log.E.F("websocket accept failed from %s: %v", remote, err)
|
||||
return
|
||||
}
|
||||
log.T.F("websocket accepted from %s path=%s", remote, r.URL.String())
|
||||
conn.SetReadLimit(DefaultMaxMessageSize)
|
||||
defer conn.CloseNow()
|
||||
listener := &Listener{
|
||||
ctx: ctx,
|
||||
Server: s,
|
||||
conn: conn,
|
||||
remote: remote,
|
||||
req: r,
|
||||
ctx: ctx,
|
||||
Server: s,
|
||||
conn: conn,
|
||||
remote: remote,
|
||||
req: r,
|
||||
startTime: time.Now(),
|
||||
}
|
||||
chal := make([]byte, 32)
|
||||
rand.Read(chal)
|
||||
listener.challenge.Store([]byte(hex.Enc(chal)))
|
||||
if s.Config.ACLMode != "none" {
|
||||
log.D.F("sending AUTH challenge to %s", remote)
|
||||
if err = authenvelope.NewChallengeWith(listener.challenge.Load()).
|
||||
Write(listener); chk.E(err) {
|
||||
log.E.F("failed to send AUTH challenge to %s: %v", remote, err)
|
||||
return
|
||||
}
|
||||
log.D.F("AUTH challenge sent successfully to %s", remote)
|
||||
}
|
||||
ticker := time.NewTicker(DefaultPingWait)
|
||||
go s.Pinger(ctx, conn, ticker, cancel)
|
||||
defer func() {
|
||||
log.D.F("closing websocket connection from %s", remote)
|
||||
|
||||
// Cancel context and stop pinger
|
||||
cancel()
|
||||
ticker.Stop()
|
||||
|
||||
// Cancel all subscriptions for this connection
|
||||
log.D.F("cancelling subscriptions for %s", remote)
|
||||
listener.publishers.Receive(&W{Cancel: true})
|
||||
|
||||
// Log detailed connection statistics
|
||||
dur := time.Since(listener.startTime)
|
||||
log.D.F(
|
||||
"ws connection closed %s: msgs=%d, REQs=%d, EVENTs=%d, duration=%v",
|
||||
remote, listener.msgCount, listener.reqCount, listener.eventCount,
|
||||
dur,
|
||||
)
|
||||
|
||||
// Log any remaining connection state
|
||||
if listener.authedPubkey.Load() != nil {
|
||||
log.D.F("ws connection %s was authenticated", remote)
|
||||
} else {
|
||||
log.D.F("ws connection %s was not authenticated", remote)
|
||||
}
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
@@ -87,12 +121,23 @@ whitelist:
|
||||
var typ websocket.MessageType
|
||||
var msg []byte
|
||||
log.T.F("waiting for message from %s", remote)
|
||||
if typ, msg, err = conn.Read(ctx); chk.E(err) {
|
||||
|
||||
// Block waiting for message; rely on pings and context cancellation to detect dead peers
|
||||
typ, msg, err = conn.Read(ctx)
|
||||
|
||||
if err != nil {
|
||||
if strings.Contains(
|
||||
err.Error(), "use of closed network connection",
|
||||
) {
|
||||
return
|
||||
}
|
||||
// Handle EOF errors gracefully - these occur when client closes connection
|
||||
// or sends incomplete/malformed WebSocket frames
|
||||
if strings.Contains(err.Error(), "EOF") ||
|
||||
strings.Contains(err.Error(), "failed to read frame header") {
|
||||
log.T.F("connection from %s closed: %v", remote, err)
|
||||
return
|
||||
}
|
||||
status := websocket.CloseStatus(err)
|
||||
switch status {
|
||||
case websocket.StatusNormalClosure,
|
||||
@@ -100,18 +145,46 @@ whitelist:
|
||||
websocket.StatusNoStatusRcvd,
|
||||
websocket.StatusAbnormalClosure,
|
||||
websocket.StatusProtocolError:
|
||||
log.T.F(
|
||||
"connection from %s closed with status: %v", remote, status,
|
||||
)
|
||||
default:
|
||||
log.E.F("unexpected close error from %s: %v", remote, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if typ == PingMessage {
|
||||
if err = conn.Write(ctx, PongMessage, msg); chk.E(err) {
|
||||
log.D.F("received PING from %s, sending PONG", remote)
|
||||
// Create a write context with timeout for pong response
|
||||
writeCtx, writeCancel := context.WithTimeout(
|
||||
ctx, DefaultWriteTimeout,
|
||||
)
|
||||
pongStart := time.Now()
|
||||
if err = conn.Write(writeCtx, PongMessage, msg); chk.E(err) {
|
||||
pongDuration := time.Since(pongStart)
|
||||
log.E.F(
|
||||
"failed to send PONG to %s after %v: %v", remote,
|
||||
pongDuration, err,
|
||||
)
|
||||
if writeCtx.Err() != nil {
|
||||
log.E.F(
|
||||
"PONG write timeout to %s after %v (limit=%v)", remote,
|
||||
pongDuration, DefaultWriteTimeout,
|
||||
)
|
||||
}
|
||||
writeCancel()
|
||||
return
|
||||
}
|
||||
pongDuration := time.Since(pongStart)
|
||||
log.D.F("sent PONG to %s successfully in %v", remote, pongDuration)
|
||||
if pongDuration > time.Millisecond*50 {
|
||||
log.D.F("SLOW PONG to %s: %v (>50ms)", remote, pongDuration)
|
||||
}
|
||||
writeCancel()
|
||||
continue
|
||||
}
|
||||
go listener.HandleMessage(msg, remote)
|
||||
// log.T.F("received message from %s: %s", remote, string(msg))
|
||||
listener.HandleMessage(msg, remote)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -120,17 +193,51 @@ func (s *Server) Pinger(
|
||||
cancel context.CancelFunc,
|
||||
) {
|
||||
defer func() {
|
||||
log.D.F("pinger shutting down")
|
||||
cancel()
|
||||
ticker.Stop()
|
||||
}()
|
||||
var err error
|
||||
pingCount := 0
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if err = conn.Ping(ctx); chk.E(err) {
|
||||
pingCount++
|
||||
log.D.F("sending PING #%d", pingCount)
|
||||
|
||||
// Create a write context with timeout for ping operation
|
||||
pingCtx, pingCancel := context.WithTimeout(ctx, DefaultWriteTimeout)
|
||||
pingStart := time.Now()
|
||||
|
||||
if err = conn.Ping(pingCtx); err != nil {
|
||||
pingDuration := time.Since(pingStart)
|
||||
log.E.F(
|
||||
"PING #%d FAILED after %v: %v", pingCount, pingDuration,
|
||||
err,
|
||||
)
|
||||
|
||||
if pingCtx.Err() != nil {
|
||||
log.E.F(
|
||||
"PING #%d timeout after %v (limit=%v)", pingCount,
|
||||
pingDuration, DefaultWriteTimeout,
|
||||
)
|
||||
}
|
||||
|
||||
chk.E(err)
|
||||
pingCancel()
|
||||
return
|
||||
}
|
||||
|
||||
pingDuration := time.Since(pingStart)
|
||||
log.D.F("PING #%d sent successfully in %v", pingCount, pingDuration)
|
||||
|
||||
if pingDuration > time.Millisecond*100 {
|
||||
log.D.F("SLOW PING #%d: %v (>100ms)", pingCount, pingDuration)
|
||||
}
|
||||
|
||||
pingCancel()
|
||||
case <-ctx.Done():
|
||||
log.D.F("pinger context cancelled after %d pings", pingCount)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
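The pinger above bounds every keepalive with its own timeout so a stalled peer cannot block the loop. A generic sketch of that pattern, written against a minimal interface rather than the coder/websocket connection type (the fake connection is only for the demonstration):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pinger is the minimal surface the keepalive loop needs.
type pinger interface {
	Ping(ctx context.Context) error
}

// keepAlive pings conn every interval, giving each ping its own deadline,
// and returns when a ping fails or the parent context is cancelled.
func keepAlive(ctx context.Context, conn pinger, interval, timeout time.Duration) error {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			pingCtx, cancel := context.WithTimeout(ctx, timeout)
			err := conn.Ping(pingCtx)
			cancel()
			if err != nil {
				return fmt.Errorf("ping failed: %w", err)
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

// fakeConn simulates a peer that stops answering after two pings.
type fakeConn struct{ n int }

func (f *fakeConn) Ping(ctx context.Context) error {
	f.n++
	if f.n > 2 {
		return errors.New("peer gone")
	}
	return nil
}

func main() {
	err := keepAlive(context.Background(), &fakeConn{}, 10*time.Millisecond, time.Second)
	fmt.Println(err)
}
```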
@@ -3,10 +3,12 @@ package app
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/coder/websocket"
|
||||
"lol.mleku.dev/chk"
|
||||
"utils.orly/atomic"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/utils/atomic"
|
||||
)
|
||||
|
||||
type Listener struct {
|
||||
@@ -17,12 +19,73 @@ type Listener struct {
|
||||
req *http.Request
|
||||
challenge atomic.Bytes
|
||||
authedPubkey atomic.Bytes
|
||||
startTime time.Time
|
||||
// Diagnostics: per-connection counters
|
||||
msgCount int
|
||||
reqCount int
|
||||
eventCount int
|
||||
}
|
||||
|
||||
// Ctx returns the listener's base context; callers that need per-operation
// timeouts derive their own child contexts from it
|
||||
func (l *Listener) Ctx() context.Context {
|
||||
return l.ctx
|
||||
}
|
||||
|
||||
func (l *Listener) Write(p []byte) (n int, err error) {
|
||||
if err = l.conn.Write(l.ctx, websocket.MessageText, p); chk.E(err) {
|
||||
start := time.Now()
|
||||
msgLen := len(p)
|
||||
|
||||
// Log message attempt with content preview (first 200 chars for diagnostics)
|
||||
preview := string(p)
|
||||
if len(preview) > 200 {
|
||||
preview = preview[:200] + "..."
|
||||
}
|
||||
log.D.F("ws->%s attempting write: len=%d preview=%q", l.remote, msgLen, preview)
|
||||
|
||||
// Use a separate context with timeout for writes to prevent race conditions
|
||||
// where the main connection context gets cancelled while writing events
|
||||
writeCtx, cancel := context.WithTimeout(
|
||||
context.Background(), DefaultWriteTimeout,
|
||||
)
|
||||
defer cancel()
|
||||
|
||||
// Attempt the write operation
|
||||
writeStart := time.Now()
|
||||
if err = l.conn.Write(writeCtx, websocket.MessageText, p); err != nil {
|
||||
writeDuration := time.Since(writeStart)
|
||||
totalDuration := time.Since(start)
|
||||
|
||||
// Log detailed failure information
|
||||
log.E.F("ws->%s WRITE FAILED: len=%d duration=%v write_duration=%v error=%v preview=%q",
|
||||
l.remote, msgLen, totalDuration, writeDuration, err, preview)
|
||||
|
||||
// Check if this is a context timeout
|
||||
if writeCtx.Err() != nil {
|
||||
log.E.F("ws->%s write timeout after %v (limit=%v)", l.remote, writeDuration, DefaultWriteTimeout)
|
||||
}
|
||||
|
||||
// Check connection state
|
||||
if l.conn != nil {
|
||||
log.D.F("ws->%s connection state during failure: remote_addr=%v", l.remote, l.req.RemoteAddr)
|
||||
}
|
||||
|
||||
chk.E(err) // Still call the original error handler
|
||||
return
|
||||
}
|
||||
n = len(p)
|
||||
|
||||
// Log successful write with timing
|
||||
writeDuration := time.Since(writeStart)
|
||||
totalDuration := time.Since(start)
|
||||
n = msgLen
|
||||
|
||||
log.D.F("ws->%s WRITE SUCCESS: len=%d duration=%v write_duration=%v",
|
||||
l.remote, n, totalDuration, writeDuration)
|
||||
|
||||
// Log slow writes for performance diagnostics
|
||||
if writeDuration > time.Millisecond*100 {
|
||||
log.D.F("ws->%s SLOW WRITE detected: %v (>100ms) len=%d", l.remote, writeDuration, n)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
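Listener.Write above deliberately derives its deadline from context.Background rather than the connection context, so cancelling the read loop does not abort an event that is already being written. A small sketch of that pattern against a generic write function (the write callback here is simulated):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// writeWithOwnDeadline runs write under its own timeout, detached from the
// caller's context, so cancelling the caller does not cut an in-flight write
// short; only the explicit timeout bounds it.
func writeWithOwnDeadline(
	write func(ctx context.Context, p []byte) error,
	p []byte,
	timeout time.Duration,
) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return write(ctx, p)
}

func main() {
	slow := func(ctx context.Context, p []byte) error {
		select {
		case <-time.After(50 * time.Millisecond): // simulated network write
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	msg := []byte(`["NOTICE","hi"]`)
	fmt.Println(writeWithOwnDeadline(slow, msg, 10*time.Millisecond)) // context deadline exceeded
	fmt.Println(writeWithOwnDeadline(slow, msg, time.Second))         // <nil>
}
```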
44 app/main.go
@@ -5,12 +5,13 @@ import (
"fmt"
"net/http"

database "database.orly"
"encoders.orly/bech32encoding"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/app/config"
"protocol.orly/publish"
"next.orly.dev/pkg/crypto/keys"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/encoders/bech32encoding"
"next.orly.dev/pkg/protocol/publish"
)

func Run(
@@ -28,6 +29,9 @@ func Run(
var err error
var adminKeys [][]byte
for _, admin := range cfg.Admins {
if len(admin) == 0 {
continue
}
var pk []byte
if pk, err = bech32encoding.NpubOrHexToPublicKeyBinary(admin); chk.E(err) {
continue
@@ -42,6 +46,40 @@ func Run(
publishers: publish.New(NewPublisher(ctx)),
Admins: adminKeys,
}
// Initialize the user interface
l.UserInterface()

// Ensure a relay identity secret key exists when subscriptions and NWC are enabled
if cfg.SubscriptionEnabled && cfg.NWCUri != "" {
if skb, e := db.GetOrCreateRelayIdentitySecret(); e != nil {
log.E.F("failed to ensure relay identity key: %v", e)
} else if pk, e2 := keys.SecretBytesToPubKeyHex(skb); e2 == nil {
log.I.F("relay identity loaded (pub=%s)", pk)
// ensure relay identity pubkey is considered an admin for ACL follows mode
found := false
for _, a := range cfg.Admins {
if a == pk {
found = true
break
}
}
if !found {
cfg.Admins = append(cfg.Admins, pk)
log.I.F("added relay identity to admins for follow-list whitelisting")
}
}
}

if l.paymentProcessor, err = NewPaymentProcessor(ctx, cfg, db); err != nil {
log.E.F("failed to create payment processor: %v", err)
// Continue without payment processor
} else {
if err = l.paymentProcessor.Start(); err != nil {
log.E.F("failed to start payment processor: %v", err)
} else {
log.I.F("payment processor started successfully")
}
}
addr := fmt.Sprintf("%s:%d", cfg.Listen, cfg.Port)
log.I.F("starting listener on http://%s", addr)
go func() {
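Run above appends the relay identity pubkey to cfg.Admins only when it is not already present. The same idea as a tiny standalone helper (the hex strings are placeholders):

```go
package main

import "fmt"

// ensureAdmin returns admins with pk appended, unless pk is already listed.
func ensureAdmin(admins []string, pk string) []string {
	for _, a := range admins {
		if a == pk {
			return admins
		}
	}
	return append(admins, pk)
}

func main() {
	admins := []string{"aa11..."} // placeholder pubkey hex
	admins = ensureAdmin(admins, "bb22...")
	admins = ensureAdmin(admins, "bb22...") // second call is a no-op
	fmt.Println(admins)                     // [aa11... bb22...]
}
```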
@@ -1,9 +1,9 @@
package app

import (
"encoders.orly/envelopes/eventenvelope"
"encoders.orly/envelopes/okenvelope"
"encoders.orly/reason"
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
"next.orly.dev/pkg/encoders/envelopes/okenvelope"
"next.orly.dev/pkg/encoders/reason"
)

// OK represents a function that processes events or operations, using provided
894 app/payment_processor.go (new file)
@@ -0,0 +1,894 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
// std hex not used; use project hex encoder instead
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/app/config"
|
||||
"next.orly.dev/pkg/acl"
|
||||
"next.orly.dev/pkg/crypto/p256k"
|
||||
"next.orly.dev/pkg/database"
|
||||
"next.orly.dev/pkg/encoders/bech32encoding"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/encoders/json"
|
||||
"next.orly.dev/pkg/encoders/kind"
|
||||
"next.orly.dev/pkg/encoders/tag"
|
||||
"next.orly.dev/pkg/encoders/timestamp"
|
||||
"next.orly.dev/pkg/protocol/nwc"
|
||||
)
|
||||
|
||||
// PaymentProcessor handles NWC payment notifications and updates subscriptions
|
||||
type PaymentProcessor struct {
|
||||
nwcClient *nwc.Client
|
||||
db *database.D
|
||||
config *config.C
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
wg sync.WaitGroup
|
||||
dashboardURL string
|
||||
}
|
||||
|
||||
// NewPaymentProcessor creates a new payment processor
|
||||
func NewPaymentProcessor(
|
||||
ctx context.Context, cfg *config.C, db *database.D,
|
||||
) (pp *PaymentProcessor, err error) {
|
||||
if cfg.NWCUri == "" {
|
||||
return nil, fmt.Errorf("NWC URI not configured")
|
||||
}
|
||||
|
||||
var nwcClient *nwc.Client
|
||||
if nwcClient, err = nwc.NewClient(cfg.NWCUri); chk.E(err) {
|
||||
return nil, fmt.Errorf("failed to create NWC client: %w", err)
|
||||
}
|
||||
|
||||
c, cancel := context.WithCancel(ctx)
|
||||
|
||||
pp = &PaymentProcessor{
|
||||
nwcClient: nwcClient,
|
||||
db: db,
|
||||
config: cfg,
|
||||
ctx: c,
|
||||
cancel: cancel,
|
||||
}
|
||||
|
||||
return pp, nil
|
||||
}
|
||||
|
||||
// Start begins listening for payment notifications
|
||||
func (pp *PaymentProcessor) Start() error {
|
||||
// start NWC notifications listener
|
||||
pp.wg.Add(1)
|
||||
go func() {
|
||||
defer pp.wg.Done()
|
||||
if err := pp.listenForPayments(); err != nil {
|
||||
log.E.F("payment processor error: %v", err)
|
||||
}
|
||||
}()
|
||||
// start periodic follow-list sync if subscriptions are enabled
|
||||
if pp.config != nil && pp.config.SubscriptionEnabled {
|
||||
pp.wg.Add(1)
|
||||
go func() {
|
||||
defer pp.wg.Done()
|
||||
pp.runFollowSyncLoop()
|
||||
}()
|
||||
// start daily subscription checker
|
||||
pp.wg.Add(1)
|
||||
go func() {
|
||||
defer pp.wg.Done()
|
||||
pp.runDailySubscriptionChecker()
|
||||
}()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop gracefully stops the payment processor
|
||||
func (pp *PaymentProcessor) Stop() {
|
||||
if pp.cancel != nil {
|
||||
pp.cancel()
|
||||
}
|
||||
pp.wg.Wait()
|
||||
}
|
||||
|
||||
// listenForPayments subscribes to NWC notifications and processes payments
|
||||
func (pp *PaymentProcessor) listenForPayments() error {
|
||||
return pp.nwcClient.SubscribeNotifications(pp.ctx, pp.handleNotification)
|
||||
}
|
||||
|
||||
// runFollowSyncLoop periodically syncs the relay identity follow list with active subscribers
|
||||
func (pp *PaymentProcessor) runFollowSyncLoop() {
|
||||
t := time.NewTicker(10 * time.Minute)
|
||||
defer t.Stop()
|
||||
// do an initial sync shortly after start
|
||||
_ = pp.syncFollowList()
|
||||
for {
|
||||
select {
|
||||
case <-pp.ctx.Done():
|
||||
return
|
||||
case <-t.C:
|
||||
if err := pp.syncFollowList(); err != nil {
|
||||
log.W.F("follow list sync failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// runDailySubscriptionChecker checks once daily for subscription expiry warnings and trial reminders
|
||||
func (pp *PaymentProcessor) runDailySubscriptionChecker() {
|
||||
t := time.NewTicker(24 * time.Hour)
|
||||
defer t.Stop()
|
||||
// do an initial check shortly after start
|
||||
_ = pp.checkSubscriptionStatus()
|
||||
for {
|
||||
select {
|
||||
case <-pp.ctx.Done():
|
||||
return
|
||||
case <-t.C:
|
||||
if err := pp.checkSubscriptionStatus(); err != nil {
|
||||
log.W.F("subscription status check failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// syncFollowList builds a kind-3 event from the relay identity containing only active subscribers
|
||||
func (pp *PaymentProcessor) syncFollowList() error {
|
||||
// ensure we have a relay identity secret
|
||||
skb, err := pp.db.GetRelayIdentitySecret()
|
||||
if err != nil || len(skb) != 32 {
|
||||
return nil // nothing to do if no identity
|
||||
}
|
||||
// collect active subscribers
|
||||
actives, err := pp.getActiveSubscriberPubkeys()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// signer
|
||||
sign := new(p256k.Signer)
|
||||
if err := sign.InitSec(skb); err != nil {
|
||||
return err
|
||||
}
|
||||
// build follow list event
|
||||
ev := event.New()
|
||||
ev.Kind = kind.FollowList.K
|
||||
ev.Pubkey = sign.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
ev.Tags = tag.NewS()
|
||||
for _, pk := range actives {
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(pk)))
|
||||
}
|
||||
// sign and save
|
||||
ev.Sign(sign)
|
||||
if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
|
||||
return err
|
||||
}
|
||||
log.I.F(
|
||||
"updated relay follow list with %d active subscribers", len(actives),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
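syncFollowList publishes a kind-3 (NIP-02 follow list) event whose "p" tags carry the hex pubkeys of active subscribers. A standard-library sketch of just the tag construction (the event framing here is simplified JSON, not the project's event type):

```go
package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// followListTags builds NIP-02 style tags: one ["p", "<hex pubkey>"] per follow.
func followListTags(pubkeys [][]byte) [][]string {
	tags := make([][]string, 0, len(pubkeys))
	for _, pk := range pubkeys {
		tags = append(tags, []string{"p", hex.EncodeToString(pk)})
	}
	return tags
}

func main() {
	pks := [][]byte{make([]byte, 32)} // placeholder 32-byte pubkey
	out, _ := json.Marshal(map[string]any{
		"kind": 3,
		"tags": followListTags(pks),
	})
	fmt.Println(string(out))
}
```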
// getActiveSubscriberPubkeys scans the subscription records and returns active ones
|
||||
func (pp *PaymentProcessor) getActiveSubscriberPubkeys() ([][]byte, error) {
|
||||
prefix := []byte("sub:")
|
||||
now := time.Now()
|
||||
var out [][]byte
|
||||
err := pp.db.DB.View(
|
||||
func(txn *badger.Txn) error {
|
||||
it := txn.NewIterator(badger.DefaultIteratorOptions)
|
||||
defer it.Close()
|
||||
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
|
||||
item := it.Item()
|
||||
key := item.KeyCopy(nil)
|
||||
// key format: sub:<hexpub>
|
||||
hexpub := string(key[len(prefix):])
|
||||
var sub database.Subscription
|
||||
if err := item.Value(
|
||||
func(val []byte) error {
|
||||
return json.Unmarshal(val, &sub)
|
||||
},
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
if now.Before(sub.TrialEnd) || (!sub.PaidUntil.IsZero() && now.Before(sub.PaidUntil)) {
|
||||
if b, err := hex.Dec(hexpub); err == nil {
|
||||
out = append(out, b)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
)
|
||||
return out, err
|
||||
}
|
||||
|
||||
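getActiveSubscriberPubkeys iterates Badger keys of the form sub:<hex pubkey>. A sketch of building and parsing that key scheme without touching the database (the prefix matches the code above):

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

const subPrefix = "sub:"

// subKey builds the subscription record key for a pubkey.
func subKey(pubkey []byte) []byte {
	return []byte(subPrefix + hex.EncodeToString(pubkey))
}

// pubkeyFromSubKey recovers the pubkey bytes from a subscription key.
func pubkeyFromSubKey(key []byte) ([]byte, error) {
	s := string(key)
	if !strings.HasPrefix(s, subPrefix) {
		return nil, fmt.Errorf("not a subscription key: %q", s)
	}
	return hex.DecodeString(s[len(subPrefix):])
}

func main() {
	pk := make([]byte, 32) // placeholder pubkey
	k := subKey(pk)
	back, err := pubkeyFromSubKey(k)
	fmt.Println(string(k), len(back), err)
}
```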
// checkSubscriptionStatus scans all subscriptions and creates warning/reminder notes
|
||||
func (pp *PaymentProcessor) checkSubscriptionStatus() error {
|
||||
prefix := []byte("sub:")
|
||||
now := time.Now()
|
||||
sevenDaysFromNow := now.AddDate(0, 0, 7)
|
||||
|
||||
return pp.db.DB.View(
|
||||
func(txn *badger.Txn) error {
|
||||
it := txn.NewIterator(badger.DefaultIteratorOptions)
|
||||
defer it.Close()
|
||||
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
|
||||
item := it.Item()
|
||||
key := item.KeyCopy(nil)
|
||||
// key format: sub:<hexpub>
|
||||
hexpub := string(key[len(prefix):])
|
||||
|
||||
var sub database.Subscription
|
||||
if err := item.Value(
|
||||
func(val []byte) error {
|
||||
return json.Unmarshal(val, &sub)
|
||||
},
|
||||
); err != nil {
|
||||
continue // skip invalid subscription records
|
||||
}
|
||||
|
||||
pubkey, err := hex.Dec(hexpub)
|
||||
if err != nil {
|
||||
continue // skip invalid pubkey
|
||||
}
|
||||
|
||||
// Check if paid subscription is expiring in 7 days
|
||||
if !sub.PaidUntil.IsZero() {
|
||||
// Format dates for comparison (ignore time component)
|
||||
paidUntilDate := sub.PaidUntil.Truncate(24 * time.Hour)
|
||||
sevenDaysDate := sevenDaysFromNow.Truncate(24 * time.Hour)
|
||||
|
||||
if paidUntilDate.Equal(sevenDaysDate) {
|
||||
go pp.createExpiryWarningNote(pubkey, sub.PaidUntil)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if user is on trial (no paid subscription, trial not expired)
|
||||
if sub.PaidUntil.IsZero() && now.Before(sub.TrialEnd) {
|
||||
go pp.createTrialReminderNote(pubkey, sub.TrialEnd)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
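checkSubscriptionStatus flags a subscription when its PaidUntil, truncated to whole days, equals the date seven days from now. A sketch of that comparison, with the caveat that time.Truncate works on absolute time (effectively UTC day boundaries), which is how the checker above behaves:

```go
package main

import (
	"fmt"
	"time"
)

// expiresInSevenDays reports whether paidUntil falls on the same UTC day as
// now+7d, using the same 24h truncation as the checker above.
func expiresInSevenDays(now, paidUntil time.Time) bool {
	target := now.AddDate(0, 0, 7).Truncate(24 * time.Hour)
	return paidUntil.Truncate(24 * time.Hour).Equal(target)
}

func main() {
	now := time.Date(2024, 5, 1, 10, 0, 0, 0, time.UTC)
	fmt.Println(expiresInSevenDays(now, time.Date(2024, 5, 8, 23, 0, 0, 0, time.UTC))) // true
	fmt.Println(expiresInSevenDays(now, time.Date(2024, 5, 9, 1, 0, 0, 0, time.UTC)))  // false
}
```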
// createExpiryWarningNote creates a warning note for users whose paid subscription expires in 7 days
|
||||
func (pp *PaymentProcessor) createExpiryWarningNote(userPubkey []byte, expiryTime time.Time) error {
|
||||
// Get relay identity secret to sign the note
|
||||
skb, err := pp.db.GetRelayIdentitySecret()
|
||||
if err != nil || len(skb) != 32 {
|
||||
return fmt.Errorf("no relay identity configured")
|
||||
}
|
||||
|
||||
// Initialize signer
|
||||
sign := new(p256k.Signer)
|
||||
if err := sign.InitSec(skb); err != nil {
|
||||
return fmt.Errorf("failed to initialize signer: %w", err)
|
||||
}
|
||||
|
||||
monthlyPrice := pp.config.MonthlyPriceSats
|
||||
if monthlyPrice <= 0 {
|
||||
monthlyPrice = 6000
|
||||
}
|
||||
|
||||
// Get relay npub for content link
|
||||
relayNpubForContent, err := bech32encoding.BinToNpub(sign.Pub())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encode relay npub: %w", err)
|
||||
}
|
||||
|
||||
// Create the warning note content
|
||||
content := fmt.Sprintf(`⚠️ Subscription Expiring Soon ⚠️
|
||||
|
||||
Your paid subscription to this relay will expire in 7 days on %s.
|
||||
|
||||
💰 To extend your subscription:
|
||||
- Monthly price: %d sats
|
||||
- Zap this note with your payment amount
|
||||
- Each %d sats = 30 days of access
|
||||
|
||||
⚡ Payment Instructions:
|
||||
1. Use any Lightning wallet that supports zaps
|
||||
2. Zap this note with your payment
|
||||
3. Your subscription will be automatically extended
|
||||
|
||||
Don't lose access to your private relay! Extend your subscription today.
|
||||
|
||||
Relay: nostr:%s
|
||||
|
||||
Log in to the relay dashboard to access your configuration at: %s`,
|
||||
expiryTime.Format("2006-01-02 15:04:05 UTC"), monthlyPrice, monthlyPrice, string(relayNpubForContent), pp.getDashboardURL())
|
||||
|
||||
// Build the event
|
||||
ev := event.New()
|
||||
ev.Kind = kind.TextNote.K // Kind 1 for text note
|
||||
ev.Pubkey = sign.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
ev.Content = []byte(content)
|
||||
ev.Tags = tag.NewS()
|
||||
|
||||
// Add "p" tag for the user
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(userPubkey)))
|
||||
|
||||
// Add expiration tag (5 days from creation)
|
||||
noteExpiry := time.Now().AddDate(0, 0, 5)
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("expiration", fmt.Sprintf("%d", noteExpiry.Unix())))
|
||||
|
||||
// Add "private" tag with authorized npubs (user and relay)
|
||||
var authorizedNpubs []string
|
||||
|
||||
// Add user npub
|
||||
userNpub, err := bech32encoding.BinToNpub(userPubkey)
|
||||
if err == nil {
|
||||
authorizedNpubs = append(authorizedNpubs, string(userNpub))
|
||||
}
|
||||
|
||||
// Add relay npub
|
||||
relayNpub, err := bech32encoding.BinToNpub(sign.Pub())
|
||||
if err == nil {
|
||||
authorizedNpubs = append(authorizedNpubs, string(relayNpub))
|
||||
}
|
||||
|
||||
// Create the private tag with comma-separated npubs
|
||||
if len(authorizedNpubs) > 0 {
|
||||
privateTagValue := strings.Join(authorizedNpubs, ",")
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("private", privateTagValue))
|
||||
}
|
||||
|
||||
// Add a special tag to mark this as an expiry warning
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("warning", "subscription-expiry"))
|
||||
|
||||
// Sign and save the event
|
||||
ev.Sign(sign)
|
||||
if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
|
||||
return fmt.Errorf("failed to save expiry warning note: %w", err)
|
||||
}
|
||||
|
||||
log.I.F("created expiry warning note for user %s (expires %s)", hex.Enc(userPubkey), expiryTime.Format("2006-01-02"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// createTrialReminderNote creates a reminder note for users on trial to support the relay
|
||||
func (pp *PaymentProcessor) createTrialReminderNote(userPubkey []byte, trialEnd time.Time) error {
|
||||
// Get relay identity secret to sign the note
|
||||
skb, err := pp.db.GetRelayIdentitySecret()
|
||||
if err != nil || len(skb) != 32 {
|
||||
return fmt.Errorf("no relay identity configured")
|
||||
}
|
||||
|
||||
// Initialize signer
|
||||
sign := new(p256k.Signer)
|
||||
if err := sign.InitSec(skb); err != nil {
|
||||
return fmt.Errorf("failed to initialize signer: %w", err)
|
||||
}
|
||||
|
||||
monthlyPrice := pp.config.MonthlyPriceSats
|
||||
if monthlyPrice <= 0 {
|
||||
monthlyPrice = 6000
|
||||
}
|
||||
|
||||
// Calculate daily rate
|
||||
dailyRate := monthlyPrice / 30
|
||||
|
||||
// Get relay npub for content link
|
||||
relayNpubForContent, err := bech32encoding.BinToNpub(sign.Pub())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encode relay npub: %w", err)
|
||||
}
|
||||
|
||||
// Create the reminder note content
|
||||
content := fmt.Sprintf(`🆓 Free Trial Reminder 🆓
|
||||
|
||||
You're currently using this relay for FREE! Your trial expires on %s.
|
||||
|
||||
🙏 Support Relay Operations:
|
||||
This relay provides you with private, censorship-resistant communication. Please consider supporting its continued operation.
|
||||
|
||||
💰 Subscription Details:
|
||||
- Monthly price: %d sats (%d sats/day)
|
||||
- Fair pricing for premium service
|
||||
- Helps keep the relay running 24/7
|
||||
|
||||
⚡ How to Subscribe:
|
||||
Simply zap this note with your payment amount:
|
||||
- Each %d sats = 30 days of access
|
||||
- Payment is processed automatically
|
||||
- No account setup required
|
||||
|
||||
Thank you for considering supporting decentralized communication!
|
||||
|
||||
Relay: nostr:%s
|
||||
|
||||
Log in to the relay dashboard to access your configuration at: %s`,
|
||||
trialEnd.Format("2006-01-02 15:04:05 UTC"), monthlyPrice, dailyRate, monthlyPrice, string(relayNpubForContent), pp.getDashboardURL())
|
||||
|
||||
// Build the event
|
||||
ev := event.New()
|
||||
ev.Kind = kind.TextNote.K // Kind 1 for text note
|
||||
ev.Pubkey = sign.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
ev.Content = []byte(content)
|
||||
ev.Tags = tag.NewS()
|
||||
|
||||
// Add "p" tag for the user
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(userPubkey)))
|
||||
|
||||
// Add expiration tag (5 days from creation)
|
||||
noteExpiry := time.Now().AddDate(0, 0, 5)
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("expiration", fmt.Sprintf("%d", noteExpiry.Unix())))
|
||||
|
||||
// Add "private" tag with authorized npubs (user and relay)
|
||||
var authorizedNpubs []string
|
||||
|
||||
// Add user npub
|
||||
userNpub, err := bech32encoding.BinToNpub(userPubkey)
|
||||
if err == nil {
|
||||
authorizedNpubs = append(authorizedNpubs, string(userNpub))
|
||||
}
|
||||
|
||||
// Add relay npub
|
||||
relayNpub, err := bech32encoding.BinToNpub(sign.Pub())
|
||||
if err == nil {
|
||||
authorizedNpubs = append(authorizedNpubs, string(relayNpub))
|
||||
}
|
||||
|
||||
// Create the private tag with comma-separated npubs
|
||||
if len(authorizedNpubs) > 0 {
|
||||
privateTagValue := strings.Join(authorizedNpubs, ",")
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("private", privateTagValue))
|
||||
}
|
||||
|
||||
// Add a special tag to mark this as a trial reminder
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("reminder", "trial-support"))
|
||||
|
||||
// Sign and save the event
|
||||
ev.Sign(sign)
|
||||
if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
|
||||
return fmt.Errorf("failed to save trial reminder note: %w", err)
|
||||
}
|
||||
|
||||
log.I.F("created trial reminder note for user %s (trial ends %s)", hex.Enc(userPubkey), trialEnd.Format("2006-01-02"))
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleNotification processes incoming payment notifications
|
||||
func (pp *PaymentProcessor) handleNotification(
|
||||
notificationType string, notification map[string]any,
|
||||
) error {
|
||||
// Only process payment_received notifications
|
||||
if notificationType != "payment_received" {
|
||||
return nil
|
||||
}
|
||||
|
||||
amount, ok := notification["amount"].(float64)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid amount")
|
||||
}
|
||||
|
||||
// Prefer explicit payer/relay pubkeys if provided in metadata
|
||||
var payerPubkey []byte
|
||||
var userNpub string
|
||||
if metadata, ok := notification["metadata"].(map[string]any); ok {
|
||||
if s, ok := metadata["payer_pubkey"].(string); ok && s != "" {
|
||||
if pk, err := decodeAnyPubkey(s); err == nil {
|
||||
payerPubkey = pk
|
||||
}
|
||||
}
|
||||
if payerPubkey == nil {
|
||||
if s, ok := metadata["sender_pubkey"].(string); ok && s != "" { // alias
|
||||
if pk, err := decodeAnyPubkey(s); err == nil {
|
||||
payerPubkey = pk
|
||||
}
|
||||
}
|
||||
}
|
||||
// Optional: the intended subscriber npub (for backwards compat)
|
||||
if userNpub == "" {
|
||||
if npubField, ok := metadata["npub"].(string); ok {
|
||||
userNpub = npubField
|
||||
}
|
||||
}
|
||||
// If relay identity pubkey is provided, verify it matches ours
|
||||
if s, ok := metadata["relay_pubkey"].(string); ok && s != "" {
|
||||
if rpk, err := decodeAnyPubkey(s); err == nil {
|
||||
if skb, err := pp.db.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
|
||||
var signer p256k.Signer
|
||||
if err := signer.InitSec(skb); err == nil {
|
||||
if !strings.EqualFold(hex.Enc(rpk), hex.Enc(signer.Pub())) {
|
||||
log.W.F("relay_pubkey in payment metadata does not match this relay identity: got %s want %s", hex.Enc(rpk), hex.Enc(signer.Pub()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: extract npub from description or metadata
|
||||
description, _ := notification["description"].(string)
|
||||
if userNpub == "" {
|
||||
userNpub = pp.extractNpubFromDescription(description)
|
||||
}
|
||||
|
||||
var pubkey []byte
|
||||
var err error
|
||||
if payerPubkey != nil {
|
||||
pubkey = payerPubkey
|
||||
} else {
|
||||
if userNpub == "" {
|
||||
return fmt.Errorf("no payer_pubkey or npub provided in payment notification")
|
||||
}
|
||||
pubkey, err = pp.npubToPubkey(userNpub)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid npub: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
satsReceived := int64(amount / 1000)
|
||||
monthlyPrice := pp.config.MonthlyPriceSats
|
||||
if monthlyPrice <= 0 {
|
||||
monthlyPrice = 6000
|
||||
}
|
||||
|
||||
days := int((float64(satsReceived) / float64(monthlyPrice)) * 30)
|
||||
if days < 1 {
|
||||
return fmt.Errorf("payment amount too small")
|
||||
}
|
||||
|
||||
if err := pp.db.ExtendSubscription(pubkey, days); err != nil {
|
||||
return fmt.Errorf("failed to extend subscription: %w", err)
|
||||
}
|
||||
|
||||
// Record payment history
|
||||
invoice, _ := notification["invoice"].(string)
|
||||
preimage, _ := notification["preimage"].(string)
|
||||
if err := pp.db.RecordPayment(
|
||||
pubkey, satsReceived, invoice, preimage,
|
||||
); err != nil {
|
||||
log.E.F("failed to record payment: %v", err)
|
||||
}
|
||||
|
||||
// Log helpful identifiers
|
||||
var payerHex = hex.Enc(pubkey)
|
||||
if userNpub == "" {
|
||||
log.I.F("payment processed: payer %s %d sats -> %d days", payerHex, satsReceived, days)
|
||||
} else {
|
||||
log.I.F("payment processed: %s (%s) %d sats -> %d days", userNpub, payerHex, satsReceived, days)
|
||||
}
|
||||
|
||||
// Update ACL follows cache and relay follow list immediately
|
||||
if pp.config != nil && pp.config.ACLMode == "follows" {
|
||||
acl.Registry.AddFollow(pubkey)
|
||||
}
|
||||
// Trigger an immediate follow-list sync in background (best-effort)
|
||||
go func() { _ = pp.syncFollowList() }()
|
||||
|
||||
// Create a note with payment confirmation and private tag
|
||||
if err := pp.createPaymentNote(pubkey, satsReceived, days); err != nil {
|
||||
log.E.F("failed to create payment note: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
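handleNotification converts the received amount (reported in millisats) into whole days of access: sats = amount/1000, then days = sats/monthlyPrice * 30, rejecting anything under one day. The same arithmetic as a small function with a worked example (the 6000-sat default matches the fallback above):

```go
package main

import (
	"errors"
	"fmt"
)

// daysForPayment converts a payment in millisats into days of access at the
// given monthly price in sats, mirroring the conversion above.
func daysForPayment(amountMsat float64, monthlyPriceSats int64) (int, error) {
	if monthlyPriceSats <= 0 {
		monthlyPriceSats = 6000 // default monthly price used above
	}
	sats := int64(amountMsat / 1000)
	days := int(float64(sats) / float64(monthlyPriceSats) * 30)
	if days < 1 {
		return 0, errors.New("payment amount too small")
	}
	return days, nil
}

func main() {
	for _, msat := range []float64{6_000_000, 3_000_000, 100_000} {
		d, err := daysForPayment(msat, 6000)
		fmt.Println(msat, "msat ->", d, "days", err) // 30 days, 15 days, then error
	}
}
```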
// createPaymentNote creates a note recording the payment with private tag for authorization
|
||||
func (pp *PaymentProcessor) createPaymentNote(payerPubkey []byte, satsReceived int64, days int) error {
|
||||
// Get relay identity secret to sign the note
|
||||
skb, err := pp.db.GetRelayIdentitySecret()
|
||||
if err != nil || len(skb) != 32 {
|
||||
return fmt.Errorf("no relay identity configured")
|
||||
}
|
||||
|
||||
// Initialize signer
|
||||
sign := new(p256k.Signer)
|
||||
if err := sign.InitSec(skb); err != nil {
|
||||
return fmt.Errorf("failed to initialize signer: %w", err)
|
||||
}
|
||||
|
||||
// Get subscription info to determine expiry
|
||||
sub, err := pp.db.GetSubscription(payerPubkey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get subscription: %w", err)
|
||||
}
|
||||
|
||||
var expiryTime time.Time
|
||||
if sub != nil && !sub.PaidUntil.IsZero() {
|
||||
expiryTime = sub.PaidUntil
|
||||
} else {
|
||||
expiryTime = time.Now().AddDate(0, 0, days)
|
||||
}
|
||||
|
||||
// Get relay npub for content link
|
||||
relayNpubForContent, err := bech32encoding.BinToNpub(sign.Pub())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encode relay npub: %w", err)
|
||||
}
|
||||
|
||||
// Create the note content with nostr:npub link and dashboard link
|
||||
content := fmt.Sprintf("Payment received: %d sats for %d days. Subscription expires: %s\n\nRelay: nostr:%s\n\nLog in to the relay dashboard to access your configuration at: %s",
|
||||
satsReceived, days, expiryTime.Format("2006-01-02 15:04:05 UTC"), string(relayNpubForContent), pp.getDashboardURL())
|
||||
|
||||
// Build the event
|
||||
ev := event.New()
|
||||
ev.Kind = kind.TextNote.K // Kind 1 for text note
|
||||
ev.Pubkey = sign.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
ev.Content = []byte(content)
|
||||
ev.Tags = tag.NewS()
|
||||
|
||||
// Add "p" tag for the payer
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(payerPubkey)))
|
||||
|
||||
// Add expiration tag (5 days from creation)
|
||||
noteExpiry := time.Now().AddDate(0, 0, 5)
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("expiration", fmt.Sprintf("%d", noteExpiry.Unix())))
|
||||
|
||||
// Add "private" tag with authorized npubs (payer and relay)
|
||||
var authorizedNpubs []string
|
||||
|
||||
// Add payer npub
|
||||
payerNpub, err := bech32encoding.BinToNpub(payerPubkey)
|
||||
if err == nil {
|
||||
authorizedNpubs = append(authorizedNpubs, string(payerNpub))
|
||||
}
|
||||
|
||||
// Add relay npub
|
||||
relayNpub, err := bech32encoding.BinToNpub(sign.Pub())
|
||||
if err == nil {
|
||||
authorizedNpubs = append(authorizedNpubs, string(relayNpub))
|
||||
}
|
||||
|
||||
// Create the private tag with comma-separated npubs
|
||||
if len(authorizedNpubs) > 0 {
|
||||
privateTagValue := strings.Join(authorizedNpubs, ",")
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("private", privateTagValue))
|
||||
}
|
||||
|
||||
// Sign and save the event
|
||||
ev.Sign(sign)
|
||||
if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
|
||||
return fmt.Errorf("failed to save payment note: %w", err)
|
||||
}
|
||||
|
||||
log.I.F("created payment note for %s with private authorization", hex.Enc(payerPubkey))
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateWelcomeNote creates a welcome note for first-time users with private tag for authorization
|
||||
func (pp *PaymentProcessor) CreateWelcomeNote(userPubkey []byte) error {
|
||||
// Get relay identity secret to sign the note
|
||||
skb, err := pp.db.GetRelayIdentitySecret()
|
||||
if err != nil || len(skb) != 32 {
|
||||
return fmt.Errorf("no relay identity configured")
|
||||
}
|
||||
|
||||
// Initialize signer
|
||||
sign := new(p256k.Signer)
|
||||
if err := sign.InitSec(skb); err != nil {
|
||||
return fmt.Errorf("failed to initialize signer: %w", err)
|
||||
}
|
||||
|
||||
monthlyPrice := pp.config.MonthlyPriceSats
|
||||
if monthlyPrice <= 0 {
|
||||
monthlyPrice = 6000
|
||||
}
|
||||
|
||||
// Get relay npub for content link
|
||||
relayNpubForContent, err := bech32encoding.BinToNpub(sign.Pub())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encode relay npub: %w", err)
|
||||
}
|
||||
|
||||
// Create the welcome note content with nostr:npub link
|
||||
content := fmt.Sprintf(`Welcome to the relay! 🎉
|
||||
|
||||
You have a FREE 30-day trial that started when you first logged in.
|
||||
|
||||
💰 Subscription Details:
|
||||
- Monthly price: %d sats
|
||||
- Trial period: 30 days from first login
|
||||
|
||||
💡 How to Subscribe:
|
||||
To extend your subscription after the trial ends, simply zap this note with the amount you want to pay. Each %d sats = 30 days of access.
|
||||
|
||||
⚡ Payment Instructions:
|
||||
1. Use any Lightning wallet that supports zaps
|
||||
2. Zap this note with your payment
|
||||
3. Your subscription will be automatically extended
|
||||
|
||||
Relay: nostr:%s
|
||||
|
||||
Log in to the relay dashboard to access your configuration at: %s
|
||||
|
||||
Enjoy your time on the relay!`, monthlyPrice, monthlyPrice, string(relayNpubForContent), pp.getDashboardURL())
|
||||
|
||||
// Build the event
|
||||
ev := event.New()
|
||||
ev.Kind = kind.TextNote.K // Kind 1 for text note
|
||||
ev.Pubkey = sign.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
ev.Content = []byte(content)
|
||||
ev.Tags = tag.NewS()
|
||||
|
||||
// Add "p" tag for the user
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(userPubkey)))
|
||||
|
||||
// Add expiration tag (5 days from creation)
|
||||
noteExpiry := time.Now().AddDate(0, 0, 5)
|
||||
*ev.Tags = append(*ev.Tags, tag.NewFromAny("expiration", fmt.Sprintf("%d", noteExpiry.Unix())))
|
||||
|
||||
// Add "private" tag with authorized npubs (user and relay)
|
||||
var authorizedNpubs []string
|
    // Add user npub
    userNpub, err := bech32encoding.BinToNpub(userPubkey)
    if err == nil {
        authorizedNpubs = append(authorizedNpubs, string(userNpub))
    }

    // Add relay npub
    relayNpub, err := bech32encoding.BinToNpub(sign.Pub())
    if err == nil {
        authorizedNpubs = append(authorizedNpubs, string(relayNpub))
    }

    // Create the private tag with comma-separated npubs
    if len(authorizedNpubs) > 0 {
        privateTagValue := strings.Join(authorizedNpubs, ",")
        *ev.Tags = append(*ev.Tags, tag.NewFromAny("private", privateTagValue))
    }

    // Add a special tag to mark this as a welcome note
    *ev.Tags = append(*ev.Tags, tag.NewFromAny("welcome", "first-time-user"))

    // Sign and save the event
    ev.Sign(sign)
    if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
        return fmt.Errorf("failed to save welcome note: %w", err)
    }

    log.I.F("created welcome note for first-time user %s", hex.Enc(userPubkey))
    return nil
}

// SetDashboardURL sets the dynamic dashboard URL based on HTTP request
func (pp *PaymentProcessor) SetDashboardURL(url string) {
    pp.dashboardURL = url
}

// getDashboardURL returns the dashboard URL for the relay
func (pp *PaymentProcessor) getDashboardURL() string {
    // Use dynamic URL if available
    if pp.dashboardURL != "" {
        return pp.dashboardURL
    }
    // Fallback to static config
    if pp.config.RelayURL != "" {
        return pp.config.RelayURL
    }
    // Default fallback if no URL is configured
    return "https://your-relay.example.com"
}

// extractNpubFromDescription extracts an npub from the payment description
func (pp *PaymentProcessor) extractNpubFromDescription(description string) string {
    // check if the entire description is just an npub
    description = strings.TrimSpace(description)
    if strings.HasPrefix(description, "npub1") && len(description) == 63 {
        return description
    }

    // Look for npub1... pattern in the description
    parts := strings.Fields(description)
    for _, part := range parts {
        if strings.HasPrefix(part, "npub1") && len(part) == 63 {
            return part
        }
    }

    return ""
}

// npubToPubkey converts an npub string to pubkey bytes
func (pp *PaymentProcessor) npubToPubkey(npubStr string) ([]byte, error) {
    // Validate npub format
    if !strings.HasPrefix(npubStr, "npub1") || len(npubStr) != 63 {
        return nil, fmt.Errorf("invalid npub format")
    }

    // Decode using bech32encoding
    prefix, value, err := bech32encoding.Decode([]byte(npubStr))
    if err != nil {
        return nil, fmt.Errorf("failed to decode npub: %w", err)
    }

    if !strings.EqualFold(string(prefix), "npub") {
        return nil, fmt.Errorf("invalid prefix: %s", string(prefix))
    }

    pubkey, ok := value.([]byte)
    if !ok {
        return nil, fmt.Errorf("decoded value is not []byte")
    }

    return pubkey, nil
}

// UpdateRelayProfile creates or updates the relay's kind 0 profile with subscription information
func (pp *PaymentProcessor) UpdateRelayProfile() error {
    // Get relay identity secret to sign the profile
    skb, err := pp.db.GetRelayIdentitySecret()
    if err != nil || len(skb) != 32 {
        return fmt.Errorf("no relay identity configured")
    }

    // Initialize signer
    sign := new(p256k.Signer)
    if err := sign.InitSec(skb); err != nil {
        return fmt.Errorf("failed to initialize signer: %w", err)
    }

    monthlyPrice := pp.config.MonthlyPriceSats
    if monthlyPrice <= 0 {
        monthlyPrice = 6000
    }

    // Calculate daily rate
    dailyRate := monthlyPrice / 30

    // Get relay wss:// URL - use dashboard URL but with wss:// scheme
    relayURL := strings.Replace(pp.getDashboardURL(), "https://", "wss://", 1)

    // Create profile content as JSON
    profileContent := fmt.Sprintf(`{
"name": "Relay Bot",
"about": "This relay requires a subscription to access. Zap any of my notes to pay for access. Monthly price: %d sats (%d sats/day). Relay: %s",
"lud16": "",
"nip05": "",
"website": "%s"
}`, monthlyPrice, dailyRate, relayURL, pp.getDashboardURL())

    // Build the profile event
    ev := event.New()
    ev.Kind = kind.ProfileMetadata.K // Kind 0 for profile metadata
    ev.Pubkey = sign.Pub()
    ev.CreatedAt = timestamp.Now().V
    ev.Content = []byte(profileContent)
    ev.Tags = tag.NewS()

    // Sign and save the event
    ev.Sign(sign)
    if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
        return fmt.Errorf("failed to save relay profile: %w", err)
    }

    log.I.F("updated relay profile with subscription information")
    return nil
}

// decodeAnyPubkey decodes a public key from either hex string or npub format
func decodeAnyPubkey(s string) ([]byte, error) {
    s = strings.TrimSpace(s)
    if strings.HasPrefix(s, "npub1") {
        prefix, value, err := bech32encoding.Decode([]byte(s))
        if err != nil {
            return nil, fmt.Errorf("failed to decode npub: %w", err)
        }
        if !strings.EqualFold(string(prefix), "npub") {
            return nil, fmt.Errorf("invalid prefix: %s", string(prefix))
        }
        b, ok := value.([]byte)
        if !ok {
            return nil, fmt.Errorf("decoded value is not []byte")
        }
        return b, nil
    }
    // assume hex-encoded public key
    return hex.Dec(s)
}

237 app/publisher.go
@@ -4,21 +4,26 @@ import (
    "context"
    "fmt"
    "sync"
    "time"

    "encoders.orly/envelopes/eventenvelope"
    "encoders.orly/event"
    "encoders.orly/filter"
    "github.com/coder/websocket"
    "interfaces.orly/publisher"
    "interfaces.orly/typer"
    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/encoders/envelopes/eventenvelope"
    "next.orly.dev/pkg/encoders/event"
    "next.orly.dev/pkg/encoders/filter"
    "next.orly.dev/pkg/encoders/hex"
    "next.orly.dev/pkg/encoders/kind"
    "next.orly.dev/pkg/interfaces/publisher"
    "next.orly.dev/pkg/interfaces/typer"
    "next.orly.dev/pkg/utils"
)

const Type = "socketapi"

type Subscription struct {
    remote string
    remote string
    AuthedPubkey []byte
    *filter.S
}

@@ -46,6 +51,9 @@ type W struct {
    // associated with this WebSocket connection. It is used to determine which
    // notifications or data should be received by the subscriber.
    Filters *filter.S

    // AuthedPubkey is the authenticated pubkey associated with the listener (if any).
    AuthedPubkey []byte
}

func (w *W) Type() (typeName string) { return Type }

@@ -56,7 +64,7 @@ func (w *W) Type() (typeName string) { return Type }
type P struct {
    c context.Context
    // Mx is the mutex for the Map.
    Mx sync.Mutex
    Mx sync.RWMutex
    // Map is the map of subscribers and subscriptions from the websocket api.
    Map
}

@@ -94,17 +102,17 @@ func (p *P) Receive(msg typer.T) {
    if m.Cancel {
        if m.Id == "" {
            p.removeSubscriber(m.Conn)
            log.D.F("removed listener %s", m.remote)
            // log.D.F("removed listener %s", m.remote)
        } else {
            p.removeSubscriberId(m.Conn, m.Id)
            log.D.C(
                func() string {
                    return fmt.Sprintf(
                        "removed subscription %s for %s", m.Id,
                        m.remote,
                    )
                },
            )
            // log.D.C(
            // func() string {
            // return fmt.Sprintf(
            // "removed subscription %s for %s", m.Id,
            // m.remote,
            // )
            // },
            // )
        }
        return
    }

@@ -112,27 +120,31 @@ func (p *P) Receive(msg typer.T) {
    defer p.Mx.Unlock()
    if subs, ok := p.Map[m.Conn]; !ok {
        subs = make(map[string]Subscription)
        subs[m.Id] = Subscription{S: m.Filters, remote: m.remote}
        subs[m.Id] = Subscription{
            S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
        }
        p.Map[m.Conn] = subs
        log.D.C(
            func() string {
                return fmt.Sprintf(
                    "created new subscription for %s, %s",
                    m.remote,
                    m.Filters.Marshal(nil),
                )
            },
        )
        // log.D.C(
        // func() string {
        // return fmt.Sprintf(
        // "created new subscription for %s, %s",
        // m.remote,
        // m.Filters.Marshal(nil),
        // )
        // },
        // )
    } else {
        subs[m.Id] = Subscription{S: m.Filters, remote: m.remote}
        log.D.C(
            func() string {
                return fmt.Sprintf(
                    "added subscription %s for %s", m.Id,
                    m.remote,
                )
            },
        )
        subs[m.Id] = Subscription{
            S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
        }
        // log.D.C(
        // func() string {
        // return fmt.Sprintf(
        // "added subscription %s for %s", m.Id,
        // m.remote,
        // )
        // },
        // )
    }
    }
}

@@ -150,71 +162,140 @@ func (p *P) Receive(msg typer.T) {
// for unauthenticated users when events are privileged.
func (p *P) Deliver(ev *event.E) {
    var err error
    p.Mx.Lock()
    defer p.Mx.Unlock()
    log.D.C(
        func() string {
            return fmt.Sprintf(
                "delivering event %0x to websocket subscribers %d", ev.ID,
                len(p.Map),
            )
        },
    )
    // Snapshot the deliveries under read lock to avoid holding locks during I/O
    p.Mx.RLock()
    type delivery struct {
        w *websocket.Conn
        id string
        sub Subscription
    }
    var deliveries []delivery
    for w, subs := range p.Map {
        for id, subscriber := range subs {
            if !subscriber.Match(ev) {
                continue
            if subscriber.Match(ev) {
                deliveries = append(
                    deliveries, delivery{w: w, id: id, sub: subscriber},
                )
            }
            // if p.Server.AuthRequired() {
            // if !auth.CheckPrivilege(w.AuthedPubkey(), ev) {
            // continue
            // }
            // }
            var res *eventenvelope.Result
            if res, err = eventenvelope.NewResultWith(id, ev); chk.E(err) {
                continue
            }
            if err = w.Write(
                p.c, websocket.MessageText, res.Marshal(nil),
            ); chk.E(err) {
                p.removeSubscriber(w)
                if err = w.CloseNow(); chk.E(err) {
                    continue
                }
                continue
            }
            log.D.C(
                func() string {
                    return fmt.Sprintf(
                        "dispatched event %0x to subscription %s, %s",
                        ev.ID, id, subscriber.remote,
                    )
                },
            )
        }
    }
    p.Mx.RUnlock()
    if len(deliveries) > 0 {
        log.D.C(
            func() string {
                return fmt.Sprintf(
                    "delivering event %0x to websocket subscribers %d", ev.ID,
                    len(deliveries),
                )
            },
        )
    }
    for _, d := range deliveries {
        // If the event is privileged, enforce that the subscriber's authed pubkey matches
        // either the event pubkey or appears in any 'p' tag of the event.
        if kind.IsPrivileged(ev.Kind) && len(d.sub.AuthedPubkey) > 0 {
            pk := d.sub.AuthedPubkey
            allowed := false
            // Direct author match
            if utils.FastEqual(ev.Pubkey, pk) {
                allowed = true
            } else if ev.Tags != nil {
                for _, pTag := range ev.Tags.GetAll([]byte("p")) {
                    // pTag.Value() returns []byte hex string; decode to bytes
                    dec, derr := hex.Dec(string(pTag.Value()))
                    if derr != nil {
                        continue
                    }
                    if utils.FastEqual(dec, pk) {
                        allowed = true
                        break
                    }
                }
            }
            if !allowed {
                log.D.F("subscription delivery DENIED for privileged event %s to %s (auth mismatch)",
                    hex.Enc(ev.ID), d.sub.remote)
                // Skip delivery for this subscriber
                continue
            }
        }

        var res *eventenvelope.Result
        if res, err = eventenvelope.NewResultWith(d.id, ev); chk.E(err) {
            log.E.F("failed to create event envelope for %s to %s: %v",
                hex.Enc(ev.ID), d.sub.remote, err)
            continue
        }

        // Log delivery attempt
        msgData := res.Marshal(nil)
        log.D.F("attempting delivery of event %s (kind=%d, len=%d) to subscription %s @ %s",
            hex.Enc(ev.ID), ev.Kind, len(msgData), d.id, d.sub.remote)

        // Use a separate context with timeout for writes to prevent race conditions
        // where the publisher context gets cancelled while writing events
        writeCtx, cancel := context.WithTimeout(
            context.Background(), DefaultWriteTimeout,
        )
        defer cancel()

        deliveryStart := time.Now()
        if err = d.w.Write(
            writeCtx, websocket.MessageText, msgData,
        ); err != nil {
            deliveryDuration := time.Since(deliveryStart)

            // Log detailed failure information
            log.E.F("subscription delivery FAILED: event=%s to=%s sub=%s duration=%v error=%v",
                hex.Enc(ev.ID), d.sub.remote, d.id, deliveryDuration, err)

            // Check for timeout specifically
            if writeCtx.Err() != nil {
                log.E.F("subscription delivery TIMEOUT: event=%s to=%s after %v (limit=%v)",
                    hex.Enc(ev.ID), d.sub.remote, deliveryDuration, DefaultWriteTimeout)
            }

            // Log connection cleanup
            log.D.F("removing failed subscriber connection: %s", d.sub.remote)

            // On error, remove the subscriber connection safely
            p.removeSubscriber(d.w)
            _ = d.w.CloseNow()
            continue
        }

        deliveryDuration := time.Since(deliveryStart)
        log.D.F("subscription delivery SUCCESS: event=%s to=%s sub=%s duration=%v len=%d",
            hex.Enc(ev.ID), d.sub.remote, d.id, deliveryDuration, len(msgData))

        // Log slow deliveries for performance monitoring
        if deliveryDuration > time.Millisecond*50 {
            log.D.F("SLOW subscription delivery: event=%s to=%s duration=%v (>50ms)",
                hex.Enc(ev.ID), d.sub.remote, deliveryDuration)
        }
    }
}

// removeSubscriberId removes a specific subscription from a subscriber
// websocket.
func (p *P) removeSubscriberId(ws *websocket.Conn, id string) {
    p.Mx.Lock()
    defer p.Mx.Unlock()
    var subs map[string]Subscription
    var ok bool
    if subs, ok = p.Map[ws]; ok {
        delete(p.Map[ws], id)
        _ = subs
        if len(subs) == 0 {
        delete(subs, id)
        // Check the actual map after deletion, not the original reference
        if len(p.Map[ws]) == 0 {
            delete(p.Map, ws)
        }
    }
    p.Mx.Unlock()
}

// removeSubscriber removes a websocket from the P collection.
func (p *P) removeSubscriber(ws *websocket.Conn) {
    p.Mx.Lock()
    defer p.Mx.Unlock()
    clear(p.Map[ws])
    delete(p.Map, ws)
    p.Mx.Unlock()
}
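The reworked Deliver above follows a snapshot-then-write pattern: matching subscriptions are collected under a read lock, the lock is released, and only then are the comparatively slow websocket writes performed, each bounded by its own timeout. A minimal, self-contained sketch of that pattern is shown below; Hub, Conn and the other names are illustrative stand-ins, not the relay's actual types.

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// Conn stands in for a network connection; Write simulates slow I/O.
type Conn struct{ name string }

func (c *Conn) Write(ctx context.Context, msg string) error {
	select {
	case <-time.After(10 * time.Millisecond): // pretend network latency
		fmt.Printf("sent %q to %s\n", msg, c.name)
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

// Hub maps connections to subscription IDs, guarded by an RWMutex.
type Hub struct {
	mx   sync.RWMutex
	subs map[*Conn][]string
}

// Deliver snapshots the matching targets under a read lock, then writes
// without holding any lock, each write bounded by its own timeout.
func (h *Hub) Deliver(msg string) {
	type target struct {
		c  *Conn
		id string
	}
	var targets []target
	h.mx.RLock()
	for c, ids := range h.subs {
		for _, id := range ids {
			targets = append(targets, target{c: c, id: id})
		}
	}
	h.mx.RUnlock()

	for _, t := range targets {
		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
		if err := t.c.Write(ctx, msg+" ["+t.id+"]"); err != nil {
			fmt.Println("drop subscriber:", t.c.name, err)
		}
		cancel() // release the timer promptly instead of deferring in a loop
	}
}

func main() {
	h := &Hub{subs: map[*Conn][]string{
		{name: "alice"}: {"sub1"},
		{name: "bob"}:   {"sub2", "sub3"},
	}}
	h.Deliver("hello")
}
```

Releasing the lock before writing keeps one stalled client from blocking every other subscriber, at the cost that a subscription cancelled mid-delivery may still receive one final message.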
578 app/server.go

@@ -2,16 +2,27 @@ package app

import (
    "context"
    "fmt"
    "encoding/json"
    "io"
    "log"
    "net/http"
    "net/http/httputil"
    "net/url"
    "strconv"
    "strings"
    "sync"
    "time"

    "database.orly"
    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "next.orly.dev/app/config"
    "protocol.orly/publish"
    "next.orly.dev/pkg/acl"
    "next.orly.dev/pkg/database"
    "next.orly.dev/pkg/encoders/event"
    "next.orly.dev/pkg/encoders/filter"
    "next.orly.dev/pkg/encoders/hex"
    "next.orly.dev/pkg/encoders/tag"
    "next.orly.dev/pkg/protocol/auth"
    "next.orly.dev/pkg/protocol/publish"
)

type Server struct {
@@ -22,26 +33,55 @@ type Server struct {
    publishers *publish.S
    Admins [][]byte
    *database.D

    // optional reverse proxy for dev web server
    devProxy *httputil.ReverseProxy

    // Challenge storage for HTTP UI authentication
    challengeMutex sync.RWMutex
    challenges map[string][]byte

    paymentProcessor *PaymentProcessor
}

func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    log.T.C(
        func() string {
            return fmt.Sprintf("path %v header %v", r.URL, r.Header)
        },
    // Set CORS headers for all responses
    w.Header().Set("Access-Control-Allow-Origin", "*")
    w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
    w.Header().Set(
        "Access-Control-Allow-Headers", "Content-Type, Authorization",
    )
    if r.Header.Get("Upgrade") == "websocket" {
        s.HandleWebsocket(w, r)
    } else if r.Header.Get("Accept") == "application/nostr+json" {
        s.HandleRelayInfo(w, r)
    } else {
        if s.mux == nil {
            http.Error(w, "Upgrade required", http.StatusUpgradeRequired)
        } else {
            s.mux.ServeHTTP(w, r)
        }

    // Handle preflight OPTIONS requests
    if r.Method == "OPTIONS" {
        w.WriteHeader(http.StatusOK)
        return
    }

    // If this is a websocket request, only intercept the relay root path.
    // This allows other websocket paths (e.g., Vite HMR) to be handled by the dev proxy when enabled.
    if r.Header.Get("Upgrade") == "websocket" {
        if s.mux != nil && s.Config != nil && s.Config.WebDisableEmbedded && s.Config.WebDevProxyURL != "" && r.URL.Path != "/" {
            // forward to mux (which will proxy to dev server)
            s.mux.ServeHTTP(w, r)
            return
        }
        s.HandleWebsocket(w, r)
        return
    }

    if r.Header.Get("Accept") == "application/nostr+json" {
        s.HandleRelayInfo(w, r)
        return
    }

    if s.mux == nil {
        http.Error(w, "Upgrade required", http.StatusUpgradeRequired)
        return
    }
    s.mux.ServeHTTP(w, r)
}

func (s *Server) ServiceURL(req *http.Request) (st string) {
    host := req.Header.Get("X-Forwarded-Host")
    if host == "" {
@@ -72,3 +112,505 @@ func (s *Server) ServiceURL(req *http.Request) (st string) {
    }
    return proto + "://" + host
}

// DashboardURL constructs HTTPS URL for the dashboard based on the HTTP request
func (s *Server) DashboardURL(req *http.Request) string {
    host := req.Header.Get("X-Forwarded-Host")
    if host == "" {
        host = req.Host
    }
    return "https://" + host
}

// UserInterface sets up a basic Nostr NDK interface that allows users to log into the relay user interface
func (s *Server) UserInterface() {
    if s.mux == nil {
        s.mux = http.NewServeMux()
    }

    // If dev proxy is configured, initialize it
    if s.Config != nil && s.Config.WebDisableEmbedded && s.Config.WebDevProxyURL != "" {
        proxyURL := s.Config.WebDevProxyURL
        // Add default scheme if missing to avoid: proxy error: unsupported protocol scheme ""
        if !strings.Contains(proxyURL, "://") {
            proxyURL = "http://" + proxyURL
        }
        if target, err := url.Parse(proxyURL); !chk.E(err) {
            if target.Scheme == "" || target.Host == "" {
                // invalid URL, disable proxy
                log.Printf(
                    "invalid ORLY_WEB_DEV_PROXY_URL: %q — disabling dev proxy\n",
                    s.Config.WebDevProxyURL,
                )
            } else {
                s.devProxy = httputil.NewSingleHostReverseProxy(target)
                // Ensure Host header points to upstream for dev servers that care
                origDirector := s.devProxy.Director
                s.devProxy.Director = func(req *http.Request) {
                    origDirector(req)
                    req.Host = target.Host
                }
            }
        }
    }

    // Initialize challenge storage if not already done
    if s.challenges == nil {
        s.challengeMutex.Lock()
        s.challenges = make(map[string][]byte)
        s.challengeMutex.Unlock()
    }

    // Serve the main login interface (and static assets) or proxy in dev mode
    s.mux.HandleFunc("/", s.handleLoginInterface)

    // API endpoints for authentication
    s.mux.HandleFunc("/api/auth/challenge", s.handleAuthChallenge)
    s.mux.HandleFunc("/api/auth/login", s.handleAuthLogin)
    s.mux.HandleFunc("/api/auth/status", s.handleAuthStatus)
    s.mux.HandleFunc("/api/auth/logout", s.handleAuthLogout)
    s.mux.HandleFunc("/api/permissions/", s.handlePermissions)
    // Export endpoints
    s.mux.HandleFunc("/api/export", s.handleExport)
    s.mux.HandleFunc("/api/export/mine", s.handleExportMine)
    // Events endpoints
    s.mux.HandleFunc("/api/events/mine", s.handleEventsMine)
    // Import endpoint (admin only)
    s.mux.HandleFunc("/api/import", s.handleImport)
}

// handleLoginInterface serves the main user interface for login
func (s *Server) handleLoginInterface(w http.ResponseWriter, r *http.Request) {
    // In dev mode with proxy configured, forward to dev server
    if s.Config != nil && s.Config.WebDisableEmbedded && s.devProxy != nil {
        s.devProxy.ServeHTTP(w, r)
        return
    }
    // If embedded UI is disabled but no proxy configured, return a helpful message
    if s.Config != nil && s.Config.WebDisableEmbedded {
        w.Header().Set("Content-Type", "text/plain; charset=utf-8")
        w.WriteHeader(http.StatusNotFound)
        w.Write([]byte("Web UI disabled (ORLY_WEB_DISABLE=true). Run the web app in standalone dev mode (e.g., npm run dev) or set ORLY_WEB_DEV_PROXY_URL to proxy through this server."))
        return
    }
    // Default: serve embedded React app
    fileServer := http.FileServer(GetReactAppFS())
    fileServer.ServeHTTP(w, r)
}

// handleAuthChallenge generates and returns an authentication challenge
func (s *Server) handleAuthChallenge(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodGet {
        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
        return
    }

    // Generate a proper challenge using the auth package
    challenge := auth.GenerateChallenge()
    challengeHex := hex.Enc(challenge)

    // Store the challenge using the hex value as the key for easy lookup
    s.challengeMutex.Lock()
    s.challenges[challengeHex] = challenge
    s.challengeMutex.Unlock()

    w.Header().Set("Content-Type", "application/json")
    w.Write([]byte(`{"challenge": "` + challengeHex + `"}`))
}

// handleAuthLogin processes authentication requests
func (s *Server) handleAuthLogin(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodPost {
        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
        return
    }

    w.Header().Set("Content-Type", "application/json")

    // Read the request body
    body, err := io.ReadAll(r.Body)
    if chk.E(err) {
        w.Write([]byte(`{"success": false, "error": "Failed to read request body"}`))
        return
    }

    // Parse the signed event
    var evt event.E
    if err = json.Unmarshal(body, &evt); chk.E(err) {
        w.Write([]byte(`{"success": false, "error": "Invalid event format"}`))
        return
    }

    // Extract the challenge from the event to look up the stored challenge
    challengeTag := evt.Tags.GetFirst([]byte("challenge"))
    if challengeTag == nil {
        w.Write([]byte(`{"success": false, "error": "Challenge tag missing from event"}`))
        return
    }

    challengeHex := string(challengeTag.Value())

    // Retrieve the stored challenge
    s.challengeMutex.RLock()
    _, exists := s.challenges[challengeHex]
    s.challengeMutex.RUnlock()

    if !exists {
        w.Write([]byte(`{"success": false, "error": "Invalid or expired challenge"}`))
        return
    }

    // Clean up the used challenge
    s.challengeMutex.Lock()
    delete(s.challenges, challengeHex)
    s.challengeMutex.Unlock()

    relayURL := s.ServiceURL(r)

    // Validate the authentication event with the correct challenge
    // The challenge in the event tag is hex-encoded, so we need to pass the hex string as bytes
    ok, err := auth.Validate(&evt, []byte(challengeHex), relayURL)
    if chk.E(err) || !ok {
        errorMsg := "Authentication validation failed"
        if err != nil {
            errorMsg = err.Error()
        }
        w.Write([]byte(`{"success": false, "error": "` + errorMsg + `"}`))
        return
    }

    // Authentication successful: set a simple session cookie with the pubkey
    cookie := &http.Cookie{
        Name: "orly_auth",
        Value: hex.Enc(evt.Pubkey),
        Path: "/",
        HttpOnly: true,
        SameSite: http.SameSiteLaxMode,
        MaxAge: 60 * 60 * 24 * 30, // 30 days
    }
    http.SetCookie(w, cookie)
    w.Write([]byte(`{"success": true, "pubkey": "` + hex.Enc(evt.Pubkey) + `", "message": "Authentication successful"}`))
}

// handleAuthStatus returns the current authentication status
func (s *Server) handleAuthStatus(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodGet {
        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
        return
    }

    w.Header().Set("Content-Type", "application/json")
    // Check for auth cookie
    if c, err := r.Cookie("orly_auth"); err == nil && c.Value != "" {
        // Validate pubkey format (hex)
        if _, err := hex.Dec(c.Value); !chk.E(err) {
            w.Write([]byte(`{"authenticated": true, "pubkey": "` + c.Value + `"}`))
            return
        }
    }
    w.Write([]byte(`{"authenticated": false}`))
}

// handleAuthLogout clears the auth cookie
func (s *Server) handleAuthLogout(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodPost {
        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
        return
    }
    // Expire the cookie
    http.SetCookie(
        w, &http.Cookie{
            Name: "orly_auth",
            Value: "",
            Path: "/",
            MaxAge: -1,
            HttpOnly: true,
            SameSite: http.SameSiteLaxMode,
        },
    )
    w.Header().Set("Content-Type", "application/json")
    w.Write([]byte(`{"success": true}`))
}

// handlePermissions returns the permission level for a given pubkey
func (s *Server) handlePermissions(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodGet {
        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
        return
    }

    // Extract pubkey from URL path
    pubkeyHex := strings.TrimPrefix(r.URL.Path, "/api/permissions/")
    if pubkeyHex == "" || pubkeyHex == "/" {
        http.Error(w, "Invalid pubkey", http.StatusBadRequest)
        return
    }

    // Convert hex to binary pubkey
    pubkey, err := hex.Dec(pubkeyHex)
    if chk.E(err) {
        http.Error(w, "Invalid pubkey format", http.StatusBadRequest)
        return
    }

    // Get access level using acl registry
    permission := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)

    // Set content type and write JSON response
    w.Header().Set("Content-Type", "application/json")

    // Format response as proper JSON
    response := struct {
        Permission string `json:"permission"`
    }{
        Permission: permission,
    }

    // Marshal and write the response
    jsonData, err := json.Marshal(response)
    if chk.E(err) {
        http.Error(
            w, "Error generating response", http.StatusInternalServerError,
        )
        return
    }

    w.Write(jsonData)
}

// handleExport streams all events as JSONL (NDJSON). Admins only.
func (s *Server) handleExport(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodGet {
        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
        return
    }

    // Require auth cookie
    c, err := r.Cookie("orly_auth")
    if err != nil || c.Value == "" {
        http.Error(w, "Not authenticated", http.StatusUnauthorized)
        return
    }
    requesterPubHex := c.Value
    requesterPub, err := hex.Dec(requesterPubHex)
    if chk.E(err) {
        http.Error(w, "Invalid auth cookie", http.StatusUnauthorized)
        return
    }
    // Check permissions
    if acl.Registry.GetAccessLevel(requesterPub, r.RemoteAddr) != "admin" {
        http.Error(w, "Forbidden", http.StatusForbidden)
        return
    }

    // Optional filtering by pubkey(s)
    var pks [][]byte
    q := r.URL.Query()
    for _, pkHex := range q["pubkey"] {
        if pkHex == "" {
            continue
        }
        if pk, err := hex.Dec(pkHex); !chk.E(err) {
            pks = append(pks, pk)
        }
    }

    w.Header().Set("Content-Type", "application/x-ndjson")
    filename := "events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
    w.Header().Set(
        "Content-Disposition", "attachment; filename=\""+filename+"\"",
    )

    // Stream export
    s.D.Export(s.Ctx, w, pks...)
}

// handleExportMine streams only the authenticated user's events as JSONL (NDJSON).
func (s *Server) handleExportMine(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodGet {
        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
        return
    }

    // Require auth cookie
    c, err := r.Cookie("orly_auth")
    if err != nil || c.Value == "" {
        http.Error(w, "Not authenticated", http.StatusUnauthorized)
        return
    }
    pubkey, err := hex.Dec(c.Value)
    if chk.E(err) {
        http.Error(w, "Invalid auth cookie", http.StatusUnauthorized)
        return
    }

    w.Header().Set("Content-Type", "application/x-ndjson")
    filename := "my-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
    w.Header().Set(
        "Content-Disposition", "attachment; filename=\""+filename+"\"",
    )

    // Stream export for this user's pubkey only
    s.D.Export(s.Ctx, w, pubkey)
}

// handleImport receives a JSONL/NDJSON file or body and enqueues an async import. Admins only.
func (s *Server) handleImport(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodPost {
        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
        return
    }

    // Require auth cookie
    c, err := r.Cookie("orly_auth")
    if err != nil || c.Value == "" {
        http.Error(w, "Not authenticated", http.StatusUnauthorized)
        return
    }
    requesterPub, err := hex.Dec(c.Value)
    if chk.E(err) {
        http.Error(w, "Invalid auth cookie", http.StatusUnauthorized)
        return
    }
    // Admins only
    if acl.Registry.GetAccessLevel(requesterPub, r.RemoteAddr) != "admin" {
        http.Error(w, "Forbidden", http.StatusForbidden)
        return
    }

    ct := r.Header.Get("Content-Type")
    if strings.HasPrefix(ct, "multipart/form-data") {
        if err := r.ParseMultipartForm(32 << 20); chk.E(err) { // 32MB memory, rest to temp files
            http.Error(w, "Failed to parse form", http.StatusBadRequest)
            return
        }
        file, _, err := r.FormFile("file")
        if chk.E(err) {
            http.Error(w, "Missing file", http.StatusBadRequest)
            return
        }
        defer file.Close()
        s.D.Import(file)
    } else {
        if r.Body == nil {
            http.Error(w, "Empty request body", http.StatusBadRequest)
            return
        }
        s.D.Import(r.Body)
    }

    w.Header().Set("Content-Type", "application/json")
    w.WriteHeader(http.StatusAccepted)
    w.Write([]byte(`{"success": true, "message": "Import started"}`))
}

// handleEventsMine returns the authenticated user's events in JSON format with pagination
func (s *Server) handleEventsMine(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodGet {
        http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
        return
    }

    // Require auth cookie
    c, err := r.Cookie("orly_auth")
    if err != nil || c.Value == "" {
        http.Error(w, "Not authenticated", http.StatusUnauthorized)
        return
    }
    pubkey, err := hex.Dec(c.Value)
    if chk.E(err) {
        http.Error(w, "Invalid auth cookie", http.StatusUnauthorized)
        return
    }

    // Parse pagination parameters
    query := r.URL.Query()
    limit := 50 // default limit
    if l := query.Get("limit"); l != "" {
        if parsed, err := strconv.Atoi(l); err == nil && parsed > 0 && parsed <= 100 {
            limit = parsed
        }
    }

    offset := 0
    if o := query.Get("offset"); o != "" {
        if parsed, err := strconv.Atoi(o); err == nil && parsed >= 0 {
            offset = parsed
        }
    }

    // Use QueryEvents with filter for this user's events
    f := &filter.F{
        Authors: tag.NewFromBytesSlice(pubkey),
    }

    log.Printf("DEBUG: Querying events for pubkey: %s", hex.Enc(pubkey))
    events, err := s.D.QueryEvents(s.Ctx, f)
    if chk.E(err) {
        log.Printf("DEBUG: QueryEvents failed: %v", err)
        http.Error(w, "Failed to query events", http.StatusInternalServerError)
        return
    }
    log.Printf("DEBUG: QueryEvents returned %d events", len(events))

    // If no events found, let's also check if there are any events at all in the database
    if len(events) == 0 {
        // Create a filter to get any events (no authors filter)
        allEventsFilter := &filter.F{}
        allEvents, err := s.D.QueryEvents(s.Ctx, allEventsFilter)
        if err == nil {
            log.Printf("DEBUG: Total events in database: %d", len(allEvents))
        } else {
            log.Printf("DEBUG: Failed to query all events: %v", err)
        }
    }

    // Events are already sorted by QueryEvents in reverse chronological order

    // Apply offset and limit manually since QueryEvents doesn't support offset
    totalEvents := len(events)
    start := offset
    if start > totalEvents {
        start = totalEvents
    }
    end := start + limit
    if end > totalEvents {
        end = totalEvents
    }

    paginatedEvents := events[start:end]

    // Convert events to JSON response format
    type EventResponse struct {
        ID string `json:"id"`
        Kind int `json:"kind"`
        CreatedAt int64 `json:"created_at"`
        Content string `json:"content"`
        RawJSON string `json:"raw_json"`
    }

    response := struct {
        Events []EventResponse `json:"events"`
        Total int `json:"total"`
        Offset int `json:"offset"`
        Limit int `json:"limit"`
    }{
        Events: make([]EventResponse, len(paginatedEvents)),
        Total: totalEvents,
        Offset: offset,
        Limit: limit,
    }

    for i, ev := range paginatedEvents {
        response.Events[i] = EventResponse{
            ID: hex.Enc(ev.ID),
            Kind: int(ev.Kind),
            CreatedAt: int64(ev.CreatedAt),
            Content: string(ev.Content),
            RawJSON: string(ev.Serialize()),
        }
    }

    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(response)
}
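The HTTP login flow above hinges on a mutex-guarded, one-time challenge store: /api/auth/challenge issues a random value, and /api/auth/login looks it up, deletes it, and only then validates the signed event, so a challenge can never be accepted twice. A standalone sketch of that storage pattern, using only the standard library and invented names rather than the relay's auth package:

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
	"sync"
)

// challengeStore issues random challenges and consumes each one at most once.
type challengeStore struct {
	mu         sync.Mutex
	challenges map[string]struct{}
}

func newChallengeStore() *challengeStore {
	return &challengeStore{challenges: make(map[string]struct{})}
}

// Issue creates a random 16-byte challenge and remembers its hex form.
func (s *challengeStore) Issue() (string, error) {
	buf := make([]byte, 16)
	if _, err := rand.Read(buf); err != nil {
		return "", err
	}
	c := hex.EncodeToString(buf)
	s.mu.Lock()
	s.challenges[c] = struct{}{}
	s.mu.Unlock()
	return c, nil
}

// Consume returns true exactly once per issued challenge and deletes it,
// so a captured login request cannot be replayed with the same challenge.
func (s *challengeStore) Consume(c string) bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.challenges[c]; !ok {
		return false
	}
	delete(s.challenges, c)
	return true
}

func main() {
	store := newChallengeStore()
	c, _ := store.Issue()
	fmt.Println("first use: ", store.Consume(c)) // true
	fmt.Println("second use:", store.Consume(c)) // false
}
```

Deleting the challenge before validation is the detail that prevents replay; if validation fails, the client simply requests a fresh challenge.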
19 app/web.go (new file)

@@ -0,0 +1,19 @@
package app

import (
    "embed"
    "io/fs"
    "net/http"
)

//go:embed web/dist
var reactAppFS embed.FS

// GetReactAppFS returns a http.FileSystem from the embedded React app
func GetReactAppFS() http.FileSystem {
    webDist, err := fs.Sub(reactAppFS, "web/dist")
    if err != nil {
        panic("Failed to load embedded web app: " + err.Error())
    }
    return http.FS(webDist)
}

30 app/web/.gitignore (vendored, new file)

@@ -0,0 +1,30 @@
# Dependencies
node_modules
.pnp
.pnp.js

# Bun
.bunfig.toml
bun.lockb

# Build directories
build

# Cache and logs
.cache
.temp
.log
*.log

# Environment variables
.env
.env.local
.env.development.local
.env.test.local
.env.production.local

# Editor directories and files
.idea
.vscode
*.swp
*.swo
89 app/web/README.md (new file)

@@ -0,0 +1,89 @@
# Orly Web Application

This is a React web application that uses Bun for building and bundling, and is automatically embedded into the Go binary when built.

## Prerequisites

- [Bun](https://bun.sh/) - JavaScript runtime and toolkit
- Go 1.16+ (for embedding functionality)

## Development

There are two ways to develop the web app:

1) Standalone (recommended for hot reload)
- Start the Go relay with the embedded web UI disabled so the React app can run on its own dev server with HMR.
- Configure the relay via environment variables:

```bash
# In another shell at repo root
export ORLY_WEB_DISABLE=true
# Optional: if you want same-origin URLs, you can set a proxy target and access the relay on the same port
# export ORLY_WEB_DEV_PROXY_URL=http://localhost:5173

# Start the relay as usual
go run .
```

- Then start the React dev server:

```bash
cd app/web
bun install
bun dev
```

When ORLY_WEB_DISABLE=true is set, the Go server still serves the API and websocket endpoints and sends permissive CORS headers, so the dev server can access them cross-origin. If ORLY_WEB_DEV_PROXY_URL is set, the Go server will reverse-proxy non-/api paths to the dev server so you can use the same origin.

2) Embedded (no hot reload)
- Build the web app and run the Go server with defaults:

```bash
cd app/web
bun install
bun run build
cd ../../
go run .
```

## Building

The React application needs to be built before compiling the Go binary to ensure that the embedded files are available:

```bash
# Build the React application
cd app/web
bun install
bun run build

# Build the Go binary from project root
cd ../../
go build
```

## How it works

1. The React application is built to the `app/web/dist` directory
2. The Go embed directive in `app/web.go` embeds these files into the binary
3. When the server runs, it serves the embedded React app at the root path

## Build Automation

You can create a shell script to automate the build process:

```bash
#!/bin/bash
# build.sh
echo "Building React app..."
cd app/web
bun install
bun run build

echo "Building Go binary..."
cd ../../
go build

echo "Build complete!"
```

Make it executable with `chmod +x build.sh` and run with `./build.sh`.
36 app/web/bun.lock (new file)

@@ -0,0 +1,36 @@
{
  "lockfileVersion": 1,
  "workspaces": {
    "": {
      "name": "orly-web",
      "dependencies": {
        "react": "^18.2.0",
        "react-dom": "^18.2.0",
      },
      "devDependencies": {
        "bun-types": "latest",
      },
    },
  },
  "packages": {
    "@types/node": ["@types/node@24.5.2", "", { "dependencies": { "undici-types": "~7.12.0" } }, "sha512-FYxk1I7wPv3K2XBaoyH2cTnocQEu8AOZ60hPbsyukMPLv5/5qr7V1i8PLHdl6Zf87I+xZXFvPCXYjiTFq+YSDQ=="],

    "@types/react": ["@types/react@19.1.13", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-hHkbU/eoO3EG5/MZkuFSKmYqPbSVk5byPFa3e7y/8TybHiLMACgI8seVYlicwk7H5K/rI2px9xrQp/C+AUDTiQ=="],

    "bun-types": ["bun-types@1.2.22", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-hwaAu8tct/Zn6Zft4U9BsZcXkYomzpHJX28ofvx7k0Zz2HNz54n1n+tDgxoWFGB4PcFvJXJQloPhaV2eP3Q6EA=="],

    "csstype": ["csstype@3.1.3", "", {}, "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw=="],

    "js-tokens": ["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="],

    "loose-envify": ["loose-envify@1.4.0", "", { "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, "bin": { "loose-envify": "cli.js" } }, "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q=="],

    "react": ["react@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ=="],

    "react-dom": ["react-dom@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0", "scheduler": "^0.23.2" }, "peerDependencies": { "react": "^18.3.1" } }, "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw=="],

    "scheduler": ["scheduler@0.23.2", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ=="],

    "undici-types": ["undici-types@7.12.0", "", {}, "sha512-goOacqME2GYyOZZfb5Lgtu+1IDmAlAEu5xnD3+xTzS10hT0vzpf0SPjkXwAw9Jm+4n/mQGDP3LO8CPbYROeBfQ=="],
  }
}
1 app/web/dist/index-q4cwd1fy.css (vendored, new file; diff suppressed because one or more lines are too long)

160 app/web/dist/index-w8zpqk4w.js (vendored, new file; diff suppressed because one or more lines are too long)

30 app/web/dist/index.html (vendored, new file)

@@ -0,0 +1,30 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>Nostr Relay</title>

  <link rel="stylesheet" crossorigin href="./index-q4cwd1fy.css"><script type="module" crossorigin src="./index-w8zpqk4w.js"></script></head>
<body>
  <script>
    // Apply system theme preference immediately to avoid flash of wrong theme
    function applyTheme(isDark) {
      document.body.classList.remove('bg-white', 'bg-gray-900');
      document.body.classList.add(isDark ? 'bg-gray-900' : 'bg-white');
    }

    // Set initial theme
    applyTheme(window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches);

    // Listen for theme changes
    if (window.matchMedia) {
      window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', e => {
        applyTheme(e.matches);
      });
    }
  </script>
  <div id="root"></div>

</body>
</html>
112 app/web/dist/tailwind.min.css (vendored, new file)

@@ -0,0 +1,112 @@
/*
Local Tailwind CSS (minimal subset for this UI)
Note: This file includes just the utilities used by the app to keep size small.
You can replace this with a full Tailwind build if desired.
*/

/* Preflight-like resets (very minimal) */
*,::before,::after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}
html,body,#root{height:100%}
html{line-height:1.5;-webkit-text-size-adjust:100%;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,Segoe UI,Roboto,Helvetica,Arial,Noto Sans,\"Apple Color Emoji\",\"Segoe UI Emoji\"}
body{margin:0}
button,input{font:inherit;color:inherit}
img{display:block;max-width:100%;height:auto}

/* Layout */
.sticky{position:sticky}.relative{position:relative}.absolute{position:absolute}
.top-0{top:0}.left-0{left:0}.inset-0{top:0;right:0;bottom:0;left:0}
.z-50{z-index:50}.z-10{z-index:10}
.block{display:block}.flex{display:flex}
.items-center{align-items:center}.justify-start{justify-content:flex-start}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}
.flex-grow{flex-grow:1}.shrink-0{flex-shrink:0}
.overflow-hidden{overflow:hidden}

/* Sizing */
.w-full{width:100%}.w-auto{width:auto}.w-16{width:4rem}
.h-full{height:100%}.h-16{height:4rem}
.aspect-square{aspect-ratio:1/1}
.max-w-3xl{max-width:48rem}

/* Spacing */
.p-0{padding:0}.p-2{padding:.5rem}.p-3{padding:.75rem}.p-6{padding:1.5rem}
.px-2{padding-left:.5rem;padding-right:.5rem}
.mr-0{margin-right:0}.mr-2{margin-right:.5rem}
.mt-2{margin-top:.5rem}.mt-5{margin-top:1.25rem}
.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-4{margin-bottom:1rem}.mb-5{margin-bottom:1.25rem}
.mx-auto{margin-left:auto;margin-right:auto}

/* Borders & Radius */
.rounded{border-radius:.25rem}.rounded-full{border-radius:9999px}
.border-0{border-width:0}.border-2{border-width:2px}
.border-white{border-color:#fff}
.border{border-width:1px}.border-gray-300{border-color:#d1d5db}.border-gray-600{border-color:#4b5563}
.border-red-500{border-color:#ef4444}.border-red-700{border-color:#b91c1c}

/* Colors / Backgrounds */
.bg-white{background-color:#fff}
.bg-gray-100{background-color:#f3f4f6}
.bg-gray-200{background-color:#e5e7eb}
.bg-gray-300{background-color:#d1d5db}
.bg-gray-600{background-color:#4b5563}
.bg-gray-700{background-color:#374151}
.bg-gray-800{background-color:#1f2937}
.bg-gray-900{background-color:#111827}
.bg-blue-500{background-color:#3b82f6}
.bg-blue-600{background-color:#2563eb}.hover\:bg-blue-700:hover{background-color:#1d4ed8}
.hover\:bg-blue-600:hover{background-color:#2563eb}
.bg-red-600{background-color:#dc2626}.hover\:bg-red-700:hover{background-color:#b91c1c}
.bg-cyan-100{background-color:#cffafe}
.bg-green-100{background-color:#d1fae5}
.bg-red-100{background-color:#fee2e2}
.bg-red-50{background-color:#fef2f2}
.bg-green-900{background-color:#064e3b}
.bg-red-900{background-color:#7f1d1d}
.bg-cyan-900{background-color:#164e63}
.bg-cover{background-size:cover}.bg-center{background-position:center}
.bg-transparent{background-color:transparent}

/* Text */
.text-left{text-align:left}
.text-white{color:#fff}
.text-gray-300{color:#d1d5db}
.text-gray-500{color:#6b7280}.hover\:text-gray-800:hover{color:#1f2937}
.hover\:text-gray-100:hover{color:#f3f4f6}
.text-gray-700{color:#374151}
.text-gray-800{color:#1f2937}
.text-gray-900{color:#111827}
.text-gray-100{color:#f3f4f6}
.text-green-800{color:#065f46}
.text-green-100{color:#dcfce7}
.text-red-800{color:#991b1b}
.text-red-200{color:#fecaca}
.text-red-100{color:#fee2e2}
.text-cyan-800{color:#155e75}
.text-cyan-100{color:#cffafe}
.text-base{font-size:1rem;line-height:1.5rem}
.text-lg{font-size:1.125rem;line-height:1.75rem}
.text-2xl{font-size:1.5rem;line-height:2rem}
.font-bold{font-weight:700}

/* Opacity */
.opacity-70{opacity:.7}

/* Effects */
.shadow{--tw-shadow:0 1px 3px 0 rgba(0,0,0,0.1),0 1px 2px -1px rgba(0,0,0,0.1);box-shadow:var(--tw-shadow)}

/* Cursor */
.cursor-pointer{cursor:pointer}

/* Box model */
.box-border{box-sizing:border-box}

/* Utilities */
.hover\:bg-transparent:hover{background-color:transparent}
.hover\:bg-gray-200:hover{background-color:#e5e7eb}
.hover\:bg-gray-600:hover{background-color:#4b5563}
.focus\:ring-2:focus{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}
.focus\:ring-blue-200:focus{--tw-ring-color:rgba(191, 219, 254, var(--tw-ring-opacity))}
.focus\:ring-blue-500:focus{--tw-ring-color:rgba(59, 130, 246, var(--tw-ring-opacity))}
.disabled\:opacity-50:disabled{opacity:.5}
.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}

/* Height for avatar images in header already inherit from container */
18 app/web/package.json (new file)

@@ -0,0 +1,18 @@
{
  "name": "orly-web",
  "version": "0.1.0",
  "private": true,
  "type": "module",
  "scripts": {
    "dev": "bun --hot --port 5173 public/dev.html",
    "build": "rm -rf dist && bun build ./public/index.html --outdir ./dist --minify --splitting && cp -r public/tailwind.min.css dist/",
    "preview": "bun x serve dist"
  },
  "dependencies": {
    "react": "^18.2.0",
    "react-dom": "^18.2.0"
  },
  "devDependencies": {
    "bun-types": "latest"
  }
}

13 app/web/public/dev.html (new file)

@@ -0,0 +1,13 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>Nostr Relay (Dev)</title>
  <link rel="stylesheet" href="tailwind.min.css" />
</head>
<body class="bg-white">
  <div id="root"></div>
  <script type="module" src="/src/index.jsx"></script>
</body>
</html>
30 app/web/public/index.html (new file)

@@ -0,0 +1,30 @@
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>Nostr Relay</title>
  <link rel="stylesheet" href="tailwind.min.css" />
</head>
<body>
  <script>
    // Apply system theme preference immediately to avoid flash of wrong theme
    function applyTheme(isDark) {
      document.body.classList.remove('bg-white', 'bg-gray-900');
      document.body.classList.add(isDark ? 'bg-gray-900' : 'bg-white');
    }

    // Set initial theme
    applyTheme(window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches);

    // Listen for theme changes
    if (window.matchMedia) {
      window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', e => {
        applyTheme(e.matches);
      });
    }
  </script>
  <div id="root"></div>
  <script type="module" src="/src/index.jsx"></script>
</body>
</html>
112 app/web/public/tailwind.min.css (vendored, new file)

@@ -0,0 +1,112 @@
/*
Local Tailwind CSS (minimal subset for this UI)
Note: This file includes just the utilities used by the app to keep size small.
You can replace this with a full Tailwind build if desired.
*/

/* Preflight-like resets (very minimal) */
*,::before,::after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}
html,body,#root{height:100%}
html{line-height:1.5;-webkit-text-size-adjust:100%;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,Segoe UI,Roboto,Helvetica,Arial,Noto Sans,\"Apple Color Emoji\",\"Segoe UI Emoji\"}
body{margin:0}
button,input{font:inherit;color:inherit}
img{display:block;max-width:100%;height:auto}

/* Layout */
.sticky{position:sticky}.relative{position:relative}.absolute{position:absolute}
.top-0{top:0}.left-0{left:0}.inset-0{top:0;right:0;bottom:0;left:0}
.z-50{z-index:50}.z-10{z-index:10}
.block{display:block}.flex{display:flex}
.items-center{align-items:center}.justify-start{justify-content:flex-start}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}
.flex-grow{flex-grow:1}.shrink-0{flex-shrink:0}
.overflow-hidden{overflow:hidden}

/* Sizing */
.w-full{width:100%}.w-auto{width:auto}.w-16{width:4rem}
.h-full{height:100%}.h-16{height:4rem}
.aspect-square{aspect-ratio:1/1}
.max-w-3xl{max-width:48rem}

/* Spacing */
.p-0{padding:0}.p-2{padding:.5rem}.p-3{padding:.75rem}.p-6{padding:1.5rem}
.px-2{padding-left:.5rem;padding-right:.5rem}
.mr-0{margin-right:0}.mr-2{margin-right:.5rem}
.mt-2{margin-top:.5rem}.mt-5{margin-top:1.25rem}
.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-4{margin-bottom:1rem}.mb-5{margin-bottom:1.25rem}
.mx-auto{margin-left:auto;margin-right:auto}

/* Borders & Radius */
.rounded{border-radius:.25rem}.rounded-full{border-radius:9999px}
.border-0{border-width:0}.border-2{border-width:2px}
.border-white{border-color:#fff}
.border{border-width:1px}.border-gray-300{border-color:#d1d5db}.border-gray-600{border-color:#4b5563}
.border-red-500{border-color:#ef4444}.border-red-700{border-color:#b91c1c}

/* Colors / Backgrounds */
.bg-white{background-color:#fff}
.bg-gray-100{background-color:#f3f4f6}
.bg-gray-200{background-color:#e5e7eb}
.bg-gray-300{background-color:#d1d5db}
.bg-gray-600{background-color:#4b5563}
.bg-gray-700{background-color:#374151}
.bg-gray-800{background-color:#1f2937}
.bg-gray-900{background-color:#111827}
.bg-blue-500{background-color:#3b82f6}
.bg-blue-600{background-color:#2563eb}.hover\:bg-blue-700:hover{background-color:#1d4ed8}
.hover\:bg-blue-600:hover{background-color:#2563eb}
.bg-red-600{background-color:#dc2626}.hover\:bg-red-700:hover{background-color:#b91c1c}
.bg-cyan-100{background-color:#cffafe}
.bg-green-100{background-color:#d1fae5}
.bg-red-100{background-color:#fee2e2}
.bg-red-50{background-color:#fef2f2}
.bg-green-900{background-color:#064e3b}
.bg-red-900{background-color:#7f1d1d}
.bg-cyan-900{background-color:#164e63}
.bg-cover{background-size:cover}.bg-center{background-position:center}
.bg-transparent{background-color:transparent}

/* Text */
.text-left{text-align:left}
.text-white{color:#fff}
.text-gray-300{color:#d1d5db}
.text-gray-500{color:#6b7280}.hover\:text-gray-800:hover{color:#1f2937}
.hover\:text-gray-100:hover{color:#f3f4f6}
.text-gray-700{color:#374151}
.text-gray-800{color:#1f2937}
.text-gray-900{color:#111827}
.text-gray-100{color:#f3f4f6}
.text-green-800{color:#065f46}
.text-green-100{color:#dcfce7}
.text-red-800{color:#991b1b}
.text-red-200{color:#fecaca}
.text-red-100{color:#fee2e2}
.text-cyan-800{color:#155e75}
.text-cyan-100{color:#cffafe}
.text-base{font-size:1rem;line-height:1.5rem}
.text-lg{font-size:1.125rem;line-height:1.75rem}
.text-2xl{font-size:1.5rem;line-height:2rem}
.font-bold{font-weight:700}

/* Opacity */
.opacity-70{opacity:.7}

/* Effects */
.shadow{--tw-shadow:0 1px 3px 0 rgba(0,0,0,0.1),0 1px 2px -1px rgba(0,0,0,0.1);box-shadow:var(--tw-shadow)}

/* Cursor */
.cursor-pointer{cursor:pointer}

/* Box model */
.box-border{box-sizing:border-box}

/* Utilities */
.hover\:bg-transparent:hover{background-color:transparent}
.hover\:bg-gray-200:hover{background-color:#e5e7eb}
.hover\:bg-gray-600:hover{background-color:#4b5563}
.focus\:ring-2:focus{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}
.focus\:ring-blue-200:focus{--tw-ring-color:rgba(191, 219, 254, var(--tw-ring-opacity))}
.focus\:ring-blue-500:focus{--tw-ring-color:rgba(59, 130, 246, var(--tw-ring-opacity))}
.disabled\:opacity-50:disabled{opacity:.5}
.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}

/* Height for avatar images in header already inherit from container */
1960 app/web/src/App.jsx (new file; diff suppressed because it is too large)

11 app/web/src/index.jsx (new file)

@@ -0,0 +1,11 @@
import React from 'react';
import { createRoot } from 'react-dom/client';
import App from './App';
import './styles.css';

const root = createRoot(document.getElementById('root'));
root.render(
  <React.StrictMode>
    <App />
  </React.StrictMode>
);
191 app/web/src/styles.css (new file)

@@ -0,0 +1,191 @@
body {
    font-family: Arial, sans-serif;
    margin: 0;
    padding: 0;
}

.container {
    background: #f9f9f9;
    padding: 30px;
    border-radius: 8px;
    margin-top: 20px; /* Reduced space since header is now sticky */
}

.form-group {
    margin-bottom: 20px;
}

label {
    display: block;
    margin-bottom: 5px;
    font-weight: bold;
}

input, textarea {
    width: 100%;
    padding: 10px;
    border: 1px solid #ddd;
    border-radius: 4px;
}

button {
    background: #007cba;
    color: white;
    padding: 12px 20px;
    border: none;
    border-radius: 4px;
    cursor: pointer;
}

button:hover {
    background: #005a87;
}

.danger-button {
    background: #dc3545;
}

.danger-button:hover {
    background: #c82333;
}

.status {
    margin-top: 20px;
    margin-bottom: 20px;
    padding: 10px;
    border-radius: 4px;
}

.success {
    background: #d4edda;
    color: #155724;
}

.error {
    background: #f8d7da;
    color: #721c24;
}

.info {
    background: #d1ecf1;
    color: #0c5460;
}

.header-panel {
    position: sticky;
    top: 0;
    left: 0;
    width: 100%;
    background-color: #f8f9fa;
    box-shadow: 0 2px 4px rgba(0,0,0,0.1);
    z-index: 1000;
    height: 60px;
    display: flex;
    align-items: center;
    background-size: cover;
    background-position: center;
    overflow: hidden;
}

.header-content {
    display: flex;
    align-items: center;
    height: 100%;
    padding: 0 0 0 12px;
    width: 100%;
    margin: 0 auto;
    box-sizing: border-box;
}

.header-left {
    display: flex;
    align-items: center;
    justify-content: flex-start;
    height: 100%;
}

.header-center {
    display: flex;
    flex-grow: 1;
    align-items: center;
    justify-content: flex-start;
    position: relative;
    overflow: hidden;
}

.header-right {
    display: flex;
    align-items: center;
    justify-content: flex-end;
    height: 100%;
}

.header-logo {
    height: 100%;
    aspect-ratio: 1 / 1;
    width: auto;
    border-radius: 0;
    object-fit: cover;
    flex-shrink: 0;
}

.user-avatar {
    width: 2em;
    height: 2em;
    border-radius: 50%;
    object-fit: cover;
    border: 2px solid white;
    margin-right: 10px;
    box-shadow: 0 1px 3px rgba(0,0,0,0.2);
}

.user-profile {
    display: flex;
    align-items: center;
    position: relative;
    z-index: 1;
}

.user-info {
    font-weight: bold;
    font-size: 1.2em;
    text-align: left;
}

.user-name {
    font-weight: bold;
    font-size: 1em;
    display: block;
}

.profile-banner {
    position: absolute;
    width: 100%;
    height: 100%;
    top: 0;
    left: 0;
    z-index: -1;
    opacity: 0.7;
}

.logout-button {
    background: transparent;
    color: #6c757d;
    border: none;
    font-size: 20px;
    cursor: pointer;
    padding: 0;
    display: flex;
    align-items: center;
    justify-content: center;
    width: 48px;
    height: 100%;
    margin-left: 10px;
    margin-right: 0;
    flex-shrink: 0;
}

.logout-button:hover {
    background: transparent;
    color: #343a40;
}
51
cmd/benchmark/Dockerfile.benchmark
Normal file
@@ -0,0 +1,51 @@
|
||||
# Dockerfile for benchmark runner
|
||||
FROM golang:1.25-alpine AS builder
|
||||
|
||||
# Install build dependencies
|
||||
RUN apk add --no-cache git ca-certificates
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /build
|
||||
|
||||
# Copy go modules
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
|
||||
# Copy source code
|
||||
COPY . .
|
||||
|
||||
# Build the benchmark tool
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o benchmark cmd/benchmark/main.go
|
||||
|
||||
# Final stage
|
||||
FROM alpine:latest
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apk --no-cache add ca-certificates curl wget
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy benchmark binary
|
||||
COPY --from=builder /build/benchmark /app/benchmark
|
||||
|
||||
# Copy benchmark runner script
|
||||
COPY cmd/benchmark/benchmark-runner.sh /app/benchmark-runner
|
||||
|
||||
# Make scripts executable
|
||||
RUN chmod +x /app/benchmark-runner
|
||||
|
||||
# Create runtime user and reports directory owned by uid 1000
|
||||
RUN adduser -u 1000 -D appuser && \
|
||||
mkdir -p /reports && \
|
||||
chown -R 1000:1000 /app /reports
|
||||
|
||||
# Environment variables
|
||||
ENV BENCHMARK_EVENTS=10000
|
||||
ENV BENCHMARK_WORKERS=8
|
||||
ENV BENCHMARK_DURATION=60s
|
||||
|
||||
# Drop privileges: run as uid 1000
|
||||
USER 1000:1000
|
||||
|
||||
# Run the benchmark runner
|
||||
CMD ["/app/benchmark-runner"]
|
||||
22
cmd/benchmark/Dockerfile.khatru-badger
Normal file
@@ -0,0 +1,22 @@
|
||||
FROM golang:1.25-alpine AS builder
|
||||
|
||||
RUN apk add --no-cache git ca-certificates
|
||||
|
||||
WORKDIR /build
|
||||
COPY . .
|
||||
|
||||
# Build the basic-badger example
|
||||
RUN cd examples/basic-badger && \
|
||||
go mod tidy && \
|
||||
CGO_ENABLED=0 go build -o khatru-badger .
|
||||
|
||||
FROM alpine:latest
|
||||
RUN apk --no-cache add ca-certificates wget
|
||||
WORKDIR /app
|
||||
COPY --from=builder /build/examples/basic-badger/khatru-badger /app/
|
||||
RUN mkdir -p /data
|
||||
EXPOSE 3334
|
||||
ENV DATABASE_PATH=/data/badger
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --quiet --tries=1 --spider http://localhost:3334 || exit 1
|
||||
CMD ["/app/khatru-badger"]
|
||||
22
cmd/benchmark/Dockerfile.khatru-sqlite
Normal file
@@ -0,0 +1,22 @@
|
||||
FROM golang:1.25-alpine AS builder
|
||||
|
||||
RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev
|
||||
|
||||
WORKDIR /build
|
||||
COPY . .
|
||||
|
||||
# Build the basic-sqlite3 example
|
||||
RUN cd examples/basic-sqlite3 && \
|
||||
go mod tidy && \
|
||||
CGO_ENABLED=1 go build -o khatru-sqlite .
|
||||
|
||||
FROM alpine:latest
|
||||
RUN apk --no-cache add ca-certificates sqlite wget
|
||||
WORKDIR /app
|
||||
COPY --from=builder /build/examples/basic-sqlite3/khatru-sqlite /app/
|
||||
RUN mkdir -p /data
|
||||
EXPOSE 3334
|
||||
ENV DATABASE_PATH=/data/khatru.db
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --quiet --tries=1 --spider http://localhost:3334 || exit 1
|
||||
CMD ["/app/khatru-sqlite"]
|
||||
91
cmd/benchmark/Dockerfile.next-orly
Normal file
@@ -0,0 +1,91 @@
|
||||
# Dockerfile for next.orly.dev relay
|
||||
FROM ubuntu:22.04 AS builder
|
||||
|
||||
# Set environment variables
|
||||
ARG GOLANG_VERSION=1.22.5
|
||||
|
||||
# Update package list and install dependencies
|
||||
RUN apt-get update && \
|
||||
apt-get install -y wget ca-certificates && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Download Go binary
|
||||
RUN wget https://go.dev/dl/go${GOLANG_VERSION}.linux-amd64.tar.gz && \
|
||||
rm -rf /usr/local/go && \
|
||||
tar -C /usr/local -xzf go${GOLANG_VERSION}.linux-amd64.tar.gz && \
|
||||
rm go${GOLANG_VERSION}.linux-amd64.tar.gz
|
||||
|
||||
# Set PATH environment variable
|
||||
ENV PATH="/usr/local/go/bin:${PATH}"
|
||||
|
||||
# Verify installation
|
||||
RUN go version
|
||||
|
||||
RUN apt update && \
|
||||
apt -y install build-essential autoconf libtool git wget
|
||||
RUN cd /tmp && \
|
||||
rm -rf secp256k1 && \
|
||||
git clone https://github.com/bitcoin-core/secp256k1.git && \
|
||||
cd secp256k1 && \
|
||||
git checkout v0.6.0 && \
|
||||
git submodule init && \
|
||||
git submodule update && \
|
||||
./autogen.sh && \
|
||||
./configure --enable-module-schnorrsig --enable-module-ecdh --prefix=/usr && \
|
||||
make -j1 && \
|
||||
make install
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /build
|
||||
|
||||
# Copy go modules
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
|
||||
# Copy source code
|
||||
COPY . .
|
||||
|
||||
# Build the relay
|
||||
RUN CGO_ENABLED=1 GOOS=linux go build -gcflags "all=-N -l" -o relay .
|
||||
|
||||
# Create non-root user (uid 1000) for runtime in builder stage (used by analyzer)
|
||||
RUN useradd -u 1000 -m -s /bin/bash appuser && \
|
||||
chown -R 1000:1000 /build
|
||||
# Switch to uid 1000 for any subsequent runtime use of this stage
|
||||
USER 1000:1000
|
||||
|
||||
# Final stage
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apt-get update && apt-get install -y ca-certificates curl libsecp256k1-0 libsecp256k1-dev && rm -rf /var/lib/apt/lists/* && \
|
||||
ln -sf /usr/lib/x86_64-linux-gnu/libsecp256k1.so.0 /usr/lib/x86_64-linux-gnu/libsecp256k1.so.5
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy binary from builder
|
||||
COPY --from=builder /build/relay /app/relay
|
||||
|
||||
# Create runtime user and writable directories
|
||||
RUN useradd -u 1000 -m -s /bin/bash appuser && \
|
||||
mkdir -p /data /profiles /app && \
|
||||
chown -R 1000:1000 /data /profiles /app
|
||||
|
||||
# Expose port
|
||||
EXPOSE 8080
|
||||
|
||||
# Set environment variables
|
||||
ENV ORLY_DATA_DIR=/data
|
||||
ENV ORLY_LISTEN=0.0.0.0
|
||||
ENV ORLY_PORT=8080
|
||||
ENV ORLY_LOG_LEVEL=off
|
||||
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD bash -lc "code=\$(curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:8080 || echo 000); echo \$code | grep -E '^(101|200|400|404|426)$' >/dev/null || exit 1"
|
||||
|
||||
# Drop privileges: run as uid 1000
|
||||
USER 1000:1000
|
||||
|
||||
# Run the relay
|
||||
CMD ["/app/relay"]
|
||||
23
cmd/benchmark/Dockerfile.nostr-rs-relay
Normal file
@@ -0,0 +1,23 @@
|
||||
FROM rust:1.81-alpine AS builder
|
||||
|
||||
RUN apk add --no-cache musl-dev sqlite-dev build-base bash perl protobuf
|
||||
|
||||
WORKDIR /build
|
||||
COPY . .
|
||||
|
||||
# Build the relay
|
||||
RUN cargo build --release
|
||||
|
||||
FROM alpine:latest
|
||||
RUN apk --no-cache add ca-certificates sqlite wget
|
||||
WORKDIR /app
|
||||
COPY --from=builder /build/target/release/nostr-rs-relay /app/
|
||||
RUN mkdir -p /data
|
||||
|
||||
EXPOSE 8080
|
||||
ENV RUST_LOG=info
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
|
||||
|
||||
CMD ["/app/nostr-rs-relay"]
|
||||
23
cmd/benchmark/Dockerfile.relayer-basic
Normal file
@@ -0,0 +1,23 @@
|
||||
FROM golang:1.25-alpine AS builder
|
||||
|
||||
RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev
|
||||
|
||||
WORKDIR /build
|
||||
COPY . .
|
||||
|
||||
# Build the basic example
|
||||
RUN cd examples/basic && \
|
||||
go mod tidy && \
|
||||
CGO_ENABLED=1 go build -o relayer-basic .
|
||||
|
||||
FROM alpine:latest
|
||||
RUN apk --no-cache add ca-certificates sqlite wget
|
||||
WORKDIR /app
|
||||
COPY --from=builder /build/examples/basic/relayer-basic /app/
|
||||
RUN mkdir -p /data
|
||||
EXPOSE 7447
|
||||
ENV DATABASE_PATH=/data/relayer.db
|
||||
# PORT env is not used by relayer-basic; it always binds to 7447 in code.
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --quiet --tries=1 --spider http://localhost:7447 || exit 1
|
||||
CMD ["/app/relayer-basic"]
|
||||
44
cmd/benchmark/Dockerfile.strfry
Normal file
@@ -0,0 +1,44 @@
|
||||
FROM ubuntu:22.04 AS builder
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# Install build dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
git \
|
||||
build-essential \
|
||||
liblmdb-dev \
|
||||
libsecp256k1-dev \
|
||||
pkg-config \
|
||||
libtool \
|
||||
autoconf \
|
||||
automake \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Fetch strfry source with submodules to ensure golpe is present
|
||||
RUN git clone --recurse-submodules https://github.com/hoytech/strfry .
|
||||
|
||||
# Build strfry
|
||||
RUN make setup-golpe && \
|
||||
make -j$(nproc)
|
||||
|
||||
FROM ubuntu:22.04
|
||||
RUN apt-get update && apt-get install -y \
|
||||
liblmdb0 \
|
||||
libsecp256k1-0 \
|
||||
curl \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
COPY --from=builder /build/strfry /app/
|
||||
RUN mkdir -p /data
|
||||
|
||||
EXPOSE 8080
|
||||
ENV STRFRY_DB_PATH=/data/strfry.lmdb
|
||||
ENV STRFRY_RELAY_PORT=8080
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD curl -f http://localhost:8080 || exit 1
|
||||
|
||||
CMD ["/app/strfry", "relay"]
|
||||
260
cmd/benchmark/README.md
Normal file
@@ -0,0 +1,260 @@
|
||||
# Nostr Relay Benchmark Suite
|
||||
|
||||
A comprehensive benchmarking system for testing and comparing the performance of multiple Nostr relay implementations, including:
|
||||
|
||||
- **next.orly.dev** (this repository) - BadgerDB-based relay
|
||||
- **Khatru** - SQLite and Badger variants
|
||||
- **Relayer** - Basic example implementation
|
||||
- **Strfry** - C++ LMDB-based relay
|
||||
- **nostr-rs-relay** - Rust-based relay with SQLite
|
||||
|
||||
## Features
|
||||
|
||||
### Benchmark Tests
|
||||
|
||||
1. **Peak Throughput Test**
|
||||
- Tests maximum event ingestion rate
|
||||
- Concurrent workers pushing events as fast as possible
|
||||
   - Measures events/second, latency distribution, success rate (see the sketch after this list)
|
||||
|
||||
2. **Burst Pattern Test**
|
||||
- Simulates real-world traffic patterns
|
||||
- Alternating high-activity bursts and quiet periods
|
||||
- Tests relay behavior under varying loads
|
||||
|
||||
3. **Mixed Read/Write Test**
|
||||
- Concurrent read and write operations
|
||||
- Tests query performance while events are being ingested
|
||||
- Measures combined throughput and latency
|
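The implementation of these tests lives in `cmd/benchmark/main.go`, whose diff is suppressed above because of its size. Purely as an illustration of the write path being measured, a peak-throughput loop could look like the sketch below; it assumes a `gorilla/websocket` client and pre-serialized, signed `["EVENT", ...]` frames, and every name in it is illustrative rather than taken from the real tool.

```go
// Illustrative sketch only; not the code in cmd/benchmark/main.go.
package loadgen

import (
	"sync"
	"time"

	"github.com/gorilla/websocket"
)

// publishAll pushes pre-serialized ["EVENT", ...] frames over `workers`
// websocket connections and records, per event, the latency from write to
// the relay's ["OK", ...] acknowledgement.
func publishAll(relayURL string, frames [][]byte, workers int) ([]time.Duration, error) {
	var (
		mu        sync.Mutex
		latencies []time.Duration
		wg        sync.WaitGroup
	)
	jobs := make(chan []byte)
	for i := 0; i < workers; i++ {
		conn, _, err := websocket.DefaultDialer.Dial(relayURL, nil)
		if err != nil {
			return nil, err
		}
		wg.Add(1)
		go func(c *websocket.Conn) {
			defer wg.Done()
			defer c.Close()
			for frame := range jobs {
				start := time.Now()
				if err := c.WriteMessage(websocket.TextMessage, frame); err != nil {
					continue // count as a failed publish; keep draining the queue
				}
				if _, _, err := c.ReadMessage(); err != nil { // wait for ["OK", ...]
					continue
				}
				mu.Lock()
				latencies = append(latencies, time.Since(start))
				mu.Unlock()
			}
		}(conn)
	}
	for _, f := range frames {
		jobs <- f
	}
	close(jobs)
	wg.Wait()
	return latencies, nil
}
```

Events/sec is then simply the number of acknowledged events divided by the wall-clock duration of the run, which is how the figures in the sample reports further below should be read.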
||||
|
||||
### Performance Metrics
|
||||
|
||||
- **Throughput**: Events processed per second
|
||||
- **Latency**: Average, P95, and P99 response times (computed as in the sketch below)
|
||||
- **Success Rate**: Percentage of successful operations
|
||||
- **Memory Usage**: Peak memory consumption during tests
|
||||
- **Error Analysis**: Detailed error reporting and categorization
|
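The P95/P99 numbers in the reports are order statistics over the recorded per-operation latencies. A minimal nearest-rank computation (again a sketch, not the suite's own code) looks like this:

```go
package stats

import (
	"math"
	"sort"
	"time"
)

// percentile returns the p-th percentile (0 < p <= 100) of latencies using
// the nearest-rank method on a sorted copy of the input.
func percentile(latencies []time.Duration, p float64) time.Duration {
	if len(latencies) == 0 {
		return 0
	}
	sorted := append([]time.Duration(nil), latencies...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	rank := int(math.Ceil(p/100*float64(len(sorted)))) - 1
	if rank < 0 {
		rank = 0
	}
	return sorted[rank]
}
```

`percentile(lat, 95)` and `percentile(lat, 99)` then correspond to the P95/P99 lines in the individual relay reports.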
||||
|
||||
### Reporting
|
||||
|
||||
- Individual relay reports with detailed metrics
|
||||
- Aggregate comparison report across all relays
|
||||
- Comparison tables for easy performance analysis
|
||||
- Timestamped results for tracking improvements over time
|
||||
|
||||
## Quick Start
|
||||
|
||||
### 1. Setup External Relays
|
||||
|
||||
Run the setup script to download and configure all external relay repositories:
|
||||
|
||||
```bash
|
||||
cd cmd/benchmark
|
||||
./setup-external-relays.sh
|
||||
```
|
||||
|
||||
This will:
|
||||
- Clone all external relay repositories
|
||||
- Create Docker configurations for each relay
|
||||
- Set up configuration files
|
||||
- Create data and report directories
|
||||
|
||||
### 2. Run Benchmarks
|
||||
|
||||
Start all relays and run the benchmark suite:
|
||||
|
||||
```bash
|
||||
docker compose up --build
|
||||
```
|
||||
|
||||
The system will:
|
||||
- Build and start all relay containers
|
||||
- Wait for all relays to become healthy
|
||||
- Run benchmarks against each relay sequentially
|
||||
- Generate individual and aggregate reports
|
||||
|
||||
### 3. View Results
|
||||
|
||||
Results are stored in the `reports/` directory with timestamps:
|
||||
|
||||
```bash
|
||||
# View the aggregate report
|
||||
cat reports/run_YYYYMMDD_HHMMSS/aggregate_report.txt
|
||||
|
||||
# View individual relay results
|
||||
ls reports/run_YYYYMMDD_HHMMSS/
|
||||
```
|
||||
|
||||
## Architecture
|
||||
|
||||
### Docker Compose Services
|
||||
|
||||
| Service | Port | Description |
|
||||
|---------|------|-------------|
|
||||
| next-orly | 8001 | This repository's BadgerDB relay |
|
||||
| khatru-sqlite | 8002 | Khatru with SQLite backend |
|
||||
| khatru-badger | 8003 | Khatru with Badger backend |
|
||||
| relayer-basic | 8004 | Basic relayer example |
|
||||
| strfry | 8005 | Strfry C++ LMDB relay |
|
||||
| nostr-rs-relay | 8006 | Rust SQLite relay |
|
||||
| benchmark-runner | - | Orchestrates tests and aggregates results |
|
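Once the stack is up, any of the host-mapped ports in the table above can be smoke-tested from a small client. The sketch below assumes `gorilla/websocket` and uses the next-orly mapping (8001); the subscription id and filter are arbitrary.

```go
package main

import (
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Port 8001 is the host mapping for the next-orly service in the table above.
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8001", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Request a single recent event; a healthy relay answers with EVENT and/or EOSE.
	req := `["REQ","smoke-test",{"limit":1}]`
	if err := conn.WriteMessage(websocket.TextMessage, []byte(req)); err != nil {
		log.Fatal(err)
	}
	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(msg))
}
```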
||||
|
||||
### File Structure
|
||||
|
||||
```
|
||||
cmd/benchmark/
|
||||
├── main.go # Benchmark tool implementation
|
||||
├── docker-compose.yml # Service orchestration
|
||||
├── setup-external-relays.sh # Repository setup script
|
||||
├── benchmark-runner.sh # Test orchestration script
|
||||
├── Dockerfile.next-orly # This repo's relay container
|
||||
├── Dockerfile.benchmark # Benchmark runner container
|
||||
├── Dockerfile.khatru-sqlite # Khatru SQLite variant
|
||||
├── Dockerfile.khatru-badger # Khatru Badger variant
|
||||
├── Dockerfile.relayer-basic # Relayer basic example
|
||||
├── Dockerfile.strfry # Strfry relay
|
||||
├── Dockerfile.nostr-rs-relay # Rust relay
|
||||
├── configs/
|
||||
│ ├── strfry.conf # Strfry configuration
|
||||
│ └── config.toml # nostr-rs-relay configuration
|
||||
├── external/ # External relay repositories
|
||||
├── data/ # Persistent data for each relay
|
||||
└── reports/ # Benchmark results
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
The benchmark can be configured via environment variables in `docker-compose.yml`:
|
||||
|
||||
```yaml
|
||||
environment:
|
||||
- BENCHMARK_EVENTS=10000 # Number of events per test
|
||||
- BENCHMARK_WORKERS=8 # Concurrent workers
|
||||
- BENCHMARK_DURATION=60s # Test duration
|
||||
- BENCHMARK_TARGETS=... # Relay endpoints to test
|
||||
```
|
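Inside the Go tool these settings would typically be read with plain environment fallbacks, mirroring the defaults that `benchmark-runner.sh` applies; the helper below is a hedged sketch, since the real flag/env handling in `main.go` is not visible in this diff.

```go
package config

import (
	"os"
	"strconv"
	"time"
)

// envOr returns the value of key, or fallback when the variable is unset or empty.
func envOr(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

// loadConfig resolves the benchmark knobs with the same defaults the
// runner script uses (10000 events, 8 workers, 60s).
func loadConfig() (events, workers int, duration time.Duration, err error) {
	if events, err = strconv.Atoi(envOr("BENCHMARK_EVENTS", "10000")); err != nil {
		return
	}
	if workers, err = strconv.Atoi(envOr("BENCHMARK_WORKERS", "8")); err != nil {
		return
	}
	duration, err = time.ParseDuration(envOr("BENCHMARK_DURATION", "60s"))
	return
}
```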
||||
|
||||
### Custom Configuration
|
||||
|
||||
1. **Modify test parameters**: Edit environment variables in `docker-compose.yml`
|
||||
2. **Add new relays**:
|
||||
- Add service to `docker-compose.yml`
|
||||
- Create appropriate Dockerfile
|
||||
- Update `BENCHMARK_TARGETS` environment variable
|
||||
3. **Adjust relay configs**: Edit files in `configs/` directory
|
||||
|
||||
## Manual Usage
|
||||
|
||||
### Run Individual Relay
|
||||
|
||||
```bash
|
||||
# Build and run a specific relay
|
||||
docker-compose up next-orly
|
||||
|
||||
# Run benchmark against specific endpoint
|
||||
./benchmark -datadir=/tmp/test -events=1000 -workers=4
|
||||
```
|
||||
|
||||
### Run Benchmark Tool Directly
|
||||
|
||||
```bash
|
||||
# Build the benchmark tool
|
||||
go build -o benchmark main.go
|
||||
|
||||
# Run with custom parameters
|
||||
./benchmark \
|
||||
-datadir=/tmp/benchmark_db \
|
||||
-events=5000 \
|
||||
-workers=4 \
|
||||
-duration=30s
|
||||
```
|
||||
|
||||
## Benchmark Results Interpretation
|
||||
|
||||
### Peak Throughput Test
|
||||
- **High events/sec**: Good write performance
|
||||
- **Low latency**: Efficient event processing
|
||||
- **High success rate**: Stable under load
|
||||
|
||||
### Burst Pattern Test
|
||||
- **Consistent performance**: Good handling of variable loads
|
||||
- **Low P95/P99 latency**: Predictable response times
|
||||
- **No errors during bursts**: Robust queuing/buffering
|
||||
|
||||
### Mixed Read/Write Test
|
||||
- **Balanced throughput**: Good concurrent operation handling
|
||||
- **Low read latency**: Efficient query processing
|
||||
- **Stable write performance**: Queries don't significantly impact writes
|
||||
|
||||
## Development
|
||||
|
||||
### Adding New Tests
|
||||
|
||||
1. Extend the `Benchmark` struct in `main.go`
|
||||
2. Add a new test method following the existing patterns (see the skeleton after this list)
|
||||
3. Update `main()` function to call new test
|
||||
4. Update result aggregation in `benchmark-runner.sh`
|
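Since `main.go` is suppressed above, the actual `Benchmark` struct is not visible here; the skeleton below only illustrates the general shape a new test method could take for step 2, with every field and name hypothetical.

```go
package bench

import "time"

// Hypothetical shapes: none of these names are taken from main.go.
type TestResult struct {
	Name      string
	EventsSec float64
	AvgLat    time.Duration
}

type Benchmark struct {
	relayURL string
	workers  int
	duration time.Duration
	results  []TestResult
}

// RunSustainedWriteTest shows the pattern: run the workload, summarize it,
// and append a TestResult so the existing report printer (and the greps in
// benchmark-runner.sh) can pick the new numbers up.
func (b *Benchmark) RunSustainedWriteTest(run func() (count int, total time.Duration, err error)) error {
	start := time.Now()
	count, total, err := run()
	if err != nil {
		return err
	}
	var avg time.Duration
	if count > 0 {
		avg = total / time.Duration(count)
	}
	b.results = append(b.results, TestResult{
		Name:      "Sustained Write",
		EventsSec: float64(count) / time.Since(start).Seconds(),
		AvgLat:    avg,
	})
	return nil
}
```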
||||
|
||||
### Modifying Relay Configurations
|
||||
|
||||
Each relay's Dockerfile and configuration can be customized:
|
||||
- **Resource limits**: Adjust memory/CPU limits in docker-compose.yml
|
||||
- **Database settings**: Modify configuration files in `configs/`
|
||||
- **Network settings**: Update port mappings and health checks
|
||||
|
||||
### Debugging
|
||||
|
||||
```bash
|
||||
# View logs for specific relay
|
||||
docker-compose logs next-orly
|
||||
|
||||
# Run benchmark with debug output
|
||||
docker-compose up --build benchmark-runner
|
||||
|
||||
# Check individual container health
|
||||
docker-compose ps
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Relay fails to start**: Check logs with `docker-compose logs <service>`
|
||||
2. **Connection refused**: Ensure relay health checks are passing
|
||||
3. **Build failures**: Verify external repositories were cloned correctly
|
||||
4. **Permission errors**: Ensure setup script is executable
|
||||
|
||||
### Performance Issues
|
||||
|
||||
- **Low throughput**: Check resource limits and concurrent worker count
|
||||
- **High memory usage**: Monitor container resource consumption
|
||||
- **Network bottlenecks**: Test on different host configurations
|
||||
|
||||
### Reset Environment
|
||||
|
||||
```bash
|
||||
# Clean up everything
|
||||
docker-compose down -v
|
||||
docker system prune -f
|
||||
rm -rf external/ data/ reports/
|
||||
|
||||
# Start fresh
|
||||
./setup-external-relays.sh
|
||||
docker-compose up --build
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
To add support for new relay implementations:
|
||||
|
||||
1. Create appropriate Dockerfile following existing patterns
|
||||
2. Add service definition to `docker-compose.yml`
|
||||
3. Update `BENCHMARK_TARGETS` environment variable
|
||||
4. Test the new relay integration
|
||||
5. Update documentation
|
||||
|
||||
## License
|
||||
|
||||
This benchmark suite is part of the next.orly.dev project and follows the same licensing terms.
|
||||
275
cmd/benchmark/benchmark-runner.sh
Normal file
@@ -0,0 +1,275 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Benchmark runner script for testing multiple Nostr relay implementations
|
||||
# This script coordinates testing all relays and aggregates results
|
||||
|
||||
set -e
|
||||
|
||||
# Configuration from environment variables
|
||||
BENCHMARK_EVENTS="${BENCHMARK_EVENTS:-10000}"
|
||||
BENCHMARK_WORKERS="${BENCHMARK_WORKERS:-8}"
|
||||
BENCHMARK_DURATION="${BENCHMARK_DURATION:-60s}"
|
||||
BENCHMARK_TARGETS="${BENCHMARK_TARGETS:-next-orly:8080,khatru-sqlite:3334,khatru-badger:3334,relayer-basic:7447,strfry:8080,nostr-rs-relay:8080}"
|
||||
OUTPUT_DIR="${OUTPUT_DIR:-/reports}"
|
||||
|
||||
# Create output directory
|
||||
mkdir -p "${OUTPUT_DIR}"
|
||||
|
||||
# Generate timestamp for this benchmark run
|
||||
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
|
||||
RUN_DIR="${OUTPUT_DIR}/run_${TIMESTAMP}"
|
||||
mkdir -p "${RUN_DIR}"
|
||||
|
||||
echo "=================================================="
|
||||
echo "Nostr Relay Benchmark Suite"
|
||||
echo "=================================================="
|
||||
echo "Timestamp: $(date)"
|
||||
echo "Events per test: ${BENCHMARK_EVENTS}"
|
||||
echo "Concurrent workers: ${BENCHMARK_WORKERS}"
|
||||
echo "Test duration: ${BENCHMARK_DURATION}"
|
||||
echo "Output directory: ${RUN_DIR}"
|
||||
echo "=================================================="
|
||||
|
||||
# Function to wait for relay to be ready
|
||||
wait_for_relay() {
|
||||
local name="$1"
|
||||
local url="$2"
|
||||
local max_attempts=60
|
||||
local attempt=0
|
||||
|
||||
echo "Waiting for ${name} to be ready at ${url}..."
|
||||
|
||||
while [ $attempt -lt $max_attempts ]; do
|
||||
# Try wget first to obtain an HTTP status code
|
||||
local status=""
|
||||
status=$(wget --quiet --server-response --tries=1 --timeout=5 "http://${url}" 2>&1 | awk '/^ HTTP\//{print $2; exit}')
|
||||
|
||||
# Fallback to curl to obtain an HTTP status code
|
||||
if [ -z "$status" ]; then
|
||||
status=$(curl -s -o /dev/null -w "%{http_code}" --connect-timeout 5 --max-time 5 "http://${url}" || echo 000)
|
||||
fi
|
||||
|
||||
case "$status" in
|
||||
101|200|400|404|426)
|
||||
echo "${name} is ready! (HTTP ${status})"
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
|
||||
attempt=$((attempt + 1))
|
||||
echo " Attempt ${attempt}/${max_attempts}: ${name} not ready yet (HTTP ${status:-none})..."
|
||||
sleep 2
|
||||
done
|
||||
|
||||
echo "ERROR: ${name} failed to become ready after ${max_attempts} attempts"
|
||||
return 1
|
||||
}
|
||||
|
||||
# Function to run benchmark against a specific relay
|
||||
run_benchmark() {
|
||||
local relay_name="$1"
|
||||
local relay_url="$2"
|
||||
local output_file="$3"
|
||||
|
||||
echo ""
|
||||
echo "=================================================="
|
||||
echo "Testing ${relay_name} at ws://${relay_url}"
|
||||
echo "=================================================="
|
||||
|
||||
# Wait for relay to be ready
|
||||
if ! wait_for_relay "${relay_name}" "${relay_url}"; then
|
||||
echo "ERROR: ${relay_name} is not responding, skipping..."
|
||||
echo "RELAY: ${relay_name}" > "${output_file}"
|
||||
echo "STATUS: FAILED - Relay not responding" >> "${output_file}"
|
||||
echo "ERROR: Connection failed" >> "${output_file}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Run the benchmark
|
||||
echo "Running benchmark against ${relay_name}..."
|
||||
|
||||
# Create temporary directory for this relay's data
|
||||
TEMP_DATA_DIR="/tmp/benchmark_${relay_name}_$$"
|
||||
mkdir -p "${TEMP_DATA_DIR}"
|
||||
|
||||
# Run benchmark and capture both stdout and stderr
|
||||
if /app/benchmark \
|
||||
-datadir="${TEMP_DATA_DIR}" \
|
||||
-events="${BENCHMARK_EVENTS}" \
|
||||
-workers="${BENCHMARK_WORKERS}" \
|
||||
-duration="${BENCHMARK_DURATION}" \
|
||||
> "${output_file}" 2>&1; then
|
||||
|
||||
echo "✓ Benchmark completed successfully for ${relay_name}"
|
||||
|
||||
# Add relay identification to the report
|
||||
echo "" >> "${output_file}"
|
||||
echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
|
||||
echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
|
||||
echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
|
||||
echo "BENCHMARK_CONFIG:" >> "${output_file}"
|
||||
echo " Events: ${BENCHMARK_EVENTS}" >> "${output_file}"
|
||||
echo " Workers: ${BENCHMARK_WORKERS}" >> "${output_file}"
|
||||
echo " Duration: ${BENCHMARK_DURATION}" >> "${output_file}"
|
||||
|
||||
else
|
||||
echo "✗ Benchmark failed for ${relay_name}"
|
||||
echo "" >> "${output_file}"
|
||||
echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
|
||||
echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
|
||||
echo "STATUS: FAILED" >> "${output_file}"
|
||||
echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
|
||||
fi
|
||||
|
||||
# Clean up temporary data
|
||||
rm -rf "${TEMP_DATA_DIR}"
|
||||
}
|
||||
|
||||
# Function to generate aggregate report
|
||||
generate_aggregate_report() {
|
||||
local aggregate_file="${RUN_DIR}/aggregate_report.txt"
|
||||
|
||||
echo "Generating aggregate report..."
|
||||
|
||||
cat > "${aggregate_file}" << EOF
|
||||
================================================================
|
||||
NOSTR RELAY BENCHMARK AGGREGATE REPORT
|
||||
================================================================
|
||||
Generated: $(date -Iseconds)
|
||||
Benchmark Configuration:
|
||||
Events per test: ${BENCHMARK_EVENTS}
|
||||
Concurrent workers: ${BENCHMARK_WORKERS}
|
||||
Test duration: ${BENCHMARK_DURATION}
|
||||
|
||||
Relays tested: $(echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | wc -l)
|
||||
|
||||
================================================================
|
||||
SUMMARY BY RELAY
|
||||
================================================================
|
||||
|
||||
EOF
|
||||
|
||||
# Process each relay's results
|
||||
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
|
||||
if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
relay_file="${RUN_DIR}/${relay_name}_results.txt"
|
||||
|
||||
echo "Relay: ${relay_name}" >> "${aggregate_file}"
|
||||
echo "----------------------------------------" >> "${aggregate_file}"
|
||||
|
||||
if [ -f "${relay_file}" ]; then
|
||||
# Extract key metrics from the relay's report
|
||||
if grep -q "STATUS: FAILED" "${relay_file}"; then
|
||||
echo "Status: FAILED" >> "${aggregate_file}"
|
||||
grep "ERROR:" "${relay_file}" | head -1 >> "${aggregate_file}" || echo "Error: Unknown failure" >> "${aggregate_file}"
|
||||
else
|
||||
echo "Status: COMPLETED" >> "${aggregate_file}"
|
||||
|
||||
# Extract performance metrics
|
||||
grep "Events/sec:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
|
||||
grep "Success Rate:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
|
||||
grep "Avg Latency:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
|
||||
grep "P95 Latency:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
|
||||
grep "Memory:" "${relay_file}" | head -3 >> "${aggregate_file}" || true
|
||||
fi
|
||||
else
|
||||
echo "Status: NO RESULTS FILE" >> "${aggregate_file}"
|
||||
echo "Error: Results file not found" >> "${aggregate_file}"
|
||||
fi
|
||||
|
||||
echo "" >> "${aggregate_file}"
|
||||
done
|
||||
|
||||
cat >> "${aggregate_file}" << EOF
|
||||
|
||||
================================================================
|
||||
DETAILED RESULTS
|
||||
================================================================
|
||||
|
||||
Individual relay reports are available in:
|
||||
$(ls "${RUN_DIR}"/*_results.txt 2>/dev/null | sed 's|^| - |' || echo " No individual reports found")
|
||||
|
||||
================================================================
|
||||
BENCHMARK COMPARISON TABLE
|
||||
================================================================
|
||||
|
||||
EOF
|
||||
|
||||
# Create a comparison table
|
||||
printf "%-20s %-10s %-15s %-15s %-15s\n" "Relay" "Status" "Peak Tput/s" "Avg Latency" "Success Rate" >> "${aggregate_file}"
|
||||
printf "%-20s %-10s %-15s %-15s %-15s\n" "----" "------" "-----------" "-----------" "------------" >> "${aggregate_file}"
|
||||
|
||||
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
|
||||
if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
relay_file="${RUN_DIR}/${relay_name}_results.txt"
|
||||
|
||||
if [ -f "${relay_file}" ]; then
|
||||
if grep -q "STATUS: FAILED" "${relay_file}"; then
|
||||
printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "FAILED" "-" "-" "-" >> "${aggregate_file}"
|
||||
else
|
||||
# Extract metrics for the table
|
||||
peak_tput=$(grep "Events/sec:" "${relay_file}" | head -1 | awk '{print $2}' || echo "-")
|
||||
avg_latency=$(grep "Avg Latency:" "${relay_file}" | head -1 | awk '{print $3}' || echo "-")
|
||||
success_rate=$(grep "Success Rate:" "${relay_file}" | head -1 | awk '{print $3}' || echo "-")
|
||||
|
||||
printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "OK" "${peak_tput}" "${avg_latency}" "${success_rate}" >> "${aggregate_file}"
|
||||
fi
|
||||
else
|
||||
printf "%-20s %-10s %-15s %-15s %-15s\n" "${relay_name}" "NO DATA" "-" "-" "-" >> "${aggregate_file}"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "" >> "${aggregate_file}"
|
||||
echo "================================================================" >> "${aggregate_file}"
|
||||
echo "End of Report" >> "${aggregate_file}"
|
||||
echo "================================================================" >> "${aggregate_file}"
|
||||
}
|
||||
|
||||
# Main execution
|
||||
echo "Starting relay benchmark suite..."
|
||||
|
||||
# Parse targets and run benchmarks
|
||||
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
|
||||
if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
|
||||
echo "WARNING: Skipping invalid target: ${relay_name}:${relay_port}"
|
||||
continue
|
||||
fi
|
||||
|
||||
relay_url="${relay_name}:${relay_port}"
|
||||
output_file="${RUN_DIR}/${relay_name}_results.txt"
|
||||
|
||||
run_benchmark "${relay_name}" "${relay_url}" "${output_file}"
|
||||
|
||||
# Small delay between tests
|
||||
sleep 5
|
||||
done
|
||||
|
||||
# Generate aggregate report
|
||||
generate_aggregate_report
|
||||
|
||||
echo ""
|
||||
echo "=================================================="
|
||||
echo "Benchmark Suite Completed!"
|
||||
echo "=================================================="
|
||||
echo "Results directory: ${RUN_DIR}"
|
||||
echo "Aggregate report: ${RUN_DIR}/aggregate_report.txt"
|
||||
echo ""
|
||||
|
||||
# Display summary
|
||||
if [ -f "${RUN_DIR}/aggregate_report.txt" ]; then
|
||||
echo "Quick Summary:"
|
||||
echo "=============="
|
||||
grep -A 10 "BENCHMARK COMPARISON TABLE" "${RUN_DIR}/aggregate_report.txt" | tail -n +4
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "All benchmark files:"
|
||||
ls -la "${RUN_DIR}/"
|
||||
echo ""
|
||||
echo "Benchmark suite finished at: $(date)"
|
||||
36
cmd/benchmark/configs/config.toml
Normal file
@@ -0,0 +1,36 @@
|
||||
[info]
|
||||
relay_url = "ws://localhost:8080"
|
||||
name = "nostr-rs-relay benchmark"
|
||||
description = "A nostr-rs-relay for benchmarking"
|
||||
pubkey = ""
|
||||
contact = ""
|
||||
|
||||
[database]
|
||||
data_directory = "/data"
|
||||
in_memory = false
|
||||
engine = "sqlite"
|
||||
|
||||
[network]
|
||||
port = 8080
|
||||
address = "0.0.0.0"
|
||||
|
||||
[limits]
|
||||
messages_per_sec = 0
|
||||
subscriptions_per_min = 0
|
||||
max_event_bytes = 65535
|
||||
max_ws_message_bytes = 131072
|
||||
max_ws_frame_bytes = 131072
|
||||
|
||||
[authorization]
|
||||
pubkey_whitelist = []
|
||||
|
||||
[verified_users]
|
||||
mode = "passive"
|
||||
domain_whitelist = []
|
||||
domain_blacklist = []
|
||||
|
||||
[pay_to_relay]
|
||||
enabled = false
|
||||
|
||||
[options]
|
||||
reject_future_seconds = 30
|
||||
101
cmd/benchmark/configs/strfry.conf
Normal file
@@ -0,0 +1,101 @@
|
||||
##
|
||||
## Default strfry config
|
||||
##
|
||||
|
||||
# Directory that contains the strfry LMDB database (restart required)
|
||||
db = "/data/strfry.lmdb"
|
||||
|
||||
dbParams {
|
||||
# Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
|
||||
maxreaders = 256
|
||||
|
||||
# Size of mmap to use when loading LMDB (default is 1TB, which is probably reasonable) (restart required)
|
||||
mapsize = 1099511627776
|
||||
}
|
||||
|
||||
relay {
|
||||
# Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
|
||||
bind = "0.0.0.0"
|
||||
|
||||
# Port to open for the nostr websocket protocol (restart required)
|
||||
port = 8080
|
||||
|
||||
# Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
|
||||
nofiles = 1000000
|
||||
|
||||
# HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
|
||||
realIpHeader = ""
|
||||
|
||||
info {
|
||||
# NIP-11: Name of this server. Short/descriptive (< 30 characters)
|
||||
name = "strfry benchmark"
|
||||
|
||||
# NIP-11: Detailed description of this server, free-form
|
||||
description = "A strfry relay for benchmarking"
|
||||
|
||||
# NIP-11: Administrative pubkey, for contact purposes
|
||||
pubkey = ""
|
||||
|
||||
# NIP-11: Alternative contact for this server
|
||||
contact = ""
|
||||
}
|
||||
|
||||
# Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
|
||||
maxWebsocketPayloadSize = 131072
|
||||
|
||||
# Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
|
||||
autoPingSeconds = 55
|
||||
|
||||
# If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy) (restart required)
|
||||
enableTcpKeepalive = false
|
||||
|
||||
# How much uninterrupted CPU time a REQ query should get during its DB scan
|
||||
queryTimesliceBudgetMicroseconds = 10000
|
||||
|
||||
# Maximum records that can be returned per filter
|
||||
maxFilterLimit = 500
|
||||
|
||||
# Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
|
||||
maxSubsPerConnection = 20
|
||||
|
||||
writePolicy {
|
||||
# If non-empty, path to an executable script that implements the writePolicy plugin logic
|
||||
plugin = ""
|
||||
}
|
||||
|
||||
compression {
|
||||
# Use permessage-deflate compression if supported by client. Reduces bandwidth, but uses more CPU (restart required)
|
||||
enabled = true
|
||||
|
||||
# Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
|
||||
slidingWindow = true
|
||||
}
|
||||
|
||||
logging {
|
||||
# Dump all incoming messages
|
||||
dumpInAll = false
|
||||
|
||||
# Dump all incoming EVENT messages
|
||||
dumpInEvents = false
|
||||
|
||||
# Dump all incoming REQ/CLOSE messages
|
||||
dumpInReqs = false
|
||||
|
||||
# Log performance metrics for initial REQ database scans
|
||||
dbScanPerf = false
|
||||
}
|
||||
|
||||
numThreads {
|
||||
# Ingester threads: route incoming requests, validate events/sigs (restart required)
|
||||
ingester = 3
|
||||
|
||||
# reqWorker threads: Handle initial DB scan for events (restart required)
|
||||
reqWorker = 3
|
||||
|
||||
# reqMonitor threads: Handle filtering of new events (restart required)
|
||||
reqMonitor = 3
|
||||
|
||||
# yesstr threads: experimental yesstr protocol (restart required)
|
||||
yesstr = 1
|
||||
}
|
||||
}
|
||||
200
cmd/benchmark/docker-compose.yml
Normal file
@@ -0,0 +1,200 @@
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
# Next.orly.dev relay (this repository)
|
||||
next-orly:
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: cmd/benchmark/Dockerfile.next-orly
|
||||
container_name: benchmark-next-orly
|
||||
environment:
|
||||
- ORLY_DATA_DIR=/data
|
||||
- ORLY_LISTEN=0.0.0.0
|
||||
- ORLY_PORT=8080
|
||||
- ORLY_LOG_LEVEL=off
|
||||
volumes:
|
||||
- ./data/next-orly:/data
|
||||
ports:
|
||||
- "8001:8080"
|
||||
networks:
|
||||
- benchmark-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
|
||||
# Khatru with SQLite
|
||||
khatru-sqlite:
|
||||
build:
|
||||
context: ./external/khatru
|
||||
dockerfile: ../../Dockerfile.khatru-sqlite
|
||||
container_name: benchmark-khatru-sqlite
|
||||
environment:
|
||||
- DATABASE_TYPE=sqlite
|
||||
- DATABASE_PATH=/data/khatru.db
|
||||
volumes:
|
||||
- ./data/khatru-sqlite:/data
|
||||
ports:
|
||||
- "8002:3334"
|
||||
networks:
|
||||
- benchmark-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
|
||||
# Khatru with Badger
|
||||
khatru-badger:
|
||||
build:
|
||||
context: ./external/khatru
|
||||
dockerfile: ../../Dockerfile.khatru-badger
|
||||
container_name: benchmark-khatru-badger
|
||||
environment:
|
||||
- DATABASE_TYPE=badger
|
||||
- DATABASE_PATH=/data/badger
|
||||
volumes:
|
||||
- ./data/khatru-badger:/data
|
||||
ports:
|
||||
- "8003:3334"
|
||||
networks:
|
||||
- benchmark-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
|
||||
# Relayer basic example
|
||||
relayer-basic:
|
||||
build:
|
||||
context: ./external/relayer
|
||||
dockerfile: ../../Dockerfile.relayer-basic
|
||||
container_name: benchmark-relayer-basic
|
||||
environment:
|
||||
- POSTGRESQL_DATABASE=postgres://relayer:relayerpass@postgres:5432/relayerdb?sslmode=disable
|
||||
volumes:
|
||||
- ./data/relayer-basic:/data
|
||||
ports:
|
||||
- "8004:7447"
|
||||
networks:
|
||||
- benchmark-net
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://localhost:7447 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
|
||||
# Strfry
|
||||
strfry:
|
||||
image: ghcr.io/hoytech/strfry:latest
|
||||
container_name: benchmark-strfry
|
||||
environment:
|
||||
- STRFRY_DB_PATH=/data/strfry.lmdb
|
||||
- STRFRY_RELAY_PORT=8080
|
||||
volumes:
|
||||
- ./data/strfry:/data
|
||||
- ./configs/strfry.conf:/etc/strfry.conf
|
||||
ports:
|
||||
- "8005:8080"
|
||||
networks:
|
||||
- benchmark-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://127.0.0.1:8080 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404|426)' >/dev/null"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
|
||||
# Nostr-rs-relay
|
||||
nostr-rs-relay:
|
||||
build:
|
||||
context: ./external/nostr-rs-relay
|
||||
dockerfile: ../../Dockerfile.nostr-rs-relay
|
||||
container_name: benchmark-nostr-rs-relay
|
||||
environment:
|
||||
- RUST_LOG=info
|
||||
volumes:
|
||||
- ./data/nostr-rs-relay:/data
|
||||
- ./configs/config.toml:/app/config.toml
|
||||
ports:
|
||||
- "8006:8080"
|
||||
networks:
|
||||
- benchmark-net
|
||||
healthcheck:
|
||||
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
|
||||
# Benchmark runner
|
||||
benchmark-runner:
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: cmd/benchmark/Dockerfile.benchmark
|
||||
container_name: benchmark-runner
|
||||
depends_on:
|
||||
next-orly:
|
||||
condition: service_healthy
|
||||
khatru-sqlite:
|
||||
condition: service_healthy
|
||||
khatru-badger:
|
||||
condition: service_healthy
|
||||
relayer-basic:
|
||||
condition: service_healthy
|
||||
strfry:
|
||||
condition: service_healthy
|
||||
nostr-rs-relay:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- BENCHMARK_TARGETS=next-orly:8080,khatru-sqlite:3334,khatru-badger:3334,relayer-basic:7447,strfry:8080,nostr-rs-relay:8080
|
||||
- BENCHMARK_EVENTS=10000
|
||||
- BENCHMARK_WORKERS=8
|
||||
- BENCHMARK_DURATION=60s
|
||||
volumes:
|
||||
- ./reports:/reports
|
||||
networks:
|
||||
- benchmark-net
|
||||
command: >
|
||||
sh -c "
|
||||
echo 'Waiting for all relays to be ready...' &&
|
||||
sleep 30 &&
|
||||
echo 'Starting benchmark tests...' &&
|
||||
/app/benchmark-runner --output-dir=/reports
|
||||
"
|
||||
|
||||
# PostgreSQL for relayer-basic
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
container_name: benchmark-postgres
|
||||
environment:
|
||||
- POSTGRES_DB=relayerdb
|
||||
- POSTGRES_USER=relayer
|
||||
- POSTGRES_PASSWORD=relayerpass
|
||||
volumes:
|
||||
- ./data/postgres:/var/lib/postgresql/data
|
||||
networks:
|
||||
- benchmark-net
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U relayer -d relayerdb"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 20s
|
||||
|
||||
networks:
|
||||
benchmark-net:
|
||||
driver: bridge
|
||||
|
||||
volumes:
|
||||
benchmark-data:
|
||||
driver: local
|
||||
1201
cmd/benchmark/main.go
Normal file
File diff suppressed because it is too large
156
cmd/benchmark/profile.sh
Executable file
@@ -0,0 +1,156 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Runs the ORLY relay with CPU profiling enabled and opens the resulting
|
||||
# pprof profile in a local web UI.
|
||||
#
|
||||
# Usage:
|
||||
# ./profile.sh [duration_seconds]
|
||||
#
|
||||
# - Builds the relay.
|
||||
# - Starts it with ORLY_PPROF=cpu and minimal logging.
|
||||
# - Waits for the profile path printed at startup.
|
||||
# - Runs for DURATION seconds (default 10), then stops the relay to flush the
|
||||
# CPU profile to disk.
|
||||
# - Launches `go tool pprof -http=:8000` for convenient browsing.
|
||||
#
|
||||
# Notes:
|
||||
# - The profile file path is detected from the relay's stdout/stderr lines
|
||||
# emitted by github.com/pkg/profile, typically like:
|
||||
# profile: cpu profiling enabled, path: /tmp/profile123456/cpu.pprof
|
||||
# - You can change DURATION by passing a number of seconds as the first arg
|
||||
# or by setting DURATION env var.
|
||||
|
||||
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
|
||||
REPO_ROOT="$(cd -- "${SCRIPT_DIR}/../.." && pwd)"
|
||||
cd "$REPO_ROOT"
|
||||
|
||||
DURATION="${1:-${DURATION:-10}}"
|
||||
PPROF_HTTP_PORT="${PPROF_HTTP_PORT:-8000}"
|
||||
|
||||
# Load generation controls
|
||||
LOAD_ENABLED="${LOAD_ENABLED:-1}" # set to 0 to disable load
|
||||
# Use the benchmark main package in cmd/benchmark as the load generator
|
||||
BENCHMARK_PKG_DIR="$REPO_ROOT/cmd/benchmark"
|
||||
BENCHMARK_BIN="${BENCHMARK_BIN:-}" # if empty, we will build to $RUN_DIR/benchmark
|
||||
BENCHMARK_EVENTS="${BENCHMARK_EVENTS:-}" # optional override for -events
|
||||
BENCHMARK_DURATION="${BENCHMARK_DURATION:-}" # optional override for -duration (e.g. 30s); defaults to DURATION seconds
|
||||
|
||||
BIN="$REPO_ROOT/next.orly.dev"
|
||||
LOG_DIR="${LOG_DIR:-$REPO_ROOT/cmd/benchmark/reports}"
|
||||
mkdir -p "$LOG_DIR"
|
||||
RUN_TS="$(date +%Y%m%d_%H%M%S)"
|
||||
RUN_DIR="$LOG_DIR/profile_run_${RUN_TS}"
|
||||
mkdir -p "$RUN_DIR"
|
||||
LOG_FILE="$RUN_DIR/relay.log"
|
||||
LOAD_LOG_FILE="$RUN_DIR/load.log"
|
||||
|
||||
echo "[profile.sh] Building relay binary ..."
|
||||
go build -o "$BIN" .
|
||||
|
||||
# Ensure we clean up the child process on exit
|
||||
RELAY_PID=""
|
||||
LOAD_PID=""
|
||||
cleanup() {
|
||||
if [[ -n "$LOAD_PID" ]] && kill -0 "$LOAD_PID" 2>/dev/null; then
|
||||
echo "[profile.sh] Stopping load generator (pid=$LOAD_PID) ..."
|
||||
kill -INT "$LOAD_PID" 2>/dev/null || true
|
||||
sleep 0.5
|
||||
kill -TERM "$LOAD_PID" 2>/dev/null || true
|
||||
fi
|
||||
if [[ -n "$RELAY_PID" ]] && kill -0 "$RELAY_PID" 2>/dev/null; then
|
||||
echo "[profile.sh] Stopping relay (pid=$RELAY_PID) ..."
|
||||
kill -INT "$RELAY_PID" 2>/dev/null || true
|
||||
# give it a moment to exit and flush profile
|
||||
sleep 1
|
||||
kill -TERM "$RELAY_PID" 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
# Start the relay with CPU profiling enabled. Capture both stdout and stderr.
|
||||
echo "[profile.sh] Starting relay with CPU profiling enabled ..."
|
||||
(
|
||||
ORLY_LOG_LEVEL=off \
|
||||
ORLY_LISTEN="${ORLY_LISTEN:-127.0.0.1}" \
|
||||
ORLY_PORT="${ORLY_PORT:-3334}" \
|
||||
ORLY_PPROF=cpu \
|
||||
"$BIN"
|
||||
) >"$LOG_FILE" 2>&1 &
|
||||
RELAY_PID=$!
|
||||
echo "[profile.sh] Relay started with pid $RELAY_PID; logging to $LOG_FILE"
|
||||
|
||||
# Wait until the profile path is printed. Timeout after reasonable period.
|
||||
PPROF_FILE=""
|
||||
START_TIME=$(date +%s)
|
||||
TIMEOUT=30
|
||||
|
||||
echo "[profile.sh] Waiting for profile path to appear in relay output ..."
|
||||
while :; do
|
||||
if grep -Eo "/tmp/profile[^ ]+/cpu\.pprof" "$LOG_FILE" >/dev/null 2>&1; then
|
||||
PPROF_FILE=$(grep -Eo "/tmp/profile[^ ]+/cpu\.pprof" "$LOG_FILE" | tail -n1)
|
||||
break
|
||||
fi
|
||||
NOW=$(date +%s)
|
||||
if (( NOW - START_TIME > TIMEOUT )); then
|
||||
echo "[profile.sh] ERROR: Timed out waiting for profile path in $LOG_FILE" >&2
|
||||
echo "Last 50 log lines:" >&2
|
||||
tail -n 50 "$LOG_FILE" >&2
|
||||
exit 1
|
||||
fi
|
||||
sleep 0.3
|
||||
done
|
||||
|
||||
echo "[profile.sh] Detected profile file: $PPROF_FILE"
|
||||
|
||||
# Optionally start load generator to exercise the relay
|
||||
if [[ "$LOAD_ENABLED" == "1" ]]; then
|
||||
# Build benchmark binary if not provided
|
||||
if [[ -z "$BENCHMARK_BIN" ]]; then
|
||||
BENCHMARK_BIN="$RUN_DIR/benchmark"
|
||||
echo "[profile.sh] Building benchmark load generator ($BENCHMARK_PKG_DIR) ..."
|
||||
go build -o "$BENCHMARK_BIN" "$BENCHMARK_PKG_DIR"
|
||||
fi
|
||||
BENCH_DB_DIR="$RUN_DIR/benchdb"
|
||||
mkdir -p "$BENCH_DB_DIR"
|
||||
DURATION_ARG="${BENCHMARK_DURATION:-${DURATION}s}"
|
||||
EXTRA_EVENTS=""
|
||||
if [[ -n "$BENCHMARK_EVENTS" ]]; then
|
||||
EXTRA_EVENTS="-events=$BENCHMARK_EVENTS"
|
||||
fi
|
||||
echo "[profile.sh] Starting benchmark load generator for duration $DURATION_ARG ..."
|
||||
RELAY_URL="ws://${ORLY_LISTEN:-127.0.0.1}:${ORLY_PORT:-3334}"
|
||||
echo "[profile.sh] Using relay URL: $RELAY_URL"
|
||||
  # Run the load generator directly in the background; wrapping the `&` inside
  # a ( ... ) subshell would leave $! unset in this shell, so cleanup could not
  # stop the benchmark process.
  "$BENCHMARK_BIN" -relay-url="$RELAY_URL" -net-workers="${NET_WORKERS:-2}" -net-rate="${NET_RATE:-20}" -duration="$DURATION_ARG" $EXTRA_EVENTS \
    >"$LOAD_LOG_FILE" 2>&1 &
  LOAD_PID=$!
|
||||
echo "[profile.sh] Load generator started (pid=$LOAD_PID); logging to $LOAD_LOG_FILE"
|
||||
else
|
||||
echo "[profile.sh] LOAD_ENABLED=0; not starting load generator."
|
||||
fi
|
||||
|
||||
echo "[profile.sh] Letting the relay run for ${DURATION}s to collect CPU samples ..."
|
||||
sleep "$DURATION"
|
||||
|
||||
# Stop the relay to flush the CPU profile
|
||||
cleanup
|
||||
# Disable trap so we don't double-kill
|
||||
trap - EXIT
|
||||
|
||||
# Wait briefly to ensure the profile file is finalized
|
||||
for i in {1..20}; do
|
||||
if [[ -s "$PPROF_FILE" ]]; then
|
||||
break
|
||||
fi
|
||||
sleep 0.2
|
||||
done
|
||||
|
||||
if [[ ! -s "$PPROF_FILE" ]]; then
|
||||
echo "[profile.sh] WARNING: Profile file exists but is empty or missing: $PPROF_FILE" >&2
|
||||
fi
|
||||
|
||||
# Launch pprof HTTP UI
|
||||
echo "[profile.sh] Launching pprof web UI (http://localhost:${PPROF_HTTP_PORT}) ..."
|
||||
exec go tool pprof -http=":${PPROF_HTTP_PORT}" "$BIN" "$PPROF_FILE"
|
||||
140
cmd/benchmark/reports/run_20250920_101521/aggregate_report.txt
Normal file
@@ -0,0 +1,140 @@
|
||||
================================================================
|
||||
NOSTR RELAY BENCHMARK AGGREGATE REPORT
|
||||
================================================================
|
||||
Generated: 2025-09-20T11:04:39+00:00
|
||||
Benchmark Configuration:
|
||||
Events per test: 10000
|
||||
Concurrent workers: 8
|
||||
Test duration: 60s
|
||||
|
||||
Relays tested: 6
|
||||
|
||||
================================================================
|
||||
SUMMARY BY RELAY
|
||||
================================================================
|
||||
|
||||
Relay: next-orly
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 1035.42
|
||||
Events/sec: 659.20
|
||||
Events/sec: 1094.56
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 470.069µs
|
||||
Bottom 10% Avg Latency: 750.491µs
|
||||
Avg Latency: 190.573µs
|
||||
P95 Latency: 693.101µs
|
||||
P95 Latency: 289.761µs
|
||||
P95 Latency: 22.450848ms
|
||||
|
||||
Relay: khatru-sqlite
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 1105.61
|
||||
Events/sec: 624.87
|
||||
Events/sec: 1070.10
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 458.035µs
|
||||
Bottom 10% Avg Latency: 702.193µs
|
||||
Avg Latency: 193.997µs
|
||||
P95 Latency: 660.608µs
|
||||
P95 Latency: 302.666µs
|
||||
P95 Latency: 23.653412ms
|
||||
|
||||
Relay: khatru-badger
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 1040.11
|
||||
Events/sec: 663.14
|
||||
Events/sec: 1065.58
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 454.784µs
|
||||
Bottom 10% Avg Latency: 706.219µs
|
||||
Avg Latency: 193.914µs
|
||||
P95 Latency: 654.637µs
|
||||
P95 Latency: 296.525µs
|
||||
P95 Latency: 21.642655ms
|
||||
|
||||
Relay: relayer-basic
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 1104.88
|
||||
Events/sec: 642.17
|
||||
Events/sec: 1079.27
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 433.89µs
|
||||
Bottom 10% Avg Latency: 653.813µs
|
||||
Avg Latency: 186.306µs
|
||||
P95 Latency: 617.868µs
|
||||
P95 Latency: 279.192µs
|
||||
P95 Latency: 21.247322ms
|
||||
|
||||
Relay: strfry
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 1090.49
|
||||
Events/sec: 652.03
|
||||
Events/sec: 1098.57
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 448.058µs
|
||||
Bottom 10% Avg Latency: 729.464µs
|
||||
Avg Latency: 189.06µs
|
||||
P95 Latency: 667.141µs
|
||||
P95 Latency: 290.433µs
|
||||
P95 Latency: 20.822884ms
|
||||
|
||||
Relay: nostr-rs-relay
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 1123.91
|
||||
Events/sec: 647.62
|
||||
Events/sec: 1033.64
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 416.753µs
|
||||
Bottom 10% Avg Latency: 638.318µs
|
||||
Avg Latency: 185.217µs
|
||||
P95 Latency: 597.338µs
|
||||
P95 Latency: 273.191µs
|
||||
P95 Latency: 22.416221ms
|
||||
|
||||
|
||||
================================================================
|
||||
DETAILED RESULTS
|
||||
================================================================
|
||||
|
||||
Individual relay reports are available in:
|
||||
- /reports/run_20250920_101521/khatru-badger_results.txt
|
||||
- /reports/run_20250920_101521/khatru-sqlite_results.txt
|
||||
- /reports/run_20250920_101521/next-orly_results.txt
|
||||
- /reports/run_20250920_101521/nostr-rs-relay_results.txt
|
||||
- /reports/run_20250920_101521/relayer-basic_results.txt
|
||||
- /reports/run_20250920_101521/strfry_results.txt
|
||||
|
||||
================================================================
|
||||
BENCHMARK COMPARISON TABLE
|
||||
================================================================
|
||||
|
||||
Relay Status Peak Tput/s Avg Latency Success Rate
|
||||
---- ------ ----------- ----------- ------------
|
||||
next-orly OK 1035.42 470.069µs 100.0%
|
||||
khatru-sqlite OK 1105.61 458.035µs 100.0%
|
||||
khatru-badger OK 1040.11 454.784µs 100.0%
|
||||
relayer-basic OK 1104.88 433.89µs 100.0%
|
||||
strfry OK 1090.49 448.058µs 100.0%
|
||||
nostr-rs-relay OK 1123.91 416.753µs 100.0%
|
||||
|
||||
================================================================
|
||||
End of Report
|
||||
================================================================
|
||||
@@ -0,0 +1,298 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_khatru-badger_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
1758364309339505ℹ️/tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
1758364309340007ℹ️/tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
1758364309340039ℹ️/tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
1758364309340327ℹ️(*types.Uint32)(0xc000147840)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
1758364309340465ℹ️migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 9.614321551s
|
||||
Events/sec: 1040.11
|
||||
Avg latency: 454.784µs
|
||||
P90 latency: 596.266µs
|
||||
P95 latency: 654.637µs
|
||||
P99 latency: 844.569µs
|
||||
Bottom 10% Avg latency: 706.219µs
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 136.444875ms
|
||||
Burst completed: 1000 events in 141.806497ms
|
||||
Burst completed: 1000 events in 168.991278ms
|
||||
Burst completed: 1000 events in 167.713425ms
|
||||
Burst completed: 1000 events in 162.89698ms
|
||||
Burst completed: 1000 events in 157.775164ms
|
||||
Burst completed: 1000 events in 166.476709ms
|
||||
Burst completed: 1000 events in 161.742632ms
|
||||
Burst completed: 1000 events in 162.138977ms
|
||||
Burst completed: 1000 events in 156.657194ms
|
||||
Burst test completed: 10000 events in 15.07982611s
|
||||
Events/sec: 663.14
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 44.903267299s
|
||||
Combined ops/sec: 222.70
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 3166 queries in 1m0.104195004s
|
||||
Queries/sec: 52.68
|
||||
Avg query latency: 125.847553ms
|
||||
P95 query latency: 148.109766ms
|
||||
P99 query latency: 212.054697ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 11366 operations (1366 queries, 10000 writes) in 1m0.127232573s
|
||||
Operations/sec: 189.03
|
||||
Avg latency: 16.671438ms
|
||||
Avg query latency: 134.993072ms
|
||||
Avg write latency: 508.703µs
|
||||
P95 latency: 133.755996ms
|
||||
P99 latency: 152.790563ms
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 9.384548186s
|
||||
Events/sec: 1065.58
|
||||
Avg latency: 566.375µs
|
||||
P90 latency: 738.377µs
|
||||
P95 latency: 839.679µs
|
||||
P99 latency: 1.131084ms
|
||||
Bottom 10% Avg latency: 1.312791ms
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 166.832259ms
|
||||
Burst completed: 1000 events in 175.061575ms
|
||||
Burst completed: 1000 events in 168.897493ms
|
||||
Burst completed: 1000 events in 167.584171ms
|
||||
Burst completed: 1000 events in 178.212526ms
|
||||
Burst completed: 1000 events in 202.208945ms
|
||||
Burst completed: 1000 events in 154.130024ms
|
||||
Burst completed: 1000 events in 168.817721ms
|
||||
Burst completed: 1000 events in 153.032223ms
|
||||
Burst completed: 1000 events in 154.799008ms
|
||||
Burst test completed: 10000 events in 15.449161726s
|
||||
Events/sec: 647.28
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4582 reads in 1m0.037041762s
|
||||
Combined ops/sec: 159.60
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 959 queries in 1m0.42440735s
|
||||
Queries/sec: 15.87
|
||||
Avg query latency: 418.846875ms
|
||||
P95 query latency: 473.089327ms
|
||||
P99 query latency: 650.467474ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 10484 operations (484 queries, 10000 writes) in 1m0.283590079s
|
||||
Operations/sec: 173.91
|
||||
Avg latency: 17.921964ms
|
||||
Avg query latency: 381.041592ms
|
||||
Avg write latency: 346.974µs
|
||||
P95 latency: 1.269749ms
|
||||
P99 latency: 399.015222ms
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 9.614321551s
|
||||
Total Events: 10000
|
||||
Events/sec: 1040.11
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 118 MB
|
||||
Avg Latency: 454.784µs
|
||||
P90 Latency: 596.266µs
|
||||
P95 Latency: 654.637µs
|
||||
P99 Latency: 844.569µs
|
||||
Bottom 10% Avg Latency: 706.219µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 15.07982611s
|
||||
Total Events: 10000
|
||||
Events/sec: 663.14
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 162 MB
|
||||
Avg Latency: 193.914µs
|
||||
P90 Latency: 255.617µs
|
||||
P95 Latency: 296.525µs
|
||||
P99 Latency: 451.81µs
|
||||
Bottom 10% Avg Latency: 343.222µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 44.903267299s
|
||||
Total Events: 10000
|
||||
Events/sec: 222.70
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 121 MB
|
||||
Avg Latency: 9.145633ms
|
||||
P90 Latency: 19.946513ms
|
||||
P95 Latency: 21.642655ms
|
||||
P99 Latency: 23.951572ms
|
||||
Bottom 10% Avg Latency: 21.861602ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.104195004s
|
||||
Total Events: 3166
|
||||
Events/sec: 52.68
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 188 MB
|
||||
Avg Latency: 125.847553ms
|
||||
P90 Latency: 140.664966ms
|
||||
P95 Latency: 148.109766ms
|
||||
P99 Latency: 212.054697ms
|
||||
Bottom 10% Avg Latency: 164.089129ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.127232573s
|
||||
Total Events: 11366
|
||||
Events/sec: 189.03
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 112 MB
|
||||
Avg Latency: 16.671438ms
|
||||
P90 Latency: 122.627849ms
|
||||
P95 Latency: 133.755996ms
|
||||
P99 Latency: 152.790563ms
|
||||
Bottom 10% Avg Latency: 138.087104ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 9.384548186s
|
||||
Total Events: 10000
|
||||
Events/sec: 1065.58
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 1441 MB
|
||||
Avg Latency: 566.375µs
|
||||
P90 Latency: 738.377µs
|
||||
P95 Latency: 839.679µs
|
||||
P99 Latency: 1.131084ms
|
||||
Bottom 10% Avg Latency: 1.312791ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 15.449161726s
|
||||
Total Events: 10000
|
||||
Events/sec: 647.28
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 165 MB
|
||||
Avg Latency: 186.353µs
|
||||
P90 Latency: 243.413µs
|
||||
P95 Latency: 283.06µs
|
||||
P99 Latency: 440.76µs
|
||||
Bottom 10% Avg Latency: 324.151µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.037041762s
|
||||
Total Events: 9582
|
||||
Events/sec: 159.60
|
||||
Success Rate: 95.8%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 138 MB
|
||||
Avg Latency: 16.358228ms
|
||||
P90 Latency: 37.654373ms
|
||||
P95 Latency: 40.578604ms
|
||||
P99 Latency: 46.331181ms
|
||||
Bottom 10% Avg Latency: 41.76124ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.42440735s
|
||||
Total Events: 959
|
||||
Events/sec: 15.87
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 110 MB
|
||||
Avg Latency: 418.846875ms
|
||||
P90 Latency: 448.809017ms
|
||||
P95 Latency: 473.089327ms
|
||||
P99 Latency: 650.467474ms
|
||||
Bottom 10% Avg Latency: 518.112626ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.283590079s
|
||||
Total Events: 10484
|
||||
Events/sec: 173.91
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 205 MB
|
||||
Avg Latency: 17.921964ms
|
||||
P90 Latency: 582.319µs
|
||||
P95 Latency: 1.269749ms
|
||||
P99 Latency: 399.015222ms
|
||||
Bottom 10% Avg Latency: 176.257001ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.adoc
|
||||
1758364794792663ℹ️/tmp/benchmark_khatru-badger_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
1758364796617126ℹ️/tmp/benchmark_khatru-badger_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
1758364796621659ℹ️/tmp/benchmark_khatru-badger_8: database closed /build/pkg/database/database.go:134

RELAY_NAME: khatru-badger
RELAY_URL: ws://khatru-badger:3334
TEST_TIMESTAMP: 2025-09-20T10:39:56+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
298
cmd/benchmark/reports/run_20250920_101521/khatru-sqlite_results.txt
Normal file
@@ -0,0 +1,298 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_khatru-sqlite_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
1758363814412229ℹ️/tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
1758363814412803ℹ️/tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
1758363814412840ℹ️/tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
1758363814413123ℹ️(*types.Uint32)(0xc0001ea00c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
1758363814413200ℹ️migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 9.044789549s
|
||||
Events/sec: 1105.61
|
||||
Avg latency: 458.035µs
|
||||
P90 latency: 601.736µs
|
||||
P95 latency: 660.608µs
|
||||
P99 latency: 844.108µs
|
||||
Bottom 10% Avg latency: 702.193µs
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 146.610877ms
|
||||
Burst completed: 1000 events in 179.229665ms
|
||||
Burst completed: 1000 events in 157.096919ms
|
||||
Burst completed: 1000 events in 164.796374ms
|
||||
Burst completed: 1000 events in 188.464354ms
|
||||
Burst completed: 1000 events in 196.529596ms
|
||||
Burst completed: 1000 events in 169.425581ms
|
||||
Burst completed: 1000 events in 147.99354ms
|
||||
Burst completed: 1000 events in 157.996252ms
|
||||
Burst completed: 1000 events in 167.299262ms
|
||||
Burst test completed: 10000 events in 16.003207139s
|
||||
Events/sec: 624.87
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 46.924555793s
|
||||
Combined ops/sec: 213.11
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 3052 queries in 1m0.102264s
|
||||
Queries/sec: 50.78
|
||||
Avg query latency: 128.464192ms
|
||||
P95 query latency: 148.086431ms
|
||||
P99 query latency: 219.275394ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 11296 operations (1296 queries, 10000 writes) in 1m0.108871986s
|
||||
Operations/sec: 187.93
|
||||
Avg latency: 16.71621ms
|
||||
Avg query latency: 142.320434ms
|
||||
Avg write latency: 437.903µs
|
||||
P95 latency: 141.357185ms
|
||||
P99 latency: 163.50992ms
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 9.344884331s
|
||||
Events/sec: 1070.10
|
||||
Avg latency: 578.453µs
|
||||
P90 latency: 742.585µs
|
||||
P95 latency: 849.679µs
|
||||
P99 latency: 1.122058ms
|
||||
Bottom 10% Avg latency: 1.362355ms
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 185.472655ms
|
||||
Burst completed: 1000 events in 194.135516ms
|
||||
Burst completed: 1000 events in 176.056931ms
|
||||
Burst completed: 1000 events in 161.500315ms
|
||||
Burst completed: 1000 events in 157.673837ms
|
||||
Burst completed: 1000 events in 167.130208ms
|
||||
Burst completed: 1000 events in 182.164655ms
|
||||
Burst completed: 1000 events in 156.589581ms
|
||||
Burst completed: 1000 events in 154.419949ms
|
||||
Burst completed: 1000 events in 158.445927ms
|
||||
Burst test completed: 10000 events in 15.587711126s
|
||||
Events/sec: 641.53
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4405 reads in 1m0.043842569s
|
||||
Combined ops/sec: 156.64
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 915 queries in 1m0.3452177s
|
||||
Queries/sec: 15.16
|
||||
Avg query latency: 435.125142ms
|
||||
P95 query latency: 520.311963ms
|
||||
P99 query latency: 618.85899ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 10489 operations (489 queries, 10000 writes) in 1m0.27235761s
|
||||
Operations/sec: 174.03
|
||||
Avg latency: 18.043774ms
|
||||
Avg query latency: 379.681531ms
|
||||
Avg write latency: 359.688µs
|
||||
P95 latency: 1.316628ms
|
||||
P99 latency: 400.223248ms
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 9.044789549s
|
||||
Total Events: 10000
|
||||
Events/sec: 1105.61
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 144 MB
|
||||
Avg Latency: 458.035µs
|
||||
P90 Latency: 601.736µs
|
||||
P95 Latency: 660.608µs
|
||||
P99 Latency: 844.108µs
|
||||
Bottom 10% Avg Latency: 702.193µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 16.003207139s
|
||||
Total Events: 10000
|
||||
Events/sec: 624.87
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 89 MB
|
||||
Avg Latency: 193.997µs
|
||||
P90 Latency: 261.969µs
|
||||
P95 Latency: 302.666µs
|
||||
P99 Latency: 431.933µs
|
||||
Bottom 10% Avg Latency: 334.383µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 46.924555793s
|
||||
Total Events: 10000
|
||||
Events/sec: 213.11
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 96 MB
|
||||
Avg Latency: 9.781737ms
|
||||
P90 Latency: 21.91971ms
|
||||
P95 Latency: 23.653412ms
|
||||
P99 Latency: 27.511972ms
|
||||
Bottom 10% Avg Latency: 24.396695ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.102264s
|
||||
Total Events: 3052
|
||||
Events/sec: 50.78
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 209 MB
|
||||
Avg Latency: 128.464192ms
|
||||
P90 Latency: 142.195039ms
|
||||
P95 Latency: 148.086431ms
|
||||
P99 Latency: 219.275394ms
|
||||
Bottom 10% Avg Latency: 162.874217ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.108871986s
|
||||
Total Events: 11296
|
||||
Events/sec: 187.93
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 159 MB
|
||||
Avg Latency: 16.71621ms
|
||||
P90 Latency: 127.287246ms
|
||||
P95 Latency: 141.357185ms
|
||||
P99 Latency: 163.50992ms
|
||||
Bottom 10% Avg Latency: 145.199189ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 9.344884331s
|
||||
Total Events: 10000
|
||||
Events/sec: 1070.10
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 1441 MB
|
||||
Avg Latency: 578.453µs
|
||||
P90 Latency: 742.585µs
|
||||
P95 Latency: 849.679µs
|
||||
P99 Latency: 1.122058ms
|
||||
Bottom 10% Avg Latency: 1.362355ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 15.587711126s
|
||||
Total Events: 10000
|
||||
Events/sec: 641.53
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 141 MB
|
||||
Avg Latency: 190.235µs
|
||||
P90 Latency: 254.795µs
|
||||
P95 Latency: 290.563µs
|
||||
P99 Latency: 437.323µs
|
||||
Bottom 10% Avg Latency: 328.752µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.043842569s
|
||||
Total Events: 9405
|
||||
Events/sec: 156.64
|
||||
Success Rate: 94.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 105 MB
|
||||
Avg Latency: 16.852438ms
|
||||
P90 Latency: 39.677855ms
|
||||
P95 Latency: 42.553634ms
|
||||
P99 Latency: 48.262077ms
|
||||
Bottom 10% Avg Latency: 43.994063ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.3452177s
|
||||
Total Events: 915
|
||||
Events/sec: 15.16
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 157 MB
|
||||
Avg Latency: 435.125142ms
|
||||
P90 Latency: 482.304439ms
|
||||
P95 Latency: 520.311963ms
|
||||
P99 Latency: 618.85899ms
|
||||
Bottom 10% Avg Latency: 545.670939ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.27235761s
|
||||
Total Events: 10489
|
||||
Events/sec: 174.03
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 132 MB
|
||||
Avg Latency: 18.043774ms
|
||||
P90 Latency: 583.962µs
|
||||
P95 Latency: 1.316628ms
|
||||
P99 Latency: 400.223248ms
|
||||
Bottom 10% Avg Latency: 177.440946ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc
|
||||
1758364302230610ℹ️/tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
1758364304057942ℹ️/tmp/benchmark_khatru-sqlite_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
1758364304063521ℹ️/tmp/benchmark_khatru-sqlite_8: database closed /build/pkg/database/database.go:134

RELAY_NAME: khatru-sqlite
RELAY_URL: ws://khatru-sqlite:3334
TEST_TIMESTAMP: 2025-09-20T10:31:44+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
298
cmd/benchmark/reports/run_20250920_101521/next-orly_results.txt
Normal file
@@ -0,0 +1,298 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_next-orly_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
1758363321263384ℹ️/tmp/benchmark_next-orly_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
1758363321263864ℹ️/tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
1758363321263887ℹ️/tmp/benchmark_next-orly_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
1758363321264128ℹ️(*types.Uint32)(0xc0001f7ffc)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
1758363321264177ℹ️migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 9.657904043s
|
||||
Events/sec: 1035.42
|
||||
Avg latency: 470.069µs
|
||||
P90 latency: 628.167µs
|
||||
P95 latency: 693.101µs
|
||||
P99 latency: 922.357µs
|
||||
Bottom 10% Avg latency: 750.491µs
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 175.034134ms
|
||||
Burst completed: 1000 events in 150.401771ms
|
||||
Burst completed: 1000 events in 168.992305ms
|
||||
Burst completed: 1000 events in 179.447581ms
|
||||
Burst completed: 1000 events in 165.602457ms
|
||||
Burst completed: 1000 events in 178.649561ms
|
||||
Burst completed: 1000 events in 195.002303ms
|
||||
Burst completed: 1000 events in 168.970954ms
|
||||
Burst completed: 1000 events in 150.818413ms
|
||||
Burst completed: 1000 events in 185.285662ms
|
||||
Burst test completed: 10000 events in 15.169978801s
|
||||
Events/sec: 659.20
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 45.597478865s
|
||||
Combined ops/sec: 219.31
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 3151 queries in 1m0.067849757s
|
||||
Queries/sec: 52.46
|
||||
Avg query latency: 126.38548ms
|
||||
P95 query latency: 149.976367ms
|
||||
P99 query latency: 205.807461ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 11325 operations (1325 queries, 10000 writes) in 1m0.081967157s
|
||||
Operations/sec: 188.49
|
||||
Avg latency: 16.694154ms
|
||||
Avg query latency: 139.524748ms
|
||||
Avg write latency: 419.1µs
|
||||
P95 latency: 138.688202ms
|
||||
P99 latency: 158.824742ms
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 9.136097148s
|
||||
Events/sec: 1094.56
|
||||
Avg latency: 510.7µs
|
||||
P90 latency: 636.763µs
|
||||
P95 latency: 705.564µs
|
||||
P99 latency: 922.777µs
|
||||
Bottom 10% Avg latency: 1.094965ms
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 176.337148ms
|
||||
Burst completed: 1000 events in 177.351251ms
|
||||
Burst completed: 1000 events in 181.515292ms
|
||||
Burst completed: 1000 events in 164.043866ms
|
||||
Burst completed: 1000 events in 152.697196ms
|
||||
Burst completed: 1000 events in 144.231922ms
|
||||
Burst completed: 1000 events in 162.606659ms
|
||||
Burst completed: 1000 events in 137.485182ms
|
||||
Burst completed: 1000 events in 163.19487ms
|
||||
Burst completed: 1000 events in 147.900339ms
|
||||
Burst test completed: 10000 events in 15.514130113s
|
||||
Events/sec: 644.57
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4489 reads in 1m0.036174989s
|
||||
Combined ops/sec: 158.05
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 900 queries in 1m0.304636826s
|
||||
Queries/sec: 14.92
|
||||
Avg query latency: 444.57989ms
|
||||
P95 query latency: 547.598358ms
|
||||
P99 query latency: 660.926147ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 10462 operations (462 queries, 10000 writes) in 1m0.362856212s
|
||||
Operations/sec: 173.32
|
||||
Avg latency: 17.808607ms
|
||||
Avg query latency: 395.594177ms
|
||||
Avg write latency: 354.914µs
|
||||
P95 latency: 1.221657ms
|
||||
P99 latency: 411.642669ms
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 9.657904043s
|
||||
Total Events: 10000
|
||||
Events/sec: 1035.42
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 144 MB
|
||||
Avg Latency: 470.069µs
|
||||
P90 Latency: 628.167µs
|
||||
P95 Latency: 693.101µs
|
||||
P99 Latency: 922.357µs
|
||||
Bottom 10% Avg Latency: 750.491µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 15.169978801s
|
||||
Total Events: 10000
|
||||
Events/sec: 659.20
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 135 MB
|
||||
Avg Latency: 190.573µs
|
||||
P90 Latency: 252.701µs
|
||||
P95 Latency: 289.761µs
|
||||
P99 Latency: 408.147µs
|
||||
Bottom 10% Avg Latency: 316.797µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 45.597478865s
|
||||
Total Events: 10000
|
||||
Events/sec: 219.31
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 119 MB
|
||||
Avg Latency: 9.381158ms
|
||||
P90 Latency: 20.487026ms
|
||||
P95 Latency: 22.450848ms
|
||||
P99 Latency: 24.696325ms
|
||||
Bottom 10% Avg Latency: 22.632933ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.067849757s
|
||||
Total Events: 3151
|
||||
Events/sec: 52.46
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 145 MB
|
||||
Avg Latency: 126.38548ms
|
||||
P90 Latency: 142.39268ms
|
||||
P95 Latency: 149.976367ms
|
||||
P99 Latency: 205.807461ms
|
||||
Bottom 10% Avg Latency: 162.636454ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.081967157s
|
||||
Total Events: 11325
|
||||
Events/sec: 188.49
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 194 MB
|
||||
Avg Latency: 16.694154ms
|
||||
P90 Latency: 125.314618ms
|
||||
P95 Latency: 138.688202ms
|
||||
P99 Latency: 158.824742ms
|
||||
Bottom 10% Avg Latency: 142.699977ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 9.136097148s
|
||||
Total Events: 10000
|
||||
Events/sec: 1094.56
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 1441 MB
|
||||
Avg Latency: 510.7µs
|
||||
P90 Latency: 636.763µs
|
||||
P95 Latency: 705.564µs
|
||||
P99 Latency: 922.777µs
|
||||
Bottom 10% Avg Latency: 1.094965ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 15.514130113s
|
||||
Total Events: 10000
|
||||
Events/sec: 644.57
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 138 MB
|
||||
Avg Latency: 230.062µs
|
||||
P90 Latency: 316.624µs
|
||||
P95 Latency: 389.882µs
|
||||
P99 Latency: 859.548µs
|
||||
Bottom 10% Avg Latency: 529.836µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.036174989s
|
||||
Total Events: 9489
|
||||
Events/sec: 158.05
|
||||
Success Rate: 94.9%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 182 MB
|
||||
Avg Latency: 16.56372ms
|
||||
P90 Latency: 38.24931ms
|
||||
P95 Latency: 41.187306ms
|
||||
P99 Latency: 46.02529ms
|
||||
Bottom 10% Avg Latency: 42.131189ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.304636826s
|
||||
Total Events: 900
|
||||
Events/sec: 14.92
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 141 MB
|
||||
Avg Latency: 444.57989ms
|
||||
P90 Latency: 490.730651ms
|
||||
P95 Latency: 547.598358ms
|
||||
P99 Latency: 660.926147ms
|
||||
Bottom 10% Avg Latency: 563.628707ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.362856212s
|
||||
Total Events: 10462
|
||||
Events/sec: 173.32
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 152 MB
|
||||
Avg Latency: 17.808607ms
|
||||
P90 Latency: 631.703µs
|
||||
P95 Latency: 1.221657ms
|
||||
P99 Latency: 411.642669ms
|
||||
Bottom 10% Avg Latency: 175.052418ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_next-orly_8/benchmark_report.adoc
|
||||
1758363807245770ℹ️/tmp/benchmark_next-orly_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
1758363809118416ℹ️/tmp/benchmark_next-orly_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
1758363809123697ℹ️/tmp/benchmark_next-orly_8: database closed /build/pkg/database/database.go:134

RELAY_NAME: next-orly
RELAY_URL: ws://next-orly:8080
TEST_TIMESTAMP: 2025-09-20T10:23:29+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
298
cmd/benchmark/reports/run_20250920_101521/nostr-rs-relay_results.txt
Normal file
@@ -0,0 +1,298 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_nostr-rs-relay_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
1758365785928076ℹ️/tmp/benchmark_nostr-rs-relay_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
1758365785929028ℹ️/tmp/benchmark_nostr-rs-relay_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
1758365785929097ℹ️/tmp/benchmark_nostr-rs-relay_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
1758365785929509ℹ️(*types.Uint32)(0xc0001c820c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
1758365785929573ℹ️migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 8.897492256s
|
||||
Events/sec: 1123.91
|
||||
Avg latency: 416.753µs
|
||||
P90 latency: 546.351µs
|
||||
P95 latency: 597.338µs
|
||||
P99 latency: 760.549µs
|
||||
Bottom 10% Avg latency: 638.318µs
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 158.263016ms
|
||||
Burst completed: 1000 events in 181.558983ms
|
||||
Burst completed: 1000 events in 155.219861ms
|
||||
Burst completed: 1000 events in 183.834156ms
|
||||
Burst completed: 1000 events in 192.398437ms
|
||||
Burst completed: 1000 events in 176.450074ms
|
||||
Burst completed: 1000 events in 175.050138ms
|
||||
Burst completed: 1000 events in 178.883047ms
|
||||
Burst completed: 1000 events in 180.74321ms
|
||||
Burst completed: 1000 events in 169.39146ms
|
||||
Burst test completed: 10000 events in 15.441062872s
|
||||
Events/sec: 647.62
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 45.847091984s
|
||||
Combined ops/sec: 218.12
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 3229 queries in 1m0.085047549s
|
||||
Queries/sec: 53.74
|
||||
Avg query latency: 123.209617ms
|
||||
P95 query latency: 141.745618ms
|
||||
P99 query latency: 154.527843ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 11298 operations (1298 queries, 10000 writes) in 1m0.096751583s
|
||||
Operations/sec: 188.00
|
||||
Avg latency: 16.447175ms
|
||||
Avg query latency: 139.791065ms
|
||||
Avg write latency: 437.138µs
|
||||
P95 latency: 137.879538ms
|
||||
P99 latency: 162.020385ms
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 9.674593819s
|
||||
Events/sec: 1033.64
|
||||
Avg latency: 541.545µs
|
||||
P90 latency: 693.862µs
|
||||
P95 latency: 775.757µs
|
||||
P99 latency: 1.05005ms
|
||||
Bottom 10% Avg latency: 1.219386ms
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 168.056064ms
|
||||
Burst completed: 1000 events in 159.819647ms
|
||||
Burst completed: 1000 events in 147.500264ms
|
||||
Burst completed: 1000 events in 159.150392ms
|
||||
Burst completed: 1000 events in 149.954829ms
|
||||
Burst completed: 1000 events in 138.082938ms
|
||||
Burst completed: 1000 events in 157.234213ms
|
||||
Burst completed: 1000 events in 158.468955ms
|
||||
Burst completed: 1000 events in 144.346047ms
|
||||
Burst completed: 1000 events in 154.930576ms
|
||||
Burst test completed: 10000 events in 15.646785427s
|
||||
Events/sec: 639.11
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4415 reads in 1m0.02899167s
|
||||
Combined ops/sec: 156.84
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 890 queries in 1m0.279192867s
|
||||
Queries/sec: 14.76
|
||||
Avg query latency: 448.809547ms
|
||||
P95 query latency: 607.28509ms
|
||||
P99 query latency: 786.387053ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 10469 operations (469 queries, 10000 writes) in 1m0.190785048s
|
||||
Operations/sec: 173.93
|
||||
Avg latency: 17.73903ms
|
||||
Avg query latency: 388.59336ms
|
||||
Avg write latency: 345.962µs
|
||||
P95 latency: 1.158136ms
|
||||
P99 latency: 407.947907ms
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 8.897492256s
|
||||
Total Events: 10000
|
||||
Events/sec: 1123.91
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 132 MB
|
||||
Avg Latency: 416.753µs
|
||||
P90 Latency: 546.351µs
|
||||
P95 Latency: 597.338µs
|
||||
P99 Latency: 760.549µs
|
||||
Bottom 10% Avg Latency: 638.318µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 15.441062872s
|
||||
Total Events: 10000
|
||||
Events/sec: 647.62
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 104 MB
|
||||
Avg Latency: 185.217µs
|
||||
P90 Latency: 241.64µs
|
||||
P95 Latency: 273.191µs
|
||||
P99 Latency: 412.897µs
|
||||
Bottom 10% Avg Latency: 306.752µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 45.847091984s
|
||||
Total Events: 10000
|
||||
Events/sec: 218.12
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 96 MB
|
||||
Avg Latency: 9.446215ms
|
||||
P90 Latency: 20.522135ms
|
||||
P95 Latency: 22.416221ms
|
||||
P99 Latency: 24.696283ms
|
||||
Bottom 10% Avg Latency: 22.59535ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.085047549s
|
||||
Total Events: 3229
|
||||
Events/sec: 53.74
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 175 MB
|
||||
Avg Latency: 123.209617ms
|
||||
P90 Latency: 137.629898ms
|
||||
P95 Latency: 141.745618ms
|
||||
P99 Latency: 154.527843ms
|
||||
Bottom 10% Avg Latency: 145.245967ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.096751583s
|
||||
Total Events: 11298
|
||||
Events/sec: 188.00
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 181 MB
|
||||
Avg Latency: 16.447175ms
|
||||
P90 Latency: 123.920421ms
|
||||
P95 Latency: 137.879538ms
|
||||
P99 Latency: 162.020385ms
|
||||
Bottom 10% Avg Latency: 142.654147ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 9.674593819s
|
||||
Total Events: 10000
|
||||
Events/sec: 1033.64
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 1441 MB
|
||||
Avg Latency: 541.545µs
|
||||
P90 Latency: 693.862µs
|
||||
P95 Latency: 775.757µs
|
||||
P99 Latency: 1.05005ms
|
||||
Bottom 10% Avg Latency: 1.219386ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 15.646785427s
|
||||
Total Events: 10000
|
||||
Events/sec: 639.11
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 146 MB
|
||||
Avg Latency: 331.896µs
|
||||
P90 Latency: 520.511µs
|
||||
P95 Latency: 864.486µs
|
||||
P99 Latency: 2.251087ms
|
||||
Bottom 10% Avg Latency: 1.16922ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.02899167s
|
||||
Total Events: 9415
|
||||
Events/sec: 156.84
|
||||
Success Rate: 94.2%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 147 MB
|
||||
Avg Latency: 16.723365ms
|
||||
P90 Latency: 39.058801ms
|
||||
P95 Latency: 41.904891ms
|
||||
P99 Latency: 47.156263ms
|
||||
Bottom 10% Avg Latency: 42.800456ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.279192867s
|
||||
Total Events: 890
|
||||
Events/sec: 14.76
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 156 MB
|
||||
Avg Latency: 448.809547ms
|
||||
P90 Latency: 524.488485ms
|
||||
P95 Latency: 607.28509ms
|
||||
P99 Latency: 786.387053ms
|
||||
Bottom 10% Avg Latency: 634.016595ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.190785048s
|
||||
Total Events: 10469
|
||||
Events/sec: 173.93
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 226 MB
|
||||
Avg Latency: 17.73903ms
|
||||
P90 Latency: 561.359µs
|
||||
P95 Latency: 1.158136ms
|
||||
P99 Latency: 407.947907ms
|
||||
Bottom 10% Avg Latency: 174.508065ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.adoc
|
||||
1758366272164052ℹ️/tmp/benchmark_nostr-rs-relay_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
1758366274030399ℹ️/tmp/benchmark_nostr-rs-relay_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
1758366274036413ℹ️/tmp/benchmark_nostr-rs-relay_8: database closed /build/pkg/database/database.go:134

RELAY_NAME: nostr-rs-relay
RELAY_URL: ws://nostr-rs-relay:8080
TEST_TIMESTAMP: 2025-09-20T11:04:34+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
298
cmd/benchmark/reports/run_20250920_101521/relayer-basic_results.txt
Normal file
@@ -0,0 +1,298 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_relayer-basic_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
1758364801895559ℹ️/tmp/benchmark_relayer-basic_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
1758364801896041ℹ️/tmp/benchmark_relayer-basic_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
1758364801896078ℹ️/tmp/benchmark_relayer-basic_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
1758364801896347ℹ️(*types.Uint32)(0xc0001a801c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
1758364801896400ℹ️migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 9.050770003s
|
||||
Events/sec: 1104.88
|
||||
Avg latency: 433.89µs
|
||||
P90 latency: 567.261µs
|
||||
P95 latency: 617.868µs
|
||||
P99 latency: 783.593µs
|
||||
Bottom 10% Avg latency: 653.813µs
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 183.738134ms
|
||||
Burst completed: 1000 events in 155.035832ms
|
||||
Burst completed: 1000 events in 160.066514ms
|
||||
Burst completed: 1000 events in 183.724238ms
|
||||
Burst completed: 1000 events in 178.910929ms
|
||||
Burst completed: 1000 events in 168.905441ms
|
||||
Burst completed: 1000 events in 172.584809ms
|
||||
Burst completed: 1000 events in 177.214508ms
|
||||
Burst completed: 1000 events in 169.921566ms
|
||||
Burst completed: 1000 events in 162.042488ms
|
||||
Burst test completed: 10000 events in 15.572250139s
|
||||
Events/sec: 642.17
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 44.509677166s
|
||||
Combined ops/sec: 224.67
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 3253 queries in 1m0.095238426s
|
||||
Queries/sec: 54.13
|
||||
Avg query latency: 122.100718ms
|
||||
P95 query latency: 140.360749ms
|
||||
P99 query latency: 148.353154ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 11408 operations (1408 queries, 10000 writes) in 1m0.117581615s
|
||||
Operations/sec: 189.76
|
||||
Avg latency: 16.525268ms
|
||||
Avg query latency: 130.972853ms
|
||||
Avg write latency: 411.048µs
|
||||
P95 latency: 132.130964ms
|
||||
P99 latency: 146.285305ms
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 9.265496879s
|
||||
Events/sec: 1079.27
|
||||
Avg latency: 529.266µs
|
||||
P90 latency: 658.033µs
|
||||
P95 latency: 732.024µs
|
||||
P99 latency: 953.285µs
|
||||
Bottom 10% Avg latency: 1.168714ms
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 172.300479ms
|
||||
Burst completed: 1000 events in 149.247397ms
|
||||
Burst completed: 1000 events in 170.000198ms
|
||||
Burst completed: 1000 events in 133.786958ms
|
||||
Burst completed: 1000 events in 172.157036ms
|
||||
Burst completed: 1000 events in 153.284738ms
|
||||
Burst completed: 1000 events in 166.711903ms
|
||||
Burst completed: 1000 events in 170.635427ms
|
||||
Burst completed: 1000 events in 153.381031ms
|
||||
Burst completed: 1000 events in 162.125949ms
|
||||
Burst test completed: 10000 events in 16.674963543s
|
||||
Events/sec: 599.70
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4665 reads in 1m0.035358264s
|
||||
Combined ops/sec: 160.99
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 944 queries in 1m0.383519958s
|
||||
Queries/sec: 15.63
|
||||
Avg query latency: 421.75292ms
|
||||
P95 query latency: 491.340259ms
|
||||
P99 query latency: 664.614262ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 10479 operations (479 queries, 10000 writes) in 1m0.291926697s
|
||||
Operations/sec: 173.80
|
||||
Avg latency: 18.049265ms
|
||||
Avg query latency: 385.864458ms
|
||||
Avg write latency: 430.918µs
|
||||
P95 latency: 3.05038ms
|
||||
P99 latency: 404.540502ms
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 9.050770003s
|
||||
Total Events: 10000
|
||||
Events/sec: 1104.88
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 153 MB
|
||||
Avg Latency: 433.89µs
|
||||
P90 Latency: 567.261µs
|
||||
P95 Latency: 617.868µs
|
||||
P99 Latency: 783.593µs
|
||||
Bottom 10% Avg Latency: 653.813µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 15.572250139s
|
||||
Total Events: 10000
|
||||
Events/sec: 642.17
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 134 MB
|
||||
Avg Latency: 186.306µs
|
||||
P90 Latency: 243.995µs
|
||||
P95 Latency: 279.192µs
|
||||
P99 Latency: 392.859µs
|
||||
Bottom 10% Avg Latency: 303.766µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 44.509677166s
|
||||
Total Events: 10000
|
||||
Events/sec: 224.67
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 163 MB
|
||||
Avg Latency: 8.892738ms
|
||||
P90 Latency: 19.406836ms
|
||||
P95 Latency: 21.247322ms
|
||||
P99 Latency: 23.452072ms
|
||||
Bottom 10% Avg Latency: 21.397913ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.095238426s
|
||||
Total Events: 3253
|
||||
Events/sec: 54.13
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 126 MB
|
||||
Avg Latency: 122.100718ms
|
||||
P90 Latency: 136.523661ms
|
||||
P95 Latency: 140.360749ms
|
||||
P99 Latency: 148.353154ms
|
||||
Bottom 10% Avg Latency: 142.067372ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.117581615s
|
||||
Total Events: 11408
|
||||
Events/sec: 189.76
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 149 MB
|
||||
Avg Latency: 16.525268ms
|
||||
P90 Latency: 121.696848ms
|
||||
P95 Latency: 132.130964ms
|
||||
P99 Latency: 146.285305ms
|
||||
Bottom 10% Avg Latency: 134.054744ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 9.265496879s
|
||||
Total Events: 10000
|
||||
Events/sec: 1079.27
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 1441 MB
|
||||
Avg Latency: 529.266µs
|
||||
P90 Latency: 658.033µs
|
||||
P95 Latency: 732.024µs
|
||||
P99 Latency: 953.285µs
|
||||
Bottom 10% Avg Latency: 1.168714ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 16.674963543s
|
||||
Total Events: 10000
|
||||
Events/sec: 599.70
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 142 MB
|
||||
Avg Latency: 264.288µs
|
||||
P90 Latency: 350.187µs
|
||||
P95 Latency: 519.139µs
|
||||
P99 Latency: 1.961326ms
|
||||
Bottom 10% Avg Latency: 877.366µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.035358264s
|
||||
Total Events: 9665
|
||||
Events/sec: 160.99
|
||||
Success Rate: 96.7%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 151 MB
|
||||
Avg Latency: 16.019245ms
|
||||
P90 Latency: 36.340362ms
|
||||
P95 Latency: 39.113864ms
|
||||
P99 Latency: 44.271098ms
|
||||
Bottom 10% Avg Latency: 40.108462ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.383519958s
|
||||
Total Events: 944
|
||||
Events/sec: 15.63
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 280 MB
|
||||
Avg Latency: 421.75292ms
|
||||
P90 Latency: 460.902551ms
|
||||
P95 Latency: 491.340259ms
|
||||
P99 Latency: 664.614262ms
|
||||
Bottom 10% Avg Latency: 538.014725ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.291926697s
|
||||
Total Events: 10479
|
||||
Events/sec: 173.80
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 122 MB
|
||||
Avg Latency: 18.049265ms
|
||||
P90 Latency: 843.867µs
|
||||
P95 Latency: 3.05038ms
|
||||
P99 Latency: 404.540502ms
|
||||
Bottom 10% Avg Latency: 177.245211ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.adoc
|
||||
1758365287933287ℹ️/tmp/benchmark_relayer-basic_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
1758365289807797ℹ️/tmp/benchmark_relayer-basic_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
1758365289812921ℹ️/tmp/benchmark_relayer-basic_8: database closed /build/pkg/database/database.go:134

RELAY_NAME: relayer-basic
RELAY_URL: ws://relayer-basic:7447
TEST_TIMESTAMP: 2025-09-20T10:48:10+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
298
cmd/benchmark/reports/run_20250920_101521/strfry_results.txt
Normal file
@@ -0,0 +1,298 @@
|
||||
Starting Nostr Relay Benchmark
|
||||
Data Directory: /tmp/benchmark_strfry_8
|
||||
Events: 10000, Workers: 8, Duration: 1m0s
|
||||
1758365295110579ℹ️/tmp/benchmark_strfry_8: All 0 tables opened in 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
|
||||
1758365295111085ℹ️/tmp/benchmark_strfry_8: Discard stats nextEmptySlot: 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
|
||||
1758365295111113ℹ️/tmp/benchmark_strfry_8: Set nextTxnTs to 0
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
|
||||
1758365295111319ℹ️(*types.Uint32)(0xc000141a3c)({
|
||||
value: (uint32) 1
|
||||
})
|
||||
/build/pkg/database/migrations.go:65
|
||||
1758365295111354ℹ️migrating to version 1... /build/pkg/database/migrations.go:79
|
||||
|
||||
=== Starting test round 1/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 9.170212358s
|
||||
Events/sec: 1090.49
|
||||
Avg latency: 448.058µs
|
||||
P90 latency: 597.558µs
|
||||
P95 latency: 667.141µs
|
||||
P99 latency: 920.784µs
|
||||
Bottom 10% Avg latency: 729.464µs
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 172.138862ms
|
||||
Burst completed: 1000 events in 168.99322ms
|
||||
Burst completed: 1000 events in 162.213786ms
|
||||
Burst completed: 1000 events in 161.027417ms
|
||||
Burst completed: 1000 events in 183.148824ms
|
||||
Burst completed: 1000 events in 178.152837ms
|
||||
Burst completed: 1000 events in 158.65623ms
|
||||
Burst completed: 1000 events in 186.7166ms
|
||||
Burst completed: 1000 events in 177.202878ms
|
||||
Burst completed: 1000 events in 182.780071ms
|
||||
Burst test completed: 10000 events in 15.336760896s
|
||||
Events/sec: 652.03
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 5000 reads in 44.257468151s
|
||||
Combined ops/sec: 225.95
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 3002 queries in 1m0.091429487s
|
||||
Queries/sec: 49.96
|
||||
Avg query latency: 131.632043ms
|
||||
P95 query latency: 175.810416ms
|
||||
P99 query latency: 228.52716ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 11308 operations (1308 queries, 10000 writes) in 1m0.111257202s
|
||||
Operations/sec: 188.12
|
||||
Avg latency: 16.193707ms
|
||||
Avg query latency: 137.019852ms
|
||||
Avg write latency: 389.647µs
|
||||
P95 latency: 136.70132ms
|
||||
P99 latency: 156.996779ms
|
||||
|
||||
Pausing 10s before next round...
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
=== Starting test round 2/2 ===
|
||||
RunPeakThroughputTest..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
Events saved: 10000/10000 (100.0%)
|
||||
Duration: 9.102738s
|
||||
Events/sec: 1098.57
|
||||
Avg latency: 493.093µs
|
||||
P90 latency: 605.684µs
|
||||
P95 latency: 659.477µs
|
||||
P99 latency: 826.344µs
|
||||
Bottom 10% Avg latency: 1.097884ms
|
||||
RunBurstPatternTest..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 1000 events in 178.755916ms
|
||||
Burst completed: 1000 events in 170.810722ms
|
||||
Burst completed: 1000 events in 166.730701ms
|
||||
Burst completed: 1000 events in 172.177576ms
|
||||
Burst completed: 1000 events in 164.907178ms
|
||||
Burst completed: 1000 events in 153.267727ms
|
||||
Burst completed: 1000 events in 157.855743ms
|
||||
Burst completed: 1000 events in 159.632496ms
|
||||
Burst completed: 1000 events in 160.802526ms
|
||||
Burst completed: 1000 events in 178.513954ms
|
||||
Burst test completed: 10000 events in 15.535933443s
|
||||
Events/sec: 643.67
|
||||
RunMixedReadWriteTest..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Pre-populating database for read tests...
|
||||
Mixed test completed: 5000 writes, 4550 reads in 1m0.032080518s
|
||||
Combined ops/sec: 159.08
|
||||
RunQueryTest..
|
||||
|
||||
=== Query Test ===
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 913 queries in 1m0.248877091s
|
||||
Queries/sec: 15.15
|
||||
Avg query latency: 436.472206ms
|
||||
P95 query latency: 493.12732ms
|
||||
P99 query latency: 623.201275ms
|
||||
RunConcurrentQueryStoreTest..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Concurrent test completed: 10470 operations (470 queries, 10000 writes) in 1m0.293280495s
|
||||
Operations/sec: 173.65
|
||||
Avg latency: 18.084009ms
|
||||
Avg query latency: 395.171481ms
|
||||
Avg write latency: 360.898µs
|
||||
P95 latency: 1.338148ms
|
||||
P99 latency: 413.21015ms
|
||||
|
||||
=== Test round completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 9.170212358s
|
||||
Total Events: 10000
|
||||
Events/sec: 1090.49
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 108 MB
|
||||
Avg Latency: 448.058µs
|
||||
P90 Latency: 597.558µs
|
||||
P95 Latency: 667.141µs
|
||||
P99 Latency: 920.784µs
|
||||
Bottom 10% Avg Latency: 729.464µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 15.336760896s
|
||||
Total Events: 10000
|
||||
Events/sec: 652.03
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 123 MB
|
||||
Avg Latency: 189.06µs
|
||||
P90 Latency: 248.714µs
|
||||
P95 Latency: 290.433µs
|
||||
P99 Latency: 416.924µs
|
||||
Bottom 10% Avg Latency: 324.174µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 44.257468151s
|
||||
Total Events: 10000
|
||||
Events/sec: 225.95
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 158 MB
|
||||
Avg Latency: 8.745534ms
|
||||
P90 Latency: 18.980294ms
|
||||
P95 Latency: 20.822884ms
|
||||
P99 Latency: 23.124918ms
|
||||
Bottom 10% Avg Latency: 21.006886ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.091429487s
|
||||
Total Events: 3002
|
||||
Events/sec: 49.96
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 191 MB
|
||||
Avg Latency: 131.632043ms
|
||||
P90 Latency: 152.618309ms
|
||||
P95 Latency: 175.810416ms
|
||||
P99 Latency: 228.52716ms
|
||||
Bottom 10% Avg Latency: 186.230874ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.111257202s
|
||||
Total Events: 11308
|
||||
Events/sec: 188.12
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 146 MB
|
||||
Avg Latency: 16.193707ms
|
||||
P90 Latency: 122.204256ms
|
||||
P95 Latency: 136.70132ms
|
||||
P99 Latency: 156.996779ms
|
||||
Bottom 10% Avg Latency: 140.031139ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 9.102738s
|
||||
Total Events: 10000
|
||||
Events/sec: 1098.57
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 1441 MB
|
||||
Avg Latency: 493.093µs
|
||||
P90 Latency: 605.684µs
|
||||
P95 Latency: 659.477µs
|
||||
P99 Latency: 826.344µs
|
||||
Bottom 10% Avg Latency: 1.097884ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 15.535933443s
|
||||
Total Events: 10000
|
||||
Events/sec: 643.67
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 130 MB
|
||||
Avg Latency: 186.177µs
|
||||
P90 Latency: 243.915µs
|
||||
P95 Latency: 276.146µs
|
||||
P99 Latency: 418.787µs
|
||||
Bottom 10% Avg Latency: 309.015µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 1m0.032080518s
|
||||
Total Events: 9550
|
||||
Events/sec: 159.08
|
||||
Success Rate: 95.5%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 115 MB
|
||||
Avg Latency: 16.401942ms
|
||||
P90 Latency: 37.575878ms
|
||||
P95 Latency: 40.323279ms
|
||||
P99 Latency: 45.453669ms
|
||||
Bottom 10% Avg Latency: 41.331235ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.248877091s
|
||||
Total Events: 913
|
||||
Events/sec: 15.15
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 211 MB
|
||||
Avg Latency: 436.472206ms
|
||||
P90 Latency: 474.430346ms
|
||||
P95 Latency: 493.12732ms
|
||||
P99 Latency: 623.201275ms
|
||||
Bottom 10% Avg Latency: 523.084076ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.293280495s
|
||||
Total Events: 10470
|
||||
Events/sec: 173.65
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 8
|
||||
Memory Used: 171 MB
|
||||
Avg Latency: 18.084009ms
|
||||
P90 Latency: 624.339µs
|
||||
P95 Latency: 1.338148ms
|
||||
P99 Latency: 413.21015ms
|
||||
Bottom 10% Avg Latency: 177.8924ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_strfry_8/benchmark_report.adoc
|
||||
1758365779337138ℹ️/tmp/benchmark_strfry_8: Lifetime L0 stalled for: 0s
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
|
||||
1758365780726692ℹ️/tmp/benchmark_strfry_8:
|
||||
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
|
||||
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
|
||||
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
|
||||
Level Done
|
||||
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
|
||||
1758365780732292ℹ️/tmp/benchmark_strfry_8: database closed /build/pkg/database/database.go:134
|
||||
|
||||
RELAY_NAME: strfry
|
||||
RELAY_URL: ws://strfry:8080
|
||||
TEST_TIMESTAMP: 2025-09-20T10:56:20+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 10000
|
||||
Workers: 8
|
||||
Duration: 60s
|
||||
368 cmd/benchmark/setup-external-relays.sh (Executable file)
@@ -0,0 +1,368 @@
#!/bin/bash
|
||||
|
||||
# Setup script for downloading and configuring external relay repositories
|
||||
# for benchmarking
|
||||
|
||||
set -e
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
EXTERNAL_DIR="${SCRIPT_DIR}/external"
|
||||
|
||||
echo "Setting up external relay repositories for benchmarking..."
|
||||
|
||||
# Create external directory
|
||||
mkdir -p "${EXTERNAL_DIR}"
|
||||
|
||||
# Function to clone or update repository
|
||||
clone_or_update() {
|
||||
local repo_url="$1"
|
||||
local repo_dir="$2"
|
||||
local repo_name="$3"
|
||||
|
||||
echo "Setting up ${repo_name}..."
|
||||
|
||||
if [ -d "${repo_dir}" ]; then
|
||||
echo " ${repo_name} already exists, updating..."
|
||||
cd "${repo_dir}"
|
||||
git pull origin main 2>/dev/null || git pull origin master 2>/dev/null || true
|
||||
cd - > /dev/null
|
||||
else
|
||||
echo " Cloning ${repo_name}..."
|
||||
git clone "${repo_url}" "${repo_dir}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Clone khatru
|
||||
clone_or_update "https://github.com/fiatjaf/khatru.git" "${EXTERNAL_DIR}/khatru" "Khatru"
|
||||
|
||||
# Clone relayer
|
||||
clone_or_update "https://github.com/fiatjaf/relayer.git" "${EXTERNAL_DIR}/relayer" "Relayer"
|
||||
|
||||
# Clone strfry
|
||||
clone_or_update "https://github.com/hoytech/strfry.git" "${EXTERNAL_DIR}/strfry" "Strfry"
|
||||
|
||||
# Clone nostr-rs-relay
|
||||
clone_or_update "https://git.sr.ht/~gheartsfield/nostr-rs-relay" "${EXTERNAL_DIR}/nostr-rs-relay" "Nostr-rs-relay"
|
||||
|
||||
echo "Creating Dockerfiles for external relays..."
|
||||
|
||||
# Create Dockerfile for Khatru SQLite
|
||||
cat > "${SCRIPT_DIR}/Dockerfile.khatru-sqlite" << 'EOF'
|
||||
FROM golang:1.25-alpine AS builder
|
||||
|
||||
RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev
|
||||
|
||||
WORKDIR /build
|
||||
COPY . .
|
||||
|
||||
# Build the basic-sqlite example
|
||||
RUN cd examples/basic-sqlite && \
|
||||
go mod tidy && \
|
||||
CGO_ENABLED=1 go build -o khatru-sqlite .
|
||||
|
||||
FROM alpine:latest
|
||||
RUN apk --no-cache add ca-certificates sqlite wget
|
||||
WORKDIR /app
|
||||
COPY --from=builder /build/examples/basic-sqlite/khatru-sqlite /app/
|
||||
RUN mkdir -p /data
|
||||
EXPOSE 8080
|
||||
ENV DATABASE_PATH=/data/khatru.db
|
||||
ENV PORT=8080
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
|
||||
CMD ["/app/khatru-sqlite"]
|
||||
EOF
|
||||
|
||||
# Create Dockerfile for Khatru Badger
|
||||
cat > "${SCRIPT_DIR}/Dockerfile.khatru-badger" << 'EOF'
|
||||
FROM golang:1.25-alpine AS builder
|
||||
|
||||
RUN apk add --no-cache git ca-certificates
|
||||
|
||||
WORKDIR /build
|
||||
COPY . .
|
||||
|
||||
# Build the basic-badger example
|
||||
RUN cd examples/basic-badger && \
|
||||
go mod tidy && \
|
||||
CGO_ENABLED=0 go build -o khatru-badger .
|
||||
|
||||
FROM alpine:latest
|
||||
RUN apk --no-cache add ca-certificates wget
|
||||
WORKDIR /app
|
||||
COPY --from=builder /build/examples/basic-badger/khatru-badger /app/
|
||||
RUN mkdir -p /data
|
||||
EXPOSE 8080
|
||||
ENV DATABASE_PATH=/data/badger
|
||||
ENV PORT=8080
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
|
||||
CMD ["/app/khatru-badger"]
|
||||
EOF
|
||||
|
||||
# Create Dockerfile for Relayer basic example
|
||||
cat > "${SCRIPT_DIR}/Dockerfile.relayer-basic" << 'EOF'
|
||||
FROM golang:1.25-alpine AS builder
|
||||
|
||||
RUN apk add --no-cache git ca-certificates sqlite-dev gcc musl-dev
|
||||
|
||||
WORKDIR /build
|
||||
COPY . .
|
||||
|
||||
# Build the basic example
|
||||
RUN cd examples/basic && \
|
||||
go mod tidy && \
|
||||
CGO_ENABLED=1 go build -o relayer-basic .
|
||||
|
||||
FROM alpine:latest
|
||||
RUN apk --no-cache add ca-certificates sqlite wget
|
||||
WORKDIR /app
|
||||
COPY --from=builder /build/examples/basic/relayer-basic /app/
|
||||
RUN mkdir -p /data
|
||||
EXPOSE 8080
|
||||
ENV DATABASE_PATH=/data/relayer.db
|
||||
ENV PORT=8080
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
|
||||
CMD ["/app/relayer-basic"]
|
||||
EOF
|
||||
|
||||
# Create Dockerfile for Strfry
|
||||
cat > "${SCRIPT_DIR}/Dockerfile.strfry" << 'EOF'
|
||||
FROM ubuntu:22.04 AS builder
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# Install build dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
git \
|
||||
build-essential \
|
||||
liblmdb-dev \
|
||||
libsecp256k1-dev \
|
||||
pkg-config \
|
||||
libtool \
|
||||
autoconf \
|
||||
automake \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /build
|
||||
COPY . .
|
||||
|
||||
# Build strfry
|
||||
RUN make setup-golpe && \
|
||||
make -j$(nproc)
|
||||
|
||||
FROM ubuntu:22.04
|
||||
RUN apt-get update && apt-get install -y \
|
||||
liblmdb0 \
|
||||
libsecp256k1-0 \
|
||||
curl \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
COPY --from=builder /build/strfry /app/
|
||||
RUN mkdir -p /data
|
||||
|
||||
EXPOSE 8080
|
||||
ENV STRFRY_DB_PATH=/data/strfry.lmdb
|
||||
ENV STRFRY_RELAY_PORT=8080
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD curl -f http://localhost:8080 || exit 1
|
||||
|
||||
CMD ["/app/strfry", "relay"]
|
||||
EOF
|
||||
|
||||
# Create Dockerfile for nostr-rs-relay
|
||||
cat > "${SCRIPT_DIR}/Dockerfile.nostr-rs-relay" << 'EOF'
|
||||
FROM rust:1.70-alpine AS builder
|
||||
|
||||
RUN apk add --no-cache musl-dev sqlite-dev
|
||||
|
||||
WORKDIR /build
|
||||
COPY . .
|
||||
|
||||
# Build the relay
|
||||
RUN cargo build --release
|
||||
|
||||
FROM alpine:latest
|
||||
RUN apk --no-cache add ca-certificates sqlite wget
|
||||
WORKDIR /app
|
||||
COPY --from=builder /build/target/release/nostr-rs-relay /app/
|
||||
RUN mkdir -p /data
|
||||
|
||||
EXPOSE 8080
|
||||
ENV RUST_LOG=info
|
||||
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --quiet --tries=1 --spider http://localhost:8080 || exit 1
|
||||
|
||||
CMD ["/app/nostr-rs-relay"]
|
||||
EOF
|
||||
|
||||
echo "Creating configuration files..."
|
||||
|
||||
# Create configs directory
|
||||
mkdir -p "${SCRIPT_DIR}/configs"
|
||||
|
||||
# Create strfry configuration
|
||||
cat > "${SCRIPT_DIR}/configs/strfry.conf" << 'EOF'
|
||||
##
|
||||
## Default strfry config
|
||||
##
|
||||
|
||||
# Directory that contains the strfry LMDB database (restart required)
|
||||
db = "/data/strfry.lmdb"
|
||||
|
||||
dbParams {
|
||||
# Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
|
||||
maxreaders = 256
|
||||
|
||||
# Size of mmap to use when loading LMDB (default is 1TB, which is probably reasonable) (restart required)
|
||||
mapsize = 1099511627776
|
||||
}
|
||||
|
||||
relay {
|
||||
# Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
|
||||
bind = "0.0.0.0"
|
||||
|
||||
# Port to open for the nostr websocket protocol (restart required)
|
||||
port = 8080
|
||||
|
||||
# Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
|
||||
nofiles = 1000000
|
||||
|
||||
# HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
|
||||
realIpHeader = ""
|
||||
|
||||
info {
|
||||
# NIP-11: Name of this server. Short/descriptive (< 30 characters)
|
||||
name = "strfry benchmark"
|
||||
|
||||
# NIP-11: Detailed description of this server, free-form
|
||||
description = "A strfry relay for benchmarking"
|
||||
|
||||
# NIP-11: Administrative pubkey, for contact purposes
|
||||
pubkey = ""
|
||||
|
||||
# NIP-11: Alternative contact for this server
|
||||
contact = ""
|
||||
}
|
||||
|
||||
# Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
|
||||
maxWebsocketPayloadSize = 131072
|
||||
|
||||
# Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
|
||||
autoPingSeconds = 55
|
||||
|
||||
# If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy) (restart required)
|
||||
enableTcpKeepalive = false
|
||||
|
||||
# How much uninterrupted CPU time a REQ query should get during its DB scan
|
||||
queryTimesliceBudgetMicroseconds = 10000
|
||||
|
||||
# Maximum records that can be returned per filter
|
||||
maxFilterLimit = 500
|
||||
|
||||
# Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
|
||||
maxSubsPerConnection = 20
|
||||
|
||||
writePolicy {
|
||||
# If non-empty, path to an executable script that implements the writePolicy plugin logic
|
||||
plugin = ""
|
||||
}
|
||||
|
||||
compression {
|
||||
# Use permessage-deflate compression if supported by client. Reduces bandwidth, but uses more CPU (restart required)
|
||||
enabled = true
|
||||
|
||||
# Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
|
||||
slidingWindow = true
|
||||
}
|
||||
|
||||
logging {
|
||||
# Dump all incoming messages
|
||||
dumpInAll = false
|
||||
|
||||
# Dump all incoming EVENT messages
|
||||
dumpInEvents = false
|
||||
|
||||
# Dump all incoming REQ/CLOSE messages
|
||||
dumpInReqs = false
|
||||
|
||||
# Log performance metrics for initial REQ database scans
|
||||
dbScanPerf = false
|
||||
}
|
||||
|
||||
numThreads {
|
||||
# Ingester threads: route incoming requests, validate events/sigs (restart required)
|
||||
ingester = 3
|
||||
|
||||
# reqWorker threads: Handle initial DB scan for events (restart required)
|
||||
reqWorker = 3
|
||||
|
||||
# reqMonitor threads: Handle filtering of new events (restart required)
|
||||
reqMonitor = 3
|
||||
|
||||
# yesstr threads: experimental yesstr protocol (restart required)
|
||||
yesstr = 1
|
||||
}
|
||||
}
|
||||
EOF
|
||||
|
||||
# Create nostr-rs-relay configuration
|
||||
cat > "${SCRIPT_DIR}/configs/config.toml" << 'EOF'
|
||||
[info]
|
||||
relay_url = "ws://localhost:8080"
|
||||
name = "nostr-rs-relay benchmark"
|
||||
description = "A nostr-rs-relay for benchmarking"
|
||||
pubkey = ""
|
||||
contact = ""
|
||||
|
||||
[database]
|
||||
data_directory = "/data"
|
||||
in_memory = false
|
||||
engine = "sqlite"
|
||||
|
||||
[network]
|
||||
port = 8080
|
||||
address = "0.0.0.0"
|
||||
|
||||
[limits]
|
||||
messages_per_sec = 0
|
||||
subscriptions_per_min = 0
|
||||
max_event_bytes = 65535
|
||||
max_ws_message_bytes = 131072
|
||||
max_ws_frame_bytes = 131072
|
||||
|
||||
[authorization]
|
||||
pubkey_whitelist = []
|
||||
|
||||
[verified_users]
|
||||
mode = "passive"
|
||||
domain_whitelist = []
|
||||
domain_blacklist = []
|
||||
|
||||
[pay_to_relay]
|
||||
enabled = false
|
||||
|
||||
[options]
|
||||
reject_future_seconds = 30
|
||||
EOF
|
||||
|
||||
echo "Creating data directories..."
|
||||
mkdir -p "${SCRIPT_DIR}/data"/{next-orly,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay}
|
||||
mkdir -p "${SCRIPT_DIR}/reports"
|
||||
|
||||
echo "Setup complete!"
|
||||
echo ""
|
||||
echo "External relay repositories have been cloned to: ${EXTERNAL_DIR}"
|
||||
echo "Dockerfiles have been created for all relay implementations"
|
||||
echo "Configuration files have been created in: ${SCRIPT_DIR}/configs"
|
||||
echo "Data directories have been created in: ${SCRIPT_DIR}/data"
|
||||
echo ""
|
||||
echo "To run the benchmark:"
|
||||
echo " cd ${SCRIPT_DIR}"
|
||||
echo " docker-compose up --build"
|
||||
echo ""
|
||||
echo "Reports will be generated in: ${SCRIPT_DIR}/reports"
|
||||
@@ -6,10 +6,10 @@ import (
	"os"
	"strings"

	"crypto.orly/ec/schnorr"
	"crypto.orly/ec/secp256k1"
	b32 "encoders.orly/bech32encoding"
	"encoders.orly/hex"
	"next.orly.dev/pkg/crypto/ec/schnorr"
	"next.orly.dev/pkg/crypto/ec/secp256k1"
	b32 "next.orly.dev/pkg/encoders/bech32encoding"
	"next.orly.dev/pkg/encoders/hex"
)

func usage() {

634 cmd/stresstest/main.go (Normal file)
@@ -0,0 +1,634 @@
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/crypto/p256k"
|
||||
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/event/examples"
|
||||
"next.orly.dev/pkg/encoders/filter"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/encoders/kind"
|
||||
"next.orly.dev/pkg/encoders/tag"
|
||||
"next.orly.dev/pkg/encoders/timestamp"
|
||||
"next.orly.dev/pkg/protocol/ws"
|
||||
)
|
||||
|
||||
// randomHex returns a hex-encoded string of n random bytes (2n hex chars)
|
||||
func randomHex(n int) string {
|
||||
b := make([]byte, n)
|
||||
_, _ = rand.Read(b)
|
||||
return hex.Enc(b)
|
||||
}
|
||||
|
||||
func makeEvent(rng *rand.Rand, signer *p256k.Signer) (*event.E, error) {
|
||||
ev := &event.E{
|
||||
CreatedAt: time.Now().Unix(),
|
||||
Kind: kind.TextNote.K,
|
||||
Tags: tag.NewS(),
|
||||
Content: []byte(fmt.Sprintf("stresstest %d", rng.Int63())),
|
||||
}
|
||||
|
||||
// Random number of p-tags up to 100
|
||||
nPTags := rng.Intn(101) // 0..100 inclusive
|
||||
for i := 0; i < nPTags; i++ {
|
||||
// random 32-byte pubkey in hex (64 chars)
|
||||
phex := randomHex(32)
|
||||
ev.Tags.Append(tag.NewFromAny("p", phex))
|
||||
}
|
||||
|
||||
// Sign and verify to ensure pubkey, id and signature are coherent
|
||||
if err := ev.Sign(signer); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ok, err := ev.Verify(); err != nil || !ok {
|
||||
return nil, fmt.Errorf("event signature verification failed: %v", err)
|
||||
}
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
type RelayConn struct {
|
||||
mu sync.RWMutex
|
||||
client *ws.Client
|
||||
url string
|
||||
}
|
||||
|
||||
type CacheIndex struct {
|
||||
events []*event.E
|
||||
ids [][]byte
|
||||
authors [][]byte
|
||||
times []int64
|
||||
tags map[byte][][]byte // single-letter tag -> list of values
|
||||
}
|
||||
|
||||
func (rc *RelayConn) Get() *ws.Client {
|
||||
rc.mu.RLock()
|
||||
defer rc.mu.RUnlock()
|
||||
return rc.client
|
||||
}
|
||||
|
||||
func (rc *RelayConn) Reconnect(ctx context.Context) error {
|
||||
rc.mu.Lock()
|
||||
defer rc.mu.Unlock()
|
||||
if rc.client != nil {
|
||||
_ = rc.client.Close()
|
||||
}
|
||||
c, err := ws.RelayConnect(ctx, rc.url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rc.client = c
|
||||
return nil
|
||||
}
|
||||
|
||||
// loadCacheAndIndex parses examples.Cache (JSONL of events) and builds an index
|
||||
func loadCacheAndIndex() (*CacheIndex, error) {
|
||||
scanner := bufio.NewScanner(bytes.NewReader(examples.Cache))
|
||||
idx := &CacheIndex{tags: make(map[byte][][]byte)}
|
||||
for scanner.Scan() {
|
||||
line := scanner.Bytes()
|
||||
if len(bytes.TrimSpace(line)) == 0 {
|
||||
continue
|
||||
}
|
||||
ev := event.New()
|
||||
rem, err := ev.Unmarshal(line)
|
||||
_ = rem
|
||||
if err != nil {
|
||||
// skip malformed lines
|
||||
continue
|
||||
}
|
||||
idx.events = append(idx.events, ev)
|
||||
// collect fields
|
||||
if len(ev.ID) > 0 {
|
||||
idx.ids = append(idx.ids, append([]byte(nil), ev.ID...))
|
||||
}
|
||||
if len(ev.Pubkey) > 0 {
|
||||
idx.authors = append(idx.authors, append([]byte(nil), ev.Pubkey...))
|
||||
}
|
||||
idx.times = append(idx.times, ev.CreatedAt)
|
||||
if ev.Tags != nil {
|
||||
for _, tg := range *ev.Tags {
|
||||
if tg == nil || tg.Len() < 2 {
|
||||
continue
|
||||
}
|
||||
k := tg.Key()
|
||||
if len(k) != 1 {
|
||||
continue // only single-letter keys per requirement
|
||||
}
|
||||
key := k[0]
|
||||
for _, v := range tg.T[1:] {
|
||||
idx.tags[key] = append(
|
||||
idx.tags[key], append([]byte(nil), v...),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return idx, nil
|
||||
}
|
||||
|
||||
// publishCacheEvents uploads all cache events to the relay using multiple concurrent connections
|
||||
func publishCacheEvents(
|
||||
ctx context.Context, relayURL string, idx *CacheIndex,
|
||||
) (sentCount int) {
|
||||
numWorkers := runtime.NumCPU()
|
||||
log.I.F("using %d concurrent connections for cache upload", numWorkers)
|
||||
|
||||
// Channel to distribute events to workers
|
||||
eventChan := make(chan *event.E, len(idx.events))
|
||||
var totalSent atomic.Int64
|
||||
|
||||
// Fill the event channel
|
||||
for _, ev := range idx.events {
|
||||
eventChan <- ev
|
||||
}
|
||||
close(eventChan)
|
||||
|
||||
// Start worker goroutines
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < numWorkers; i++ {
|
||||
wg.Add(1)
|
||||
go func(workerID int) {
|
||||
defer wg.Done()
|
||||
|
||||
// Create separate connection for this worker
|
||||
client, err := ws.RelayConnect(ctx, relayURL)
|
||||
if err != nil {
|
||||
log.E.F("worker %d: failed to connect: %v", workerID, err)
|
||||
return
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
rc := &RelayConn{client: client, url: relayURL}
|
||||
workerSent := 0
|
||||
|
||||
// Process events from the channel
|
||||
for ev := range eventChan {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
// Get client connection
|
||||
wsClient := rc.Get()
|
||||
if wsClient == nil {
|
||||
if err := rc.Reconnect(ctx); err != nil {
|
||||
log.E.F("worker %d: reconnect failed: %v", workerID, err)
|
||||
continue
|
||||
}
|
||||
wsClient = rc.Get()
|
||||
}
|
||||
|
||||
// Send event without waiting for OK response (fire-and-forget)
|
||||
envelope := eventenvelope.NewSubmissionWith(ev)
|
||||
envBytes := envelope.Marshal(nil)
|
||||
if err := <-wsClient.Write(envBytes); err != nil {
|
||||
log.E.F("worker %d: write error: %v", workerID, err)
|
||||
errStr := err.Error()
|
||||
if strings.Contains(errStr, "connection closed") {
|
||||
_ = rc.Reconnect(ctx)
|
||||
}
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
|
||||
workerSent++
|
||||
totalSent.Add(1)
|
||||
log.T.F("worker %d: sent event %d (total: %d)", workerID, workerSent, totalSent.Load())
|
||||
|
||||
// Small delay to prevent overwhelming the relay
|
||||
select {
|
||||
case <-time.After(10 * time.Millisecond):
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
log.I.F("worker %d: completed, sent %d events", workerID, workerSent)
|
||||
}(i)
|
||||
}
|
||||
|
||||
// Wait for all workers to complete
|
||||
wg.Wait()
|
||||
|
||||
return int(totalSent.Load())
|
||||
}
|
||||
|
||||
// buildRandomFilter builds a filter combining random subsets of id, author, timestamp, and a single-letter tag value.
|
||||
func buildRandomFilter(idx *CacheIndex, rng *rand.Rand, mask int) *filter.F {
|
||||
// pick a random base event as anchor for fields
|
||||
i := rng.Intn(len(idx.events))
|
||||
ev := idx.events[i]
|
||||
f := filter.New()
|
||||
// clear defaults we don't set
|
||||
f.Kinds = kind.NewS() // we don't constrain kinds
|
||||
// include fields based on mask bits: 1=id, 2=author, 4=timestamp, 8=tag
|
||||
if mask&1 != 0 {
|
||||
f.Ids.T = append(f.Ids.T, append([]byte(nil), ev.ID...))
|
||||
}
|
||||
if mask&2 != 0 {
|
||||
f.Authors.T = append(f.Authors.T, append([]byte(nil), ev.Pubkey...))
|
||||
}
|
||||
if mask&4 != 0 {
|
||||
// use a tight window around the event timestamp (exact match)
|
||||
f.Since = timestamp.FromUnix(ev.CreatedAt)
|
||||
f.Until = timestamp.FromUnix(ev.CreatedAt)
|
||||
}
|
||||
if mask&8 != 0 {
|
||||
// choose a random single-letter tag from this event if present; fallback to global index
|
||||
var key byte
|
||||
var val []byte
|
||||
chosen := false
|
||||
if ev.Tags != nil {
|
||||
for _, tg := range *ev.Tags {
|
||||
if tg == nil || tg.Len() < 2 {
|
||||
continue
|
||||
}
|
||||
k := tg.Key()
|
||||
if len(k) == 1 {
|
||||
key = k[0]
|
||||
vv := tg.T[1:]
|
||||
val = vv[rng.Intn(len(vv))]
|
||||
chosen = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !chosen && len(idx.tags) > 0 {
|
||||
// pick a random entry from global tags map
|
||||
keys := make([]byte, 0, len(idx.tags))
|
||||
for k := range idx.tags {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
key = keys[rng.Intn(len(keys))]
|
||||
vals := idx.tags[key]
|
||||
val = vals[rng.Intn(len(vals))]
|
||||
}
|
||||
if key != 0 && len(val) > 0 {
|
||||
f.Tags.Append(tag.NewFromBytesSlice([]byte{key}, val))
|
||||
}
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
func publisherWorker(
|
||||
ctx context.Context, rc *RelayConn, id int, stats *uint64,
|
||||
) {
|
||||
// Unique RNG per worker
|
||||
src := rand.NewSource(time.Now().UnixNano() ^ int64(id<<16))
|
||||
rng := rand.New(src)
|
||||
// Generate and reuse signing key per worker
|
||||
signer := &p256k.Signer{}
|
||||
if err := signer.Generate(); err != nil {
|
||||
log.E.F("worker %d: signer generate error: %v", id, err)
|
||||
return
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
ev, err := makeEvent(rng, signer)
|
||||
if err != nil {
|
||||
log.E.F("worker %d: makeEvent error: %v", id, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Send event without waiting for OK response (fire-and-forget)
|
||||
client := rc.Get()
|
||||
if client == nil {
|
||||
_ = rc.Reconnect(ctx)
|
||||
continue
|
||||
}
|
||||
// Create EVENT envelope and send directly without waiting for OK
|
||||
envelope := eventenvelope.NewSubmissionWith(ev)
|
||||
envBytes := envelope.Marshal(nil)
|
||||
if err := <-client.Write(envBytes); err != nil {
|
||||
log.E.F("worker %d: write error: %v", id, err)
|
||||
errStr := err.Error()
|
||||
if strings.Contains(errStr, "connection closed") {
|
||||
for attempt := 0; attempt < 5; attempt++ {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
if err := rc.Reconnect(ctx); err == nil {
|
||||
log.I.F("worker %d: reconnected to %s", id, rc.url)
|
||||
break
|
||||
}
|
||||
select {
|
||||
case <-time.After(200 * time.Millisecond):
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
// back off briefly on error to avoid tight loop if relay misbehaves
|
||||
select {
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
atomic.AddUint64(stats, 1)
|
||||
|
||||
// Randomly fluctuate pacing: small random sleep 0..50ms plus occasional longer jitter
|
||||
sleep := time.Duration(rng.Intn(50)) * time.Millisecond
|
||||
if rng.Intn(10) == 0 { // 10% chance add extra 100..400ms
|
||||
sleep += time.Duration(100+rng.Intn(300)) * time.Millisecond
|
||||
}
|
||||
select {
|
||||
case <-time.After(sleep):
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func queryWorker(
|
||||
ctx context.Context, rc *RelayConn, idx *CacheIndex, id int,
|
||||
queries, results *uint64, subTimeout time.Duration,
|
||||
minInterval, maxInterval time.Duration,
|
||||
) {
|
||||
rng := rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(id<<24)))
|
||||
mask := 1
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
if len(idx.events) == 0 {
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
f := buildRandomFilter(idx, rng, mask)
|
||||
mask++
|
||||
if mask > 15 { // all combinations of 4 criteria (excluding 0)
|
||||
mask = 1
|
||||
}
|
||||
client := rc.Get()
|
||||
if client == nil {
|
||||
_ = rc.Reconnect(ctx)
|
||||
continue
|
||||
}
|
||||
ff := filter.S{f}
|
||||
sCtx, cancel := context.WithTimeout(ctx, subTimeout)
|
||||
sub, err := client.Subscribe(
|
||||
sCtx, &ff, ws.WithLabel("stresstest-query"),
|
||||
)
|
||||
if err != nil {
|
||||
cancel()
|
||||
// reconnect on connection issues
|
||||
errStr := err.Error()
|
||||
if strings.Contains(errStr, "connection closed") {
|
||||
_ = rc.Reconnect(ctx)
|
||||
}
|
||||
continue
|
||||
}
|
||||
atomic.AddUint64(queries, 1)
|
||||
// read until EOSE or timeout
|
||||
innerDone := false
|
||||
for !innerDone {
|
||||
select {
|
||||
case <-sCtx.Done():
|
||||
innerDone = true
|
||||
case <-sub.EndOfStoredEvents:
|
||||
innerDone = true
|
||||
case ev, ok := <-sub.Events:
|
||||
if !ok {
|
||||
innerDone = true
|
||||
break
|
||||
}
|
||||
if ev != nil {
|
||||
atomic.AddUint64(results, 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
sub.Unsub()
|
||||
cancel()
|
||||
// wait a random interval between queries
|
||||
interval := minInterval
|
||||
if maxInterval > minInterval {
|
||||
delta := rng.Int63n(int64(maxInterval - minInterval))
|
||||
interval += time.Duration(delta)
|
||||
}
|
||||
select {
|
||||
case <-time.After(interval):
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func startReader(ctx context.Context, rl *ws.Client, received *uint64) error {
|
||||
// Broad filter: subscribe to text notes since now-5m to catch our own writes
|
||||
f := filter.New()
|
||||
f.Kinds = kind.NewS(kind.TextNote)
|
||||
// We don't set authors to ensure we read all text notes coming in
|
||||
ff := filter.S{f}
|
||||
sub, err := rl.Subscribe(ctx, &ff, ws.WithLabel("stresstest-reader"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case ev, ok := <-sub.Events:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if ev != nil {
|
||||
atomic.AddUint64(received, 1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
var (
|
||||
address string
|
||||
port int
|
||||
workers int
|
||||
duration time.Duration
|
||||
publishTimeout time.Duration
|
||||
queryWorkers int
|
||||
queryTimeout time.Duration
|
||||
queryMinInt time.Duration
|
||||
queryMaxInt time.Duration
|
||||
skipCache bool
|
||||
)
|
||||
|
||||
flag.StringVar(
|
||||
&address, "address", "localhost", "relay address (host or IP)",
|
||||
)
|
||||
flag.IntVar(&port, "port", 3334, "relay port")
|
||||
flag.IntVar(
|
||||
&workers, "workers", 8, "number of concurrent publisher workers",
|
||||
)
|
||||
flag.DurationVar(
|
||||
&duration, "duration", 60*time.Second,
|
||||
"how long to run the stress test",
|
||||
)
|
||||
flag.DurationVar(
|
||||
&publishTimeout, "publish-timeout", 15*time.Second,
|
||||
"timeout waiting for OK per publish",
|
||||
)
|
||||
flag.IntVar(
|
||||
&queryWorkers, "query-workers", 4, "number of concurrent query workers",
|
||||
)
|
||||
flag.DurationVar(
|
||||
&queryTimeout, "query-timeout", 3*time.Second,
|
||||
"subscription timeout for queries",
|
||||
)
|
||||
flag.DurationVar(
|
||||
&queryMinInt, "query-min-interval", 50*time.Millisecond,
|
||||
"minimum interval between queries per worker",
|
||||
)
|
||||
flag.DurationVar(
|
||||
&queryMaxInt, "query-max-interval", 300*time.Millisecond,
|
||||
"maximum interval between queries per worker",
|
||||
)
|
||||
flag.BoolVar(
|
||||
&skipCache, "skip-cache", false,
|
||||
"skip uploading examples.Cache before running",
|
||||
)
|
||||
flag.Parse()
|
||||
|
||||
relayURL := fmt.Sprintf("ws://%s:%d", address, port)
|
||||
log.I.F("stresstest: connecting to %s", relayURL)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Handle Ctrl+C
|
||||
sigc := make(chan os.Signal, 1)
|
||||
signal.Notify(sigc, os.Interrupt)
|
||||
go func() {
|
||||
select {
|
||||
case <-sigc:
|
||||
log.I.Ln("interrupt received, shutting down...")
|
||||
cancel()
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}()
|
||||
|
||||
rl, err := ws.RelayConnect(ctx, relayURL)
|
||||
if err != nil {
|
||||
log.E.F("failed to connect to relay %s: %v", relayURL, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer rl.Close()
|
||||
|
||||
rc := &RelayConn{client: rl, url: relayURL}
|
||||
|
||||
// Load and publish cache events first (unless skipped)
|
||||
idx, err := loadCacheAndIndex()
|
||||
if err != nil {
|
||||
log.E.F("failed to load examples.Cache: %v", err)
|
||||
}
|
||||
cacheSent := 0
|
||||
if !skipCache && idx != nil && len(idx.events) > 0 {
|
||||
log.I.F("sending %d events from examples.Cache...", len(idx.events))
|
||||
cacheSent = publishCacheEvents(ctx, relayURL, idx)
|
||||
log.I.F("sent %d/%d cache events", cacheSent, len(idx.events))
|
||||
}
|
||||
|
||||
var pubOK uint64
|
||||
var recvCount uint64
|
||||
var qCount uint64
|
||||
var qResults uint64
|
||||
|
||||
if err := startReader(ctx, rl, &recvCount); err != nil {
|
||||
log.E.F("reader subscribe error: %v", err)
|
||||
// continue anyway, we can still write
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
// Start publisher workers
|
||||
wg.Add(workers)
|
||||
for i := 0; i < workers; i++ {
|
||||
i := i
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
publisherWorker(ctx, rc, i, &pubOK)
|
||||
}()
|
||||
}
|
||||
// Start query workers
|
||||
if idx != nil && len(idx.events) > 0 && queryWorkers > 0 {
|
||||
wg.Add(queryWorkers)
|
||||
for i := 0; i < queryWorkers; i++ {
|
||||
i := i
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
queryWorker(
|
||||
ctx, rc, idx, i, &qCount, &qResults, queryTimeout,
|
||||
queryMinInt, queryMaxInt,
|
||||
)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// Timer for duration and periodic stats
|
||||
ticker := time.NewTicker(2 * time.Second)
|
||||
defer ticker.Stop()
|
||||
end := time.NewTimer(duration)
|
||||
start := time.Now()
|
||||
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
elapsed := time.Since(start).Seconds()
|
||||
p := atomic.LoadUint64(&pubOK)
|
||||
r := atomic.LoadUint64(&recvCount)
|
||||
qc := atomic.LoadUint64(&qCount)
|
||||
qr := atomic.LoadUint64(&qResults)
|
||||
log.I.F(
|
||||
"elapsed=%.1fs sent=%d (%.0f/s) received=%d cache_sent=%d queries=%d results=%d",
|
||||
elapsed, p, float64(p)/elapsed, r, cacheSent, qc, qr,
|
||||
)
|
||||
case <-end.C:
|
||||
break loop
|
||||
case <-ctx.Done():
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
cancel()
|
||||
wg.Wait()
|
||||
p := atomic.LoadUint64(&pubOK)
|
||||
r := atomic.LoadUint64(&recvCount)
|
||||
qc := atomic.LoadUint64(&qCount)
|
||||
qr := atomic.LoadUint64(&qResults)
|
||||
log.I.F(
|
||||
"stresstest complete: cache_sent=%d sent=%d received=%d queries=%d results=%d duration=%s",
|
||||
cacheSent, p, r, qc, qr,
|
||||
time.Since(start).Truncate(time.Millisecond),
|
||||
)
|
||||
}
|
||||
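The stress tester above is driven entirely by its flags; assuming the module is built from the repository root, an invocation such as `go run ./cmd/stresstest -address localhost -port 3334 -workers 8 -query-workers 4 -duration 60s` exercises a local relay with the defaults shown in the flag definitions, and `-skip-cache` omits the initial upload of `examples.Cache`.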
116 debug-websocket.sh (Executable file)
@@ -0,0 +1,116 @@
#!/bin/bash
# WebSocket Debug Script for Stella's Orly Relay

echo "🔍 Debugging WebSocket Connection for orly-relay.imwald.eu"
echo "=================================================="

echo ""
echo "📋 Step 1: Check if relay container is running"
echo "----------------------------------------------"
docker ps | grep -E "(stella|relay|orly)" || echo "❌ No relay containers found"

echo ""
echo "📋 Step 2: Test local relay connection"
echo "--------------------------------------"
if curl -s -I http://127.0.0.1:7777 | grep -q "426"; then
    echo "✅ Local relay responding correctly (HTTP 426)"
else
    echo "❌ Local relay not responding correctly"
    curl -I http://127.0.0.1:7777
fi

echo ""
echo "📋 Step 3: Check Apache modules"
echo "------------------------------"
if apache2ctl -M 2>/dev/null | grep -q "proxy_wstunnel"; then
    echo "✅ proxy_wstunnel module enabled"
else
    echo "❌ proxy_wstunnel module NOT enabled"
    echo "Run: sudo a2enmod proxy_wstunnel"
fi

if apache2ctl -M 2>/dev/null | grep -q "rewrite"; then
    echo "✅ rewrite module enabled"
else
    echo "❌ rewrite module NOT enabled"
    echo "Run: sudo a2enmod rewrite"
fi

echo ""
echo "📋 Step 4: Check Plesk Apache configuration"
echo "------------------------------------------"
if [ -f "/etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf" ]; then
    echo "✅ Plesk config file exists"
    echo "Current proxy configuration:"
    grep -E "(Proxy|Rewrite|proxy|rewrite)" /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf || echo "❌ No proxy/rewrite rules found"
else
    echo "❌ Plesk config file not found"
fi

echo ""
echo "📋 Step 5: Test WebSocket connections"
echo "------------------------------------"

# Test with curl first (simpler)
echo "Testing HTTP upgrade request to local relay..."
if curl -s -I -H "Connection: Upgrade" -H "Upgrade: websocket" http://127.0.0.1:7777 | grep -q "426\|101"; then
    echo "✅ Local relay accepts upgrade requests"
else
    echo "❌ Local relay doesn't accept upgrade requests"
fi

echo "Testing HTTP upgrade request to remote relay..."
if curl -s -I -H "Connection: Upgrade" -H "Upgrade: websocket" https://orly-relay.imwald.eu | grep -q "426\|101"; then
    echo "✅ Remote relay accepts upgrade requests"
else
    echo "❌ Remote relay doesn't accept upgrade requests"
    echo "This indicates Apache proxy issue"
fi

# Try to install websocat if not available
if ! command -v websocat >/dev/null 2>&1; then
    echo ""
    echo "📥 Installing websocat for proper WebSocket testing..."
    if wget -q https://github.com/vi/websocat/releases/download/v1.12.0/websocat.x86_64-unknown-linux-musl -O websocat 2>/dev/null; then
        chmod +x websocat
        echo "✅ websocat installed"
    else
        echo "❌ Could not install websocat (no internet or wget issue)"
        echo "Manual install: wget https://github.com/vi/websocat/releases/download/v1.12.0/websocat.x86_64-unknown-linux-musl -O websocat && chmod +x websocat"
    fi
fi

# Test with websocat if available
if command -v ./websocat >/dev/null 2>&1; then
    echo ""
    echo "Testing actual WebSocket connection..."
    echo "Local WebSocket test:"
    timeout 3 bash -c 'echo "[\"REQ\",\"test\",{}]" | ./websocat ws://127.0.0.1:7777/' 2>/dev/null || echo "❌ Local WebSocket failed"

    echo "Remote WebSocket test (ignoring SSL):"
    timeout 3 bash -c 'echo "[\"REQ\",\"test\",{}]" | ./websocat --insecure wss://orly-relay.imwald.eu/' 2>/dev/null || echo "❌ Remote WebSocket failed"
fi

echo ""
echo "📋 Step 6: Check ports and connections"
echo "------------------------------------"
echo "Ports listening on 7777:"
netstat -tlnp 2>/dev/null | grep :7777 || ss -tlnp 2>/dev/null | grep :7777 || echo "❌ No process listening on port 7777"

echo ""
echo "📋 Step 7: Test SSL certificate"
echo "------------------------------"
echo "Certificate issuer:"
echo | openssl s_client -connect orly-relay.imwald.eu:443 -servername orly-relay.imwald.eu 2>/dev/null | openssl x509 -noout -issuer 2>/dev/null || echo "❌ SSL test failed"

echo ""
echo "🎯 RECOMMENDED NEXT STEPS:"
echo "========================="
echo "1. If proxy_wstunnel is missing: sudo a2enmod proxy_wstunnel && sudo systemctl restart apache2"
echo "2. If no proxy rules found: Add configuration in Plesk Apache & nginx Settings"
echo "3. If local WebSocket fails: Check if relay container is actually running"
echo "4. If remote WebSocket fails but local works: Apache proxy configuration issue"
echo ""
echo "🔧 Try this simple Plesk configuration:"
echo "ProxyPass / http://127.0.0.1:7777/"
echo "ProxyPassReverse / http://127.0.0.1:7777/"
93 docker-compose.yml (Normal file)
@@ -0,0 +1,93 @@
# Docker Compose for Stella's Nostr Relay
# Owner: npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx

version: '3.8'

services:
  stella-relay:
    image: silberengel/orly-relay:latest
    container_name: stella-nostr-relay
    restart: unless-stopped
    ports:
      - "127.0.0.1:7777:7777"
    volumes:
      - relay_data:/data
      - ./profiles:/profiles:ro
    environment:
      # Relay Configuration
      - ORLY_DATA_DIR=/data
      - ORLY_LISTEN=0.0.0.0
      - ORLY_PORT=7777
      - ORLY_LOG_LEVEL=info
      - ORLY_MAX_CONNECTIONS=1000
      - ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx
      - ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z

      # Performance Settings (based on v0.4.8 optimizations)
      - ORLY_CONCURRENT_WORKERS=0  # 0 = auto-detect CPU cores
      - ORLY_BATCH_SIZE=1000
      - ORLY_CACHE_SIZE=10000

      # Database Settings
      - BADGER_LOG_LEVEL=ERROR
      - BADGER_SYNC_WRITES=false  # Better performance, slightly less durability

      # Security Settings
      - ORLY_REQUIRE_AUTH=false
      - ORLY_MAX_EVENT_SIZE=65536
      - ORLY_MAX_SUBSCRIPTIONS=20

    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:7777"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

    # Resource limits
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: '1.0'
        reservations:
          memory: 256M
          cpus: '0.25'

    # Logging configuration
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # Optional: Nginx reverse proxy for SSL/domain setup
  nginx:
    image: nginx:alpine
    container_name: stella-nginx
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/ssl:/etc/nginx/ssl:ro
      - nginx_logs:/var/log/nginx
    depends_on:
      - stella-relay
    profiles:
      - proxy  # Only start with: docker-compose --profile proxy up

volumes:
  relay_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./data
  nginx_logs:
    driver: local

networks:
  default:
    name: stella-relay-network
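Because relay_data is declared as a bind mount of ./data (and ./profiles is mounted read-only into the container), those directories need to exist next to this compose file before the stack is started; assuming a standard Docker Compose setup, something like `mkdir -p data profiles` followed by `docker compose up -d` brings up the relay alone, and the nginx service only joins when the proxy profile is requested, as noted in the comment above.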
BIN docs/orly.png (Binary file not shown. Before: 70 KiB, After: 485 KiB)

259 docs/websocket-req-comparison.md (Normal file)
@@ -0,0 +1,259 @@
# WebSocket REQ Handling Comparison: Khatru vs Next.orly.dev

## Overview

This document compares how two Nostr relay implementations handle WebSocket connections and REQ (subscription) messages:

1. **Khatru** - A popular Go-based Nostr relay library by fiatjaf
2. **Next.orly.dev** - A custom relay implementation with advanced features

## Architecture Comparison

### Khatru Architecture
- **Monolithic approach**: Single large `HandleWebsocket` method (~380 lines) processes all message types
- **Inline processing**: REQ handling is embedded within the main websocket handler
- **Hook-based extensibility**: Uses function slices for customizable behavior
- **Simple structure**: WebSocket struct with basic fields and a mutex for thread safety

### Next.orly.dev Architecture
- **Modular approach**: Separate methods for each message type (`HandleReq`, `HandleEvent`, etc.)
- **Layered processing**: Message identification → envelope parsing → type-specific handling
- **Publisher-subscriber system**: Dedicated infrastructure for subscription management
- **Rich context**: Listener struct with detailed state tracking and metrics

## Connection Establishment

### Khatru
```go
// Simple websocket upgrade
conn, err := rl.upgrader.Upgrade(w, r, nil)
ws := &WebSocket{
	conn:               conn,
	Request:            r,
	Challenge:          hex.EncodeToString(challenge),
	negentropySessions: xsync.NewMapOf[string, *NegentropySession](),
}
```

### Next.orly.dev
```go
// More sophisticated setup with IP whitelisting
conn, err = websocket.Accept(w, r, &websocket.AcceptOptions{OriginPatterns: []string{"*"}})
listener := &Listener{
	ctx:    ctx,
	Server: s,
	conn:   conn,
	remote: remote,
	req:    r,
}
// Immediate AUTH challenge if ACLs are configured
```

**Key Differences:**
- Next.orly.dev includes IP whitelisting and immediate authentication challenges (a minimal sketch of the AUTH challenge follows this list)
- Khatru uses the fasthttp/websocket library vs next.orly.dev using coder/websocket
- Next.orly.dev has more detailed connection state tracking
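To make the first point concrete, here is a minimal sketch of the immediate-AUTH-challenge pattern, assuming the github.com/coder/websocket API shown above and the NIP-42 relay-to-client `["AUTH", <challenge>]` message; handler and variable names are illustrative and not taken from either codebase:

```go
// Sketch: send a NIP-42 AUTH challenge right after the websocket is accepted.
package main

import (
	"context"
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"net/http"
	"time"

	"github.com/coder/websocket"
)

func handleWS(w http.ResponseWriter, r *http.Request) {
	conn, err := websocket.Accept(w, r, &websocket.AcceptOptions{OriginPatterns: []string{"*"}})
	if err != nil {
		return
	}
	defer conn.Close(websocket.StatusNormalClosure, "")

	// Per-connection random challenge; a real relay remembers it so the
	// client's later AUTH event can be verified.
	buf := make([]byte, 16)
	_, _ = rand.Read(buf)
	challenge := hex.EncodeToString(buf)

	// NIP-42: relay sends ["AUTH", <challenge>] before serving privileged REQs.
	msg, _ := json.Marshal([]any{"AUTH", challenge})
	ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second)
	defer cancel()
	_ = conn.Write(ctx, websocket.MessageText, msg)

	// ... the normal read loop for EVENT/REQ/CLOSE would follow here ...
}

func main() {
	http.HandleFunc("/", handleWS)
	_ = http.ListenAndServe(":8080", nil)
}
```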
## Message Processing

### Khatru
- Uses `nostr.MessageParser` for sequential parsing
- Switch statement on envelope type within a goroutine
- Direct processing without intermediate validation layers

### Next.orly.dev
- Custom envelope identification system (`envelopes.Identify`)
- Separate validation and processing phases
- Extensive logging and error handling at each step

## REQ Message Handling

### Khatru REQ Processing
```go
case *nostr.ReqEnvelope:
	eose := sync.WaitGroup{}
	eose.Add(len(env.Filters))

	// Handle each filter separately
	for _, filter := range env.Filters {
		err := srl.handleRequest(reqCtx, env.SubscriptionID, &eose, ws, filter)
		if err != nil {
			// Fail everything if any filter is rejected
			ws.WriteJSON(nostr.ClosedEnvelope{SubscriptionID: env.SubscriptionID, Reason: reason})
			return
		} else {
			rl.addListener(ws, env.SubscriptionID, srl, filter, cancelReqCtx)
		}
	}

	go func() {
		eose.Wait()
		ws.WriteJSON(nostr.EOSEEnvelope(env.SubscriptionID))
	}()
```

### Next.orly.dev REQ Processing
```go
// Comprehensive ACL and authentication checks first
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
switch accessLevel {
case "none":
	return // Send auth-required response
}

// Process all filters and collect events
for _, f := range *env.Filters {
	filterEvents, err = l.QueryEvents(queryCtx, f)
	allEvents = append(allEvents, filterEvents...)
}

// Apply privacy and privilege checks
// Send all historical events
// Set up ongoing subscription only if needed
```

## Key Architectural Differences

### 1. **Filter Processing Strategy**

**Khatru:**
- Processes each filter independently and concurrently
- Uses WaitGroup to coordinate EOSE across all filters
- Immediately sets up listeners for ongoing subscriptions
- Fails the entire subscription if any filter is rejected

**Next.orly.dev:**
- Processes all filters sequentially in a single context
- Collects all events before applying access control
- Only sets up subscriptions for filters that need ongoing updates
- Gracefully handles individual filter failures

### 2. **Access Control Integration**

**Khatru:**
- Basic NIP-42 authentication support
- Hook-based authorization via `RejectFilter` functions
- Limited built-in access control features

**Next.orly.dev:**
- Comprehensive ACL system with multiple access levels (a simplified gate is sketched after this list)
- Built-in support for private events with npub authorization
- Privileged event filtering based on pubkey and p-tags
- Granular permission checking at multiple stages
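As an illustration of the kind of gate described in the Next.orly.dev bullets, the sketch below refuses a REQ until the connection has an access level other than "none"; the `AccessControl` interface and the CLOSED reason string are stand-ins for the actual next.orly.dev types, following the NIP-01/NIP-42 convention of an `auth-required:` prefix:

```go
package acl

import "fmt"

// AccessControl is an illustrative stand-in for an ACL registry that maps an
// authenticated pubkey and a remote address to an access level string.
type AccessControl interface {
	GetAccessLevel(authedPubkey []byte, remote string) string
}

// GateReq returns the CLOSED message a relay could send when a REQ arrives on
// a connection whose access level is still "none", and whether to proceed.
func GateReq(reg AccessControl, authedPubkey []byte, remote, subID string) (closed string, ok bool) {
	switch reg.GetAccessLevel(authedPubkey, remote) {
	case "none":
		// Reject the subscription and hint that AUTH is required.
		return fmt.Sprintf(`["CLOSED","%s","auth-required: authenticate to use this relay"]`, subID), false
	default:
		return "", true
	}
}
```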
### 3. **Subscription Management**

**Khatru:**
```go
// Simple listener registration
type listenerSpec struct {
	filter   nostr.Filter
	cancel   context.CancelCauseFunc
	subRelay *Relay
}
rl.addListener(ws, subscriptionID, relay, filter, cancel)
```

**Next.orly.dev:**
```go
// Publisher-subscriber system with rich metadata
type W struct {
	Conn         *websocket.Conn
	remote       string
	Id           string
	Receiver     event.C
	Filters      *filter.S
	AuthedPubkey []byte
}
l.publishers.Receive(&W{...})
```

### 4. **Performance Optimizations**

**Khatru:**
- Concurrent filter processing
- Immediate streaming of events as they're found
- Memory-efficient with direct event streaming

**Next.orly.dev:**
- Batch processing with deduplication (see the sketch after this list)
- Memory management with explicit `ev.Free()` calls
- Smart subscription cancellation for ID-only queries
- Event result caching and seen-tracking
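The batching-plus-deduplication bullet can be illustrated with a short sketch; the `Event` type with an `ID` field and a `Free()` method mirrors what is described above, but the code is a simplified stand-in rather than the actual next.orly.dev implementation:

```go
package relay

import "encoding/hex"

// Event is an illustrative stand-in: the real event type carries a 32-byte ID
// and, per the description above, an explicit Free method for buffer reuse.
type Event struct {
	ID []byte
}

func (e *Event) Free() { /* return buffers to a pool in the real implementation */ }

// dedupe collapses the results gathered from several filters into one slice,
// keyed by event ID, releasing duplicates immediately instead of waiting for GC.
func dedupe(batches ...[]*Event) []*Event {
	seen := make(map[string]struct{})
	var out []*Event
	for _, batch := range batches {
		for _, ev := range batch {
			key := hex.EncodeToString(ev.ID)
			if _, dup := seen[key]; dup {
				ev.Free()
				continue
			}
			seen[key] = struct{}{}
			out = append(out, ev)
		}
	}
	return out
}
```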
### 5. **Error Handling & Observability**
|
||||
|
||||
**Khatru:**
|
||||
- Basic error logging
|
||||
- Simple connection state management
|
||||
- Limited metrics and observability
|
||||
|
||||
**Next.orly.dev:**
|
||||
- Comprehensive error handling with context preservation
|
||||
- Detailed logging at each processing stage
|
||||
- Built-in metrics (message count, REQ count, event count)
|
||||
- Graceful degradation on individual component failures
|
||||
|
||||
## Memory Management
|
||||
|
||||
### Khatru
|
||||
- Relies on Go's garbage collector
|
||||
- Simple WebSocket struct with minimal state
|
||||
- Uses sync.Map for thread-safe operations
|
||||
|
||||
### Next.orly.dev
|
||||
- Explicit memory management with `ev.Free()` calls
|
||||
- Resource pooling and reuse patterns
|
||||
- Detailed tracking of connection resources
|
||||
|
||||
## Concurrency Models
|
||||
|
||||
### Khatru
|
||||
- Per-connection goroutine for message reading
|
||||
- Additional goroutines for each message processing
|
||||
- WaitGroup coordination for multi-filter EOSE
|
||||
|
||||
### Next.orly.dev
|
||||
- Per-connection goroutine with single-threaded message processing
|
||||
- Publisher-subscriber system handles concurrent event distribution
|
||||
- Context-based cancellation throughout
|
||||
|
||||
## Trade-offs Analysis

### Khatru Advantages

- **Simplicity**: Easier to understand and modify
- **Performance**: Lower latency due to concurrent processing
- **Flexibility**: Hook-based architecture allows extensive customization
- **Streaming**: Events are sent as soon as they're found

### Khatru Disadvantages

- **Monolithic**: Large methods are harder to maintain
- **Limited ACL**: Basic authentication and authorization
- **Error handling**: Less graceful failure recovery
- **Resource usage**: No explicit memory management

### Next.orly.dev Advantages

- **Security**: Comprehensive ACL and privacy features
- **Observability**: Extensive logging and metrics
- **Resource management**: Explicit memory and connection lifecycle management
- **Modularity**: Easier to test and extend individual components
- **Robustness**: Graceful handling of edge cases and failures

### Next.orly.dev Disadvantages

- **Complexity**: Higher cognitive overhead and learning curve
- **Latency**: Sequential processing may be slower for some use cases
- **Resource overhead**: More memory usage due to batching and state tracking
- **Coupling**: Tighter integration between components

## Conclusion

Both implementations represent different philosophies:

- **Khatru** prioritizes simplicity, performance, and extensibility through a hook-based architecture
- **Next.orly.dev** prioritizes security, observability, and robustness through comprehensive built-in features

The choice between them depends on specific requirements:

- Choose **Khatru** for high-performance relays with custom business logic
- Choose **Next.orly.dev** for production relays requiring comprehensive access control and monitoring

Both approaches demonstrate a mature understanding of Nostr protocol requirements while making different trade-offs between complexity and features.

51 go.mod
@@ -3,58 +3,49 @@ module next.orly.dev
|
||||
go 1.25.0
|
||||
|
||||
require (
|
||||
acl.orly v0.0.0-00010101000000-000000000000
|
||||
crypto.orly v0.0.0-00010101000000-000000000000
|
||||
database.orly v0.0.0-00010101000000-000000000000
|
||||
encoders.orly v0.0.0-00010101000000-000000000000
|
||||
github.com/adrg/xdg v0.5.3
|
||||
github.com/coder/websocket v1.8.13
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/dgraph-io/badger/v4 v4.8.0
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
|
||||
github.com/klauspost/cpuid/v2 v2.3.0
|
||||
github.com/pkg/profile v1.7.0
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b
|
||||
go-simpler.org/env v0.12.0
|
||||
interfaces.orly v0.0.0-00010101000000-000000000000
|
||||
lol.mleku.dev v1.0.2
|
||||
protocol.orly v0.0.0-00010101000000-000000000000
|
||||
utils.orly v0.0.0-00010101000000-000000000000
|
||||
go.uber.org/atomic v1.11.0
|
||||
golang.org/x/crypto v0.41.0
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b
|
||||
golang.org/x/lint v0.0.0-20241112194109-818c5a804067
|
||||
golang.org/x/net v0.43.0
|
||||
honnef.co/go/tools v0.6.1
|
||||
lol.mleku.dev v1.0.3
|
||||
lukechampine.com/frand v1.5.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/fgprof v0.9.3 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/google/flatbuffers v25.2.10+incompatible // indirect
|
||||
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/templexxx/cpu v0.0.1 // indirect
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect
|
||||
golang.org/x/net v0.41.0 // indirect
|
||||
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect
|
||||
golang.org/x/mod v0.27.0 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.35.0 // indirect
|
||||
golang.org/x/tools v0.36.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
lukechampine.com/frand v1.5.1 // indirect
|
||||
)
|
||||
|
||||
replace (
|
||||
acl.orly => ./pkg/acl
|
||||
crypto.orly => ./pkg/crypto
|
||||
database.orly => ./pkg/database
|
||||
encoders.orly => ./pkg/encoders
|
||||
interfaces.orly => ./pkg/interfaces
|
||||
next.orly.dev => ../../
|
||||
protocol.orly => ./pkg/protocol
|
||||
utils.orly => ./pkg/utils
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
58 go.sum
@@ -1,3 +1,5 @@
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
|
||||
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
|
||||
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
@@ -18,8 +20,6 @@ github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa5
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
|
||||
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
@@ -40,20 +40,24 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
|
||||
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
|
||||
@@ -70,21 +74,49 @@ go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mx
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
|
||||
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ=
|
||||
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
|
||||
golang.org/x/lint v0.0.0-20241112194109-818c5a804067 h1:adDmSQyFTCiv19j015EGKJBoaa7ElV0Q1Wovb/4G7NA=
|
||||
golang.org/x/lint v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
|
||||
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
|
||||
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
|
||||
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
|
||||
golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
|
||||
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c=
|
||||
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
|
||||
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
|
||||
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
|
||||
lol.mleku.dev v1.0.3 h1:IrqLd/wFRghu6MX7mgyKh//3VQiId2AM4RdCbFqSLnY=
|
||||
lol.mleku.dev v1.0.3/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
|
||||
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
|
||||
lukechampine.com/frand v1.5.1/go.mod h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q=
|
||||
|
||||
255 main.go
@@ -3,25 +3,172 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
pp "net/http/pprof"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
acl "acl.orly"
|
||||
database "database.orly"
|
||||
"github.com/pkg/profile"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/app"
|
||||
"next.orly.dev/app/config"
|
||||
"next.orly.dev/pkg/acl"
|
||||
"next.orly.dev/pkg/crypto/keys"
|
||||
"next.orly.dev/pkg/database"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/spider"
|
||||
"next.orly.dev/pkg/version"
|
||||
)
|
||||
|
||||
// openBrowser attempts to open the specified URL in the default browser.
|
||||
// It supports multiple platforms including Linux, macOS, and Windows.
|
||||
func openBrowser(url string) {
|
||||
var err error
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
err = exec.Command("xdg-open", url).Start()
|
||||
case "windows":
|
||||
err = exec.Command(
|
||||
"rundll32", "url.dll,FileProtocolHandler", url,
|
||||
).Start()
|
||||
case "darwin":
|
||||
err = exec.Command("open", url).Start()
|
||||
default:
|
||||
log.W.F("unsupported platform for opening browser: %s", runtime.GOOS)
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.E.F("failed to open browser: %v", err)
|
||||
} else {
|
||||
log.I.F("opened browser to %s", url)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
runtime.GOMAXPROCS(runtime.NumCPU() * 4)
|
||||
var err error
|
||||
var cfg *config.C
|
||||
if cfg, err = config.New(); chk.T(err) {
|
||||
if cfg, err = config.New(); chk.T(err) {
|
||||
}
|
||||
log.I.F("starting %s %s", cfg.AppName, version.V)
|
||||
|
||||
// Handle 'identity' subcommand: print relay identity secret and pubkey and exit
|
||||
if config.IdentityRequested() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
var db *database.D
|
||||
if db, err = database.New(ctx, cancel, cfg.DataDir, cfg.DBLogLevel); chk.E(err) {
|
||||
os.Exit(1)
|
||||
}
|
||||
defer db.Close()
|
||||
skb, err := db.GetOrCreateRelayIdentitySecret()
|
||||
if chk.E(err) {
|
||||
os.Exit(1)
|
||||
}
|
||||
pk, err := keys.SecretBytesToPubKeyHex(skb)
|
||||
if chk.E(err) {
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("identity secret: %s\nidentity pubkey: %s\n", hex.Enc(skb), pk)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// If OpenPprofWeb is true and profiling is enabled, we need to ensure HTTP profiling is also enabled
|
||||
if cfg.OpenPprofWeb && cfg.Pprof != "" && !cfg.PprofHTTP {
|
||||
log.I.F("enabling HTTP pprof server to support web viewer")
|
||||
cfg.PprofHTTP = true
|
||||
}
|
||||
switch cfg.Pprof {
|
||||
case "cpu":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.CPUProfile, profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.CPUProfile)
|
||||
defer prof.Stop()
|
||||
}
|
||||
case "memory":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.MemProfile, profile.MemProfileRate(32),
|
||||
profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.MemProfile)
|
||||
defer prof.Stop()
|
||||
}
|
||||
case "allocation":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.MemProfileAllocs, profile.MemProfileRate(32),
|
||||
profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.MemProfileAllocs)
|
||||
defer prof.Stop()
|
||||
}
|
||||
case "heap":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.MemProfileHeap, profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.MemProfileHeap)
|
||||
defer prof.Stop()
|
||||
}
|
||||
case "mutex":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.MutexProfile, profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.MutexProfile)
|
||||
defer prof.Stop()
|
||||
}
|
||||
case "threadcreate":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.ThreadcreationProfile,
|
||||
profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.ThreadcreationProfile)
|
||||
defer prof.Stop()
|
||||
}
|
||||
case "goroutine":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.GoroutineProfile, profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.GoroutineProfile)
|
||||
defer prof.Stop()
|
||||
}
|
||||
case "block":
|
||||
if cfg.PprofPath != "" {
|
||||
prof := profile.Start(
|
||||
profile.BlockProfile, profile.ProfilePath(cfg.PprofPath),
|
||||
)
|
||||
defer prof.Stop()
|
||||
} else {
|
||||
prof := profile.Start(profile.BlockProfile)
|
||||
defer prof.Stop()
|
||||
}
|
||||
|
||||
}
|
||||
log.I.F("starting %s %s", cfg.AppName, version.V)
|
||||
startProfiler(cfg.Pprof)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
var db *database.D
|
||||
if db, err = database.New(
|
||||
@@ -34,6 +181,100 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
acl.Registry.Syncer()
|
||||
|
||||
// Initialize and start spider functionality if enabled
|
||||
spiderCtx, spiderCancel := context.WithCancel(ctx)
|
||||
spiderInstance := spider.New(db, cfg, spiderCtx, spiderCancel)
|
||||
spiderInstance.Start()
|
||||
defer spiderInstance.Stop()
|
||||
|
||||
// Start HTTP pprof server if enabled
|
||||
if cfg.PprofHTTP {
|
||||
pprofAddr := fmt.Sprintf("%s:%d", cfg.Listen, 6060)
|
||||
pprofMux := http.NewServeMux()
|
||||
pprofMux.HandleFunc("/debug/pprof/", pp.Index)
|
||||
pprofMux.HandleFunc("/debug/pprof/cmdline", pp.Cmdline)
|
||||
pprofMux.HandleFunc("/debug/pprof/profile", pp.Profile)
|
||||
pprofMux.HandleFunc("/debug/pprof/symbol", pp.Symbol)
|
||||
pprofMux.HandleFunc("/debug/pprof/trace", pp.Trace)
|
||||
for _, p := range []string{
|
||||
"allocs", "block", "goroutine", "heap", "mutex", "threadcreate",
|
||||
} {
|
||||
pprofMux.Handle("/debug/pprof/"+p, pp.Handler(p))
|
||||
}
|
||||
ppSrv := &http.Server{Addr: pprofAddr, Handler: pprofMux}
|
||||
go func() {
|
||||
log.I.F("pprof server listening on %s", pprofAddr)
|
||||
if err := ppSrv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
log.E.F("pprof server error: %v", err)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
shutdownCtx, cancelShutdown := context.WithTimeout(
|
||||
context.Background(), 2*time.Second,
|
||||
)
|
||||
defer cancelShutdown()
|
||||
_ = ppSrv.Shutdown(shutdownCtx)
|
||||
}()
|
||||
|
||||
// Open the pprof web viewer if enabled
|
||||
if cfg.OpenPprofWeb && cfg.Pprof != "" {
|
||||
pprofURL := fmt.Sprintf("http://localhost:6060/debug/pprof/")
|
||||
go func() {
|
||||
// Wait a moment for the server to start
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
openBrowser(pprofURL)
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// Start health check HTTP server if configured
|
||||
var healthSrv *http.Server
|
||||
if cfg.HealthPort > 0 {
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc(
|
||||
"/healthz", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte("ok"))
|
||||
log.I.F("health check ok")
|
||||
},
|
||||
)
|
||||
// Optional shutdown endpoint to gracefully stop the process so profiling defers run
|
||||
if cfg.EnableShutdown {
|
||||
mux.HandleFunc(
|
||||
"/shutdown", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = w.Write([]byte("shutting down"))
|
||||
log.I.F("shutdown requested via /shutdown; sending SIGINT to self")
|
||||
go func() {
|
||||
p, _ := os.FindProcess(os.Getpid())
|
||||
_ = p.Signal(os.Interrupt)
|
||||
}()
|
||||
},
|
||||
)
|
||||
}
|
||||
healthSrv = &http.Server{
|
||||
Addr: fmt.Sprintf(
|
||||
"%s:%d", cfg.Listen, cfg.HealthPort,
|
||||
), Handler: mux,
|
||||
}
|
||||
go func() {
|
||||
log.I.F("health check server listening on %s", healthSrv.Addr)
|
||||
if err := healthSrv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
log.E.F("health server error: %v", err)
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
shutdownCtx, cancelShutdown := context.WithTimeout(
|
||||
context.Background(), 2*time.Second,
|
||||
)
|
||||
defer cancelShutdown()
|
||||
_ = healthSrv.Shutdown(shutdownCtx)
|
||||
}()
|
||||
}
|
||||
|
||||
quit := app.Run(ctx, cfg, db)
|
||||
sigs := make(chan os.Signal, 1)
|
||||
signal.Notify(sigs, os.Interrupt)
|
||||
@@ -43,12 +284,14 @@ func main() {
|
||||
fmt.Printf("\r")
|
||||
cancel()
|
||||
chk.E(db.Close())
|
||||
log.I.F("exiting")
|
||||
return
|
||||
case <-quit:
|
||||
cancel()
|
||||
chk.E(db.Close())
|
||||
log.I.F("exiting")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
log.I.F("exiting")
|
||||
}
|
||||
|
||||
89 manage-relay.sh (executable file)
@@ -0,0 +1,89 @@
|
||||
#!/bin/bash
|
||||
# Stella's Orly Relay Management Script
|
||||
|
||||
set -e
|
||||
|
||||
RELAY_SERVICE="stella-relay"
|
||||
RELAY_URL="ws://127.0.0.1:7777"
|
||||
|
||||
case "${1:-}" in
|
||||
"start")
|
||||
echo "🚀 Starting Stella's Orly Relay..."
|
||||
sudo systemctl start $RELAY_SERVICE
|
||||
echo "✅ Relay started!"
|
||||
;;
|
||||
"stop")
|
||||
echo "⏹️ Stopping Stella's Orly Relay..."
|
||||
sudo systemctl stop $RELAY_SERVICE
|
||||
echo "✅ Relay stopped!"
|
||||
;;
|
||||
"restart")
|
||||
echo "🔄 Restarting Stella's Orly Relay..."
|
||||
sudo systemctl restart $RELAY_SERVICE
|
||||
echo "✅ Relay restarted!"
|
||||
;;
|
||||
"status")
|
||||
echo "📊 Stella's Orly Relay Status:"
|
||||
sudo systemctl status $RELAY_SERVICE --no-pager
|
||||
;;
|
||||
"logs")
|
||||
echo "📜 Stella's Orly Relay Logs:"
|
||||
sudo journalctl -u $RELAY_SERVICE -f --no-pager
|
||||
;;
|
||||
"test")
|
||||
echo "🧪 Testing relay connection..."
|
||||
if curl -s -I http://127.0.0.1:7777 | grep -q "426 Upgrade Required"; then
|
||||
echo "✅ Relay is responding correctly!"
|
||||
echo "📡 WebSocket URL: $RELAY_URL"
|
||||
else
|
||||
echo "❌ Relay is not responding correctly"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
"enable")
|
||||
echo "🔧 Enabling relay to start at boot..."
|
||||
sudo systemctl enable $RELAY_SERVICE
|
||||
echo "✅ Relay will start automatically at boot!"
|
||||
;;
|
||||
"disable")
|
||||
echo "🔧 Disabling relay auto-start..."
|
||||
sudo systemctl disable $RELAY_SERVICE
|
||||
echo "✅ Relay will not start automatically at boot!"
|
||||
;;
|
||||
"info")
|
||||
echo "📋 Stella's Orly Relay Information:"
|
||||
echo " Service: $RELAY_SERVICE"
|
||||
echo " WebSocket URL: $RELAY_URL"
|
||||
echo " HTTP URL: http://127.0.0.1:7777"
|
||||
echo " Data Directory: /home/madmin/.local/share/orly-relay"
|
||||
echo " Config Directory: $(pwd)"
|
||||
echo ""
|
||||
echo "🔑 Admin NPubs:"
|
||||
echo " Stella: npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx"
|
||||
echo " Admin2: npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z"
|
||||
;;
|
||||
*)
|
||||
echo "🌲 Stella's Orly Relay Management Script"
|
||||
echo ""
|
||||
echo "Usage: $0 [COMMAND]"
|
||||
echo ""
|
||||
echo "Commands:"
|
||||
echo " start Start the relay"
|
||||
echo " stop Stop the relay"
|
||||
echo " restart Restart the relay"
|
||||
echo " status Show relay status"
|
||||
echo " logs Show relay logs (follow mode)"
|
||||
echo " test Test relay connection"
|
||||
echo " enable Enable auto-start at boot"
|
||||
echo " disable Disable auto-start at boot"
|
||||
echo " info Show relay information"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 start # Start the relay"
|
||||
echo " $0 status # Check if it's running"
|
||||
echo " $0 test # Test WebSocket connection"
|
||||
echo " $0 logs # Watch real-time logs"
|
||||
echo ""
|
||||
echo "🌲 Crafted in the digital forest by Stella ✨"
|
||||
;;
|
||||
esac
|
||||
@@ -1,8 +1,8 @@
|
||||
package acl
|
||||
|
||||
import (
|
||||
"interfaces.orly/acl"
|
||||
"utils.orly/atomic"
|
||||
"next.orly.dev/pkg/interfaces/acl"
|
||||
"next.orly.dev/pkg/utils/atomic"
|
||||
)
|
||||
|
||||
var Registry = &S{}
|
||||
@@ -28,10 +28,10 @@ func (s *S) Configure(cfg ...any) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *S) GetAccessLevel(pub []byte) (level string) {
|
||||
func (s *S) GetAccessLevel(pub []byte, address string) (level string) {
|
||||
for _, i := range s.ACL {
|
||||
if i.Type() == s.Active.Load() {
|
||||
level = i.GetAccessLevel(pub)
|
||||
level = i.GetAccessLevel(pub, address)
|
||||
break
|
||||
}
|
||||
}
|
||||
@@ -66,3 +66,15 @@ func (s *S) Type() (typ string) {
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AddFollow forwards a pubkey to the active ACL if it supports dynamic follows
|
||||
func (s *S) AddFollow(pub []byte) {
|
||||
for _, i := range s.ACL {
|
||||
if i.Type() == s.Active.Load() {
|
||||
if f, ok := i.(*Follows); ok {
|
||||
f.AddFollow(pub)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,38 +1,41 @@
|
||||
package acl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
database "database.orly"
|
||||
"database.orly/indexes/types"
|
||||
"encoders.orly/bech32encoding"
|
||||
"encoders.orly/envelopes"
|
||||
"encoders.orly/envelopes/eoseenvelope"
|
||||
"encoders.orly/envelopes/eventenvelope"
|
||||
"encoders.orly/envelopes/reqenvelope"
|
||||
"encoders.orly/event"
|
||||
"encoders.orly/filter"
|
||||
"encoders.orly/hex"
|
||||
"encoders.orly/kind"
|
||||
"encoders.orly/tag"
|
||||
"github.com/coder/websocket"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/errorf"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/app/config"
|
||||
utils "utils.orly"
|
||||
"utils.orly/normalize"
|
||||
"utils.orly/values"
|
||||
"next.orly.dev/pkg/database"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
"next.orly.dev/pkg/encoders/bech32encoding"
|
||||
"next.orly.dev/pkg/encoders/envelopes"
|
||||
"next.orly.dev/pkg/encoders/envelopes/eoseenvelope"
|
||||
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||
"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/filter"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/encoders/kind"
|
||||
"next.orly.dev/pkg/encoders/tag"
|
||||
"next.orly.dev/pkg/protocol/publish"
|
||||
"next.orly.dev/pkg/utils"
|
||||
"next.orly.dev/pkg/utils/normalize"
|
||||
"next.orly.dev/pkg/utils/values"
|
||||
)
|
||||
|
||||
type Follows struct {
|
||||
Ctx context.Context
|
||||
cfg *config.C
|
||||
*database.D
|
||||
pubs *publish.S
|
||||
followsMx sync.RWMutex
|
||||
admins [][]byte
|
||||
follows [][]byte
|
||||
@@ -45,14 +48,17 @@ func (f *Follows) Configure(cfg ...any) (err error) {
|
||||
for _, ca := range cfg {
|
||||
switch c := ca.(type) {
|
||||
case *config.C:
|
||||
log.D.F("setting ACL config: %v", c)
|
||||
// log.D.F("setting ACL config: %v", c)
|
||||
f.cfg = c
|
||||
case *database.D:
|
||||
log.D.F("setting ACL database: %s", c.Path())
|
||||
// log.D.F("setting ACL database: %s", c.Path())
|
||||
f.D = c
|
||||
case context.Context:
|
||||
log.D.F("setting ACL context: %s", c.Value("id"))
|
||||
// log.D.F("setting ACL context: %s", c.Value("id"))
|
||||
f.Ctx = c
|
||||
case *publish.S:
|
||||
// set publisher for dispatching new events
|
||||
f.pubs = c
|
||||
default:
|
||||
err = errorf.E("invalid type: %T", reflect.TypeOf(ca))
|
||||
}
|
||||
@@ -64,15 +70,17 @@ func (f *Follows) Configure(cfg ...any) (err error) {
|
||||
// find admin follow lists
|
||||
f.followsMx.Lock()
|
||||
defer f.followsMx.Unlock()
|
||||
log.I.F("finding admins")
|
||||
// log.I.F("finding admins")
|
||||
f.follows, f.admins = nil, nil
|
||||
for _, admin := range f.cfg.Admins {
|
||||
log.I.F("%s", admin)
|
||||
// log.I.F("%s", admin)
|
||||
var adm []byte
|
||||
if adm, err = bech32encoding.NpubOrHexToPublicKeyBinary(admin); chk.E(err) {
|
||||
if a, e := bech32encoding.NpubOrHexToPublicKeyBinary(admin); chk.E(e) {
|
||||
continue
|
||||
} else {
|
||||
adm = a
|
||||
}
|
||||
log.I.F("admin: %0x", adm)
|
||||
// log.I.F("admin: %0x", adm)
|
||||
f.admins = append(f.admins, adm)
|
||||
fl := &filter.F{
|
||||
Authors: tag.NewFromAny(adm),
|
||||
@@ -96,12 +104,14 @@ func (f *Follows) Configure(cfg ...any) (err error) {
|
||||
if ev, err = f.D.FetchEventBySerial(s); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
log.I.F("admin follow list:\n%s", ev.Serialize())
|
||||
// log.I.F("admin follow list:\n%s", ev.Serialize())
|
||||
for _, v := range ev.Tags.GetAll([]byte("p")) {
|
||||
log.I.F("adding follow: %s", v.Value())
|
||||
// log.I.F("adding follow: %s", v.Value())
|
||||
var a []byte
|
||||
if a, err = hex.Dec(string(v.Value())); chk.E(err) {
|
||||
if b, e := hex.Dec(string(v.Value())); chk.E(e) {
|
||||
continue
|
||||
} else {
|
||||
a = b
|
||||
}
|
||||
f.follows = append(f.follows, a)
|
||||
}
|
||||
@@ -116,7 +126,7 @@ func (f *Follows) Configure(cfg ...any) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (f *Follows) GetAccessLevel(pub []byte) (level string) {
|
||||
func (f *Follows) GetAccessLevel(pub []byte, address string) (level string) {
|
||||
if f.cfg == nil {
|
||||
return "write"
|
||||
}
|
||||
@@ -199,11 +209,12 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
|
||||
return
|
||||
}
|
||||
urls := f.adminRelays()
|
||||
log.I.S(urls)
|
||||
if len(urls) == 0 {
|
||||
log.W.F("follows syncer: no admin relays found in DB (kind 10002)")
|
||||
return
|
||||
}
|
||||
log.I.F(
|
||||
log.T.F(
|
||||
"follows syncer: subscribing to %d relays for %d authors", len(urls),
|
||||
len(authors),
|
||||
)
|
||||
@@ -220,6 +231,15 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
|
||||
c, _, err := websocket.Dial(ctx, u, nil)
|
||||
if err != nil {
|
||||
log.W.F("follows syncer: dial %s failed: %v", u, err)
|
||||
if strings.Contains(
|
||||
err.Error(), "response status code 101 but got 403",
|
||||
) {
|
||||
// 403 means the relay is not accepting connections from
|
||||
// us. Forbidden is the meaning, usually used to
|
||||
// indicate either the IP or user is blocked. so stop
|
||||
// trying this one.
|
||||
return
|
||||
}
|
||||
timer := time.NewTimer(backoff)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
@@ -240,13 +260,13 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
|
||||
}
|
||||
*ff = append(*ff, f1)
|
||||
req := reqenvelope.NewFrom([]byte("follows-sync"), ff)
|
||||
if err := c.Write(
|
||||
if err = c.Write(
|
||||
ctx, websocket.MessageText, req.Marshal(nil),
|
||||
); chk.E(err) {
|
||||
_ = c.Close(websocket.StatusInternalError, "write failed")
|
||||
continue
|
||||
}
|
||||
log.I.F("sent REQ to %s for follows subscription", u)
|
||||
log.T.F("sent REQ to %s for follows subscription", u)
|
||||
// read loop
|
||||
for {
|
||||
select {
|
||||
@@ -274,11 +294,11 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
|
||||
if ok, err := res.Event.Verify(); chk.T(err) || !ok {
|
||||
continue
|
||||
}
|
||||
if _, _, err := f.D.SaveEvent(
|
||||
if _, _, err = f.D.SaveEvent(
|
||||
ctx, res.Event,
|
||||
); err != nil {
|
||||
if !strings.HasPrefix(
|
||||
err.Error(), "event already exists",
|
||||
err.Error(), "blocked:",
|
||||
) {
|
||||
log.W.F(
|
||||
"follows syncer: save event failed: %v",
|
||||
@@ -286,11 +306,16 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
|
||||
)
|
||||
}
|
||||
// ignore duplicates and continue
|
||||
} else {
|
||||
// Only dispatch if the event was newly saved (no error)
|
||||
if f.pubs != nil {
|
||||
go f.pubs.Deliver(res.Event)
|
||||
}
|
||||
// log.I.F(
|
||||
// "saved new event from follows syncer: %0x",
|
||||
// res.Event.ID,
|
||||
// )
|
||||
}
|
||||
log.I.F(
|
||||
"saved new event from follows syncer: %0x",
|
||||
res.Event.ID,
|
||||
)
|
||||
case eoseenvelope.L:
|
||||
// ignore, continue subscription
|
||||
default:
|
||||
@@ -333,6 +358,43 @@ func (f *Follows) Syncer() {
|
||||
}
|
||||
}
|
||||
}()
|
||||
f.updated <- struct{}{}
|
||||
}
|
||||
|
||||
// GetFollowedPubkeys returns a copy of the followed pubkeys list
|
||||
func (f *Follows) GetFollowedPubkeys() [][]byte {
|
||||
f.followsMx.RLock()
|
||||
defer f.followsMx.RUnlock()
|
||||
|
||||
followedPubkeys := make([][]byte, len(f.follows))
|
||||
copy(followedPubkeys, f.follows)
|
||||
return followedPubkeys
|
||||
}
|
||||
|
||||
// AddFollow appends a pubkey to the in-memory follows list if not already present
|
||||
// and signals the syncer to refresh subscriptions.
|
||||
func (f *Follows) AddFollow(pub []byte) {
|
||||
if len(pub) == 0 {
|
||||
return
|
||||
}
|
||||
f.followsMx.Lock()
|
||||
defer f.followsMx.Unlock()
|
||||
for _, p := range f.follows {
|
||||
if bytes.Equal(p, pub) {
|
||||
return
|
||||
}
|
||||
}
|
||||
b := make([]byte, len(pub))
|
||||
copy(b, pub)
|
||||
f.follows = append(f.follows, b)
|
||||
// notify syncer if initialized
|
||||
if f.updated != nil {
|
||||
select {
|
||||
case f.updated <- struct{}{}:
|
||||
default:
|
||||
// if channel is full or not yet listened to, ignore
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
module acl.orly
|
||||
|
||||
go 1.25.0
|
||||
|
||||
replace (
|
||||
acl.orly => ../acl
|
||||
crypto.orly => ../crypto
|
||||
database.orly => ../database
|
||||
encoders.orly => ../encoders
|
||||
interfaces.orly => ../interfaces
|
||||
next.orly.dev => ../../
|
||||
protocol.orly => ../protocol
|
||||
utils.orly => ../utils
|
||||
)
|
||||
|
||||
require (
|
||||
database.orly v0.0.0-00010101000000-000000000000
|
||||
encoders.orly v0.0.0-00010101000000-000000000000
|
||||
interfaces.orly v0.0.0-00010101000000-000000000000
|
||||
lol.mleku.dev v1.0.2
|
||||
next.orly.dev v0.0.0-00010101000000-000000000000
|
||||
utils.orly v0.0.0-00010101000000-000000000000
|
||||
)
|
||||
|
||||
require (
|
||||
crypto.orly v0.0.0-00010101000000-000000000000 // indirect
|
||||
github.com/adrg/xdg v0.5.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dgraph-io/badger/v4 v4.8.0 // indirect
|
||||
github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/google/flatbuffers v25.2.10+incompatible // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/templexxx/cpu v0.0.1 // indirect
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b // indirect
|
||||
go-simpler.org/env v0.12.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.37.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.37.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect
|
||||
golang.org/x/net v0.41.0 // indirect
|
||||
golang.org/x/sys v0.35.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
lukechampine.com/frand v1.5.1 // indirect
|
||||
)
|
||||
@@ -1,68 +0,0 @@
|
||||
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
|
||||
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
|
||||
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
|
||||
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
|
||||
github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI=
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
|
||||
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
|
||||
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
|
||||
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
|
||||
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ=
|
||||
go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs=
|
||||
go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
|
||||
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
|
||||
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
|
||||
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
|
||||
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
|
||||
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
|
||||
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
|
||||
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c=
|
||||
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
|
||||
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
|
||||
lukechampine.com/frand v1.5.1/go.mod h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q=
|
||||
@@ -8,7 +8,7 @@ type None struct{}
|
||||
|
||||
func (n None) Configure(cfg ...any) (err error) { return }
|
||||
|
||||
func (n None) GetAccessLevel(pub []byte) (level string) {
|
||||
func (n None) GetAccessLevel(pub []byte, address string) (level string) {
|
||||
return "write"
|
||||
}
|
||||
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec/base58"
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/crypto/ec/base58"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
var stringTests = []struct {
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec/base58"
|
||||
"next.orly.dev/pkg/crypto/ec/base58"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -7,7 +7,7 @@ package base58
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"crypto.orly/sha256"
|
||||
"next.orly.dev/pkg/crypto/sha256"
|
||||
)
|
||||
|
||||
// ErrChecksum indicates that the checksum of a check-encoded string does not verify against
|
||||
|
||||
@@ -7,7 +7,7 @@ package base58_test
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec/base58"
|
||||
"next.orly.dev/pkg/crypto/ec/base58"
|
||||
)
|
||||
|
||||
var checkEncodingStringTests = []struct {
|
||||
|
||||
@@ -7,7 +7,7 @@ package base58_test
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"crypto.orly/ec/base58"
|
||||
"next.orly.dev/pkg/crypto/ec/base58"
|
||||
)
|
||||
|
||||
// This example demonstrates how to decode modified base58 encoded data.
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
// TestBech32 tests whether decoding and re-encoding the valid BIP-173 test
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
// setHex decodes the passed big-endian hex string into the internal field value
|
||||
|
||||
@@ -20,7 +20,7 @@ package btcec
|
||||
// reverse the transform than to operate in affine coordinates.
|
||||
|
||||
import (
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// KoblitzCurve provides an implementation for secp256k1 that fits the ECC
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"crypto.orly/ec/wire"
|
||||
"next.orly.dev/pkg/crypto/ec/wire"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -3,8 +3,8 @@ package chaincfg
|
||||
import (
|
||||
"time"
|
||||
|
||||
"crypto.orly/ec/chainhash"
|
||||
"crypto.orly/ec/wire"
|
||||
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||
"next.orly.dev/pkg/crypto/ec/wire"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"crypto.orly/ec/chainhash"
|
||||
"crypto.orly/ec/wire"
|
||||
"next.orly.dev/pkg/crypto/ec/chainhash"
|
||||
"next.orly.dev/pkg/crypto/ec/wire"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -9,8 +9,8 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"crypto.orly/sha256"
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/crypto/sha256"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -7,7 +7,7 @@ package chainhash
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
// mainNetGenesisHash is the hash of the first block in the block chain for the
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
package chainhash
|
||||
|
||||
import (
|
||||
"crypto.orly/sha256"
|
||||
"next.orly.dev/pkg/crypto/sha256"
|
||||
)
|
||||
|
||||
// HashB calculates hash(b) and returns the resulting bytes.
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
package btcec
|
||||
|
||||
import (
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// GenerateSharedSecret generates a shared secret based on a secret key and a
|
||||
|
||||
@@ -7,7 +7,7 @@ package btcec
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
func TestGenerateSharedSecret(t *testing.T) {
|
||||
|
||||
@@ -6,7 +6,7 @@ package btcec
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// JacobianPoint is an element of the group formed by the secp256k1 curve in
|
||||
|
||||
@@ -8,8 +8,8 @@ package ecdsa
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
// hexToModNScalar converts the passed hex string into a ModNScalar and will
|
||||
|
||||
@@ -8,7 +8,7 @@ package ecdsa
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// References:
|
||||
|
||||
@@ -14,10 +14,10 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"encoders.orly/hex"
|
||||
"lol.mleku.dev/chk"
|
||||
"utils.orly"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
// hexToBytes converts the passed hex string into bytes and will panic if there
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
package btcec
|
||||
|
||||
import (
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// Error identifies an error related to public key cryptography using a
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package btcec
|
||||
|
||||
import (
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// FieldVal implements optimized fixed-precision arithmetic over the secp256k1
|
||||
|
||||
@@ -9,8 +9,8 @@ import (
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"encoders.orly/hex"
|
||||
"lol.mleku.dev/chk"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
// TestIsZero ensures that checking if a field IsZero works as expected.
|
||||
|
||||
@@ -11,7 +11,7 @@ package btcec
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"encoders.orly/hex"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
func FuzzParsePubKey(f *testing.F) {
|
||||
|
||||
@@ -4,7 +4,7 @@
|
||||
package btcec
|
||||
|
||||
import (
|
||||
"crypto.orly/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
)
|
||||
|
||||
// ModNScalar implements optimized 256-bit constant-time fixed-precision
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.