Compare commits

36 Commits

| SHA1 |
|---|
| 3314a2a892 |
| 7c14c72e9d |
| dbdc5d703e |
| c1acf0deaa |
| ccffeb902c |
| 35201490a0 |
| 3afd6131d5 |
| 386878fec8 |
| 474e16c315 |
| 47e94c5ff6 |
| c62fdc96d5 |
| 4c66eda10e |
| 9fdef77e02 |
| e8a69077b3 |
| 128bc60726 |
| 6c6f9e8874 |
| 01131f252e |
| 02333b74ae |
| 86ac7b7897 |
| 7e6adf9fba |
| 7d5ebd5ccd |
| f8a321eaee |
| 48c7fab795 |
| f6054f3c37 |
| e1da199858 |
| 45b4f82995 |
| e58eb1d3e3 |
| 72d6ddff15 |
| a50ef55d8e |
| c2d5d2a165 |
| 05b13399e3 |
| 0dea0ca791 |
| ff017b45d2 |
| 50179e44ed |
| 34a3b1ba69 |
| 42273ab2fa |
@@ -94,4 +94,6 @@ use the source of the relay-tester to help guide what expectations the test has,
and use context7 for information about the nostr protocol, and use additional
log statements to help locate the cause of bugs

always use Go v1.25.1 for everything involving Go

always use the nips repository also for information, found at ../github.com/nostr-protocol/nips attached to the project
@@ -13,6 +13,8 @@ cmd/benchmark/reports/

# Go build cache and binaries
**/bin/
**/dist/
**/build/
**/*.out

# Allow web dist directory (needed for embedding)
!app/web/dist/
.idea/jsLibraryMappings.xml (generated, 7 lines deleted)
@@ -1,7 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project version="4">
-  <component name="JavaScriptLibraryMappings">
-    <file url="file://$PROJECT_DIR$/../github.com/jumble" libraries="{jumble/node_modules}" />
-    <file url="file://$PROJECT_DIR$/../github.com/mleku/jumble" libraries="{jumble/node_modules}" />
-  </component>
-</project>
APACHE-PROXY-GUIDE.md (new file, 483 lines)
@@ -0,0 +1,483 @@
# Apache Reverse Proxy Guide for Docker Apps

**Complete guide for WebSocket-enabled applications - covers both Plesk and standard Apache**
**Updated with real-world troubleshooting solutions and the latest Orly relay improvements**

## 🎯 **What This Solves**
- WebSocket connection failures (`NS_ERROR_WEBSOCKET_CONNECTION_REFUSED`)
- Nostr relay connectivity issues (`HTTP 426` instead of WebSocket upgrade)
- Docker container proxy configuration
- SSL certificate integration
- Plesk configuration conflicts and virtual host precedence issues
- **NEW**: WebSocket scheme validation errors (`expected 'ws' got 'wss'`)
- **NEW**: Proxy-friendly relay configuration with enhanced CORS headers
- **NEW**: Improved error handling for malformed client data

## 🐳 **Step 1: Deploy Your Docker Application**

### **For Stella's Orly Relay (Latest Version with Proxy Improvements):**
```bash
# Pull and run the relay with enhanced proxy support
docker run -d \
  --name orly-relay \
  --restart unless-stopped \
  -p 127.0.0.1:7777:7777 \
  -v /data/orly-relay:/data \
  -e ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx \
  -e ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z,npub1m4ny6hjqzepn4rxknuq94c2gpqzr29ufkkw7ttcxyak7v43n6vvsajc2jl \
  -e ORLY_BOOTSTRAP_RELAYS=wss://profiles.nostr1.com,wss://purplepag.es,wss://relay.nostr.band,wss://relay.damus.io \
  -e ORLY_RELAY_URL=wss://orly-relay.imwald.eu \
  -e ORLY_ACL_MODE=follows \
  -e ORLY_SPIDER_MODE=follows \
  -e ORLY_SPIDER_FREQUENCY=1h \
  -e ORLY_SUBSCRIPTION_ENABLED=false \
  silberengel/next-orly:latest

# Test the relay
curl -I http://127.0.0.1:7777
# Should return: HTTP/1.1 200 OK with enhanced CORS headers
```

### **For Web Apps (like Jumble):**
```bash
# Run with fixed port for easier proxy setup
docker run -d \
  --name jumble-app \
  --restart unless-stopped \
  -p 127.0.0.1:3000:80 \
  -e NODE_ENV=production \
  silberengel/imwald-jumble:latest

# Test the app
curl -I http://127.0.0.1:3000
```

## 🔧 **Step 2: Apache Configuration (Standard and Plesk)**

### **For Your Friend's Standard Apache Setup:**

**Tell your friend to create `/etc/apache2/sites-available/domain.conf`:**

```apache
<VirtualHost *:443>
    ServerName your-domain.com

    # SSL Configuration (Let's Encrypt)
    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem

    # Enable required modules first:
    # sudo a2enmod proxy proxy_http proxy_wstunnel rewrite headers ssl

    # Proxy settings
    ProxyPreserveHost On
    ProxyRequests Off

    # WebSocket upgrade handling - CRITICAL for apps with WebSockets
    RewriteEngine On
    RewriteCond %{HTTP:Upgrade} websocket [NC]
    RewriteCond %{HTTP:Connection} upgrade [NC]
    RewriteRule ^/?(.*) "ws://127.0.0.1:PORT/$1" [P,L]

    # Regular HTTP proxy
    ProxyPass / http://127.0.0.1:PORT/
    ProxyPassReverse / http://127.0.0.1:PORT/

    # Headers for modern web apps
    Header always set X-Forwarded-Proto "https"
    Header always set X-Forwarded-Port "443"
    Header always set X-Forwarded-For %{REMOTE_ADDR}s

    # Security headers
    Header always set Strict-Transport-Security "max-age=63072000; includeSubDomains"
    Header always set X-Content-Type-Options nosniff
    Header always set X-Frame-Options SAMEORIGIN
</VirtualHost>

# Redirect HTTP to HTTPS
<VirtualHost *:80>
    ServerName your-domain.com
    Redirect permanent / https://your-domain.com/
</VirtualHost>
```

**Then enable it:**
```bash
sudo a2ensite domain.conf
sudo systemctl reload apache2
```

### **For Plesk Users (You):**

⚠️ **Important**: Plesk often doesn't apply Apache directives correctly through the interface. If the interface method fails, use the "Direct Apache Override" method below.

#### **Method 1: Plesk Interface (Try First)**

1. **Go to Plesk** → Websites & Domains → **your-domain.com**
2. **Click "Apache & nginx Settings"**
3. **DISABLE nginx** (uncheck "Proxy mode" and "Smart static files processing")
4. **Clear HTTP section** (leave empty)
5. **In HTTPS section, add:**

**For Nostr Relay (port 7777):**
```apache
ProxyRequests Off
ProxyPreserveHost On
ProxyPass / ws://127.0.0.1:7777/
ProxyPassReverse / ws://127.0.0.1:7777/
Header always set Access-Control-Allow-Origin "*"
```

6. **Click "Apply"**, wait 60 seconds, and run the quick check below.
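Once applied, a quick check from the server confirms whether the proxy change actually took effect (a sketch assuming `websocat` is installed; which status code plain HTTP returns depends on the relay build, as explained later in this guide):

```bash
# HTTP probe through the proxy: the latest relay answers 200, older builds 426
curl -sI https://your-domain.com | head -n 1

# WebSocket probe through the proxy: any relay reply means the upgrade works
echo '["REQ","check",{}]' | websocat wss://your-domain.com/
```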
#### **Method 2: Direct Apache Override (If Plesk Interface Fails)**

If Plesk doesn't apply your configuration (common issue), bypass it entirely:

```bash
# Create direct Apache override
sudo tee /etc/apache2/conf-available/relay-override.conf << 'EOF'
<VirtualHost YOUR_SERVER_IP:443>
    ServerName your-domain.com
    ServerAlias www.your-domain.com
    ServerAlias ipv4.your-domain.com

    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem

    DocumentRoot /var/www/relay

    # For Nostr relay - proxy everything to WebSocket
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    # CORS headers
    Header always set Access-Control-Allow-Origin "*"
    Header always set Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"

    # Logging
    ErrorLog /var/log/apache2/relay-error.log
    CustomLog /var/log/apache2/relay-access.log combined
</VirtualHost>
EOF

# Enable the override
sudo a2enconf relay-override
sudo mkdir -p /var/www/relay
sudo systemctl restart apache2

# Remove Plesk config if it conflicts
sudo rm /etc/apache2/plesk.conf.d/vhosts/your-domain.com.conf
```

#### **Method 3: Debugging Plesk Issues**

If configurations aren't being applied:

```bash
# Check if Plesk applied your config
grep -E "(ProxyPass|proxy)" /etc/apache2/plesk.conf.d/vhosts/your-domain.com.conf

# Check virtual host precedence
apache2ctl -S | grep your-domain.com

# Check Apache modules
apache2ctl -M | grep -E "(proxy|rewrite)"
```

#### **For Web Apps (port 3000 or 32768):**
```apache
ProxyPreserveHost On
ProxyRequests Off

# WebSocket upgrade handling
RewriteEngine On
RewriteCond %{HTTP:Upgrade} websocket [NC]
RewriteCond %{HTTP:Connection} upgrade [NC]
RewriteRule ^/?(.*) "ws://127.0.0.1:32768/$1" [P,L]

# Regular HTTP proxy
ProxyPass / http://127.0.0.1:32768/
ProxyPassReverse / http://127.0.0.1:32768/

# Headers
ProxyAddHeaders On
Header always set X-Forwarded-Proto "https"
Header always set X-Forwarded-Port "443"
```

## ⚡ **Step 3: Enable Required Modules**

In Plesk, you might need to enable modules. SSH to your server:

```bash
# Enable Apache modules
sudo a2enmod proxy
sudo a2enmod proxy_http
sudo a2enmod proxy_wstunnel
sudo a2enmod rewrite
sudo a2enmod headers
sudo systemctl restart apache2
```
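To confirm the modules are actually loaded before moving on, the same `apache2ctl` check used elsewhere in this guide works here too:

```bash
# Each enabled module should appear in the loaded-module list
apache2ctl -M | grep -E "(proxy|proxy_http|proxy_wstunnel|rewrite|headers)"
```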
## 🆕 **Step 4: Latest Orly Relay Improvements**

### **Enhanced Proxy Support**
The latest Orly relay includes several proxy improvements:

1. **Flexible WebSocket Scheme Handling**: Accepts both `ws://` and `wss://` schemes for authentication
2. **Enhanced CORS Headers**: Better compatibility with web applications
3. **Improved Error Handling**: More robust handling of malformed client data
4. **Proxy-Aware Logging**: Better debugging information for proxy setups

### **Key Environment Variables**
```bash
# Essential for proxy setups
ORLY_RELAY_URL=wss://your-domain.com    # Must match your public URL
ORLY_ACL_MODE=follows                   # Enable follows-based access control
ORLY_SPIDER_MODE=follows                # Enable content syncing from other relays
ORLY_SUBSCRIPTION_ENABLED=false         # Disable payment requirements
```

### **Testing the Enhanced Relay**
```bash
# Test local connectivity
curl -I http://127.0.0.1:7777

# Expected response includes enhanced CORS headers:
# Access-Control-Allow-Credentials: true
# Access-Control-Max-Age: 86400
# Vary: Origin, Access-Control-Request-Method, Access-Control-Request-Headers
```

## ⚡ **Step 5: Alternative - Nginx in Plesk**

If Apache keeps giving issues, switch to nginx in Plesk:

1. Go to Plesk → Websites & Domains → orly-relay.imwald.eu
2. Click "Apache & nginx Settings"
3. Enable "nginx" and set it to serve static files
4. In "Additional nginx directives" add:

```nginx
location / {
    proxy_pass http://127.0.0.1:7777;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
}
```

## 🧪 **Testing**

After making changes:

1. **Apply settings** in Plesk
2. **Wait 30 seconds** for changes to take effect
3. **Test WebSocket**:
```bash
# From your server
echo '["REQ","test",{}]' | websocat wss://orly-relay.imwald.eu/
```
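If the proxy chain is healthy, the relay answers per the Nostr protocol: zero or more `EVENT` messages for the subscription, then an `EOSE`. A sketch of what to expect:

```bash
# Success: events (if any) followed by end-of-stored-events for "test":
#   ["EVENT","test",{...}]
#   ["EOSE","test"]
# An immediate disconnect usually means the WebSocket upgrade never reached
# the relay; a NOTICE means the relay got the frame but rejected it, which
# points away from the proxy.
```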
## 🎯 **Expected Result**

- ✅ No more "websocket error" in browser console
- ✅ `wss://orly-relay.imwald.eu/` connects successfully
- ✅ Jumble app can publish notes

## 🚨 **Real-World Troubleshooting Guide**

*Based on actual deployment experience with Plesk and WebSocket issues*

### **Critical Issues & Solutions:**

#### **🔴 HTTP 503 Service Unavailable**
- **Cause**: Docker container not running
- **Check**: `docker ps | grep relay`
- **Fix**: `docker start container-name`

#### **🔴 HTTP 426 Instead of WebSocket Upgrade**
- **Cause**: Apache using an `http://` proxy instead of `ws://`
- **Fix**: Use `ProxyPass / ws://127.0.0.1:7777/` (not `http://`)

#### **🔴 Plesk Configuration Not Applied**
- **Symptom**: Config not in `/etc/apache2/plesk.conf.d/vhosts/domain.conf`
- **Solution**: Use the Direct Apache Override method (bypass the Plesk interface)

#### **🔴 Virtual Host Conflicts**
- **Check**: `apache2ctl -S | grep domain.com`
- **Fix**: Remove the Plesk config: `sudo rm /etc/apache2/plesk.conf.d/vhosts/domain.conf`

#### **🔴 Nginx Intercepting (Plesk)**
- **Symptom**: Response shows `Server: nginx`
- **Fix**: Disable nginx in Plesk settings

### **Debug Commands:**
```bash
# Essential debugging
docker ps | grep relay                  # Container running?
curl -I http://127.0.0.1:7777           # Local relay (should return 200 with CORS headers)
apache2ctl -S | grep domain.com         # Virtual host precedence
grep ProxyPass /etc/apache2/plesk.conf.d/vhosts/domain.conf  # Config applied?

# WebSocket testing
echo '["REQ","test",{}]' | websocat wss://domain.com/     # Root path
echo '["REQ","test",{}]' | websocat wss://domain.com/ws/  # /ws/ path

# Check relay logs for proxy information
docker logs relay-name | grep -i "proxy info"
docker logs relay-name | grep -i "websocket connection"
```

## 🚨 **Latest Troubleshooting Solutions**

### **WebSocket Scheme Validation Errors**
**Problem**: `"HTTP Scheme incorrect: expected 'ws' got 'wss'"`

**Solution**: Use the latest Orly relay image with enhanced proxy support:
```bash
# Pull the latest image with proxy improvements
docker pull silberengel/next-orly:latest

# Restart with the latest image
docker stop orly-relay && docker rm orly-relay
# Then run with the configuration above
```

### **Malformed Client Data Errors**
**Problem**: `"invalid hex array size, got 2 expect 64"`

**Solution**: These are client-side issues, not server problems. The latest relay handles them gracefully:
- The relay now sends helpful error messages to clients
- Malformed requests are logged but don't crash the relay
- Normal operations continue despite client errors

### **Follows ACL Not Working**
**Problem**: Only owners can write; admins can't write

**Solution**: Ensure proper configuration:
```bash
# Check ACL configuration
docker exec orly-relay env | grep ACL

# Should show: ORLY_ACL_MODE=follows
# If not, restart with explicit configuration
```

### **Spider Not Syncing Content**
**Problem**: Spider enabled but not pulling events

**Solution**: Check for relay lists and follow events:
```bash
# Check spider status
docker logs orly-relay | grep -i spider

# Look for relay discovery
docker logs orly-relay | grep -i "relay URLs"

# Check for follow events
docker logs orly-relay | grep -i "kind.*3"
```

### **Working Solution (Proven):**
```apache
<VirtualHost SERVER_IP:443>
    ServerName domain.com
    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/domain.com/privkey.pem
    DocumentRoot /var/www/relay

    # Direct WebSocket proxy - this is the key!
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    Header always set Access-Control-Allow-Origin "*"
</VirtualHost>
```

---

**Key Lessons**:
1. The Plesk interface often fails to apply Apache directives
2. Use a `ws://` proxy for Nostr relays, not `http://`
3. Direct Apache config files are more reliable than the Plesk interface
4. Always check virtual host precedence with `apache2ctl -S`
5. **NEW**: Use the latest Orly relay image for better proxy compatibility
6. **NEW**: Enhanced CORS headers improve web app compatibility
7. **NEW**: Flexible WebSocket scheme handling eliminates authentication errors
8. **NEW**: Improved error handling makes the relay more robust

## 🎉 **Summary of Latest Improvements**

### **Enhanced Proxy Support**
- ✅ Flexible WebSocket scheme validation (accepts both `ws://` and `wss://`)
- ✅ Enhanced CORS headers for better web app compatibility
- ✅ Improved error handling for malformed client data
- ✅ Proxy-aware logging for better debugging

### **Spider and ACL Features**
- ✅ Follows-based access control (`ORLY_ACL_MODE=follows`)
- ✅ Content syncing from other relays (`ORLY_SPIDER_MODE=follows`)
- ✅ No payment requirements (`ORLY_SUBSCRIPTION_ENABLED=false`)

### **Production Ready**
- ✅ Robust error handling
- ✅ Enhanced logging and debugging
- ✅ Better client compatibility
- ✅ Improved proxy support

**The latest Orly relay is now fully optimized for proxy environments and provides a much better user experience!**
DOCKER.md (new file, 188 lines)
@@ -0,0 +1,188 @@
# Docker Deployment Guide

## Quick Start

### 1. Basic Relay Setup

```bash
# Build and start the relay
docker-compose up -d

# View logs
docker-compose logs -f orly-relay

# Stop the relay
docker-compose down
```

### 2. With Nginx Proxy (for SSL/domain setup)

```bash
# Start relay with nginx proxy
docker-compose --profile proxy up -d

# Configure SSL certificates in nginx/ssl/
# Then update nginx/nginx.conf to enable HTTPS
```

## Configuration

### Environment Variables

Copy `env.example` to `.env` and customize:

```bash
cp env.example .env
# Edit .env with your settings
```

Key settings (a sample `.env` follows this list):
- `ORLY_OWNERS`: Owner npubs (comma-separated, full control)
- `ORLY_ADMINS`: Admin npubs (comma-separated, deletion permissions)
- `ORLY_PORT`: Port to listen on (default: 7777)
- `ORLY_MAX_CONNECTIONS`: Max concurrent connections
- `ORLY_CONCURRENT_WORKERS`: CPU cores for concurrent processing (0 = auto)
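A minimal `.env` sketch tying these together (the values are hypothetical placeholders, not real keys; adjust for your deployment):

```bash
# .env - example values only
ORLY_OWNERS=npub1exampleownerkey
ORLY_ADMINS=npub1exampleadminkey,npub1anotheradminkey
ORLY_PORT=7777
ORLY_MAX_CONNECTIONS=1000
ORLY_CONCURRENT_WORKERS=0   # 0 = use all CPU cores
```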
### Data Persistence

The relay data is stored in the `./data` directory, which is mounted as a volume.

### Performance Tuning

Based on the v0.4.8 optimizations (see the worker example below):
- Concurrent event publishing using all CPU cores
- Optimized BadgerDB access patterns
- Configurable batch sizes and cache settings
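For example, the worker count can be pinned instead of auto-detected (a sketch; the image name and variables come from the sections above):

```bash
# Use exactly 8 cores for concurrent event processing instead of auto (0)
docker run -d -p 7777:7777 -v $(pwd)/data:/data \
  -e ORLY_CONCURRENT_WORKERS=8 \
  silberengel/orly-relay:latest
```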
## Development

### Local Build

```bash
# Pull the latest image (recommended)
docker pull silberengel/orly-relay:latest

# Or build locally if needed
docker build -t silberengel/orly-relay:latest .

# Run with custom settings
docker run -p 7777:7777 -v $(pwd)/data:/data silberengel/orly-relay:latest
```

### Testing

```bash
# Test WebSocket connection
websocat ws://localhost:7777

# Run stress tests (if available in cmd/stresstest)
go run ./cmd/stresstest -relay ws://localhost:7777
```

## Production Deployment

### SSL Setup

1. Get SSL certificates (Let's Encrypt recommended)
2. Place certificates in `nginx/ssl/`
3. Update `nginx/nginx.conf` to enable HTTPS
4. Start with proxy profile: `docker-compose --profile proxy up -d`

### Monitoring

- Health checks are configured for both services
- Logs are rotated (max 10MB, 3 files)
- Resource limits are set to prevent runaway processes

### Security

- Runs as non-root user (uid 1000)
- Rate limiting configured in nginx
- Configurable authentication and event size limits

## Troubleshooting

### Common Issues (Real-World Experience)

#### **Container Issues:**
1. **Port already in use**: Change `ORLY_PORT` in docker-compose.yml
2. **Permission denied**: Ensure the `./data` directory is writable
3. **Container won't start**: Check logs with `docker logs container-name`

#### **WebSocket Issues:**
4. **HTTP 426 instead of WebSocket upgrade**:
   - Use `ws://127.0.0.1:7777` in the proxy config, not `http://`
   - Ensure the `proxy_wstunnel` module is enabled
5. **Connection refused in browser but works with websocat**:
   - Clear browser cache and service workers
   - Try incognito mode
   - Add CORS headers to the Apache/nginx config

#### **Plesk-Specific Issues:**
6. **Plesk not applying Apache directives**:
   - Check if the config appears in `/etc/apache2/plesk.conf.d/vhosts/domain.conf`
   - Use a direct Apache override if the Plesk interface fails
7. **Virtual host conflicts**:
   - Check precedence with `apache2ctl -S`
   - Remove conflicting Plesk configs if needed

#### **SSL Certificate Issues:**
8. **Self-signed certificate after Let's Encrypt**:
   - Plesk might not be using the correct certificate
   - Import Let's Encrypt certs into Plesk or use a direct Apache config

### Debug Commands

```bash
# Container debugging
docker ps | grep relay
docker logs orly-relay
curl -I http://127.0.0.1:7777  # 426 on older builds; 200 with CORS headers on the latest

# WebSocket testing
echo '["REQ","test",{}]' | websocat wss://domain.com/
echo '["REQ","test",{}]' | websocat wss://domain.com/ws/

# Apache debugging (for reverse proxy issues)
apache2ctl -S | grep domain.com
apache2ctl -M | grep -E "(proxy|rewrite)"
grep ProxyPass /etc/apache2/plesk.conf.d/vhosts/domain.conf
```

### Logs

```bash
# View relay logs
docker-compose logs -f orly-relay

# View nginx logs (if using proxy)
docker-compose logs -f nginx

# Apache logs (for reverse proxy debugging)
sudo tail -f /var/log/apache2/error.log
sudo tail -f /var/log/apache2/domain-error.log
```

### Working Reverse Proxy Config

**For Apache (direct config file):**
```apache
<VirtualHost SERVER_IP:443>
    ServerName domain.com
    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/domain.com/privkey.pem

    # Direct WebSocket proxy for Nostr relay
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    Header always set Access-Control-Allow-Origin "*"
</VirtualHost>
```

---

*Crafted for Stella's digital forest* 🌲
Dockerfile (new file, 78 lines)
@@ -0,0 +1,78 @@
# Dockerfile for Stella's Nostr Relay (next.orly.dev)
# Owner: npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx

FROM golang:alpine AS builder

# Install build dependencies
RUN apk add --no-cache \
    git \
    build-base \
    autoconf \
    automake \
    libtool \
    pkgconfig

# Install secp256k1 library from Alpine packages
RUN apk add --no-cache libsecp256k1-dev

# Set working directory
WORKDIR /build

# Copy go modules first (for better caching)
COPY go.mod go.sum ./
RUN go mod download

# Copy source code
COPY . .

# Build the relay with optimizations from v0.4.8
RUN CGO_ENABLED=1 GOOS=linux go build -ldflags "-w -s" -o relay .

# Create non-root user for security
RUN adduser -D -u 1000 stella && \
    chown -R 1000:1000 /build

# Final stage - minimal runtime image
FROM alpine:latest

# Install only runtime dependencies
RUN apk add --no-cache \
    ca-certificates \
    curl \
    libsecp256k1 \
    libsecp256k1-dev

WORKDIR /app

# Copy binary from builder
COPY --from=builder /build/relay /app/relay

# Create runtime user and directories
RUN adduser -D -u 1000 stella && \
    mkdir -p /data /profiles /app && \
    chown -R 1000:1000 /data /profiles /app

# Expose the relay port
EXPOSE 7777

# Set environment variables for Stella's relay
ENV ORLY_DATA_DIR=/data
ENV ORLY_LISTEN=0.0.0.0
ENV ORLY_PORT=7777
ENV ORLY_LOG_LEVEL=info
ENV ORLY_MAX_CONNECTIONS=1000
ENV ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx
ENV ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1m4ny6hjqzepn4rxknuq94c2gpqzr29ufkkw7ttcxyak7v43n6vvsajc2jl,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z

# Health check to ensure relay is responding
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD sh -c "code=\$(curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:7777 || echo 000); echo \$code | grep -E '^(101|200|400|404|426)$' >/dev/null || exit 1"

# Create volume for persistent data
VOLUME ["/data"]

# Drop privileges and run as stella user
USER 1000:1000

# Run Stella's Nostr relay
CMD ["/app/relay"]
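Docker evaluates that HEALTHCHECK on its own schedule; the current verdict can be read back at runtime with the standard Docker CLI (assuming the container is named `orly-relay`):

```bash
# Prints "healthy", "unhealthy", or "starting"
docker inspect --format '{{.State.Health.Status}}' orly-relay

# Recent probe output, useful when the status is "unhealthy"
docker inspect --format '{{json .State.Health.Log}}' orly-relay
```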
SERVICE-WORKER-FIX.md (new file, 101 lines)
@@ -0,0 +1,101 @@
# Service Worker Certificate Caching Fix

## 🚨 **Problem**
When accessing Jumble from the ImWald landing page, the service worker serves a cached self-signed certificate instead of the new Let's Encrypt certificate.

## ⚡ **Solutions**

### **Option 1: Force Service Worker Update**
Add this to your Jumble app's service worker or main JavaScript:

```javascript
// Force service worker update and certificate refresh
if ('serviceWorker' in navigator) {
  navigator.serviceWorker.getRegistrations().then(function(registrations) {
    for (let registration of registrations) {
      registration.update(); // Force update
    }
  });
}

// Clear all caches on certificate update
if ('caches' in window) {
  caches.keys().then(function(names) {
    for (let name of names) {
      caches.delete(name);
    }
  });
}
```

### **Option 2: Update Service Worker Cache Strategy**
In your service worker file, add cache busting for SSL-sensitive requests:

```javascript
// In your service worker
self.addEventListener('fetch', function(event) {
  // Don't cache HTTPS requests that might have certificate issues
  if (event.request.url.startsWith('https://') &&
      event.request.url.includes('imwald.eu')) {
    event.respondWith(
      fetch(event.request, { cache: 'no-store' })
    );
    return;
  }

  // Your existing fetch handling...
});
```

### **Option 3: Version Your Service Worker**
Update your service worker with a new version number:

```javascript
// At the top of your service worker
const CACHE_VERSION = 'v2.0.1'; // Increment this when certificates change
const CACHE_NAME = `jumble-cache-${CACHE_VERSION}`;

// Clear old caches
self.addEventListener('activate', function(event) {
  event.waitUntil(
    caches.keys().then(function(cacheNames) {
      return Promise.all(
        cacheNames.map(function(cacheName) {
          if (cacheName !== CACHE_NAME) {
            return caches.delete(cacheName);
          }
        })
      );
    })
  );
});
```

### **Option 4: Add Cache Headers**
In your Plesk Apache config for Jumble, add:

```apache
# Prevent service worker from caching SSL-sensitive content
Header always set Cache-Control "no-cache, no-store, must-revalidate"
Header always set Pragma "no-cache"
Header always set Expires "0"

# Only for service worker file
<Files "sw.js">
    Header always set Cache-Control "no-cache, no-store, must-revalidate"
</Files>
```

## 🧹 **Immediate User Fix**

For users experiencing the certificate issue:

1. **Clear browser data** for jumble.imwald.eu
2. **Unregister the service worker**:
   - F12 → Application → Service Workers → Unregister
3. **Hard refresh**: Ctrl+Shift+R
4. **Or use incognito mode** to test (see the certificate check below)
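To confirm which certificate the server is actually sending, independent of any browser or service worker cache, `openssl` can inspect the live handshake; a Let's Encrypt issuer here means the remaining problem is purely client-side caching:

```bash
# Show the issuer and validity of the certificate currently served for the domain
echo | openssl s_client -connect jumble.imwald.eu:443 -servername jumble.imwald.eu 2>/dev/null \
  | openssl x509 -noout -issuer -subject -dates
```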
---

This will prevent the service worker from serving stale certificate data.
WEBSOCKET-DEBUG.md (new file, 109 lines)
@@ -0,0 +1,109 @@
# WebSocket Connection Debug Guide

## 🚨 **Current Issue**
`wss://orly-relay.imwald.eu/` returns `NS_ERROR_WEBSOCKET_CONNECTION_REFUSED`

## 🔍 **Debug Steps**

### **Step 1: Verify Relay is Running**
```bash
# On your server
curl -I http://127.0.0.1:7777
# Should return: HTTP/1.1 426 Upgrade Required

docker ps | grep stella
# Should show running container
```

### **Step 2: Test Apache Modules**
```bash
# Check if WebSocket modules are enabled
apache2ctl -M | grep -E "(proxy|rewrite)"

# If missing, enable them:
sudo a2enmod proxy
sudo a2enmod proxy_http
sudo a2enmod proxy_wstunnel
sudo a2enmod rewrite
sudo a2enmod headers
sudo systemctl restart apache2
```

### **Step 3: Check Apache Configuration**
```bash
# Check what Plesk generated
sudo cat /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf

# Look for proxy and rewrite rules
grep -E "(Proxy|Rewrite)" /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf
```

### **Step 4: Test Direct WebSocket Connection**
```bash
# Test if the issue is Apache or the relay itself
echo '["REQ","test",{}]' | websocat ws://127.0.0.1:7777/

# If that works, the issue is the Apache proxy
# If that fails, the issue is the relay
```

### **Step 5: Check Apache Error Logs**
```bash
# Watch Apache errors in real time
sudo tail -f /var/log/apache2/error.log

# Then try connecting to wss://orly-relay.imwald.eu/ and see what errors appear
```

## 🔧 **Specific Plesk Fix**

Based on your current status, try this **exact configuration** in Plesk:

### **Go to Apache & nginx Settings for orly-relay.imwald.eu:**

**Clear both HTTP and HTTPS sections, then add to HTTPS:**

```apache
# Enable proxy
ProxyRequests Off
ProxyPreserveHost On

# WebSocket handling - the key part
RewriteEngine On
RewriteCond %{HTTP:Upgrade} =websocket [NC]
RewriteCond %{HTTP:Connection} upgrade [NC]
RewriteRule /(.*) ws://127.0.0.1:7777/$1 [P,L]

# Fallback for regular HTTP
RewriteCond %{HTTP:Upgrade} !=websocket [NC]
RewriteRule /(.*) http://127.0.0.1:7777/$1 [P,L]

# Headers
ProxyAddHeaders On
```

### **Alternative Simpler Version:**
If the above doesn't work, try just:

```apache
ProxyPass / http://127.0.0.1:7777/
ProxyPassReverse / http://127.0.0.1:7777/
ProxyPass /ws ws://127.0.0.1:7777/
ProxyPassReverse /ws ws://127.0.0.1:7777/
```

## 🧪 **Testing Commands**

```bash
# Test the WebSocket after each change
echo '["REQ","test",{}]' | websocat wss://orly-relay.imwald.eu/

# Check what's actually being served
curl -v https://orly-relay.imwald.eu/ 2>&1 | grep -E "(HTTP|upgrade|connection)"
```

## 🎯 **Expected Fix**

The issue is likely that Apache isn't properly handling the WebSocket upgrade request. The `proxy_wstunnel` module and correct rewrite rules should fix this.

Try the **simpler ProxyPass version first** - it's often more reliable in Plesk environments.
@@ -40,11 +40,13 @@ type C struct {
 	Admins              []string      `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
 	Owners              []string      `env:"ORLY_OWNERS" usage:"comma-separated list of owner npubs, who have full control of the relay for wipe and restart and other functions"`
 	ACLMode             string        `env:"ORLY_ACL_MODE" usage:"ACL mode: follows,none" default:"none"`
-	SpiderMode          string        `env:"ORLY_SPIDER_MODE" usage:"spider mode: none,follow" default:"none"`
+	SpiderMode          string        `env:"ORLY_SPIDER_MODE" usage:"spider mode: none,follows" default:"none"`
 	SpiderFrequency     time.Duration `env:"ORLY_SPIDER_FREQUENCY" usage:"spider frequency in seconds" default:"1h"`
 	BootstrapRelays     []string      `env:"ORLY_BOOTSTRAP_RELAYS" usage:"comma-separated list of bootstrap relay URLs for initial sync"`
 	NWCUri              string        `env:"ORLY_NWC_URI" usage:"NWC (Nostr Wallet Connect) connection string for Lightning payments"`
 	SubscriptionEnabled bool          `env:"ORLY_SUBSCRIPTION_ENABLED" default:"false" usage:"enable subscription-based access control requiring payment for non-directory events"`
 	MonthlyPriceSats    int64         `env:"ORLY_MONTHLY_PRICE_SATS" default:"6000" usage:"price in satoshis for one month subscription (default ~$2 USD)"`
 	RelayURL            string        `env:"ORLY_RELAY_URL" usage:"base URL for the relay dashboard (e.g., https://relay.example.com)"`
 
 	// Web UI and dev mode settings
 	WebDisableEmbedded bool `env:"ORLY_WEB_DISABLE" default:"false" usage:"disable serving the embedded web UI; useful for hot-reload during development"`
@@ -224,15 +226,14 @@ func EnvKV(cfg any) (m KVSlice) {
 		k := t.Field(i).Tag.Get("env")
 		v := reflect.ValueOf(cfg).Field(i).Interface()
 		var val string
-		switch v.(type) {
+		switch v := v.(type) {
 		case string:
-			val = v.(string)
+			val = v
 		case int, bool, time.Duration:
 			val = fmt.Sprint(v)
 		case []string:
-			arr := v.([]string)
-			if len(arr) > 0 {
-				val = strings.Join(arr, ",")
+			if len(v) > 0 {
+				val = strings.Join(v, ",")
 			}
 		}
 		// this can happen with embedded structs
@@ -304,5 +305,4 @@ func PrintHelp(cfg *C, printer io.Writer) {
 	fmt.Fprintf(printer, "\ncurrent configuration:\n\n")
 	PrintEnv(cfg, printer)
 	fmt.Fprintln(printer)
-	return
 }
@@ -72,6 +72,10 @@ func (l *Listener) handleFirstTimeUser(pubkey []byte) {
 
 	// Get payment processor to create welcome note
 	if l.Server.paymentProcessor != nil {
+		// Set the dashboard URL based on the current HTTP request
+		dashboardURL := l.Server.DashboardURL(l.req)
+		l.Server.paymentProcessor.SetDashboardURL(dashboardURL)
+
 		if err := l.Server.paymentProcessor.CreateWelcomeNote(pubkey); err != nil {
 			log.E.F("failed to create welcome note for first-time user: %v", err)
 		}
app/handle-count.go (new file, 78 lines)
@@ -0,0 +1,78 @@
package app

import (
	"context"
	"errors"
	"fmt"
	"time"

	"lol.mleku.dev/chk"
	"lol.mleku.dev/log"
	"next.orly.dev/pkg/acl"
	"next.orly.dev/pkg/encoders/envelopes/authenvelope"
	"next.orly.dev/pkg/encoders/envelopes/countenvelope"
	"next.orly.dev/pkg/utils/normalize"
)

// HandleCount processes a COUNT envelope by parsing the request, verifying
// permissions, invoking the database CountEvents for each provided filter, and
// responding with a COUNT response containing the aggregate count.
func (l *Listener) HandleCount(msg []byte) (err error) {
	log.D.F("HandleCount: START processing from %s", l.remote)

	// Parse the COUNT request
	env := countenvelope.New()
	if _, err = env.Unmarshal(msg); chk.E(err) {
		return normalize.Error.Errorf(err.Error())
	}
	log.D.C(func() string { return fmt.Sprintf("COUNT sub=%s filters=%d", env.Subscription, len(env.Filters)) })

	// If ACL is active, send a challenge (same as REQ path)
	if acl.Registry.Active.Load() != "none" {
		if err = authenvelope.NewChallengeWith(l.challenge.Load()).Write(l); chk.E(err) {
			return
		}
	}

	// Check read permissions
	accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
	switch accessLevel {
	case "none":
		return errors.New("auth required: user not authed or has no read access")
	default:
		// allowed to read
	}

	// Use a bounded context for counting
	ctx, cancel := context.WithTimeout(l.ctx, 30*time.Second)
	defer cancel()

	// Aggregate count across all provided filters
	var total int
	var approx bool // database returns false per implementation
	for _, f := range env.Filters {
		if f == nil {
			continue
		}
		var cnt int
		var a bool
		cnt, a, err = l.D.CountEvents(ctx, f)
		if chk.E(err) {
			return
		}
		total += cnt
		approx = approx || a
	}

	// Build and send COUNT response
	var res *countenvelope.Response
	if res, err = countenvelope.NewResponseFrom(env.Subscription, total, approx); chk.E(err) {
		return
	}
	if err = res.Write(l); chk.E(err) {
		return
	}

	log.D.F("HandleCount: COMPLETED processing from %s count=%d approx=%v", l.remote, total, approx)
	return nil
}
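The COUNT flow above can be exercised end to end from the shell (a sketch assuming `websocat` and a local relay on port 7777; the actual count depends on stored events, per NIP-45):

```bash
# Ask how many kind-1 events the relay holds; expect a reply shaped like:
#   ["COUNT","c1",{"count":42}]
echo '["COUNT","c1",{"kinds":[1]}]' | websocat ws://127.0.0.1:7777
```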
@@ -4,48 +4,79 @@ import (
 	"fmt"
 
 	"lol.mleku.dev/chk"
 	"lol.mleku.dev/log"
 	"next.orly.dev/pkg/encoders/envelopes"
 	"next.orly.dev/pkg/encoders/envelopes/authenvelope"
 	"next.orly.dev/pkg/encoders/envelopes/closeenvelope"
+	"next.orly.dev/pkg/encoders/envelopes/countenvelope"
 	"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
 	"next.orly.dev/pkg/encoders/envelopes/noticeenvelope"
 	"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
 )
 
 func (l *Listener) HandleMessage(msg []byte, remote string) {
 	// log.D.F("%s received message:\n%s", remote, msg)
+	msgPreview := string(msg)
+	if len(msgPreview) > 150 {
+		msgPreview = msgPreview[:150] + "..."
+	}
+	log.D.F("%s processing message (len=%d): %s", remote, len(msg), msgPreview)
+
+	l.msgCount++
 	var err error
 	var t string
 	var rem []byte
-	if t, rem, err = envelopes.Identify(msg); !chk.E(err) {
-		switch t {
-		case eventenvelope.L:
-			// log.D.F("eventenvelope: %s %s", remote, rem)
-			err = l.HandleEvent(rem)
-		case reqenvelope.L:
-			// log.D.F("reqenvelope: %s %s", remote, rem)
-			err = l.HandleReq(rem)
-		case closeenvelope.L:
-			// log.D.F("closeenvelope: %s %s", remote, rem)
-			err = l.HandleClose(rem)
-		case authenvelope.L:
-			// log.D.F("authenvelope: %s %s", remote, rem)
-			err = l.HandleAuth(rem)
-		default:
-			err = fmt.Errorf("unknown envelope type %s\n%s", t, rem)
-		}
-	}
+
+	// Attempt to identify the envelope type
+	if t, rem, err = envelopes.Identify(msg); err != nil {
+		log.E.F("%s envelope identification FAILED (len=%d): %v", remote, len(msg), err)
+		log.D.F("%s malformed message content: %q", remote, msgPreview)
+		chk.E(err)
+		// Send error notice to client
+		if noticeErr := noticeenvelope.NewFrom("malformed message: " + err.Error()).Write(l); noticeErr != nil {
+			log.E.F("%s failed to send malformed message notice: %v", remote, noticeErr)
+		}
+		return
+	}
+
+	log.D.F("%s identified envelope type: %s (payload_len=%d)", remote, t, len(rem))
+
+	// Process the identified envelope type
+	switch t {
+	case eventenvelope.L:
+		log.D.F("%s processing EVENT envelope", remote)
+		l.eventCount++
+		err = l.HandleEvent(rem)
+	case reqenvelope.L:
+		log.D.F("%s processing REQ envelope", remote)
+		l.reqCount++
+		err = l.HandleReq(rem)
+	case closeenvelope.L:
+		log.D.F("%s processing CLOSE envelope", remote)
+		err = l.HandleClose(rem)
+	case authenvelope.L:
+		log.D.F("%s processing AUTH envelope", remote)
+		err = l.HandleAuth(rem)
+	case countenvelope.L:
+		log.D.F("%s processing COUNT envelope", remote)
+		err = l.HandleCount(rem)
+	default:
+		err = fmt.Errorf("unknown envelope type %s", t)
+		log.E.F("%s unknown envelope type: %s (payload: %q)", remote, t, string(rem))
+	}
+
+	// Handle any processing errors
 	if err != nil {
-		// log.D.C(
-		//	func() string {
-		//		return fmt.Sprintf(
-		//			"notice->%s %s", remote, err,
-		//		)
-		//	},
-		// )
-		if err = noticeenvelope.NewFrom(err.Error()).Write(l); err != nil {
+		log.E.F("%s message processing FAILED (type=%s): %v", remote, t, err)
+		log.D.F("%s error context - original message: %q", remote, msgPreview)
+
+		// Send error notice to client
+		noticeMsg := fmt.Sprintf("%s: %s", t, err.Error())
+		if noticeErr := noticeenvelope.NewFrom(noticeMsg).Write(l); noticeErr != nil {
+			log.E.F("%s failed to send error notice after %s processing failure: %v", remote, t, noticeErr)
 			return
 		}
+		log.D.F("%s sent error notice for %s processing failure", remote, t)
+	} else {
+		log.D.F("%s message processing SUCCESS (type=%s)", remote, t)
 	}
+
 }
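Given the new error path above, a malformed frame should now come back as a NOTICE instead of silently dropping the connection; a quick check (a sketch assuming a local relay on port 7777):

```bash
# Not valid JSON, so envelope identification fails; expect something like:
#   ["NOTICE","malformed message: ..."]
echo 'not-a-nostr-envelope' | websocat ws://127.0.0.1:7777
```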
@@ -4,9 +4,12 @@ import (
 	"encoding/json"
 	"net/http"
 	"sort"
+	"strings"
 
 	"lol.mleku.dev/chk"
 	"lol.mleku.dev/log"
+	"next.orly.dev/pkg/crypto/p256k"
+	"next.orly.dev/pkg/encoders/hex"
 	"next.orly.dev/pkg/protocol/relayinfo"
 	"next.orly.dev/pkg/version"
 )
@@ -32,43 +35,61 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
 	supportedNIPs := relayinfo.GetList(
 		relayinfo.BasicProtocol,
 		relayinfo.Authentication,
-		// relayinfo.EncryptedDirectMessage,
+		relayinfo.EncryptedDirectMessage,
 		relayinfo.EventDeletion,
 		relayinfo.RelayInformationDocument,
 		relayinfo.GenericTagQueries,
 		// relayinfo.NostrMarketplace,
 		relayinfo.CountingResults,
 		relayinfo.EventTreatment,
-		// relayinfo.CommandResults,
+		relayinfo.CommandResults,
 		relayinfo.ParameterizedReplaceableEvents,
-		// relayinfo.ExpirationTimestamp,
+		relayinfo.ExpirationTimestamp,
 		relayinfo.ProtectedEvents,
 		relayinfo.RelayListMetadata,
 		relayinfo.SearchCapability,
 	)
 	if s.Config.ACLMode != "none" {
 		supportedNIPs = relayinfo.GetList(
 			relayinfo.BasicProtocol,
 			relayinfo.Authentication,
-			// relayinfo.EncryptedDirectMessage,
+			relayinfo.EncryptedDirectMessage,
 			relayinfo.EventDeletion,
 			relayinfo.RelayInformationDocument,
 			relayinfo.GenericTagQueries,
 			// relayinfo.NostrMarketplace,
 			relayinfo.CountingResults,
 			relayinfo.EventTreatment,
-			// relayinfo.CommandResults,
+			relayinfo.CommandResults,
 			relayinfo.ParameterizedReplaceableEvents,
 			relayinfo.ExpirationTimestamp,
 			relayinfo.ProtectedEvents,
 			relayinfo.RelayListMetadata,
 			relayinfo.SearchCapability,
 		)
 	}
 	sort.Sort(supportedNIPs)
-	log.T.Ln("supported NIPs", supportedNIPs)
+	log.I.Ln("supported NIPs", supportedNIPs)
+	// Construct description with dashboard URL
+	dashboardURL := s.DashboardURL(r)
+	description := version.Description + " dashboard: " + dashboardURL
+
+	// Get relay identity pubkey as hex
+	var relayPubkey string
+	if skb, err := s.D.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
+		sign := new(p256k.Signer)
+		if err := sign.InitSec(skb); err == nil {
+			relayPubkey = hex.Enc(sign.Pub())
+		}
+	}
+
 	info = &relayinfo.T{
 		Name:        s.Config.AppName,
-		Description: version.Description,
+		Description: description,
+		PubKey:      relayPubkey,
 		Nips:        supportedNIPs,
 		Software:    version.URL,
-		Version:     version.V,
+		Version:     strings.TrimPrefix(version.V, "v"),
 		Limitation: relayinfo.Limits{
 			AuthRequired:     s.Config.ACLMode != "none",
 			RestrictedWrites: s.Config.ACLMode != "none",
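The document this handler produces is the NIP-11 relay information document, which clients fetch over plain HTTP with a special Accept header; the new `PubKey`, dashboard-bearing `Description`, and "v"-trimmed `Version` fields should all be visible there:

```bash
# Fetch the relay info document (NIP-11); jq is optional pretty-printing
curl -s -H "Accept: application/nostr+json" http://127.0.0.1:7777 | jq .
```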
@@ -16,7 +16,6 @@ import (
 	"next.orly.dev/pkg/encoders/envelopes/closedenvelope"
 	"next.orly.dev/pkg/encoders/envelopes/eoseenvelope"
 	"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
-	"next.orly.dev/pkg/encoders/envelopes/okenvelope"
 	"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
 	"next.orly.dev/pkg/encoders/event"
 	"next.orly.dev/pkg/encoders/filter"
@@ -30,15 +29,13 @@
 )
 
 func (l *Listener) HandleReq(msg []byte) (err error) {
-	// log.T.F("HandleReq: START processing from %s\n%s\n", l.remote, msg)
+	log.D.F("HandleReq: START processing from %s", l.remote)
-	// var rem []byte
 	env := reqenvelope.New()
 	if _, err = env.Unmarshal(msg); chk.E(err) {
 		return normalize.Error.Errorf(err.Error())
 	}
-	// if len(rem) > 0 {
-	//	log.I.F("REQ extra bytes: '%s'", rem)
-	// }
+	log.D.C(func() string { return fmt.Sprintf("REQ sub=%s filters=%d", env.Subscription, len(*env.Filters)) })
 	// send a challenge to the client to auth if an ACL is active
 	if acl.Registry.Active.Load() != "none" {
 		if err = authenvelope.NewChallengeWith(l.challenge.Load()).
@@ -50,8 +47,9 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
 	accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
 	switch accessLevel {
 	case "none":
-		if err = okenvelope.NewFrom(
-			env.Subscription, false,
+		// For REQ denial, send a CLOSED with auth-required reason (NIP-01)
+		if err = closedenvelope.NewFrom(
+			env.Subscription,
 			reason.AuthRequired.F("user not authed or has no read access"),
 		).Write(l); chk.E(err) {
 			return
@@ -59,89 +57,76 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
 		return
 	default:
 		// user has read access or better, continue
-		// log.D.F("user has %s access", accessLevel)
 	}
 	var events event.S
+	// Create a single context for all filter queries, tied to the connection context, to prevent leaks and support timely cancellation
+	queryCtx, queryCancel := context.WithTimeout(
+		l.ctx, 30*time.Second,
+	)
+	defer queryCancel()
+
+	// Collect all events from all filters
+	var allEvents event.S
 	for _, f := range *env.Filters {
-		// idsLen := 0
-		// kindsLen := 0
-		// authorsLen := 0
-		// tagsLen := 0
-		// if f != nil {
-		//	if f.Ids != nil {
-		//		idsLen = f.Ids.Len()
-		//	}
-		//	if f.Kinds != nil {
-		//		kindsLen = f.Kinds.Len()
-		//	}
-		//	if f.Authors != nil {
-		//		authorsLen = f.Authors.Len()
-		//	}
-		//	if f.Tags != nil {
-		//		tagsLen = f.Tags.Len()
-		//	}
-		// }
-		// log.T.F(
-		//	"REQ %s: filter summary ids=%d kinds=%d authors=%d tags=%d",
-		//	env.Subscription, idsLen, kindsLen, authorsLen, tagsLen,
-		// )
-		if f != nil && f.Authors != nil && f.Authors.Len() > 0 {
-			var authors []string
-			for _, a := range f.Authors.T {
-				authors = append(authors, hex.Enc(a))
+		if f != nil {
+			// Summarize filter details for diagnostics (avoid internal fields)
+			var kindsLen int
+			if f.Kinds != nil {
+				kindsLen = f.Kinds.Len()
 			}
-			// log.T.F("REQ %s: authors=%v", env.Subscription, authors)
+			var authorsLen int
+			if f.Authors != nil {
+				authorsLen = f.Authors.Len()
+			}
+			var idsLen int
+			if f.Ids != nil {
+				idsLen = f.Ids.Len()
+			}
+			var dtag string
+			if f.Tags != nil {
+				if d := f.Tags.GetFirst([]byte("d")); d != nil {
+					dtag = string(d.Value())
+				}
+			}
+			var lim any
+			if f.Limit != nil {
+				lim = *f.Limit
+			}
+			var since any
+			if f.Since != nil {
+				since = f.Since.Int()
+			}
+			var until any
+			if f.Until != nil {
+				until = f.Until.Int()
+			}
+			log.D.C(func() string {
+				return fmt.Sprintf("REQ %s filter: kinds.len=%d authors.len=%d ids.len=%d d=%q limit=%v since=%v until=%v", env.Subscription, kindsLen, authorsLen, idsLen, dtag, lim, since, until)
+			})
 		}
-		// if f != nil && f.Kinds != nil && f.Kinds.Len() > 0 {
-		//	log.T.F("REQ %s: kinds=%v", env.Subscription, f.Kinds.ToUint16())
-		// }
-		// if f != nil && f.Ids != nil && f.Ids.Len() > 0 {
-		//	var ids []string
-		//	for _, id := range f.Ids.T {
-		//		ids = append(ids, hex.Enc(id))
-		//	}
-		//	// var lim any
-		//	// if pointers.Present(f.Limit) {
-		//	//	lim = *f.Limit
-		//	// } else {
-		//	//	lim = nil
-		//	// }
-		//	// log.T.F(
-		//	//	"REQ %s: ids filter count=%d ids=%v limit=%v", env.Subscription,
-		//	//	f.Ids.Len(), ids, lim,
-		//	// )
-		// }
 		if f != nil && pointers.Present(f.Limit) {
 			if *f.Limit == 0 {
 				continue
 			}
 		}
-		// Use a separate context for QueryEvents to prevent cancellation issues
-		queryCtx, cancel := context.WithTimeout(
-			context.Background(), 30*time.Second,
-		)
-		defer cancel()
-		// log.T.F(
-		//	"HandleReq: About to QueryEvents for %s, main context done: %v",
-		//	l.remote, l.ctx.Err() != nil,
-		// )
-		if events, err = l.QueryEvents(queryCtx, f); chk.E(err) {
+		var filterEvents event.S
+		if filterEvents, err = l.QueryEvents(queryCtx, f); chk.E(err) {
 			if errors.Is(err, badger.ErrDBClosed) {
 				return
 			}
-			// log.T.F("HandleReq: QueryEvents error for %s: %v", l.remote, err)
+			log.E.F("QueryEvents failed for filter: %v", err)
 			err = nil
 			continue
 		}
-		defer func() {
-			for _, ev := range events {
-				ev.Free()
-			}
-		}()
-		// log.T.F(
-		//	"HandleReq: QueryEvents completed for %s, found %d events",
-		//	l.remote, len(events),
-		// )
+		// Append events from this filter to the overall collection
+		allEvents = append(allEvents, filterEvents...)
 	}
+	events = allEvents
+	defer func() {
+		for _, ev := range events {
+			ev.Free()
+		}
+	}()
 	var tmp event.S
 privCheck:
 	for _, ev := range events {
@@ -152,17 +137,19 @@ privCheck:
 			if pk == nil {
 				continue // no auth, can't access private events
 			}
 
 			// Convert authenticated pubkey to npub for comparison
 			authedNpub, err := bech32encoding.BinToNpub(pk)
 			if err != nil {
 				continue // couldn't convert pubkey, skip
 			}
 
 			// Check if authenticated npub is in any private tag
 			authorized := false
 			for _, privateTag := range privateTags {
-				authorizedNpubs := strings.Split(string(privateTag.Value()), ",")
+				authorizedNpubs := strings.Split(
+					string(privateTag.Value()), ",",
+				)
 				for _, npub := range authorizedNpubs {
 					if strings.TrimSpace(npub) == string(authedNpub) {
 						authorized = true
@@ -173,24 +160,25 @@ privCheck:
 						break
 					}
 				}
 
 				if !authorized {
 					continue // not authorized to see this private event
 				}
 
 				tmp = append(tmp, ev)
 				continue
 			}
 
-			if kind.IsPrivileged(ev.Kind) &&
-				accessLevel != "admin" { // admins can see all events
-				// log.T.C(
-				//	func() string {
-				//		return fmt.Sprintf(
-				//			"checking privileged event %0x", ev.ID,
-				//		)
-				//	},
-				// )
+			if l.Config.ACLMode != "none" &&
+				(kind.IsPrivileged(ev.Kind) && accessLevel != "admin") &&
+				l.authedPubkey.Load() != nil { // admins can see all events
+				log.T.C(
+					func() string {
+						return fmt.Sprintf(
+							"checking privileged event %0x", ev.ID,
+						)
+					},
+				)
 				pk := l.authedPubkey.Load()
 				if pk == nil {
 					continue
@@ -214,26 +202,26 @@ privCheck:
 						continue
 					}
 					if utils.FastEqual(pt, pk) {
-						// log.T.C(
-						//	func() string {
-						//		return fmt.Sprintf(
-						//			"privileged event %s is for logged in pubkey %0x",
-						//			ev.ID, pk,
-						//		)
-						//	},
-						// )
+						log.T.C(
+							func() string {
+								return fmt.Sprintf(
+									"privileged event %s is for logged in pubkey %0x",
+									ev.ID, pk,
+								)
+							},
+						)
 						tmp = append(tmp, ev)
 						continue privCheck
 					}
 				}
-				// log.T.C(
-				//	func() string {
-				//		return fmt.Sprintf(
-				//			"privileged event %s does not contain the logged in pubkey %0x",
-				//			ev.ID, pk,
-				//		)
-				//	},
-				// )
+				log.T.C(
+					func() string {
+						return fmt.Sprintf(
+							"privileged event %s does not contain the logged in pubkey %0x",
+							ev.ID, pk,
+						)
+					},
+				)
 			} else {
 				tmp = append(tmp, ev)
 			}
@@ -241,19 +229,19 @@ privCheck:
|
||||
events = tmp
|
||||
seen := make(map[string]struct{})
|
||||
for _, ev := range events {
|
||||
// log.D.C(
|
||||
// func() string {
|
||||
// return fmt.Sprintf(
|
||||
// "REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
|
||||
// hex.Enc(ev.ID), ev.Kind,
|
||||
// )
|
||||
// },
|
||||
// )
|
||||
// log.T.C(
|
||||
// func() string {
|
||||
// return fmt.Sprintf("event:\n%s\n", ev.Serialize())
|
||||
// },
|
||||
// )
|
||||
log.D.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
|
||||
hex.Enc(ev.ID), ev.Kind,
|
||||
)
|
||||
},
|
||||
)
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf("event:\n%s\n", ev.Serialize())
|
||||
},
|
||||
)
|
||||
var res *eventenvelope.Result
|
||||
if res, err = eventenvelope.NewResultWith(
|
||||
env.Subscription, ev,
|
||||
@@ -268,7 +256,7 @@ privCheck:
|
||||
}
|
||||
// write the EOSE to signal to the client that all events found have been
|
||||
// sent.
|
||||
// log.T.F("sending EOSE to %s", l.remote)
|
||||
log.D.F("sending EOSE to %s", l.remote)
|
||||
if err = eoseenvelope.NewFrom(env.Subscription).
|
||||
Write(l); chk.E(err) {
|
||||
return
|
||||
@@ -276,10 +264,10 @@ privCheck:
|
||||
// if the query was for just Ids, we know there can't be any more results,
|
||||
// so cancel the subscription.
|
||||
cancel := true
|
||||
// log.T.F(
|
||||
// "REQ %s: computing cancel/subscription; events_sent=%d",
|
||||
// env.Subscription, len(events),
|
||||
// )
|
||||
log.D.F(
|
||||
"REQ %s: computing cancel/subscription; events_sent=%d",
|
||||
env.Subscription, len(events),
|
||||
)
|
||||
var subbedFilters filter.S
|
||||
for _, f := range *env.Filters {
|
||||
if f.Ids.Len() < 1 {
|
||||
@@ -294,10 +282,10 @@ privCheck:
|
||||
}
|
||||
notFounds = append(notFounds, id)
|
||||
}
|
||||
// log.T.F(
|
||||
// "REQ %s: ids outstanding=%d of %d", env.Subscription,
|
||||
// len(notFounds), f.Ids.Len(),
|
||||
// )
|
||||
log.T.F(
|
||||
"REQ %s: ids outstanding=%d of %d", env.Subscription,
|
||||
len(notFounds), f.Ids.Len(),
|
||||
)
|
||||
// if all were found, don't add to subbedFilters
|
||||
if len(notFounds) == 0 {
|
||||
continue
|
||||
@@ -309,8 +297,8 @@ privCheck:
|
||||
}
|
||||
// also, if we received the limit number of events, subscription ded
|
||||
if pointers.Present(f.Limit) {
|
||||
if len(events) < int(*f.Limit) {
|
||||
cancel = false
|
||||
if len(events) >= int(*f.Limit) {
|
||||
cancel = true
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -328,12 +316,8 @@ privCheck:
|
||||
},
|
||||
)
|
||||
} else {
|
||||
if err = closedenvelope.NewFrom(
|
||||
env.Subscription, nil,
|
||||
).Write(l); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// suppress server-sent CLOSED; client will close subscription if desired
|
||||
}
|
||||
// log.T.F("HandleReq: COMPLETED processing from %s", l.remote)
|
||||
log.D.F("HandleReq: COMPLETED processing from %s", l.remote)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -19,7 +19,6 @@ const (
DefaultWriteWait = 10 * time.Second
DefaultPongWait = 60 * time.Second
DefaultPingWait = DefaultPongWait / 2
DefaultReadTimeout = 7 * time.Second // Read timeout to detect stalled connections
DefaultWriteTimeout = 3 * time.Second
DefaultMaxMessageSize = 1 * units.Mb

@@ -39,7 +38,9 @@ const (

func (s *Server) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
remote := GetRemoteFromReq(r)
log.T.F("handling websocket connection from %s", remote)

// Log comprehensive proxy information for debugging
LogProxyInfo(r, "WebSocket connection from "+remote)
if len(s.Config.IPWhitelist) > 0 {
for _, ip := range s.Config.IPWhitelist {
log.T.F("checking IP whitelist: %s", ip)
@@ -56,38 +57,67 @@ whitelist:
defer cancel()
var err error
var conn *websocket.Conn
if conn, err = websocket.Accept(
w, r, &websocket.AcceptOptions{OriginPatterns: []string{"*"}},
); chk.E(err) {
// Configure WebSocket accept options for proxy compatibility
acceptOptions := &websocket.AcceptOptions{
OriginPatterns: []string{"*"}, // Allow all origins for proxy compatibility
// Don't check origin when behind a proxy - let the proxy handle it
InsecureSkipVerify: true,
}

if conn, err = websocket.Accept(w, r, acceptOptions); chk.E(err) {
log.E.F("websocket accept failed from %s: %v", remote, err)
return
}
log.T.F("websocket accepted from %s path=%s", remote, r.URL.String())
conn.SetReadLimit(DefaultMaxMessageSize)
defer conn.CloseNow()
listener := &Listener{
ctx: ctx,
Server: s,
conn: conn,
remote: remote,
req: r,
ctx: ctx,
Server: s,
conn: conn,
remote: remote,
req: r,
startTime: time.Now(),
}
chal := make([]byte, 32)
rand.Read(chal)
listener.challenge.Store([]byte(hex.Enc(chal)))
// If admins are configured, immediately prompt client to AUTH (NIP-42)
if len(s.Config.Admins) > 0 {
// log.D.F("sending initial AUTH challenge to %s", remote)
if s.Config.ACLMode != "none" {
log.D.F("sending AUTH challenge to %s", remote)
if err = authenvelope.NewChallengeWith(listener.challenge.Load()).
Write(listener); chk.E(err) {
log.E.F("failed to send AUTH challenge to %s: %v", remote, err)
return
}
log.D.F("AUTH challenge sent successfully to %s", remote)
}
ticker := time.NewTicker(DefaultPingWait)
go s.Pinger(ctx, conn, ticker, cancel)
defer func() {
// log.D.F("closing websocket connection from %s", remote)
log.D.F("closing websocket connection from %s", remote)

// Cancel context and stop pinger
cancel()
ticker.Stop()

// Cancel all subscriptions for this connection
log.D.F("cancelling subscriptions for %s", remote)
listener.publishers.Receive(&W{Cancel: true})

// Log detailed connection statistics
dur := time.Since(listener.startTime)
log.D.F(
"ws connection closed %s: msgs=%d, REQs=%d, EVENTs=%d, duration=%v",
remote, listener.msgCount, listener.reqCount, listener.eventCount,
dur,
)

// Log any remaining connection state
if listener.authedPubkey.Load() != nil {
log.D.F("ws connection %s was authenticated", remote)
} else {
log.D.F("ws connection %s was not authenticated", remote)
}
}()
for {
select {
@@ -97,12 +127,10 @@ whitelist:
}
var typ websocket.MessageType
var msg []byte
// log.T.F("waiting for message from %s", remote)
log.T.F("waiting for message from %s", remote)

// Create a read context with timeout to prevent indefinite blocking
readCtx, readCancel := context.WithTimeout(ctx, DefaultReadTimeout)
typ, msg, err = conn.Read(readCtx)
readCancel()
// Block waiting for message; rely on pings and context cancellation to detect dead peers
typ, msg, err = conn.Read(ctx)

if err != nil {
if strings.Contains(
@@ -110,14 +138,6 @@ whitelist:
) {
return
}
// Handle timeout errors - occurs when client becomes unresponsive
if strings.Contains(err.Error(), "context deadline exceeded") {
log.T.F(
"connection from %s timed out after %v", remote,
DefaultReadTimeout,
)
return
}
// Handle EOF errors gracefully - these occur when client closes connection
// or sends incomplete/malformed WebSocket frames
if strings.Contains(err.Error(), "EOF") ||
@@ -141,19 +161,37 @@ whitelist:
return
}
if typ == PingMessage {
log.D.F("received PING from %s, sending PONG", remote)
// Create a write context with timeout for pong response
writeCtx, writeCancel := context.WithTimeout(
ctx, DefaultWriteTimeout,
)
pongStart := time.Now()
if err = conn.Write(writeCtx, PongMessage, msg); chk.E(err) {
pongDuration := time.Since(pongStart)
log.E.F(
"failed to send PONG to %s after %v: %v", remote,
pongDuration, err,
)
if writeCtx.Err() != nil {
log.E.F(
"PONG write timeout to %s after %v (limit=%v)", remote,
pongDuration, DefaultWriteTimeout,
)
}
writeCancel()
return
}
pongDuration := time.Since(pongStart)
log.D.F("sent PONG to %s successfully in %v", remote, pongDuration)
if pongDuration > time.Millisecond*50 {
log.D.F("SLOW PONG to %s: %v (>50ms)", remote, pongDuration)
}
writeCancel()
continue
}
// log.T.F("received message from %s: %s", remote, string(msg))
go listener.HandleMessage(msg, remote)
listener.HandleMessage(msg, remote)
}
}

@@ -162,21 +200,51 @@ func (s *Server) Pinger(
cancel context.CancelFunc,
) {
defer func() {
log.D.F("pinger shutting down")
cancel()
ticker.Stop()
}()
var err error
pingCount := 0
for {
select {
case <-ticker.C:
pingCount++
log.D.F("sending PING #%d", pingCount)

// Create a write context with timeout for ping operation
pingCtx, pingCancel := context.WithTimeout(ctx, DefaultWriteTimeout)
if err = conn.Ping(pingCtx); chk.E(err) {
pingStart := time.Now()

if err = conn.Ping(pingCtx); err != nil {
pingDuration := time.Since(pingStart)
log.E.F(
"PING #%d FAILED after %v: %v", pingCount, pingDuration,
err,
)

if pingCtx.Err() != nil {
log.E.F(
"PING #%d timeout after %v (limit=%v)", pingCount,
pingDuration, DefaultWriteTimeout,
)
}

chk.E(err)
pingCancel()
return
}

pingDuration := time.Since(pingStart)
log.D.F("PING #%d sent successfully in %v", pingCount, pingDuration)

if pingDuration > time.Millisecond*100 {
log.D.F("SLOW PING #%d: %v (>100ms)", pingCount, pingDuration)
}

pingCancel()
case <-ctx.Done():
log.D.F("pinger context cancelled after %d pings", pingCount)
return
}
}
@@ -3,6 +3,8 @@ package app
import (
"net/http"
"strings"

"lol.mleku.dev/log"
)

// GetRemoteFromReq retrieves the originating IP address of the client from
@@ -67,3 +69,28 @@ func GetRemoteFromReq(r *http.Request) (rr string) {
}
return
}

// LogProxyInfo logs comprehensive proxy information for debugging
func LogProxyInfo(r *http.Request, prefix string) {
proxyHeaders := map[string]string{
"X-Forwarded-For": r.Header.Get("X-Forwarded-For"),
"X-Real-IP": r.Header.Get("X-Real-IP"),
"X-Forwarded-Proto": r.Header.Get("X-Forwarded-Proto"),
"X-Forwarded-Host": r.Header.Get("X-Forwarded-Host"),
"X-Forwarded-Port": r.Header.Get("X-Forwarded-Port"),
"Forwarded": r.Header.Get("Forwarded"),
"Host": r.Header.Get("Host"),
"User-Agent": r.Header.Get("User-Agent"),
}

var info []string
for header, value := range proxyHeaders {
if value != "" {
info = append(info, header+":"+value)
}
}

if len(info) > 0 {
log.T.F("%s proxy info: %s", prefix, strings.Join(info, " "))
}
}
@@ -3,9 +3,11 @@ package app
import (
"context"
"net/http"
"time"

"github.com/coder/websocket"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/utils/atomic"
)

@@ -17,6 +19,11 @@ type Listener struct {
req *http.Request
challenge atomic.Bytes
authedPubkey atomic.Bytes
startTime time.Time
// Diagnostics: per-connection counters
msgCount int
reqCount int
eventCount int
}

// Ctx returns the listener's context, but creates a new context for each operation
@@ -26,6 +33,16 @@ func (l *Listener) Ctx() context.Context {
}

func (l *Listener) Write(p []byte) (n int, err error) {
start := time.Now()
msgLen := len(p)

// Log message attempt with content preview (first 200 chars for diagnostics)
preview := string(p)
if len(preview) > 200 {
preview = preview[:200] + "..."
}
log.D.F("ws->%s attempting write: len=%d preview=%q", l.remote, msgLen, preview)

// Use a separate context with timeout for writes to prevent race conditions
// where the main connection context gets cancelled while writing events
writeCtx, cancel := context.WithTimeout(
@@ -33,9 +50,42 @@ func (l *Listener) Write(p []byte) (n int, err error) {
)
defer cancel()

if err = l.conn.Write(writeCtx, websocket.MessageText, p); chk.E(err) {
// Attempt the write operation
writeStart := time.Now()
if err = l.conn.Write(writeCtx, websocket.MessageText, p); err != nil {
writeDuration := time.Since(writeStart)
totalDuration := time.Since(start)

// Log detailed failure information
log.E.F("ws->%s WRITE FAILED: len=%d duration=%v write_duration=%v error=%v preview=%q",
l.remote, msgLen, totalDuration, writeDuration, err, preview)

// Check if this is a context timeout
if writeCtx.Err() != nil {
log.E.F("ws->%s write timeout after %v (limit=%v)", l.remote, writeDuration, DefaultWriteTimeout)
}

// Check connection state
if l.conn != nil {
log.D.F("ws->%s connection state during failure: remote_addr=%v", l.remote, l.req.RemoteAddr)
}

chk.E(err) // Still call the original error handler
return
}
n = len(p)

// Log successful write with timing
writeDuration := time.Since(writeStart)
totalDuration := time.Since(start)
n = msgLen

log.D.F("ws->%s WRITE SUCCESS: len=%d duration=%v write_duration=%v",
l.remote, n, totalDuration, writeDuration)

// Log slow writes for performance diagnostics
if writeDuration > time.Millisecond*100 {
log.D.F("ws->%s SLOW WRITE detected: %v (>100ms) len=%d", l.remote, writeDuration, n)
}

return
}
@@ -27,12 +27,13 @@ import (

// PaymentProcessor handles NWC payment notifications and updates subscriptions
type PaymentProcessor struct {
nwcClient *nwc.Client
db *database.D
config *config.C
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
nwcClient *nwc.Client
db *database.D
config *config.C
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
dashboardURL string
}

// NewPaymentProcessor creates a new payment processor
@@ -301,8 +302,10 @@ Your paid subscription to this relay will expire in 7 days on %s.

Don't lose access to your private relay! Extend your subscription today.

Relay: nostr:%s`,
expiryTime.Format("2006-01-02 15:04:05 UTC"), monthlyPrice, monthlyPrice, string(relayNpubForContent))
Relay: nostr:%s

Log in to the relay dashboard to access your configuration at: %s`,
expiryTime.Format("2006-01-02 15:04:05 UTC"), monthlyPrice, monthlyPrice, string(relayNpubForContent), pp.getDashboardURL())

// Build the event
ev := event.New()
@@ -402,8 +405,10 @@ Simply zap this note with your payment amount:

Thank you for considering supporting decentralized communication!

Relay: nostr:%s`,
trialEnd.Format("2006-01-02 15:04:05 UTC"), monthlyPrice, dailyRate, monthlyPrice, string(relayNpubForContent))
Relay: nostr:%s

Log in to the relay dashboard to access your configuration at: %s`,
trialEnd.Format("2006-01-02 15:04:05 UTC"), monthlyPrice, dailyRate, monthlyPrice, string(relayNpubForContent), pp.getDashboardURL())

// Build the event
ev := event.New()
@@ -605,9 +610,9 @@ func (pp *PaymentProcessor) createPaymentNote(payerPubkey []byte, satsReceived i
return fmt.Errorf("failed to encode relay npub: %w", err)
}

// Create the note content with nostr:npub link
content := fmt.Sprintf("Payment received: %d sats for %d days. Subscription expires: %s\n\nRelay: nostr:%s",
satsReceived, days, expiryTime.Format("2006-01-02 15:04:05 UTC"), string(relayNpubForContent))
// Create the note content with nostr:npub link and dashboard link
content := fmt.Sprintf("Payment received: %d sats for %d days. Subscription expires: %s\n\nRelay: nostr:%s\n\nLog in to the relay dashboard to access your configuration at: %s",
satsReceived, days, expiryTime.Format("2006-01-02 15:04:05 UTC"), string(relayNpubForContent), pp.getDashboardURL())

// Build the event
ev := event.New()
@@ -699,7 +704,9 @@ To extend your subscription after the trial ends, simply zap this note with the

Relay: nostr:%s

Enjoy your time on the relay!`, monthlyPrice, monthlyPrice, string(relayNpubForContent))
Log in to the relay dashboard to access your configuration at: %s

Enjoy your time on the relay!`, monthlyPrice, monthlyPrice, string(relayNpubForContent), pp.getDashboardURL())

// Build the event
ev := event.New()
@@ -750,6 +757,25 @@ Enjoy your time on the relay!`, monthlyPrice, monthlyPrice, string(relayNpubForC
return nil
}

// SetDashboardURL sets the dynamic dashboard URL based on HTTP request
func (pp *PaymentProcessor) SetDashboardURL(url string) {
pp.dashboardURL = url
}

// getDashboardURL returns the dashboard URL for the relay
func (pp *PaymentProcessor) getDashboardURL() string {
// Use dynamic URL if available
if pp.dashboardURL != "" {
return pp.dashboardURL
}
// Fallback to static config
if pp.config.RelayURL != "" {
return pp.config.RelayURL
}
// Default fallback if no URL is configured
return "https://your-relay.example.com"
}

// extractNpubFromDescription extracts an npub from the payment description
func (pp *PaymentProcessor) extractNpubFromDescription(description string) string {
// check if the entire description is just an npub
@@ -794,6 +820,58 @@ func (pp *PaymentProcessor) npubToPubkey(npubStr string) ([]byte, error) {
return pubkey, nil
}

// UpdateRelayProfile creates or updates the relay's kind 0 profile with subscription information
func (pp *PaymentProcessor) UpdateRelayProfile() error {
// Get relay identity secret to sign the profile
skb, err := pp.db.GetRelayIdentitySecret()
if err != nil || len(skb) != 32 {
return fmt.Errorf("no relay identity configured")
}

// Initialize signer
sign := new(p256k.Signer)
if err := sign.InitSec(skb); err != nil {
return fmt.Errorf("failed to initialize signer: %w", err)
}

monthlyPrice := pp.config.MonthlyPriceSats
if monthlyPrice <= 0 {
monthlyPrice = 6000
}

// Calculate daily rate
dailyRate := monthlyPrice / 30

// Get relay wss:// URL - use dashboard URL but with wss:// scheme
relayURL := strings.Replace(pp.getDashboardURL(), "https://", "wss://", 1)

// Create profile content as JSON
profileContent := fmt.Sprintf(`{
"name": "Relay Bot",
"about": "This relay requires a subscription to access. Zap any of my notes to pay for access. Monthly price: %d sats (%d sats/day). Relay: %s",
"lud16": "",
"nip05": "",
"website": "%s"
}`, monthlyPrice, dailyRate, relayURL, pp.getDashboardURL())

// Build the profile event
ev := event.New()
ev.Kind = kind.ProfileMetadata.K // Kind 0 for profile metadata
ev.Pubkey = sign.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Content = []byte(profileContent)
ev.Tags = tag.NewS()

// Sign and save the event
ev.Sign(sign)
if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
return fmt.Errorf("failed to save relay profile: %w", err)
}

log.I.F("updated relay profile with subscription information")
return nil
}

// decodeAnyPubkey decodes a public key from either hex string or npub format
func decodeAnyPubkey(s string) ([]byte, error) {
s = strings.TrimSpace(s)
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"sync"
"time"

"github.com/coder/websocket"
"lol.mleku.dev/chk"
@@ -210,39 +211,68 @@ func (p *P) Deliver(ev *event.E) {
break
}
}
}
if !allowed {
// Skip delivery for this subscriber
continue
}
}
var res *eventenvelope.Result
if res, err = eventenvelope.NewResultWith(d.id, ev); chk.E(err) {
continue
}
// Use a separate context with timeout for writes to prevent race conditions
// where the publisher context gets cancelled while writing events
writeCtx, cancel := context.WithTimeout(
context.Background(), DefaultWriteTimeout,
)
defer cancel()
}
if !allowed {
log.D.F("subscription delivery DENIED for privileged event %s to %s (auth mismatch)",
hex.Enc(ev.ID), d.sub.remote)
// Skip delivery for this subscriber
continue
}
}

var res *eventenvelope.Result
if res, err = eventenvelope.NewResultWith(d.id, ev); chk.E(err) {
log.E.F("failed to create event envelope for %s to %s: %v",
hex.Enc(ev.ID), d.sub.remote, err)
continue
}

// Log delivery attempt
msgData := res.Marshal(nil)
log.D.F("attempting delivery of event %s (kind=%d, len=%d) to subscription %s @ %s",
hex.Enc(ev.ID), ev.Kind, len(msgData), d.id, d.sub.remote)

// Use a separate context with timeout for writes to prevent race conditions
// where the publisher context gets cancelled while writing events
writeCtx, cancel := context.WithTimeout(
context.Background(), DefaultWriteTimeout,
)
defer cancel()

if err = d.w.Write(
writeCtx, websocket.MessageText, res.Marshal(nil),
); err != nil {
// On error, remove the subscriber connection safely
p.removeSubscriber(d.w)
_ = d.w.CloseNow()
continue
}
log.D.C(
func() string {
return fmt.Sprintf(
"dispatched event %0x to subscription %s, %s",
ev.ID, d.id, d.sub.remote,
)
},
)
deliveryStart := time.Now()
if err = d.w.Write(
writeCtx, websocket.MessageText, msgData,
); err != nil {
deliveryDuration := time.Since(deliveryStart)

// Log detailed failure information
log.E.F("subscription delivery FAILED: event=%s to=%s sub=%s duration=%v error=%v",
hex.Enc(ev.ID), d.sub.remote, d.id, deliveryDuration, err)

// Check for timeout specifically
if writeCtx.Err() != nil {
log.E.F("subscription delivery TIMEOUT: event=%s to=%s after %v (limit=%v)",
hex.Enc(ev.ID), d.sub.remote, deliveryDuration, DefaultWriteTimeout)
}

// Log connection cleanup
log.D.F("removing failed subscriber connection: %s", d.sub.remote)

// On error, remove the subscriber connection safely
p.removeSubscriber(d.w)
_ = d.w.CloseNow()
continue
}

deliveryDuration := time.Since(deliveryStart)
log.D.F("subscription delivery SUCCESS: event=%s to=%s sub=%s duration=%v len=%d",
hex.Enc(ev.ID), d.sub.remote, d.id, deliveryDuration, len(msgData))

// Log slow deliveries for performance monitoring
if deliveryDuration > time.Millisecond*50 {
log.D.F("SLOW subscription delivery: event=%s to=%s duration=%v (>50ms)",
hex.Enc(ev.ID), d.sub.remote, deliveryDuration)
}
}
}
@@ -40,17 +40,24 @@ type Server struct {
// Challenge storage for HTTP UI authentication
challengeMutex sync.RWMutex
challenges map[string][]byte

paymentProcessor *PaymentProcessor
}

func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Set CORS headers for all responses
// Set comprehensive CORS headers for proxy compatibility
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
w.Header().Set(
"Access-Control-Allow-Headers", "Content-Type, Authorization",
)
w.Header().Set("Access-Control-Allow-Headers",
"Origin, X-Requested-With, Content-Type, Accept, Authorization, "+
"X-Forwarded-For, X-Forwarded-Proto, X-Forwarded-Host, X-Real-IP, "+
"Upgrade, Connection, Sec-WebSocket-Key, Sec-WebSocket-Version, "+
"Sec-WebSocket-Protocol, Sec-WebSocket-Extensions")
w.Header().Set("Access-Control-Allow-Credentials", "true")
w.Header().Set("Access-Control-Max-Age", "86400")

// Add proxy-friendly headers
w.Header().Set("Vary", "Origin, Access-Control-Request-Method, Access-Control-Request-Headers")

// Handle preflight OPTIONS requests
if r.Method == "OPTIONS" {
@@ -58,6 +65,11 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}

// Log proxy information for debugging (only for WebSocket requests to avoid spam)
if r.Header.Get("Upgrade") == "websocket" {
LogProxyInfo(r, "HTTP request")
}

// If this is a websocket request, only intercept the relay root path.
// This allows other websocket paths (e.g., Vite HMR) to be handled by the dev proxy when enabled.
if r.Header.Get("Upgrade") == "websocket" {
@@ -83,13 +95,30 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}

func (s *Server) ServiceURL(req *http.Request) (st string) {
// Get host from various proxy headers
host := req.Header.Get("X-Forwarded-Host")
if host == "" {
host = req.Header.Get("Host")
}
if host == "" {
host = req.Host
}

// Get protocol from various proxy headers
proto := req.Header.Get("X-Forwarded-Proto")
if proto == "" {
if host == "localhost" {
proto = req.Header.Get("X-Forwarded-Scheme")
}
if proto == "" {
// Check if we're behind a proxy by looking for common proxy headers
hasProxyHeaders := req.Header.Get("X-Forwarded-For") != "" ||
req.Header.Get("X-Real-IP") != "" ||
req.Header.Get("Forwarded") != ""

if hasProxyHeaders {
// If we have proxy headers, assume HTTPS/WSS
proto = "wss"
} else if host == "localhost" {
proto = "ws"
} else if strings.Contains(host, ":") {
// has a port number
@@ -113,6 +142,15 @@ func (s *Server) ServiceURL(req *http.Request) (st string) {
return proto + "://" + host
}

// DashboardURL constructs HTTPS URL for the dashboard based on the HTTP request
func (s *Server) DashboardURL(req *http.Request) string {
host := req.Header.Get("X-Forwarded-Host")
if host == "" {
host = req.Host
}
return "https://" + host
}

// UserInterface sets up a basic Nostr NDK interface that allows users to log into the relay user interface
func (s *Server) UserInterface() {
if s.mux == nil {
@@ -6,6 +6,7 @@
"dependencies": {
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-json-pretty": "^2.2.0",
},
"devDependencies": {
"bun-types": "latest",
@@ -25,10 +26,18 @@

"loose-envify": ["loose-envify@1.4.0", "", { "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, "bin": { "loose-envify": "cli.js" } }, "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q=="],

"object-assign": ["object-assign@4.1.1", "", {}, "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="],

"prop-types": ["prop-types@15.8.1", "", { "dependencies": { "loose-envify": "^1.4.0", "object-assign": "^4.1.1", "react-is": "^16.13.1" } }, "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg=="],

"react": ["react@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ=="],

"react-dom": ["react-dom@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0", "scheduler": "^0.23.2" }, "peerDependencies": { "react": "^18.3.1" } }, "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw=="],

"react-is": ["react-is@16.13.1", "", {}, "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="],

"react-json-pretty": ["react-json-pretty@2.2.0", "", { "dependencies": { "prop-types": "^15.6.2" }, "peerDependencies": { "react": ">=15.0", "react-dom": ">=15.0" } }, "sha512-3UMzlAXkJ4R8S4vmkRKtvJHTewG4/rn1Q18n0zqdu/ipZbUPLVZD+QwC7uVcD/IAY3s8iNVHlgR2dMzIUS0n1A=="],

"scheduler": ["scheduler@0.23.2", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ=="],

"undici-types": ["undici-types@7.12.0", "", {}, "sha512-goOacqME2GYyOZZfb5Lgtu+1IDmAlAEu5xnD3+xTzS10hT0vzpf0SPjkXwAw9Jm+4n/mQGDP3LO8CPbYROeBfQ=="],
161  app/web/dist/index-kk1m7jg4.js (vendored, new file): file diff suppressed because one or more lines are too long
160  app/web/dist/index-w8zpqk4w.js (vendored): file diff suppressed because one or more lines are too long
2  app/web/dist/index.html (vendored)
@@ -5,7 +5,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Nostr Relay</title>

<link rel="stylesheet" crossorigin href="./index-q4cwd1fy.css"><script type="module" crossorigin src="./index-w8zpqk4w.js"></script></head>
<link rel="stylesheet" crossorigin href="./index-q4cwd1fy.css"><script type="module" crossorigin src="./index-kk1m7jg4.js"></script></head>
<body>
<script>
// Apply system theme preference immediately to avoid flash of wrong theme
@@ -10,7 +10,8 @@
},
"dependencies": {
"react": "^18.2.0",
"react-dom": "^18.2.0"
"react-dom": "^18.2.0",
"react-json-pretty": "^2.2.0"
},
"devDependencies": {
"bun-types": "latest"
@@ -1,4 +1,22 @@
import React, { useState, useEffect, useRef } from 'react';
import JSONPretty from 'react-json-pretty';

function PrettyJSONView({ jsonString, maxHeightClass = 'max-h-64' }) {
let data;
try {
data = JSON.parse(jsonString);
} catch (_) {
data = jsonString;
}
return (
<div
className={`text-xs p-2 rounded overflow-auto ${maxHeightClass} break-all break-words whitespace-pre-wrap bg-gray-950 text-white`}
style={{ overflowWrap: 'anywhere', wordBreak: 'break-word' }}
>
<JSONPretty data={data} space={2} />
</div>
);
}

function App() {
const [user, setUser] = useState(null);
@@ -25,6 +43,14 @@ function App() {
const [allEventsHasMore, setAllEventsHasMore] = useState(true);
const [expandedAllEventId, setExpandedAllEventId] = useState(null);

// Search state
const [searchQuery, setSearchQuery] = useState('');
const [searchResults, setSearchResults] = useState([]);
const [searchLoading, setSearchLoading] = useState(false);
const [searchOffset, setSearchOffset] = useState(0);
const [searchHasMore, setSearchHasMore] = useState(true);
const [expandedSearchEventId, setExpandedSearchEventId] = useState(null);

// Profile cache for All Events Log
const [profileCache, setProfileCache] = useState({});

@@ -68,6 +94,7 @@ function App() {
exportAll: false,
exportSpecific: false,
importEvents: false,
search: true,
eventsLog: false,
allEventsLog: false
});
@@ -992,6 +1019,177 @@ function App() {
}
}

// Search functions
function processSearchResponse(receivedEvents, reset) {
try {
const filtered = filterDeletedEvents(receivedEvents);
const sorted = filtered.sort((a, b) => b.created_at - a.created_at);
const currentOffset = reset ? 0 : searchOffset;
const limit = 50;
const page = sorted.slice(currentOffset, currentOffset + limit);
if (reset) {
setSearchResults(page);
setSearchOffset(page.length);
} else {
setSearchResults(prev => [...prev, ...page]);
setSearchOffset(prev => prev + page.length);
}
setSearchHasMore(currentOffset + page.length < sorted.length);
// fetch profiles for authors in search results
fetchProfilesForEvents(page);
} catch (e) {
console.error('Error processing search results:', e);
} finally {
setSearchLoading(false);
}
}

async function fetchSearchResultsFromRelay(query, reset = true, limit = 50, timeoutMs = 10000) {
if (!query || !query.trim()) {
// clear results on empty query when resetting
if (reset) {
setSearchResults([]);
setSearchOffset(0);
setSearchHasMore(true);
}
return;
}
if (searchLoading) return;
if (!reset && !searchHasMore) return;

setSearchLoading(true);

return new Promise((resolve) => {
let resolved = false;
let receivedEvents = [];
let ws;
let reqSent = false;

try {
ws = new WebSocket(relayURL());
} catch (e) {
console.error('Failed to create WebSocket:', e);
setSearchLoading(false);
resolve();
return;
}

const subId = 'search-' + Math.random().toString(36).slice(2);
const timer = setTimeout(() => {
if (ws && ws.readyState === 1) {
try { ws.close(); } catch (_) {}
}
if (!resolved) {
resolved = true;
processSearchResponse(receivedEvents, reset);
resolve();
}
}, timeoutMs);

const sendRequest = () => {
if (!reqSent && ws && ws.readyState === 1) {
try {
const req = ['REQ', subId, { search: query }];
ws.send(JSON.stringify(req));
reqSent = true;
} catch (e) {
console.error('Failed to send WebSocket request:', e);
}
}
};

ws.onopen = () => sendRequest();

ws.onmessage = async (msg) => {
try {
const data = JSON.parse(msg.data);
const type = data[0];
if (type === 'AUTH') {
const challenge = data[1];
if (!window.nostr) {
clearTimeout(timer);
if (!resolved) {
resolved = true;
processSearchResponse(receivedEvents, reset);
resolve();
}
return;
}
try {
const authEvent = { kind: 22242, created_at: Math.floor(Date.now()/1000), tags: [['relay', relayURL()], ['challenge', challenge]], content: '' };
const signed = await window.nostr.signEvent(authEvent);
ws.send(JSON.stringify(['AUTH', signed]));
} catch (authErr) {
console.error('Search auth failed:', authErr);
clearTimeout(timer);
if (!resolved) {
resolved = true;
processSearchResponse(receivedEvents, reset);
resolve();
}
}
} else if (type === 'EVENT' && data[1] === subId) {
const ev = data[2];
if (ev) {
receivedEvents.push({
id: ev.id,
kind: ev.kind,
created_at: ev.created_at,
content: ev.content || '',
author: ev.pubkey || '',
raw_json: JSON.stringify(ev)
});
}
} else if (type === 'EOSE' && data[1] === subId) {
try { ws.send(JSON.stringify(['CLOSE', subId])); } catch (_) {}
try { ws.close(); } catch (_) {}
clearTimeout(timer);
if (!resolved) {
resolved = true;
processSearchResponse(receivedEvents, reset);
resolve();
}
} else if (type === 'CLOSED' && data[1] === subId) {
clearTimeout(timer);
if (!resolved) {
resolved = true;
processSearchResponse(receivedEvents, reset);
resolve();
}
} else if (type === 'OK' && data[1] && data[1].length === 64 && !reqSent) {
sendRequest();
}
} catch (e) {
console.error('Search WS message parse error:', e);
}
};

ws.onerror = (err) => {
console.error('Search WS error:', err);
try { ws.close(); } catch (_) {}
clearTimeout(timer);
if (!resolved) {
resolved = true;
processSearchResponse(receivedEvents, reset);
resolve();
}
};

ws.onclose = () => {
clearTimeout(timer);
if (!resolved) {
resolved = true;
processSearchResponse(receivedEvents, reset);
resolve();
}
};
});
}

function toggleSearchEventExpansion(eventId) {
setExpandedSearchEventId(current => current === eventId ? null : eventId);
}

// Events log functions
async function fetchEvents(reset = false) {
await fetchEventsFromRelay(reset);
@@ -1015,11 +1213,22 @@ function App() {

function copyEventJSON(eventJSON) {
try {
navigator.clipboard.writeText(eventJSON);
// Ensure minified JSON is copied regardless of input format
let toCopy = eventJSON;
try {
toCopy = JSON.stringify(JSON.parse(eventJSON));
} catch (_) {
// if not valid JSON string, fall back to original
}
navigator.clipboard.writeText(toCopy);
} catch (error) {
// Fallback for older browsers
const textArea = document.createElement('textarea');
textArea.value = eventJSON;
let toCopy = eventJSON;
try {
toCopy = JSON.stringify(JSON.parse(eventJSON));
} catch (_) {}
textArea.value = toCopy;
document.body.appendChild(textArea);
textArea.select();
document.execCommand('copy');
@@ -1617,6 +1826,140 @@ function App() {
</div>
</>
)}
{/* Search */}
<div className={`m-2 p-2 ${getPanelBgClass()} rounded-lg w-full`}>
<div
className={`text-lg font-bold flex items-center justify-between cursor-pointer p-2 ${getTextClass()} ${getThemeClasses('hover:bg-gray-300', 'hover:bg-gray-700')} rounded`}
onClick={() => toggleSection('search')}
>
<span>Search</span>
<span className="text-xl">
{expandedSections.search ? '▼' : '▶'}
</span>
</div>
{expandedSections.search && (
<div className="p-2 bg-gray-900 rounded-lg mt-2">
<div className="flex gap-2 items-center mb-3">
<input
type="text"
placeholder="Search notes..."
value={searchQuery}
onChange={(e) => setSearchQuery(e.target.value)}
onKeyDown={(e) => { if (e.key === 'Enter') { fetchSearchResultsFromRelay(searchQuery, true); } }}
className={`${getThemeClasses('bg-white text-black border-gray-300', 'bg-gray-800 text-white border-gray-600')} border rounded px-3 py-2 flex-grow`}
/>
<button
className={`${getThemeClasses('bg-blue-600 hover:bg-blue-700', 'bg-blue-500 hover:bg-blue-600')} text-white px-4 py-2 rounded`}
onClick={() => fetchSearchResultsFromRelay(searchQuery, true)}
disabled={searchLoading}
title="Search"
>
{searchLoading ? 'Searching…' : 'Search'}
</button>
</div>

<div className="space-y-2">
{searchResults.length === 0 && !searchLoading && (
<div className={`text-center py-4 ${getTextClass()}`}>No results</div>
)}

{searchResults.map((event) => (
<div key={event.id} className={`border rounded p-3 ${getThemeClasses('border-gray-300 bg-white', 'border-gray-600 bg-gray-800')}`}>
<div className="cursor-pointer" onClick={() => toggleSearchEventExpansion(event.id)}>
<div className="flex items-center justify-between w-full">
<div className="flex items-center gap-6 w-full">
<div className="flex items-center gap-3 min-w-0">
{event.author && profileCache[event.author] && (
<>
{profileCache[event.author].picture && (
<img
src={profileCache[event.author].picture}
alt={profileCache[event.author].display_name || profileCache[event.author].name || 'User avatar'}
className={`w-8 h-8 rounded-full object-cover border h-16 ${getThemeClasses('border-gray-300', 'border-gray-600')}`}
onError={(e) => { e.currentTarget.style.display = 'none'; }}
/>
)}
<div className="flex flex-col flex-grow w-full">
<span className={`text-sm font-medium ${getTextClass()}`}>
{profileCache[event.author].display_name || profileCache[event.author].name || `${event.author.slice(0, 8)}...`}
</span>
{profileCache[event.author].display_name && profileCache[event.author].name && (
<span className={`text-xs ${getTextClass()} opacity-70`}>
{profileCache[event.author].name}
</span>
)}
</div>
</>
)}
{event.author && !profileCache[event.author] && (
<span className={`text-sm font-medium ${getTextClass()}`}>
{`${event.author.slice(0, 8)}...`}
</span>
)}
</div>

<div className="flex items-center gap-3">
<span className={`font-mono text-sm px-2 py-1 rounded ${getThemeClasses('bg-blue-100 text-blue-800', 'bg-blue-900 text-blue-200')}`}>
Kind {event.kind}
</span>
<span className={`text-sm ${getTextClass()}`}>
{formatTimestamp(event.created_at)}
</span>
</div>
</div>
<div className="justify-end ml-auto rounded-full h-16 w-16 flex items-center justify-center">
<div className={`text-white text-xs px-4 py-4 rounded flex flex-grow items-center ${getThemeClasses('text-gray-700', 'text-gray-300')}`}>
{expandedSearchEventId === event.id ? '▼' : ' '}
</div>
<button
className="bg-red-600 hover:bg-red-700 text-white text-xs px-1 py-1 rounded flex items-center"
onClick={(e) => { e.stopPropagation(); deleteEvent(event.id, event.raw_json, event.author); }}
title="Delete this event"
>
🗑️
</button>
</div>
</div>

{event.content && (
<div className={`mt-2 text-sm ${getTextClass()}`}>
{truncateContent(event.content)}
</div>
)}
</div>

{expandedSearchEventId === event.id && (
<div className={`mt-3 p-3 rounded ${getThemeClasses('bg-gray-100', 'bg-gray-900')}`} onClick={(e) => e.stopPropagation()}>
<div className="flex items-center justify-between mb-2">
<span className={`text-sm font-semibold ${getTextClass()}`}>Raw JSON</span>
<button
className={`${getThemeClasses('bg-gray-200 hover:bg-gray-300 text-black', 'bg-gray-800 hover:bg-gray-700 text-white')} text-xs px-2 py-1 rounded`}
onClick={() => copyEventJSON(event.raw_json)}
>
Copy JSON
</button>
</div>
<PrettyJSONView jsonString={event.raw_json} maxHeightClass="max-h-64" />
</div>
)}
</div>
))}

{!searchLoading && searchHasMore && searchResults.length > 0 && (
<div className="text-center py-4">
<button
className={`${getThemeClasses('bg-blue-600 hover:bg-blue-700', 'bg-blue-500 hover:bg-blue-600')} text-white px-4 py-2 rounded`}
onClick={() => fetchSearchResultsFromRelay(searchQuery, false)}
>
Load More
</button>
</div>
)}
</div>
</div>
)}
</div>

{/* My Events Log */}
<div className={`m-2 p-2 ${getPanelBgClass()} rounded-lg w-full`}>
<div
@@ -1734,9 +2077,7 @@ function App() {
Copy
</button>
</div>
<pre className={`text-xs p-2 rounded overflow-auto max-h-40 break-all whitespace-pre-wrap ${getPanelBgClass()} ${getTextClass()}`}>
{JSON.stringify(JSON.parse(event.raw_json), null, 2)}
</pre>
<PrettyJSONView jsonString={event.raw_json} maxHeightClass="max-h-40" />
</div>
)}
</div>
@@ -1883,9 +2224,7 @@ function App() {
Copy
</button>
</div>
<pre className={`text-xs p-2 rounded overflow-auto max-h-40 break-all whitespace-pre-wrap ${getPanelBgClass()} ${getTextClass()}`}>
{JSON.stringify(JSON.parse(event.raw_json), null, 2)}
</pre>
<PrettyJSONView jsonString={event.raw_json} maxHeightClass="max-h-40" />
</div>
)}
</div>
1  cmd/benchmark/external/khatru (vendored): submodule deleted from 668c41b988
@@ -325,10 +325,10 @@ func (b *Benchmark) RunSuite() {
fmt.Printf("RunConcurrentQueryStoreTest..\n")
b.RunConcurrentQueryStoreTest()
if round < 2 {
fmt.Println("\nPausing 10s before next round...")
fmt.Printf("\nPausing 10s before next round...\n")
time.Sleep(10 * time.Second)
}
fmt.Println("\n=== Test round completed ===\n")
fmt.Printf("\n=== Test round completed ===\n\n")
}
}
116  debug-websocket.sh (new executable file)
@@ -0,0 +1,116 @@
#!/bin/bash
# WebSocket Debug Script for Stella's Orly Relay

echo "🔍 Debugging WebSocket Connection for orly-relay.imwald.eu"
echo "=================================================="

echo ""
echo "📋 Step 1: Check if relay container is running"
echo "----------------------------------------------"
docker ps | grep -E "(stella|relay|orly)" || echo "❌ No relay containers found"

echo ""
echo "📋 Step 2: Test local relay connection"
echo "--------------------------------------"
if curl -s -I http://127.0.0.1:7777 | grep -q "426"; then
    echo "✅ Local relay responding correctly (HTTP 426)"
else
    echo "❌ Local relay not responding correctly"
    curl -I http://127.0.0.1:7777
fi

echo ""
echo "📋 Step 3: Check Apache modules"
echo "------------------------------"
if apache2ctl -M 2>/dev/null | grep -q "proxy_wstunnel"; then
    echo "✅ proxy_wstunnel module enabled"
else
    echo "❌ proxy_wstunnel module NOT enabled"
    echo "Run: sudo a2enmod proxy_wstunnel"
fi

if apache2ctl -M 2>/dev/null | grep -q "rewrite"; then
    echo "✅ rewrite module enabled"
else
    echo "❌ rewrite module NOT enabled"
    echo "Run: sudo a2enmod rewrite"
fi

echo ""
echo "📋 Step 4: Check Plesk Apache configuration"
echo "------------------------------------------"
if [ -f "/etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf" ]; then
    echo "✅ Plesk config file exists"
    echo "Current proxy configuration:"
    grep -E "(Proxy|Rewrite|proxy|rewrite)" /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf || echo "❌ No proxy/rewrite rules found"
else
    echo "❌ Plesk config file not found"
fi

echo ""
echo "📋 Step 5: Test WebSocket connections"
echo "------------------------------------"

# Test with curl first (simpler)
echo "Testing HTTP upgrade request to local relay..."
if curl -s -I -H "Connection: Upgrade" -H "Upgrade: websocket" http://127.0.0.1:7777 | grep -q "426\|101"; then
    echo "✅ Local relay accepts upgrade requests"
else
    echo "❌ Local relay doesn't accept upgrade requests"
fi

echo "Testing HTTP upgrade request to remote relay..."
if curl -s -I -H "Connection: Upgrade" -H "Upgrade: websocket" https://orly-relay.imwald.eu | grep -q "426\|101"; then
    echo "✅ Remote relay accepts upgrade requests"
else
    echo "❌ Remote relay doesn't accept upgrade requests"
    echo "This indicates Apache proxy issue"
fi

# Try to install websocat if not available
if ! command -v websocat >/dev/null 2>&1; then
    echo ""
    echo "📥 Installing websocat for proper WebSocket testing..."
    if wget -q https://github.com/vi/websocat/releases/download/v1.12.0/websocat.x86_64-unknown-linux-musl -O websocat 2>/dev/null; then
        chmod +x websocat
        echo "✅ websocat installed"
    else
        echo "❌ Could not install websocat (no internet or wget issue)"
        echo "Manual install: wget https://github.com/vi/websocat/releases/download/v1.12.0/websocat.x86_64-unknown-linux-musl -O websocat && chmod +x websocat"
    fi
fi

# Test with websocat if available
if command -v ./websocat >/dev/null 2>&1; then
    echo ""
    echo "Testing actual WebSocket connection..."
    echo "Local WebSocket test:"
    timeout 3 bash -c 'echo "[\"REQ\",\"test\",{}]" | ./websocat ws://127.0.0.1:7777/' 2>/dev/null || echo "❌ Local WebSocket failed"

    echo "Remote WebSocket test (ignoring SSL):"
    timeout 3 bash -c 'echo "[\"REQ\",\"test\",{}]" | ./websocat --insecure wss://orly-relay.imwald.eu/' 2>/dev/null || echo "❌ Remote WebSocket failed"
fi

echo ""
echo "📋 Step 6: Check ports and connections"
echo "------------------------------------"
echo "Ports listening on 7777:"
netstat -tlnp 2>/dev/null | grep :7777 || ss -tlnp 2>/dev/null | grep :7777 || echo "❌ No process listening on port 7777"

echo ""
echo "📋 Step 7: Test SSL certificate"
echo "------------------------------"
echo "Certificate issuer:"
echo | openssl s_client -connect orly-relay.imwald.eu:443 -servername orly-relay.imwald.eu 2>/dev/null | openssl x509 -noout -issuer 2>/dev/null || echo "❌ SSL test failed"

echo ""
echo "🎯 RECOMMENDED NEXT STEPS:"
echo "========================="
echo "1. If proxy_wstunnel is missing: sudo a2enmod proxy_wstunnel && sudo systemctl restart apache2"
echo "2. If no proxy rules found: Add configuration in Plesk Apache & nginx Settings"
echo "3. If local WebSocket fails: Check if relay container is actually running"
echo "4. If remote WebSocket fails but local works: Apache proxy configuration issue"
echo ""
echo "🔧 Try this simple Plesk configuration:"
echo "ProxyPass / http://127.0.0.1:7777/"
echo "ProxyPassReverse / http://127.0.0.1:7777/"
96  docker-compose.yml (new file)
@@ -0,0 +1,96 @@
# Docker Compose for Stella's Nostr Relay
# Owner: npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx

services:
  orly-relay:
    build:
      context: .
      dockerfile: Dockerfile
    image: silberengel/next-orly:latest
    container_name: orly-relay
    restart: unless-stopped
    ports:
      - "127.0.0.1:7777:7777"
    volumes:
      - relay_data:/data
      - ./profiles:/profiles:ro
    environment:
      # Relay Configuration
      - ORLY_DATA_DIR=/data
      - ORLY_LISTEN=0.0.0.0
      - ORLY_PORT=7777
      - ORLY_LOG_LEVEL=info
      - ORLY_DB_LOG_LEVEL=error
      - ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx
      - ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1m4ny6hjqzepn4rxknuq94c2gpqzr29ufkkw7ttcxyak7v43n6vvsajc2jl,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z

      # ACL and Spider Configuration
      - ORLY_ACL_MODE=follows
      - ORLY_SPIDER_MODE=follows

      # Bootstrap relay URLs for initial sync
      - ORLY_BOOTSTRAP_RELAYS=wss://profiles.nostr1.com,wss://purplepag.es,wss://relay.nostr.band,wss://relay.damus.io

      # Subscription Settings (optional)
      - ORLY_SUBSCRIPTION_ENABLED=false
      - ORLY_MONTHLY_PRICE_SATS=0

      # Performance Settings
      - ORLY_MAX_CONNECTIONS=1000
      - ORLY_MAX_EVENT_SIZE=65536
      - ORLY_MAX_SUBSCRIPTIONS=20

    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:7777"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

    # Resource limits
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: '1.0'
        reservations:
          memory: 256M
          cpus: '0.25'

    # Logging configuration
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # Optional: Nginx reverse proxy for SSL/domain setup
  nginx:
    image: nginx:alpine
    container_name: stella-nginx
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/ssl:/etc/nginx/ssl:ro
      - nginx_logs:/var/log/nginx
    depends_on:
      - orly-relay
    profiles:
      - proxy # Only start with: docker-compose --profile proxy up

volumes:
  relay_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./data
  nginx_logs:
    driver: local

networks:
  default:
    name: orly-relay-network
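
# Usage sketch (assumes Docker Compose v2 and this file saved as
# docker-compose.yml in the project root; not part of the original file):
#   docker compose up -d                    # start the relay
#   docker compose logs -f orly-relay       # follow relay logs
#   docker compose --profile proxy up -d    # also start the optional nginx proxy
#   docker compose down                     # stop everything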
259  docs/websocket-req-comparison.md (new file)
@@ -0,0 +1,259 @@
# WebSocket REQ Handling Comparison: Khatru vs Next.orly.dev

## Overview

This document compares how two Nostr relay implementations handle WebSocket connections and REQ (subscription) messages:

1. **Khatru** - A popular Go-based Nostr relay library by fiatjaf
2. **Next.orly.dev** - A custom relay implementation with advanced features

## Architecture Comparison

### Khatru Architecture
- **Monolithic approach**: Single large `HandleWebsocket` method (~380 lines) processes all message types
- **Inline processing**: REQ handling is embedded within the main websocket handler
- **Hook-based extensibility**: Uses function slices for customizable behavior
- **Simple structure**: WebSocket struct with basic fields and mutex for thread safety

### Next.orly.dev Architecture
- **Modular approach**: Separate methods for each message type (`HandleReq`, `HandleEvent`, etc.)
- **Layered processing**: Message identification → envelope parsing → type-specific handling
- **Publisher-subscriber system**: Dedicated infrastructure for subscription management
- **Rich context**: Listener struct with detailed state tracking and metrics (a dispatch sketch contrasting the two styles follows this list)
|
||||
|
||||
## Connection Establishment
|
||||
|
||||
### Khatru
|
||||
```go
|
||||
// Simple websocket upgrade
|
||||
conn, err := rl.upgrader.Upgrade(w, r, nil)
|
||||
ws := &WebSocket{
|
||||
conn: conn,
|
||||
Request: r,
|
||||
Challenge: hex.EncodeToString(challenge),
|
||||
negentropySessions: xsync.NewMapOf[string, *NegentropySession](),
|
||||
}
|
||||
```
|
||||
|
||||
### Next.orly.dev
|
||||
```go
|
||||
// More sophisticated setup with IP whitelisting
|
||||
conn, err = websocket.Accept(w, r, &websocket.AcceptOptions{OriginPatterns: []string{"*"}})
|
||||
listener := &Listener{
|
||||
ctx: ctx,
|
||||
Server: s,
|
||||
conn: conn,
|
||||
remote: remote,
|
||||
req: r,
|
||||
}
|
||||
// Immediate AUTH challenge if ACLs are configured
|
||||
```
|
||||
|
||||
**Key Differences:**
|
||||
- Next.orly.dev includes IP whitelisting and immediate authentication challenges
|
||||
- Khatru uses fasthttp/websocket library vs next.orly.dev using coder/websocket
|
||||
- Next.orly.dev has more detailed connection state tracking
|
||||
|
||||
## Message Processing
|
||||
|
||||
### Khatru
|
||||
- Uses `nostr.MessageParser` for sequential parsing
|
||||
- Switch statement on envelope type within goroutine
|
||||
- Direct processing without intermediate validation layers
|
||||
|
||||
### Next.orly.dev
|
||||
- Custom envelope identification system (`envelopes.Identify`)
|
||||
- Separate validation and processing phases
|
||||
- Extensive logging and error handling at each step
|
||||
|
||||
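As a rough, self-contained illustration of the layered identification step described above: the `identify` helper below is a hypothetical stand-in, not the actual `envelopes.Identify` API.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// identify peels the label off a Nostr client message, which is a JSON
// array whose first element names the message type ("REQ", "EVENT", ...).
func identify(raw []byte) (label string, rest []json.RawMessage, err error) {
	var arr []json.RawMessage
	if err = json.Unmarshal(raw, &arr); err != nil {
		return "", nil, err
	}
	if len(arr) == 0 {
		return "", nil, fmt.Errorf("empty envelope")
	}
	if err = json.Unmarshal(arr[0], &label); err != nil {
		return "", nil, err
	}
	return label, arr[1:], nil
}

func main() {
	label, rest, err := identify([]byte(`["REQ","sub-1",{"kinds":[1]}]`))
	if err != nil {
		panic(err)
	}
	// Type-specific handling happens after identification; handlers elided.
	switch label {
	case "REQ":
		fmt.Printf("REQ with %d payload elements\n", len(rest))
	default:
		fmt.Println("unhandled label:", label)
	}
}
```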
## REQ Message Handling

### Khatru REQ Processing
```go
case *nostr.ReqEnvelope:
	eose := sync.WaitGroup{}
	eose.Add(len(env.Filters))

	// Handle each filter separately
	for _, filter := range env.Filters {
		err := srl.handleRequest(reqCtx, env.SubscriptionID, &eose, ws, filter)
		if err != nil {
			// Fail everything if any filter is rejected
			ws.WriteJSON(nostr.ClosedEnvelope{SubscriptionID: env.SubscriptionID, Reason: reason})
			return
		} else {
			rl.addListener(ws, env.SubscriptionID, srl, filter, cancelReqCtx)
		}
	}

	go func() {
		eose.Wait()
		ws.WriteJSON(nostr.EOSEEnvelope(env.SubscriptionID))
	}()
```

### Next.orly.dev REQ Processing
```go
// Comprehensive ACL and authentication checks first
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
switch accessLevel {
case "none":
	return // Send auth-required response
}

// Process all filters and collect events
for _, f := range *env.Filters {
	filterEvents, err = l.QueryEvents(queryCtx, f)
	allEvents = append(allEvents, filterEvents...)
}

// Apply privacy and privilege checks
// Send all historical events
// Set up ongoing subscription only if needed
```

## Key Architectural Differences

### 1. **Filter Processing Strategy**

**Khatru:**
- Processes each filter independently and concurrently
- Uses WaitGroup to coordinate EOSE across all filters (see the sketch after this list)
- Immediately sets up listeners for ongoing subscriptions
- Fails entire subscription if any filter is rejected

**Next.orly.dev:**
- Processes all filters sequentially in a single context
- Collects all events before applying access control
- Only sets up subscriptions for filters that need ongoing updates
- Gracefully handles individual filter failures
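A runnable toy contrasting the two strategies; `queryFilter` is a stand-in for a per-filter database query, not either relay's API.

```go
package main

import (
	"fmt"
	"sync"
)

// queryFilter stands in for a per-filter query; a real relay would
// stream matching events to the client as they are found.
func queryFilter(id int) []string {
	return []string{fmt.Sprintf("event-from-filter-%d", id)}
}

func main() {
	filters := []int{0, 1, 2}

	// Khatru-style: one goroutine per filter; EOSE fires when the
	// WaitGroup drains, so events stream as soon as any filter finds them.
	var eose sync.WaitGroup
	eose.Add(len(filters))
	results := make(chan string, 16)
	for _, f := range filters {
		go func(f int) {
			defer eose.Done()
			for _, ev := range queryFilter(f) {
				results <- ev
			}
		}(f)
	}
	go func() { eose.Wait(); close(results) }()
	for ev := range results {
		fmt.Println("stream:", ev)
	}
	fmt.Println("EOSE")

	// Next.orly.dev-style: collect everything sequentially, then apply
	// access control over the whole batch before sending.
	var all []string
	for _, f := range filters {
		all = append(all, queryFilter(f)...)
	}
	fmt.Println("batch of", len(all), "events, filtered then sent together")
}
```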
### 2. **Access Control Integration**

**Khatru:**
- Basic NIP-42 authentication support
- Hook-based authorization via `RejectFilter` functions
- Limited built-in access control features

**Next.orly.dev:**
- Comprehensive ACL system with multiple access levels
- Built-in support for private events with npub authorization
- Privileged event filtering based on pubkey and p-tags
- Granular permission checking at multiple stages

### 3. **Subscription Management**

**Khatru:**
```go
// Simple listener registration
type listenerSpec struct {
	filter   nostr.Filter
	cancel   context.CancelCauseFunc
	subRelay *Relay
}
rl.addListener(ws, subscriptionID, relay, filter, cancel)
```

**Next.orly.dev:**
```go
// Publisher-subscriber system with rich metadata
type W struct {
	Conn         *websocket.Conn
	remote       string
	Id           string
	Receiver     event.C
	Filters      *filter.S
	AuthedPubkey []byte
}
l.publishers.Receive(&W{...})
```

### 4. **Performance Optimizations**

**Khatru:**
- Concurrent filter processing
- Immediate streaming of events as they're found
- Memory-efficient with direct event streaming

**Next.orly.dev:**
- Batch processing with deduplication
- Memory management with explicit `ev.Free()` calls
- Smart subscription cancellation for ID-only queries
- Event result caching and seen-tracking

### 5. **Error Handling & Observability**

**Khatru:**
- Basic error logging
- Simple connection state management
- Limited metrics and observability

**Next.orly.dev:**
- Comprehensive error handling with context preservation
- Detailed logging at each processing stage
- Built-in metrics (message count, REQ count, event count)
- Graceful degradation on individual component failures

## Memory Management

### Khatru
- Relies on Go's garbage collector
- Simple WebSocket struct with minimal state
- Uses sync.Map for thread-safe operations

### Next.orly.dev
- Explicit memory management with `ev.Free()` calls
- Resource pooling and reuse patterns
- Detailed tracking of connection resources

## Concurrency Models

### Khatru
- Per-connection goroutine for message reading
- Additional goroutines for processing each message
- WaitGroup coordination for multi-filter EOSE

### Next.orly.dev
- Per-connection goroutine with single-threaded message processing
- Publisher-subscriber system handles concurrent event distribution
- Context-based cancellation throughout (sketched below)
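A minimal sketch of context-driven subscription teardown, assuming nothing beyond the standard library:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// subscription loops until its context is cancelled, mirroring the
// per-connection, context-driven lifecycle described above.
func subscription(ctx context.Context, events <-chan string) {
	for {
		select {
		case <-ctx.Done():
			fmt.Println("subscription closed:", context.Cause(ctx))
			return
		case ev := <-events:
			fmt.Println("deliver:", ev)
		}
	}
}

func main() {
	events := make(chan string)
	ctx, cancel := context.WithCancelCause(context.Background())
	go subscription(ctx, events)
	events <- "kind-1 note"
	cancel(fmt.Errorf("client sent CLOSE"))
	time.Sleep(50 * time.Millisecond) // let the goroutine observe cancellation
}
```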
## Trade-offs Analysis

### Khatru Advantages
- **Simplicity**: Easier to understand and modify
- **Performance**: Lower latency due to concurrent processing
- **Flexibility**: Hook-based architecture allows extensive customization
- **Streaming**: Events sent as soon as they're found

### Khatru Disadvantages
- **Monolithic**: Large methods harder to maintain
- **Limited ACL**: Basic authentication and authorization
- **Error handling**: Less graceful failure recovery
- **Resource usage**: No explicit memory management

### Next.orly.dev Advantages
- **Security**: Comprehensive ACL and privacy features
- **Observability**: Extensive logging and metrics
- **Resource management**: Explicit memory and connection lifecycle management
- **Modularity**: Easier to test and extend individual components
- **Robustness**: Graceful handling of edge cases and failures

### Next.orly.dev Disadvantages
- **Complexity**: Higher cognitive overhead and learning curve
- **Latency**: Sequential processing may be slower for some use cases
- **Resource overhead**: More memory usage due to batching and state tracking
- **Coupling**: Tighter integration between components

## Conclusion

Both implementations represent different philosophies:

- **Khatru** prioritizes simplicity, performance, and extensibility through a hook-based architecture
- **Next.orly.dev** prioritizes security, observability, and robustness through comprehensive built-in features

The choice between them depends on specific requirements:
- Choose **Khatru** for high-performance relays with custom business logic
- Choose **Next.orly.dev** for production relays requiring comprehensive access control and monitoring

Both approaches demonstrate mature understanding of Nostr protocol requirements while making different trade-offs in complexity vs. features.
44
go.mod
@@ -4,48 +4,50 @@ go 1.25.0
require (
	github.com/adrg/xdg v0.5.3
	github.com/coder/websocket v1.8.13
	github.com/coder/websocket v1.8.14
	github.com/davecgh/go-spew v1.1.1
	github.com/dgraph-io/badger/v4 v4.8.0
	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
	github.com/klauspost/cpuid/v2 v2.3.0
	github.com/pkg/profile v1.7.0
	github.com/puzpuzpuz/xsync/v3 v3.5.1
	github.com/stretchr/testify v1.10.0
	github.com/stretchr/testify v1.11.1
	github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b
	go-simpler.org/env v0.12.0
	go.uber.org/atomic v1.11.0
	golang.org/x/crypto v0.41.0
	golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b
	golang.org/x/crypto v0.42.0
	golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9
	golang.org/x/lint v0.0.0-20241112194109-818c5a804067
	golang.org/x/net v0.43.0
	golang.org/x/net v0.44.0
	honnef.co/go/tools v0.6.1
	lol.mleku.dev v1.0.3
	lukechampine.com/frand v1.5.1
)

require (
	github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
	github.com/BurntSushi/toml v1.5.0 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
	github.com/dgraph-io/ristretto/v2 v2.3.0 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/felixge/fgprof v0.9.3 // indirect
	github.com/felixge/fgprof v0.9.5 // indirect
	github.com/go-logr/logr v1.4.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/google/flatbuffers v25.2.10+incompatible // indirect
	github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
	github.com/google/flatbuffers v25.9.23+incompatible // indirect
	github.com/google/pprof v0.0.0-20251002213607-436353cc1ee6 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/templexxx/cpu v0.0.1 // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/otel v1.37.0 // indirect
	go.opentelemetry.io/otel/metric v1.37.0 // indirect
	go.opentelemetry.io/otel/trace v1.37.0 // indirect
	golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect
	golang.org/x/mod v0.27.0 // indirect
	golang.org/x/sync v0.16.0 // indirect
	golang.org/x/sys v0.35.0 // indirect
	golang.org/x/tools v0.36.0 // indirect
	google.golang.org/protobuf v1.36.6 // indirect
	github.com/templexxx/cpu v0.1.1 // indirect
	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
	go.opentelemetry.io/otel v1.38.0 // indirect
	go.opentelemetry.io/otel/metric v1.38.0 // indirect
	go.opentelemetry.io/otel/trace v1.38.0 // indirect
	golang.org/x/exp/typeparams v0.0.0-20251002181428-27f1f14c8bb9 // indirect
	golang.org/x/mod v0.28.0 // indirect
	golang.org/x/sync v0.17.0 // indirect
	golang.org/x/sys v0.36.0 // indirect
	golang.org/x/tools v0.37.0 // indirect
	google.golang.org/protobuf v1.36.10 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)

retract v1.0.3
102
go.sum
@@ -1,39 +1,53 @@
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI=
github.com/dgraph-io/ristretto/v2 v2.3.0 h1:qTQ38m7oIyd4GAed/QkUZyPFNMnvVWyazGXRwvOt5zk=
github.com/dgraph-io/ristretto/v2 v2.3.0/go.mod h1:gpoRV3VzrEY1a9dWAYV6T1U7YzfgttXdd/ZzL1s9OZM=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/google/flatbuffers v25.9.23+incompatible h1:rGZKv+wOb6QPzIdkM2KxhBZCDrA0DeN6DNmRDrqIsQU=
github.com/google/flatbuffers v25.9.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20251002213607-436353cc1ee6 h1:/WHh/1k4thM/w+PAZEIiZK9NwCMFahw5tUzKUCnUtds=
github.com/google/pprof v0.0.0-20251002213607-436353cc1ee6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
@@ -44,70 +58,76 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/cpu v0.1.1 h1:isxHaxBXpYFWnk2DReuKkigaZyrjs2+9ypIdGP4h+HI=
github.com/templexxx/cpu v0.1.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ=
go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs=
go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 h1:TQwNpfvNkxAVlItJf6Cr5JTsVZoC/Sj7K3OZv2Pc14A=
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
golang.org/x/exp/typeparams v0.0.0-20251002181428-27f1f14c8bb9 h1:EvjuVHWMoRaAxH402KMgrQpGUjoBy/OWvZjLOqQnwNk=
golang.org/x/exp/typeparams v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms=
golang.org/x/lint v0.0.0-20241112194109-818c5a804067 h1:adDmSQyFTCiv19j015EGKJBoaa7ElV0Q1Wovb/4G7NA=
golang.org/x/lint v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
154
manage-relay.sh
Executable file
@@ -0,0 +1,154 @@
#!/bin/bash
# Stella's Orly Relay Management Script
# Uses docker-compose.yml directly for configuration

set -e

# Get script directory and project root
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$SCRIPT_DIR"

# Configuration from docker-compose.yml
RELAY_SERVICE="orly-relay"
CONTAINER_NAME="orly-nostr-relay"
RELAY_URL="ws://127.0.0.1:7777"
HTTP_URL="http://127.0.0.1:7777"
RELAY_DATA_DIR="/home/madmin/.local/share/orly-relay"

# Change to project directory for docker-compose commands
cd "$PROJECT_DIR"

case "${1:-}" in
    "start")
        echo "🚀 Starting Stella's Orly Relay..."
        docker compose up -d orly-relay
        echo "✅ Relay started!"
        ;;
    "stop")
        echo "⏹️ Stopping Stella's Orly Relay..."
        docker compose down
        echo "✅ Relay stopped!"
        ;;
    "restart")
        echo "🔄 Restarting Stella's Orly Relay..."
        docker compose restart orly-relay
        echo "✅ Relay restarted!"
        ;;
    "status")
        echo "📊 Stella's Orly Relay Status:"
        docker compose ps orly-relay
        ;;
    "logs")
        echo "📜 Stella's Orly Relay Logs:"
        docker compose logs -f orly-relay
        ;;
    "test")
        echo "🧪 Testing relay connection..."
        if curl -s -I "$HTTP_URL" | grep -q "426 Upgrade Required"; then
            echo "✅ Relay is responding correctly!"
            echo "📡 WebSocket URL: $RELAY_URL"
            echo "🌐 HTTP URL: $HTTP_URL"
        else
            echo "❌ Relay is not responding correctly"
            echo "   Expected: 426 Upgrade Required"
            echo "   URL: $HTTP_URL"
            exit 1
        fi
        ;;
    "enable")
        echo "🔧 Enabling relay to start at boot..."
        sudo systemctl enable $RELAY_SERVICE
        echo "✅ Relay will start automatically at boot!"
        ;;
    "disable")
        echo "🔧 Disabling relay auto-start..."
        sudo systemctl disable $RELAY_SERVICE
        echo "✅ Relay will not start automatically at boot!"
        ;;
    "info")
        echo "📋 Stella's Orly Relay Information:"
        echo "   Service: $RELAY_SERVICE"
        echo "   Container: $CONTAINER_NAME"
        echo "   WebSocket URL: $RELAY_URL"
        echo "   HTTP URL: $HTTP_URL"
        echo "   Data Directory: $RELAY_DATA_DIR"
        echo "   Config Directory: $PROJECT_DIR"
        echo ""
        echo "🐳 Docker Information:"
        echo "   Compose File: $PROJECT_DIR/docker-compose.yml"
        echo "   Container Status:"
        docker compose ps orly-relay 2>/dev/null || echo "   Not running"
        echo ""
        echo "💡 Configuration:"
        echo "   All settings are defined in docker-compose.yml"
        echo "   Use 'docker compose config' to see parsed configuration"
        ;;
    "docker-logs")
        echo "🐳 Docker Container Logs:"
        docker compose logs -f orly-relay 2>/dev/null || echo "❌ Container not found or not running"
        ;;
    "docker-status")
        echo "🐳 Docker Container Status:"
        docker compose ps orly-relay
        ;;
    "docker-restart")
        echo "🔄 Restarting Docker Container..."
        docker compose restart orly-relay
        echo "✅ Container restarted!"
        ;;
    "docker-update")
        echo "🔄 Updating and restarting Docker Container..."
        docker compose pull orly-relay
        docker compose up -d orly-relay
        echo "✅ Container updated and restarted!"
        ;;
    "docker-build")
        echo "🔨 Building Docker Container..."
        docker compose build orly-relay
        echo "✅ Container built!"
        ;;
    "docker-down")
        echo "⏹️ Stopping Docker Container..."
        docker compose down
        echo "✅ Container stopped!"
        ;;
    "docker-config")
        echo "📋 Docker Compose Configuration:"
        docker compose config
        ;;
    *)
        echo "🌲 Stella's Orly Relay Management Script"
        echo ""
        echo "Usage: $0 [COMMAND]"
        echo ""
        echo "Commands:"
        echo "  start          Start the relay"
        echo "  stop           Stop the relay"
        echo "  restart        Restart the relay"
        echo "  status         Show relay status"
        echo "  logs           Show relay logs (follow mode)"
        echo "  test           Test relay connection"
        echo "  enable         Enable auto-start at boot"
        echo "  disable        Disable auto-start at boot"
        echo "  info           Show relay information"
        echo ""
        echo "Docker Commands:"
        echo "  docker-logs    Show Docker container logs"
        echo "  docker-status  Show Docker container status"
        echo "  docker-restart Restart Docker container only"
        echo "  docker-update  Update and restart container"
        echo "  docker-build   Build Docker container"
        echo "  docker-down    Stop Docker container"
        echo "  docker-config  Show Docker Compose configuration"
        echo ""
        echo "Examples:"
        echo "  $0 start         # Start the relay"
        echo "  $0 status        # Check if it's running"
        echo "  $0 test          # Test WebSocket connection"
        echo "  $0 logs          # Watch real-time logs"
        echo "  $0 docker-logs   # Watch Docker container logs"
        echo "  $0 docker-update # Update and restart container"
        echo ""
        echo "🌲 Crafted in the digital forest by Stella ✨"
        ;;
esac
@@ -3,6 +3,8 @@ package acl
import (
	"bytes"
	"context"
	"encoding/hex"
	"net/http"
	"reflect"
	"strings"
	"sync"
@@ -22,9 +24,9 @@ import (
	"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
	"next.orly.dev/pkg/encoders/event"
	"next.orly.dev/pkg/encoders/filter"
	"next.orly.dev/pkg/encoders/hex"
	"next.orly.dev/pkg/encoders/kind"
	"next.orly.dev/pkg/encoders/tag"
	"next.orly.dev/pkg/encoders/timestamp"
	"next.orly.dev/pkg/protocol/publish"
	"next.orly.dev/pkg/utils"
	"next.orly.dev/pkg/utils/normalize"
@@ -108,7 +110,7 @@ func (f *Follows) Configure(cfg ...any) (err error) {
		for _, v := range ev.Tags.GetAll([]byte("p")) {
			// log.I.F("adding follow: %s", v.Value())
			var a []byte
			if b, e := hex.Dec(string(v.Value())); chk.E(e) {
			if b, e := hex.DecodeString(string(v.Value())); chk.E(e) {
				continue
			} else {
				a = b
@@ -158,6 +160,8 @@ func (f *Follows) adminRelays() (urls []string) {
	copy(admins, f.admins)
	f.followsMx.RUnlock()
	seen := make(map[string]struct{})

	// First, try to get relay URLs from admin kind 10002 events
	for _, adm := range admins {
		fl := &filter.F{
			Authors: tag.NewFromAny(adm),
@@ -194,6 +198,29 @@ func (f *Follows) adminRelays() (urls []string) {
			}
		}
	}

	// If no admin relays found, use bootstrap relays as fallback
	if len(urls) == 0 {
		log.I.F("no admin relays found in DB, checking bootstrap relays")
		if len(f.cfg.BootstrapRelays) > 0 {
			log.I.F("using bootstrap relays: %v", f.cfg.BootstrapRelays)
			for _, relay := range f.cfg.BootstrapRelays {
				n := string(normalize.URL(relay))
				if n == "" {
					log.W.F("invalid bootstrap relay URL: %s", relay)
					continue
				}
				if _, ok := seen[n]; ok {
					continue
				}
				seen[n] = struct{}{}
				urls = append(urls, n)
			}
		} else {
			log.W.F("no bootstrap relays configured")
		}
	}

	return
}

@@ -211,7 +238,7 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
	urls := f.adminRelays()
	log.I.S(urls)
	if len(urls) == 0 {
		log.W.F("follows syncer: no admin relays found in DB (kind 10002)")
		log.W.F("follows syncer: no admin relays found in DB (kind 10002) and no bootstrap relays configured")
		return
	}
	log.T.F(
@@ -228,18 +255,45 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
				return
			default:
			}
			c, _, err := websocket.Dial(ctx, u, nil)
			// Create a timeout context for the connection
			connCtx, cancel := context.WithTimeout(ctx, 10*time.Second)

			// Create proper headers for the WebSocket connection
			headers := http.Header{}
			headers.Set("User-Agent", "ORLY-Relay/0.9.2")
			headers.Set("Origin", "https://orly.dev")

			// Use proper WebSocket dial options
			dialOptions := &websocket.DialOptions{
				HTTPHeader: headers,
			}

			c, _, err := websocket.Dial(connCtx, u, dialOptions)
			cancel()
			if err != nil {
				log.W.F("follows syncer: dial %s failed: %v", u, err)
				if strings.Contains(
					err.Error(), "response status code 101 but got 403",
				) {
					// 403 means the relay is not accepting connections from
					// us. Forbidden is the meaning, usually used to
					// indicate either the IP or user is blocked. so stop
					// trying this one.
					return

				// Handle different types of errors
				if strings.Contains(err.Error(), "response status code 101 but got 403") {
					// 403 means the relay is not accepting connections from us
					// Forbidden is the meaning, usually used to indicate either the IP or user is blocked
					// But we should still retry after a longer delay
					log.W.F("follows syncer: relay %s returned 403, will retry after longer delay", u)
					timer := time.NewTimer(5 * time.Minute) // Wait 5 minutes before retrying 403 errors
					select {
					case <-ctx.Done():
						return
					case <-timer.C:
					}
					continue
				} else if strings.Contains(err.Error(), "timeout") || strings.Contains(err.Error(), "connection refused") {
					// Network issues, retry with normal backoff
					log.W.F("follows syncer: network issue with %s, retrying in %v", u, backoff)
				} else {
					// Other errors, retry with normal backoff
					log.W.F("follows syncer: connection error with %s, retrying in %v", u, backoff)
				}

				timer := time.NewTimer(backoff)
				select {
				case <-ctx.Done():
@@ -252,21 +306,37 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
					continue
				}
				backoff = time.Second
			// send REQ
			log.I.F("follows syncer: successfully connected to %s", u)

			// send REQ for kind 3 (follow lists), kind 10002 (relay lists), and all events from follows
			ff := &filter.S{}
			f1 := &filter.F{
				Authors: tag.NewFromBytesSlice(authors...),
				Limit:   values.ToUintPointer(0),
				Kinds:   kind.NewS(kind.New(kind.FollowList.K)),
				Limit:   values.ToUintPointer(100),
			}
			*ff = append(*ff, f1)
			f2 := &filter.F{
				Authors: tag.NewFromBytesSlice(authors...),
				Kinds:   kind.NewS(kind.New(kind.RelayListMetadata.K)),
				Limit:   values.ToUintPointer(100),
			}
			// Add filter for all events from follows (last 30 days)
			oneMonthAgo := timestamp.FromUnix(time.Now().Add(-30 * 24 * time.Hour).Unix())
			f3 := &filter.F{
				Authors: tag.NewFromBytesSlice(authors...),
				Since:   oneMonthAgo,
				Limit:   values.ToUintPointer(1000),
			}
			*ff = append(*ff, f1, f2, f3)
			req := reqenvelope.NewFrom([]byte("follows-sync"), ff)
			if err = c.Write(
				ctx, websocket.MessageText, req.Marshal(nil),
			); chk.E(err) {
				log.W.F("follows syncer: failed to send REQ to %s: %v", u, err)
				_ = c.Close(websocket.StatusInternalError, "write failed")
				continue
			}
			log.T.F("sent REQ to %s for follows subscription", u)
			log.I.F("follows syncer: sent REQ to %s for kind 3, 10002, and all events (last 30 days) from followed users", u)
			// read loop
			for {
				select {
@@ -294,6 +364,23 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
					if ok, err := res.Event.Verify(); chk.T(err) || !ok {
						continue
					}

					// Process events based on kind
					switch res.Event.Kind {
					case kind.FollowList.K:
						log.I.F("follows syncer: received kind 3 (follow list) event from %s on relay %s",
							hex.EncodeToString(res.Event.Pubkey), u)
						// Extract followed pubkeys from 'p' tags in kind 3 events
						f.extractFollowedPubkeys(res.Event)
					case kind.RelayListMetadata.K:
						log.I.F("follows syncer: received kind 10002 (relay list) event from %s on relay %s",
							hex.EncodeToString(res.Event.Pubkey), u)
					default:
						// Log all other events from followed users
						log.I.F("follows syncer: received kind %d event from %s on relay %s",
							res.Event.Kind, hex.EncodeToString(res.Event.Pubkey), u)
					}

					if _, _, err = f.D.SaveEvent(
						ctx, res.Event,
					); err != nil {
@@ -365,12 +452,26 @@ func (f *Follows) Syncer() {
func (f *Follows) GetFollowedPubkeys() [][]byte {
	f.followsMx.RLock()
	defer f.followsMx.RUnlock()

	followedPubkeys := make([][]byte, len(f.follows))
	copy(followedPubkeys, f.follows)
	return followedPubkeys
}

// extractFollowedPubkeys extracts followed pubkeys from 'p' tags in kind 3 events
func (f *Follows) extractFollowedPubkeys(event *event.E) {
	if event.Kind != kind.FollowList.K {
		return
	}

	// Extract all 'p' tags (followed pubkeys) from the kind 3 event
	for _, tag := range event.Tags.GetAll([]byte("p")) {
		if len(tag.Value()) == 32 { // Valid pubkey length
			f.AddFollow(tag.Value())
		}
	}
}

// AddFollow appends a pubkey to the in-memory follows list if not already present
// and signals the syncer to refresh subscriptions.
func (f *Follows) AddFollow(pub []byte) {
@@ -387,6 +488,7 @@ func (f *Follows) AddFollow(pub []byte) {
	b := make([]byte, len(pub))
	copy(b, pub)
	f.follows = append(f.follows, b)
	log.I.F("follows syncer: added new followed pubkey: %s", hex.EncodeToString(pub))
	// notify syncer if initialized
	if f.updated != nil {
		select {
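Distilled from the hunk above, the dial pattern is: a per-attempt timeout context, then error-classified backoff (403 waits much longer than transient network failures). A self-contained sketch; the `dial` placeholder simulates `websocket.Dial` and is not the library's API:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"
)

var errForbidden = errors.New("response status code 101 but got 403")

// dial simulates a websocket dial with a per-attempt timeout so a hung
// handshake cannot stall the retry loop.
func dial(parent context.Context, url string) error {
	ctx, cancel := context.WithTimeout(parent, 10*time.Second)
	defer cancel()
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(10 * time.Millisecond):
		return errForbidden // simulate a relay that rejects us
	}
}

func main() {
	backoff := time.Second
	for attempt := 0; attempt < 3; attempt++ {
		err := dial(context.Background(), "wss://example.relay")
		if err == nil {
			backoff = time.Second // reset on success
			break
		}
		wait := backoff
		if strings.Contains(err.Error(), "403") {
			wait = 5 * time.Minute // forbidden: retry much later
		}
		fmt.Printf("dial failed (%v), retrying in %v\n", err, wait)
		// time.Sleep(wait) would go here; omitted to keep the demo fast
	}
}
```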
44
pkg/database/count.go
Normal file
@@ -0,0 +1,44 @@
package database

import (
	"context"

	"next.orly.dev/pkg/encoders/filter"
)

// CountEvents mirrors the initial selection logic of QueryEvents but stops
// once we have identified candidate event serials (id/pk/ts). It returns the
// count of those serials. The `approx` flag is always false as requested.
func (d *D) CountEvents(c context.Context, f *filter.F) (
	count int, approx bool, err error,
) {
	approx = false
	if f == nil {
		return 0, false, nil
	}

	// If explicit Ids are provided, count how many of them resolve to serials.
	if f.Ids != nil && f.Ids.Len() > 0 {
		var serials map[string]interface{}
		// Use type inference without importing extra packages by discarding the
		// concrete value type via a two-step assignment.
		if tmp, idErr := d.GetSerialsByIds(f.Ids); idErr != nil {
			return 0, false, idErr
		} else {
			// Reassign to a map with empty interface values to avoid referencing
			// the concrete Uint40 type here.
			serials = make(map[string]interface{}, len(tmp))
			for k := range tmp {
				serials[k] = struct{}{}
			}
		}
		return len(serials), false, nil
	}

	// Otherwise, query for candidate Id/Pubkey/Timestamp triplets and count them.
	if idPkTs, qErr := d.QueryForIds(c, f); qErr != nil {
		return 0, false, qErr
	} else {
		return len(idPkTs), false, nil
	}
}
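Assuming an open *database.D and a parsed *filter.F, a NIP-45 COUNT responder built on this method might look like the fragment below; the JSON shape is illustrative, not the relay's actual envelope encoder:

```go
import (
	"context"
	"fmt"

	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/encoders/filter"
)

// countForSubscription sketches answering a COUNT request with CountEvents.
func countForSubscription(ctx context.Context, d *database.D, f *filter.F) (string, error) {
	count, approx, err := d.CountEvents(ctx, f)
	if err != nil {
		return "", err
	}
	// approx is always false in this implementation, but NIP-45 allows
	// signalling an approximate count.
	return fmt.Sprintf(`{"count":%d,"approximate":%t}`, count, approx), nil
}
```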
@@ -52,8 +52,18 @@ func New(
	}

	opts := badger.DefaultOptions(d.dataDir)
	opts.BlockCacheSize = int64(units.Gb)
	opts.BlockSize = units.Gb
	// Use sane defaults to avoid excessive memory usage during startup.
	// Badger's default BlockSize is small (e.g., 4KB). Overriding it to very large values
	// can cause massive allocations and OOM panics during deployments.
	// Set BlockCacheSize to a moderate value and keep BlockSize small.
	opts.BlockCacheSize = int64(256 * units.Mb) // 256 MB cache
	opts.BlockSize = 4 * units.Kb               // 4 KB block size
	// Prevent huge allocations during table building and memtable flush.
	// Badger's TableBuilder buffer is sized by BaseTableSize; ensure it's small.
	opts.BaseTableSize = 64 * units.Mb // 64 MB per table (default ~2MB, increased for fewer files but safe)
	opts.MemTableSize = 64 * units.Mb  // 64 MB memtable to match table size
	// Keep value log files to a moderate size as well
	opts.ValueLogFileSize = 256 * units.Mb // 256 MB value log files
	opts.CompactL0OnClose = true
	opts.LmaxCompaction = true
	opts.Compression = options.None
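These sizing knobs are plain Badger v4 options, so the same conservative profile can be tried in isolation; a minimal sketch (the /tmp path is just for the demo):

```go
package main

import (
	"log"

	badger "github.com/dgraph-io/badger/v4"
)

func main() {
	// Same conservative sizing as the diff above, with the unit math
	// spelled out instead of the repo's units package.
	const mb = 1 << 20
	opts := badger.DefaultOptions("/tmp/orly-badger-demo")
	opts.BlockCacheSize = 256 * mb // moderate cache instead of 1 GB
	opts.BlockSize = 4 * 1024      // keep default-sized 4 KB blocks
	opts.BaseTableSize = 64 * mb   // bounds the TableBuilder buffer
	opts.MemTableSize = 64 * mb
	opts.ValueLogFileSize = 256 * mb
	db, err := badger.Open(opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```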
@@ -153,5 +153,35 @@ func GetIndexesForEvent(ev *event.E, serial uint64) (
	if err = appendIndexBytes(&idxs, kindPubkeyIndex); chk.E(err) {
		return
	}

	// Word token indexes (from content)
	if len(ev.Content) > 0 {
		for _, h := range TokenHashes(ev.Content) {
			w := new(Word)
			w.FromWord(h) // 8-byte truncated hash
			wIdx := indexes.WordEnc(w, ser)
			if err = appendIndexBytes(&idxs, wIdx); chk.E(err) {
				return
			}
		}
	}
	// Extend full-text search to include all fields of all tags
	if ev.Tags != nil && ev.Tags.Len() > 0 {
		for _, t := range *ev.Tags {
			for _, field := range t.T { // include key and all values
				if len(field) == 0 {
					continue
				}
				for _, h := range TokenHashes(field) {
					w := new(Word)
					w.FromWord(h)
					wIdx := indexes.WordEnc(w, ser)
					if err = appendIndexBytes(&idxs, wIdx); chk.E(err) {
						return
					}
				}
			}
		}
	}
	return
}
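TokenHashes itself is not shown in this diff; purely as a rough illustration of the 8-byte truncated word-hash idea (with the skip rules the test file later in this diff describes: URLs, bare 64-char hex, nostr: URIs and #[n] mentions ignored), a hypothetical tokenizer might look like:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"regexp"
	"strings"
)

// skip matches tokens the indexer should ignore: URLs, nostr: URIs,
// #[n] mentions, and bare 64-character hex strings. Illustrative only.
var skip = regexp.MustCompile(`^(https?://|www\.|nostr:|#\[\d+\]$)|^[0-9a-f]{64}$`)

// tokenHashes lowercases, splits on whitespace, drops skipped tokens,
// and returns 8-byte truncated SHA-256 hashes of the remaining words.
func tokenHashes(content []byte) (hashes [][]byte) {
	for _, w := range strings.Fields(strings.ToLower(string(content))) {
		if skip.MatchString(w) {
			continue
		}
		h := sha256.Sum256([]byte(w))
		hashes = append(hashes, h[:8])
	}
	return
}

func main() {
	for _, h := range tokenHashes([]byte("Alpha beta https://example.com")) {
		fmt.Printf("%x\n", h)
	}
}
```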
@@ -113,6 +113,27 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
		return
	}

	// Word search: if Search field is present, generate word index ranges
	if len(f.Search) > 0 {
		for _, h := range TokenHashes(f.Search) {
			w := new(types2.Word)
			w.FromWord(h)
			buf := new(bytes.Buffer)
			idx := indexes.WordEnc(w, nil)
			if err = idx.MarshalWrite(buf); chk.E(err) {
				return
			}
			b := buf.Bytes()
			end := make([]byte, len(b))
			copy(end, b)
			for i := 0; i < 5; i++ { // match any serial
				end = append(end, 0xff)
			}
			idxs = append(idxs, Range{b, end})
		}
		return
	}

	caStart := new(types2.Uint64)
	caEnd := new(types2.Uint64)
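The 0xff padding above turns an exact word-hash key into a range covering every possible 5-byte serial suffix; a tiny standalone sketch of the same trick:

```go
package main

import (
	"bytes"
	"fmt"
)

// rangeForPrefix returns [start, end] keys such that every key formed by
// the prefix plus any 5-byte serial sorts inside the range.
func rangeForPrefix(prefix []byte) (start, end []byte) {
	start = append([]byte(nil), prefix...)
	end = append(append([]byte(nil), prefix...), 0xff, 0xff, 0xff, 0xff, 0xff)
	return
}

func main() {
	start, end := rangeForPrefix([]byte("wrd\x01\x02"))
	key := append([]byte("wrd\x01\x02"), 0x00, 0x00, 0x00, 0x00, 0x07) // serial 7
	inRange := bytes.Compare(key, start) >= 0 && bytes.Compare(key, end) <= 0
	fmt.Println("key in range:", inRange)
}
```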
@@ -69,6 +69,7 @@ const (
	TagPubkeyPrefix     = I("tpc") // tag, pubkey, created at
	TagKindPubkeyPrefix = I("tkp") // tag, kind, pubkey, created at

	WordPrefix       = I("wrd") // word hash, serial
	ExpirationPrefix = I("exp") // timestamp of expiration
	VersionPrefix    = I("ver") // database version number, for triggering reindexes when new keys are added (policy is add-only).
)
@@ -106,6 +107,8 @@ func Prefix(prf int) (i I) {
		return ExpirationPrefix
	case Version:
		return VersionPrefix
	case Word:
		return WordPrefix
	}
	return
}
@@ -147,6 +150,8 @@ func Identify(r io.Reader) (i int, err error) {

	case ExpirationPrefix:
		i = Expiration
	case WordPrefix:
		i = Word
	}
	return
}
@@ -233,6 +238,21 @@ func FullIdPubkeyDec(
	return New(NewPrefix(), ser, fid, p, ca)
}

// Word index for tokenized search terms
//
// 3 prefix|8 word-hash|5 serial
var Word = next()

func WordVars() (w *types.Word, ser *types.Uint40) {
	return new(types.Word), new(types.Uint40)
}
func WordEnc(w *types.Word, ser *types.Uint40) (enc *T) {
	return New(NewPrefix(Word), w, ser)
}
func WordDec(w *types.Word, ser *types.Uint40) (enc *T) {
	return New(NewPrefix(), w, ser)
}

// CreatedAt is an index that allows search for the timestamp on the event.
//
// 3 prefix|8 timestamp|5 serial
@@ -14,7 +14,7 @@ import (
)

const (
	currentVersion uint32 = 1
	currentVersion uint32 = 2
)

func (d *D) RunMigrations() {
@@ -56,22 +56,8 @@ func (d *D) RunMigrations() {
	}
	if dbVersion == 0 {
		log.D.F("no version tag found, creating...")
		// write the version tag now
		if err = d.Update(
			func(txn *badger.Txn) (err error) {
				buf := new(bytes.Buffer)
				vv := new(types.Uint32)
				vv.Set(currentVersion)
				log.I.S(vv)
				if err = indexes.VersionEnc(vv).MarshalWrite(buf); chk.E(err) {
					return
				}
				if err = txn.Set(buf.Bytes(), nil); chk.E(err) {
					return
				}
				return
			},
		); chk.E(err) {
		// write the version tag now (ensure any old tags are removed first)
		if err = d.writeVersionTag(currentVersion); chk.E(err) {
			return
		}
	}
@@ -79,7 +65,136 @@ func (d *D) RunMigrations() {
		log.I.F("migrating to version 1...")
		// the first migration is expiration tags
		d.UpdateExpirationTags()
		// bump to version 1
		_ = d.writeVersionTag(1)
	}
	if dbVersion < 2 {
		log.I.F("migrating to version 2...")
		// backfill word indexes
		d.UpdateWordIndexes()
		// bump to version 2
		_ = d.writeVersionTag(2)
	}
}

// writeVersionTag writes a new version tag key to the database (no value)
func (d *D) writeVersionTag(ver uint32) (err error) {
	return d.Update(
		func(txn *badger.Txn) (err error) {
			// delete any existing version keys first (there should only be one, but be safe)
			verPrf := new(bytes.Buffer)
			if _, err = indexes.VersionPrefix.Write(verPrf); chk.E(err) {
				return
			}
			it := txn.NewIterator(badger.IteratorOptions{Prefix: verPrf.Bytes()})
			defer it.Close()
			for it.Rewind(); it.Valid(); it.Next() {
				item := it.Item()
				key := item.KeyCopy(nil)
				if err = txn.Delete(key); chk.E(err) {
					return
				}
			}

			// now write the new version key
			buf := new(bytes.Buffer)
			vv := new(types.Uint32)
			vv.Set(ver)
			if err = indexes.VersionEnc(vv).MarshalWrite(buf); chk.E(err) {
				return
			}
			return txn.Set(buf.Bytes(), nil)
		},
	)
}

func (d *D) UpdateWordIndexes() {
	log.T.F("updating word indexes...")
	var err error
	var wordIndexes [][]byte
	// iterate all events and generate word index keys from content and tags
	if err = d.View(
		func(txn *badger.Txn) (err error) {
			prf := new(bytes.Buffer)
			if err = indexes.EventEnc(nil).MarshalWrite(prf); chk.E(err) {
				return
			}
			it := txn.NewIterator(badger.IteratorOptions{Prefix: prf.Bytes()})
			defer it.Close()
			for it.Rewind(); it.Valid(); it.Next() {
				item := it.Item()
				var val []byte
				if val, err = item.ValueCopy(nil); chk.E(err) {
					continue
				}
				// decode the event
				ev := new(event.E)
				if err = ev.UnmarshalBinary(bytes.NewBuffer(val)); chk.E(err) {
					continue
				}
				// log.I.F("updating word indexes for event: %s", ev.Serialize())
				// read serial from key
				key := item.Key()
				ser := indexes.EventVars()
				if err = indexes.EventDec(ser).UnmarshalRead(bytes.NewBuffer(key)); chk.E(err) {
					continue
				}
				// collect unique word hashes for this event
				seen := make(map[string]struct{})
				// from content
				if len(ev.Content) > 0 {
					for _, h := range TokenHashes(ev.Content) {
						seen[string(h)] = struct{}{}
					}
				}
				// from all tag fields (key and values)
				if ev.Tags != nil && ev.Tags.Len() > 0 {
					for _, t := range *ev.Tags {
						for _, field := range t.T {
							if len(field) == 0 {
								continue
							}
							for _, h := range TokenHashes(field) {
								seen[string(h)] = struct{}{}
							}
						}
					}
				}
				// build keys
				for k := range seen {
					w := new(types.Word)
					w.FromWord([]byte(k))
					buf := new(bytes.Buffer)
					if err = indexes.WordEnc(
						w, ser,
					).MarshalWrite(buf); chk.E(err) {
						continue
					}
					wordIndexes = append(wordIndexes, buf.Bytes())
				}
			}
			return
		},
	); chk.E(err) {
		return
	}
	// sort the indexes for ordered writes
	sort.Slice(
		wordIndexes, func(i, j int) bool {
			return bytes.Compare(
				wordIndexes[i], wordIndexes[j],
			) < 0
		},
	)
	// write in a batch
	batch := d.NewWriteBatch()
	for _, v := range wordIndexes {
		if err = batch.Set(v, nil); chk.E(err) {
			continue
		}
	}
	_ = batch.Flush()
	log.T.F("finished updating word indexes...")
}

func (d *D) UpdateExpirationTags() {
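The add-only version policy above generalizes to a simple pattern: one backfill step per version bump, each followed by a version-tag write. A schematic, self-contained sketch (not the relay's code):

```go
package main

import "fmt"

// migration backfills one schema version; migrations are add-only, so
// slice index i corresponds to the bump from version i to i+1.
type migration func() error

func runMigrations(dbVersion uint32, steps []migration, write func(uint32) error) error {
	for v := dbVersion; v < uint32(len(steps)); v++ {
		fmt.Printf("migrating to version %d...\n", v+1)
		if err := steps[v](); err != nil {
			return err
		}
		if err := write(v + 1); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	steps := []migration{
		func() error { fmt.Println("backfill expiration tags"); return nil },
		func() error { fmt.Println("backfill word indexes"); return nil },
	}
	_ = runMigrations(0, steps, func(v uint32) error {
		fmt.Println("version tag ->", v)
		return nil
	})
}
```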
194
pkg/database/query-events-search_test.go
Normal file
@@ -0,0 +1,194 @@
package database

import (
    "context"
    "os"
    "testing"
    "time"

    "lol.mleku.dev/chk"
    "next.orly.dev/pkg/crypto/p256k"
    "next.orly.dev/pkg/encoders/event"
    "next.orly.dev/pkg/encoders/filter"
    "next.orly.dev/pkg/encoders/kind"
    "next.orly.dev/pkg/encoders/tag"
    "next.orly.dev/pkg/encoders/timestamp"
)

// helper to create a fresh DB
func newTestDB(t *testing.T) (*D, context.Context, context.CancelFunc, string) {
    t.Helper()
    tempDir, err := os.MkdirTemp("", "search-db-*")
    if err != nil {
        t.Fatalf("Failed to create temp dir: %v", err)
    }
    ctx, cancel := context.WithCancel(context.Background())
    db, err := New(ctx, cancel, tempDir, "error")
    if err != nil {
        cancel()
        os.RemoveAll(tempDir)
        t.Fatalf("Failed to init DB: %v", err)
    }
    return db, ctx, cancel, tempDir
}

// TestQueryEventsBySearchTerms creates a small set of events with content and tags,
// saves them, then queries using filter.Search to ensure the word index works.
func TestQueryEventsBySearchTerms(t *testing.T) {
    db, ctx, cancel, tempDir := newTestDB(t)
    defer func() {
        // cancel context first to stop background routines cleanly
        cancel()
        db.Close()
        os.RemoveAll(tempDir)
    }()

    // signer for all events
    sign := new(p256k.Signer)
    if err := sign.Generate(); chk.E(err) {
        t.Fatalf("signer generate: %v", err)
    }

    now := timestamp.Now().V

    // Events to cover tokenizer rules:
    // - regular words
    // - URLs ignored
    // - 64-char hex ignored
    // - nostr: URIs ignored
    // - #[n] mentions ignored
    // - tag fields included in search

    // 1. Contains words: "alpha beta", plus URL and hex (ignored)
    ev1 := event.New()
    ev1.Kind = kind.TextNote.K
    ev1.Pubkey = sign.Pub()
    ev1.CreatedAt = now - 5
    ev1.Content = []byte("Alpha beta visit https://example.com deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
    ev1.Tags = tag.NewS()
    ev1.Sign(sign)
    if _, _, err := db.SaveEvent(ctx, ev1); err != nil {
        t.Fatalf("save ev1: %v", err)
    }

    // 2. Contains overlap word "beta" and unique "gamma" and nostr: URI ignored
    ev2 := event.New()
    ev2.Kind = kind.TextNote.K
    ev2.Pubkey = sign.Pub()
    ev2.CreatedAt = now - 4
    ev2.Content = []byte("beta and GAMMA with nostr:nevent1qqqqq")
    ev2.Tags = tag.NewS()
    ev2.Sign(sign)
    if _, _, err := db.SaveEvent(ctx, ev2); err != nil {
        t.Fatalf("save ev2: %v", err)
    }

    // 3. Contains only a URL (should not create word tokens) and mention #[1] (ignored)
    ev3 := event.New()
    ev3.Kind = kind.TextNote.K
    ev3.Pubkey = sign.Pub()
    ev3.CreatedAt = now - 3
    ev3.Content = []byte("see www.example.org #[1]")
    ev3.Tags = tag.NewS()
    ev3.Sign(sign)
    if _, _, err := db.SaveEvent(ctx, ev3); err != nil {
        t.Fatalf("save ev3: %v", err)
    }

    // 4. No content words, but tag value has searchable words: "delta epsilon"
    ev4 := event.New()
    ev4.Kind = kind.TextNote.K
    ev4.Pubkey = sign.Pub()
    ev4.CreatedAt = now - 2
    ev4.Content = []byte("")
    ev4.Tags = tag.NewS()
    *ev4.Tags = append(*ev4.Tags, tag.NewFromAny("t", "delta epsilon"))
    ev4.Sign(sign)
    if _, _, err := db.SaveEvent(ctx, ev4); err != nil {
        t.Fatalf("save ev4: %v", err)
    }

    // 5. Another event with both content and tag tokens for ordering checks
    ev5 := event.New()
    ev5.Kind = kind.TextNote.K
    ev5.Pubkey = sign.Pub()
    ev5.CreatedAt = now - 1
    ev5.Content = []byte("alpha DELTA mixed-case and link http://foo.bar")
    ev5.Tags = tag.NewS()
    *ev5.Tags = append(*ev5.Tags, tag.NewFromAny("t", "zeta"))
    ev5.Sign(sign)
    if _, _, err := db.SaveEvent(ctx, ev5); err != nil {
        t.Fatalf("save ev5: %v", err)
    }

    // Small sleep to ensure created_at ordering is the only factor
    time.Sleep(5 * time.Millisecond)

    // Helper to run a search and return IDs
    run := func(q string) ([]*event.E, error) {
        f := &filter.F{Search: []byte(q)}
        return db.QueryEvents(ctx, f)
    }

    // Single-term search: alpha -> should match ev1 and ev5 ordered by created_at desc (ev5 newer)
    if evs, err := run("alpha"); err != nil {
        t.Fatalf("search alpha: %v", err)
    } else {
        if len(evs) != 2 {
            t.Fatalf("alpha expected 2 results, got %d", len(evs))
        }
        if !(evs[0].CreatedAt >= evs[1].CreatedAt) {
            t.Fatalf("results not ordered by created_at desc")
        }
    }

    // Overlap term beta -> ev1 and ev2
    if evs, err := run("beta"); err != nil {
        t.Fatalf("search beta: %v", err)
    } else if len(evs) != 2 {
        t.Fatalf("beta expected 2 results, got %d", len(evs))
    }

    // Unique term gamma -> only ev2
    if evs, err := run("gamma"); err != nil {
        t.Fatalf("search gamma: %v", err)
    } else if len(evs) != 1 {
        t.Fatalf("gamma expected 1 result, got %d", len(evs))
    }

    // URL terms should be ignored: example -> appears only as URL in ev1/ev3/ev5; tokenizer ignores URLs so expect 0
    if evs, err := run("example"); err != nil {
        t.Fatalf("search example: %v", err)
    } else if len(evs) != 0 {
        t.Fatalf("example expected 0 results (URL tokens ignored), got %d", len(evs))
    }

    // Tag words searchable: delta should match ev4 and ev5 (delta in tag for ev4, in content for ev5)
    if evs, err := run("delta"); err != nil {
        t.Fatalf("search delta: %v", err)
    } else if len(evs) != 2 {
        t.Fatalf("delta expected 2 results, got %d", len(evs))
    }

    // Very short token ignored: single-letter should yield 0
    if evs, err := run("a"); err != nil {
        t.Fatalf("search short token: %v", err)
    } else if len(evs) != 0 {
        t.Fatalf("single-letter expected 0 results, got %d", len(evs))
    }

    // 64-char hex should be ignored
    hex64 := "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
    if evs, err := run(hex64); err != nil {
        t.Fatalf("search hex64: %v", err)
    } else if len(evs) != 0 {
        t.Fatalf("hex64 expected 0 results, got %d", len(evs))
    }

    // nostr: scheme ignored
    if evs, err := run("nostr:nevent1qqqqq"); err != nil {
        t.Fatalf("search nostr: %v", err)
    } else if len(evs) != 0 {
        t.Fatalf("nostr: expected 0 results, got %d", len(evs))
    }
}
@@ -7,13 +7,16 @@ import (

    "lol.mleku.dev/chk"
    "next.orly.dev/pkg/database/indexes/types"
    "next.orly.dev/pkg/encoders/event"
    "next.orly.dev/pkg/encoders/filter"
    "next.orly.dev/pkg/interfaces/store"
)

// QueryForIds retrieves a list of IdPkTs based on the provided filter.
// It supports filtering by ranges and tags but disallows filtering by Ids.
// Results are sorted by timestamp in reverse chronological order.
// Results are sorted by timestamp in reverse chronological order by default.
// When a search query is present, results are ranked by a 50/50 blend of
// match count (how many distinct search terms matched) and recency.
// Returns an error if the filter contains Ids or if any operation fails.
func (d *D) QueryForIds(c context.Context, f *filter.F) (
    idPkTs []*store.IdPkTs, err error,
@@ -29,6 +32,9 @@ func (d *D) QueryForIds(c context.Context, f *filter.F) (
    }
    var results []*store.IdPkTs
    var founds []*types.Uint40
    // When searching, we want to count how many index ranges (search terms)
    // matched each note. We'll track counts by serial.
    counts := make(map[uint64]int)
    for _, idx := range idxs {
        if founds, err = d.GetSerialsByRange(idx); chk.E(err) {
            return
@@ -37,6 +43,12 @@ func (d *D) QueryForIds(c context.Context, f *filter.F) (
        if tmp, err = d.GetFullIdPubkeyBySerials(founds); chk.E(err) {
            return
        }
        // If this query is driven by Search terms, increment count per serial
        if len(f.Search) > 0 {
            for _, v := range tmp {
                counts[v.Ser]++
            }
        }
        results = append(results, tmp...)
    }
    // deduplicate in case this somehow happened (such as two or more
@@ -48,12 +60,109 @@ func (d *D) QueryForIds(c context.Context, f *filter.F) (
            idPkTs = append(idPkTs, idpk)
        }
    }
    // sort results by timestamp in reverse chronological order
    sort.Slice(
        idPkTs, func(i, j int) bool {
            return idPkTs[i].Ts > idPkTs[j].Ts
        },
    )

    // If search is combined with Authors/Kinds/Tags, require events to match ALL of those present fields in addition to the word match.
    if len(f.Search) > 0 && ((f.Authors != nil && f.Authors.Len() > 0) || (f.Kinds != nil && f.Kinds.Len() > 0) || (f.Tags != nil && f.Tags.Len() > 0)) {
        // Build serial list for fetching full events
        serials := make([]*types.Uint40, 0, len(idPkTs))
        for _, v := range idPkTs {
            s := new(types.Uint40)
            s.Set(v.Ser)
            serials = append(serials, s)
        }
        var evs map[uint64]*event.E
        if evs, err = d.FetchEventsBySerials(serials); chk.E(err) {
            return
        }
        filtered := make([]*store.IdPkTs, 0, len(idPkTs))
        for _, v := range idPkTs {
            ev, ok := evs[v.Ser]
            if !ok || ev == nil {
                continue
            }
            matchesAll := true
            if f.Authors != nil && f.Authors.Len() > 0 && !f.Authors.Contains(ev.Pubkey) {
                matchesAll = false
            }
            if matchesAll && f.Kinds != nil && f.Kinds.Len() > 0 && !f.Kinds.Contains(ev.Kind) {
                matchesAll = false
            }
            if matchesAll && f.Tags != nil && f.Tags.Len() > 0 {
                // Require the event to satisfy all tag filters as in MatchesIgnoringTimestampConstraints
                tagOK := true
                for _, t := range *f.Tags {
                    if t.Len() < 2 {
                        continue
                    }
                    key := t.Key()
                    values := t.T[1:]
                    if !ev.Tags.ContainsAny(key, values) {
                        tagOK = false
                        break
                    }
                }
                if !tagOK {
                    matchesAll = false
                }
            }
            if matchesAll {
                filtered = append(filtered, v)
            }
        }
        idPkTs = filtered
    }

    if len(f.Search) == 0 {
        // No search query: sort by timestamp in reverse chronological order
        sort.Slice(
            idPkTs, func(i, j int) bool {
                return idPkTs[i].Ts > idPkTs[j].Ts
            },
        )
    } else {
        // Search query present: blend match count relevance with recency (50/50)
        // Normalize both match count and timestamp to [0,1] and compute score.
        var maxCount int
        var minTs, maxTs int64
        if len(idPkTs) > 0 {
            minTs, maxTs = idPkTs[0].Ts, idPkTs[0].Ts
        }
        for _, v := range idPkTs {
            if c := counts[v.Ser]; c > maxCount {
                maxCount = c
            }
            if v.Ts < minTs {
                minTs = v.Ts
            }
            if v.Ts > maxTs {
                maxTs = v.Ts
            }
        }
        // Precompute denominator to avoid div-by-zero
        tsSpan := maxTs - minTs
        if tsSpan <= 0 {
            tsSpan = 1
        }
        if maxCount <= 0 {
            maxCount = 1
        }
        sort.Slice(
            idPkTs, func(i, j int) bool {
                ci := float64(counts[idPkTs[i].Ser]) / float64(maxCount)
                cj := float64(counts[idPkTs[j].Ser]) / float64(maxCount)
                ai := float64(idPkTs[i].Ts-minTs) / float64(tsSpan)
                aj := float64(idPkTs[j].Ts-minTs) / float64(tsSpan)
                si := 0.5*ci + 0.5*ai
                sj := 0.5*cj + 0.5*aj
                if si == sj {
                    // tie-break by recency
                    return idPkTs[i].Ts > idPkTs[j].Ts
                }
                return si > sj
            },
        )
    }

    if f.Limit != nil && len(idPkTs) > int(*f.Limit) {
        idPkTs = idPkTs[:*f.Limit]
    }
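For intuition, the 50/50 ranking above reduces to one normalized sum per event. A minimal standalone sketch of the same arithmetic (the `blendScore` name is illustrative, not part of the relay's API):

[source,go]
----
package main

import "fmt"

// blendScore mirrors the ranking in QueryForIds: normalize the per-event
// match count and timestamp into [0,1], then weight each half equally.
func blendScore(count, maxCount int, ts, minTs, tsSpan int64) float64 {
    c := float64(count) / float64(maxCount)
    a := float64(ts-minTs) / float64(tsSpan)
    return 0.5*c + 0.5*a
}

func main() {
    // An old event matching all 3 search terms scores 0.5; the newest event
    // matching only 1 term scores ~0.667, so relevance and recency trade off.
    fmt.Println(blendScore(3, 3, 100, 100, 900))  // 0.5
    fmt.Println(blendScore(1, 3, 1000, 100, 900)) // ~0.667
}
----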
@@ -9,14 +9,23 @@ import (

    "github.com/dgraph-io/badger/v4"
    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/database/indexes"
    "next.orly.dev/pkg/database/indexes/types"
    "next.orly.dev/pkg/encoders/event"
    "next.orly.dev/pkg/encoders/filter"
    "next.orly.dev/pkg/encoders/hex"
    "next.orly.dev/pkg/encoders/kind"
    "next.orly.dev/pkg/encoders/tag"
)

var (
    // ErrOlderThanExisting is returned when a candidate event is older than an existing replaceable/addressable event.
    ErrOlderThanExisting = errors.New("older than existing event")
    // ErrMissingDTag is returned when a parameterized replaceable event lacks the required 'd' tag.
    ErrMissingDTag = errors.New("event is missing a d tag identifier")
)

func (d *D) GetSerialsFromFilter(f *filter.F) (
    sers types.Uint40s, err error,
) {
@@ -34,6 +43,65 @@ func (d *D) GetSerialsFromFilter(f *filter.F) (
    return
}

// WouldReplaceEvent checks if the provided event would replace existing events
// based on Nostr's replaceable or parameterized replaceable semantics. It
// returns true along with the serials of events that should be replaced if the
// candidate is newer-or-equal. If an existing event is newer, it returns
// (false, serials, ErrOlderThanExisting). If no conflicts exist, it returns
// (false, nil, nil).
func (d *D) WouldReplaceEvent(ev *event.E) (bool, types.Uint40s, error) {
    // Only relevant for replaceable or parameterized replaceable kinds
    if !(kind.IsReplaceable(ev.Kind) || kind.IsParameterizedReplaceable(ev.Kind)) {
        return false, nil, nil
    }

    var f *filter.F
    if kind.IsReplaceable(ev.Kind) {
        f = &filter.F{
            Authors: tag.NewFromBytesSlice(ev.Pubkey),
            Kinds:   kind.NewS(kind.New(ev.Kind)),
        }
    } else {
        // parameterized replaceable requires 'd' tag
        dTag := ev.Tags.GetFirst([]byte("d"))
        if dTag == nil {
            return false, nil, ErrMissingDTag
        }
        f = &filter.F{
            Authors: tag.NewFromBytesSlice(ev.Pubkey),
            Kinds:   kind.NewS(kind.New(ev.Kind)),
            Tags: tag.NewS(
                tag.NewFromAny("d", dTag.Value()),
            ),
        }
    }

    sers, err := d.GetSerialsFromFilter(f)
    if chk.E(err) {
        return false, nil, err
    }
    if len(sers) == 0 {
        return false, nil, nil
    }

    // Determine if any existing event is newer than the candidate
    shouldReplace := true
    for _, s := range sers {
        oldEv, ferr := d.FetchEventBySerial(s)
        if chk.E(ferr) {
            continue
        }
        if ev.CreatedAt < oldEv.CreatedAt {
            shouldReplace = false
            break
        }
    }
    if shouldReplace {
        return true, sers, nil
    }
    return false, sers, ErrOlderThanExisting
}

// SaveEvent saves an event to the database, generating all the necessary indexes.
func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
    if ev == nil {
@@ -66,117 +134,37 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
        err = fmt.Errorf("blocked: %s", err.Error())
        return
    }
    // check for replacement
    if kind.IsReplaceable(ev.Kind) {
        // find the events and check timestamps before deleting
        f := &filter.F{
            Authors: tag.NewFromBytesSlice(ev.Pubkey),
            Kinds:   kind.NewS(kind.New(ev.Kind)),
        }
    // check for replacement (separated check vs deletion)
    if kind.IsReplaceable(ev.Kind) || kind.IsParameterizedReplaceable(ev.Kind) {
        var wouldReplace bool
        var sers types.Uint40s
        if sers, err = d.GetSerialsFromFilter(f); chk.E(err) {
        var werr error
        if wouldReplace, sers, werr = d.WouldReplaceEvent(ev); werr != nil {
            if errors.Is(werr, ErrOlderThanExisting) {
                if kind.IsReplaceable(ev.Kind) {
                    err = errors.New("blocked: event is older than existing replaceable event")
                } else {
                    err = errors.New("blocked: event is older than existing addressable event")
                }
                return
            }
            if errors.Is(werr, ErrMissingDTag) {
                // keep behavior consistent with previous implementation
                err = ErrMissingDTag
                return
            }
            // any other error
            return
        }
        // if found, check timestamps before deleting
        if len(sers) > 0 {
            var shouldReplace bool = true
        if wouldReplace {
            for _, s := range sers {
                var oldEv *event.E
                if oldEv, err = d.FetchEventBySerial(s); chk.E(err) {
                    continue
                }
                // Only replace if the new event is newer or same timestamp
                if ev.CreatedAt < oldEv.CreatedAt {
                    // log.I.F(
                    //     "SaveEvent: rejecting older replaceable event ID=%s (created_at=%d) - existing event ID=%s (created_at=%d)",
                    //     hex.Enc(ev.ID), ev.CreatedAt, hex.Enc(oldEv.ID),
                    //     oldEv.CreatedAt,
                    // )
                    shouldReplace = false
                    break
                }
            }
            if shouldReplace {
                for _, s := range sers {
                    var oldEv *event.E
                    if oldEv, err = d.FetchEventBySerial(s); chk.E(err) {
                        continue
                    }
                    // log.I.F(
                    //     "SaveEvent: replacing older replaceable event ID=%s (created_at=%d) with newer event ID=%s (created_at=%d)",
                    //     hex.Enc(oldEv.ID), oldEv.CreatedAt, hex.Enc(ev.ID),
                    //     ev.CreatedAt,
                    // )
                    if err = d.DeleteEventBySerial(
                        c, s, oldEv,
                    ); chk.E(err) {
                        continue
                    }
                }
            } else {
                // Don't save the older event - return an error
                err = errors.New("blocked: event is older than existing replaceable event")
                return
            }
        }
    } else if kind.IsParameterizedReplaceable(ev.Kind) {
        // find the events and check timestamps before deleting
        dTag := ev.Tags.GetFirst([]byte("d"))
        if dTag == nil {
            err = errors.New("event is missing a d tag identifier")
            return
        }
        f := &filter.F{
            Authors: tag.NewFromBytesSlice(ev.Pubkey),
            Kinds:   kind.NewS(kind.New(ev.Kind)),
            Tags: tag.NewS(
                tag.NewFromAny("d", dTag.Value()),
            ),
        }
        var sers types.Uint40s
        if sers, err = d.GetSerialsFromFilter(f); chk.E(err) {
            return
        }
        // if found, check timestamps before deleting
        if len(sers) > 0 {
            var shouldReplace bool = true
            for _, s := range sers {
                var oldEv *event.E
                if oldEv, err = d.FetchEventBySerial(s); chk.E(err) {
                if err = d.DeleteEventBySerial(c, s, oldEv); chk.E(err) {
                    continue
                }
                // Only replace if the new event is newer or same timestamp
                if ev.CreatedAt < oldEv.CreatedAt {
                    // log.I.F(
                    //     "SaveEvent: rejecting older addressable event ID=%s (created_at=%d) - existing event ID=%s (created_at=%d)",
                    //     hex.Enc(ev.ID), ev.CreatedAt, hex.Enc(oldEv.ID),
                    //     oldEv.CreatedAt,
                    // )
                    shouldReplace = false
                    break
                }
            }
            if shouldReplace {
                for _, s := range sers {
                    var oldEv *event.E
                    if oldEv, err = d.FetchEventBySerial(s); chk.E(err) {
                        continue
                    }
                    // log.I.F(
                    //     "SaveEvent: replacing older addressable event ID=%s (created_at=%d) with newer event ID=%s (created_at=%d)",
                    //     hex.Enc(oldEv.ID), oldEv.CreatedAt, hex.Enc(ev.ID),
                    //     ev.CreatedAt,
                    // )
                    if err = d.DeleteEventBySerial(
                        c, s, oldEv,
                    ); chk.E(err) {
                        continue
                    }
                }
            } else {
                // Don't save the older event - return an error
                err = errors.New("blocked: event is older than existing addressable event")
                return
            }
        }
    }
@@ -230,10 +218,10 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
        return
        },
    )
    // log.T.F(
    //     "total data written: %d bytes keys %d bytes values for event ID %s", kc,
    //     vc, hex.Enc(ev.ID),
    // )
    log.T.F(
        "total data written: %d bytes keys %d bytes values for event ID %s", kc,
        vc, hex.Enc(ev.ID),
    )
    // log.T.C(
    //     func() string {
    //         return fmt.Sprintf("event:\n%s\n", ev.Serialize())
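A hedged sketch of the intended call pattern for WouldReplaceEvent, assuming it sits alongside the code above in package database (trySaveReplaceable is a hypothetical helper, not part of this diff):

[source,go]
----
// trySaveReplaceable shows how a caller is expected to consume
// WouldReplaceEvent's three results before writing a replaceable event.
func (d *D) trySaveReplaceable(ev *event.E) error {
    wouldReplace, sers, err := d.WouldReplaceEvent(ev)
    if err != nil {
        // ErrOlderThanExisting: an existing event is newer; reject the write.
        // ErrMissingDTag: a parameterized replaceable event lacks a 'd' tag.
        return err
    }
    if wouldReplace {
        // sers holds the serials of the events ev supersedes; SaveEvent
        // removes these via DeleteEventBySerial before storing ev.
        _ = sers
    }
    // No conflict: store ev as usual.
    return nil
}
----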
pkg/database/tokenize.go (new file, 178 lines)
@@ -0,0 +1,178 @@
package database

import (
    "strings"
    "unicode"

    sha "next.orly.dev/pkg/crypto/sha256"
)

// TokenHashes extracts unique word hashes (8-byte truncated sha256) from content.
// Rules:
// - Unicode-aware: words are sequences of letters or numbers.
// - Lowercased using unicode case mapping.
// - Ignore URLs (starting with http://, https://, www., or containing "://").
// - Ignore nostr: URIs and #[n] mentions.
// - Ignore words shorter than 2 runes.
// - Exclude 64-character hexadecimal strings (likely IDs/pubkeys).
func TokenHashes(content []byte) [][]byte {
    s := string(content)
    var out [][]byte
    seen := make(map[string]struct{})

    i := 0
    for i < len(s) {
        r, size := rune(s[i]), 1
        if r >= 0x80 {
            r, size = utf8DecodeRuneInString(s[i:])
        }

        // Skip whitespace
        if unicode.IsSpace(r) {
            i += size
            continue
        }

        // Skip URLs and schemes
        if hasPrefixFold(s[i:], "http://") || hasPrefixFold(s[i:], "https://") || hasPrefixFold(s[i:], "nostr:") || hasPrefixFold(s[i:], "www.") {
            i = skipUntilSpace(s, i)
            continue
        }
        // If token contains "://" ahead, treat as URL and skip to space
        if j := strings.Index(s[i:], "://"); j == 0 || (j > 0 && isWordStart(r)) {
            // Only if it's at start of token
            before := s[i : i+j]
            if len(before) == 0 || allAlphaNum(before) {
                i = skipUntilSpace(s, i)
                continue
            }
        }
        // Skip #[n] mentions
        if r == '#' && i+size < len(s) && s[i+size] == '[' {
            end := strings.IndexByte(s[i:], ']')
            if end >= 0 {
                i += end + 1
                continue
            }
        }

        // Collect a word
        start := i
        var runes []rune
        for i < len(s) {
            r2, size2 := rune(s[i]), 1
            if r2 >= 0x80 {
                r2, size2 = utf8DecodeRuneInString(s[i:])
            }
            if unicode.IsLetter(r2) || unicode.IsNumber(r2) {
                runes = append(runes, unicode.ToLower(r2))
                i += size2
                continue
            }
            break
        }
        // If we didn't consume any rune for a word, advance by one rune to avoid stalling
        if i == start {
            _, size2 := utf8DecodeRuneInString(s[i:])
            i += size2
            continue
        }
        if len(runes) >= 2 {
            w := string(runes)
            // Exclude 64-char hex strings
            if isHex64(w) {
                continue
            }
            if _, ok := seen[w]; !ok {
                seen[w] = struct{}{}
                h := sha.Sum256([]byte(w))
                out = append(out, h[:8])
            }
        }
    }
    return out
}

func hasPrefixFold(s, prefix string) bool {
    if len(s) < len(prefix) {
        return false
    }
    for i := 0; i < len(prefix); i++ {
        c := s[i]
        p := prefix[i]
        if c == p {
            continue
        }
        // ASCII case-insensitive
        if 'A' <= c && c <= 'Z' {
            c = c - 'A' + 'a'
        }
        if 'A' <= p && p <= 'Z' {
            p = p - 'A' + 'a'
        }
        if c != p {
            return false
        }
    }
    return true
}

func skipUntilSpace(s string, i int) int {
    for i < len(s) {
        r, size := rune(s[i]), 1
        if r >= 0x80 {
            r, size = utf8DecodeRuneInString(s[i:])
        }
        if unicode.IsSpace(r) {
            return i
        }
        i += size
    }
    return i
}

func allAlphaNum(s string) bool {
    for _, r := range s {
        if !(unicode.IsLetter(r) || unicode.IsNumber(r)) {
            return false
        }
    }
    return true
}

func isWordStart(r rune) bool { return unicode.IsLetter(r) || unicode.IsNumber(r) }

// Minimal utf8 rune decode without importing utf8 to avoid extra deps elsewhere
func utf8DecodeRuneInString(s string) (r rune, size int) {
    // Fallback to standard library if available; however, using basic decoding
    for i := 1; i <= 4 && i <= len(s); i++ {
        r, size = rune(s[0]), 1
        if r < 0x80 {
            return r, 1
        }
        // Use stdlib for correctness
        return []rune(s[:i])[0], len(string([]rune(s[:i])[0]))
    }
    return rune(s[0]), 1
}

// isHex64 returns true if s is exactly 64 hex characters (0-9, a-f, A-F)
func isHex64(s string) bool {
    if len(s) != 64 {
        return false
    }
    for i := 0; i < 64; i++ {
        c := s[i]
        if c >= '0' && c <= '9' {
            continue
        }
        if c >= 'a' && c <= 'f' {
            continue
        }
        if c >= 'A' && c <= 'F' {
            continue
        }
        return false
    }
    return true
}
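To see the tokenizer rules end to end, a small illustrative driver (the import path is taken from this diff; the expected count follows from the rules documented above):

[source,go]
----
package main

import (
    "fmt"

    "next.orly.dev/pkg/database"
)

func main() {
    // The URL, the nostr: URI, the #[1] mention and the single letter "a"
    // are all skipped; only "alpha" and "beta" produce 8-byte hashes.
    hashes := database.TokenHashes([]byte(
        "Alpha beta a https://example.com nostr:nevent1qqqqq #[1]",
    ))
    fmt.Println(len(hashes)) // expected: 2
    for _, h := range hashes {
        fmt.Printf("%x\n", h)
    }
}
----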
@@ -156,3 +156,21 @@ func (t *T) Relay() (key []byte) {
    }
    return
}

// ToSliceOfStrings returns the tag's bytes slices as a slice of strings. This
// method provides a convenient way to access the tag's contents in string format.
//
// # Return Values
//
// - s ([]string): A slice containing all tag elements converted to strings.
//
// # Expected Behaviour
//
// Returns an empty slice if the tag is empty, otherwise returns a new slice with
// each byte slice element converted to a string.
func (t *T) ToSliceOfStrings() (s []string) {
    for _, v := range t.T {
        s = append(s, string(v))
    }
    return
}
@@ -4,6 +4,7 @@ import (
    "bytes"

    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/utils"
)

@@ -83,6 +84,10 @@ func (s *S) MarshalJSON() (b []byte, err error) {
}

func (s *S) Marshal(dst []byte) (b []byte) {
    if s == nil {
        log.I.F("tags cannot be used without initialization")
        return
    }
    b = dst
    b = append(b, '[')
    for i, ss := range *s {
@@ -147,6 +152,9 @@ func (s *S) Unmarshal(b []byte) (r []byte, err error) {

// GetFirst returns the first tag.T that has the same Key as t.
func (s *S) GetFirst(t []byte) (first *T) {
    if s == nil || len(*s) < 1 {
        return
    }
    for _, tt := range *s {
        if tt.Len() == 0 {
            continue
@@ -159,7 +167,13 @@ func (s *S) GetFirst(t []byte) (first *T) {
}

func (s *S) GetAll(t []byte) (all []*T) {
    if s == nil || len(*s) < 1 {
        return
    }
    for _, tt := range *s {
        if len(tt.T) < 1 {
            continue
        }
        if utils.FastEqual(tt.T[0], t) {
            all = append(all, tt)
        }
@@ -174,3 +188,24 @@ func (s *S) GetTagElement(i int) (t *T) {
    t = (*s)[i]
    return
}

// ToSliceOfSliceOfStrings converts the tag collection into a two-dimensional
// slice of strings, maintaining the structure of tags and their elements.
//
// # Return Values
//
// - ss ([][]string): A slice of string slices where each inner slice represents
// a tag's elements converted from bytes to strings.
//
// # Expected Behaviour
//
// Iterates through each tag in the collection and converts its byte elements
// to strings, preserving the tag structure in the resulting nested slice.
func (s *S) ToSliceOfSliceOfStrings() (ss [][]string) {
    for _, v := range *s {
        ss = append(ss, v.ToSliceOfStrings())
    }
    return
}
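A short usage sketch for the new tag helpers, based on the constructors visible elsewhere in this diff (tag.NewS, tag.NewFromAny); treat the exact call shapes as assumptions:

[source,go]
----
package main

import (
    "fmt"

    "next.orly.dev/pkg/encoders/tag"
)

func main() {
    // Two 't' tags and one 'd' tag; GetAll filters by key, and
    // ToSliceOfSliceOfStrings flattens everything for display.
    s := tag.NewS(
        tag.NewFromAny("t", "delta"),
        tag.NewFromAny("t", "epsilon"),
        tag.NewFromAny("d", "listing-1"),
    )
    for _, t := range s.GetAll([]byte("t")) {
        fmt.Println(t.ToSliceOfStrings()) // [t delta] then [t epsilon]
    }
    fmt.Println(s.ToSliceOfSliceOfStrings())
}
----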
@@ -91,12 +91,22 @@ func Validate(evt *event.E, challenge []byte, relayURL string) (
        err = errorf.E("error parsing relay url: %s", err)
        return
    }
    // Allow both ws:// and wss:// schemes when behind a reverse proxy
    // This handles cases where the relay expects ws:// but receives wss:// from clients
    // connecting through HTTPS proxies
    if expected.Scheme != found.Scheme {
        err = errorf.E(
            "HTTP Scheme incorrect: expected '%s' got '%s'",
            expected.Scheme, found.Scheme,
        )
        return
        // Check if this is a ws/wss scheme mismatch (acceptable behind proxy)
        if (expected.Scheme == "ws" && found.Scheme == "wss") ||
            (expected.Scheme == "wss" && found.Scheme == "ws") {
            // This is acceptable when behind a reverse proxy
            // The client will always send wss:// when connecting through HTTPS
        } else {
            err = errorf.E(
                "HTTP Scheme incorrect: expected '%s' got '%s'",
                expected.Scheme, found.Scheme,
            )
            return
        }
    }
    if expected.Host != found.Host {
        err = errorf.E(
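An equivalent, slightly tighter way to express the proxy allowance is to normalize both schemes before comparing. A hedged alternative sketch, not the code this diff ships:

[source,go]
----
// normalizeWS collapses the secure and plain websocket schemes so a relay
// behind a TLS-terminating proxy accepts either form during NIP-42 auth.
func normalizeWS(scheme string) string {
    if scheme == "wss" {
        return "ws"
    }
    return scheme
}

// Usage in Validate would then reduce to a single comparison:
//
//    if normalizeWS(expected.Scheme) != normalizeWS(found.Scheme) {
//        // reject: genuine scheme mismatch (e.g. http vs ws)
//    }
----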
@@ -1 +1 @@
v0.8.1
v0.10.5

readme.adoc (330 lines)
@@ -20,4 +20,332 @@ ORLY is a nostr relay written from the ground up to be performant, low latency,

ORLY uses a fast embedded link:https://github.com/hypermodeinc/badger[badger] database with a storage layout designed for high performance querying and event storage.

On linux platforms, it uses https://github.com/bitcoin/secp256k1[libsecp256k1]-enabled signature and signature verification (see link:pkg/p256k/README.md[here]).
On linux platforms, it uses https://github.com/bitcoin/secp256k1[libsecp256k1]-enabled signature and signature verification (see link:pkg/crypto/p256k/README.md[here]).

== building

ORLY is a standard Go application that can be built using the Go toolchain.

=== prerequisites

- Go 1.25.0 or later
- Git
- For web UI: link:https://bun.sh/[Bun] JavaScript runtime

=== basic build

To build the relay binary only:

[source,bash]
----
git clone <repository-url>
cd next.orly.dev
go build -o orly
----

=== building with web UI

To build with the embedded web interface:

[source,bash]
----
# Build the React web application
cd app/web
bun install
bun run build

# Build the Go binary from project root
cd ../../
go build -o orly
----

You can automate this process with a build script:

[source,bash]
----
#!/bin/bash
# build.sh
echo "Building React app..."
cd app/web
bun install
bun run build

echo "Building Go binary..."
cd ../../
go build -o orly

echo "Build complete!"
----

Make it executable with `chmod +x build.sh` and run with `./build.sh`.

== secp256k1 dependency

ORLY uses the optimized `libsecp256k1` C library from Bitcoin Core for schnorr signatures, providing 4x faster signing and ECDH operations compared to pure Go implementations.

=== installation

For Ubuntu/Debian, you can use the provided installation script:

[source,bash]
----
./scripts/ubuntu_install_libsecp256k1.sh
----

Or install manually:

[source,bash]
----
# Install build dependencies
sudo apt -y install build-essential autoconf libtool

# Initialize and build secp256k1
cd pkg/crypto/p256k/secp256k1
git submodule init
git submodule update
./autogen.sh
./configure --enable-module-schnorrsig --enable-module-ecdh --prefix=/usr
make
sudo make install
----

=== fallback mode

If you need to build without the C library dependency, disable CGO:

[source,bash]
----
export CGO_ENABLED=0
go build -o orly
----

This uses the pure Go `btcec` fallback library, which is slower but doesn't require system dependencies.

== stress testing

The stress tester is a tool for performance testing relay implementations under various load conditions.

=== usage

[source,bash]
----
cd cmd/stresstest
go run . [options]
----

Or use the compiled binary:

[source,bash]
----
./cmd/stresstest/stresstest [options]
----

=== options

* `--address` - Relay address (default: localhost)
* `--port` - Relay port (default: 3334)
* `--workers` - Number of concurrent publisher workers (default: 8)
* `--duration` - How long to run the stress test (default: 60s)
* `--publish-timeout` - Timeout waiting for OK per publish (default: 15s)
* `--query-workers` - Number of concurrent query workers (default: 4)
* `--query-timeout` - Subscription timeout for queries (default: 3s)
* `--query-min-interval` - Minimum interval between queries per worker (default: 50ms)
* `--query-max-interval` - Maximum interval between queries per worker (default: 300ms)
* `--skip-cache` - Skip uploading example events before running

=== example

[source,bash]
----
# Run stress test against local relay for 2 minutes with 16 workers
go run cmd/stresstest/main.go --address localhost --port 3334 --workers 16 --duration 120s

# Test a remote relay with higher query load
go run cmd/stresstest/main.go --address relay.example.com --port 443 --query-workers 8 --duration 300s
----

The stress tester will show real-time statistics including events sent/received per second, query counts, and results.

== benchmarks

The benchmark suite provides comprehensive performance testing and comparison across multiple relay implementations.

=== quick start

1. **Setup external relays:**
+
[source,bash]
----
cd cmd/benchmark
./setup-external-relays.sh
----

2. **Run all benchmarks:**
+
[source,bash]
----
docker compose up --build
----

3. **View results:**
+
[source,bash]
----
# View aggregate report
cat reports/run_YYYYMMDD_HHMMSS/aggregate_report.txt

# List individual relay results
ls reports/run_YYYYMMDD_HHMMSS/
----

=== benchmark types

The suite includes three main benchmark patterns:

==== peak throughput test
Tests maximum event ingestion rate with concurrent workers pushing events as fast as possible. Measures events/second, latency distribution, and success rate.

==== burst pattern test
Simulates real-world traffic with alternating high-activity bursts and quiet periods to test relay behavior under varying loads.

==== mixed read/write test
Concurrent read and write operations to test query performance while events are being ingested. Measures combined throughput and latency.

=== tested relays

The benchmark suite compares:

* **next.orly.dev** (this repository) - BadgerDB-based relay
* **Khatru** - SQLite and Badger variants
* **Relayer** - Basic example implementation
* **Strfry** - C++ LMDB-based relay
* **nostr-rs-relay** - Rust-based relay with SQLite

=== metrics reported

* **Throughput**: Events processed per second
* **Latency**: Average, P95, and P99 response times
* **Success Rate**: Percentage of successful operations
* **Memory Usage**: Peak memory consumption during tests
* **Error Analysis**: Detailed error reporting and categorization

Results are timestamped and stored in the `reports/` directory for tracking performance improvements over time.

== follows ACL

The follows ACL (Access Control List) system provides a flexible way to control relay access based on social relationships in the Nostr network. It grants different access levels to users based on whether they are followed by designated admin users.

=== how it works

The follows ACL system operates by:

1. **Admin Configuration**: Designated admin users are specified in the relay configuration
2. **Follow List Discovery**: The system fetches follow lists (kind 3 events) from admin users
3. **Access Level Assignment**:
   - **Admin access**: Users listed as admins get full administrative privileges
   - **Write access**: Users followed by any admin can publish events to the relay
   - **Read access**: All other users can only read events from the relay

=== configuration

Enable the follows ACL system by setting the ACL mode:

[source,bash]
----
export ORLY_ACL_MODE=follows
export ORLY_ADMINS=npub1abc...,npub1xyz...
----

Or in your environment configuration:

[source,env]
----
ORLY_ACL_MODE=follows
ORLY_ADMINS=npub1abc123...,npub1xyz456...
----

=== usage example

[source,bash]
----
# Set up a relay with follows ACL
export ORLY_ACL_MODE=follows
export ORLY_ADMINS=npub1fjqqy4a93z5zsjwsfxqhc2764kvykfdyttvldkkkdera8dr78vhsmmleku

# Start the relay
./orly
----

The relay will automatically:
- Load the follow lists of the specified admin users
- Grant write access to anyone followed by these admins
- Provide read-only access to everyone else
- Update follow lists dynamically as admins modify their follows

== relay sync spider

The relay sync spider is an intelligent synchronization system that discovers and syncs events from other Nostr relays based on social relationships. It works in conjunction with the follows ACL to create a distributed network of synchronized content.

=== how it works

The spider operates in two phases:

1. **Relay Discovery**:
   - Finds relay lists (kind 10002 events) from followed users
   - Builds a list of relays used by people in your social network
   - Prioritizes relays mentioned by admin users

2. **Event Synchronization**:
   - Queries discovered relays for events from followed users
   - Performs one-time historical sync (default: 1 month back)
   - Runs periodic syncs to stay current with new events
   - Validates and stores events locally

=== configuration

Enable the spider by setting the spider mode to "follows":

[source,bash]
----
export ORLY_SPIDER_MODE=follows
export ORLY_SPIDER_FREQUENCY=1h
----

Configuration options:

* `ORLY_SPIDER_MODE` - Spider mode: "none" (disabled) or "follows" (enabled)
* `ORLY_SPIDER_FREQUENCY` - How often to sync (default: 1h)

=== usage example

[source,bash]
----
# Enable both follows ACL and spider sync
export ORLY_ACL_MODE=follows
export ORLY_SPIDER_MODE=follows
export ORLY_SPIDER_FREQUENCY=30m
export ORLY_ADMINS=npub1fjqqy4a93z5zsjwsfxqhc2764kvykfdyttvldkkkdera8dr78vhsmmleku

# Start the relay
./orly
----

The spider will:
- Perform a one-time sync of the last month's events
- Discover relays from followed users' relay lists
- Sync events from those relays every 30 minutes
- Only sync events from users in the follow network

=== benefits

* **Decentralized Content**: Automatically aggregates content from your social network
* **Reduced Relay Dependency**: Less reliance on single large relays
* **Improved User Experience**: Users see content from their social circle even when offline from other relays
* **Network Resilience**: Content remains accessible even if origin relays go offline

=== technical notes

* The spider only runs when `ORLY_ACL_MODE=follows` to ensure proper authorization
* One-time sync is marked to prevent repeated historical syncs on restart
* Event validation ensures only properly signed events are stored
* Sync windows are configurable to balance freshness with resource usage
scripts/orly.service (normal file → executable file, no content changes)
scripts/run-market-and-orly.sh (new executable file, 104 lines)
@@ -0,0 +1,104 @@
#!/usr/bin/env bash
set -euo pipefail

# run-market-and-orly.sh
# Starts the ORLY relay with specified settings, then runs `bun dev:seed` in a
# provided Market repository to observe how the app interacts with the relay.
#
# Usage:
#   scripts/run-market-and-orly.sh /path/to/market
#   MARKET_DIR=/path/to/market scripts/run-market-and-orly.sh
#
# Notes:
# - This script removes /tmp/plebeian before starting the relay.
# - The relay listens on 0.0.0.0:10547
# - ORLY_ADMINS is intentionally empty and ACL is set to 'none'.
# - Requires: go, bun, curl

# ---------- Config ----------
RELAY_HOST="127.0.0.1"
RELAY_PORT="10547"
RELAY_DATA_DIR="/tmp/plebeian"
LOG_PREFIX="[relay]"
WAIT_TIMEOUT="120" # seconds - increased for slow startup

# ---------- Resolve repo root ----------
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd -- "${SCRIPT_DIR}/.." && pwd)"
cd "${REPO_ROOT}"

# ---------- Resolve Market directory ----------
MARKET_DIR="${1:-${MARKET_DIR:-}}"
if [[ -z "${MARKET_DIR}" ]]; then
  echo "ERROR: Market repository directory not provided. Set MARKET_DIR env or pass as first arg." >&2
  echo "Example: MARKET_DIR=$HOME/src/market scripts/run-market-and-orly.sh" >&2
  exit 1
fi
if [[ ! -d "${MARKET_DIR}" ]]; then
  echo "ERROR: MARKET_DIR does not exist: ${MARKET_DIR}" >&2
  exit 1
fi

# ---------- Prerequisites ----------
command -v go >/dev/null 2>&1 || { echo "ERROR: 'go' not found in PATH" >&2; exit 1; }
command -v bun >/dev/null 2>&1 || { echo "ERROR: 'bun' not found in PATH. Install Bun: https://bun.sh" >&2; exit 1; }
command -v curl >/dev/null 2>&1 || { echo "ERROR: 'curl' not found in PATH" >&2; exit 1; }

# ---------- Cleanup handler ----------
RELAY_PID=""
cleanup() {
  set +e
  if [[ -n "${RELAY_PID}" ]]; then
    echo "${LOG_PREFIX} stopping relay (pid=${RELAY_PID})" >&2
    kill "${RELAY_PID}" 2>/dev/null || true
    wait "${RELAY_PID}" 2>/dev/null || true
  fi
}
trap cleanup EXIT INT TERM

# ---------- Start relay ----------
reset || true
rm -rf "${RELAY_DATA_DIR}"

# Run go relay in background with required environment variables
(
  export ORLY_LOG_LEVEL="trace"
  export ORLY_LISTEN="0.0.0.0"
  export ORLY_PORT="${RELAY_PORT}"
  export ORLY_ADMINS=""
  export ORLY_ACL_MODE="none"
  export ORLY_DATA_DIR="${RELAY_DATA_DIR}"
  # Important: run from repo root
  cd "${REPO_ROOT}"
  # Prefix relay logs so they are distinguishable
  stdbuf -oL -eL go run . 2>&1 | sed -u "s/^/${LOG_PREFIX} /"
) &
RELAY_PID=$!
echo "${LOG_PREFIX} started (pid=${RELAY_PID}), waiting for readiness on ${RELAY_HOST}:${RELAY_PORT} …"

# ---------- Wait for readiness ----------
start_ts=$(date +%s)
while true; do
  if curl -fsS "http://${RELAY_HOST}:${RELAY_PORT}/" >/dev/null 2>&1; then
    break
  fi
  now=$(date +%s)
  if (( now - start_ts > WAIT_TIMEOUT )); then
    echo "ERROR: relay did not become ready within ${WAIT_TIMEOUT}s" >&2
    exit 1
  fi
  sleep 1
done
echo "${LOG_PREFIX} ready. Running Market seeding…"

# ---------- Run market seeding ----------
(
  cd "${MARKET_DIR}"
  # Stream bun output with clear prefix
  stdbuf -oL -eL bun dev:seed 2>&1 | sed -u 's/^/[market] /'
)
#
## After seeding completes, keep the relay up briefly for inspection
#echo "${LOG_PREFIX} seeding finished. Relay is still running for inspection. Press Ctrl+C to stop."
## Wait indefinitely until interrupted, to allow observing relay logs/behavior
#while true; do sleep 3600; done
scripts/run-market-probe.sh (new executable file, 242 lines)
@@ -0,0 +1,242 @@
#!/usr/bin/env bash
set -euo pipefail

# run-market-probe.sh
# Starts the ORLY relay with relaxed ACL, then executes the Market repo's
# scripts/startup.ts to publish seed events and finally runs a small NDK-based
# fetcher to verify the events can be read back from the relay. The goal is to
# print detailed logs to diagnose end-to-end publish/subscribe behavior.
#
# Usage:
#   scripts/run-market-probe.sh /path/to/market <hex_private_key>
#   MARKET_DIR=/path/to/market APP_PRIVATE_KEY=hex scripts/run-market-probe.sh
#
# Requirements:
# - go, bun, curl
# - Market repo available locally with scripts/startup.ts (see path above)
#
# Behavior:
# - Clears relay data dir (/tmp/plebeian) each run
# - Starts relay on 127.0.0.1:10547 with ORLY_ACL_MODE=none (no auth needed)
# - Exports APP_RELAY_URL to ws://127.0.0.1:10547 for the Market startup.ts
# - Runs Market's startup.ts to publish events (kinds 31990, 10002, 10000, 30000)
# - Runs a temporary TypeScript fetcher using NDK to subscribe & log results
#

# ---------- Config ----------
RELAY_HOST="127.0.0.1"
RELAY_PORT="10547"
RELAY_DATA_DIR="/tmp/plebeian"
WAIT_TIMEOUT="120" # seconds - increased for slow startup
RELAY_LOG_PREFIX="[relay]"
MARKET_LOG_PREFIX="[market-seed]"
FETCH_LOG_PREFIX="[fetch]"

# ---------- Resolve repo root ----------
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd -- "${SCRIPT_DIR}/.." && pwd)"
cd "${REPO_ROOT}"

# ---------- Resolve Market directory and private key ----------
MARKET_DIR=${1:-${MARKET_DIR:-}}
APP_PRIVATE_KEY_INPUT=${2:-${APP_PRIVATE_KEY:-${NOSTR_SK:-}}}
if [[ -z "${MARKET_DIR}" ]]; then
  echo "ERROR: Market repository directory not provided. Set MARKET_DIR env or pass as first arg." >&2
  echo "Example: MARKET_DIR=$HOME/src/github.com/PlebianApp/market scripts/run-market-probe.sh" >&2
  exit 1
fi
if [[ ! -d "${MARKET_DIR}" ]]; then
  echo "ERROR: MARKET_DIR does not exist: ${MARKET_DIR}" >&2
  exit 1
fi
if [[ -z "${APP_PRIVATE_KEY_INPUT}" ]]; then
  echo "ERROR: Private key not provided. Pass as 2nd arg or set APP_PRIVATE_KEY or NOSTR_SK env var." >&2
  exit 1
fi

# ---------- Prerequisites ----------
command -v go >/dev/null 2>&1 || { echo "ERROR: 'go' not found in PATH" >&2; exit 1; }
command -v bun >/dev/null 2>&1 || { echo "ERROR: 'bun' not found in PATH. Install Bun: https://bun.sh" >&2; exit 1; }
command -v curl >/dev/null 2>&1 || { echo "ERROR: 'curl' not found in PATH" >&2; exit 1; }

# ---------- Cleanup handler ----------
RELAY_PID=""
TMP_FETCH_DIR=""
TMP_FETCH_TS=""
cleanup() {
  set +e
  if [[ -n "${RELAY_PID}" ]]; then
    echo "${RELAY_LOG_PREFIX} stopping relay (pid=${RELAY_PID})" >&2
    kill "${RELAY_PID}" 2>/dev/null || true
    wait "${RELAY_PID}" 2>/dev/null || true
  fi
  if [[ -n "${TMP_FETCH_DIR}" && -d "${TMP_FETCH_DIR}" ]]; then
    rm -rf "${TMP_FETCH_DIR}" || true
  fi
}
trap cleanup EXIT INT TERM

# ---------- Start relay ----------
reset || true
rm -rf "${RELAY_DATA_DIR}"
(
  export ORLY_LOG_LEVEL="trace"
  export ORLY_LISTEN="0.0.0.0"
  export ORLY_PORT="${RELAY_PORT}"
  export ORLY_ADMINS="" # ensure no admin ACL
  export ORLY_ACL_MODE="none" # fully open for test
  export ORLY_DATA_DIR="${RELAY_DATA_DIR}"
  cd "${REPO_ROOT}"
  stdbuf -oL -eL go run . 2>&1 | sed -u "s/^/${RELAY_LOG_PREFIX} /"
) &
RELAY_PID=$!
echo "${RELAY_LOG_PREFIX} started (pid=${RELAY_PID}), waiting for readiness on ${RELAY_HOST}:${RELAY_PORT} …"

# ---------- Wait for readiness ----------
start_ts=$(date +%s)
while true; do
  if curl -fsS "http://${RELAY_HOST}:${RELAY_PORT}/" >/dev/null 2>&1; then
    break
  fi
  now=$(date +%s)
  if (( now - start_ts > WAIT_TIMEOUT )); then
    echo "ERROR: relay did not become ready within ${WAIT_TIMEOUT}s" >&2
    exit 1
  fi
  sleep 1
done
echo "${RELAY_LOG_PREFIX} ready. Starting Market publisher…"

# ---------- Publish via Market's startup.ts ----------
(
  export APP_RELAY_URL="ws://${RELAY_HOST}:${RELAY_PORT}"
  export APP_PRIVATE_KEY="${APP_PRIVATE_KEY_INPUT}"
  cd "${MARKET_DIR}"
  # Use bun to run the exact startup.ts the app uses. Expect its dependencies in Market repo.
  echo "${MARKET_LOG_PREFIX} running scripts/startup.ts against ${APP_RELAY_URL} …"
  stdbuf -oL -eL bun run scripts/startup.ts 2>&1 | sed -u "s/^/${MARKET_LOG_PREFIX} /"
)

# ---------- Prepare a temporary NDK fetcher workspace ----------
TMP_FETCH_DIR=$(mktemp -d /tmp/ndk-fetch-XXXXXX)
TMP_FETCH_TS="${TMP_FETCH_DIR}/probe.ts"

# Write probe script
cat >"${TMP_FETCH_TS}" <<'TS'
import { config } from 'dotenv'
config()

const RELAY_URL = process.env.APP_RELAY_URL
const APP_PRIVATE_KEY = process.env.APP_PRIVATE_KEY

if (!RELAY_URL || !APP_PRIVATE_KEY) {
  console.error('[fetch] Missing APP_RELAY_URL or APP_PRIVATE_KEY in env')
  process.exit(2)
}

// Use NDK like startup.ts does
import NDK, { NDKEvent, NDKPrivateKeySigner, NDKFilter } from '@nostr-dev-kit/ndk'

const relay = RELAY_URL as string
const privateKey = APP_PRIVATE_KEY as string

async function main() {
  console.log(`[fetch] initializing NDK -> ${relay}`)
  const ndk = new NDK({ explicitRelayUrls: [relay] })
  ndk.pool?.on('relay:connect', (r) => console.log('[fetch] relay connected:', r.url))
  ndk.pool?.on('relay:disconnect', (r) => console.log('[fetch] relay disconnected:', r.url))
  ndk.pool?.on('relay:notice', (r, msg) => console.log('[fetch] relay notice:', r.url, msg))

  await ndk.connect(8000)
  console.log('[fetch] connected')

  // Setup signer and derive pubkey
  const signer = new NDKPrivateKeySigner(privateKey)
  ndk.signer = signer
  await signer.blockUntilReady()
  const pubkey = (await signer.user())?.pubkey
  console.log('[fetch] signer pubkey:', pubkey)

  // Subscribe to the kinds published by startup.ts authored by pubkey
  const filters: NDKFilter[] = [
    { kinds: [31990, 10002, 10000, 30000], authors: pubkey ? [pubkey] : undefined, since: Math.floor(Date.now()/1000) - 3600 },
  ]
  console.log('[fetch] subscribing with filters:', JSON.stringify(filters))

  const sub = ndk.subscribe(filters, { closeOnEose: true })
  let count = 0
  const received: string[] = []

  sub.on('event', (e: NDKEvent) => {
    count++
    received.push(`${e.kind}:${e.tagValue('d') || ''}:${e.id}`)
    console.log('[fetch] EVENT kind=', e.kind, 'id=', e.id, 'tags=', e.tags)
  })
  sub.on('eose', () => {
    console.log('[fetch] EOSE received; total events:', count)
  })
  sub.on('error', (err: any) => {
    console.error('[fetch] subscription error:', err)
  })

  // Also try to fetch by kinds one by one to be verbose
  const kinds = [31990, 10002, 10000, 30000]
  for (const k of kinds) {
    try {
      const e = await ndk.fetchEvent({ kinds: [k], authors: pubkey ? [pubkey] : undefined }, { cacheUsage: 'ONLY_RELAY' })
      if (e) {
        console.log(`[fetch] fetchEvent kind=${k} -> id=${e.id}`)
      } else {
        console.log(`[fetch] fetchEvent kind=${k} -> not found`)
      }
    } catch (err) {
      console.error(`[fetch] fetchEvent kind=${k} error`, err)
    }
  }

  // Wait a bit to allow sub to drain
  await new Promise((res) => setTimeout(res, 2000))
  console.log('[fetch] received summary:', received)
  // Note: NDK v2.14.x does not expose pool.close(); rely on closeOnEose and process exit
}

main().catch((e) => {
  console.error('[fetch] fatal error:', e)
  process.exit(3)
})
TS

# Write minimal package.json to pin dependencies and satisfy NDK peer deps
cat >"${TMP_FETCH_DIR}/package.json" <<'JSON'
{
  "name": "ndk-fetch-probe",
  "version": "0.0.1",
  "private": true,
  "type": "module",
  "dependencies": {
    "@nostr-dev-kit/ndk": "^2.14.36",
    "nostr-tools": "^2.7.0",
    "dotenv": "^16.4.5"
  }
}
JSON

# ---------- Install probe dependencies explicitly (avoid Bun auto-install pitfalls) ----------
(
  cd "${TMP_FETCH_DIR}"
  echo "${FETCH_LOG_PREFIX} installing probe deps (@nostr-dev-kit/ndk, nostr-tools, dotenv) …"
  stdbuf -oL -eL bun install 2>&1 | sed -u "s/^/${FETCH_LOG_PREFIX} [install] /"
)

# ---------- Run the fetcher ----------
(
  export APP_RELAY_URL="ws://${RELAY_HOST}:${RELAY_PORT}"
  export APP_PRIVATE_KEY="${APP_PRIVATE_KEY_INPUT}"
  echo "${FETCH_LOG_PREFIX} running fetch probe against ${APP_RELAY_URL} …"
  (
    cd "${TMP_FETCH_DIR}"
    stdbuf -oL -eL bun "${TMP_FETCH_TS}" 2>&1 | sed -u "s/^/${FETCH_LOG_PREFIX} /"
  )
)

echo "[probe] Completed. Review logs above for publish/subscribe flow."
scripts/run-relay-and-seed.sh (new executable file, 104 lines)
@@ -0,0 +1,104 @@
#!/usr/bin/env bash
set -euo pipefail

# run-relay-and-seed.sh
# Starts the ORLY relay with specified settings, then runs `bun dev:seed` in a
# provided Market repository to observe how the app interacts with the relay.
#
# Usage:
#   scripts/run-relay-and-seed.sh /path/to/market
#   MARKET_DIR=/path/to/market scripts/run-relay-and-seed.sh
#
# Notes:
# - This script removes /tmp/plebeian before starting the relay.
# - The relay listens on 0.0.0.0:10547
# - ORLY_ADMINS is intentionally empty and ACL is set to 'none'.
# - Requires: go, bun, curl

# ---------- Config ----------
RELAY_HOST="127.0.0.1"
RELAY_PORT="10547"
RELAY_DATA_DIR="/tmp/plebeian"
LOG_PREFIX="[relay]"
WAIT_TIMEOUT="45" # seconds

# ---------- Resolve repo root ----------
SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd -- "${SCRIPT_DIR}/.." && pwd)"
cd "${REPO_ROOT}"

# ---------- Resolve Market directory ----------
MARKET_DIR="${1:-${MARKET_DIR:-}}"
if [[ -z "${MARKET_DIR}" ]]; then
  echo "ERROR: Market repository directory not provided. Set MARKET_DIR env or pass as first arg." >&2
  echo "Example: MARKET_DIR=$HOME/src/market scripts/run-relay-and-seed.sh" >&2
  exit 1
fi
if [[ ! -d "${MARKET_DIR}" ]]; then
  echo "ERROR: MARKET_DIR does not exist: ${MARKET_DIR}" >&2
  exit 1
fi

# ---------- Prerequisites ----------
command -v go >/dev/null 2>&1 || { echo "ERROR: 'go' not found in PATH" >&2; exit 1; }
command -v bun >/dev/null 2>&1 || { echo "ERROR: 'bun' not found in PATH. Install Bun: https://bun.sh" >&2; exit 1; }
command -v curl >/dev/null 2>&1 || { echo "ERROR: 'curl' not found in PATH" >&2; exit 1; }

# ---------- Cleanup handler ----------
RELAY_PID=""
cleanup() {
  set +e
  if [[ -n "${RELAY_PID}" ]]; then
    echo "${LOG_PREFIX} stopping relay (pid=${RELAY_PID})" >&2
    kill "${RELAY_PID}" 2>/dev/null || true
    wait "${RELAY_PID}" 2>/dev/null || true
  fi
}
trap cleanup EXIT INT TERM

# ---------- Start relay ----------
reset || true
rm -rf "${RELAY_DATA_DIR}"

# Run go relay in background with required environment variables
(
  export ORLY_LOG_LEVEL="trace"
  export ORLY_LISTEN="0.0.0.0"
  export ORLY_PORT="${RELAY_PORT}"
  export ORLY_ADMINS=""
  export ORLY_ACL_MODE="none"
  export ORLY_DATA_DIR="${RELAY_DATA_DIR}"
  # Important: run from repo root
  cd "${REPO_ROOT}"
  # Prefix relay logs so they are distinguishable
  stdbuf -oL -eL go run . 2>&1 | sed -u "s/^/${LOG_PREFIX} /"
) &
RELAY_PID=$!
echo "${LOG_PREFIX} started (pid=${RELAY_PID}), waiting for readiness on ${RELAY_HOST}:${RELAY_PORT} …"

# ---------- Wait for readiness ----------
start_ts=$(date +%s)
while true; do
  if curl -fsS "http://${RELAY_HOST}:${RELAY_PORT}/" >/dev/null 2>&1; then
    break
  fi
  now=$(date +%s)
  if (( now - start_ts > WAIT_TIMEOUT )); then
    echo "ERROR: relay did not become ready within ${WAIT_TIMEOUT}s" >&2
    exit 1
  fi
  sleep 1
done
echo "${LOG_PREFIX} ready. Running Market seeding…"

# ---------- Run market seeding ----------
(
  cd "${MARKET_DIR}"
  # Stream bun output with clear prefix
  stdbuf -oL -eL bun dev:seed 2>&1 | sed -u 's/^/[market] /'
)

# After seeding completes, keep the relay up briefly for inspection
echo "${LOG_PREFIX} seeding finished. Relay is still running for inspection. Press Ctrl+C to stop."
# Wait indefinitely until interrupted, to allow observing relay logs/behavior
while true; do sleep 3600; done
scripts/runtests.sh (normal file → executable file, no content changes)
stella-relay.service (new file, 42 lines)
@@ -0,0 +1,42 @@
[Unit]
Description=Stella's Orly Nostr Relay (Docker Compose)
Documentation=https://github.com/Silberengel/next.orly.dev
After=network-online.target docker.service
Wants=network-online.target
Requires=docker.service

[Service]
Type=oneshot
RemainAfterExit=yes
User=madmin
Group=madmin
WorkingDirectory=/home/madmin/Projects/GitCitadel/next.orly.dev

# Start the relay using docker compose
ExecStart=/usr/bin/docker compose up -d orly-relay

# Stop the relay
ExecStop=/usr/bin/docker compose down

# Reload configuration (restart containers)
ExecReload=/usr/bin/docker compose restart orly-relay

# Security settings
NoNewPrivileges=true
ProtectSystem=strict
ProtectHome=read-only
ReadWritePaths=/home/madmin/.local/share/orly-relay
ReadWritePaths=/home/madmin/Projects/GitCitadel/next.orly.dev/data

# Resource limits
LimitNOFILE=65536
LimitNPROC=4096

# Restart policy
Restart=on-failure
RestartSec=10
TimeoutStartSec=60
TimeoutStopSec=30

[Install]
WantedBy=multi-user.target