Compare commits

...

67 Commits

Author SHA1 Message Date
01131f252e Rank search results by match relevance and recency, update deduplication, and bump version to v0.9.1.
2025-10-01 17:52:20 +01:00
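An editorial sketch of the relevance-plus-recency ranking this commit describes; `searchResult`, its fields, and the one-week half-life are assumptions, not the relay's actual types or tuning:

```go
package sketch

import (
    "math"
    "sort"
    "time"
)

// searchResult is a hypothetical shape for a full-text match; the
// relay's real type and weighting are not shown in this log.
type searchResult struct {
    EventID    string
    MatchCount int       // query tokens that matched
    CreatedAt  time.Time // event timestamp
}

// rankResults orders results by token-match relevance, decayed by age
// so that fresher events win ties.
func rankResults(results []searchResult, now time.Time) {
    score := func(r searchResult) float64 {
        ageHours := now.Sub(r.CreatedAt).Hours()
        decay := math.Exp2(-ageHours / (7 * 24)) // assumed one-week half-life
        return float64(r.MatchCount) * decay
    }
    sort.Slice(results, func(i, j int) bool {
        return score(results[i]) > score(results[j])
    })
}
```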
02333b74ae completed fulltext index migration
2025-10-01 17:24:50 +01:00
86ac7b7897 Add full-text search indexing for word tokens and update tokenization logic
- Introduced word index (`WordPrefix`) for tokenized search terms.
- Added word token extraction in event and filter processing.
- Implemented Unicode-aware, case-insensitive tokenizer with URL, mention, and hex filters.
- Extended full-text indexing to include tags and content.
2025-10-01 15:03:41 +01:00
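A minimal sketch of a tokenizer along the lines the bullets describe, with Unicode-aware splitting and case folding; the exact URL, mention, and hex filter rules are assumptions:

```go
package sketch

import (
    "regexp"
    "strings"
    "unicode"
)

var (
    urlRe     = regexp.MustCompile(`^(https?|wss?)://`)
    mentionRe = regexp.MustCompile(`^(nostr:)?(npub|nprofile|note|nevent)1[a-z0-9]+`)
    hexRe     = regexp.MustCompile(`^[0-9a-f]{16,}$`)
)

// tokenize lowercases content, drops whole words that look like URLs
// or nostr mentions, splits the rest on any rune that is not a letter
// or digit, and discards long hex strings such as event IDs.
func tokenize(content string) (tokens []string) {
    for _, w := range strings.Fields(content) {
        lw := strings.ToLower(w)
        if urlRe.MatchString(lw) || mentionRe.MatchString(lw) {
            continue
        }
        words := strings.FieldsFunc(lw, func(r rune) bool {
            return !unicode.IsLetter(r) && !unicode.IsNumber(r)
        })
        for _, t := range words {
            if len(t) < 2 || hexRe.MatchString(t) {
                continue
            }
            tokens = append(tokens, t)
        }
    }
    return
}
```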
7e6adf9fba Adjust BadgerDB memory configurations to optimize resource usage and bump version to v0.8.9.
2025-10-01 12:52:45 +01:00
7d5ebd5ccd Adjust BadgerDB memory settings to prevent OOM issues and update version to v0.8.8. 2025-10-01 12:40:34 +01:00
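What the BadgerDB tuning in the two commits above could look like with badger v4's options API; the specific sizes are illustrative, not the values actually committed:

```go
package sketch

import badger "github.com/dgraph-io/badger/v4"

// openTuned opens BadgerDB with the main memory consumers capped to
// reduce OOM risk on small hosts.
func openTuned(dir string) (*badger.DB, error) {
    opts := badger.DefaultOptions(dir).
        WithMemTableSize(32 << 20).   // smaller memtables than the 64 MiB default
        WithNumMemtables(2).          // fewer memtables held in RAM
        WithBlockCacheSize(64 << 20). // bound the block cache
        WithIndexCacheSize(32 << 20)  // bound the index cache
    return badger.Open(opts)
}
```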
f8a321eaee Add connection start time tracking and improve logging for WebSocket lifecycle
- Introduced `startTime` field in `Listener` to log connection duration.
- Enhanced diagnostics with detailed connection statistics on close.
- Improved logging and error handling for PING/PONG and message lifecycle.
- Updated version to v0.8.7.
2025-10-01 08:40:04 +01:00
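A rough sketch of the `startTime` tracking and close-time statistics described above; the `Listener` shown is a stand-in for the relay's much larger struct:

```go
package sketch

import (
    "log"
    "time"
)

// Listener is a stand-in; the relay's real struct carries much more.
type Listener struct {
    remote    string
    startTime time.Time
    msgCount  int
}

func newListener(remote string) *Listener {
    return &Listener{remote: remote, startTime: time.Now()}
}

// logClose emits the kind of close-time statistics the commit
// describes: connection duration plus message counters.
func (l *Listener) logClose(reason string) {
    log.Printf("%s closed after %s (%d messages): %s",
        l.remote, time.Since(l.startTime).Round(time.Millisecond),
        l.msgCount, reason)
}
```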
48c7fab795 Improve logging and handling for WebSocket message processing, delivery, and diagnostics.
- Enhanced logging for WebSocket writes, message handling, and delivery timing.
- Added diagnostics for slow deliveries, failures, and context timeouts.
- Incorporated extensive error handling for malformed messages and client notifications.
- Enabled command results and refined subscription management.
- Introduced detailed connection state tracking and metrics for messages, requests, and events.
- Added new `run-market-probe.sh` script for relay testing and Market seeding.
2025-10-01 08:27:22 +01:00
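One of the diagnostics listed, slow-delivery detection, might look roughly like this; the 500 ms threshold and the shape of the write callback are assumptions:

```go
package sketch

import (
    "log"
    "time"
)

// deliverTimed wraps a write function and warns when delivery exceeds
// an assumed threshold, mirroring the slow-delivery diagnostics above.
func deliverTimed(write func([]byte) error, msg []byte, remote string) error {
    start := time.Now()
    err := write(msg)
    if d := time.Since(start); d > 500*time.Millisecond {
        log.Printf("slow delivery to %s: took %s", remote, d)
    }
    return err
}
```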
f6054f3c37 Add run-relay-and-seed.sh script, remove redundant JS library mappings, and improve logging consistency.
- Introduced `scripts/run-relay-and-seed.sh` to simplify relay testing and Market seeding.
- Removed `.idea/jsLibraryMappings.xml` as it is no longer required.
- Enhanced consistency by reintroducing relevant debug logs and removing redundant comments.
2025-09-30 18:39:53 +01:00
e1da199858 Bump version to v0.8.5.
2025-09-30 18:08:57 +01:00
45b4f82995 Enable additional NIP support, improve tag handling validation, and simplify WebSocket message processing. 2025-09-30 18:07:42 +01:00
e58eb1d3e3 Remove commented-out debug logs and update rules for Go version and Nostr protocol documentation. 2025-09-30 13:11:41 +01:00
72d6ddff15 Merge remote-tracking branch 'origin/main' 2025-09-30 13:11:00 +01:00
a50ef55d8e Remove commented-out debug logs and update rules for Go version and Nostr protocol documentation. 2025-09-30 13:10:45 +01:00
c2d5d2a165 Merge pull request #1 from Silberengel/docker-deployment-setup
Add Docker deployment and Apache reverse proxy setup

lgtm 👍
2025-09-25 19:44:07 +01:00
05b13399e3 Expand README with follows ACL and relay sync spider documentation. 2025-09-23 16:05:32 +01:00
0dea0ca791 Expand README with detailed build instructions, dependency setup, stress testing, and performance benchmarking. 2025-09-23 16:00:30 +01:00
ff017b45d2 Add relay identity pubkey and subscription-based profile updates; bump version to v0.8.4.
- Included relay identity public key in `relayinfo` response.
- Added `UpdateRelayProfile` function to dynamically create/update relay's subscription profile.
- Incremented version from v0.8.3 to v0.8.4.
2025-09-23 15:08:30 +01:00
50179e44ed Add dashboard URL to relay description and bump version to v0.8.3.
- Updated relay description to include a dynamically constructed dashboard URL.
- Incremented version from v0.8.2 to v0.8.3.
2025-09-23 14:55:25 +01:00
34a3b1ba69 Add dynamic relay dashboard URL support and version increment to v0.8.2.
- Introduced configuration option `RelayURL` for relay dashboard base URL.
- Added dynamic dashboard URL functionality in `PaymentProcessor`.
- Updated payment notifications to include dashboard access link.
- Incremented version to v0.8.2.
2025-09-23 14:49:08 +01:00
093a19db29 Expand relay features and update version to v0.8.1.
- Enabled support for additional relay NIPs: Authentication, GenericTagQueries, ParameterizedReplaceableEvents, ExpirationTimestamp.
- Added `PaymentRequired` limitation based on configuration.
- Incremented version to v0.8.1.
2025-09-23 14:26:50 +01:00
2ba361c915 Add relay identity management and subscription enhancements.
- Introduced relay identity management for subscriptions and follow-list sync.
- Added `IdentityRequested` function to handle the `identity` subcommand.
- Implemented periodic follow-list synchronization for active subscribers.
- Enhanced payment handling to include payer pubkey and subscription updates.
- Added trial expiry and subscription expiry notifications.
2025-09-23 14:22:24 +01:00
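The periodic follow-list synchronization could be driven by a ticker loop like this sketch; the `sync` callback stands in for the relay's actual fetch logic:

```go
package sketch

import (
    "context"
    "log"
    "time"
)

// runFollowSync re-runs the sync callback on a fixed interval until
// the context is cancelled.
func runFollowSync(ctx context.Context, every time.Duration, sync func(context.Context) error) {
    t := time.NewTicker(every)
    defer t.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-t.C:
            if err := sync(ctx); err != nil {
                log.Printf("follow-list sync failed: %v", err)
            }
        }
    }
}
```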
7736bb7640 Add payment processing with NWC and subscription-based access control.
- Implemented `PaymentProcessor` to handle NWC payments and extend user subscriptions.
- Added configuration options for NWC URI, subscription pricing, and enablement.
- Updated server to initialize and manage the payment processor.
2025-09-22 17:36:05 +01:00
804e1c9649 Add NWC protocol handling and NIP-44 encryption and decryption functions. 2025-09-22 17:18:47 +01:00
81a6aade4e Bump version to v0.7.1; update relay icon URL.
2025-09-22 09:38:01 +01:00
fc9600f99d Bump version to v0.7.0; update docs image. 2025-09-22 09:33:04 +01:00
199f922208 Refactor deletion checks and error handling; bump version to v0.6.4.
2025-09-21 18:15:27 +01:00
405e223aa6 implement delete events 2025-09-21 18:06:11 +01:00
fc3a89a309 Remove unused JavaScript file index-tha189jf.js from dist directory.
- Cleaned up the `app/web/dist/` directory by deleting an unreferenced and outdated build artifact.
- Maintained a lean and organized repository structure.
2025-09-21 17:17:31 +01:00
ba8166da07 Remove unused JavaScript file index-wnwvj11w.js from dist directory.
- Cleaned up the `app/web/dist/` directory by deleting an unreferenced and outdated build artifact.
- Maintained a lean and organized repository structure.
2025-09-21 17:17:15 +01:00
3e3af08644 Remove unused JavaScript file index-wnwvj11w.js from dist directory.
- Cleaned up the `app/web/dist/` directory by deleting an unreferenced and outdated build artifact.
- Maintained a lean and organized repository structure.
2025-09-21 16:39:45 +01:00
fbdf565bf7 Remove unused JavaScript file index-sskmjaqz.js from dist directory.
- Cleaned up the `app/web/dist/` directory by deleting an unreferenced and outdated build artifact.
- Maintained a lean and organized repository structure.
2025-09-21 16:33:23 +01:00
14b6960070 Add admin-only "All Events Log" feature with WebSocket integration.
- Implemented an "All Events Log" section accessible only to admin users.
- Added WebSocket-based data fetching to retrieve all events from the relay.
- Included profile caching and metadata fetching for event authors.
- Updated UI components to display events with expandable raw JSON details.
- Adjusted CSS for avatar sizes and improved layout.
- Refactored logout logic to reset all event states.
2025-09-21 16:31:06 +01:00
f9896e52ea use websockets for events log 2025-09-21 16:12:10 +01:00
ad7ca69964 Bump version to v0.6.1 for patch release.
2025-09-21 14:39:23 +01:00
facf03783f Remove outdated CSS and JavaScript files from dist directory.
- Deleted `index-zhtd763e.css` and `index-zqddcpy5.js` to streamline the build artifacts.
- Simplified repository by removing unused generated files to maintain a clean structure.
2025-09-21 14:36:49 +01:00
a5b6943320 Bump version to v0.6.0 for upcoming release.
2025-09-21 11:56:32 +01:00
1fe0a395be Add minimal local build outputs for streamlined dist integration.
- Introduced `index-zhtd763e.css` with a tailored CSS rule set for performance optimization.
- Added `index-zqddcpy5.js` containing essential JavaScript for React app functionality and improved compatibility.
2025-09-21 11:51:20 +01:00
92b3716a61 Remove dist directory and streamline build artifacts.
- Deleted `index.css`, `index.js`, and `index.html` from `app/web/dist/`.
- Cleared unused build artifacts to maintain a lean repository structure.
2025-09-21 11:46:59 +01:00
5c05d741d9 Replace remote Tailwind CSS with a local minimal subset; refine .gitignore and dist structure.
- Added a minimal `tailwind.min.css` with utilities tailored to app needs (`app/web/dist/`).
- Updated `.gitignore` to include specific `dist/` paths while maintaining clean build artifacts.
- Added local `dist` files (`index.css`, `index.js`) for better control over UI styling and build outputs.
2025-09-21 11:34:08 +01:00
9a1bbbafce Refine login view styling and update authentication text.
- Updated `App.jsx` to improve layout with centered flexbox and dynamic width.
- Adjusted login text for better clarity: "Authenticate" replaces "Connect".
2025-09-21 11:28:35 +01:00
2fd3828010 Refine login view styling and update authentication text.
- Updated `App.jsx` to improve layout with centered flexbox and dynamic width.
- Adjusted login text for better clarity: "Authenticate" replaces "Connect".
2025-09-21 10:38:25 +01:00
24b742bd20 Enable dev mode for React app with proxy support; refine build, styles, and UI.
- Adjusted `package.json` scripts for Bun dev server and build flow.
- Added `dev.html` for standalone web development with hot-reload enabled.
- Introduced `WebDisableEmbedded` and `WebDevProxyURL` configurations to support proxying non-API paths.
- Refactored server logic to handle reverse proxy for development mode.
- Updated `App.jsx` structure, styles, and layout for responsiveness and dynamic padding.
- Improved login interface with logo support and cleaner design.
- Enhanced development flow documentation in `README.md`.
2025-09-21 10:29:17 +01:00
Silberengel
42273ab2fa Add Docker deployment and Apache reverse proxy setup
🐳 Docker Implementation:
- Add Dockerfile with Alpine Linux (46MB image)
- Add docker-compose.yml with production-ready config
- Add manage-relay.sh for easy local management
- Add stella-relay.service for systemd auto-start
- Published images: silberengel/orly-relay:latest, :v1, :v2

🔧 Apache Reverse Proxy:
- Add comprehensive Apache proxy guide for Plesk and standard Apache
- Add working WebSocket proxy configuration (ws:// not http://)
- Add troubleshooting guide based on real deployment experience
- Add debug-websocket.sh script for systematic diagnosis
2025-09-21 08:57:27 +02:00
6f71b95734 Handle EOF case in text encoder helper loop.
- Added check for `len(rem) == 0` to return `io.EOF` when no remaining input is available.
2025-09-21 03:00:29 +01:00
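The described guard, shown in a stand-in helper; only the `len(rem) == 0` check comes from the commit, the rest is illustrative:

```go
package sketch

import "io"

// nextToken is a stand-in decoder-loop helper.
func nextToken(rem []byte) (tok, rest []byte, err error) {
    if len(rem) == 0 {
        // without this, a caller looping on nextToken could spin forever
        return nil, nil, io.EOF
    }
    // ... decode one token from rem (elided) ...
    return rem[:1], rem[1:], nil
}
```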
82665444f4 Add /api/auth/logout endpoint and improve auth flow.
- Implemented `handleAuthLogout` to support user logout by clearing session cookies.
- Improved `/api/auth/status` with authentication cookie validation for persistent login state.
- Enhanced `App.jsx` to prevent UI flash during auth status checks and streamline logout flow.
- Refined user profile handling and permission fetch logic for better reliability.
2025-09-20 20:30:14 +01:00
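A sketch of the cookie-clearing logout; `handleAuthLogout` is named in the commit, while the cookie name and response code are assumptions:

```go
package sketch

import "net/http"

// handleAuthLogout clears the session cookie so subsequent
// /api/auth/status checks see the user as logged out.
func handleAuthLogout(w http.ResponseWriter, r *http.Request) {
    http.SetCookie(w, &http.Cookie{
        Name:     "session", // assumed cookie name
        Value:    "",
        Path:     "/",
        MaxAge:   -1, // instruct the browser to delete the cookie
        HttpOnly: true,
    })
    w.WriteHeader(http.StatusNoContent)
}
```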
effeae4495 Replace remote Tailwind CSS with a minimal local build; refine build script and UI styling.
- Added `tailwind.min.css` tailored to current app requirements to reduce external dependencies.
- Updated `index.html` to use the local Tailwind CSS file.
- Improved `package.json` `build` script to ensure `dist` directory creation and inclusion of all `public/` assets.
- Refined CSS and layout in `App.jsx` for better consistency and responsiveness.
2025-09-20 20:24:04 +01:00
6b38291bf9 Add CORS headers and update UI for enhanced user profile handling.
- Added CORS support in server responses for cross-origin requests (`Access-Control-Allow-Origin`, etc.).
- Improved header panel behavior with a sticky position and refined CSS styling.
- Integrated profile data fetching (Kind 0 metadata) for user personalization.
- Enhanced login functionality to support dynamic profile display based on fetched metadata.
- Updated `index.html` to include Tailwind CSS for better design consistency.
2025-09-20 19:54:27 +01:00
0b69ea6d80 Embed React app and add new user authentication interface.
- Integrated a React-based web frontend into the Go server using the `embed` package, serving it from `/`.
- Added build and development scripts utilizing Bun for the React app (`package.json`, `README.md`).
- Enhanced auth interface to support better user experience and permissions (`App.jsx`, CSS updates).
- Refactored `/api/auth/login` to serve React UI, removing hardcoded HTML template.
- Implemented `/api/permissions/` with ACL support for user access management.
2025-09-20 19:03:25 +01:00
9c85dca598 Add graceful termination logging on signal triggers.
- Added explicit `log.I.F("exiting")` calls on signal handling for better visibility during shutdown.
- Ensured immediate return after logging to prevent further processing.
2025-09-20 17:59:06 +01:00
0d8c518896 Add user authentication interface with Nostr relay support.
- Implemented basic UI for login with NIP-07 extensions or private keys.
- Added `/api/auth/` endpoints for challenge generation, login handling, and status checking.
- Introduced challenge storage with thread-safe management.
- Enhanced `Server` structure to support authentication and user interface workflows.
- Improved HTML/CSS for a responsive and user-friendly experience.
2025-09-20 14:17:41 +01:00
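The thread-safe challenge storage mentioned above might be as simple as a mutex-guarded map, as in this sketch; the shape and TTL handling are assumed:

```go
package sketch

import (
    "sync"
    "time"
)

// challengeStore holds issued auth challenges until they are consumed.
type challengeStore struct {
    mu sync.Mutex
    m  map[string]time.Time // challenge -> issued-at
}

func newChallengeStore() *challengeStore {
    return &challengeStore{m: make(map[string]time.Time)}
}

func (s *challengeStore) Add(challenge string) {
    s.mu.Lock()
    defer s.mu.Unlock()
    s.m[challenge] = time.Now()
}

// Take consumes a challenge exactly once and enforces an assumed TTL.
func (s *challengeStore) Take(challenge string, ttl time.Duration) bool {
    s.mu.Lock()
    defer s.mu.Unlock()
    issued, ok := s.m[challenge]
    if !ok {
        return false
    }
    delete(s.m, challenge)
    return time.Since(issued) <= ttl
}
```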
20fbce9263 Add spider functionality for relay crawling, marker management, and new SpiderMode config.
- Introduced the `spider` package for relay crawling, including periodic tasks and one-time sync capabilities.
- Added `SetMarker`, `GetMarker`, `HasMarker`, and `DeleteMarker` methods in the database for marker management.
- Updated configuration with `SpiderMode` and `SpiderFrequency` options to enable and customize spider behavior.
- Integrated `spider` initialization into the main application flow.
- Improved tag handling, NIP-70 compliance, and protected tag validation in event processing.
- Removed unnecessary logging and replaced `errorf` with `fmt.Errorf` for better error handling.
- Incremented version to `v0.5.0`.
2025-09-20 13:46:22 +01:00
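The marker methods could be thin wrappers over Badger transactions, roughly as below; the key prefix and the free-function form are assumptions (the real methods live on the relay's database type):

```go
package sketch

import (
    "errors"

    badger "github.com/dgraph-io/badger/v4"
)

const markerPrefix = "marker/"

// SetMarker stores a named marker value.
func SetMarker(db *badger.DB, name string, val []byte) error {
    return db.Update(func(txn *badger.Txn) error {
        return txn.Set([]byte(markerPrefix+name), val)
    })
}

// HasMarker reports whether a named marker exists.
func HasMarker(db *badger.DB, name string) (ok bool, err error) {
    err = db.View(func(txn *badger.Txn) error {
        _, e := txn.Get([]byte(markerPrefix + name))
        if errors.Is(e, badger.ErrKeyNotFound) {
            return nil // absent, not an error
        }
        ok = e == nil
        return e
    })
    return
}
```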
4532def9f5 Remove large outdated stacktrace.txt log file.
- Deleted auto-generated `stacktrace.txt` file to reduce repository clutter and maintain relevance of retained files.
2025-09-20 12:07:55 +01:00
90f21fbcd1 Add detailed benchmark results for multiple relays.
- Included results for `relayer-basic`, `strfry`, and `nostr-rs-relay` relay benchmarks.
- Comprehensive performance metrics added for throughput, latency, query, and concurrent operations.
- Reports saved as plain text and AsciiDoc formats.
2025-09-20 12:06:57 +01:00
81a40c04e5 Refactor publishCacheEvents for concurrent publishing and optimize database access.
- Updated `publishCacheEvents` to utilize multiple concurrent connections for event publishing.
- Introduced worker-based architecture leveraging `runtime.NumCPU` for parallel uploads.
- Optimized database fetch logic in `FetchEventsBySerials` for improved maintainability and performance.
- Bumped version to `v0.4.8`.
2025-09-20 04:10:59 +01:00
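The worker-based fan-out over `runtime.NumCPU` described here is a standard pattern; a sketch with stand-in types (`Event` and the `publish` callback are not the relay's real types):

```go
package sketch

import (
    "log"
    "runtime"
    "sync"
)

// Event stands in for the relay's event type.
type Event struct{ ID string }

// publishAll fans events out to one worker goroutine per CPU core.
func publishAll(events []Event, publish func(Event) error) {
    jobs := make(chan Event)
    var wg sync.WaitGroup
    for i := 0; i < runtime.NumCPU(); i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for ev := range jobs {
                if err := publish(ev); err != nil {
                    log.Printf("publish %s failed: %v", ev.ID, err)
                }
            }
        }()
    }
    for _, ev := range events {
        jobs <- ev
    }
    close(jobs)
    wg.Wait()
}
```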
58a9e83038 Refactor publishCacheEvents and publisherWorker to use fire-and-forget publishing.
- Replaced `Publish` calls with direct event envelope writes, removing wait-for-OK behavior.
- Simplified `publishCacheEvents` logic, removed per-publish timeout contexts, and updated return values.
- Adjusted log messages to reflect "sent" instead of "published."
- Enhanced relay stability with delays between successful publishes.
- Removed unused `publishTimeout` parameter from `publisherWorker` and main logic.
2025-09-20 03:48:50 +01:00
22cde96f3f Remove bufpool references and unused imports, optimize memory operations.
- Removed `bufpool` usage throughout `tag`, `tags`, and `event` packages for memory efficiency.
- Replaced in-place buffer modifications with independent, deep-copied allocations to prevent unintended mutations.
- Added new `Clone` method for deep copying `event.E`.
- Ensured valid JSON emission for nil `Tags` in `event` marshaling.
- Introduced `cmd/stresstest` for relay stress-testing with detailed workload generation and query simulation.
2025-09-19 16:17:44 +01:00
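An illustrative deep copy in the spirit of the new `Clone` method; the real `event.E` has more fields:

```go
package sketch

// E mirrors just enough of event.E to show the deep copy.
type E struct {
    ID, Pubkey, Content []byte
    Tags                [][]byte
}

// Clone copies every byte slice so a delivered event cannot be
// corrupted when the original's buffers are reused or freed.
func (e *E) Clone() *E {
    c := &E{
        ID:      append([]byte(nil), e.ID...),
        Pubkey:  append([]byte(nil), e.Pubkey...),
        Content: append([]byte(nil), e.Content...),
    }
    for _, t := range e.Tags {
        c.Tags = append(c.Tags, append([]byte(nil), t...))
    }
    return c
}
```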
49a172820a Remove unused dependencies and update lol.mleku.dev to v1.0.3. 2025-09-15 05:08:16 +01:00
9d2bf173fe Bump lol.mleku.dev to v1.0.3. 2025-09-15 05:05:52 +01:00
e521b788fb Delete outdated benchmark reports and results.
Removed old benchmark reports and detailed logs from the repository to clean up unnecessary files. These reports appear to be auto-generated and no longer relevant for ongoing development.
2025-09-15 05:00:19 +01:00
f5cce92bf8 Handle nil receiver S in ContainsAny method within tags.go. 2025-09-13 21:23:59 +01:00
2ccdc5e756 Bump version to v0.4.7. 2025-09-13 21:19:01 +01:00
173a34784f Remove redundant logging in acl/follows.go and get-indexes-from-filter.go, handle nil Tags in event.go. 2025-09-13 21:17:53 +01:00
a75e0994f9 Add debug logging for admins in ACL follows evaluation logic 2025-09-13 21:08:29 +01:00
60e925d748 added profiler tooling to enable automated generation of profile reports 2025-09-13 21:05:30 +01:00
3d2f970f04 added profiler tooling to enable automated generation of profile reports 2025-09-13 20:49:25 +01:00
935eb1fb0b added profiler tooling to enable automated generation of profile reports 2025-09-13 13:06:52 +01:00
509aac3819 Remove unused ACL integration and related configuration logic, bump version to v0.4.6.
2025-09-13 11:33:01 +01:00
126 changed files with 15505 additions and 17467 deletions

View File

@@ -94,4 +94,6 @@ use the source of the relay-tester to help guide what expectations the test has,
and use context7 for information about the nostr protocol, and use additional
log statements to help locate the cause of bugs
always use Go v1.25.1 for everything involving Go
always use the nips repository also for information, found at ../github.com/nostr-protocol/nips attached to the project

18
.dockerignore Normal file

@@ -0,0 +1,18 @@
# Exclude heavy or host-specific data from Docker build context
# Fixes: failed to solve: error from sender: open cmd/benchmark/data/postgres: permission denied
# Benchmark data and reports (mounted at runtime via volumes)
cmd/benchmark/data/
cmd/benchmark/reports/
# VCS and OS cruft
.git
.gitignore
**/.DS_Store
**/Thumbs.db
# Go build cache and binaries
**/bin/
**/dist/
**/build/
**/*.out

8
.gitignore vendored

@@ -91,6 +91,14 @@ cmd/benchmark/data
!Dockerfile*
!strfry.conf
!config.toml
!.dockerignore
!*.jsx
!*.tsx
!app/web/dist
!/app/web/dist
!/app/web/dist/*
!/app/web/dist/**
!bun.lock
# ...even if they are in subdirectories
!*/
/blocklist.json

364
APACHE-PROXY-GUIDE.md Normal file

@@ -0,0 +1,364 @@
# Apache Reverse Proxy Guide for Docker Apps
**Complete guide for WebSocket-enabled applications - covers both Plesk and Standard Apache**
**Updated with real-world troubleshooting solutions**
## 🎯 **What This Solves**
- WebSocket connection failures (`NS_ERROR_WEBSOCKET_CONNECTION_REFUSED`)
- Nostr relay connectivity issues (`HTTP 426` instead of WebSocket upgrade)
- Docker container proxy configuration
- SSL certificate integration
- Plesk configuration conflicts and virtual host precedence issues
## 🐳 **Step 1: Deploy Your Docker Application**
### **For Stella's Orly Relay:**
```bash
# Pull and run the relay
docker run -d \
--name stella-relay \
--restart unless-stopped \
-p 127.0.0.1:7777:7777 \
-v /data/orly-relay:/data \
-e ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx \
-e ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z \
silberengel/orly-relay:latest
# Test the relay
curl -I http://127.0.0.1:7777
# Should return: HTTP/1.1 426 Upgrade Required
```
### **For Web Apps (like Jumble):**
```bash
# Run with fixed port for easier proxy setup
docker run -d \
--name jumble-app \
--restart unless-stopped \
-p 127.0.0.1:3000:80 \
-e NODE_ENV=production \
silberengel/imwald-jumble:latest
# Test the app
curl -I http://127.0.0.1:3000
```
## 🔧 **Step 2: Configure the Reverse Proxy (Standard Apache or Plesk)**
### **For Your Friend's Standard Apache Setup:**
**Tell your friend to create `/etc/apache2/sites-available/domain.conf`:**
```apache
<VirtualHost *:443>
    ServerName your-domain.com

    # SSL Configuration (Let's Encrypt)
    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem

    # Enable required modules first:
    # sudo a2enmod proxy proxy_http proxy_wstunnel rewrite headers ssl

    # Proxy settings
    ProxyPreserveHost On
    ProxyRequests Off

    # WebSocket upgrade handling - CRITICAL for apps with WebSockets
    RewriteEngine On
    RewriteCond %{HTTP:Upgrade} websocket [NC]
    RewriteCond %{HTTP:Connection} upgrade [NC]
    RewriteRule ^/?(.*) "ws://127.0.0.1:PORT/$1" [P,L]

    # Regular HTTP proxy
    ProxyPass / http://127.0.0.1:PORT/
    ProxyPassReverse / http://127.0.0.1:PORT/

    # Headers for modern web apps
    Header always set X-Forwarded-Proto "https"
    Header always set X-Forwarded-Port "443"
    Header always set X-Forwarded-For %{REMOTE_ADDR}s

    # Security headers
    Header always set Strict-Transport-Security "max-age=63072000; includeSubDomains"
    Header always set X-Content-Type-Options nosniff
    Header always set X-Frame-Options SAMEORIGIN
</VirtualHost>

# Redirect HTTP to HTTPS
<VirtualHost *:80>
    ServerName your-domain.com
    Redirect permanent / https://your-domain.com/
</VirtualHost>
```
**Then enable it:**
```bash
sudo a2ensite domain.conf
sudo systemctl reload apache2
```
### **For Plesk Users (You):**
⚠️ **Important**: Plesk often doesn't apply Apache directives correctly through the interface. If the interface method fails, use the "Direct Apache Override" method below.
#### **Method 1: Plesk Interface (Try First)**
1. **Go to Plesk** → Websites & Domains → **your-domain.com**
2. **Click "Apache & nginx Settings"**
3. **DISABLE nginx** (uncheck "Proxy mode" and "Smart static files processing")
4. **Clear HTTP section** (leave empty)
5. **In HTTPS section, add:**
**For Nostr Relay (port 7777):**
```apache
ProxyRequests Off
ProxyPreserveHost On
ProxyPass / ws://127.0.0.1:7777/
ProxyPassReverse / ws://127.0.0.1:7777/
Header always set Access-Control-Allow-Origin "*"
```
6. **Click "Apply"** and wait 60 seconds
#### **Method 2: Direct Apache Override (If Plesk Interface Fails)**
If Plesk doesn't apply your configuration (common issue), bypass it entirely:
```bash
# Create direct Apache override
sudo tee /etc/apache2/conf-available/relay-override.conf << 'EOF'
<VirtualHost YOUR_SERVER_IP:443>
    ServerName your-domain.com
    ServerAlias www.your-domain.com
    ServerAlias ipv4.your-domain.com

    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem

    DocumentRoot /var/www/relay

    # For Nostr relay - proxy everything to WebSocket
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    # CORS headers
    Header always set Access-Control-Allow-Origin "*"
    Header always set Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"

    # Logging
    ErrorLog /var/log/apache2/relay-error.log
    CustomLog /var/log/apache2/relay-access.log combined
</VirtualHost>
EOF
# Enable the override
sudo a2enconf relay-override
sudo mkdir -p /var/www/relay
sudo systemctl restart apache2
# Remove Plesk config if it conflicts
sudo rm /etc/apache2/plesk.conf.d/vhosts/your-domain.com.conf
```
#### **Method 3: Debugging Plesk Issues**
If configurations aren't being applied:
```bash
# Check if Plesk applied your config
grep -E "(ProxyPass|proxy)" /etc/apache2/plesk.conf.d/vhosts/your-domain.com.conf
# Check virtual host precedence
apache2ctl -S | grep your-domain.com
# Check Apache modules
apache2ctl -M | grep -E "(proxy|rewrite)"
```
#### **For Web Apps (port 3000 or 32768):**
```apache
ProxyPreserveHost On
ProxyRequests Off
# WebSocket upgrade handling
RewriteEngine On
RewriteCond %{HTTP:Upgrade} websocket [NC]
RewriteCond %{HTTP:Connection} upgrade [NC]
RewriteRule ^/?(.*) "ws://127.0.0.1:32768/$1" [P,L]
# Regular HTTP proxy
ProxyPass / http://127.0.0.1:32768/
ProxyPassReverse / http://127.0.0.1:32768/
# Headers
ProxyAddHeaders On
Header always set X-Forwarded-Proto "https"
Header always set X-Forwarded-Port "443"
```
## ⚡ **Step 3: Enable Required Modules**
In Plesk, you might need to enable modules. SSH to your server:
```bash
# Enable Apache modules
sudo a2enmod proxy
sudo a2enmod proxy_http
sudo a2enmod proxy_wstunnel
sudo a2enmod rewrite
sudo systemctl restart apache2
```
## ⚡ **Step 4: Alternative - Nginx in Plesk**
If Apache keeps giving issues, switch to Nginx in Plesk:
1. Go to Plesk → Websites & Domains → orly-relay.imwald.eu
2. Click "Apache & nginx Settings"
3. Enable "nginx" and set it to serve static files
4. In "Additional nginx directives" add:
```nginx
location / {
    proxy_pass http://127.0.0.1:7777;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
}
```
## 🧪 **Testing**
After making changes:
1. **Apply settings** in Plesk
2. **Wait 30 seconds** for changes to take effect
3. **Test WebSocket**:
```bash
# From your server
echo '["REQ","test",{}]' | websocat wss://orly-relay.imwald.eu/
```
## 🎯 **Expected Result**
- ✅ No more "websocket error" in browser console
- ✅ `wss://orly-relay.imwald.eu/` connects successfully
- ✅ Jumble app can publish notes
## 🚨 **Real-World Troubleshooting Guide**
*Based on actual deployment experience with Plesk and WebSocket issues*
### **Critical Issues & Solutions:**
#### **🔴 HTTP 503 Service Unavailable**
- **Cause**: Docker container not running
- **Check**: `docker ps | grep relay`
- **Fix**: `docker start container-name`
#### **🔴 HTTP 426 Instead of WebSocket Upgrade**
- **Cause**: Apache using `http://` proxy instead of `ws://`
- **Fix**: Use `ProxyPass / ws://127.0.0.1:7777/` (not `http://`)
#### **🔴 Plesk Configuration Not Applied**
- **Symptom**: Config not in `/etc/apache2/plesk.conf.d/vhosts/domain.conf`
- **Solution**: Use Direct Apache Override method (bypass Plesk interface)
#### **🔴 Virtual Host Conflicts**
- **Check**: `apache2ctl -S | grep domain.com`
- **Fix**: Remove Plesk config: `sudo rm /etc/apache2/plesk.conf.d/vhosts/domain.conf`
#### **🔴 Nginx Intercepting (Plesk)**
- **Symptom**: Response shows `Server: nginx`
- **Fix**: Disable nginx in Plesk settings
### **Debug Commands:**
```bash
# Essential debugging
docker ps | grep relay # Container running?
curl -I http://127.0.0.1:7777 # Local relay (should return 426)
apache2ctl -S | grep domain.com # Virtual host precedence
grep ProxyPass /etc/apache2/plesk.conf.d/vhosts/domain.conf # Config applied?
# WebSocket testing
echo '["REQ","test",{}]' | websocat wss://domain.com/ # Root path
echo '["REQ","test",{}]' | websocat wss://domain.com/ws/ # /ws/ path
```
### **Working Solution (Proven):**
```apache
<VirtualHost SERVER_IP:443>
    ServerName domain.com

    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/domain.com/privkey.pem

    DocumentRoot /var/www/relay

    # Direct WebSocket proxy - this is the key!
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    Header always set Access-Control-Allow-Origin "*"
</VirtualHost>
```
---
**Key Lessons**:
1. Plesk interface often fails to apply Apache directives
2. Use `ws://` proxy for Nostr relays, not `http://`
3. Direct Apache config files are more reliable than Plesk interface
4. Always check virtual host precedence with `apache2ctl -S`

188
DOCKER.md Normal file

@@ -0,0 +1,188 @@
# Docker Deployment Guide
## Quick Start
### 1. Basic Relay Setup
```bash
# Build and start the relay
docker-compose up -d
# View logs
docker-compose logs -f stella-relay
# Stop the relay
docker-compose down
```
### 2. With Nginx Proxy (for SSL/domain setup)
```bash
# Start relay with nginx proxy
docker-compose --profile proxy up -d
# Configure SSL certificates in nginx/ssl/
# Then update nginx/nginx.conf to enable HTTPS
```
## Configuration
### Environment Variables
Copy `env.example` to `.env` and customize:
```bash
cp env.example .env
# Edit .env with your settings
```
Key settings:
- `ORLY_OWNERS`: Owner npubs (comma-separated, full control)
- `ORLY_ADMINS`: Admin npubs (comma-separated, deletion permissions)
- `ORLY_PORT`: Port to listen on (default: 7777)
- `ORLY_MAX_CONNECTIONS`: Max concurrent connections
- `ORLY_CONCURRENT_WORKERS`: CPU cores for concurrent processing (0 = auto)
### Data Persistence
The relay data is stored in the `./data` directory, which is mounted as a volume.
### Performance Tuning
Based on the v0.4.8 optimizations:
- Concurrent event publishing using all CPU cores
- Optimized BadgerDB access patterns
- Configurable batch sizes and cache settings
## Development
### Local Build
```bash
# Pull the latest image (recommended)
docker pull silberengel/orly-relay:latest
# Or build locally if needed
docker build -t silberengel/orly-relay:latest .
# Run with custom settings
docker run -p 7777:7777 -v $(pwd)/data:/data silberengel/orly-relay:latest
```
### Testing
```bash
# Test WebSocket connection
websocat ws://localhost:7777
# Run stress tests (if available in cmd/stresstest)
go run ./cmd/stresstest -relay ws://localhost:7777
```
## Production Deployment
### SSL Setup
1. Get SSL certificates (Let's Encrypt recommended)
2. Place certificates in `nginx/ssl/`
3. Update `nginx/nginx.conf` to enable HTTPS
4. Start with proxy profile: `docker-compose --profile proxy up -d`
### Monitoring
- Health checks are configured for both services
- Logs are rotated (max 10MB, 3 files)
- Resource limits are set to prevent runaway processes
### Security
- Runs as non-root user (uid 1000)
- Rate limiting configured in nginx
- Configurable authentication and event size limits
## Troubleshooting
### Common Issues (Real-World Experience)
#### **Container Issues:**
1. **Port already in use**: Change `ORLY_PORT` in docker-compose.yml
2. **Permission denied**: Ensure `./data` directory is writable
3. **Container won't start**: Check logs with `docker logs container-name`
#### **WebSocket Issues:**
4. **HTTP 426 instead of WebSocket upgrade**:
- Use `ws://127.0.0.1:7777` in proxy config, not `http://`
- Ensure `proxy_wstunnel` module is enabled
5. **Connection refused in browser but works with websocat**:
- Clear browser cache and service workers
- Try incognito mode
- Add CORS headers to Apache/nginx config
#### **Plesk-Specific Issues:**
6. **Plesk not applying Apache directives**:
- Check if config appears in `/etc/apache2/plesk.conf.d/vhosts/domain.conf`
- Use direct Apache override if Plesk interface fails
7. **Virtual host conflicts**:
- Check precedence with `apache2ctl -S`
- Remove conflicting Plesk configs if needed
#### **SSL Certificate Issues:**
8. **Self-signed certificate after Let's Encrypt**:
- Plesk might not be using the correct certificate
- Import Let's Encrypt certs into Plesk or use direct Apache config
### Debug Commands
```bash
# Container debugging
docker ps | grep relay
docker logs stella-relay
curl -I http://127.0.0.1:7777 # Should return HTTP 426
# WebSocket testing
echo '["REQ","test",{}]' | websocat wss://domain.com/
echo '["REQ","test",{}]' | websocat wss://domain.com/ws/
# Apache debugging (for reverse proxy issues)
apache2ctl -S | grep domain.com
apache2ctl -M | grep -E "(proxy|rewrite)"
grep ProxyPass /etc/apache2/plesk.conf.d/vhosts/domain.conf
```
### Logs
```bash
# View relay logs
docker-compose logs -f stella-relay
# View nginx logs (if using proxy)
docker-compose logs -f nginx
# Apache logs (for reverse proxy debugging)
sudo tail -f /var/log/apache2/error.log
sudo tail -f /var/log/apache2/domain-error.log
```
### Working Reverse Proxy Config
**For Apache (direct config file):**
```apache
<VirtualHost SERVER_IP:443>
    ServerName domain.com

    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/domain.com/privkey.pem

    # Direct WebSocket proxy for Nostr relay
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    Header always set Access-Control-Allow-Origin "*"
</VirtualHost>
```
---
*Crafted for Stella's digital forest* 🌲

78
Dockerfile Normal file

@@ -0,0 +1,78 @@
# Dockerfile for Stella's Nostr Relay (next.orly.dev)
# Owner: npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx
FROM golang:alpine AS builder
# Install build dependencies
RUN apk add --no-cache \
git \
build-base \
autoconf \
automake \
libtool \
pkgconfig
# Install secp256k1 library from Alpine packages
RUN apk add --no-cache libsecp256k1-dev
# Set working directory
WORKDIR /build
# Copy go modules first (for better caching)
COPY go.mod go.sum ./
RUN go mod download
# Copy source code
COPY . .
# Build the relay with optimizations from v0.4.8
RUN CGO_ENABLED=1 GOOS=linux go build -ldflags "-w -s" -o relay .
# Create non-root user for security
RUN adduser -D -u 1000 stella && \
chown -R 1000:1000 /build
# Final stage - minimal runtime image
FROM alpine:latest
# Install only runtime dependencies
RUN apk add --no-cache \
ca-certificates \
curl \
libsecp256k1 \
libsecp256k1-dev
WORKDIR /app
# Copy binary from builder
COPY --from=builder /build/relay /app/relay
# Create runtime user and directories
RUN adduser -D -u 1000 stella && \
mkdir -p /data /profiles /app && \
chown -R 1000:1000 /data /profiles /app
# Expose the relay port
EXPOSE 7777
# Set environment variables for Stella's relay
ENV ORLY_DATA_DIR=/data
ENV ORLY_LISTEN=0.0.0.0
ENV ORLY_PORT=7777
ENV ORLY_LOG_LEVEL=info
ENV ORLY_MAX_CONNECTIONS=1000
ENV ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx
ENV ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z
# Health check to ensure relay is responding
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD sh -c "code=\$(curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:7777 || echo 000); echo \$code | grep -E '^(101|200|400|404|426)$' >/dev/null || exit 1"
# Create volume for persistent data
VOLUME ["/data"]
# Drop privileges and run as stella user
USER 1000:1000
# Run Stella's Nostr relay
CMD ["/app/relay"]

101
SERVICE-WORKER-FIX.md Normal file

@@ -0,0 +1,101 @@
# Service Worker Certificate Caching Fix
## 🚨 **Problem**
When accessing Jumble from the ImWald landing page, the service worker serves responses cached under the old self-signed certificate instead of fetching fresh content over the new Let's Encrypt certificate.
## ⚡ **Solutions**
### **Option 1: Force Service Worker Update**
Add this to your Jumble app's service worker or main JavaScript:
```javascript
// Force service worker update and certificate refresh
if ('serviceWorker' in navigator) {
    navigator.serviceWorker.getRegistrations().then(function(registrations) {
        for (let registration of registrations) {
            registration.update(); // Force update
        }
    });
}

// Clear all caches on certificate update
if ('caches' in window) {
    caches.keys().then(function(names) {
        for (let name of names) {
            caches.delete(name);
        }
    });
}
```
### **Option 2: Update Service Worker Cache Strategy**
In your service worker file, add cache busting for SSL-sensitive requests:
```javascript
// In your service worker
self.addEventListener('fetch', function(event) {
    // Don't cache HTTPS requests that might have certificate issues
    if (event.request.url.startsWith('https://') &&
        event.request.url.includes('imwald.eu')) {
        event.respondWith(
            fetch(event.request, { cache: 'no-store' })
        );
        return;
    }
    // Your existing fetch handling...
});
```
### **Option 3: Version Your Service Worker**
Update your service worker with a new version number:
```javascript
// At the top of your service worker
const CACHE_VERSION = 'v2.0.1'; // Increment this when certificates change
const CACHE_NAME = `jumble-cache-${CACHE_VERSION}`;

// Clear old caches
self.addEventListener('activate', function(event) {
    event.waitUntil(
        caches.keys().then(function(cacheNames) {
            return Promise.all(
                cacheNames.map(function(cacheName) {
                    if (cacheName !== CACHE_NAME) {
                        return caches.delete(cacheName);
                    }
                })
            );
        })
    );
});
```
### **Option 4: Add Cache Headers**
In your Plesk Apache config for Jumble, add:
```apache
# Prevent service worker from caching SSL-sensitive content
Header always set Cache-Control "no-cache, no-store, must-revalidate"
Header always set Pragma "no-cache"
Header always set Expires "0"
# Only for service worker file
<Files "sw.js">
    Header always set Cache-Control "no-cache, no-store, must-revalidate"
</Files>
```
## 🧹 **Immediate User Fix**
For users experiencing the certificate issue:
1. **Clear browser data** for jumble.imwald.eu
2. **Unregister service worker**:
- F12 → Application → Service Workers → Unregister
3. **Hard refresh**: Ctrl+Shift+R
4. **Or use incognito mode** to test
---
This will prevent the service worker from serving stale certificate data.

109
WEBSOCKET-DEBUG.md Normal file

@@ -0,0 +1,109 @@
# WebSocket Connection Debug Guide
## 🚨 **Current Issue**
`wss://orly-relay.imwald.eu/` returns `NS_ERROR_WEBSOCKET_CONNECTION_REFUSED`
## 🔍 **Debug Steps**
### **Step 1: Verify Relay is Running**
```bash
# On your server
curl -I http://127.0.0.1:7777
# Should return: HTTP/1.1 426 Upgrade Required
docker ps | grep stella
# Should show running container
```
### **Step 2: Test Apache Modules**
```bash
# Check if WebSocket modules are enabled
apache2ctl -M | grep -E "(proxy|rewrite)"
# If missing, enable them:
sudo a2enmod proxy
sudo a2enmod proxy_http
sudo a2enmod proxy_wstunnel
sudo a2enmod rewrite
sudo a2enmod headers
sudo systemctl restart apache2
```
### **Step 3: Check Apache Configuration**
```bash
# Check what Plesk generated
sudo cat /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf
# Look for proxy and rewrite rules
grep -E "(Proxy|Rewrite)" /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf
```
### **Step 4: Test Direct WebSocket Connection**
```bash
# Test if the issue is Apache or the relay itself
echo '["REQ","test",{}]' | websocat ws://127.0.0.1:7777/
# If that works, the issue is Apache proxy
# If that fails, the issue is the relay
```
### **Step 5: Check Apache Error Logs**
```bash
# Watch Apache errors in real-time
sudo tail -f /var/log/apache2/error.log
# Then try connecting to wss://orly-relay.imwald.eu/ and see what errors appear
```
## 🔧 **Specific Plesk Fix**
Based on your current status, try this **exact configuration** in Plesk:
### **Go to Apache & nginx Settings for orly-relay.imwald.eu:**
**Clear both HTTP and HTTPS sections, then add to HTTPS:**
```apache
# Enable proxy
ProxyRequests Off
ProxyPreserveHost On
# WebSocket handling - the key part
RewriteEngine On
RewriteCond %{HTTP:Upgrade} =websocket [NC]
RewriteCond %{HTTP:Connection} upgrade [NC]
RewriteRule /(.*) ws://127.0.0.1:7777/$1 [P,L]
# Fallback for regular HTTP
RewriteCond %{HTTP:Upgrade} !=websocket [NC]
RewriteRule /(.*) http://127.0.0.1:7777/$1 [P,L]
# Headers
ProxyAddHeaders On
```
### **Alternative Simpler Version:**
If the above doesn't work, try just:
```apache
ProxyPass / http://127.0.0.1:7777/
ProxyPassReverse / http://127.0.0.1:7777/
ProxyPass /ws ws://127.0.0.1:7777/
ProxyPassReverse /ws ws://127.0.0.1:7777/
```
## 🧪 **Testing Commands**
```bash
# Test the WebSocket after each change
echo '["REQ","test",{}]' | websocat wss://orly-relay.imwald.eu/
# Check what's actually being served
curl -v https://orly-relay.imwald.eu/ 2>&1 | grep -E "(HTTP|upgrade|connection)"
```
## 🎯 **Expected Fix**
The issue is likely that Apache isn't properly handling the WebSocket upgrade request. The `proxy_wstunnel` module and correct rewrite rules should fix this.
Try the **simpler ProxyPass version first** - it's often more reliable in Plesk environments.

View File

@@ -23,19 +23,33 @@ import (
// and default values. It defines parameters for app behaviour, storage
// locations, logging, and network settings used across the relay service.
type C struct {
AppName string `env:"ORLY_APP_NAME" usage:"set a name to display on information about the relay" default:"ORLY"`
DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/share/ORLY"`
Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
HealthPort int `env:"ORLY_HEALTH_PORT" default:"0" usage:"optional health check HTTP port; 0 disables"`
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"relay log level: fatal error warn info debug trace"`
DBLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
LogToStdout bool `env:"ORLY_LOG_TO_STDOUT" default:"false" usage:"log to stdout instead of stderr"`
Pprof string `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation"`
IPWhitelist []string `env:"ORLY_IP_WHITELIST" usage:"comma-separated list of IP addresses to allow access from, matches on prefixes to allow private subnets, eg 10.0.0 = 10.0.0.0/8"`
Admins []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
Owners []string `env:"ORLY_OWNERS" usage:"comma-separated list of owner npubs, who have full control of the relay for wipe and restart and other functions"`
ACLMode string `env:"ORLY_ACL_MODE" usage:"ACL mode: follows,none" default:"none"`
AppName string `env:"ORLY_APP_NAME" usage:"set a name to display on information about the relay" default:"ORLY"`
DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/share/ORLY"`
Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
HealthPort int `env:"ORLY_HEALTH_PORT" default:"0" usage:"optional health check HTTP port; 0 disables"`
EnableShutdown bool `env:"ORLY_ENABLE_SHUTDOWN" default:"false" usage:"if true, expose /shutdown on the health port to gracefully stop the process (for profiling)"`
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"relay log level: fatal error warn info debug trace"`
DBLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
LogToStdout bool `env:"ORLY_LOG_TO_STDOUT" default:"false" usage:"log to stdout instead of stderr"`
Pprof string `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation,heap,block,goroutine,threadcreate,mutex"`
PprofPath string `env:"ORLY_PPROF_PATH" usage:"optional directory to write pprof profiles into (inside container); default is temporary dir"`
PprofHTTP bool `env:"ORLY_PPROF_HTTP" default:"false" usage:"if true, expose net/http/pprof on port 6060"`
OpenPprofWeb bool `env:"ORLY_OPEN_PPROF_WEB" default:"false" usage:"if true, automatically open the pprof web viewer when profiling is enabled"`
IPWhitelist []string `env:"ORLY_IP_WHITELIST" usage:"comma-separated list of IP addresses to allow access from, matches on prefixes to allow private subnets, eg 10.0.0 = 10.0.0.0/8"`
Admins []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
Owners []string `env:"ORLY_OWNERS" usage:"comma-separated list of owner npubs, who have full control of the relay for wipe and restart and other functions"`
ACLMode string `env:"ORLY_ACL_MODE" usage:"ACL mode: follows,none" default:"none"`
SpiderMode string `env:"ORLY_SPIDER_MODE" usage:"spider mode: none,follow" default:"none"`
SpiderFrequency time.Duration `env:"ORLY_SPIDER_FREQUENCY" usage:"spider frequency in seconds" default:"1h"`
NWCUri string `env:"ORLY_NWC_URI" usage:"NWC (Nostr Wallet Connect) connection string for Lightning payments"`
SubscriptionEnabled bool `env:"ORLY_SUBSCRIPTION_ENABLED" default:"false" usage:"enable subscription-based access control requiring payment for non-directory events"`
MonthlyPriceSats int64 `env:"ORLY_MONTHLY_PRICE_SATS" default:"6000" usage:"price in satoshis for one month subscription (default ~$2 USD)"`
RelayURL string `env:"ORLY_RELAY_URL" usage:"base URL for the relay dashboard (e.g., https://relay.example.com)"`
// Web UI and dev mode settings
WebDisableEmbedded bool `env:"ORLY_WEB_DISABLE" default:"false" usage:"disable serving the embedded web UI; useful for hot-reload during development"`
WebDevProxyURL string `env:"ORLY_WEB_DEV_PROXY_URL" usage:"when ORLY_WEB_DISABLE is true, reverse-proxy non-API paths to this dev server URL (e.g. http://localhost:5173)"`
}
// New creates and initializes a new configuration object for the relay
@@ -126,6 +140,21 @@ func GetEnv() (requested bool) {
return
}
// IdentityRequested checks if the first command line argument is "identity" and returns
// whether the relay identity should be printed and the program should exit.
//
// Return Values
// - requested: true if the 'identity' subcommand was provided, false otherwise.
func IdentityRequested() (requested bool) {
    if len(os.Args) > 1 {
        switch strings.ToLower(os.Args[1]) {
        case "identity":
            requested = true
        }
    }
    return
}
// KV is a key/value pair.
type KV struct{ Key, Value string }

View File

@@ -50,6 +50,34 @@ func (l *Listener) HandleAuth(b []byte) (err error) {
            env.Event.Pubkey,
        )
        l.authedPubkey.Store(env.Event.Pubkey)
        // Check if this is a first-time user and create welcome note
        go l.handleFirstTimeUser(env.Event.Pubkey)
    }
    return
}
// handleFirstTimeUser checks if user is logging in for first time and creates welcome note
func (l *Listener) handleFirstTimeUser(pubkey []byte) {
    // Check if this is a first-time user
    isFirstTime, err := l.Server.D.IsFirstTimeUser(pubkey)
    if err != nil {
        log.E.F("failed to check first-time user status: %v", err)
        return
    }
    if !isFirstTime {
        return // Not a first-time user
    }
    // Get payment processor to create welcome note
    if l.Server.paymentProcessor != nil {
        // Set the dashboard URL based on the current HTTP request
        dashboardURL := l.Server.DashboardURL(l.req)
        l.Server.paymentProcessor.SetDashboardURL(dashboardURL)
        if err := l.Server.paymentProcessor.CreateWelcomeNote(pubkey); err != nil {
            log.E.F("failed to create welcome note for first-time user: %v", err)
        }
    }
}

View File

@@ -145,12 +145,10 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
if ev, err = l.FetchEventBySerial(s); chk.E(err) {
continue
}
// check that the author is the same as the signer of the
// delete, for the e tag case the author is the signer of
// the event.
if !utils.FastEqual(env.E.Pubkey, ev.Pubkey) {
// allow deletion if the signer is the author OR an admin/owner
if !(ownerDelete || utils.FastEqual(env.E.Pubkey, ev.Pubkey)) {
log.W.F(
"HandleDelete: attempted deletion of event %s by different user - delete pubkey=%s, event pubkey=%s",
"HandleDelete: attempted deletion of event %s by unauthorized user - delete pubkey=%s, event pubkey=%s",
hex.Enc(ev.ID), hex.Enc(env.E.Pubkey),
hex.Enc(ev.Pubkey),
)

View File

@@ -103,6 +103,20 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
// user has write access or better, continue
// log.D.F("user has %s access", accessLevel)
}
// check for protected tag (NIP-70)
protectedTag := env.E.Tags.GetFirst([]byte("-"))
if protectedTag != nil && acl.Registry.Active.Load() != "none" {
// check that the pubkey of the event matches the authed pubkey
if !utils.FastEqual(l.authedPubkey.Load(), env.E.Pubkey) {
if err = Ok.Blocked(
l, env,
"protected tag may only be published by user authed to the same pubkey",
); chk.E(err) {
return
}
return
}
}
// if the event is a delete, process the delete
if env.E.Kind == kind.EventDeletion.K {
if err = l.HandleDelete(env); err != nil {
@@ -151,7 +165,9 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
return
}
// Deliver the event to subscribers immediately after sending OK response
go l.publishers.Deliver(env.E)
// Clone the event to prevent corruption when the original is freed
clonedEvent := env.E.Clone()
go l.publishers.Deliver(clonedEvent)
log.D.F("saved event %0x", env.E.ID)
var isNewFromAdmin bool
for _, admin := range l.Admins {

View File

@@ -1,8 +1,9 @@
package app
import (
"fmt"
"lol.mleku.dev/chk"
"lol.mleku.dev/errorf"
"lol.mleku.dev/log"
"next.orly.dev/pkg/encoders/envelopes"
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
@@ -13,39 +14,65 @@ import (
)
func (l *Listener) HandleMessage(msg []byte, remote string) {
log.D.F("%s received message:\n%s", remote, msg)
msgPreview := string(msg)
if len(msgPreview) > 150 {
msgPreview = msgPreview[:150] + "..."
}
log.D.F("%s processing message (len=%d): %s", remote, len(msg), msgPreview)
l.msgCount++
var err error
var t string
var rem []byte
if t, rem, err = envelopes.Identify(msg); !chk.E(err) {
switch t {
case eventenvelope.L:
// log.D.F("eventenvelope: %s %s", remote, rem)
err = l.HandleEvent(rem)
case reqenvelope.L:
// log.D.F("reqenvelope: %s %s", remote, rem)
err = l.HandleReq(rem)
case closeenvelope.L:
// log.D.F("closeenvelope: %s %s", remote, rem)
err = l.HandleClose(rem)
case authenvelope.L:
// log.D.F("authenvelope: %s %s", remote, rem)
err = l.HandleAuth(rem)
default:
err = errorf.E("unknown envelope type %s\n%s", t, rem)
// Attempt to identify the envelope type
if t, rem, err = envelopes.Identify(msg); err != nil {
log.E.F("%s envelope identification FAILED (len=%d): %v", remote, len(msg), err)
log.D.F("%s malformed message content: %q", remote, msgPreview)
chk.E(err)
// Send error notice to client
if noticeErr := noticeenvelope.NewFrom("malformed message: " + err.Error()).Write(l); noticeErr != nil {
log.E.F("%s failed to send malformed message notice: %v", remote, noticeErr)
}
return
}
log.D.F("%s identified envelope type: %s (payload_len=%d)", remote, t, len(rem))
// Process the identified envelope type
switch t {
case eventenvelope.L:
log.D.F("%s processing EVENT envelope", remote)
l.eventCount++
err = l.HandleEvent(rem)
case reqenvelope.L:
log.D.F("%s processing REQ envelope", remote)
l.reqCount++
err = l.HandleReq(rem)
case closeenvelope.L:
log.D.F("%s processing CLOSE envelope", remote)
err = l.HandleClose(rem)
case authenvelope.L:
log.D.F("%s processing AUTH envelope", remote)
err = l.HandleAuth(rem)
default:
err = fmt.Errorf("unknown envelope type %s", t)
log.E.F("%s unknown envelope type: %s (payload: %q)", remote, t, string(rem))
}
// Handle any processing errors
if err != nil {
// log.D.C(
// func() string {
// return fmt.Sprintf(
// "notice->%s %s", remote, err,
// )
// },
// )
if err = noticeenvelope.NewFrom(err.Error()).Write(l); chk.E(err) {
log.E.F("%s message processing FAILED (type=%s): %v", remote, t, err)
log.D.F("%s error context - original message: %q", remote, msgPreview)
// Send error notice to client
noticeMsg := fmt.Sprintf("%s: %s", t, err.Error())
if noticeErr := noticeenvelope.NewFrom(noticeMsg).Write(l); noticeErr != nil {
log.E.F("%s failed to send error notice after %s processing failure: %v", remote, t, noticeErr)
return
}
log.D.F("%s sent error notice for %s processing failure", remote, t)
} else {
log.D.F("%s message processing SUCCESS (type=%s)", remote, t)
}
}

View File

@@ -4,9 +4,12 @@ import (
"encoding/json"
"net/http"
"sort"
"strings"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/crypto/p256k"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/protocol/relayinfo"
"next.orly.dev/pkg/version"
)
@@ -31,49 +34,66 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
var info *relayinfo.T
supportedNIPs := relayinfo.GetList(
relayinfo.BasicProtocol,
// relayinfo.Authentication,
// relayinfo.EncryptedDirectMessage,
relayinfo.Authentication,
relayinfo.EncryptedDirectMessage,
relayinfo.EventDeletion,
relayinfo.RelayInformationDocument,
// relayinfo.GenericTagQueries,
relayinfo.GenericTagQueries,
// relayinfo.NostrMarketplace,
relayinfo.EventTreatment,
// relayinfo.CommandResults,
relayinfo.CommandResults,
relayinfo.ParameterizedReplaceableEvents,
// relayinfo.ExpirationTimestamp,
relayinfo.ExpirationTimestamp,
relayinfo.ProtectedEvents,
relayinfo.RelayListMetadata,
relayinfo.SearchCapability,
)
if s.Config.ACLMode != "none" {
supportedNIPs = relayinfo.GetList(
relayinfo.BasicProtocol,
relayinfo.Authentication,
// relayinfo.EncryptedDirectMessage,
relayinfo.EncryptedDirectMessage,
relayinfo.EventDeletion,
relayinfo.RelayInformationDocument,
// relayinfo.GenericTagQueries,
relayinfo.GenericTagQueries,
// relayinfo.NostrMarketplace,
relayinfo.EventTreatment,
// relayinfo.CommandResults,
// relayinfo.ParameterizedReplaceableEvents,
// relayinfo.ExpirationTimestamp,
relayinfo.CommandResults,
relayinfo.ParameterizedReplaceableEvents,
relayinfo.ExpirationTimestamp,
relayinfo.ProtectedEvents,
relayinfo.RelayListMetadata,
relayinfo.SearchCapability,
)
}
sort.Sort(supportedNIPs)
log.T.Ln("supported NIPs", supportedNIPs)
// Construct description with dashboard URL
dashboardURL := s.DashboardURL(r)
description := version.Description + " dashboard: " + dashboardURL
// Get relay identity pubkey as hex
var relayPubkey string
if skb, err := s.D.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
sign := new(p256k.Signer)
if err := sign.InitSec(skb); err == nil {
relayPubkey = hex.Enc(sign.Pub())
}
}
info = &relayinfo.T{
Name: s.Config.AppName,
Description: version.Description,
Description: description,
PubKey: relayPubkey,
Nips: supportedNIPs,
Software: version.URL,
Version: version.V,
Version: strings.TrimPrefix(version.V, "v"),
Limitation: relayinfo.Limits{
AuthRequired: s.Config.ACLMode != "none",
RestrictedWrites: s.Config.ACLMode != "none",
PaymentRequired: s.Config.MonthlyPriceSats > 0,
},
Icon: "https://cdn.satellite.earth/ac9778868fbf23b63c47c769a74e163377e6ea94d3f0f31711931663d035c4f6.png",
Icon: "https://i.nostr.build/6wGXAn7Zaw9mHxFg.png",
}
if err := json.NewEncoder(w).Encode(info); chk.E(err) {
}

View File

@@ -4,17 +4,18 @@ import (
"context"
"errors"
"fmt"
"strings"
"time"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
acl "next.orly.dev/pkg/acl"
"next.orly.dev/pkg/acl"
"next.orly.dev/pkg/encoders/bech32encoding"
"next.orly.dev/pkg/encoders/envelopes/authenvelope"
"next.orly.dev/pkg/encoders/envelopes/closedenvelope"
"next.orly.dev/pkg/encoders/envelopes/eoseenvelope"
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
"next.orly.dev/pkg/encoders/envelopes/okenvelope"
"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
@@ -22,21 +23,19 @@ import (
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/reason"
"next.orly.dev/pkg/encoders/tag"
utils "next.orly.dev/pkg/utils"
"next.orly.dev/pkg/utils"
"next.orly.dev/pkg/utils/normalize"
"next.orly.dev/pkg/utils/pointers"
)
func (l *Listener) HandleReq(msg []byte) (err error) {
log.T.F("HandleReq: START processing from %s\n%s\n", l.remote, msg)
var rem []byte
log.D.F("HandleReq: START processing from %s", l.remote)
// var rem []byte
env := reqenvelope.New()
if rem, err = env.Unmarshal(msg); chk.E(err) {
if _, err = env.Unmarshal(msg); chk.E(err) {
return normalize.Error.Errorf(err.Error())
}
if len(rem) > 0 {
log.I.F("REQ extra bytes: '%s'", rem)
}
log.D.C(func() string { return fmt.Sprintf("REQ sub=%s filters=%d", env.Subscription, len(*env.Filters)) })
// send a challenge to the client to auth if an ACL is active
if acl.Registry.Active.Load() != "none" {
if err = authenvelope.NewChallengeWith(l.challenge.Load()).
@@ -48,8 +47,9 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
switch accessLevel {
case "none":
if err = okenvelope.NewFrom(
env.Subscription, false,
// For REQ denial, send a CLOSED with auth-required reason (NIP-01)
if err = closedenvelope.NewFrom(
env.Subscription,
reason.AuthRequired.F("user not authed or has no read access"),
).Write(l); chk.E(err) {
return
@@ -57,94 +57,121 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
return
default:
// user has read access or better, continue
log.D.F("user has %s access", accessLevel)
}
var events event.S
// Create a single context for all filter queries, tied to the connection context, to prevent leaks and support timely cancellation
queryCtx, queryCancel := context.WithTimeout(
l.ctx, 30*time.Second,
)
defer queryCancel()
// Collect all events from all filters
var allEvents event.S
for _, f := range *env.Filters {
idsLen := 0
kindsLen := 0
authorsLen := 0
tagsLen := 0
if f != nil {
if f.Ids != nil {
idsLen = f.Ids.Len()
}
// Summarize filter details for diagnostics (avoid internal fields)
var kindsLen int
if f.Kinds != nil {
kindsLen = f.Kinds.Len()
}
var authorsLen int
if f.Authors != nil {
authorsLen = f.Authors.Len()
}
var idsLen int
if f.Ids != nil {
idsLen = f.Ids.Len()
}
var dtag string
if f.Tags != nil {
tagsLen = f.Tags.Len()
}
}
log.T.F(
"REQ %s: filter summary ids=%d kinds=%d authors=%d tags=%d",
env.Subscription, idsLen, kindsLen, authorsLen, tagsLen,
)
if f != nil && f.Authors != nil && f.Authors.Len() > 0 {
var authors []string
for _, a := range f.Authors.T {
authors = append(authors, hex.Enc(a))
}
log.T.F("REQ %s: authors=%v", env.Subscription, authors)
}
if f != nil && f.Kinds != nil && f.Kinds.Len() > 0 {
log.T.F("REQ %s: kinds=%v", env.Subscription, f.Kinds.ToUint16())
}
if f != nil && f.Ids != nil && f.Ids.Len() > 0 {
var ids []string
for _, id := range f.Ids.T {
ids = append(ids, hex.Enc(id))
if d := f.Tags.GetFirst([]byte("d")); d != nil {
dtag = string(d.Value())
}
}
var lim any
if pointers.Present(f.Limit) {
if f.Limit != nil {
lim = *f.Limit
} else {
lim = nil
}
log.T.F(
"REQ %s: ids filter count=%d ids=%v limit=%v", env.Subscription,
f.Ids.Len(), ids, lim,
)
var since any
if f.Since != nil {
since = f.Since.Int()
}
var until any
if f.Until != nil {
until = f.Until.Int()
}
log.D.C(func() string {
return fmt.Sprintf("REQ %s filter: kinds.len=%d authors.len=%d ids.len=%d d=%q limit=%v since=%v until=%v", env.Subscription, kindsLen, authorsLen, idsLen, dtag, lim, since, until)
})
}
if pointers.Present(f.Limit) {
if f != nil && pointers.Present(f.Limit) {
if *f.Limit == 0 {
continue
}
}
// Use a separate context for QueryEvents to prevent cancellation issues
queryCtx, cancel := context.WithTimeout(
context.Background(), 30*time.Second,
)
defer cancel()
log.T.F(
"HandleReq: About to QueryEvents for %s, main context done: %v",
l.remote, l.ctx.Err() != nil,
)
if events, err = l.QueryEvents(queryCtx, f); chk.E(err) {
var filterEvents event.S
if filterEvents, err = l.QueryEvents(queryCtx, f); chk.E(err) {
if errors.Is(err, badger.ErrDBClosed) {
return
}
log.T.F("HandleReq: QueryEvents error for %s: %v", l.remote, err)
log.E.F("QueryEvents failed for filter: %v", err)
err = nil
continue
}
defer func() {
for _, ev := range events {
ev.Free()
}
}()
log.T.F(
"HandleReq: QueryEvents completed for %s, found %d events",
l.remote, len(events),
)
// Append events from this filter to the overall collection
allEvents = append(allEvents, filterEvents...)
}
events = allEvents
defer func() {
for _, ev := range events {
ev.Free()
}
}()
var tmp event.S
privCheck:
for _, ev := range events {
if kind.IsPrivileged(ev.Kind) &&
accessLevel != "admin" { // admins can see all events
// Check for private tag first
privateTags := ev.Tags.GetAll([]byte("private"))
if len(privateTags) > 0 && accessLevel != "admin" {
pk := l.authedPubkey.Load()
if pk == nil {
continue // no auth, can't access private events
}
// Convert authenticated pubkey to npub for comparison
authedNpub, err := bech32encoding.BinToNpub(pk)
if err != nil {
continue // couldn't convert pubkey, skip
}
// Check if authenticated npub is in any private tag
authorized := false
for _, privateTag := range privateTags {
authorizedNpubs := strings.Split(
string(privateTag.Value()), ",",
)
for _, npub := range authorizedNpubs {
if strings.TrimSpace(npub) == string(authedNpub) {
authorized = true
break
}
}
if authorized {
break
}
}
if !authorized {
continue // not authorized to see this private event
}
tmp = append(tmp, ev)
continue
}
if l.Config.ACLMode != "none" &&
(kind.IsPrivileged(ev.Kind) && accessLevel != "admin") &&
l.authedPubkey.Load() != nil { // admins can see all events
log.T.C(
func() string {
return fmt.Sprintf(
@@ -229,7 +256,7 @@ privCheck:
}
// write the EOSE to signal to the client that all events found have been
// sent.
log.T.F("sending EOSE to %s", l.remote)
log.D.F("sending EOSE to %s", l.remote)
if err = eoseenvelope.NewFrom(env.Subscription).
Write(l); chk.E(err) {
return
@@ -237,7 +264,7 @@ privCheck:
// if the query was for just Ids, we know there can't be any more results,
// so cancel the subscription.
cancel := true
log.T.F(
log.D.F(
"REQ %s: computing cancel/subscription; events_sent=%d",
env.Subscription, len(events),
)
@@ -270,8 +297,8 @@ privCheck:
}
// also, if we received the limit number of events, the subscription is dead
if pointers.Present(f.Limit) {
if len(events) < int(*f.Limit) {
cancel = false
if len(events) >= int(*f.Limit) {
cancel = true
}
}
}
@@ -289,12 +316,8 @@ privCheck:
},
)
} else {
if err = closedenvelope.NewFrom(
env.Subscription, nil,
).Write(l); chk.E(err) {
return
}
// suppress server-sent CLOSED; client will close subscription if desired
}
log.T.F("HandleReq: COMPLETED processing from %s", l.remote)
log.D.F("HandleReq: COMPLETED processing from %s", l.remote)
return
}
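The private-tag authorization above, extracted as a standalone predicate (a sketch: privateTags is assumed to already hold the raw values of the event's "private" tags, and only the strings package is needed):

// npubAuthorized reports whether authedNpub appears in any of the
// comma-separated npub lists carried by the event's "private" tags.
func npubAuthorized(authedNpub string, privateTags []string) bool {
    for _, tagValue := range privateTags {
        for _, npub := range strings.Split(tagValue, ",") {
            if strings.TrimSpace(npub) == authedNpub {
                return true
            }
        }
    }
    return false
}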

View File

@@ -19,7 +19,6 @@ const (
DefaultWriteWait = 10 * time.Second
DefaultPongWait = 60 * time.Second
DefaultPingWait = DefaultPongWait / 2
DefaultReadTimeout = 3 * time.Second // Read timeout to detect stalled connections
DefaultWriteTimeout = 3 * time.Second
DefaultMaxMessageSize = 1 * units.Mb
@@ -59,35 +58,59 @@ whitelist:
if conn, err = websocket.Accept(
w, r, &websocket.AcceptOptions{OriginPatterns: []string{"*"}},
); chk.E(err) {
log.E.F("websocket accept failed from %s: %v", remote, err)
return
}
log.T.F("websocket accepted from %s path=%s", remote, r.URL.String())
conn.SetReadLimit(DefaultMaxMessageSize)
defer conn.CloseNow()
listener := &Listener{
ctx: ctx,
Server: s,
conn: conn,
remote: remote,
req: r,
ctx: ctx,
Server: s,
conn: conn,
remote: remote,
req: r,
startTime: time.Now(),
}
chal := make([]byte, 32)
rand.Read(chal)
listener.challenge.Store([]byte(hex.Enc(chal)))
// If admins are configured, immediately prompt client to AUTH (NIP-42)
if len(s.Config.Admins) > 0 {
// log.D.F("sending initial AUTH challenge to %s", remote)
if s.Config.ACLMode != "none" {
log.D.F("sending AUTH challenge to %s", remote)
if err = authenvelope.NewChallengeWith(listener.challenge.Load()).
Write(listener); chk.E(err) {
log.E.F("failed to send AUTH challenge to %s: %v", remote, err)
return
}
log.D.F("AUTH challenge sent successfully to %s", remote)
}
ticker := time.NewTicker(DefaultPingWait)
go s.Pinger(ctx, conn, ticker, cancel)
defer func() {
// log.D.F("closing websocket connection from %s", remote)
log.D.F("closing websocket connection from %s", remote)
// Cancel context and stop pinger
cancel()
ticker.Stop()
// Cancel all subscriptions for this connection
log.D.F("cancelling subscriptions for %s", remote)
listener.publishers.Receive(&W{Cancel: true})
// Log detailed connection statistics
dur := time.Since(listener.startTime)
log.D.F(
"ws connection closed %s: msgs=%d, REQs=%d, EVENTs=%d, duration=%v",
remote, listener.msgCount, listener.reqCount, listener.eventCount,
dur,
)
// Log any remaining connection state
if listener.authedPubkey.Load() != nil {
log.D.F("ws connection %s was authenticated", remote)
} else {
log.D.F("ws connection %s was not authenticated", remote)
}
}()
for {
select {
@@ -99,10 +122,8 @@ whitelist:
var msg []byte
log.T.F("waiting for message from %s", remote)
// Create a read context with timeout to prevent indefinite blocking
readCtx, readCancel := context.WithTimeout(ctx, DefaultReadTimeout)
typ, msg, err = conn.Read(readCtx)
readCancel()
// Block waiting for message; rely on pings and context cancellation to detect dead peers
typ, msg, err = conn.Read(ctx)
if err != nil {
if strings.Contains(
@@ -110,14 +131,6 @@ whitelist:
) {
return
}
// Handle timeout errors - occurs when client becomes unresponsive
if strings.Contains(err.Error(), "context deadline exceeded") {
log.T.F(
"connection from %s timed out after %v", remote,
DefaultReadTimeout,
)
return
}
// Handle EOF errors gracefully - these occur when client closes connection
// or sends incomplete/malformed WebSocket frames
if strings.Contains(err.Error(), "EOF") ||
@@ -141,19 +154,37 @@ whitelist:
return
}
if typ == PingMessage {
log.D.F("received PING from %s, sending PONG", remote)
// Create a write context with timeout for pong response
writeCtx, writeCancel := context.WithTimeout(
ctx, DefaultWriteTimeout,
)
pongStart := time.Now()
if err = conn.Write(writeCtx, PongMessage, msg); chk.E(err) {
pongDuration := time.Since(pongStart)
log.E.F(
"failed to send PONG to %s after %v: %v", remote,
pongDuration, err,
)
if writeCtx.Err() != nil {
log.E.F(
"PONG write timeout to %s after %v (limit=%v)", remote,
pongDuration, DefaultWriteTimeout,
)
}
writeCancel()
return
}
pongDuration := time.Since(pongStart)
log.D.F("sent PONG to %s successfully in %v", remote, pongDuration)
if pongDuration > time.Millisecond*50 {
log.D.F("SLOW PONG to %s: %v (>50ms)", remote, pongDuration)
}
writeCancel()
continue
}
log.T.F("received message from %s: %s", remote, string(msg))
go listener.HandleMessage(msg, remote)
// log.T.F("received message from %s: %s", remote, string(msg))
listener.HandleMessage(msg, remote)
}
}
@@ -162,21 +193,51 @@ func (s *Server) Pinger(
cancel context.CancelFunc,
) {
defer func() {
log.D.F("pinger shutting down")
cancel()
ticker.Stop()
}()
var err error
pingCount := 0
for {
select {
case <-ticker.C:
pingCount++
log.D.F("sending PING #%d", pingCount)
// Create a write context with timeout for ping operation
pingCtx, pingCancel := context.WithTimeout(ctx, DefaultWriteTimeout)
if err = conn.Ping(pingCtx); chk.E(err) {
pingStart := time.Now()
if err = conn.Ping(pingCtx); err != nil {
pingDuration := time.Since(pingStart)
log.E.F(
"PING #%d FAILED after %v: %v", pingCount, pingDuration,
err,
)
if pingCtx.Err() != nil {
log.E.F(
"PING #%d timeout after %v (limit=%v)", pingCount,
pingDuration, DefaultWriteTimeout,
)
}
chk.E(err)
pingCancel()
return
}
pingDuration := time.Since(pingStart)
log.D.F("PING #%d sent successfully in %v", pingCount, pingDuration)
if pingDuration > time.Millisecond*100 {
log.D.F("SLOW PING #%d: %v (>100ms)", pingCount, pingDuration)
}
pingCancel()
case <-ctx.Done():
log.D.F("pinger context cancelled after %d pings", pingCount)
return
}
}
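The keepalive pattern above reduces to the following sketch, assuming a *websocket.Conn from github.com/coder/websocket: each tick gets its own short write deadline, so a stalled peer is detected within the timeout instead of blocking the loop indefinitely.

func keepalive(ctx context.Context, conn *websocket.Conn, every, timeout time.Duration) error {
    t := time.NewTicker(every)
    defer t.Stop()
    for {
        select {
        case <-t.C:
            pingCtx, cancel := context.WithTimeout(ctx, timeout)
            err := conn.Ping(pingCtx) // blocks until the pong arrives or pingCtx expires
            cancel()
            if err != nil {
                return err // peer unresponsive, write failed, or context cancelled
            }
        case <-ctx.Done():
            return ctx.Err()
        }
    }
}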

View File

@@ -3,9 +3,11 @@ package app
import (
"context"
"net/http"
"time"
"github.com/coder/websocket"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/utils/atomic"
)
@@ -17,6 +19,11 @@ type Listener struct {
req *http.Request
challenge atomic.Bytes
authedPubkey atomic.Bytes
startTime time.Time
// Diagnostics: per-connection counters
msgCount int
reqCount int
eventCount int
}
// Ctx returns the listener's context, but creates a new context for each operation
@@ -26,6 +33,16 @@ func (l *Listener) Ctx() context.Context {
}
func (l *Listener) Write(p []byte) (n int, err error) {
start := time.Now()
msgLen := len(p)
// Log message attempt with content preview (first 200 chars for diagnostics)
preview := string(p)
if len(preview) > 200 {
preview = preview[:200] + "..."
}
log.D.F("ws->%s attempting write: len=%d preview=%q", l.remote, msgLen, preview)
// Use a separate context with timeout for writes to prevent race conditions
// where the main connection context gets cancelled while writing events
writeCtx, cancel := context.WithTimeout(
@@ -33,9 +50,42 @@ func (l *Listener) Write(p []byte) (n int, err error) {
)
defer cancel()
if err = l.conn.Write(writeCtx, websocket.MessageText, p); chk.E(err) {
// Attempt the write operation
writeStart := time.Now()
if err = l.conn.Write(writeCtx, websocket.MessageText, p); err != nil {
writeDuration := time.Since(writeStart)
totalDuration := time.Since(start)
// Log detailed failure information
log.E.F("ws->%s WRITE FAILED: len=%d duration=%v write_duration=%v error=%v preview=%q",
l.remote, msgLen, totalDuration, writeDuration, err, preview)
// Check if this is a context timeout
if writeCtx.Err() != nil {
log.E.F("ws->%s write timeout after %v (limit=%v)", l.remote, writeDuration, DefaultWriteTimeout)
}
// Check connection state
if l.conn != nil {
log.D.F("ws->%s connection state during failure: remote_addr=%v", l.remote, l.req.RemoteAddr)
}
chk.E(err) // Still call the original error handler
return
}
n = len(p)
// Log successful write with timing
writeDuration := time.Since(writeStart)
totalDuration := time.Since(start)
n = msgLen
log.D.F("ws->%s WRITE SUCCESS: len=%d duration=%v write_duration=%v",
l.remote, n, totalDuration, writeDuration)
// Log slow writes for performance diagnostics
if writeDuration > time.Millisecond*100 {
log.D.F("ws->%s SLOW WRITE detected: %v (>100ms) len=%d", l.remote, writeDuration, n)
}
return
}

View File

@@ -8,8 +8,8 @@ import (
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/app/config"
acl "next.orly.dev/pkg/acl"
database "next.orly.dev/pkg/database"
"next.orly.dev/pkg/crypto/keys"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/encoders/bech32encoding"
"next.orly.dev/pkg/protocol/publish"
)
@@ -46,9 +46,39 @@ func Run(
publishers: publish.New(NewPublisher(ctx)),
Admins: adminKeys,
}
// provide publisher to ACL so background sync can dispatch events
if err := acl.Registry.Configure(cfg, db, ctx, l.publishers); chk.E(err) {
// if configuration fails, proceed but log; ACL might be 'none'
// Initialize the user interface
l.UserInterface()
// Ensure a relay identity secret key exists when subscriptions and NWC are enabled
if cfg.SubscriptionEnabled && cfg.NWCUri != "" {
if skb, e := db.GetOrCreateRelayIdentitySecret(); e != nil {
log.E.F("failed to ensure relay identity key: %v", e)
} else if pk, e2 := keys.SecretBytesToPubKeyHex(skb); e2 == nil {
log.I.F("relay identity loaded (pub=%s)", pk)
// ensure relay identity pubkey is considered an admin for ACL follows mode
found := false
for _, a := range cfg.Admins {
if a == pk {
found = true
break
}
}
if !found {
cfg.Admins = append(cfg.Admins, pk)
log.I.F("added relay identity to admins for follow-list whitelisting")
}
}
}
if l.paymentProcessor, err = NewPaymentProcessor(ctx, cfg, db); err != nil {
log.E.F("failed to create payment processor: %v", err)
// Continue without payment processor
} else {
if err = l.paymentProcessor.Start(); err != nil {
log.E.F("failed to start payment processor: %v", err)
} else {
log.I.F("payment processor started successfully")
}
}
addr := fmt.Sprintf("%s:%d", cfg.Listen, cfg.Port)
log.I.F("starting listener on http://%s", addr)
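The admin-dedup step above can be read as this small helper (a sketch; ensureAdmin is a hypothetical name, not in the change):

// ensureAdmin appends pk to admins only if it is not already present,
// mirroring how the relay identity pubkey is whitelisted above.
func ensureAdmin(admins []string, pk string) []string {
    for _, a := range admins {
        if a == pk {
            return admins
        }
    }
    return append(admins, pk)
}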

app/payment_processor.go (new file, +894 lines)
View File

@@ -0,0 +1,894 @@
package app
import (
"context"
// std hex not used; use project hex encoder instead
"fmt"
"strings"
"sync"
"time"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/app/config"
"next.orly.dev/pkg/acl"
"next.orly.dev/pkg/crypto/p256k"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/encoders/bech32encoding"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/json"
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/encoders/timestamp"
"next.orly.dev/pkg/protocol/nwc"
)
// PaymentProcessor handles NWC payment notifications and updates subscriptions
type PaymentProcessor struct {
nwcClient *nwc.Client
db *database.D
config *config.C
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
dashboardURL string
}
// NewPaymentProcessor creates a new payment processor
func NewPaymentProcessor(
ctx context.Context, cfg *config.C, db *database.D,
) (pp *PaymentProcessor, err error) {
if cfg.NWCUri == "" {
return nil, fmt.Errorf("NWC URI not configured")
}
var nwcClient *nwc.Client
if nwcClient, err = nwc.NewClient(cfg.NWCUri); chk.E(err) {
return nil, fmt.Errorf("failed to create NWC client: %w", err)
}
c, cancel := context.WithCancel(ctx)
pp = &PaymentProcessor{
nwcClient: nwcClient,
db: db,
config: cfg,
ctx: c,
cancel: cancel,
}
return pp, nil
}
// Start begins listening for payment notifications
func (pp *PaymentProcessor) Start() error {
// start NWC notifications listener
pp.wg.Add(1)
go func() {
defer pp.wg.Done()
if err := pp.listenForPayments(); err != nil {
log.E.F("payment processor error: %v", err)
}
}()
// start periodic follow-list sync if subscriptions are enabled
if pp.config != nil && pp.config.SubscriptionEnabled {
pp.wg.Add(1)
go func() {
defer pp.wg.Done()
pp.runFollowSyncLoop()
}()
// start daily subscription checker
pp.wg.Add(1)
go func() {
defer pp.wg.Done()
pp.runDailySubscriptionChecker()
}()
}
return nil
}
// Stop gracefully stops the payment processor
func (pp *PaymentProcessor) Stop() {
if pp.cancel != nil {
pp.cancel()
}
pp.wg.Wait()
}
// listenForPayments subscribes to NWC notifications and processes payments
func (pp *PaymentProcessor) listenForPayments() error {
return pp.nwcClient.SubscribeNotifications(pp.ctx, pp.handleNotification)
}
// runFollowSyncLoop periodically syncs the relay identity follow list with active subscribers
func (pp *PaymentProcessor) runFollowSyncLoop() {
t := time.NewTicker(10 * time.Minute)
defer t.Stop()
// do an initial sync shortly after start
_ = pp.syncFollowList()
for {
select {
case <-pp.ctx.Done():
return
case <-t.C:
if err := pp.syncFollowList(); err != nil {
log.W.F("follow list sync failed: %v", err)
}
}
}
}
// runDailySubscriptionChecker checks once daily for subscription expiry warnings and trial reminders
func (pp *PaymentProcessor) runDailySubscriptionChecker() {
t := time.NewTicker(24 * time.Hour)
defer t.Stop()
// do an initial check shortly after start
_ = pp.checkSubscriptionStatus()
for {
select {
case <-pp.ctx.Done():
return
case <-t.C:
if err := pp.checkSubscriptionStatus(); err != nil {
log.W.F("subscription status check failed: %v", err)
}
}
}
}
// syncFollowList builds a kind-3 event from the relay identity containing only active subscribers
func (pp *PaymentProcessor) syncFollowList() error {
// ensure we have a relay identity secret
skb, err := pp.db.GetRelayIdentitySecret()
if err != nil || len(skb) != 32 {
return nil // nothing to do if no identity
}
// collect active subscribers
actives, err := pp.getActiveSubscriberPubkeys()
if err != nil {
return err
}
// signer
sign := new(p256k.Signer)
if err := sign.InitSec(skb); err != nil {
return err
}
// build follow list event
ev := event.New()
ev.Kind = kind.FollowList.K
ev.Pubkey = sign.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Tags = tag.NewS()
for _, pk := range actives {
*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(pk)))
}
// sign and save
ev.Sign(sign)
if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
return err
}
log.I.F(
"updated relay follow list with %d active subscribers", len(actives),
)
return nil
}
// getActiveSubscriberPubkeys scans the subscription records and returns active ones
func (pp *PaymentProcessor) getActiveSubscriberPubkeys() ([][]byte, error) {
prefix := []byte("sub:")
now := time.Now()
var out [][]byte
err := pp.db.DB.View(
func(txn *badger.Txn) error {
it := txn.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
item := it.Item()
key := item.KeyCopy(nil)
// key format: sub:<hexpub>
hexpub := string(key[len(prefix):])
var sub database.Subscription
if err := item.Value(
func(val []byte) error {
return json.Unmarshal(val, &sub)
},
); err != nil {
return err
}
if now.Before(sub.TrialEnd) || (!sub.PaidUntil.IsZero() && now.Before(sub.PaidUntil)) {
if b, err := hex.Dec(hexpub); err == nil {
out = append(out, b)
}
}
}
return nil
},
)
return out, err
}
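The liveness rule used in that scan, pulled out as a predicate (a sketch over the database.Subscription fields as used in this file):

// active reports whether a subscriber should appear in the follow list:
// either the trial is still running, or a paid period has not yet lapsed.
func active(sub database.Subscription, now time.Time) bool {
    return now.Before(sub.TrialEnd) ||
        (!sub.PaidUntil.IsZero() && now.Before(sub.PaidUntil))
}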
// checkSubscriptionStatus scans all subscriptions and creates warning/reminder notes
func (pp *PaymentProcessor) checkSubscriptionStatus() error {
prefix := []byte("sub:")
now := time.Now()
sevenDaysFromNow := now.AddDate(0, 0, 7)
return pp.db.DB.View(
func(txn *badger.Txn) error {
it := txn.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
item := it.Item()
key := item.KeyCopy(nil)
// key format: sub:<hexpub>
hexpub := string(key[len(prefix):])
var sub database.Subscription
if err := item.Value(
func(val []byte) error {
return json.Unmarshal(val, &sub)
},
); err != nil {
continue // skip invalid subscription records
}
pubkey, err := hex.Dec(hexpub)
if err != nil {
continue // skip invalid pubkey
}
// Check if paid subscription is expiring in 7 days
if !sub.PaidUntil.IsZero() {
// Format dates for comparison (ignore time component)
paidUntilDate := sub.PaidUntil.Truncate(24 * time.Hour)
sevenDaysDate := sevenDaysFromNow.Truncate(24 * time.Hour)
if paidUntilDate.Equal(sevenDaysDate) {
go pp.createExpiryWarningNote(pubkey, sub.PaidUntil)
}
}
// Check if user is on trial (no paid subscription, trial not expired)
if sub.PaidUntil.IsZero() && now.Before(sub.TrialEnd) {
go pp.createTrialReminderNote(pubkey, sub.TrialEnd)
}
}
return nil
},
)
}
// createExpiryWarningNote creates a warning note for users whose paid subscription expires in 7 days
func (pp *PaymentProcessor) createExpiryWarningNote(userPubkey []byte, expiryTime time.Time) error {
// Get relay identity secret to sign the note
skb, err := pp.db.GetRelayIdentitySecret()
if err != nil || len(skb) != 32 {
return fmt.Errorf("no relay identity configured")
}
// Initialize signer
sign := new(p256k.Signer)
if err := sign.InitSec(skb); err != nil {
return fmt.Errorf("failed to initialize signer: %w", err)
}
monthlyPrice := pp.config.MonthlyPriceSats
if monthlyPrice <= 0 {
monthlyPrice = 6000
}
// Get relay npub for content link
relayNpubForContent, err := bech32encoding.BinToNpub(sign.Pub())
if err != nil {
return fmt.Errorf("failed to encode relay npub: %w", err)
}
// Create the warning note content
content := fmt.Sprintf(`⚠️ Subscription Expiring Soon ⚠️
Your paid subscription to this relay will expire in 7 days on %s.
💰 To extend your subscription:
- Monthly price: %d sats
- Zap this note with your payment amount
- Each %d sats = 30 days of access
⚡ Payment Instructions:
1. Use any Lightning wallet that supports zaps
2. Zap this note with your payment
3. Your subscription will be automatically extended
Don't lose access to your private relay! Extend your subscription today.
Relay: nostr:%s
Log in to the relay dashboard to access your configuration at: %s`,
expiryTime.Format("2006-01-02 15:04:05 UTC"), monthlyPrice, monthlyPrice, string(relayNpubForContent), pp.getDashboardURL())
// Build the event
ev := event.New()
ev.Kind = kind.TextNote.K // Kind 1 for text note
ev.Pubkey = sign.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Content = []byte(content)
ev.Tags = tag.NewS()
// Add "p" tag for the user
*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(userPubkey)))
// Add expiration tag (5 days from creation)
noteExpiry := time.Now().AddDate(0, 0, 5)
*ev.Tags = append(*ev.Tags, tag.NewFromAny("expiration", fmt.Sprintf("%d", noteExpiry.Unix())))
// Add "private" tag with authorized npubs (user and relay)
var authorizedNpubs []string
// Add user npub
userNpub, err := bech32encoding.BinToNpub(userPubkey)
if err == nil {
authorizedNpubs = append(authorizedNpubs, string(userNpub))
}
// Add relay npub
relayNpub, err := bech32encoding.BinToNpub(sign.Pub())
if err == nil {
authorizedNpubs = append(authorizedNpubs, string(relayNpub))
}
// Create the private tag with comma-separated npubs
if len(authorizedNpubs) > 0 {
privateTagValue := strings.Join(authorizedNpubs, ",")
*ev.Tags = append(*ev.Tags, tag.NewFromAny("private", privateTagValue))
}
// Add a special tag to mark this as an expiry warning
*ev.Tags = append(*ev.Tags, tag.NewFromAny("warning", "subscription-expiry"))
// Sign and save the event
ev.Sign(sign)
if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
return fmt.Errorf("failed to save expiry warning note: %w", err)
}
log.I.F("created expiry warning note for user %s (expires %s)", hex.Enc(userPubkey), expiryTime.Format("2006-01-02"))
return nil
}
// createTrialReminderNote creates a reminder note for users on trial to support the relay
func (pp *PaymentProcessor) createTrialReminderNote(userPubkey []byte, trialEnd time.Time) error {
// Get relay identity secret to sign the note
skb, err := pp.db.GetRelayIdentitySecret()
if err != nil || len(skb) != 32 {
return fmt.Errorf("no relay identity configured")
}
// Initialize signer
sign := new(p256k.Signer)
if err := sign.InitSec(skb); err != nil {
return fmt.Errorf("failed to initialize signer: %w", err)
}
monthlyPrice := pp.config.MonthlyPriceSats
if monthlyPrice <= 0 {
monthlyPrice = 6000
}
// Calculate daily rate
dailyRate := monthlyPrice / 30
// Get relay npub for content link
relayNpubForContent, err := bech32encoding.BinToNpub(sign.Pub())
if err != nil {
return fmt.Errorf("failed to encode relay npub: %w", err)
}
// Create the reminder note content
content := fmt.Sprintf(`🆓 Free Trial Reminder 🆓
You're currently using this relay for FREE! Your trial expires on %s.
🙏 Support Relay Operations:
This relay provides you with private, censorship-resistant communication. Please consider supporting its continued operation.
💰 Subscription Details:
- Monthly price: %d sats (%d sats/day)
- Fair pricing for premium service
- Helps keep the relay running 24/7
⚡ How to Subscribe:
Simply zap this note with your payment amount:
- Each %d sats = 30 days of access
- Payment is processed automatically
- No account setup required
Thank you for considering supporting decentralized communication!
Relay: nostr:%s
Log in to the relay dashboard to access your configuration at: %s`,
trialEnd.Format("2006-01-02 15:04:05 UTC"), monthlyPrice, dailyRate, monthlyPrice, string(relayNpubForContent), pp.getDashboardURL())
// Build the event
ev := event.New()
ev.Kind = kind.TextNote.K // Kind 1 for text note
ev.Pubkey = sign.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Content = []byte(content)
ev.Tags = tag.NewS()
// Add "p" tag for the user
*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(userPubkey)))
// Add expiration tag (5 days from creation)
noteExpiry := time.Now().AddDate(0, 0, 5)
*ev.Tags = append(*ev.Tags, tag.NewFromAny("expiration", fmt.Sprintf("%d", noteExpiry.Unix())))
// Add "private" tag with authorized npubs (user and relay)
var authorizedNpubs []string
// Add user npub
userNpub, err := bech32encoding.BinToNpub(userPubkey)
if err == nil {
authorizedNpubs = append(authorizedNpubs, string(userNpub))
}
// Add relay npub
relayNpub, err := bech32encoding.BinToNpub(sign.Pub())
if err == nil {
authorizedNpubs = append(authorizedNpubs, string(relayNpub))
}
// Create the private tag with comma-separated npubs
if len(authorizedNpubs) > 0 {
privateTagValue := strings.Join(authorizedNpubs, ",")
*ev.Tags = append(*ev.Tags, tag.NewFromAny("private", privateTagValue))
}
// Add a special tag to mark this as a trial reminder
*ev.Tags = append(*ev.Tags, tag.NewFromAny("reminder", "trial-support"))
// Sign and save the event
ev.Sign(sign)
if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
return fmt.Errorf("failed to save trial reminder note: %w", err)
}
log.I.F("created trial reminder note for user %s (trial ends %s)", hex.Enc(userPubkey), trialEnd.Format("2006-01-02"))
return nil
}
// handleNotification processes incoming payment notifications
func (pp *PaymentProcessor) handleNotification(
notificationType string, notification map[string]any,
) error {
// Only process payment_received notifications
if notificationType != "payment_received" {
return nil
}
amount, ok := notification["amount"].(float64)
if !ok {
return fmt.Errorf("invalid amount")
}
// Prefer explicit payer/relay pubkeys if provided in metadata
var payerPubkey []byte
var userNpub string
if metadata, ok := notification["metadata"].(map[string]any); ok {
if s, ok := metadata["payer_pubkey"].(string); ok && s != "" {
if pk, err := decodeAnyPubkey(s); err == nil {
payerPubkey = pk
}
}
if payerPubkey == nil {
if s, ok := metadata["sender_pubkey"].(string); ok && s != "" { // alias
if pk, err := decodeAnyPubkey(s); err == nil {
payerPubkey = pk
}
}
}
// Optional: the intended subscriber npub (kept for backwards compatibility)
if userNpub == "" {
if npubField, ok := metadata["npub"].(string); ok {
userNpub = npubField
}
}
// If relay identity pubkey is provided, verify it matches ours
if s, ok := metadata["relay_pubkey"].(string); ok && s != "" {
if rpk, err := decodeAnyPubkey(s); err == nil {
if skb, err := pp.db.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
var signer p256k.Signer
if err := signer.InitSec(skb); err == nil {
if !strings.EqualFold(hex.Enc(rpk), hex.Enc(signer.Pub())) {
log.W.F("relay_pubkey in payment metadata does not match this relay identity: got %s want %s", hex.Enc(rpk), hex.Enc(signer.Pub()))
}
}
}
}
}
}
// Fallback: extract npub from description or metadata
description, _ := notification["description"].(string)
if userNpub == "" {
userNpub = pp.extractNpubFromDescription(description)
}
var pubkey []byte
var err error
if payerPubkey != nil {
pubkey = payerPubkey
} else {
if userNpub == "" {
return fmt.Errorf("no payer_pubkey or npub provided in payment notification")
}
pubkey, err = pp.npubToPubkey(userNpub)
if err != nil {
return fmt.Errorf("invalid npub: %w", err)
}
}
satsReceived := int64(amount / 1000)
monthlyPrice := pp.config.MonthlyPriceSats
if monthlyPrice <= 0 {
monthlyPrice = 6000
}
days := int((float64(satsReceived) / float64(monthlyPrice)) * 30)
if days < 1 {
return fmt.Errorf("payment amount too small")
}
if err := pp.db.ExtendSubscription(pubkey, days); err != nil {
return fmt.Errorf("failed to extend subscription: %w", err)
}
// Record payment history
invoice, _ := notification["invoice"].(string)
preimage, _ := notification["preimage"].(string)
if err := pp.db.RecordPayment(
pubkey, satsReceived, invoice, preimage,
); err != nil {
log.E.F("failed to record payment: %v", err)
}
// Log helpful identifiers
var payerHex = hex.Enc(pubkey)
if userNpub == "" {
log.I.F("payment processed: payer %s %d sats -> %d days", payerHex, satsReceived, days)
} else {
log.I.F("payment processed: %s (%s) %d sats -> %d days", userNpub, payerHex, satsReceived, days)
}
// Update ACL follows cache and relay follow list immediately
if pp.config != nil && pp.config.ACLMode == "follows" {
acl.Registry.AddFollow(pubkey)
}
// Trigger an immediate follow-list sync in background (best-effort)
go func() { _ = pp.syncFollowList() }()
// Create a note with payment confirmation and private tag
if err := pp.createPaymentNote(pubkey, satsReceived, days); err != nil {
log.E.F("failed to create payment note: %v", err)
}
return nil
}
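The proration in handleNotification reduces to the following sketch; NWC reports amounts in millisatoshis, so with the 6000-sat default monthly price a 3,000,000 msat (3000 sat) payment buys 15 days:

// daysForPayment converts an NWC amount (msats) into days of access.
func daysForPayment(amountMsat float64, monthlyPriceSats int64) (int, error) {
    sats := int64(amountMsat / 1000)
    if monthlyPriceSats <= 0 {
        monthlyPriceSats = 6000 // same default as the processor
    }
    days := int(float64(sats) / float64(monthlyPriceSats) * 30)
    if days < 1 {
        return 0, fmt.Errorf("payment amount too small")
    }
    return days, nil
}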
// createPaymentNote creates a note recording the payment with private tag for authorization
func (pp *PaymentProcessor) createPaymentNote(payerPubkey []byte, satsReceived int64, days int) error {
// Get relay identity secret to sign the note
skb, err := pp.db.GetRelayIdentitySecret()
if err != nil || len(skb) != 32 {
return fmt.Errorf("no relay identity configured")
}
// Initialize signer
sign := new(p256k.Signer)
if err := sign.InitSec(skb); err != nil {
return fmt.Errorf("failed to initialize signer: %w", err)
}
// Get subscription info to determine expiry
sub, err := pp.db.GetSubscription(payerPubkey)
if err != nil {
return fmt.Errorf("failed to get subscription: %w", err)
}
var expiryTime time.Time
if sub != nil && !sub.PaidUntil.IsZero() {
expiryTime = sub.PaidUntil
} else {
expiryTime = time.Now().AddDate(0, 0, days)
}
// Get relay npub for content link
relayNpubForContent, err := bech32encoding.BinToNpub(sign.Pub())
if err != nil {
return fmt.Errorf("failed to encode relay npub: %w", err)
}
// Create the note content with nostr:npub link and dashboard link
content := fmt.Sprintf("Payment received: %d sats for %d days. Subscription expires: %s\n\nRelay: nostr:%s\n\nLog in to the relay dashboard to access your configuration at: %s",
satsReceived, days, expiryTime.Format("2006-01-02 15:04:05 UTC"), string(relayNpubForContent), pp.getDashboardURL())
// Build the event
ev := event.New()
ev.Kind = kind.TextNote.K // Kind 1 for text note
ev.Pubkey = sign.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Content = []byte(content)
ev.Tags = tag.NewS()
// Add "p" tag for the payer
*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(payerPubkey)))
// Add expiration tag (5 days from creation)
noteExpiry := time.Now().AddDate(0, 0, 5)
*ev.Tags = append(*ev.Tags, tag.NewFromAny("expiration", fmt.Sprintf("%d", noteExpiry.Unix())))
// Add "private" tag with authorized npubs (payer and relay)
var authorizedNpubs []string
// Add payer npub
payerNpub, err := bech32encoding.BinToNpub(payerPubkey)
if err == nil {
authorizedNpubs = append(authorizedNpubs, string(payerNpub))
}
// Add relay npub
relayNpub, err := bech32encoding.BinToNpub(sign.Pub())
if err == nil {
authorizedNpubs = append(authorizedNpubs, string(relayNpub))
}
// Create the private tag with comma-separated npubs
if len(authorizedNpubs) > 0 {
privateTagValue := strings.Join(authorizedNpubs, ",")
*ev.Tags = append(*ev.Tags, tag.NewFromAny("private", privateTagValue))
}
// Sign and save the event
ev.Sign(sign)
if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
return fmt.Errorf("failed to save payment note: %w", err)
}
log.I.F("created payment note for %s with private authorization", hex.Enc(payerPubkey))
return nil
}
// CreateWelcomeNote creates a welcome note for first-time users with private tag for authorization
func (pp *PaymentProcessor) CreateWelcomeNote(userPubkey []byte) error {
// Get relay identity secret to sign the note
skb, err := pp.db.GetRelayIdentitySecret()
if err != nil || len(skb) != 32 {
return fmt.Errorf("no relay identity configured")
}
// Initialize signer
sign := new(p256k.Signer)
if err := sign.InitSec(skb); err != nil {
return fmt.Errorf("failed to initialize signer: %w", err)
}
monthlyPrice := pp.config.MonthlyPriceSats
if monthlyPrice <= 0 {
monthlyPrice = 6000
}
// Get relay npub for content link
relayNpubForContent, err := bech32encoding.BinToNpub(sign.Pub())
if err != nil {
return fmt.Errorf("failed to encode relay npub: %w", err)
}
// Create the welcome note content with nostr:npub link
content := fmt.Sprintf(`Welcome to the relay! 🎉
You have a FREE 30-day trial that started when you first logged in.
💰 Subscription Details:
- Monthly price: %d sats
- Trial period: 30 days from first login
💡 How to Subscribe:
To extend your subscription after the trial ends, simply zap this note with the amount you want to pay. Each %d sats = 30 days of access.
⚡ Payment Instructions:
1. Use any Lightning wallet that supports zaps
2. Zap this note with your payment
3. Your subscription will be automatically extended
Relay: nostr:%s
Log in to the relay dashboard to access your configuration at: %s
Enjoy your time on the relay!`, monthlyPrice, monthlyPrice, string(relayNpubForContent), pp.getDashboardURL())
// Build the event
ev := event.New()
ev.Kind = kind.TextNote.K // Kind 1 for text note
ev.Pubkey = sign.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Content = []byte(content)
ev.Tags = tag.NewS()
// Add "p" tag for the user
*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(userPubkey)))
// Add expiration tag (5 days from creation)
noteExpiry := time.Now().AddDate(0, 0, 5)
*ev.Tags = append(*ev.Tags, tag.NewFromAny("expiration", fmt.Sprintf("%d", noteExpiry.Unix())))
// Add "private" tag with authorized npubs (user and relay)
var authorizedNpubs []string
// Add user npub
userNpub, err := bech32encoding.BinToNpub(userPubkey)
if err == nil {
authorizedNpubs = append(authorizedNpubs, string(userNpub))
}
// Add relay npub
relayNpub, err := bech32encoding.BinToNpub(sign.Pub())
if err == nil {
authorizedNpubs = append(authorizedNpubs, string(relayNpub))
}
// Create the private tag with comma-separated npubs
if len(authorizedNpubs) > 0 {
privateTagValue := strings.Join(authorizedNpubs, ",")
*ev.Tags = append(*ev.Tags, tag.NewFromAny("private", privateTagValue))
}
// Add a special tag to mark this as a welcome note
*ev.Tags = append(*ev.Tags, tag.NewFromAny("welcome", "first-time-user"))
// Sign and save the event
ev.Sign(sign)
if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
return fmt.Errorf("failed to save welcome note: %w", err)
}
log.I.F("created welcome note for first-time user %s", hex.Enc(userPubkey))
return nil
}
// SetDashboardURL sets the dynamic dashboard URL based on HTTP request
func (pp *PaymentProcessor) SetDashboardURL(url string) {
pp.dashboardURL = url
}
// getDashboardURL returns the dashboard URL for the relay
func (pp *PaymentProcessor) getDashboardURL() string {
// Use dynamic URL if available
if pp.dashboardURL != "" {
return pp.dashboardURL
}
// Fallback to static config
if pp.config.RelayURL != "" {
return pp.config.RelayURL
}
// Default fallback if no URL is configured
return "https://your-relay.example.com"
}
// extractNpubFromDescription extracts an npub from the payment description
func (pp *PaymentProcessor) extractNpubFromDescription(description string) string {
// check if the entire description is just an npub
description = strings.TrimSpace(description)
if strings.HasPrefix(description, "npub1") && len(description) == 63 {
return description
}
// Look for npub1... pattern in the description
parts := strings.Fields(description)
for _, part := range parts {
if strings.HasPrefix(part, "npub1") && len(part) == 63 {
return part
}
}
return ""
}
// npubToPubkey converts an npub string to pubkey bytes
func (pp *PaymentProcessor) npubToPubkey(npubStr string) ([]byte, error) {
// Validate npub format
if !strings.HasPrefix(npubStr, "npub1") || len(npubStr) != 63 {
return nil, fmt.Errorf("invalid npub format")
}
// Decode using bech32encoding
prefix, value, err := bech32encoding.Decode([]byte(npubStr))
if err != nil {
return nil, fmt.Errorf("failed to decode npub: %w", err)
}
if !strings.EqualFold(string(prefix), "npub") {
return nil, fmt.Errorf("invalid prefix: %s", string(prefix))
}
pubkey, ok := value.([]byte)
if !ok {
return nil, fmt.Errorf("decoded value is not []byte")
}
return pubkey, nil
}
// UpdateRelayProfile creates or updates the relay's kind 0 profile with subscription information
func (pp *PaymentProcessor) UpdateRelayProfile() error {
// Get relay identity secret to sign the profile
skb, err := pp.db.GetRelayIdentitySecret()
if err != nil || len(skb) != 32 {
return fmt.Errorf("no relay identity configured")
}
// Initialize signer
sign := new(p256k.Signer)
if err := sign.InitSec(skb); err != nil {
return fmt.Errorf("failed to initialize signer: %w", err)
}
monthlyPrice := pp.config.MonthlyPriceSats
if monthlyPrice <= 0 {
monthlyPrice = 6000
}
// Calculate daily rate
dailyRate := monthlyPrice / 30
// Get relay wss:// URL - use dashboard URL but with wss:// scheme
relayURL := strings.Replace(pp.getDashboardURL(), "https://", "wss://", 1)
// Create profile content as JSON
profileContent := fmt.Sprintf(`{
"name": "Relay Bot",
"about": "This relay requires a subscription to access. Zap any of my notes to pay for access. Monthly price: %d sats (%d sats/day). Relay: %s",
"lud16": "",
"nip05": "",
"website": "%s"
}`, monthlyPrice, dailyRate, relayURL, pp.getDashboardURL())
// Build the profile event
ev := event.New()
ev.Kind = kind.ProfileMetadata.K // Kind 0 for profile metadata
ev.Pubkey = sign.Pub()
ev.CreatedAt = timestamp.Now().V
ev.Content = []byte(profileContent)
ev.Tags = tag.NewS()
// Sign and save the event
ev.Sign(sign)
if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
return fmt.Errorf("failed to save relay profile: %w", err)
}
log.I.F("updated relay profile with subscription information")
return nil
}
// decodeAnyPubkey decodes a public key from either hex string or npub format
func decodeAnyPubkey(s string) ([]byte, error) {
s = strings.TrimSpace(s)
if strings.HasPrefix(s, "npub1") {
prefix, value, err := bech32encoding.Decode([]byte(s))
if err != nil {
return nil, fmt.Errorf("failed to decode npub: %w", err)
}
if !strings.EqualFold(string(prefix), "npub") {
return nil, fmt.Errorf("invalid prefix: %s", string(prefix))
}
b, ok := value.([]byte)
if !ok {
return nil, fmt.Errorf("decoded value is not []byte")
}
return b, nil
}
// assume hex-encoded public key
return hex.Dec(s)
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"sync"
"time"
"github.com/coder/websocket"
"lol.mleku.dev/chk"
@@ -101,17 +102,17 @@ func (p *P) Receive(msg typer.T) {
if m.Cancel {
if m.Id == "" {
p.removeSubscriber(m.Conn)
log.D.F("removed listener %s", m.remote)
// log.D.F("removed listener %s", m.remote)
} else {
p.removeSubscriberId(m.Conn, m.Id)
log.D.C(
func() string {
return fmt.Sprintf(
"removed subscription %s for %s", m.Id,
m.remote,
)
},
)
// log.D.C(
// func() string {
// return fmt.Sprintf(
// "removed subscription %s for %s", m.Id,
// m.remote,
// )
// },
// )
}
return
}
@@ -123,27 +124,27 @@ func (p *P) Receive(msg typer.T) {
S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
}
p.Map[m.Conn] = subs
log.D.C(
func() string {
return fmt.Sprintf(
"created new subscription for %s, %s",
m.remote,
m.Filters.Marshal(nil),
)
},
)
// log.D.C(
// func() string {
// return fmt.Sprintf(
// "created new subscription for %s, %s",
// m.remote,
// m.Filters.Marshal(nil),
// )
// },
// )
} else {
subs[m.Id] = Subscription{
S: m.Filters, remote: m.remote, AuthedPubkey: m.AuthedPubkey,
}
log.D.C(
func() string {
return fmt.Sprintf(
"added subscription %s for %s", m.Id,
m.remote,
)
},
)
// log.D.C(
// func() string {
// return fmt.Sprintf(
// "added subscription %s for %s", m.Id,
// m.remote,
// )
// },
// )
}
}
}
@@ -210,39 +211,68 @@ func (p *P) Deliver(ev *event.E) {
break
}
}
}
if !allowed {
// Skip delivery for this subscriber
continue
}
}
var res *eventenvelope.Result
if res, err = eventenvelope.NewResultWith(d.id, ev); chk.E(err) {
continue
}
// Use a separate context with timeout for writes to prevent race conditions
// where the publisher context gets cancelled while writing events
writeCtx, cancel := context.WithTimeout(
context.Background(), DefaultWriteTimeout,
)
defer cancel()
}
if !allowed {
log.D.F("subscription delivery DENIED for privileged event %s to %s (auth mismatch)",
hex.Enc(ev.ID), d.sub.remote)
// Skip delivery for this subscriber
continue
}
}
var res *eventenvelope.Result
if res, err = eventenvelope.NewResultWith(d.id, ev); chk.E(err) {
log.E.F("failed to create event envelope for %s to %s: %v",
hex.Enc(ev.ID), d.sub.remote, err)
continue
}
// Log delivery attempt
msgData := res.Marshal(nil)
log.D.F("attempting delivery of event %s (kind=%d, len=%d) to subscription %s @ %s",
hex.Enc(ev.ID), ev.Kind, len(msgData), d.id, d.sub.remote)
// Use a separate context with timeout for writes to prevent race conditions
// where the publisher context gets cancelled while writing events
writeCtx, cancel := context.WithTimeout(
context.Background(), DefaultWriteTimeout,
)
defer cancel()
if err = d.w.Write(
writeCtx, websocket.MessageText, res.Marshal(nil),
); err != nil {
// On error, remove the subscriber connection safely
p.removeSubscriber(d.w)
_ = d.w.CloseNow()
continue
}
log.D.C(
func() string {
return fmt.Sprintf(
"dispatched event %0x to subscription %s, %s",
ev.ID, d.id, d.sub.remote,
)
},
)
deliveryStart := time.Now()
if err = d.w.Write(
writeCtx, websocket.MessageText, msgData,
); err != nil {
deliveryDuration := time.Since(deliveryStart)
// Log detailed failure information
log.E.F("subscription delivery FAILED: event=%s to=%s sub=%s duration=%v error=%v",
hex.Enc(ev.ID), d.sub.remote, d.id, deliveryDuration, err)
// Check for timeout specifically
if writeCtx.Err() != nil {
log.E.F("subscription delivery TIMEOUT: event=%s to=%s after %v (limit=%v)",
hex.Enc(ev.ID), d.sub.remote, deliveryDuration, DefaultWriteTimeout)
}
// Log connection cleanup
log.D.F("removing failed subscriber connection: %s", d.sub.remote)
// On error, remove the subscriber connection safely
p.removeSubscriber(d.w)
_ = d.w.CloseNow()
continue
}
deliveryDuration := time.Since(deliveryStart)
log.D.F("subscription delivery SUCCESS: event=%s to=%s sub=%s duration=%v len=%d",
hex.Enc(ev.ID), d.sub.remote, d.id, deliveryDuration, len(msgData))
// Log slow deliveries for performance monitoring
if deliveryDuration > time.Millisecond*50 {
log.D.F("SLOW subscription delivery: event=%s to=%s duration=%v (>50ms)",
hex.Enc(ev.ID), d.sub.remote, deliveryDuration)
}
}
}
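One caveat with the per-delivery timeout above: defer cancel() inside the loop holds every context's timer until Deliver returns. A loop-safe sketch cancels as soon as each write completes (conn is assumed to be a *websocket.Conn from github.com/coder/websocket):

func deliverOne(conn *websocket.Conn, payload []byte, timeout time.Duration) error {
    writeCtx, cancel := context.WithTimeout(context.Background(), timeout)
    defer cancel() // released when this helper returns, i.e. once per delivery
    return conn.Write(writeCtx, websocket.MessageText, payload)
}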

View File

@@ -2,13 +2,26 @@ package app
import (
"context"
"encoding/json"
"io"
"log"
"net/http"
"net/http/httputil"
"net/url"
"strconv"
"strings"
"sync"
"time"
"lol.mleku.dev/chk"
"next.orly.dev/app/config"
"next.orly.dev/pkg/acl"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/protocol/auth"
"next.orly.dev/pkg/protocol/publish"
)
@@ -20,26 +33,55 @@ type Server struct {
publishers *publish.S
Admins [][]byte
*database.D
// optional reverse proxy for dev web server
devProxy *httputil.ReverseProxy
// Challenge storage for HTTP UI authentication
challengeMutex sync.RWMutex
challenges map[string][]byte
paymentProcessor *PaymentProcessor
}
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// log.T.C(
// func() string {
// return fmt.Sprintf("path %v header %v", r.URL, r.Header)
// },
// )
if r.Header.Get("Upgrade") == "websocket" {
s.HandleWebsocket(w, r)
} else if r.Header.Get("Accept") == "application/nostr+json" {
s.HandleRelayInfo(w, r)
} else {
if s.mux == nil {
http.Error(w, "Upgrade required", http.StatusUpgradeRequired)
} else {
s.mux.ServeHTTP(w, r)
}
// Set CORS headers for all responses
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
w.Header().Set(
"Access-Control-Allow-Headers", "Content-Type, Authorization",
)
// Handle preflight OPTIONS requests
if r.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return
}
// If this is a websocket request, only intercept the relay root path.
// This allows other websocket paths (e.g., Vite HMR) to be handled by the dev proxy when enabled.
if r.Header.Get("Upgrade") == "websocket" {
if s.mux != nil && s.Config != nil && s.Config.WebDisableEmbedded && s.Config.WebDevProxyURL != "" && r.URL.Path != "/" {
// forward to mux (which will proxy to dev server)
s.mux.ServeHTTP(w, r)
return
}
s.HandleWebsocket(w, r)
return
}
if r.Header.Get("Accept") == "application/nostr+json" {
s.HandleRelayInfo(w, r)
return
}
if s.mux == nil {
http.Error(w, "Upgrade required", http.StatusUpgradeRequired)
return
}
s.mux.ServeHTTP(w, r)
}
func (s *Server) ServiceURL(req *http.Request) (st string) {
host := req.Header.Get("X-Forwarded-Host")
if host == "" {
@@ -70,3 +112,505 @@ func (s *Server) ServiceURL(req *http.Request) (st string) {
}
return proto + "://" + host
}
// DashboardURL constructs HTTPS URL for the dashboard based on the HTTP request
func (s *Server) DashboardURL(req *http.Request) string {
host := req.Header.Get("X-Forwarded-Host")
if host == "" {
host = req.Host
}
return "https://" + host
}
// UserInterface sets up the basic Nostr NDK-based web UI and the HTTP endpoints that allow users to log into the relay
func (s *Server) UserInterface() {
if s.mux == nil {
s.mux = http.NewServeMux()
}
// If dev proxy is configured, initialize it
if s.Config != nil && s.Config.WebDisableEmbedded && s.Config.WebDevProxyURL != "" {
proxyURL := s.Config.WebDevProxyURL
// Add default scheme if missing to avoid: proxy error: unsupported protocol scheme ""
if !strings.Contains(proxyURL, "://") {
proxyURL = "http://" + proxyURL
}
if target, err := url.Parse(proxyURL); !chk.E(err) {
if target.Scheme == "" || target.Host == "" {
// invalid URL, disable proxy
log.Printf(
"invalid ORLY_WEB_DEV_PROXY_URL: %q — disabling dev proxy\n",
s.Config.WebDevProxyURL,
)
} else {
s.devProxy = httputil.NewSingleHostReverseProxy(target)
// Ensure Host header points to upstream for dev servers that care
origDirector := s.devProxy.Director
s.devProxy.Director = func(req *http.Request) {
origDirector(req)
req.Host = target.Host
}
}
}
}
// Initialize challenge storage if not already done
if s.challenges == nil {
s.challengeMutex.Lock()
s.challenges = make(map[string][]byte)
s.challengeMutex.Unlock()
}
// Serve the main login interface (and static assets) or proxy in dev mode
s.mux.HandleFunc("/", s.handleLoginInterface)
// API endpoints for authentication
s.mux.HandleFunc("/api/auth/challenge", s.handleAuthChallenge)
s.mux.HandleFunc("/api/auth/login", s.handleAuthLogin)
s.mux.HandleFunc("/api/auth/status", s.handleAuthStatus)
s.mux.HandleFunc("/api/auth/logout", s.handleAuthLogout)
s.mux.HandleFunc("/api/permissions/", s.handlePermissions)
// Export endpoints
s.mux.HandleFunc("/api/export", s.handleExport)
s.mux.HandleFunc("/api/export/mine", s.handleExportMine)
// Events endpoints
s.mux.HandleFunc("/api/events/mine", s.handleEventsMine)
// Import endpoint (admin only)
s.mux.HandleFunc("/api/import", s.handleImport)
}
// handleLoginInterface serves the main user interface for login
func (s *Server) handleLoginInterface(w http.ResponseWriter, r *http.Request) {
// In dev mode with proxy configured, forward to dev server
if s.Config != nil && s.Config.WebDisableEmbedded && s.devProxy != nil {
s.devProxy.ServeHTTP(w, r)
return
}
// If embedded UI is disabled but no proxy configured, return a helpful message
if s.Config != nil && s.Config.WebDisableEmbedded {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.WriteHeader(http.StatusNotFound)
w.Write([]byte("Web UI disabled (ORLY_WEB_DISABLE=true). Run the web app in standalone dev mode (e.g., npm run dev) or set ORLY_WEB_DEV_PROXY_URL to proxy through this server."))
return
}
// Default: serve embedded React app
fileServer := http.FileServer(GetReactAppFS())
fileServer.ServeHTTP(w, r)
}
// handleAuthChallenge generates and returns an authentication challenge
func (s *Server) handleAuthChallenge(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Generate a proper challenge using the auth package
challenge := auth.GenerateChallenge()
challengeHex := hex.Enc(challenge)
// Store the challenge using the hex value as the key for easy lookup
s.challengeMutex.Lock()
s.challenges[challengeHex] = challenge
s.challengeMutex.Unlock()
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"challenge": "` + challengeHex + `"}`))
}
// handleAuthLogin processes authentication requests
func (s *Server) handleAuthLogin(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
w.Header().Set("Content-Type", "application/json")
// Read the request body
body, err := io.ReadAll(r.Body)
if chk.E(err) {
w.Write([]byte(`{"success": false, "error": "Failed to read request body"}`))
return
}
// Parse the signed event
var evt event.E
if err = json.Unmarshal(body, &evt); chk.E(err) {
w.Write([]byte(`{"success": false, "error": "Invalid event format"}`))
return
}
// Extract the challenge from the event to look up the stored challenge
challengeTag := evt.Tags.GetFirst([]byte("challenge"))
if challengeTag == nil {
w.Write([]byte(`{"success": false, "error": "Challenge tag missing from event"}`))
return
}
challengeHex := string(challengeTag.Value())
// Retrieve the stored challenge
s.challengeMutex.RLock()
_, exists := s.challenges[challengeHex]
s.challengeMutex.RUnlock()
if !exists {
w.Write([]byte(`{"success": false, "error": "Invalid or expired challenge"}`))
return
}
// Clean up the used challenge
s.challengeMutex.Lock()
delete(s.challenges, challengeHex)
s.challengeMutex.Unlock()
relayURL := s.ServiceURL(r)
// Validate the authentication event with the correct challenge
// The challenge in the event tag is hex-encoded, so we need to pass the hex string as bytes
ok, err := auth.Validate(&evt, []byte(challengeHex), relayURL)
if chk.E(err) || !ok {
errorMsg := "Authentication validation failed"
if err != nil {
errorMsg = err.Error()
}
w.Write([]byte(`{"success": false, "error": "` + errorMsg + `"}`))
return
}
// Authentication successful: set a simple session cookie with the pubkey
cookie := &http.Cookie{
Name: "orly_auth",
Value: hex.Enc(evt.Pubkey),
Path: "/",
HttpOnly: true,
SameSite: http.SameSiteLaxMode,
MaxAge: 60 * 60 * 24 * 30, // 30 days
}
http.SetCookie(w, cookie)
w.Write([]byte(`{"success": true, "pubkey": "` + hex.Enc(evt.Pubkey) + `", "message": "Authentication successful"}`))
}
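Together the two handlers implement a challenge-response login; a client-side sketch follows, where signEvent is a hypothetical stand-in for the user's NIP-07/NIP-42 signer and is expected to return a signed event carrying the challenge in a "challenge" tag:

func login(base string, signEvent func(challenge string) ([]byte, error)) error {
    res, err := http.Get(base + "/api/auth/challenge")
    if err != nil {
        return err
    }
    defer res.Body.Close()
    var c struct {
        Challenge string `json:"challenge"`
    }
    if err := json.NewDecoder(res.Body).Decode(&c); err != nil {
        return err
    }
    signed, err := signEvent(c.Challenge)
    if err != nil {
        return err
    }
    _, err = http.Post(base+"/api/auth/login", "application/json", bytes.NewReader(signed))
    return err
}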
// handleAuthStatus returns the current authentication status
func (s *Server) handleAuthStatus(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
w.Header().Set("Content-Type", "application/json")
// Check for auth cookie
if c, err := r.Cookie("orly_auth"); err == nil && c.Value != "" {
// Validate pubkey format (hex)
if _, err := hex.Dec(c.Value); !chk.E(err) {
w.Write([]byte(`{"authenticated": true, "pubkey": "` + c.Value + `"}`))
return
}
}
w.Write([]byte(`{"authenticated": false}`))
}
// handleAuthLogout clears the auth cookie
func (s *Server) handleAuthLogout(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Expire the cookie
http.SetCookie(
w, &http.Cookie{
Name: "orly_auth",
Value: "",
Path: "/",
MaxAge: -1,
HttpOnly: true,
SameSite: http.SameSiteLaxMode,
},
)
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(`{"success": true}`))
}
// handlePermissions returns the permission level for a given pubkey
func (s *Server) handlePermissions(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Extract pubkey from URL path
pubkeyHex := strings.TrimPrefix(r.URL.Path, "/api/permissions/")
if pubkeyHex == "" || pubkeyHex == "/" {
http.Error(w, "Invalid pubkey", http.StatusBadRequest)
return
}
// Convert hex to binary pubkey
pubkey, err := hex.Dec(pubkeyHex)
if chk.E(err) {
http.Error(w, "Invalid pubkey format", http.StatusBadRequest)
return
}
// Get access level using acl registry
permission := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
// Set content type and write JSON response
w.Header().Set("Content-Type", "application/json")
// Format response as proper JSON
response := struct {
Permission string `json:"permission"`
}{
Permission: permission,
}
// Marshal and write the response
jsonData, err := json.Marshal(response)
if chk.E(err) {
http.Error(
w, "Error generating response", http.StatusInternalServerError,
)
return
}
w.Write(jsonData)
}
// handleExport streams all events as JSONL (NDJSON). Admins only.
func (s *Server) handleExport(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Require auth cookie
c, err := r.Cookie("orly_auth")
if err != nil || c.Value == "" {
http.Error(w, "Not authenticated", http.StatusUnauthorized)
return
}
requesterPubHex := c.Value
requesterPub, err := hex.Dec(requesterPubHex)
if chk.E(err) {
http.Error(w, "Invalid auth cookie", http.StatusUnauthorized)
return
}
// Check permissions
if acl.Registry.GetAccessLevel(requesterPub, r.RemoteAddr) != "admin" {
http.Error(w, "Forbidden", http.StatusForbidden)
return
}
// Optional filtering by pubkey(s)
var pks [][]byte
q := r.URL.Query()
for _, pkHex := range q["pubkey"] {
if pkHex == "" {
continue
}
if pk, err := hex.Dec(pkHex); !chk.E(err) {
pks = append(pks, pk)
}
}
w.Header().Set("Content-Type", "application/x-ndjson")
filename := "events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
w.Header().Set(
"Content-Disposition", "attachment; filename=\""+filename+"\"",
)
// Stream export
s.D.Export(s.Ctx, w, pks...)
}
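A hedged sketch of driving this export from Go: the repeatable `pubkey` query parameter and the `orly_auth` cookie come from the handler above, while the `/api/export` path and address are assumptions.

```go
package main

import (
	"io"
	"net/http"
	"net/url"
	"os"
)

func main() {
	u, err := url.Parse("http://localhost:8080/api/export") // hypothetical route
	if err != nil {
		panic(err)
	}
	q := u.Query()
	q.Add("pubkey", "<hex pubkey>") // optional; may be repeated to filter authors
	u.RawQuery = q.Encode()
	req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
	// Must be an admin pubkey or the handler responds 403 Forbidden.
	req.AddCookie(&http.Cookie{Name: "orly_auth", Value: "<admin hex pubkey>"})
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	f, err := os.Create("events.jsonl")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	io.Copy(f, resp.Body) // stream the NDJSON body straight to disk
}
```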
// handleExportMine streams only the authenticated user's events as JSONL (NDJSON).
func (s *Server) handleExportMine(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Require auth cookie
c, err := r.Cookie("orly_auth")
if err != nil || c.Value == "" {
http.Error(w, "Not authenticated", http.StatusUnauthorized)
return
}
pubkey, err := hex.Dec(c.Value)
if chk.E(err) {
http.Error(w, "Invalid auth cookie", http.StatusUnauthorized)
return
}
w.Header().Set("Content-Type", "application/x-ndjson")
filename := "my-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
w.Header().Set(
"Content-Disposition", "attachment; filename=\""+filename+"\"",
)
// Stream export for this user's pubkey only
s.D.Export(s.Ctx, w, pubkey)
}
// handleImport receives a JSONL/NDJSON file or body and enqueues an async import. Admins only.
func (s *Server) handleImport(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Require auth cookie
c, err := r.Cookie("orly_auth")
if err != nil || c.Value == "" {
http.Error(w, "Not authenticated", http.StatusUnauthorized)
return
}
requesterPub, err := hex.Dec(c.Value)
if chk.E(err) {
http.Error(w, "Invalid auth cookie", http.StatusUnauthorized)
return
}
// Admins only
if acl.Registry.GetAccessLevel(requesterPub, r.RemoteAddr) != "admin" {
http.Error(w, "Forbidden", http.StatusForbidden)
return
}
ct := r.Header.Get("Content-Type")
if strings.HasPrefix(ct, "multipart/form-data") {
if err := r.ParseMultipartForm(32 << 20); chk.E(err) { // 32MB memory, rest to temp files
http.Error(w, "Failed to parse form", http.StatusBadRequest)
return
}
file, _, err := r.FormFile("file")
if chk.E(err) {
http.Error(w, "Missing file", http.StatusBadRequest)
return
}
defer file.Close()
s.D.Import(file)
} else {
if r.Body == nil {
http.Error(w, "Empty request body", http.StatusBadRequest)
return
}
s.D.Import(r.Body)
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusAccepted)
w.Write([]byte(`{"success": true, "message": "Import started"}`))
}
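The multipart path expects a form field named `file`; everything else in the sketch below (route, address, admin key) is a placeholder. Buffering the whole file in memory keeps the example short; a real client could stream with io.Pipe instead.

```go
package main

import (
	"bytes"
	"io"
	"mime/multipart"
	"net/http"
	"os"
)

func main() {
	var buf bytes.Buffer
	mw := multipart.NewWriter(&buf)
	part, err := mw.CreateFormFile("file", "events.jsonl") // field name matches r.FormFile("file")
	if err != nil {
		panic(err)
	}
	src, err := os.Open("events.jsonl")
	if err != nil {
		panic(err)
	}
	defer src.Close()
	if _, err := io.Copy(part, src); err != nil {
		panic(err)
	}
	mw.Close() // finalizes the multipart body
	req, _ := http.NewRequest(http.MethodPost, "http://localhost:8080/api/import", &buf) // hypothetical route
	req.Header.Set("Content-Type", mw.FormDataContentType())
	req.AddCookie(&http.Cookie{Name: "orly_auth", Value: "<admin hex pubkey>"})
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// Expect 202 Accepted with {"success": true, "message": "Import started"}.
}
```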
// handleEventsMine returns the authenticated user's events in JSON format with pagination
func (s *Server) handleEventsMine(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Require auth cookie
c, err := r.Cookie("orly_auth")
if err != nil || c.Value == "" {
http.Error(w, "Not authenticated", http.StatusUnauthorized)
return
}
pubkey, err := hex.Dec(c.Value)
if chk.E(err) {
http.Error(w, "Invalid auth cookie", http.StatusUnauthorized)
return
}
// Parse pagination parameters
query := r.URL.Query()
limit := 50 // default limit
if l := query.Get("limit"); l != "" {
if parsed, err := strconv.Atoi(l); err == nil && parsed > 0 && parsed <= 100 {
limit = parsed
}
}
offset := 0
if o := query.Get("offset"); o != "" {
if parsed, err := strconv.Atoi(o); err == nil && parsed >= 0 {
offset = parsed
}
}
// Use QueryEvents with filter for this user's events
f := &filter.F{
Authors: tag.NewFromBytesSlice(pubkey),
}
log.Printf("DEBUG: Querying events for pubkey: %s", hex.Enc(pubkey))
events, err := s.D.QueryEvents(s.Ctx, f)
if chk.E(err) {
log.Printf("DEBUG: QueryEvents failed: %v", err)
http.Error(w, "Failed to query events", http.StatusInternalServerError)
return
}
log.Printf("DEBUG: QueryEvents returned %d events", len(events))
// If no events were found, also check whether the database holds any events at all (debug aid)
if len(events) == 0 {
// Create a filter to get any events (no authors filter)
allEventsFilter := &filter.F{}
allEvents, err := s.D.QueryEvents(s.Ctx, allEventsFilter)
if err == nil {
log.Printf("DEBUG: Total events in database: %d", len(allEvents))
} else {
log.Printf("DEBUG: Failed to query all events: %v", err)
}
}
// Events are already sorted by QueryEvents in reverse chronological order
// Apply offset and limit manually since QueryEvents doesn't support offset
totalEvents := len(events)
start := offset
if start > totalEvents {
start = totalEvents
}
end := start + limit
if end > totalEvents {
end = totalEvents
}
paginatedEvents := events[start:end]
// Convert events to JSON response format
type EventResponse struct {
ID string `json:"id"`
Kind int `json:"kind"`
CreatedAt int64 `json:"created_at"`
Content string `json:"content"`
RawJSON string `json:"raw_json"`
}
response := struct {
Events []EventResponse `json:"events"`
Total int `json:"total"`
Offset int `json:"offset"`
Limit int `json:"limit"`
}{
Events: make([]EventResponse, len(paginatedEvents)),
Total: totalEvents,
Offset: offset,
Limit: limit,
}
for i, ev := range paginatedEvents {
response.Events[i] = EventResponse{
ID: hex.Enc(ev.ID),
Kind: int(ev.Kind),
CreatedAt: int64(ev.CreatedAt),
Content: string(ev.Content),
RawJSON: string(ev.Serialize()),
}
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(response)
}
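Since the response shape and the limit/offset clamping are defined above, a paginating client can be sketched as follows; the `/api/events/mine` path and address are assumptions.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type eventsPage struct {
	Events []struct {
		ID        string `json:"id"`
		Kind      int    `json:"kind"`
		CreatedAt int64  `json:"created_at"`
		Content   string `json:"content"`
		RawJSON   string `json:"raw_json"`
	} `json:"events"`
	Total  int `json:"total"`
	Offset int `json:"offset"`
	Limit  int `json:"limit"`
}

func main() {
	// limit is clamped to 1..100 (default 50); offset defaults to 0.
	req, _ := http.NewRequest(http.MethodGet,
		"http://localhost:8080/api/events/mine?limit=20&offset=40", nil) // hypothetical route
	req.AddCookie(&http.Cookie{Name: "orly_auth", Value: "<hex pubkey>"})
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var page eventsPage
	if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
		panic(err)
	}
	fmt.Printf("showing %d of %d events (offset %d)\n", len(page.Events), page.Total, page.Offset)
}
```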

19
app/web.go Normal file

@@ -0,0 +1,19 @@
package app
import (
"embed"
"io/fs"
"net/http"
)
//go:embed web/dist
var reactAppFS embed.FS
// GetReactAppFS returns an http.FileSystem from the embedded React app
func GetReactAppFS() http.FileSystem {
webDist, err := fs.Sub(reactAppFS, "web/dist")
if err != nil {
panic("Failed to load embedded web app: " + err.Error())
}
return http.FS(webDist)
}

30
app/web/.gitignore vendored Normal file

@@ -0,0 +1,30 @@
# Dependencies
node_modules
.pnp
.pnp.js
# Bun
.bunfig.toml
bun.lockb
# Build directories
build
# Cache and logs
.cache
.temp
.log
*.log
# Environment variables
.env
.env.local
.env.development.local
.env.test.local
.env.production.local
# Editor directories and files
.idea
.vscode
*.swp
*.swo

89
app/web/README.md Normal file

@@ -0,0 +1,89 @@
# Orly Web Application
This is a React web application, built and bundled with Bun, that is embedded into the Go binary automatically at build time.
## Prerequisites
- [Bun](https://bun.sh/) - JavaScript runtime and toolkit
- Go 1.16+ (for embedding functionality)
## Development
There are two ways to develop the web app:
1) Standalone (recommended for hot reload)
- Start the Go relay with the embedded web UI disabled so the React app can run on its own dev server with HMR.
- Configure the relay via environment variables:
```bash
# In another shell at repo root
export ORLY_WEB_DISABLE=true
# Optional: if you want same-origin URLs, you can set a proxy target and access the relay on the same port
# export ORLY_WEB_DEV_PROXY_URL=http://localhost:5173
# Start the relay as usual
go run .
```
- Then start the React dev server:
```bash
cd app/web
bun install
bun dev
```
When `ORLY_WEB_DISABLE=true` is set, the Go server still serves the API and WebSocket endpoints and sends permissive CORS headers, so the dev server can reach them cross-origin. If `ORLY_WEB_DEV_PROXY_URL` is set, the Go server reverse-proxies non-`/api` paths to the dev server so you can use the same origin (a minimal sketch of this wiring follows the embedded-mode steps below).
2) Embedded (no hot reload)
- Build the web app and run the Go server with defaults:
```bash
cd app/web
bun install
bun run build
cd ../../
go run .
```
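As a rough illustration of the dev-proxy behavior described in option 1 above (not the relay's actual implementation), wiring such a proxy in Go might look like this, assuming `ORLY_WEB_DEV_PROXY_URL` points at the Bun dev server:

```go
package main

import (
	"net/http"
	"net/http/httputil"
	"net/url"
	"os"
	"strings"
)

func main() {
	// e.g. ORLY_WEB_DEV_PROXY_URL=http://localhost:5173
	target, err := url.Parse(os.Getenv("ORLY_WEB_DEV_PROXY_URL"))
	if err != nil {
		panic(err)
	}
	devProxy := httputil.NewSingleHostReverseProxy(target)
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if strings.HasPrefix(r.URL.Path, "/api") {
			// In the real server these paths are handled by the relay's own mux.
			http.NotFound(w, r)
			return
		}
		devProxy.ServeHTTP(w, r) // forward everything else to the dev server
	})
	panic(http.ListenAndServe(":8080", nil))
}
```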
## Building
The React application needs to be built before compiling the Go binary to ensure that the embedded files are available:
```bash
# Build the React application
cd app/web
bun install
bun run build
# Build the Go binary from project root
cd ../../
go build
```
## How it works
1. The React application is built to the `app/web/dist` directory
2. The Go embed directive in `app/web.go` embeds these files into the binary
3. When the server runs, it serves the embedded React app at the root path
## Build Automation
You can create a shell script to automate the build process:
```bash
#!/bin/bash
# build.sh
echo "Building React app..."
cd app/web
bun install
bun run build
echo "Building Go binary..."
cd ../../
go build
echo "Build complete!"
```
Make it executable with `chmod +x build.sh` and run with `./build.sh`.

36
app/web/bun.lock Normal file

@@ -0,0 +1,36 @@
{
"lockfileVersion": 1,
"workspaces": {
"": {
"name": "orly-web",
"dependencies": {
"react": "^18.2.0",
"react-dom": "^18.2.0",
},
"devDependencies": {
"bun-types": "latest",
},
},
},
"packages": {
"@types/node": ["@types/node@24.5.2", "", { "dependencies": { "undici-types": "~7.12.0" } }, "sha512-FYxk1I7wPv3K2XBaoyH2cTnocQEu8AOZ60hPbsyukMPLv5/5qr7V1i8PLHdl6Zf87I+xZXFvPCXYjiTFq+YSDQ=="],
"@types/react": ["@types/react@19.1.13", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-hHkbU/eoO3EG5/MZkuFSKmYqPbSVk5byPFa3e7y/8TybHiLMACgI8seVYlicwk7H5K/rI2px9xrQp/C+AUDTiQ=="],
"bun-types": ["bun-types@1.2.22", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-hwaAu8tct/Zn6Zft4U9BsZcXkYomzpHJX28ofvx7k0Zz2HNz54n1n+tDgxoWFGB4PcFvJXJQloPhaV2eP3Q6EA=="],
"csstype": ["csstype@3.1.3", "", {}, "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw=="],
"js-tokens": ["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="],
"loose-envify": ["loose-envify@1.4.0", "", { "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, "bin": { "loose-envify": "cli.js" } }, "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q=="],
"react": ["react@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ=="],
"react-dom": ["react-dom@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0", "scheduler": "^0.23.2" }, "peerDependencies": { "react": "^18.3.1" } }, "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw=="],
"scheduler": ["scheduler@0.23.2", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ=="],
"undici-types": ["undici-types@7.12.0", "", {}, "sha512-goOacqME2GYyOZZfb5Lgtu+1IDmAlAEu5xnD3+xTzS10hT0vzpf0SPjkXwAw9Jm+4n/mQGDP3LO8CPbYROeBfQ=="],
}
}

160
app/web/dist/index-4xsq3yxw.js vendored Normal file

File diff suppressed because one or more lines are too long

1
app/web/dist/index-q4cwd1fy.css vendored Normal file

File diff suppressed because one or more lines are too long

30
app/web/dist/index.html vendored Normal file

@@ -0,0 +1,30 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Nostr Relay</title>
<link rel="stylesheet" crossorigin href="./index-q4cwd1fy.css"><script type="module" crossorigin src="./index-4xsq3yxw.js"></script></head>
<body>
<script>
// Apply system theme preference immediately to avoid flash of wrong theme
function applyTheme(isDark) {
document.body.classList.remove('bg-white', 'bg-gray-900');
document.body.classList.add(isDark ? 'bg-gray-900' : 'bg-white');
}
// Set initial theme
applyTheme(window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches);
// Listen for theme changes
if (window.matchMedia) {
window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', e => {
applyTheme(e.matches);
});
}
</script>
<div id="root"></div>
</body>
</html>

112
app/web/dist/tailwind.min.css vendored Normal file

@@ -0,0 +1,112 @@
/*
Local Tailwind CSS (minimal subset for this UI)
Note: This file includes just the utilities used by the app to keep size small.
You can replace this with a full Tailwind build if desired.
*/
/* Preflight-like resets (very minimal) */
*,::before,::after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}
html,body,#root{height:100%}
html{line-height:1.5;-webkit-text-size-adjust:100%;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,Segoe UI,Roboto,Helvetica,Arial,Noto Sans,"Apple Color Emoji","Segoe UI Emoji"}
body{margin:0}
button,input{font:inherit;color:inherit}
img{display:block;max-width:100%;height:auto}
/* Layout */
.sticky{position:sticky}.relative{position:relative}.absolute{position:absolute}
.top-0{top:0}.left-0{left:0}.inset-0{top:0;right:0;bottom:0;left:0}
.z-50{z-index:50}.z-10{z-index:10}
.block{display:block}.flex{display:flex}
.items-center{align-items:center}.justify-start{justify-content:flex-start}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}
.flex-grow{flex-grow:1}.shrink-0{flex-shrink:0}
.overflow-hidden{overflow:hidden}
/* Sizing */
.w-full{width:100%}.w-auto{width:auto}.w-16{width:4rem}
.h-full{height:100%}.h-16{height:4rem}
.aspect-square{aspect-ratio:1/1}
.max-w-3xl{max-width:48rem}
/* Spacing */
.p-0{padding:0}.p-2{padding:.5rem}.p-3{padding:.75rem}.p-6{padding:1.5rem}
.px-2{padding-left:.5rem;padding-right:.5rem}
.mr-0{margin-right:0}.mr-2{margin-right:.5rem}
.mt-2{margin-top:.5rem}.mt-5{margin-top:1.25rem}
.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-4{margin-bottom:1rem}.mb-5{margin-bottom:1.25rem}
.mx-auto{margin-left:auto;margin-right:auto}
/* Borders & Radius */
.rounded{border-radius:.25rem}.rounded-full{border-radius:9999px}
.border-0{border-width:0}.border-2{border-width:2px}
.border-white{border-color:#fff}
.border{border-width:1px}.border-gray-300{border-color:#d1d5db}.border-gray-600{border-color:#4b5563}
.border-red-500{border-color:#ef4444}.border-red-700{border-color:#b91c1c}
/* Colors / Backgrounds */
.bg-white{background-color:#fff}
.bg-gray-100{background-color:#f3f4f6}
.bg-gray-200{background-color:#e5e7eb}
.bg-gray-300{background-color:#d1d5db}
.bg-gray-600{background-color:#4b5563}
.bg-gray-700{background-color:#374151}
.bg-gray-800{background-color:#1f2937}
.bg-gray-900{background-color:#111827}
.bg-blue-500{background-color:#3b82f6}
.bg-blue-600{background-color:#2563eb}.hover\:bg-blue-700:hover{background-color:#1d4ed8}
.hover\:bg-blue-600:hover{background-color:#2563eb}
.bg-red-600{background-color:#dc2626}.hover\:bg-red-700:hover{background-color:#b91c1c}
.bg-cyan-100{background-color:#cffafe}
.bg-green-100{background-color:#d1fae5}
.bg-red-100{background-color:#fee2e2}
.bg-red-50{background-color:#fef2f2}
.bg-green-900{background-color:#064e3b}
.bg-red-900{background-color:#7f1d1d}
.bg-cyan-900{background-color:#164e63}
.bg-cover{background-size:cover}.bg-center{background-position:center}
.bg-transparent{background-color:transparent}
/* Text */
.text-left{text-align:left}
.text-white{color:#fff}
.text-gray-300{color:#d1d5db}
.text-gray-500{color:#6b7280}.hover\:text-gray-800:hover{color:#1f2937}
.hover\:text-gray-100:hover{color:#f3f4f6}
.text-gray-700{color:#374151}
.text-gray-800{color:#1f2937}
.text-gray-900{color:#111827}
.text-gray-100{color:#f3f4f6}
.text-green-800{color:#065f46}
.text-green-100{color:#dcfce7}
.text-red-800{color:#991b1b}
.text-red-200{color:#fecaca}
.text-red-100{color:#fee2e2}
.text-cyan-800{color:#155e75}
.text-cyan-100{color:#cffafe}
.text-base{font-size:1rem;line-height:1.5rem}
.text-lg{font-size:1.125rem;line-height:1.75rem}
.text-2xl{font-size:1.5rem;line-height:2rem}
.font-bold{font-weight:700}
/* Opacity */
.opacity-70{opacity:.7}
/* Effects */
.shadow{--tw-shadow:0 1px 3px 0 rgba(0,0,0,0.1),0 1px 2px -1px rgba(0,0,0,0.1);box-shadow:var(--tw-shadow)}
/* Cursor */
.cursor-pointer{cursor:pointer}
/* Box model */
.box-border{box-sizing:border-box}
/* Utilities */
.hover\:bg-transparent:hover{background-color:transparent}
.hover\:bg-gray-200:hover{background-color:#e5e7eb}
.hover\:bg-gray-600:hover{background-color:#4b5563}
.focus\:ring-2:focus{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}
.focus\:ring-blue-200:focus{--tw-ring-color:rgba(191, 219, 254, var(--tw-ring-opacity))}
.focus\:ring-blue-500:focus{--tw-ring-color:rgba(59, 130, 246, var(--tw-ring-opacity))}
.disabled\:opacity-50:disabled{opacity:.5}
.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}
/* Avatar image heights in the header already inherit from the container */

18
app/web/package.json Normal file

@@ -0,0 +1,18 @@
{
"name": "orly-web",
"version": "0.1.0",
"private": true,
"type": "module",
"scripts": {
"dev": "bun --hot --port 5173 public/dev.html",
"build": "rm -rf dist && bun build ./public/index.html --outdir ./dist --minify --splitting && cp -r public/tailwind.min.css dist/",
"preview": "bun x serve dist"
},
"dependencies": {
"react": "^18.2.0",
"react-dom": "^18.2.0"
},
"devDependencies": {
"bun-types": "latest"
}
}

13
app/web/public/dev.html Normal file

@@ -0,0 +1,13 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Nostr Relay (Dev)</title>
<link rel="stylesheet" href="tailwind.min.css" />
</head>
<body class="bg-white">
<div id="root"></div>
<script type="module" src="/src/index.jsx"></script>
</body>
</html>

30
app/web/public/index.html Normal file

@@ -0,0 +1,30 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Nostr Relay</title>
<link rel="stylesheet" href="tailwind.min.css" />
</head>
<body>
<script>
// Apply system theme preference immediately to avoid flash of wrong theme
function applyTheme(isDark) {
document.body.classList.remove('bg-white', 'bg-gray-900');
document.body.classList.add(isDark ? 'bg-gray-900' : 'bg-white');
}
// Set initial theme
applyTheme(window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches);
// Listen for theme changes
if (window.matchMedia) {
window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', e => {
applyTheme(e.matches);
});
}
</script>
<div id="root"></div>
<script type="module" src="/src/index.jsx"></script>
</body>
</html>

112
app/web/public/tailwind.min.css vendored Normal file

@@ -0,0 +1,112 @@
/*
Local Tailwind CSS (minimal subset for this UI)
Note: This file includes just the utilities used by the app to keep size small.
You can replace this with a full Tailwind build if desired.
*/
/* Preflight-like resets (very minimal) */
*,::before,::after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}
html,body,#root{height:100%}
html{line-height:1.5;-webkit-text-size-adjust:100%;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,Segoe UI,Roboto,Helvetica,Arial,Noto Sans,"Apple Color Emoji","Segoe UI Emoji"}
body{margin:0}
button,input{font:inherit;color:inherit}
img{display:block;max-width:100%;height:auto}
/* Layout */
.sticky{position:sticky}.relative{position:relative}.absolute{position:absolute}
.top-0{top:0}.left-0{left:0}.inset-0{top:0;right:0;bottom:0;left:0}
.z-50{z-index:50}.z-10{z-index:10}
.block{display:block}.flex{display:flex}
.items-center{align-items:center}.justify-start{justify-content:flex-start}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}
.flex-grow{flex-grow:1}.shrink-0{flex-shrink:0}
.overflow-hidden{overflow:hidden}
/* Sizing */
.w-full{width:100%}.w-auto{width:auto}.w-16{width:4rem}
.h-full{height:100%}.h-16{height:4rem}
.aspect-square{aspect-ratio:1/1}
.max-w-3xl{max-width:48rem}
/* Spacing */
.p-0{padding:0}.p-2{padding:.5rem}.p-3{padding:.75rem}.p-6{padding:1.5rem}
.px-2{padding-left:.5rem;padding-right:.5rem}
.mr-0{margin-right:0}.mr-2{margin-right:.5rem}
.mt-2{margin-top:.5rem}.mt-5{margin-top:1.25rem}
.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-4{margin-bottom:1rem}.mb-5{margin-bottom:1.25rem}
.mx-auto{margin-left:auto;margin-right:auto}
/* Borders & Radius */
.rounded{border-radius:.25rem}.rounded-full{border-radius:9999px}
.border-0{border-width:0}.border-2{border-width:2px}
.border-white{border-color:#fff}
.border{border-width:1px}.border-gray-300{border-color:#d1d5db}.border-gray-600{border-color:#4b5563}
.border-red-500{border-color:#ef4444}.border-red-700{border-color:#b91c1c}
/* Colors / Backgrounds */
.bg-white{background-color:#fff}
.bg-gray-100{background-color:#f3f4f6}
.bg-gray-200{background-color:#e5e7eb}
.bg-gray-300{background-color:#d1d5db}
.bg-gray-600{background-color:#4b5563}
.bg-gray-700{background-color:#374151}
.bg-gray-800{background-color:#1f2937}
.bg-gray-900{background-color:#111827}
.bg-blue-500{background-color:#3b82f6}
.bg-blue-600{background-color:#2563eb}.hover\:bg-blue-700:hover{background-color:#1d4ed8}
.hover\:bg-blue-600:hover{background-color:#2563eb}
.bg-red-600{background-color:#dc2626}.hover\:bg-red-700:hover{background-color:#b91c1c}
.bg-cyan-100{background-color:#cffafe}
.bg-green-100{background-color:#d1fae5}
.bg-red-100{background-color:#fee2e2}
.bg-red-50{background-color:#fef2f2}
.bg-green-900{background-color:#064e3b}
.bg-red-900{background-color:#7f1d1d}
.bg-cyan-900{background-color:#164e63}
.bg-cover{background-size:cover}.bg-center{background-position:center}
.bg-transparent{background-color:transparent}
/* Text */
.text-left{text-align:left}
.text-white{color:#fff}
.text-gray-300{color:#d1d5db}
.text-gray-500{color:#6b7280}.hover\:text-gray-800:hover{color:#1f2937}
.hover\:text-gray-100:hover{color:#f3f4f6}
.text-gray-700{color:#374151}
.text-gray-800{color:#1f2937}
.text-gray-900{color:#111827}
.text-gray-100{color:#f3f4f6}
.text-green-800{color:#065f46}
.text-green-100{color:#dcfce7}
.text-red-800{color:#991b1b}
.text-red-200{color:#fecaca}
.text-red-100{color:#fee2e2}
.text-cyan-800{color:#155e75}
.text-cyan-100{color:#cffafe}
.text-base{font-size:1rem;line-height:1.5rem}
.text-lg{font-size:1.125rem;line-height:1.75rem}
.text-2xl{font-size:1.5rem;line-height:2rem}
.font-bold{font-weight:700}
/* Opacity */
.opacity-70{opacity:.7}
/* Effects */
.shadow{--tw-shadow:0 1px 3px 0 rgba(0,0,0,0.1),0 1px 2px -1px rgba(0,0,0,0.1);box-shadow:var(--tw-shadow)}
/* Cursor */
.cursor-pointer{cursor:pointer}
/* Box model */
.box-border{box-sizing:border-box}
/* Utilities */
.hover\:bg-transparent:hover{background-color:transparent}
.hover\:bg-gray-200:hover{background-color:#e5e7eb}
.hover\:bg-gray-600:hover{background-color:#4b5563}
.focus\:ring-2:focus{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}
.focus\:ring-blue-200:focus{--tw-ring-color:rgba(191, 219, 254, var(--tw-ring-opacity))}
.focus\:ring-blue-500:focus{--tw-ring-color:rgba(59, 130, 246, var(--tw-ring-opacity))}
.disabled\:opacity-50:disabled{opacity:.5}
.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}
/* Avatar image heights in the header already inherit from the container */

2274
app/web/src/App.jsx Normal file

File diff suppressed because it is too large

11
app/web/src/index.jsx Normal file

@@ -0,0 +1,11 @@
import React from 'react';
import { createRoot } from 'react-dom/client';
import App from './App';
import './styles.css';
const root = createRoot(document.getElementById('root'));
root.render(
<React.StrictMode>
<App />
</React.StrictMode>
);

191
app/web/src/styles.css Normal file

@@ -0,0 +1,191 @@
body {
font-family: Arial, sans-serif;
margin: 0;
padding: 0;
}
.container {
background: #f9f9f9;
padding: 30px;
border-radius: 8px;
margin-top: 20px; /* Reduced space since header is now sticky */
}
.form-group {
margin-bottom: 20px;
}
label {
display: block;
margin-bottom: 5px;
font-weight: bold;
}
input, textarea {
width: 100%;
padding: 10px;
border: 1px solid #ddd;
border-radius: 4px;
}
button {
background: #007cba;
color: white;
padding: 12px 20px;
border: none;
border-radius: 4px;
cursor: pointer;
}
button:hover {
background: #005a87;
}
.danger-button {
background: #dc3545;
}
.danger-button:hover {
background: #c82333;
}
.status {
margin-top: 20px;
margin-bottom: 20px;
padding: 10px;
border-radius: 4px;
}
.success {
background: #d4edda;
color: #155724;
}
.error {
background: #f8d7da;
color: #721c24;
}
.info {
background: #d1ecf1;
color: #0c5460;
}
.header-panel {
position: sticky;
top: 0;
left: 0;
width: 100%;
background-color: #f8f9fa;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
z-index: 1000;
height: 60px;
display: flex;
align-items: center;
background-size: cover;
background-position: center;
overflow: hidden;
}
.header-content {
display: flex;
align-items: center;
height: 100%;
padding: 0 0 0 12px;
width: 100%;
margin: 0 auto;
box-sizing: border-box;
}
.header-left {
display: flex;
align-items: center;
justify-content: flex-start;
height: 100%;
}
.header-center {
display: flex;
flex-grow: 1;
align-items: center;
justify-content: flex-start;
position: relative;
overflow: hidden;
}
.header-right {
display: flex;
align-items: center;
justify-content: flex-end;
height: 100%;
}
.header-logo {
height: 100%;
aspect-ratio: 1 / 1;
width: auto;
border-radius: 0;
object-fit: cover;
flex-shrink: 0;
}
.user-avatar {
width: 2em;
height: 2em;
border-radius: 50%;
object-fit: cover;
border: 2px solid white;
margin-right: 10px;
box-shadow: 0 1px 3px rgba(0,0,0,0.2);
}
.user-profile {
display: flex;
align-items: center;
position: relative;
z-index: 1;
}
.user-info {
font-weight: bold;
font-size: 1.2em;
text-align: left;
}
.user-name {
font-weight: bold;
font-size: 1em;
display: block;
}
.profile-banner {
position: absolute;
width: 100%;
height: 100%;
top: 0;
left: 0;
z-index: -1;
opacity: 0.7;
}
.logout-button {
background: transparent;
color: #6c757d;
border: none;
font-size: 20px;
cursor: pointer;
padding: 0;
display: flex;
align-items: center;
justify-content: center;
width: 48px;
height: 100%;
margin-left: 10px;
margin-right: 0;
flex-shrink: 0;
}
.logout-button:hover {
background: transparent;
color: #343a40;
}


@@ -34,13 +34,18 @@ COPY cmd/benchmark/benchmark-runner.sh /app/benchmark-runner
# Make scripts executable
RUN chmod +x /app/benchmark-runner
# Create reports directory
RUN mkdir -p /reports
# Create runtime user and reports directory owned by uid 1000
RUN adduser -u 1000 -D appuser && \
mkdir -p /reports && \
chown -R 1000:1000 /app /reports
# Environment variables
ENV BENCHMARK_EVENTS=10000
ENV BENCHMARK_WORKERS=8
ENV BENCHMARK_DURATION=60s
# Drop privileges: run as uid 1000
USER 1000:1000
# Run the benchmark runner
CMD ["/app/benchmark-runner"]


@@ -6,7 +6,7 @@ WORKDIR /build
COPY . .
# Build the basic-badger example
RUN cd examples/basic-badger && \
RUN echo $PWD; cd examples/basic-badger && \
go mod tidy && \
CGO_ENABLED=0 go build -o khatru-badger .


@@ -46,7 +46,13 @@ RUN go mod download
COPY . .
# Build the relay
RUN CGO_ENABLED=1 GOOS=linux go build -o relay .
RUN CGO_ENABLED=1 GOOS=linux go build -gcflags "all=-N -l" -o relay .
# Create non-root user (uid 1000) for runtime in builder stage (used by analyzer)
RUN useradd -u 1000 -m -s /bin/bash appuser && \
chown -R 1000:1000 /build
# Switch to uid 1000 for any subsequent runtime use of this stage
USER 1000:1000
# Final stage
FROM ubuntu:22.04
@@ -60,8 +66,10 @@ WORKDIR /app
# Copy binary from builder
COPY --from=builder /build/relay /app/relay
# Create data directory
RUN mkdir -p /data
# Create runtime user and writable directories
RUN useradd -u 1000 -m -s /bin/bash appuser && \
mkdir -p /data /profiles /app && \
chown -R 1000:1000 /data /profiles /app
# Expose port
EXPOSE 8080
@@ -70,11 +78,14 @@ EXPOSE 8080
ENV ORLY_DATA_DIR=/data
ENV ORLY_LISTEN=0.0.0.0
ENV ORLY_PORT=8080
ENV ORLY_LOG_LEVEL=info
ENV ORLY_LOG_LEVEL=off
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD bash -lc "code=\$(curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:8080 || echo 000); echo \$code | grep -E '^(101|200|400|404|426)$' >/dev/null || exit 1"
# Drop privileges: run as uid 1000
USER 1000:1000
# Run the relay
CMD ["/app/relay"]


@@ -11,7 +11,7 @@ services:
- ORLY_DATA_DIR=/data
- ORLY_LISTEN=0.0.0.0
- ORLY_PORT=8080
- ORLY_LOG_LEVEL=info
- ORLY_LOG_LEVEL=off
volumes:
- ./data/next-orly:/data
ports:

Submodule cmd/benchmark/external/khatru deleted from 668c41b988


@@ -2,7 +2,6 @@ package main
import (
"context"
"crypto/rand"
"flag"
"fmt"
"log"
@@ -63,6 +62,7 @@ type Benchmark struct {
}
func main() {
// lol.SetLogLevel("trace")
config := parseFlags()
if config.RelayURL != "" {
@@ -96,7 +96,7 @@ func parseFlags() *BenchmarkConfig {
&config.DataDir, "datadir", "/tmp/benchmark_db", "Database directory",
)
flag.IntVar(
&config.NumEvents, "events", 100000, "Number of events to generate",
&config.NumEvents, "events", 10000, "Number of events to generate",
)
flag.IntVar(
&config.ConcurrentWorkers, "workers", runtime.NumCPU(),
@@ -133,8 +133,16 @@ func runNetworkLoad(cfg *BenchmarkConfig) {
"Network mode: relay=%s workers=%d rate=%d ev/s per worker duration=%s\n",
cfg.RelayURL, cfg.NetWorkers, cfg.NetRate, cfg.TestDuration,
)
ctx, cancel := context.WithTimeout(context.Background(), cfg.TestDuration)
// Create a timeout context for benchmark control only, not for connections
timeoutCtx, cancel := context.WithTimeout(
context.Background(), cfg.TestDuration,
)
defer cancel()
// Use a separate background context for relay connections to avoid
// cancelling the server when the benchmark timeout expires
connCtx := context.Background()
var wg sync.WaitGroup
if cfg.NetWorkers <= 0 {
cfg.NetWorkers = 1
@@ -146,8 +154,8 @@ func runNetworkLoad(cfg *BenchmarkConfig) {
wg.Add(1)
go func(workerID int) {
defer wg.Done()
// Connect to relay
rl, err := ws.RelayConnect(ctx, cfg.RelayURL)
// Connect to relay using non-cancellable context
rl, err := ws.RelayConnect(connCtx, cfg.RelayURL)
if err != nil {
fmt.Printf(
"worker %d: failed to connect to %s: %v\n", workerID,
@@ -174,17 +182,28 @@ func runNetworkLoad(cfg *BenchmarkConfig) {
f.Authors = tag.NewWithCap(1)
f.Authors.T = append(f.Authors.T, keys.Pub())
f.Since = timestamp.FromUnix(since)
sub, err := rl.Subscribe(ctx, filter.NewS(f))
sub, err := rl.Subscribe(connCtx, filter.NewS(f))
if err != nil {
fmt.Printf("worker %d: subscribe error: %v\n", workerID, err)
fmt.Printf(
"worker %d: subscribe error: %v\n", workerID, err,
)
return
}
defer sub.Unsub()
recv := 0
for {
select {
case <-ctx.Done():
fmt.Printf("worker %d: subscriber exiting after %d events\n", workerID, recv)
case <-timeoutCtx.Done():
fmt.Printf(
"worker %d: subscriber exiting after %d events (benchmark timeout: %v)\n",
workerID, recv, timeoutCtx.Err(),
)
return
case <-rl.Context().Done():
fmt.Printf(
"worker %d: relay connection closed; cause=%v lastErr=%v\n",
workerID, rl.ConnectionCause(), rl.LastError(),
)
return
case <-sub.EndOfStoredEvents:
// continue streaming live events
@@ -194,7 +213,10 @@ func runNetworkLoad(cfg *BenchmarkConfig) {
}
recv++
if recv%100 == 0 {
fmt.Printf("worker %d: received %d matching events\n", workerID, recv)
fmt.Printf(
"worker %d: received %d matching events\n",
workerID, recv,
)
}
ev.Free()
}
@@ -207,7 +229,7 @@ func runNetworkLoad(cfg *BenchmarkConfig) {
count := 0
for {
select {
case <-ctx.Done():
case <-timeoutCtx.Done():
fmt.Printf(
"worker %d: stopping after %d publishes\n", workerID,
count,
@@ -233,12 +255,16 @@ func runNetworkLoad(cfg *BenchmarkConfig) {
select {
case err := <-ch:
if err != nil {
fmt.Printf("worker %d: write error: %v\n", workerID, err)
fmt.Printf(
"worker %d: write error: %v\n", workerID, err,
)
}
default:
}
if count%100 == 0 {
fmt.Printf("worker %d: sent %d events\n", workerID, count)
fmt.Printf(
"worker %d: sent %d events\n", workerID, count,
)
}
ev.Free()
count++
@@ -284,15 +310,25 @@ func (b *Benchmark) Close() {
func (b *Benchmark) RunSuite() {
for round := 1; round <= 2; round++ {
fmt.Printf("\n=== Starting test round %d/2 ===\n", round)
fmt.Printf("RunPeakThroughputTest..\n")
b.RunPeakThroughputTest()
time.Sleep(10 * time.Second)
fmt.Printf("RunBurstPatternTest..\n")
b.RunBurstPatternTest()
time.Sleep(10 * time.Second)
fmt.Printf("RunMixedReadWriteTest..\n")
b.RunMixedReadWriteTest()
time.Sleep(10 * time.Second)
fmt.Printf("RunQueryTest..\n")
b.RunQueryTest()
time.Sleep(10 * time.Second)
fmt.Printf("RunConcurrentQueryStoreTest..\n")
b.RunConcurrentQueryStoreTest()
if round < 2 {
fmt.Println("\nPausing 10s before next round...")
fmt.Printf("\nPausing 10s before next round...\n")
time.Sleep(10 * time.Second)
}
fmt.Printf("\n=== Test round completed ===\n\n")
}
}
@@ -595,21 +631,343 @@ func (b *Benchmark) RunMixedReadWriteTest() {
fmt.Printf("Combined ops/sec: %.2f\n", result.EventsPerSecond)
}
// RunQueryTest specifically benchmarks the QueryEvents function performance
func (b *Benchmark) RunQueryTest() {
fmt.Println("\n=== Query Test ===")
start := time.Now()
var totalQueries int64
var queryLatencies []time.Duration
var errors []error
var mu sync.Mutex
// Pre-populate with events for querying
numSeedEvents := 10000
seedEvents := b.generateEvents(numSeedEvents)
ctx := context.Background()
fmt.Printf(
"Pre-populating database with %d events for query tests...\n",
numSeedEvents,
)
for _, ev := range seedEvents {
b.db.SaveEvent(ctx, ev)
}
// Create different types of filters for querying
filters := []*filter.F{
func() *filter.F { // Kind filter
f := filter.New()
f.Kinds = kind.NewS(kind.TextNote)
limit := uint(100)
f.Limit = &limit
return f
}(),
func() *filter.F { // Tag filter
f := filter.New()
f.Tags = tag.NewS(
tag.NewFromBytesSlice(
[]byte("t"), []byte("benchmark"),
),
)
limit := uint(100)
f.Limit = &limit
return f
}(),
func() *filter.F { // Mixed filter
f := filter.New()
f.Kinds = kind.NewS(kind.TextNote)
f.Tags = tag.NewS(
tag.NewFromBytesSlice(
[]byte("t"), []byte("benchmark"),
),
)
limit := uint(50)
f.Limit = &limit
return f
}(),
}
var wg sync.WaitGroup
// Start query workers
for i := 0; i < b.config.ConcurrentWorkers; i++ {
wg.Add(1)
go func(workerID int) {
defer wg.Done()
filterIndex := workerID % len(filters)
queryCount := 0
for time.Since(start) < b.config.TestDuration {
// Rotate through different filters
f := filters[filterIndex]
filterIndex = (filterIndex + 1) % len(filters)
// Execute query
queryStart := time.Now()
events, err := b.db.QueryEvents(ctx, f)
queryLatency := time.Since(queryStart)
mu.Lock()
if err != nil {
errors = append(errors, err)
} else {
totalQueries++
queryLatencies = append(queryLatencies, queryLatency)
// Free event memory
for _, ev := range events {
ev.Free()
}
}
mu.Unlock()
queryCount++
if queryCount%10 == 0 {
time.Sleep(10 * time.Millisecond) // Small delay every 10 queries
}
}
}(i)
}
wg.Wait()
duration := time.Since(start)
// Calculate metrics
result := &BenchmarkResult{
TestName: "Query Performance",
Duration: duration,
TotalEvents: int(totalQueries),
EventsPerSecond: float64(totalQueries) / duration.Seconds(),
ConcurrentWorkers: b.config.ConcurrentWorkers,
MemoryUsed: getMemUsage(),
}
if len(queryLatencies) > 0 {
result.AvgLatency = calculateAvgLatency(queryLatencies)
result.P90Latency = calculatePercentileLatency(queryLatencies, 0.90)
result.P95Latency = calculatePercentileLatency(queryLatencies, 0.95)
result.P99Latency = calculatePercentileLatency(queryLatencies, 0.99)
result.Bottom10Avg = calculateBottom10Avg(queryLatencies)
}
result.SuccessRate = 100.0 // No specific target count for queries
for _, err := range errors {
result.Errors = append(result.Errors, err.Error())
}
b.mu.Lock()
b.results = append(b.results, result)
b.mu.Unlock()
fmt.Printf(
"Query test completed: %d queries in %v\n", totalQueries, duration,
)
fmt.Printf("Queries/sec: %.2f\n", result.EventsPerSecond)
fmt.Printf("Avg query latency: %v\n", result.AvgLatency)
fmt.Printf("P95 query latency: %v\n", result.P95Latency)
fmt.Printf("P99 query latency: %v\n", result.P99Latency)
}
// RunConcurrentQueryStoreTest benchmarks the performance of concurrent query and store operations
func (b *Benchmark) RunConcurrentQueryStoreTest() {
fmt.Println("\n=== Concurrent Query/Store Test ===")
start := time.Now()
var totalQueries, totalWrites int64
var queryLatencies, writeLatencies []time.Duration
var errors []error
var mu sync.Mutex
// Pre-populate with some events
numSeedEvents := 5000
seedEvents := b.generateEvents(numSeedEvents)
ctx := context.Background()
fmt.Printf(
"Pre-populating database with %d events for concurrent query/store test...\n",
numSeedEvents,
)
for _, ev := range seedEvents {
b.db.SaveEvent(ctx, ev)
}
// Generate events for writing during the test
writeEvents := b.generateEvents(b.config.NumEvents)
// Create filters for querying
filters := []*filter.F{
func() *filter.F { // Recent events filter
f := filter.New()
f.Since = timestamp.FromUnix(time.Now().Add(-10 * time.Minute).Unix())
limit := uint(100)
f.Limit = &limit
return f
}(),
func() *filter.F { // Kind and tag filter
f := filter.New()
f.Kinds = kind.NewS(kind.TextNote)
f.Tags = tag.NewS(
tag.NewFromBytesSlice(
[]byte("t"), []byte("benchmark"),
),
)
limit := uint(50)
f.Limit = &limit
return f
}(),
}
var wg sync.WaitGroup
// Half of the workers will be readers, half will be writers
numReaders := b.config.ConcurrentWorkers / 2
numWriters := b.config.ConcurrentWorkers - numReaders
// Start query workers (readers)
for i := 0; i < numReaders; i++ {
wg.Add(1)
go func(workerID int) {
defer wg.Done()
filterIndex := workerID % len(filters)
queryCount := 0
for time.Since(start) < b.config.TestDuration {
// Select a filter
f := filters[filterIndex]
filterIndex = (filterIndex + 1) % len(filters)
// Execute query
queryStart := time.Now()
events, err := b.db.QueryEvents(ctx, f)
queryLatency := time.Since(queryStart)
mu.Lock()
if err != nil {
errors = append(errors, err)
} else {
totalQueries++
queryLatencies = append(queryLatencies, queryLatency)
// Free event memory
for _, ev := range events {
ev.Free()
}
}
mu.Unlock()
queryCount++
if queryCount%5 == 0 {
time.Sleep(5 * time.Millisecond) // Small delay
}
}
}(i)
}
// Start write workers
for i := 0; i < numWriters; i++ {
wg.Add(1)
go func(workerID int) {
defer wg.Done()
eventIndex := workerID
writeCount := 0
for time.Since(start) < b.config.TestDuration && eventIndex < len(writeEvents) {
// Write operation
writeStart := time.Now()
_, _, err := b.db.SaveEvent(ctx, writeEvents[eventIndex])
writeLatency := time.Since(writeStart)
mu.Lock()
if err != nil {
errors = append(errors, err)
} else {
totalWrites++
writeLatencies = append(writeLatencies, writeLatency)
}
mu.Unlock()
eventIndex += numWriters
writeCount++
if writeCount%10 == 0 {
time.Sleep(10 * time.Millisecond) // Small delay every 10 writes
}
}
}(i)
}
wg.Wait()
duration := time.Since(start)
// Calculate metrics
totalOps := totalQueries + totalWrites
result := &BenchmarkResult{
TestName: "Concurrent Query/Store",
Duration: duration,
TotalEvents: int(totalOps),
EventsPerSecond: float64(totalOps) / duration.Seconds(),
ConcurrentWorkers: b.config.ConcurrentWorkers,
MemoryUsed: getMemUsage(),
}
// Calculate combined latencies for overall metrics
allLatencies := append(queryLatencies, writeLatencies...)
if len(allLatencies) > 0 {
result.AvgLatency = calculateAvgLatency(allLatencies)
result.P90Latency = calculatePercentileLatency(allLatencies, 0.90)
result.P95Latency = calculatePercentileLatency(allLatencies, 0.95)
result.P99Latency = calculatePercentileLatency(allLatencies, 0.99)
result.Bottom10Avg = calculateBottom10Avg(allLatencies)
}
result.SuccessRate = 100.0 // No specific target
for _, err := range errors {
result.Errors = append(result.Errors, err.Error())
}
b.mu.Lock()
b.results = append(b.results, result)
b.mu.Unlock()
// Calculate separate metrics for queries and writes
var queryAvg, writeAvg time.Duration
if len(queryLatencies) > 0 {
queryAvg = calculateAvgLatency(queryLatencies)
}
if len(writeLatencies) > 0 {
writeAvg = calculateAvgLatency(writeLatencies)
}
fmt.Printf(
"Concurrent test completed: %d operations (%d queries, %d writes) in %v\n",
totalOps, totalQueries, totalWrites, duration,
)
fmt.Printf("Operations/sec: %.2f\n", result.EventsPerSecond)
fmt.Printf("Avg latency: %v\n", result.AvgLatency)
fmt.Printf("Avg query latency: %v\n", queryAvg)
fmt.Printf("Avg write latency: %v\n", writeAvg)
fmt.Printf("P95 latency: %v\n", result.P95Latency)
fmt.Printf("P99 latency: %v\n", result.P99Latency)
}
func (b *Benchmark) generateEvents(count int) []*event.E {
events := make([]*event.E, count)
now := timestamp.Now()
// Generate a keypair for signing all events
var keys p256k.Signer
if err := keys.Generate(); err != nil {
log.Fatalf("Failed to generate keys for benchmark events: %v", err)
}
for i := 0; i < count; i++ {
ev := event.New()
// Generate random 32-byte ID
ev.ID = make([]byte, 32)
rand.Read(ev.ID)
// Generate random 32-byte pubkey
ev.Pubkey = make([]byte, 32)
rand.Read(ev.Pubkey)
ev.CreatedAt = now.I64()
ev.Kind = kind.TextNote.K
ev.Content = []byte(fmt.Sprintf(
@@ -624,6 +982,11 @@ func (b *Benchmark) generateEvents(count int) []*event.E {
),
)
// Properly sign the event instead of generating fake signatures
if err := ev.Sign(&keys); err != nil {
log.Fatalf("Failed to sign event %d: %v", i, err)
}
events[i] = ev
}


@@ -1,104 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-badger_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912195906053114 INF /tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912195906053741 INF /tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912195906053768 INF /tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912195906054020 INF (*types.Uint32)(0xc00570406c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912195906054071 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 611.579176ms
Events/sec: 16351.11
Avg latency: 474.016µs
P95 latency: 479.03µs
P99 latency: 594.73µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 160.976517ms
Burst completed: 1000 events in 153.010415ms
Burst completed: 1000 events in 146.10015ms
Burst completed: 1000 events in 148.403729ms
Burst completed: 1000 events in 141.681801ms
Burst completed: 1000 events in 154.663067ms
Burst completed: 1000 events in 135.960988ms
Burst completed: 1000 events in 136.240589ms
Burst completed: 1000 events in 141.75454ms
Burst completed: 1000 events in 152.485379ms
Burst test completed: 10000 events in 6.496690038s
Events/sec: 1539.25
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 37.695370694s
Combined ops/sec: 265.28
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 611.579176ms
Total Events: 10000
Events/sec: 16351.11
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 154 MB
Avg Latency: 474.016µs
P95 Latency: 479.03µs
P99 Latency: 594.73µs
----------------------------------------
Test: Burst Pattern
Duration: 6.496690038s
Total Events: 10000
Events/sec: 1539.25
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 207 MB
Avg Latency: 226.602µs
P95 Latency: 239.525µs
P99 Latency: 168.561µs
----------------------------------------
Test: Mixed Read/Write
Duration: 37.695370694s
Total Events: 10000
Events/sec: 265.28
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 132 MB
Avg Latency: 9.930935ms
P95 Latency: 17.75358ms
P99 Latency: 24.256293ms
----------------------------------------
Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
20250912195950858706 INF /tmp/benchmark_khatru-badger_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912195951643646 INF /tmp/benchmark_khatru-badger_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912195951645255 INF /tmp/benchmark_khatru-badger_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: khatru-badger
RELAY_URL: ws://khatru-badger:3334
TEST_TIMESTAMP: 2025-09-12T19:59:51+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s


@@ -1,104 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-sqlite_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912195817361580 INF /tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912195817362030 INF /tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912195817362064 INF /tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912195817362711 INF (*types.Uint32)(0xc00000005c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912195817362777 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 699.706889ms
Events/sec: 14291.70
Avg latency: 545.724µs
P95 latency: 473.43µs
P99 latency: 478.349µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 138.253122ms
Burst completed: 1000 events in 153.709429ms
Burst completed: 1000 events in 158.711026ms
Burst completed: 1000 events in 152.54677ms
Burst completed: 1000 events in 144.735244ms
Burst completed: 1000 events in 153.236893ms
Burst completed: 1000 events in 150.180515ms
Burst completed: 1000 events in 154.733588ms
Burst completed: 1000 events in 151.252182ms
Burst completed: 1000 events in 150.610613ms
Burst test completed: 10000 events in 6.534724469s
Events/sec: 1530.29
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 35.563312501s
Combined ops/sec: 281.19
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 699.706889ms
Total Events: 10000
Events/sec: 14291.70
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 154 MB
Avg Latency: 545.724µs
P95 Latency: 473.43µs
P99 Latency: 478.349µs
----------------------------------------
Test: Burst Pattern
Duration: 6.534724469s
Total Events: 10000
Events/sec: 1530.29
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 208 MB
Avg Latency: 205.962µs
P95 Latency: 165.525µs
P99 Latency: 253.411µs
----------------------------------------
Test: Mixed Read/Write
Duration: 35.563312501s
Total Events: 10000
Events/sec: 281.19
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 146 MB
Avg Latency: 9.092604ms
P95 Latency: 19.302571ms
P99 Latency: 16.944829ms
----------------------------------------
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
20250912195900161526 INF /tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912195900909573 INF /tmp/benchmark_khatru-sqlite_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912195900911092 INF /tmp/benchmark_khatru-sqlite_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: khatru-sqlite
RELAY_URL: ws://khatru-sqlite:3334
TEST_TIMESTAMP: 2025-09-12T19:59:01+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s


@@ -1,104 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912195729240522 INF /tmp/benchmark_next-orly_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912195729241087 INF /tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912195729241168 INF /tmp/benchmark_next-orly_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912195729241759 INF (*types.Uint32)(0xc0001de49c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912195729241847 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 558.618706ms
Events/sec: 17901.30
Avg latency: 433.058µs
P95 latency: 456.738µs
P99 latency: 337.231µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 172.949275ms
Burst completed: 1000 events in 175.209401ms
Burst completed: 1000 events in 156.532197ms
Burst completed: 1000 events in 157.913421ms
Burst completed: 1000 events in 151.37659ms
Burst completed: 1000 events in 161.938783ms
Burst completed: 1000 events in 168.47761ms
Burst completed: 1000 events in 159.951768ms
Burst completed: 1000 events in 170.308111ms
Burst completed: 1000 events in 146.767432ms
Burst test completed: 10000 events in 6.646634323s
Events/sec: 1504.52
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 35.548232107s
Combined ops/sec: 281.31
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 558.618706ms
Total Events: 10000
Events/sec: 17901.30
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 154 MB
Avg Latency: 433.058µs
P95 Latency: 456.738µs
P99 Latency: 337.231µs
----------------------------------------
Test: Burst Pattern
Duration: 6.646634323s
Total Events: 10000
Events/sec: 1504.52
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 207 MB
Avg Latency: 182.813µs
P95 Latency: 152.86µs
P99 Latency: 204.198µs
----------------------------------------
Test: Mixed Read/Write
Duration: 35.548232107s
Total Events: 10000
Events/sec: 281.31
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 215 MB
Avg Latency: 9.086952ms
P95 Latency: 18.156339ms
P99 Latency: 24.346573ms
----------------------------------------
Report saved to: /tmp/benchmark_next-orly_8/benchmark_report.txt
20250912195811996353 INF /tmp/benchmark_next-orly_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912195812308400 INF /tmp/benchmark_next-orly_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912195812310341 INF /tmp/benchmark_next-orly_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: next-orly
RELAY_URL: ws://next-orly:8080
TEST_TIMESTAMP: 2025-09-12T19:58:12+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s


@@ -1,104 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_nostr-rs-relay_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912200137539643 INF /tmp/benchmark_nostr-rs-relay_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912200137540391 INF /tmp/benchmark_nostr-rs-relay_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912200137540449 INF /tmp/benchmark_nostr-rs-relay_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912200137540903 INF (*types.Uint32)(0xc0001c24cc)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912200137540961 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 657.896815ms
Events/sec: 15199.95
Avg latency: 508.699µs
P95 latency: 1.011413ms
P99 latency: 710.782µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 149.389787ms
Burst completed: 1000 events in 138.154354ms
Burst completed: 1000 events in 139.952633ms
Burst completed: 1000 events in 148.684306ms
Burst completed: 1000 events in 154.779586ms
Burst completed: 1000 events in 163.72717ms
Burst completed: 1000 events in 142.665132ms
Burst completed: 1000 events in 151.637082ms
Burst completed: 1000 events in 143.018896ms
Burst completed: 1000 events in 157.963802ms
Burst test completed: 10000 events in 6.519459944s
Events/sec: 1533.87
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 36.26569002s
Combined ops/sec: 275.74
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 657.896815ms
Total Events: 10000
Events/sec: 15199.95
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 153 MB
Avg Latency: 508.699µs
P95 Latency: 1.011413ms
P99 Latency: 710.782µs
----------------------------------------
Test: Burst Pattern
Duration: 6.519459944s
Total Events: 10000
Events/sec: 1533.87
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 206 MB
Avg Latency: 217.187µs
P95 Latency: 130.018µs
P99 Latency: 261.728µs
----------------------------------------
Test: Mixed Read/Write
Duration: 36.26569002s
Total Events: 10000
Events/sec: 275.74
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 225 MB
Avg Latency: 9.38757ms
P95 Latency: 19.250416ms
P99 Latency: 20.049957ms
----------------------------------------
Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
20250912200220985006 INF /tmp/benchmark_nostr-rs-relay_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912200221295381 INF /tmp/benchmark_nostr-rs-relay_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912200221297677 INF /tmp/benchmark_nostr-rs-relay_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: nostr-rs-relay
RELAY_URL: ws://nostr-rs-relay:8080
TEST_TIMESTAMP: 2025-09-12T20:02:21+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
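One oddity worth flagging in the report above: the Peak Throughput block reports a P99 (710.782µs) below its P95 (1.011413ms), and several other blocks in this diff show the same inversion. With the conventional nearest-rank percentile over a single sorted sample, sketched below, P99 can never be smaller than P95, so those figures presumably come from separate measurement windows. The harness is not part of this diff; this is only the textbook computation:

package main

import (
	"fmt"
	"math"
	"sort"
	"time"
)

// percentile computes a nearest-rank percentile over a latency sample.
func percentile(samples []time.Duration, p float64) time.Duration {
	if len(samples) == 0 {
		return 0
	}
	sorted := append([]time.Duration(nil), samples...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i] < sorted[j] })
	rank := int(math.Ceil(p/100*float64(len(sorted)))) - 1
	if rank < 0 {
		rank = 0
	}
	return sorted[rank]
}

func main() {
	sample := []time.Duration{
		400 * time.Microsecond, 450 * time.Microsecond,
		500 * time.Microsecond, 700 * time.Microsecond,
		1100 * time.Microsecond,
	}
	// On one sorted sample, P99 is always >= P95: prints "1.1ms 1.1ms".
	fmt.Println(percentile(sample, 95), percentile(sample, 99))
}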

@@ -1,104 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_relayer-basic_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912195956808180 INF /tmp/benchmark_relayer-basic_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912195956808720 INF /tmp/benchmark_relayer-basic_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912195956808755 INF /tmp/benchmark_relayer-basic_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912195956809102 INF (*types.Uint32)(0xc0001bc04c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912195956809190 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 605.231707ms
Events/sec: 16522.60
Avg latency: 466.066µs
P95 latency: 514.849µs
P99 latency: 451.358µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 149.715312ms
Burst completed: 1000 events in 146.385191ms
Burst completed: 1000 events in 147.010481ms
Burst completed: 1000 events in 151.671062ms
Burst completed: 1000 events in 143.215087ms
Burst completed: 1000 events in 137.331431ms
Burst completed: 1000 events in 155.735079ms
Burst completed: 1000 events in 161.246126ms
Burst completed: 1000 events in 140.174417ms
Burst completed: 1000 events in 144.819799ms
Burst test completed: 10000 events in 6.503155987s
Events/sec: 1537.71
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 37.45410417s
Combined ops/sec: 266.99
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 605.231707ms
Total Events: 10000
Events/sec: 16522.60
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 152 MB
Avg Latency: 466.066µs
P95 Latency: 514.849µs
P99 Latency: 451.358µs
----------------------------------------
Test: Burst Pattern
Duration: 6.503155987s
Total Events: 10000
Events/sec: 1537.71
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 203 MB
Avg Latency: 215.609µs
P95 Latency: 141.91µs
P99 Latency: 204.819µs
----------------------------------------
Test: Mixed Read/Write
Duration: 37.45410417s
Total Events: 10000
Events/sec: 266.99
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 148 MB
Avg Latency: 9.851217ms
P95 Latency: 23.101412ms
P99 Latency: 17.889412ms
----------------------------------------
Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
20250912200041372670 INF /tmp/benchmark_relayer-basic_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912200041686782 INF /tmp/benchmark_relayer-basic_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912200041689009 INF /tmp/benchmark_relayer-basic_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: relayer-basic
RELAY_URL: ws://relayer-basic:7447
TEST_TIMESTAMP: 2025-09-12T20:00:41+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
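The burst numbers above decompose cleanly: ten bursts of 1000 events take roughly 140-160ms each (around 6500-7000 events/sec inside a burst), yet the whole test spans 6.5s, so the 1537.71 events/sec figure is dominated by roughly half a second of idle time between bursts. A minimal sketch of such a burst loop, assuming a fixed inter-burst pause; the actual harness and its publish call are not shown in this diff:

package main

import (
	"fmt"
	"time"
)

func main() {
	publish := func() error { time.Sleep(140 * time.Microsecond); return nil } // stand-in op
	const bursts, perBurst = 10, 1000
	start := time.Now()
	for b := 0; b < bursts; b++ {
		bStart := time.Now()
		for i := 0; i < perBurst; i++ {
			if err := publish(); err != nil {
				return
			}
		}
		fmt.Printf("Burst completed: %d events in %s\n", perBurst, time.Since(bStart))
		time.Sleep(500 * time.Millisecond) // inter-burst pause, inferred from the timings above
	}
	total := bursts * perBurst
	elapsed := time.Since(start)
	fmt.Printf("Burst test completed: %d events in %s\nEvents/sec: %.2f\n",
		total, elapsed, float64(total)/elapsed.Seconds())
}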

@@ -1,35 +0,0 @@
= NOSTR Relay Benchmark Results
Generated from: aggregate_report.txt
[cols="1,^1,^1,^1,^1,^1,^1",options="header"]
|===
| Metric | next-orly | khatru-sqlite | khatru-badger | relayer-basic | strfry | nostr-rs-relay
| Store Events/sec
| 17901.30 | 14291.70 | 16351.11 | 16522.60 | 15346.12 | 15199.95
| Store Avg Latency #1
| 433.058µs | 545.724µs | 474.016µs | 466.066µs | 506.51µs | 508.699µs
| Store P95 Latency #1
| 456.738µs | 473.43µs | 479.03µs | 514.849µs | 590.442µs | 1.011413ms
| Query Events/sec #2
| 1504.52 | 1530.29 | 1539.25 | 1537.71 | 1534.88 | 1533.87
| Query Avg Latency #2
| 182.813µs | 205.962µs | 226.602µs | 215.609µs | 216.564µs | 217.187µs
| Query P95 Latency #2
| 152.86µs | 165.525µs | 239.525µs | 141.91µs | 267.91µs | 130.018µs
| Concurrent Store/Query Events/sec #3
| 17901.30 | 14291.70 | 16351.11 | 16522.60 | 15346.12 | 15199.95
| Concurrent Store/Query Avg Latency #3
| 9.086952ms | 9.092604ms | 9.930935ms | 9.851217ms | 9.938991ms | 9.38757ms
| Concurrent Store/Query P95 Latency #3
| 18.156339ms | 19.302571ms | 17.75358ms | 23.101412ms | 19.784708ms | 19.250416ms
|===
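The #1/#2/#3 markers in this generated table carry no legend, but the values line up with the three test phases of the per-relay reports in this diff: the #1 columns match the Peak Throughput figures, #2 the Burst Pattern figures, and the #3 latency rows the Mixed Read/Write figures. Note also that the "Concurrent Store/Query Events/sec #3" row repeats the #1 (store) values rather than the mixed ops/sec figures (e.g. 275.74 for nostr-rs-relay), which looks like a slip in the report generator.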

@@ -1,104 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_strfry_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912200046745432 INF /tmp/benchmark_strfry_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912200046746116 INF /tmp/benchmark_strfry_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912200046746193 INF /tmp/benchmark_strfry_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912200046746576 INF (*types.Uint32)(0xc0002a9c4c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912200046746636 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 651.630667ms
Events/sec: 15346.12
Avg latency: 506.51µs
P95 latency: 590.442µs
P99 latency: 278.399µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 148.701372ms
Burst completed: 1000 events in 161.333951ms
Burst completed: 1000 events in 146.993646ms
Burst completed: 1000 events in 155.768019ms
Burst completed: 1000 events in 143.83944ms
Burst completed: 1000 events in 156.208347ms
Burst completed: 1000 events in 150.769887ms
Burst completed: 1000 events in 140.217044ms
Burst completed: 1000 events in 150.831164ms
Burst completed: 1000 events in 135.759058ms
Burst test completed: 10000 events in 6.515183689s
Events/sec: 1534.88
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 37.667054484s
Combined ops/sec: 265.48
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 651.630667ms
Total Events: 10000
Events/sec: 15346.12
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 152 MB
Avg Latency: 506.51µs
P95 Latency: 590.442µs
P99 Latency: 278.399µs
----------------------------------------
Test: Burst Pattern
Duration: 6.515183689s
Total Events: 10000
Events/sec: 1534.88
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 203 MB
Avg Latency: 216.564µs
P95 Latency: 267.91µs
P99 Latency: 310.46µs
----------------------------------------
Test: Mixed Read/Write
Duration: 37.667054484s
Total Events: 10000
Events/sec: 265.48
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 136 MB
Avg Latency: 9.938991ms
P95 Latency: 19.784708ms
P99 Latency: 18.788985ms
----------------------------------------
Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
20250912200131581470 INF /tmp/benchmark_strfry_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912200132372653 INF /tmp/benchmark_strfry_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 01. Size: 21 MiB of 21 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912200132384548 INF /tmp/benchmark_strfry_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: strfry
RELAY_URL: ws://strfry:8080
TEST_TIMESTAMP: 2025-09-12T20:01:32+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
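As with the store tests, the Mixed Read/Write "Combined ops/sec" is the plain ratio of completed operations to wall-clock time: for the strfry run above, (5000 writes + 5000 reads) / 37.667054484s ≈ 265.48.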

@@ -1,140 +0,0 @@
================================================================
NOSTR RELAY BENCHMARK AGGREGATE REPORT
================================================================
Generated: 2025-09-12T22:43:29+00:00
Benchmark Configuration:
Events per test: 10000
Concurrent workers: 8
Test duration: 60s
Relays tested: 6
================================================================
SUMMARY BY RELAY
================================================================
Relay: next-orly
----------------------------------------
Status: COMPLETED
Events/sec: 18056.94
Events/sec: 1492.32
Events/sec: 16750.82
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 428.869µs
Bottom 10% Avg Latency: 643.51µs
Avg Latency: 178.04µs
P95 Latency: 607.997µs
P95 Latency: 243.954µs
P95 Latency: 21.665387ms
Relay: khatru-sqlite
----------------------------------------
Status: COMPLETED
Events/sec: 17635.76
Events/sec: 1510.39
Events/sec: 16509.10
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 437.941µs
Bottom 10% Avg Latency: 659.71µs
Avg Latency: 203.563µs
P95 Latency: 621.964µs
P95 Latency: 330.729µs
P95 Latency: 21.838576ms
Relay: khatru-badger
----------------------------------------
Status: COMPLETED
Events/sec: 17312.60
Events/sec: 1508.54
Events/sec: 15933.99
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 448.778µs
Bottom 10% Avg Latency: 664.268µs
Avg Latency: 196.38µs
P95 Latency: 633.085µs
P95 Latency: 293.579µs
P95 Latency: 22.727378ms
Relay: relayer-basic
----------------------------------------
Status: COMPLETED
Events/sec: 15155.00
Events/sec: 1545.44
Events/sec: 14255.58
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 513.243µs
Bottom 10% Avg Latency: 864.746µs
Avg Latency: 273.645µs
P95 Latency: 792.685µs
P95 Latency: 498.989µs
P95 Latency: 22.924497ms
Relay: strfry
----------------------------------------
Status: COMPLETED
Events/sec: 15245.05
Events/sec: 1533.59
Events/sec: 15507.07
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 510.383µs
Bottom 10% Avg Latency: 831.211µs
Avg Latency: 223.359µs
P95 Latency: 769.085µs
P95 Latency: 378.145µs
P95 Latency: 22.152884ms
Relay: nostr-rs-relay
----------------------------------------
Status: COMPLETED
Events/sec: 16312.24
Events/sec: 1502.05
Events/sec: 14131.23
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 476.418µs
Bottom 10% Avg Latency: 722.179µs
Avg Latency: 182.765µs
P95 Latency: 686.836µs
P95 Latency: 257.082µs
P95 Latency: 20.680962ms
================================================================
DETAILED RESULTS
================================================================
Individual relay reports are available in:
- /reports/run_20250912_222649/khatru-badger_results.txt
- /reports/run_20250912_222649/khatru-sqlite_results.txt
- /reports/run_20250912_222649/next-orly_results.txt
- /reports/run_20250912_222649/nostr-rs-relay_results.txt
- /reports/run_20250912_222649/relayer-basic_results.txt
- /reports/run_20250912_222649/strfry_results.txt
================================================================
BENCHMARK COMPARISON TABLE
================================================================
Relay Status Peak Tput/s Avg Latency Success Rate
---- ------ ----------- ----------- ------------
next-orly OK 18056.94 428.869µs 100.0%
khatru-sqlite OK 17635.76 437.941µs 100.0%
khatru-badger OK 17312.60 448.778µs 100.0%
relayer-basic OK 15155.00 513.243µs 100.0%
strfry OK 15245.05 510.383µs 100.0%
nostr-rs-relay OK 16312.24 476.418µs 100.0%
================================================================
End of Report
================================================================
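The unlabeled repeated lines in this aggregate are an artifact of the generator stripping the test names: for each relay the three Events/sec lines are, in order, round-1 Peak Throughput, round-1 Burst Pattern, and round-2 Peak Throughput of the two-round reports that follow (e.g. next-orly's 18056.94 / 1492.32 / 16750.82), and the three P95 lines are round-1 Peak, Burst, and Mixed respectively.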

@@ -1,190 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-badger_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912223222496620 INF /tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912223222497154 INF /tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912223222497184 INF /tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912223222497402 INF (*types.Uint32)(0xc0000100fc)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912223222497454 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 577.614152ms
Events/sec: 17312.60
Avg latency: 448.778µs
P90 latency: 584.783µs
P95 latency: 633.085µs
P99 latency: 749.537µs
Bottom 10% Avg latency: 664.268µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 161.62554ms
Burst completed: 1000 events in 154.666063ms
Burst completed: 1000 events in 149.999903ms
Burst completed: 1000 events in 169.141205ms
Burst completed: 1000 events in 153.987041ms
Burst completed: 1000 events in 141.227756ms
Burst completed: 1000 events in 168.989116ms
Burst completed: 1000 events in 161.032171ms
Burst completed: 1000 events in 182.128996ms
Burst completed: 1000 events in 161.86147ms
Burst test completed: 10000 events in 6.628942674s
Events/sec: 1508.54
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 36.466065909s
Combined ops/sec: 274.23
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 627.589155ms
Events/sec: 15933.99
Avg latency: 489.881µs
P90 latency: 628.857µs
P95 latency: 679.363µs
P99 latency: 828.307µs
Bottom 10% Avg latency: 716.862µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 150.262543ms
Burst completed: 1000 events in 148.027109ms
Burst completed: 1000 events in 139.184066ms
Burst completed: 1000 events in 147.196277ms
Burst completed: 1000 events in 141.143557ms
Burst completed: 1000 events in 138.727197ms
Burst completed: 1000 events in 143.014207ms
Burst completed: 1000 events in 143.355055ms
Burst completed: 1000 events in 162.573956ms
Burst completed: 1000 events in 142.875393ms
Burst test completed: 10000 events in 6.475822519s
Events/sec: 1544.21
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4742 reads in 1m0.036644794s
Combined ops/sec: 162.27
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 577.614152ms
Total Events: 10000
Events/sec: 17312.60
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 152 MB
Avg Latency: 448.778µs
P90 Latency: 584.783µs
P95 Latency: 633.085µs
P99 Latency: 749.537µs
Bottom 10% Avg Latency: 664.268µs
----------------------------------------
Test: Burst Pattern
Duration: 6.628942674s
Total Events: 10000
Events/sec: 1508.54
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 204 MB
Avg Latency: 196.38µs
P90 Latency: 260.706µs
P95 Latency: 293.579µs
P99 Latency: 385.694µs
Bottom 10% Avg Latency: 317.532µs
----------------------------------------
Test: Mixed Read/Write
Duration: 36.466065909s
Total Events: 10000
Events/sec: 274.23
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 128 MB
Avg Latency: 9.448363ms
P90 Latency: 20.988228ms
P95 Latency: 22.727378ms
P99 Latency: 25.094784ms
Bottom 10% Avg Latency: 23.01277ms
----------------------------------------
Test: Peak Throughput
Duration: 627.589155ms
Total Events: 10000
Events/sec: 15933.99
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 124 MB
Avg Latency: 489.881µs
P90 Latency: 628.857µs
P95 Latency: 679.363µs
P99 Latency: 828.307µs
Bottom 10% Avg Latency: 716.862µs
----------------------------------------
Test: Burst Pattern
Duration: 6.475822519s
Total Events: 10000
Events/sec: 1544.21
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 170 MB
Avg Latency: 215.418µs
P90 Latency: 287.237µs
P95 Latency: 339.025µs
P99 Latency: 510.682µs
Bottom 10% Avg Latency: 378.172µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.036644794s
Total Events: 9742
Events/sec: 162.27
Success Rate: 97.4%
Concurrent Workers: 8
Memory Used: 181 MB
Avg Latency: 19.714686ms
P90 Latency: 44.573506ms
P95 Latency: 46.895555ms
P99 Latency: 50.425027ms
Bottom 10% Avg Latency: 47.384489ms
----------------------------------------
Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.adoc
20250912223503335481 INF /tmp/benchmark_khatru-badger_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912223504473151 INF /tmp/benchmark_khatru-badger_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912223504475627 INF /tmp/benchmark_khatru-badger_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: khatru-badger
RELAY_URL: ws://khatru-badger:3334
TEST_TIMESTAMP: 2025-09-12T22:35:04+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
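Round 2's Mixed Read/Write above stops at the 1m0s window with only 4742 of 5000 reads completed, so the 97.4% success rate reflects the duration cap, not delivery failures (9742 completed ops of 10000 attempted). A minimal sketch of a duration-capped mixed phase, assuming the harness issues the writes first (they complete in full in the logs) and cuts the remaining reads off at the deadline; the real benchmark source is not part of this diff:

package main

import (
	"context"
	"fmt"
	"time"
)

func mixedPhase(window time.Duration, write, read func() error) (writes, reads int) {
	ctx, cancel := context.WithTimeout(context.Background(), window)
	defer cancel()
	for i := 0; i < 5000; i++ {
		if ctx.Err() != nil {
			return
		}
		if write() == nil {
			writes++
		}
	}
	for i := 0; i < 5000; i++ {
		if ctx.Err() != nil { // reads are the ones cut short when the window closes
			return
		}
		if read() == nil {
			reads++
		}
	}
	return
}

func main() {
	op := func() error { time.Sleep(5 * time.Millisecond); return nil } // stand-in for a relay op
	w, r := mixedPhase(2*time.Second, op, op)
	fmt.Printf("Mixed test completed: %d writes, %d reads\n", w, r)
}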

@@ -1,190 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-sqlite_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912222936300616 INF /tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912222936301606 INF /tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912222936301647 INF /tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912222936301987 INF (*types.Uint32)(0xc0001c23f0)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912222936302060 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 567.02963ms
Events/sec: 17635.76
Avg latency: 437.941µs
P90 latency: 574.133µs
P95 latency: 621.964µs
P99 latency: 768.473µs
Bottom 10% Avg latency: 659.71µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 172.012448ms
Burst completed: 1000 events in 145.502701ms
Burst completed: 1000 events in 153.928098ms
Burst completed: 1000 events in 169.995269ms
Burst completed: 1000 events in 147.617375ms
Burst completed: 1000 events in 157.211387ms
Burst completed: 1000 events in 153.332744ms
Burst completed: 1000 events in 172.374938ms
Burst completed: 1000 events in 167.518935ms
Burst completed: 1000 events in 155.211871ms
Burst test completed: 10000 events in 6.620785215s
Events/sec: 1510.39
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 35.700582016s
Combined ops/sec: 280.11
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 605.726547ms
Events/sec: 16509.10
Avg latency: 470.577µs
P90 latency: 609.791µs
P95 latency: 660.256µs
P99 latency: 788.641µs
Bottom 10% Avg latency: 687.847µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 135.310723ms
Burst completed: 1000 events in 166.604305ms
Burst completed: 1000 events in 141.453184ms
Burst completed: 1000 events in 146.579351ms
Burst completed: 1000 events in 154.453638ms
Burst completed: 1000 events in 156.212516ms
Burst completed: 1000 events in 142.309354ms
Burst completed: 1000 events in 152.268188ms
Burst completed: 1000 events in 144.187829ms
Burst completed: 1000 events in 147.609002ms
Burst test completed: 10000 events in 6.508461808s
Events/sec: 1536.46
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4662 reads in 1m0.040595326s
Combined ops/sec: 160.92
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 567.02963ms
Total Events: 10000
Events/sec: 17635.76
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 154 MB
Avg Latency: 437.941µs
P90 Latency: 574.133µs
P95 Latency: 621.964µs
P99 Latency: 768.473µs
Bottom 10% Avg Latency: 659.71µs
----------------------------------------
Test: Burst Pattern
Duration: 6.620785215s
Total Events: 10000
Events/sec: 1510.39
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 208 MB
Avg Latency: 203.563µs
P90 Latency: 274.152µs
P95 Latency: 330.729µs
P99 Latency: 521.483µs
Bottom 10% Avg Latency: 378.237µs
----------------------------------------
Test: Mixed Read/Write
Duration: 35.700582016s
Total Events: 10000
Events/sec: 280.11
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 232 MB
Avg Latency: 9.150925ms
P90 Latency: 20.1434ms
P95 Latency: 21.838576ms
P99 Latency: 24.0106ms
Bottom 10% Avg Latency: 22.04901ms
----------------------------------------
Test: Peak Throughput
Duration: 605.726547ms
Total Events: 10000
Events/sec: 16509.10
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 139 MB
Avg Latency: 470.577µs
P90 Latency: 609.791µs
P95 Latency: 660.256µs
P99 Latency: 788.641µs
Bottom 10% Avg Latency: 687.847µs
----------------------------------------
Test: Burst Pattern
Duration: 6.508461808s
Total Events: 10000
Events/sec: 1536.46
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 182 MB
Avg Latency: 199.49µs
P90 Latency: 261.427µs
P95 Latency: 294.771µs
P99 Latency: 406.814µs
Bottom 10% Avg Latency: 332.083µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.040595326s
Total Events: 9662
Events/sec: 160.92
Success Rate: 96.6%
Concurrent Workers: 8
Memory Used: 204 MB
Avg Latency: 19.935937ms
P90 Latency: 44.802034ms
P95 Latency: 48.282589ms
P99 Latency: 52.169026ms
Bottom 10% Avg Latency: 48.641697ms
----------------------------------------
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc
20250912223216370778 INF /tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912223217349356 INF /tmp/benchmark_khatru-sqlite_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912223217352393 INF /tmp/benchmark_khatru-sqlite_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: khatru-sqlite
RELAY_URL: ws://khatru-sqlite:3334
TEST_TIMESTAMP: 2025-09-12T22:32:17+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
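The "Memory Used" lines in these reports give a single MB figure per test; how it is sampled is not visible in this diff. One common way to produce such a number in Go is runtime.ReadMemStats, sketched below purely as an illustration (the harness may well measure differently, e.g. process RSS):

package main

import (
	"fmt"
	"runtime"
)

// memUsedMB returns currently allocated heap bytes in MB. This is only one
// plausible source of a "Memory Used" figure; the actual harness is not
// included in this diff.
func memUsedMB() uint64 {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	return m.Alloc / (1 << 20)
}

func main() {
	fmt.Printf("Memory Used: %d MB\n", memUsedMB())
}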

@@ -1,190 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912222650025765 INF /tmp/benchmark_next-orly_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912222650026455 INF /tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912222650026497 INF /tmp/benchmark_next-orly_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912222650026747 INF (*types.Uint32)(0xc0001f63cc)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912222650026778 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 553.803776ms
Events/sec: 18056.94
Avg latency: 428.869µs
P90 latency: 558.663µs
P95 latency: 607.997µs
P99 latency: 749.787µs
Bottom 10% Avg latency: 643.51µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 190.801687ms
Burst completed: 1000 events in 168.170564ms
Burst completed: 1000 events in 161.16591ms
Burst completed: 1000 events in 161.43364ms
Burst completed: 1000 events in 148.293941ms
Burst completed: 1000 events in 172.875177ms
Burst completed: 1000 events in 178.930553ms
Burst completed: 1000 events in 161.052715ms
Burst completed: 1000 events in 162.071335ms
Burst completed: 1000 events in 171.849756ms
Burst test completed: 10000 events in 6.70096222s
Events/sec: 1492.32
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 35.645619485s
Combined ops/sec: 280.54
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 596.985601ms
Events/sec: 16750.82
Avg latency: 465.438µs
P90 latency: 594.151µs
P95 latency: 636.592µs
P99 latency: 757.953µs
Bottom 10% Avg latency: 672.673µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 152.121077ms
Burst completed: 1000 events in 160.774367ms
Burst completed: 1000 events in 137.913676ms
Burst completed: 1000 events in 142.916647ms
Burst completed: 1000 events in 166.771131ms
Burst completed: 1000 events in 160.016244ms
Burst completed: 1000 events in 156.369302ms
Burst completed: 1000 events in 158.850666ms
Burst completed: 1000 events in 154.842287ms
Burst completed: 1000 events in 146.828122ms
Burst test completed: 10000 events in 6.557799732s
Events/sec: 1524.90
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4782 reads in 1m0.043775785s
Combined ops/sec: 162.91
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 553.803776ms
Total Events: 10000
Events/sec: 18056.94
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 153 MB
Avg Latency: 428.869µs
P90 Latency: 558.663µs
P95 Latency: 607.997µs
P99 Latency: 749.787µs
Bottom 10% Avg Latency: 643.51µs
----------------------------------------
Test: Burst Pattern
Duration: 6.70096222s
Total Events: 10000
Events/sec: 1492.32
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 204 MB
Avg Latency: 178.04µs
P90 Latency: 224.367µs
P95 Latency: 243.954µs
P99 Latency: 318.225µs
Bottom 10% Avg Latency: 264.418µs
----------------------------------------
Test: Mixed Read/Write
Duration: 35.645619485s
Total Events: 10000
Events/sec: 280.54
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 120 MB
Avg Latency: 9.118653ms
P90 Latency: 19.852346ms
P95 Latency: 21.665387ms
P99 Latency: 23.946919ms
Bottom 10% Avg Latency: 21.867062ms
----------------------------------------
Test: Peak Throughput
Duration: 596.985601ms
Total Events: 10000
Events/sec: 16750.82
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 121 MB
Avg Latency: 465.438µs
P90 Latency: 594.151µs
P95 Latency: 636.592µs
P99 Latency: 757.953µs
Bottom 10% Avg Latency: 672.673µs
----------------------------------------
Test: Burst Pattern
Duration: 6.557799732s
Total Events: 10000
Events/sec: 1524.90
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 167 MB
Avg Latency: 189.538µs
P90 Latency: 247.511µs
P95 Latency: 274.011µs
P99 Latency: 360.977µs
Bottom 10% Avg Latency: 296.967µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.043775785s
Total Events: 9782
Events/sec: 162.91
Success Rate: 97.8%
Concurrent Workers: 8
Memory Used: 193 MB
Avg Latency: 19.562536ms
P90 Latency: 43.431835ms
P95 Latency: 46.326204ms
P99 Latency: 50.533302ms
Bottom 10% Avg Latency: 46.979603ms
----------------------------------------
Report saved to: /tmp/benchmark_next-orly_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly_8/benchmark_report.adoc
20250912222930150767 INF /tmp/benchmark_next-orly_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912222931147258 INF /tmp/benchmark_next-orly_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912222931149928 INF /tmp/benchmark_next-orly_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: next-orly
RELAY_URL: ws://next-orly:8080
TEST_TIMESTAMP: 2025-09-12T22:29:31+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
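Same pattern as the other two-round reports: the round-2 mixed phase hits the 1m0s window with 4782 of 5000 reads done, giving 9782/10000 = 97.8% success and 9782 ops / 60.043775785s ≈ 162.91 ops/sec.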

@@ -1,190 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_nostr-rs-relay_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912224044213613 INF /tmp/benchmark_nostr-rs-relay_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912224044214094 INF /tmp/benchmark_nostr-rs-relay_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912224044214130 INF /tmp/benchmark_nostr-rs-relay_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912224044214381 INF (*types.Uint32)(0xc000233c3c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912224044214413 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 613.036589ms
Events/sec: 16312.24
Avg latency: 476.418µs
P90 latency: 627.852µs
P95 latency: 686.836µs
P99 latency: 841.471µs
Bottom 10% Avg latency: 722.179µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 156.218882ms
Burst completed: 1000 events in 170.25756ms
Burst completed: 1000 events in 164.944293ms
Burst completed: 1000 events in 162.767866ms
Burst completed: 1000 events in 148.744622ms
Burst completed: 1000 events in 163.556351ms
Burst completed: 1000 events in 172.007512ms
Burst completed: 1000 events in 159.806858ms
Burst completed: 1000 events in 168.086258ms
Burst completed: 1000 events in 164.931889ms
Burst test completed: 10000 events in 6.657581804s
Events/sec: 1502.05
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 34.850355805s
Combined ops/sec: 286.94
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 707.652249ms
Events/sec: 14131.23
Avg latency: 551.706µs
P90 latency: 724.937µs
P95 latency: 790.563µs
P99 latency: 980.677µs
Bottom 10% Avg latency: 836.659µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 164.62419ms
Burst completed: 1000 events in 155.938167ms
Burst completed: 1000 events in 132.903056ms
Burst completed: 1000 events in 142.377596ms
Burst completed: 1000 events in 155.024184ms
Burst completed: 1000 events in 147.095521ms
Burst completed: 1000 events in 150.027389ms
Burst completed: 1000 events in 152.873043ms
Burst completed: 1000 events in 150.635479ms
Burst completed: 1000 events in 146.45553ms
Burst test completed: 10000 events in 6.519122877s
Events/sec: 1533.95
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4806 reads in 1m0.03930731s
Combined ops/sec: 163.33
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 613.036589ms
Total Events: 10000
Events/sec: 16312.24
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 154 MB
Avg Latency: 476.418µs
P90 Latency: 627.852µs
P95 Latency: 686.836µs
P99 Latency: 841.471µs
Bottom 10% Avg Latency: 722.179µs
----------------------------------------
Test: Burst Pattern
Duration: 6.657581804s
Total Events: 10000
Events/sec: 1502.05
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 209 MB
Avg Latency: 182.765µs
P90 Latency: 234.409µs
P95 Latency: 257.082µs
P99 Latency: 330.764µs
Bottom 10% Avg Latency: 277.843µs
----------------------------------------
Test: Mixed Read/Write
Duration: 34.850355805s
Total Events: 10000
Events/sec: 286.94
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 221 MB
Avg Latency: 8.802188ms
P90 Latency: 19.075904ms
P95 Latency: 20.680962ms
P99 Latency: 22.78326ms
Bottom 10% Avg Latency: 20.897398ms
----------------------------------------
Test: Peak Throughput
Duration: 707.652249ms
Total Events: 10000
Events/sec: 14131.23
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 120 MB
Avg Latency: 551.706µs
P90 Latency: 724.937µs
P95 Latency: 790.563µs
P99 Latency: 980.677µs
Bottom 10% Avg Latency: 836.659µs
----------------------------------------
Test: Burst Pattern
Duration: 6.519122877s
Total Events: 10000
Events/sec: 1533.95
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 168 MB
Avg Latency: 204.873µs
P90 Latency: 271.569µs
P95 Latency: 329.28µs
P99 Latency: 558.829µs
Bottom 10% Avg Latency: 380.136µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.03930731s
Total Events: 9806
Events/sec: 163.33
Success Rate: 98.1%
Concurrent Workers: 8
Memory Used: 164 MB
Avg Latency: 19.506135ms
P90 Latency: 43.206775ms
P95 Latency: 45.944446ms
P99 Latency: 49.910436ms
Bottom 10% Avg Latency: 46.417943ms
----------------------------------------
Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.adoc
20250912224323628137 INF /tmp/benchmark_nostr-rs-relay_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912224324180883 INF /tmp/benchmark_nostr-rs-relay_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912224324184069 INF /tmp/benchmark_nostr-rs-relay_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: nostr-rs-relay
RELAY_URL: ws://nostr-rs-relay:8080
TEST_TIMESTAMP: 2025-09-12T22:43:24+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

@@ -1,190 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_relayer-basic_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912223509638362 INF /tmp/benchmark_relayer-basic_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912223509638864 INF /tmp/benchmark_relayer-basic_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912223509638903 INF /tmp/benchmark_relayer-basic_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912223509639558 INF (*types.Uint32)(0xc00570005c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912223509639620 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 659.848301ms
Events/sec: 15155.00
Avg latency: 513.243µs
P90 latency: 706.89µs
P95 latency: 792.685µs
P99 latency: 1.089215ms
Bottom 10% Avg latency: 864.746µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 142.551144ms
Burst completed: 1000 events in 137.426595ms
Burst completed: 1000 events in 139.51501ms
Burst completed: 1000 events in 143.683041ms
Burst completed: 1000 events in 136.500167ms
Burst completed: 1000 events in 139.573844ms
Burst completed: 1000 events in 145.873173ms
Burst completed: 1000 events in 144.256594ms
Burst completed: 1000 events in 157.89329ms
Burst completed: 1000 events in 153.882313ms
Burst test completed: 10000 events in 6.47066659s
Events/sec: 1545.44
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 37.483034098s
Combined ops/sec: 266.79
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 701.479526ms
Events/sec: 14255.58
Avg latency: 544.692µs
P90 latency: 742.997µs
P95 latency: 845.975µs
P99 latency: 1.147624ms
Bottom 10% Avg latency: 913.45µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 143.063212ms
Burst completed: 1000 events in 139.422008ms
Burst completed: 1000 events in 138.184516ms
Burst completed: 1000 events in 148.207616ms
Burst completed: 1000 events in 137.663883ms
Burst completed: 1000 events in 141.607643ms
Burst completed: 1000 events in 143.668551ms
Burst completed: 1000 events in 140.467359ms
Burst completed: 1000 events in 139.860509ms
Burst completed: 1000 events in 138.328306ms
Burst test completed: 10000 events in 6.43971118s
Events/sec: 1552.86
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4870 reads in 1m0.034216467s
Combined ops/sec: 164.41
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 659.848301ms
Total Events: 10000
Events/sec: 15155.00
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 153 MB
Avg Latency: 513.243µs
P90 Latency: 706.89µs
P95 Latency: 792.685µs
P99 Latency: 1.089215ms
Bottom 10% Avg Latency: 864.746µs
----------------------------------------
Test: Burst Pattern
Duration: 6.47066659s
Total Events: 10000
Events/sec: 1545.44
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 206 MB
Avg Latency: 273.645µs
P90 Latency: 407.483µs
P95 Latency: 498.989µs
P99 Latency: 772.406µs
Bottom 10% Avg Latency: 574.801µs
----------------------------------------
Test: Mixed Read/Write
Duration: 37.483034098s
Total Events: 10000
Events/sec: 266.79
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 163 MB
Avg Latency: 9.873363ms
P90 Latency: 21.643466ms
P95 Latency: 22.924497ms
P99 Latency: 24.961324ms
Bottom 10% Avg Latency: 23.201171ms
----------------------------------------
Test: Peak Throughput
Duration: 701.479526ms
Total Events: 10000
Events/sec: 14255.58
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 153 MB
Avg Latency: 544.692µs
P90 Latency: 742.997µs
P95 Latency: 845.975µs
P99 Latency: 1.147624ms
Bottom 10% Avg Latency: 913.45µs
----------------------------------------
Test: Burst Pattern
Duration: 6.43971118s
Total Events: 10000
Events/sec: 1552.86
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 204 MB
Avg Latency: 266.006µs
P90 Latency: 402.683µs
P95 Latency: 491.253µs
P99 Latency: 715.735µs
Bottom 10% Avg Latency: 553.762µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.034216467s
Total Events: 9870
Events/sec: 164.41
Success Rate: 98.7%
Concurrent Workers: 8
Memory Used: 184 MB
Avg Latency: 19.308183ms
P90 Latency: 42.766459ms
P95 Latency: 45.372157ms
P99 Latency: 49.993951ms
Bottom 10% Avg Latency: 46.189525ms
----------------------------------------
Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.adoc
20250912223751453794 INF /tmp/benchmark_relayer-basic_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912223752488197 INF /tmp/benchmark_relayer-basic_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912223752491495 INF /tmp/benchmark_relayer-basic_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: relayer-basic
RELAY_URL: ws://relayer-basic:7447
TEST_TIMESTAMP: 2025-09-12T22:37:52+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

@@ -1,190 +0,0 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_strfry_8
Events: 10000, Workers: 8, Duration: 1m0s
20250912223757656112 INF /tmp/benchmark_strfry_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
20250912223757657685 INF /tmp/benchmark_strfry_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
20250912223757657767 INF /tmp/benchmark_strfry_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
20250912223757658314 INF (*types.Uint32)(0xc0055c63ac)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
20250912223757658385 INF migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 655.950723ms
Events/sec: 15245.05
Avg latency: 510.383µs
P90 latency: 690.815µs
P95 latency: 769.085µs
P99 latency: 1.000349ms
Bottom 10% Avg latency: 831.211µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 168.844089ms
Burst completed: 1000 events in 138.644286ms
Burst completed: 1000 events in 167.717113ms
Burst completed: 1000 events in 141.566337ms
Burst completed: 1000 events in 141.186447ms
Burst completed: 1000 events in 145.845582ms
Burst completed: 1000 events in 142.834263ms
Burst completed: 1000 events in 144.707595ms
Burst completed: 1000 events in 144.096361ms
Burst completed: 1000 events in 158.524931ms
Burst test completed: 10000 events in 6.520630606s
Events/sec: 1533.59
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 36.04854491s
Combined ops/sec: 277.40
Pausing 10s before next round...
=== Starting test round 2/2 ===
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 644.867085ms
Events/sec: 15507.07
Avg latency: 501.972µs
P90 latency: 650.197µs
P95 latency: 709.37µs
P99 latency: 914.673µs
Bottom 10% Avg latency: 754.969µs
=== Burst Pattern Test ===
Burst completed: 1000 events in 133.763626ms
Burst completed: 1000 events in 135.289448ms
Burst completed: 1000 events in 136.874215ms
Burst completed: 1000 events in 135.118277ms
Burst completed: 1000 events in 139.247778ms
Burst completed: 1000 events in 142.262475ms
Burst completed: 1000 events in 141.21783ms
Burst completed: 1000 events in 143.089554ms
Burst completed: 1000 events in 148.027057ms
Burst completed: 1000 events in 150.006497ms
Burst test completed: 10000 events in 6.429121967s
Events/sec: 1555.42
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4857 reads in 1m0.047885362s
Combined ops/sec: 164.15
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 655.950723ms
Total Events: 10000
Events/sec: 15245.05
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 154 MB
Avg Latency: 510.383µs
P90 Latency: 690.815µs
P95 Latency: 769.085µs
P99 Latency: 1.000349ms
Bottom 10% Avg Latency: 831.211µs
----------------------------------------
Test: Burst Pattern
Duration: 6.520630606s
Total Events: 10000
Events/sec: 1533.59
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 208 MB
Avg Latency: 223.359µs
P90 Latency: 321.256µs
P95 Latency: 378.145µs
P99 Latency: 530.597µs
Bottom 10% Avg Latency: 412.953µs
----------------------------------------
Test: Mixed Read/Write
Duration: 36.04854491s
Total Events: 10000
Events/sec: 277.40
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 222 MB
Avg Latency: 9.309397ms
P90 Latency: 20.403594ms
P95 Latency: 22.152884ms
P99 Latency: 24.513304ms
Bottom 10% Avg Latency: 22.447709ms
----------------------------------------
Test: Peak Throughput
Duration: 644.867085ms
Total Events: 10000
Events/sec: 15507.07
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 125 MB
Avg Latency: 501.972µs
P90 Latency: 650.197µs
P95 Latency: 709.37µs
P99 Latency: 914.673µs
Bottom 10% Avg Latency: 754.969µs
----------------------------------------
Test: Burst Pattern
Duration: 6.429121967s
Total Events: 10000
Events/sec: 1555.42
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 170 MB
Avg Latency: 239.454µs
P90 Latency: 335.133µs
P95 Latency: 408.012µs
P99 Latency: 593.458µs
Bottom 10% Avg Latency: 446.804µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.047885362s
Total Events: 9857
Events/sec: 164.15
Success Rate: 98.6%
Concurrent Workers: 8
Memory Used: 189 MB
Avg Latency: 19.373297ms
P90 Latency: 42.953055ms
P95 Latency: 45.636867ms
P99 Latency: 49.71977ms
Bottom 10% Avg Latency: 46.144029ms
----------------------------------------
Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_strfry_8/benchmark_report.adoc
20250912224038033173 INF /tmp/benchmark_strfry_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
20250912224039055498 INF /tmp/benchmark_strfry_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 02. Size: 41 MiB of 41 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
20250912224039058214 INF /tmp/benchmark_strfry_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: strfry
RELAY_URL: ws://strfry:8080
TEST_TIMESTAMP: 2025-09-12T22:40:39+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

@@ -1,7 +1,7 @@
================================================================
NOSTR RELAY BENCHMARK AGGREGATE REPORT
================================================================
Generated: 2025-09-12T20:02:26+00:00
Generated: 2025-09-20T11:04:39+00:00
Benchmark Configuration:
Events per test: 10000
Concurrent workers: 8
@@ -16,98 +16,98 @@ SUMMARY BY RELAY
Relay: next-orly
----------------------------------------
Status: COMPLETED
Events/sec: 17901.30
Events/sec: 1504.52
Events/sec: 17901.30
Events/sec: 1035.42
Events/sec: 659.20
Events/sec: 1094.56
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 433.058µs
Avg Latency: 182.813µs
Avg Latency: 9.086952ms
P95 Latency: 456.738µs
P95 Latency: 152.86µs
P95 Latency: 18.156339ms
Avg Latency: 470.069µs
Bottom 10% Avg Latency: 750.491µs
Avg Latency: 190.573µs
P95 Latency: 693.101µs
P95 Latency: 289.761µs
P95 Latency: 22.450848ms
Relay: khatru-sqlite
----------------------------------------
Status: COMPLETED
Events/sec: 14291.70
Events/sec: 1530.29
Events/sec: 14291.70
Events/sec: 1105.61
Events/sec: 624.87
Events/sec: 1070.10
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 545.724µs
Avg Latency: 205.962µs
Avg Latency: 9.092604ms
P95 Latency: 473.43µs
P95 Latency: 165.525µs
P95 Latency: 19.302571ms
Avg Latency: 458.035µs
Bottom 10% Avg Latency: 702.193µs
Avg Latency: 193.997µs
P95 Latency: 660.608µs
P95 Latency: 302.666µs
P95 Latency: 23.653412ms
Relay: khatru-badger
----------------------------------------
Status: COMPLETED
Events/sec: 16351.11
Events/sec: 1539.25
Events/sec: 16351.11
Events/sec: 1040.11
Events/sec: 663.14
Events/sec: 1065.58
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 474.016µs
Avg Latency: 226.602µs
Avg Latency: 9.930935ms
P95 Latency: 479.03µs
P95 Latency: 239.525µs
P95 Latency: 17.75358ms
Avg Latency: 454.784µs
Bottom 10% Avg Latency: 706.219µs
Avg Latency: 193.914µs
P95 Latency: 654.637µs
P95 Latency: 296.525µs
P95 Latency: 21.642655ms
Relay: relayer-basic
----------------------------------------
Status: COMPLETED
Events/sec: 16522.60
Events/sec: 1537.71
Events/sec: 16522.60
Events/sec: 1104.88
Events/sec: 642.17
Events/sec: 1079.27
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 466.066µs
Avg Latency: 215.609µs
Avg Latency: 9.851217ms
P95 Latency: 514.849µs
P95 Latency: 141.91µs
P95 Latency: 23.101412ms
Avg Latency: 433.89µs
Bottom 10% Avg Latency: 653.813µs
Avg Latency: 186.306µs
P95 Latency: 617.868µs
P95 Latency: 279.192µs
P95 Latency: 21.247322ms
Relay: strfry
----------------------------------------
Status: COMPLETED
Events/sec: 15346.12
Events/sec: 1534.88
Events/sec: 15346.12
Events/sec: 1090.49
Events/sec: 652.03
Events/sec: 1098.57
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 506.51µs
Avg Latency: 216.564µs
Avg Latency: 9.938991ms
P95 Latency: 590.442µs
P95 Latency: 267.91µs
P95 Latency: 19.784708ms
Avg Latency: 448.058µs
Bottom 10% Avg Latency: 729.464µs
Avg Latency: 189.06µs
P95 Latency: 667.141µs
P95 Latency: 290.433µs
P95 Latency: 20.822884ms
Relay: nostr-rs-relay
----------------------------------------
Status: COMPLETED
Events/sec: 15199.95
Events/sec: 1533.87
Events/sec: 15199.95
Events/sec: 1123.91
Events/sec: 647.62
Events/sec: 1033.64
Success Rate: 100.0%
Success Rate: 100.0%
Success Rate: 100.0%
Avg Latency: 508.699µs
Avg Latency: 217.187µs
Avg Latency: 9.38757ms
P95 Latency: 1.011413ms
P95 Latency: 130.018µs
P95 Latency: 19.250416ms
Avg Latency: 416.753µs
Bottom 10% Avg Latency: 638.318µs
Avg Latency: 185.217µs
P95 Latency: 597.338µs
P95 Latency: 273.191µs
P95 Latency: 22.416221ms
================================================================
@@ -115,12 +115,12 @@ DETAILED RESULTS
================================================================
Individual relay reports are available in:
- /reports/run_20250912_195729/khatru-badger_results.txt
- /reports/run_20250912_195729/khatru-sqlite_results.txt
- /reports/run_20250912_195729/next-orly_results.txt
- /reports/run_20250912_195729/nostr-rs-relay_results.txt
- /reports/run_20250912_195729/relayer-basic_results.txt
- /reports/run_20250912_195729/strfry_results.txt
- /reports/run_20250920_101521/khatru-badger_results.txt
- /reports/run_20250920_101521/khatru-sqlite_results.txt
- /reports/run_20250920_101521/next-orly_results.txt
- /reports/run_20250920_101521/nostr-rs-relay_results.txt
- /reports/run_20250920_101521/relayer-basic_results.txt
- /reports/run_20250920_101521/strfry_results.txt
================================================================
BENCHMARK COMPARISON TABLE
@@ -128,12 +128,12 @@ BENCHMARK COMPARISON TABLE
Relay Status Peak Tput/s Avg Latency Success Rate
---- ------ ----------- ----------- ------------
next-orly OK 17901.30 433.058µs 100.0%
khatru-sqlite OK 14291.70 545.724µs 100.0%
khatru-badger OK 16351.11 474.016µs 100.0%
relayer-basic OK 16522.60 466.066µs 100.0%
strfry OK 15346.12 506.51µs 100.0%
nostr-rs-relay OK 15199.95 508.699µs 100.0%
next-orly OK 1035.42 470.069µs 100.0%
khatru-sqlite OK 1105.61 458.035µs 100.0%
khatru-badger OK 1040.11 454.784µs 100.0%
relayer-basic OK 1104.88 433.89µs 100.0%
strfry OK 1090.49 448.058µs 100.0%
nostr-rs-relay OK 1123.91 416.753µs 100.0%
================================================================
End of Report
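
The figures in this report (Events/sec, Avg/P90/P95/P99 latency, and the "Bottom 10% Avg Latency" lines this diff adds alongside the P95 summaries) can all be derived from a flat slice of per-event latency samples plus the wall-clock duration of the test. A minimal, self-contained sketch of that arithmetic — assuming "Bottom 10% Avg" means the mean of the slowest tenth of samples; the helper below is illustrative, not the benchmark's actual code:

package main

import (
	"fmt"
	"math/rand"
	"sort"
	"time"
)

// summarize derives the report's figures from raw per-event latencies:
// throughput from wall-clock time, percentiles from the sorted samples,
// and "Bottom 10% Avg" as the mean of the slowest tenth.
func summarize(samples []time.Duration, wall time.Duration) {
	sort.Slice(samples, func(i, j int) bool { return samples[i] < samples[j] })
	n := len(samples)
	var sum time.Duration
	for _, s := range samples {
		sum += s
	}
	pct := func(p float64) time.Duration {
		i := int(p * float64(n))
		if i >= n {
			i = n - 1
		}
		return samples[i]
	}
	tail := samples[n-n/10:] // slowest 10%; assumes n >= 10
	var tailSum time.Duration
	for _, s := range tail {
		tailSum += s
	}
	fmt.Printf("Events/sec: %.2f\n", float64(n)/wall.Seconds())
	fmt.Printf("Avg Latency: %v\n", sum/time.Duration(n))
	fmt.Printf("P95 Latency: %v\n", pct(0.95))
	fmt.Printf("Bottom 10%% Avg Latency: %v\n", tailSum/time.Duration(len(tail)))
}

func main() {
	samples := make([]time.Duration, 10000)
	for i := range samples {
		samples[i] = time.Duration(200+rand.Intn(800)) * time.Microsecond
	}
	summarize(samples, 9*time.Second)
}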


@@ -0,0 +1,298 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-badger_8
Events: 10000, Workers: 8, Duration: 1m0s
1758364309339505/tmp/benchmark_khatru-badger_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
1758364309340007/tmp/benchmark_khatru-badger_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
1758364309340039/tmp/benchmark_khatru-badger_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
1758364309340327(*types.Uint32)(0xc000147840)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
1758364309340465migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.614321551s
Events/sec: 1040.11
Avg latency: 454.784µs
P90 latency: 596.266µs
P95 latency: 654.637µs
P99 latency: 844.569µs
Bottom 10% Avg latency: 706.219µs
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 136.444875ms
Burst completed: 1000 events in 141.806497ms
Burst completed: 1000 events in 168.991278ms
Burst completed: 1000 events in 167.713425ms
Burst completed: 1000 events in 162.89698ms
Burst completed: 1000 events in 157.775164ms
Burst completed: 1000 events in 166.476709ms
Burst completed: 1000 events in 161.742632ms
Burst completed: 1000 events in 162.138977ms
Burst completed: 1000 events in 156.657194ms
Burst test completed: 10000 events in 15.07982611s
Events/sec: 663.14
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 44.903267299s
Combined ops/sec: 222.70
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 3166 queries in 1m0.104195004s
Queries/sec: 52.68
Avg query latency: 125.847553ms
P95 query latency: 148.109766ms
P99 query latency: 212.054697ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 11366 operations (1366 queries, 10000 writes) in 1m0.127232573s
Operations/sec: 189.03
Avg latency: 16.671438ms
Avg query latency: 134.993072ms
Avg write latency: 508.703µs
P95 latency: 133.755996ms
P99 latency: 152.790563ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.384548186s
Events/sec: 1065.58
Avg latency: 566.375µs
P90 latency: 738.377µs
P95 latency: 839.679µs
P99 latency: 1.131084ms
Bottom 10% Avg latency: 1.312791ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 166.832259ms
Burst completed: 1000 events in 175.061575ms
Burst completed: 1000 events in 168.897493ms
Burst completed: 1000 events in 167.584171ms
Burst completed: 1000 events in 178.212526ms
Burst completed: 1000 events in 202.208945ms
Burst completed: 1000 events in 154.130024ms
Burst completed: 1000 events in 168.817721ms
Burst completed: 1000 events in 153.032223ms
Burst completed: 1000 events in 154.799008ms
Burst test completed: 10000 events in 15.449161726s
Events/sec: 647.28
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4582 reads in 1m0.037041762s
Combined ops/sec: 159.60
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 959 queries in 1m0.42440735s
Queries/sec: 15.87
Avg query latency: 418.846875ms
P95 query latency: 473.089327ms
P99 query latency: 650.467474ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 10484 operations (484 queries, 10000 writes) in 1m0.283590079s
Operations/sec: 173.91
Avg latency: 17.921964ms
Avg query latency: 381.041592ms
Avg write latency: 346.974µs
P95 latency: 1.269749ms
P99 latency: 399.015222ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 9.614321551s
Total Events: 10000
Events/sec: 1040.11
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 118 MB
Avg Latency: 454.784µs
P90 Latency: 596.266µs
P95 Latency: 654.637µs
P99 Latency: 844.569µs
Bottom 10% Avg Latency: 706.219µs
----------------------------------------
Test: Burst Pattern
Duration: 15.07982611s
Total Events: 10000
Events/sec: 663.14
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 162 MB
Avg Latency: 193.914µs
P90 Latency: 255.617µs
P95 Latency: 296.525µs
P99 Latency: 451.81µs
Bottom 10% Avg Latency: 343.222µs
----------------------------------------
Test: Mixed Read/Write
Duration: 44.903267299s
Total Events: 10000
Events/sec: 222.70
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 121 MB
Avg Latency: 9.145633ms
P90 Latency: 19.946513ms
P95 Latency: 21.642655ms
P99 Latency: 23.951572ms
Bottom 10% Avg Latency: 21.861602ms
----------------------------------------
Test: Query Performance
Duration: 1m0.104195004s
Total Events: 3166
Events/sec: 52.68
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 188 MB
Avg Latency: 125.847553ms
P90 Latency: 140.664966ms
P95 Latency: 148.109766ms
P99 Latency: 212.054697ms
Bottom 10% Avg Latency: 164.089129ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.127232573s
Total Events: 11366
Events/sec: 189.03
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 112 MB
Avg Latency: 16.671438ms
P90 Latency: 122.627849ms
P95 Latency: 133.755996ms
P99 Latency: 152.790563ms
Bottom 10% Avg Latency: 138.087104ms
----------------------------------------
Test: Peak Throughput
Duration: 9.384548186s
Total Events: 10000
Events/sec: 1065.58
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 1441 MB
Avg Latency: 566.375µs
P90 Latency: 738.377µs
P95 Latency: 839.679µs
P99 Latency: 1.131084ms
Bottom 10% Avg Latency: 1.312791ms
----------------------------------------
Test: Burst Pattern
Duration: 15.449161726s
Total Events: 10000
Events/sec: 647.28
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 165 MB
Avg Latency: 186.353µs
P90 Latency: 243.413µs
P95 Latency: 283.06µs
P99 Latency: 440.76µs
Bottom 10% Avg Latency: 324.151µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.037041762s
Total Events: 9582
Events/sec: 159.60
Success Rate: 95.8%
Concurrent Workers: 8
Memory Used: 138 MB
Avg Latency: 16.358228ms
P90 Latency: 37.654373ms
P95 Latency: 40.578604ms
P99 Latency: 46.331181ms
Bottom 10% Avg Latency: 41.76124ms
----------------------------------------
Test: Query Performance
Duration: 1m0.42440735s
Total Events: 959
Events/sec: 15.87
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 110 MB
Avg Latency: 418.846875ms
P90 Latency: 448.809017ms
P95 Latency: 473.089327ms
P99 Latency: 650.467474ms
Bottom 10% Avg Latency: 518.112626ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.283590079s
Total Events: 10484
Events/sec: 173.91
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 205 MB
Avg Latency: 17.921964ms
P90 Latency: 582.319µs
P95 Latency: 1.269749ms
P99 Latency: 399.015222ms
Bottom 10% Avg Latency: 176.257001ms
----------------------------------------
Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.adoc
1758364794792663/tmp/benchmark_khatru-badger_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
1758364796617126/tmp/benchmark_khatru-badger_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
1758364796621659/tmp/benchmark_khatru-badger_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: khatru-badger
RELAY_URL: ws://khatru-badger:3334
TEST_TIMESTAMP: 2025-09-20T10:39:56+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
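
The burst-pattern sections of this log send ten bursts of 1,000 events, print a per-burst timing line, and measure overall Events/sec across the whole run including inter-burst gaps — which is why the headline lands near 660/s while each individual burst sustains several thousand. A sketch of that loop shape; publish and the one-second pause are hypothetical stand-ins, since the real send path and burst spacing live in the harness and are not shown in this log:

package main

import (
	"fmt"
	"time"
)

// runBurstPattern mirrors the log lines above: time each burst of 1,000
// events separately, then report throughput across the whole test window.
func runBurstPattern(publish func() error) {
	const bursts, perBurst = 10, 1000
	start := time.Now()
	for b := 0; b < bursts; b++ {
		t := time.Now()
		for i := 0; i < perBurst; i++ {
			_ = publish() // a failure here would count against the success rate
		}
		fmt.Printf("Burst completed: %d events in %v\n", perBurst, time.Since(t))
		time.Sleep(time.Second) // hypothetical inter-burst pause
	}
	total := time.Since(start)
	fmt.Printf("Burst test completed: %d events in %v\n", bursts*perBurst, total)
	fmt.Printf("Events/sec: %.2f\n", float64(bursts*perBurst)/total.Seconds())
}

func main() {
	runBurstPattern(func() error {
		time.Sleep(150 * time.Microsecond) // stand-in for one relay write
		return nil
	})
}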


@@ -0,0 +1,298 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_khatru-sqlite_8
Events: 10000, Workers: 8, Duration: 1m0s
1758363814412229/tmp/benchmark_khatru-sqlite_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
1758363814412803/tmp/benchmark_khatru-sqlite_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
1758363814412840/tmp/benchmark_khatru-sqlite_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
1758363814413123(*types.Uint32)(0xc0001ea00c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
1758363814413200migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.044789549s
Events/sec: 1105.61
Avg latency: 458.035µs
P90 latency: 601.736µs
P95 latency: 660.608µs
P99 latency: 844.108µs
Bottom 10% Avg latency: 702.193µs
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 146.610877ms
Burst completed: 1000 events in 179.229665ms
Burst completed: 1000 events in 157.096919ms
Burst completed: 1000 events in 164.796374ms
Burst completed: 1000 events in 188.464354ms
Burst completed: 1000 events in 196.529596ms
Burst completed: 1000 events in 169.425581ms
Burst completed: 1000 events in 147.99354ms
Burst completed: 1000 events in 157.996252ms
Burst completed: 1000 events in 167.299262ms
Burst test completed: 10000 events in 16.003207139s
Events/sec: 624.87
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 46.924555793s
Combined ops/sec: 213.11
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 3052 queries in 1m0.102264s
Queries/sec: 50.78
Avg query latency: 128.464192ms
P95 query latency: 148.086431ms
P99 query latency: 219.275394ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 11296 operations (1296 queries, 10000 writes) in 1m0.108871986s
Operations/sec: 187.93
Avg latency: 16.71621ms
Avg query latency: 142.320434ms
Avg write latency: 437.903µs
P95 latency: 141.357185ms
P99 latency: 163.50992ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.344884331s
Events/sec: 1070.10
Avg latency: 578.453µs
P90 latency: 742.585µs
P95 latency: 849.679µs
P99 latency: 1.122058ms
Bottom 10% Avg latency: 1.362355ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 185.472655ms
Burst completed: 1000 events in 194.135516ms
Burst completed: 1000 events in 176.056931ms
Burst completed: 1000 events in 161.500315ms
Burst completed: 1000 events in 157.673837ms
Burst completed: 1000 events in 167.130208ms
Burst completed: 1000 events in 182.164655ms
Burst completed: 1000 events in 156.589581ms
Burst completed: 1000 events in 154.419949ms
Burst completed: 1000 events in 158.445927ms
Burst test completed: 10000 events in 15.587711126s
Events/sec: 641.53
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4405 reads in 1m0.043842569s
Combined ops/sec: 156.64
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 915 queries in 1m0.3452177s
Queries/sec: 15.16
Avg query latency: 435.125142ms
P95 query latency: 520.311963ms
P99 query latency: 618.85899ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 10489 operations (489 queries, 10000 writes) in 1m0.27235761s
Operations/sec: 174.03
Avg latency: 18.043774ms
Avg query latency: 379.681531ms
Avg write latency: 359.688µs
P95 latency: 1.316628ms
P99 latency: 400.223248ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 9.044789549s
Total Events: 10000
Events/sec: 1105.61
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 144 MB
Avg Latency: 458.035µs
P90 Latency: 601.736µs
P95 Latency: 660.608µs
P99 Latency: 844.108µs
Bottom 10% Avg Latency: 702.193µs
----------------------------------------
Test: Burst Pattern
Duration: 16.003207139s
Total Events: 10000
Events/sec: 624.87
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 89 MB
Avg Latency: 193.997µs
P90 Latency: 261.969µs
P95 Latency: 302.666µs
P99 Latency: 431.933µs
Bottom 10% Avg Latency: 334.383µs
----------------------------------------
Test: Mixed Read/Write
Duration: 46.924555793s
Total Events: 10000
Events/sec: 213.11
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 96 MB
Avg Latency: 9.781737ms
P90 Latency: 21.91971ms
P95 Latency: 23.653412ms
P99 Latency: 27.511972ms
Bottom 10% Avg Latency: 24.396695ms
----------------------------------------
Test: Query Performance
Duration: 1m0.102264s
Total Events: 3052
Events/sec: 50.78
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 209 MB
Avg Latency: 128.464192ms
P90 Latency: 142.195039ms
P95 Latency: 148.086431ms
P99 Latency: 219.275394ms
Bottom 10% Avg Latency: 162.874217ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.108871986s
Total Events: 11296
Events/sec: 187.93
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 159 MB
Avg Latency: 16.71621ms
P90 Latency: 127.287246ms
P95 Latency: 141.357185ms
P99 Latency: 163.50992ms
Bottom 10% Avg Latency: 145.199189ms
----------------------------------------
Test: Peak Throughput
Duration: 9.344884331s
Total Events: 10000
Events/sec: 1070.10
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 1441 MB
Avg Latency: 578.453µs
P90 Latency: 742.585µs
P95 Latency: 849.679µs
P99 Latency: 1.122058ms
Bottom 10% Avg Latency: 1.362355ms
----------------------------------------
Test: Burst Pattern
Duration: 15.587711126s
Total Events: 10000
Events/sec: 641.53
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 141 MB
Avg Latency: 190.235µs
P90 Latency: 254.795µs
P95 Latency: 290.563µs
P99 Latency: 437.323µs
Bottom 10% Avg Latency: 328.752µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.043842569s
Total Events: 9405
Events/sec: 156.64
Success Rate: 94.0%
Concurrent Workers: 8
Memory Used: 105 MB
Avg Latency: 16.852438ms
P90 Latency: 39.677855ms
P95 Latency: 42.553634ms
P99 Latency: 48.262077ms
Bottom 10% Avg Latency: 43.994063ms
----------------------------------------
Test: Query Performance
Duration: 1m0.3452177s
Total Events: 915
Events/sec: 15.16
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 157 MB
Avg Latency: 435.125142ms
P90 Latency: 482.304439ms
P95 Latency: 520.311963ms
P99 Latency: 618.85899ms
Bottom 10% Avg Latency: 545.670939ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.27235761s
Total Events: 10489
Events/sec: 174.03
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 132 MB
Avg Latency: 18.043774ms
P90 Latency: 583.962µs
P95 Latency: 1.316628ms
P99 Latency: 400.223248ms
Bottom 10% Avg Latency: 177.440946ms
----------------------------------------
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc
1758364302230610/tmp/benchmark_khatru-sqlite_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
1758364304057942/tmp/benchmark_khatru-sqlite_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
1758364304063521/tmp/benchmark_khatru-sqlite_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: khatru-sqlite
RELAY_URL: ws://khatru-sqlite:3334
TEST_TIMESTAMP: 2025-09-20T10:31:44+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
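
The mixed read/write sections above interleave 5,000 writes with 5,000 reads; the second round's lower read counts (e.g. 4,405 reads in 1m0s) suggest each round is also capped at roughly sixty seconds. A sketch of the interleaving, ignoring that time cap for brevity — write and read are hypothetical stand-ins for the harness's store and query operations:

package main

import (
	"fmt"
	"time"
)

// runMixed alternates one write and one read per iteration, matching the
// "5000 writes, 5000 reads" breakdown in the logs above.
func runMixed(write, read func() error, pairs int) {
	start := time.Now()
	writes, reads := 0, 0
	for i := 0; i < pairs; i++ {
		if write() == nil {
			writes++
		}
		if read() == nil {
			reads++
		}
	}
	elapsed := time.Since(start)
	fmt.Printf("Mixed test completed: %d writes, %d reads in %v\n", writes, reads, elapsed)
	fmt.Printf("Combined ops/sec: %.2f\n", float64(writes+reads)/elapsed.Seconds())
}

func main() {
	op := func() error { time.Sleep(2 * time.Millisecond); return nil }
	runMixed(op, op, 5000)
}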


@@ -0,0 +1,298 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_next-orly_8
Events: 10000, Workers: 8, Duration: 1m0s
1758363321263384/tmp/benchmark_next-orly_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
1758363321263864/tmp/benchmark_next-orly_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
1758363321263887/tmp/benchmark_next-orly_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
1758363321264128(*types.Uint32)(0xc0001f7ffc)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
1758363321264177migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.657904043s
Events/sec: 1035.42
Avg latency: 470.069µs
P90 latency: 628.167µs
P95 latency: 693.101µs
P99 latency: 922.357µs
Bottom 10% Avg latency: 750.491µs
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 175.034134ms
Burst completed: 1000 events in 150.401771ms
Burst completed: 1000 events in 168.992305ms
Burst completed: 1000 events in 179.447581ms
Burst completed: 1000 events in 165.602457ms
Burst completed: 1000 events in 178.649561ms
Burst completed: 1000 events in 195.002303ms
Burst completed: 1000 events in 168.970954ms
Burst completed: 1000 events in 150.818413ms
Burst completed: 1000 events in 185.285662ms
Burst test completed: 10000 events in 15.169978801s
Events/sec: 659.20
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 45.597478865s
Combined ops/sec: 219.31
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 3151 queries in 1m0.067849757s
Queries/sec: 52.46
Avg query latency: 126.38548ms
P95 query latency: 149.976367ms
P99 query latency: 205.807461ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 11325 operations (1325 queries, 10000 writes) in 1m0.081967157s
Operations/sec: 188.49
Avg latency: 16.694154ms
Avg query latency: 139.524748ms
Avg write latency: 419.1µs
P95 latency: 138.688202ms
P99 latency: 158.824742ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.136097148s
Events/sec: 1094.56
Avg latency: 510.7µs
P90 latency: 636.763µs
P95 latency: 705.564µs
P99 latency: 922.777µs
Bottom 10% Avg latency: 1.094965ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 176.337148ms
Burst completed: 1000 events in 177.351251ms
Burst completed: 1000 events in 181.515292ms
Burst completed: 1000 events in 164.043866ms
Burst completed: 1000 events in 152.697196ms
Burst completed: 1000 events in 144.231922ms
Burst completed: 1000 events in 162.606659ms
Burst completed: 1000 events in 137.485182ms
Burst completed: 1000 events in 163.19487ms
Burst completed: 1000 events in 147.900339ms
Burst test completed: 10000 events in 15.514130113s
Events/sec: 644.57
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4489 reads in 1m0.036174989s
Combined ops/sec: 158.05
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 900 queries in 1m0.304636826s
Queries/sec: 14.92
Avg query latency: 444.57989ms
P95 query latency: 547.598358ms
P99 query latency: 660.926147ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 10462 operations (462 queries, 10000 writes) in 1m0.362856212s
Operations/sec: 173.32
Avg latency: 17.808607ms
Avg query latency: 395.594177ms
Avg write latency: 354.914µs
P95 latency: 1.221657ms
P99 latency: 411.642669ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 9.657904043s
Total Events: 10000
Events/sec: 1035.42
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 144 MB
Avg Latency: 470.069µs
P90 Latency: 628.167µs
P95 Latency: 693.101µs
P99 Latency: 922.357µs
Bottom 10% Avg Latency: 750.491µs
----------------------------------------
Test: Burst Pattern
Duration: 15.169978801s
Total Events: 10000
Events/sec: 659.20
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 135 MB
Avg Latency: 190.573µs
P90 Latency: 252.701µs
P95 Latency: 289.761µs
P99 Latency: 408.147µs
Bottom 10% Avg Latency: 316.797µs
----------------------------------------
Test: Mixed Read/Write
Duration: 45.597478865s
Total Events: 10000
Events/sec: 219.31
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 119 MB
Avg Latency: 9.381158ms
P90 Latency: 20.487026ms
P95 Latency: 22.450848ms
P99 Latency: 24.696325ms
Bottom 10% Avg Latency: 22.632933ms
----------------------------------------
Test: Query Performance
Duration: 1m0.067849757s
Total Events: 3151
Events/sec: 52.46
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 145 MB
Avg Latency: 126.38548ms
P90 Latency: 142.39268ms
P95 Latency: 149.976367ms
P99 Latency: 205.807461ms
Bottom 10% Avg Latency: 162.636454ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.081967157s
Total Events: 11325
Events/sec: 188.49
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 194 MB
Avg Latency: 16.694154ms
P90 Latency: 125.314618ms
P95 Latency: 138.688202ms
P99 Latency: 158.824742ms
Bottom 10% Avg Latency: 142.699977ms
----------------------------------------
Test: Peak Throughput
Duration: 9.136097148s
Total Events: 10000
Events/sec: 1094.56
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 1441 MB
Avg Latency: 510.7µs
P90 Latency: 636.763µs
P95 Latency: 705.564µs
P99 Latency: 922.777µs
Bottom 10% Avg Latency: 1.094965ms
----------------------------------------
Test: Burst Pattern
Duration: 15.514130113s
Total Events: 10000
Events/sec: 644.57
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 138 MB
Avg Latency: 230.062µs
P90 Latency: 316.624µs
P95 Latency: 389.882µs
P99 Latency: 859.548µs
Bottom 10% Avg Latency: 529.836µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.036174989s
Total Events: 9489
Events/sec: 158.05
Success Rate: 94.9%
Concurrent Workers: 8
Memory Used: 182 MB
Avg Latency: 16.56372ms
P90 Latency: 38.24931ms
P95 Latency: 41.187306ms
P99 Latency: 46.02529ms
Bottom 10% Avg Latency: 42.131189ms
----------------------------------------
Test: Query Performance
Duration: 1m0.304636826s
Total Events: 900
Events/sec: 14.92
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 141 MB
Avg Latency: 444.57989ms
P90 Latency: 490.730651ms
P95 Latency: 547.598358ms
P99 Latency: 660.926147ms
Bottom 10% Avg Latency: 563.628707ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.362856212s
Total Events: 10462
Events/sec: 173.32
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 152 MB
Avg Latency: 17.808607ms
P90 Latency: 631.703µs
P95 Latency: 1.221657ms
P99 Latency: 411.642669ms
Bottom 10% Avg Latency: 175.052418ms
----------------------------------------
Report saved to: /tmp/benchmark_next-orly_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_next-orly_8/benchmark_report.adoc
1758363807245770/tmp/benchmark_next-orly_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
1758363809118416/tmp/benchmark_next-orly_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
1758363809123697/tmp/benchmark_next-orly_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: next-orly
RELAY_URL: ws://next-orly:8080
TEST_TIMESTAMP: 2025-09-20T10:23:29+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
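
The query tests above are time-boxed: they issue filter queries against the pre-populated database until a one-minute window closes, then report throughput and latency percentiles. A sketch of that loop — query is a hypothetical stand-in for one round-trip filter query:

package main

import (
	"fmt"
	"sort"
	"time"
)

// runQueryTest issues queries until the window closes, then reports
// queries/sec and a P95, the same shape as the "Query Test" sections above.
func runQueryTest(query func() error, window time.Duration) {
	deadline := time.Now().Add(window)
	var lat []time.Duration
	start := time.Now()
	for time.Now().Before(deadline) {
		t := time.Now()
		if query() == nil {
			lat = append(lat, time.Since(t))
		}
	}
	elapsed := time.Since(start)
	sort.Slice(lat, func(i, j int) bool { return lat[i] < lat[j] })
	fmt.Printf("Query test completed: %d queries in %v\n", len(lat), elapsed)
	fmt.Printf("Queries/sec: %.2f\n", float64(len(lat))/elapsed.Seconds())
	if n := len(lat); n > 0 {
		fmt.Printf("P95 query latency: %v\n", lat[n*95/100])
	}
}

func main() {
	runQueryTest(func() error {
		time.Sleep(20 * time.Millisecond) // stand-in for one filter query
		return nil
	}, 3*time.Second)
}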


@@ -0,0 +1,298 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_nostr-rs-relay_8
Events: 10000, Workers: 8, Duration: 1m0s
1758365785928076/tmp/benchmark_nostr-rs-relay_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
1758365785929028/tmp/benchmark_nostr-rs-relay_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
1758365785929097/tmp/benchmark_nostr-rs-relay_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
1758365785929509(*types.Uint32)(0xc0001c820c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
1758365785929573migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 8.897492256s
Events/sec: 1123.91
Avg latency: 416.753µs
P90 latency: 546.351µs
P95 latency: 597.338µs
P99 latency: 760.549µs
Bottom 10% Avg latency: 638.318µs
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 158.263016ms
Burst completed: 1000 events in 181.558983ms
Burst completed: 1000 events in 155.219861ms
Burst completed: 1000 events in 183.834156ms
Burst completed: 1000 events in 192.398437ms
Burst completed: 1000 events in 176.450074ms
Burst completed: 1000 events in 175.050138ms
Burst completed: 1000 events in 178.883047ms
Burst completed: 1000 events in 180.74321ms
Burst completed: 1000 events in 169.39146ms
Burst test completed: 10000 events in 15.441062872s
Events/sec: 647.62
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 45.847091984s
Combined ops/sec: 218.12
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 3229 queries in 1m0.085047549s
Queries/sec: 53.74
Avg query latency: 123.209617ms
P95 query latency: 141.745618ms
P99 query latency: 154.527843ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 11298 operations (1298 queries, 10000 writes) in 1m0.096751583s
Operations/sec: 188.00
Avg latency: 16.447175ms
Avg query latency: 139.791065ms
Avg write latency: 437.138µs
P95 latency: 137.879538ms
P99 latency: 162.020385ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.674593819s
Events/sec: 1033.64
Avg latency: 541.545µs
P90 latency: 693.862µs
P95 latency: 775.757µs
P99 latency: 1.05005ms
Bottom 10% Avg latency: 1.219386ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 168.056064ms
Burst completed: 1000 events in 159.819647ms
Burst completed: 1000 events in 147.500264ms
Burst completed: 1000 events in 159.150392ms
Burst completed: 1000 events in 149.954829ms
Burst completed: 1000 events in 138.082938ms
Burst completed: 1000 events in 157.234213ms
Burst completed: 1000 events in 158.468955ms
Burst completed: 1000 events in 144.346047ms
Burst completed: 1000 events in 154.930576ms
Burst test completed: 10000 events in 15.646785427s
Events/sec: 639.11
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4415 reads in 1m0.02899167s
Combined ops/sec: 156.84
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 890 queries in 1m0.279192867s
Queries/sec: 14.76
Avg query latency: 448.809547ms
P95 query latency: 607.28509ms
P99 query latency: 786.387053ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 10469 operations (469 queries, 10000 writes) in 1m0.190785048s
Operations/sec: 173.93
Avg latency: 17.73903ms
Avg query latency: 388.59336ms
Avg write latency: 345.962µs
P95 latency: 1.158136ms
P99 latency: 407.947907ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 8.897492256s
Total Events: 10000
Events/sec: 1123.91
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 132 MB
Avg Latency: 416.753µs
P90 Latency: 546.351µs
P95 Latency: 597.338µs
P99 Latency: 760.549µs
Bottom 10% Avg Latency: 638.318µs
----------------------------------------
Test: Burst Pattern
Duration: 15.441062872s
Total Events: 10000
Events/sec: 647.62
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 104 MB
Avg Latency: 185.217µs
P90 Latency: 241.64µs
P95 Latency: 273.191µs
P99 Latency: 412.897µs
Bottom 10% Avg Latency: 306.752µs
----------------------------------------
Test: Mixed Read/Write
Duration: 45.847091984s
Total Events: 10000
Events/sec: 218.12
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 96 MB
Avg Latency: 9.446215ms
P90 Latency: 20.522135ms
P95 Latency: 22.416221ms
P99 Latency: 24.696283ms
Bottom 10% Avg Latency: 22.59535ms
----------------------------------------
Test: Query Performance
Duration: 1m0.085047549s
Total Events: 3229
Events/sec: 53.74
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 175 MB
Avg Latency: 123.209617ms
P90 Latency: 137.629898ms
P95 Latency: 141.745618ms
P99 Latency: 154.527843ms
Bottom 10% Avg Latency: 145.245967ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.096751583s
Total Events: 11298
Events/sec: 188.00
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 181 MB
Avg Latency: 16.447175ms
P90 Latency: 123.920421ms
P95 Latency: 137.879538ms
P99 Latency: 162.020385ms
Bottom 10% Avg Latency: 142.654147ms
----------------------------------------
Test: Peak Throughput
Duration: 9.674593819s
Total Events: 10000
Events/sec: 1033.64
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 1441 MB
Avg Latency: 541.545µs
P90 Latency: 693.862µs
P95 Latency: 775.757µs
P99 Latency: 1.05005ms
Bottom 10% Avg Latency: 1.219386ms
----------------------------------------
Test: Burst Pattern
Duration: 15.646785427s
Total Events: 10000
Events/sec: 639.11
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 146 MB
Avg Latency: 331.896µs
P90 Latency: 520.511µs
P95 Latency: 864.486µs
P99 Latency: 2.251087ms
Bottom 10% Avg Latency: 1.16922ms
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.02899167s
Total Events: 9415
Events/sec: 156.84
Success Rate: 94.2%
Concurrent Workers: 8
Memory Used: 147 MB
Avg Latency: 16.723365ms
P90 Latency: 39.058801ms
P95 Latency: 41.904891ms
P99 Latency: 47.156263ms
Bottom 10% Avg Latency: 42.800456ms
----------------------------------------
Test: Query Performance
Duration: 1m0.279192867s
Total Events: 890
Events/sec: 14.76
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 156 MB
Avg Latency: 448.809547ms
P90 Latency: 524.488485ms
P95 Latency: 607.28509ms
P99 Latency: 786.387053ms
Bottom 10% Avg Latency: 634.016595ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.190785048s
Total Events: 10469
Events/sec: 173.93
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 226 MB
Avg Latency: 17.73903ms
P90 Latency: 561.359µs
P95 Latency: 1.158136ms
P99 Latency: 407.947907ms
Bottom 10% Avg Latency: 174.508065ms
----------------------------------------
Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.adoc
1758366272164052/tmp/benchmark_nostr-rs-relay_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
1758366274030399/tmp/benchmark_nostr-rs-relay_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
1758366274036413/tmp/benchmark_nostr-rs-relay_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: nostr-rs-relay
RELAY_URL: ws://nostr-rs-relay:8080
TEST_TIMESTAMP: 2025-09-20T11:04:34+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
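
The concurrent query/store sections run a fixed budget of 10,000 writes across the worker pool while queries are issued in parallel until the writes drain, which is why the operation counts read "(N queries, 10000 writes)". A sketch of that coordination — write and query are hypothetical stand-ins:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// runConcurrent drains a fixed write budget across the worker pool while a
// query loop runs until the writes finish, yielding the mixed operation
// counts seen in the logs above.
func runConcurrent(write, query func() error, writes, workers int) {
	var next, queried int64
	var wg, qwg sync.WaitGroup
	done := make(chan struct{})
	start := time.Now()
	for w := 0; w < workers; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// each AddInt64 claims one write ticket; stop once the budget is spent
			for atomic.AddInt64(&next, 1) <= int64(writes) {
				_ = write()
			}
		}()
	}
	qwg.Add(1)
	go func() {
		defer qwg.Done()
		for {
			select {
			case <-done:
				return
			default:
				_ = query()
				atomic.AddInt64(&queried, 1)
			}
		}
	}()
	wg.Wait()
	close(done)
	qwg.Wait()
	q := atomic.LoadInt64(&queried)
	fmt.Printf("Concurrent test completed: %d operations (%d queries, %d writes) in %v\n",
		int64(writes)+q, q, writes, time.Since(start))
}

func main() {
	w := func() error { time.Sleep(500 * time.Microsecond); return nil }
	q := func() error { time.Sleep(50 * time.Millisecond); return nil }
	runConcurrent(w, q, 10000, 8)
}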


@@ -0,0 +1,298 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_relayer-basic_8
Events: 10000, Workers: 8, Duration: 1m0s
1758364801895559/tmp/benchmark_relayer-basic_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
1758364801896041/tmp/benchmark_relayer-basic_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
1758364801896078/tmp/benchmark_relayer-basic_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
1758364801896347(*types.Uint32)(0xc0001a801c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
1758364801896400migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.050770003s
Events/sec: 1104.88
Avg latency: 433.89µs
P90 latency: 567.261µs
P95 latency: 617.868µs
P99 latency: 783.593µs
Bottom 10% Avg latency: 653.813µs
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 183.738134ms
Burst completed: 1000 events in 155.035832ms
Burst completed: 1000 events in 160.066514ms
Burst completed: 1000 events in 183.724238ms
Burst completed: 1000 events in 178.910929ms
Burst completed: 1000 events in 168.905441ms
Burst completed: 1000 events in 172.584809ms
Burst completed: 1000 events in 177.214508ms
Burst completed: 1000 events in 169.921566ms
Burst completed: 1000 events in 162.042488ms
Burst test completed: 10000 events in 15.572250139s
Events/sec: 642.17
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 44.509677166s
Combined ops/sec: 224.67
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 3253 queries in 1m0.095238426s
Queries/sec: 54.13
Avg query latency: 122.100718ms
P95 query latency: 140.360749ms
P99 query latency: 148.353154ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 11408 operations (1408 queries, 10000 writes) in 1m0.117581615s
Operations/sec: 189.76
Avg latency: 16.525268ms
Avg query latency: 130.972853ms
Avg write latency: 411.048µs
P95 latency: 132.130964ms
P99 latency: 146.285305ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.265496879s
Events/sec: 1079.27
Avg latency: 529.266µs
P90 latency: 658.033µs
P95 latency: 732.024µs
P99 latency: 953.285µs
Bottom 10% Avg latency: 1.168714ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 172.300479ms
Burst completed: 1000 events in 149.247397ms
Burst completed: 1000 events in 170.000198ms
Burst completed: 1000 events in 133.786958ms
Burst completed: 1000 events in 172.157036ms
Burst completed: 1000 events in 153.284738ms
Burst completed: 1000 events in 166.711903ms
Burst completed: 1000 events in 170.635427ms
Burst completed: 1000 events in 153.381031ms
Burst completed: 1000 events in 162.125949ms
Burst test completed: 10000 events in 16.674963543s
Events/sec: 599.70
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4665 reads in 1m0.035358264s
Combined ops/sec: 160.99
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 944 queries in 1m0.383519958s
Queries/sec: 15.63
Avg query latency: 421.75292ms
P95 query latency: 491.340259ms
P99 query latency: 664.614262ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 10479 operations (479 queries, 10000 writes) in 1m0.291926697s
Operations/sec: 173.80
Avg latency: 18.049265ms
Avg query latency: 385.864458ms
Avg write latency: 430.918µs
P95 latency: 3.05038ms
P99 latency: 404.540502ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 9.050770003s
Total Events: 10000
Events/sec: 1104.88
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 153 MB
Avg Latency: 433.89µs
P90 Latency: 567.261µs
P95 Latency: 617.868µs
P99 Latency: 783.593µs
Bottom 10% Avg Latency: 653.813µs
----------------------------------------
Test: Burst Pattern
Duration: 15.572250139s
Total Events: 10000
Events/sec: 642.17
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 134 MB
Avg Latency: 186.306µs
P90 Latency: 243.995µs
P95 Latency: 279.192µs
P99 Latency: 392.859µs
Bottom 10% Avg Latency: 303.766µs
----------------------------------------
Test: Mixed Read/Write
Duration: 44.509677166s
Total Events: 10000
Events/sec: 224.67
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 163 MB
Avg Latency: 8.892738ms
P90 Latency: 19.406836ms
P95 Latency: 21.247322ms
P99 Latency: 23.452072ms
Bottom 10% Avg Latency: 21.397913ms
----------------------------------------
Test: Query Performance
Duration: 1m0.095238426s
Total Events: 3253
Events/sec: 54.13
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 126 MB
Avg Latency: 122.100718ms
P90 Latency: 136.523661ms
P95 Latency: 140.360749ms
P99 Latency: 148.353154ms
Bottom 10% Avg Latency: 142.067372ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.117581615s
Total Events: 11408
Events/sec: 189.76
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 149 MB
Avg Latency: 16.525268ms
P90 Latency: 121.696848ms
P95 Latency: 132.130964ms
P99 Latency: 146.285305ms
Bottom 10% Avg Latency: 134.054744ms
----------------------------------------
Test: Peak Throughput
Duration: 9.265496879s
Total Events: 10000
Events/sec: 1079.27
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 1441 MB
Avg Latency: 529.266µs
P90 Latency: 658.033µs
P95 Latency: 732.024µs
P99 Latency: 953.285µs
Bottom 10% Avg Latency: 1.168714ms
----------------------------------------
Test: Burst Pattern
Duration: 16.674963543s
Total Events: 10000
Events/sec: 599.70
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 142 MB
Avg Latency: 264.288µs
P90 Latency: 350.187µs
P95 Latency: 519.139µs
P99 Latency: 1.961326ms
Bottom 10% Avg Latency: 877.366µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.035358264s
Total Events: 9665
Events/sec: 160.99
Success Rate: 96.7%
Concurrent Workers: 8
Memory Used: 151 MB
Avg Latency: 16.019245ms
P90 Latency: 36.340362ms
P95 Latency: 39.113864ms
P99 Latency: 44.271098ms
Bottom 10% Avg Latency: 40.108462ms
----------------------------------------
Test: Query Performance
Duration: 1m0.383519958s
Total Events: 944
Events/sec: 15.63
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 280 MB
Avg Latency: 421.75292ms
P90 Latency: 460.902551ms
P95 Latency: 491.340259ms
P99 Latency: 664.614262ms
Bottom 10% Avg Latency: 538.014725ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.291926697s
Total Events: 10479
Events/sec: 173.80
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 122 MB
Avg Latency: 18.049265ms
P90 Latency: 843.867µs
P95 Latency: 3.05038ms
P99 Latency: 404.540502ms
Bottom 10% Avg Latency: 177.245211ms
----------------------------------------
Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.adoc
1758365287933287/tmp/benchmark_relayer-basic_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
1758365289807797/tmp/benchmark_relayer-basic_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
1758365289812921/tmp/benchmark_relayer-basic_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: relayer-basic
RELAY_URL: ws://relayer-basic:7447
TEST_TIMESTAMP: 2025-09-20T10:48:10+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s
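
The peak-throughput figures come from pushing a fixed batch of pre-built events through the 8-worker pool as fast as possible and dividing by wall-clock time. A toy version of that shape — the sleep stands in for one relay write and is not calibrated to the numbers above:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const events, workers = 10000, 8
	jobs := make(chan int, events)
	for i := 0; i < events; i++ {
		jobs <- i
	}
	close(jobs)
	var wg sync.WaitGroup
	start := time.Now()
	for w := 0; w < workers; w++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for range jobs {
				time.Sleep(400 * time.Microsecond) // stand-in for one relay write
			}
		}()
	}
	wg.Wait()
	elapsed := time.Since(start)
	fmt.Printf("Events saved: %d/%d (100.0%%)\n", events, events)
	fmt.Printf("Duration: %v\nEvents/sec: %.2f\n", elapsed, float64(events)/elapsed.Seconds())
}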


@@ -0,0 +1,298 @@
Starting Nostr Relay Benchmark
Data Directory: /tmp/benchmark_strfry_8
Events: 10000, Workers: 8, Duration: 1m0s
1758365295110579/tmp/benchmark_strfry_8: All 0 tables opened in 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/levels.go:161 /build/pkg/database/logger.go:57
1758365295111085/tmp/benchmark_strfry_8: Discard stats nextEmptySlot: 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/discard.go:55 /build/pkg/database/logger.go:57
1758365295111113/tmp/benchmark_strfry_8: Set nextTxnTs to 0
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:358 /build/pkg/database/logger.go:57
1758365295111319(*types.Uint32)(0xc000141a3c)({
value: (uint32) 1
})
/build/pkg/database/migrations.go:65
1758365295111354migrating to version 1... /build/pkg/database/migrations.go:79
=== Starting test round 1/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.170212358s
Events/sec: 1090.49
Avg latency: 448.058µs
P90 latency: 597.558µs
P95 latency: 667.141µs
P99 latency: 920.784µs
Bottom 10% Avg latency: 729.464µs
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 172.138862ms
Burst completed: 1000 events in 168.99322ms
Burst completed: 1000 events in 162.213786ms
Burst completed: 1000 events in 161.027417ms
Burst completed: 1000 events in 183.148824ms
Burst completed: 1000 events in 178.152837ms
Burst completed: 1000 events in 158.65623ms
Burst completed: 1000 events in 186.7166ms
Burst completed: 1000 events in 177.202878ms
Burst completed: 1000 events in 182.780071ms
Burst test completed: 10000 events in 15.336760896s
Events/sec: 652.03
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 5000 reads in 44.257468151s
Combined ops/sec: 225.95
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 3002 queries in 1m0.091429487s
Queries/sec: 49.96
Avg query latency: 131.632043ms
P95 query latency: 175.810416ms
P99 query latency: 228.52716ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 11308 operations (1308 queries, 10000 writes) in 1m0.111257202s
Operations/sec: 188.12
Avg latency: 16.193707ms
Avg query latency: 137.019852ms
Avg write latency: 389.647µs
P95 latency: 136.70132ms
P99 latency: 156.996779ms
Pausing 10s before next round...
=== Test round completed ===
=== Starting test round 2/2 ===
RunPeakThroughputTest..
=== Peak Throughput Test ===
Events saved: 10000/10000 (100.0%)
Duration: 9.102738s
Events/sec: 1098.57
Avg latency: 493.093µs
P90 latency: 605.684µs
P95 latency: 659.477µs
P99 latency: 826.344µs
Bottom 10% Avg latency: 1.097884ms
RunBurstPatternTest..
=== Burst Pattern Test ===
Burst completed: 1000 events in 178.755916ms
Burst completed: 1000 events in 170.810722ms
Burst completed: 1000 events in 166.730701ms
Burst completed: 1000 events in 172.177576ms
Burst completed: 1000 events in 164.907178ms
Burst completed: 1000 events in 153.267727ms
Burst completed: 1000 events in 157.855743ms
Burst completed: 1000 events in 159.632496ms
Burst completed: 1000 events in 160.802526ms
Burst completed: 1000 events in 178.513954ms
Burst test completed: 10000 events in 15.535933443s
Events/sec: 643.67
RunMixedReadWriteTest..
=== Mixed Read/Write Test ===
Pre-populating database for read tests...
Mixed test completed: 5000 writes, 4550 reads in 1m0.032080518s
Combined ops/sec: 159.08
RunQueryTest..
=== Query Test ===
Pre-populating database with 10000 events for query tests...
Query test completed: 913 queries in 1m0.248877091s
Queries/sec: 15.15
Avg query latency: 436.472206ms
P95 query latency: 493.12732ms
P99 query latency: 623.201275ms
RunConcurrentQueryStoreTest..
=== Concurrent Query/Store Test ===
Pre-populating database with 5000 events for concurrent query/store test...
Concurrent test completed: 10470 operations (470 queries, 10000 writes) in 1m0.293280495s
Operations/sec: 173.65
Avg latency: 18.084009ms
Avg query latency: 395.171481ms
Avg write latency: 360.898µs
P95 latency: 1.338148ms
P99 latency: 413.21015ms
=== Test round completed ===
================================================================================
BENCHMARK REPORT
================================================================================
Test: Peak Throughput
Duration: 9.170212358s
Total Events: 10000
Events/sec: 1090.49
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 108 MB
Avg Latency: 448.058µs
P90 Latency: 597.558µs
P95 Latency: 667.141µs
P99 Latency: 920.784µs
Bottom 10% Avg Latency: 729.464µs
----------------------------------------
Test: Burst Pattern
Duration: 15.336760896s
Total Events: 10000
Events/sec: 652.03
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 123 MB
Avg Latency: 189.06µs
P90 Latency: 248.714µs
P95 Latency: 290.433µs
P99 Latency: 416.924µs
Bottom 10% Avg Latency: 324.174µs
----------------------------------------
Test: Mixed Read/Write
Duration: 44.257468151s
Total Events: 10000
Events/sec: 225.95
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 158 MB
Avg Latency: 8.745534ms
P90 Latency: 18.980294ms
P95 Latency: 20.822884ms
P99 Latency: 23.124918ms
Bottom 10% Avg Latency: 21.006886ms
----------------------------------------
Test: Query Performance
Duration: 1m0.091429487s
Total Events: 3002
Events/sec: 49.96
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 191 MB
Avg Latency: 131.632043ms
P90 Latency: 152.618309ms
P95 Latency: 175.810416ms
P99 Latency: 228.52716ms
Bottom 10% Avg Latency: 186.230874ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.111257202s
Total Events: 11308
Events/sec: 188.12
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 146 MB
Avg Latency: 16.193707ms
P90 Latency: 122.204256ms
P95 Latency: 136.70132ms
P99 Latency: 156.996779ms
Bottom 10% Avg Latency: 140.031139ms
----------------------------------------
Test: Peak Throughput
Duration: 9.102738s
Total Events: 10000
Events/sec: 1098.57
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 1441 MB
Avg Latency: 493.093µs
P90 Latency: 605.684µs
P95 Latency: 659.477µs
P99 Latency: 826.344µs
Bottom 10% Avg Latency: 1.097884ms
----------------------------------------
Test: Burst Pattern
Duration: 15.535933443s
Total Events: 10000
Events/sec: 643.67
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 130 MB
Avg Latency: 186.177µs
P90 Latency: 243.915µs
P95 Latency: 276.146µs
P99 Latency: 418.787µs
Bottom 10% Avg Latency: 309.015µs
----------------------------------------
Test: Mixed Read/Write
Duration: 1m0.032080518s
Total Events: 9550
Events/sec: 159.08
Success Rate: 95.5%
Concurrent Workers: 8
Memory Used: 115 MB
Avg Latency: 16.401942ms
P90 Latency: 37.575878ms
P95 Latency: 40.323279ms
P99 Latency: 45.453669ms
Bottom 10% Avg Latency: 41.331235ms
----------------------------------------
Test: Query Performance
Duration: 1m0.248877091s
Total Events: 913
Events/sec: 15.15
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 211 MB
Avg Latency: 436.472206ms
P90 Latency: 474.430346ms
P95 Latency: 493.12732ms
P99 Latency: 623.201275ms
Bottom 10% Avg Latency: 523.084076ms
----------------------------------------
Test: Concurrent Query/Store
Duration: 1m0.293280495s
Total Events: 10470
Events/sec: 173.65
Success Rate: 100.0%
Concurrent Workers: 8
Memory Used: 171 MB
Avg Latency: 18.084009ms
P90 Latency: 624.339µs
P95 Latency: 1.338148ms
P99 Latency: 413.21015ms
Bottom 10% Avg Latency: 177.8924ms
----------------------------------------
Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
AsciiDoc report saved to: /tmp/benchmark_strfry_8/benchmark_report.adoc
1758365779337138/tmp/benchmark_strfry_8: Lifetime L0 stalled for: 0s
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:536 /build/pkg/database/logger.go:57
1758365780726692/tmp/benchmark_strfry_8:
Level 0 [ ]: NumTables: 00. Size: 0 B of 0 B. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 64 MiB
Level 1 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 2 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 3 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 4 [ ]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 5 [B]: NumTables: 00. Size: 0 B of 10 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 2.0 MiB
Level 6 [ ]: NumTables: 04. Size: 87 MiB of 87 MiB. Score: 0.00->0.00 StaleData: 0 B Target FileSize: 4.0 MiB
Level Done
/go/pkg/mod/github.com/dgraph-io/badger/v4@v4.8.0/db.go:615 /build/pkg/database/logger.go:57
1758365780732292/tmp/benchmark_strfry_8: database closed /build/pkg/database/database.go:134
RELAY_NAME: strfry
RELAY_URL: ws://strfry:8080
TEST_TIMESTAMP: 2025-09-20T10:56:20+00:00
BENCHMARK_CONFIG:
Events: 10000
Workers: 8
Duration: 60s

634
cmd/stresstest/main.go Normal file
View File

@@ -0,0 +1,634 @@
package main
import (
"bufio"
"bytes"
"context"
"flag"
"fmt"
"math/rand"
"os"
"os/signal"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"lol.mleku.dev/log"
"next.orly.dev/pkg/crypto/p256k"
"next.orly.dev/pkg/encoders/envelopes/eventenvelope"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/event/examples"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/encoders/timestamp"
"next.orly.dev/pkg/protocol/ws"
)
// randomHex returns a hex-encoded string of n random bytes (2n hex chars)
func randomHex(n int) string {
b := make([]byte, n)
_, _ = rand.Read(b)
return hex.Enc(b)
}
func makeEvent(rng *rand.Rand, signer *p256k.Signer) (*event.E, error) {
ev := &event.E{
CreatedAt: time.Now().Unix(),
Kind: kind.TextNote.K,
Tags: tag.NewS(),
Content: []byte(fmt.Sprintf("stresstest %d", rng.Int63())),
}
// Random number of p-tags up to 100
nPTags := rng.Intn(101) // 0..100 inclusive
for i := 0; i < nPTags; i++ {
// random 32-byte pubkey in hex (64 chars)
phex := randomHex(32)
ev.Tags.Append(tag.NewFromAny("p", phex))
}
// Sign and verify to ensure pubkey, id and signature are coherent
if err := ev.Sign(signer); err != nil {
return nil, err
}
if ok, err := ev.Verify(); err != nil || !ok {
return nil, fmt.Errorf("event signature verification failed: %v", err)
}
return ev, nil
}
type RelayConn struct {
mu sync.RWMutex
client *ws.Client
url string
}
type CacheIndex struct {
events []*event.E
ids [][]byte
authors [][]byte
times []int64
tags map[byte][][]byte // single-letter tag -> list of values
}
func (rc *RelayConn) Get() *ws.Client {
rc.mu.RLock()
defer rc.mu.RUnlock()
return rc.client
}
func (rc *RelayConn) Reconnect(ctx context.Context) error {
rc.mu.Lock()
defer rc.mu.Unlock()
if rc.client != nil {
_ = rc.client.Close()
}
c, err := ws.RelayConnect(ctx, rc.url)
if err != nil {
return err
}
rc.client = c
return nil
}
// loadCacheAndIndex parses examples.Cache (JSONL of events) and builds an index
func loadCacheAndIndex() (*CacheIndex, error) {
scanner := bufio.NewScanner(bytes.NewReader(examples.Cache))
idx := &CacheIndex{tags: make(map[byte][][]byte)}
for scanner.Scan() {
line := scanner.Bytes()
if len(bytes.TrimSpace(line)) == 0 {
continue
}
ev := event.New()
rem, err := ev.Unmarshal(line)
_ = rem
if err != nil {
// skip malformed lines
continue
}
idx.events = append(idx.events, ev)
// collect fields
if len(ev.ID) > 0 {
idx.ids = append(idx.ids, append([]byte(nil), ev.ID...))
}
if len(ev.Pubkey) > 0 {
idx.authors = append(idx.authors, append([]byte(nil), ev.Pubkey...))
}
idx.times = append(idx.times, ev.CreatedAt)
if ev.Tags != nil {
for _, tg := range *ev.Tags {
if tg == nil || tg.Len() < 2 {
continue
}
k := tg.Key()
if len(k) != 1 {
continue // only single-letter keys per requirement
}
key := k[0]
for _, v := range tg.T[1:] {
idx.tags[key] = append(
idx.tags[key], append([]byte(nil), v...),
)
}
}
}
}
return idx, nil
}
// publishCacheEvents uploads all cache events to the relay using multiple concurrent connections
func publishCacheEvents(
ctx context.Context, relayURL string, idx *CacheIndex,
) (sentCount int) {
numWorkers := runtime.NumCPU()
log.I.F("using %d concurrent connections for cache upload", numWorkers)
// Channel to distribute events to workers
eventChan := make(chan *event.E, len(idx.events))
var totalSent atomic.Int64
// Fill the event channel
for _, ev := range idx.events {
eventChan <- ev
}
close(eventChan)
// Start worker goroutines
var wg sync.WaitGroup
for i := 0; i < numWorkers; i++ {
wg.Add(1)
go func(workerID int) {
defer wg.Done()
// Create separate connection for this worker
client, err := ws.RelayConnect(ctx, relayURL)
if err != nil {
log.E.F("worker %d: failed to connect: %v", workerID, err)
return
}
defer client.Close()
rc := &RelayConn{client: client, url: relayURL}
workerSent := 0
// Process events from the channel
for ev := range eventChan {
select {
case <-ctx.Done():
return
default:
}
// Get client connection
wsClient := rc.Get()
if wsClient == nil {
if err := rc.Reconnect(ctx); err != nil {
log.E.F("worker %d: reconnect failed: %v", workerID, err)
continue
}
wsClient = rc.Get()
}
// Send event without waiting for OK response (fire-and-forget)
envelope := eventenvelope.NewSubmissionWith(ev)
envBytes := envelope.Marshal(nil)
if err := <-wsClient.Write(envBytes); err != nil {
log.E.F("worker %d: write error: %v", workerID, err)
errStr := err.Error()
if strings.Contains(errStr, "connection closed") {
_ = rc.Reconnect(ctx)
}
time.Sleep(50 * time.Millisecond)
continue
}
workerSent++
totalSent.Add(1)
log.T.F("worker %d: sent event %d (total: %d)", workerID, workerSent, totalSent.Load())
// Small delay to prevent overwhelming the relay
select {
case <-time.After(10 * time.Millisecond):
case <-ctx.Done():
return
}
}
log.I.F("worker %d: completed, sent %d events", workerID, workerSent)
}(i)
}
// Wait for all workers to complete
wg.Wait()
return int(totalSent.Load())
}
// buildRandomFilter builds a filter combining random subsets of id, author, timestamp, and a single-letter tag value.
func buildRandomFilter(idx *CacheIndex, rng *rand.Rand, mask int) *filter.F {
// pick a random base event as anchor for fields
i := rng.Intn(len(idx.events))
ev := idx.events[i]
f := filter.New()
// clear defaults we don't set
f.Kinds = kind.NewS() // we don't constrain kinds
// include fields based on mask bits: 1=id, 2=author, 4=timestamp, 8=tag
if mask&1 != 0 {
f.Ids.T = append(f.Ids.T, append([]byte(nil), ev.ID...))
}
if mask&2 != 0 {
f.Authors.T = append(f.Authors.T, append([]byte(nil), ev.Pubkey...))
}
if mask&4 != 0 {
// use a tight window around the event timestamp (exact match)
f.Since = timestamp.FromUnix(ev.CreatedAt)
f.Until = timestamp.FromUnix(ev.CreatedAt)
}
if mask&8 != 0 {
// choose a random single-letter tag from this event if present; fallback to global index
var key byte
var val []byte
chosen := false
if ev.Tags != nil {
for _, tg := range *ev.Tags {
if tg == nil || tg.Len() < 2 {
continue
}
k := tg.Key()
if len(k) == 1 {
key = k[0]
vv := tg.T[1:]
val = vv[rng.Intn(len(vv))]
chosen = true
break
}
}
}
if !chosen && len(idx.tags) > 0 {
// pick a random entry from global tags map
keys := make([]byte, 0, len(idx.tags))
for k := range idx.tags {
keys = append(keys, k)
}
key = keys[rng.Intn(len(keys))]
vals := idx.tags[key]
val = vals[rng.Intn(len(vals))]
}
if key != 0 && len(val) > 0 {
f.Tags.Append(tag.NewFromBytesSlice([]byte{key}, val))
}
}
return f
}
func publisherWorker(
ctx context.Context, rc *RelayConn, id int, stats *uint64,
) {
// Unique RNG per worker
src := rand.NewSource(time.Now().UnixNano() ^ int64(id<<16))
rng := rand.New(src)
// Generate and reuse signing key per worker
signer := &p256k.Signer{}
if err := signer.Generate(); err != nil {
log.E.F("worker %d: signer generate error: %v", id, err)
return
}
for {
select {
case <-ctx.Done():
return
default:
}
ev, err := makeEvent(rng, signer)
if err != nil {
log.E.F("worker %d: makeEvent error: %v", id, err)
return
}
// Send event without waiting for OK response (fire-and-forget)
client := rc.Get()
if client == nil {
_ = rc.Reconnect(ctx)
continue
}
// Create EVENT envelope and send directly without waiting for OK
envelope := eventenvelope.NewSubmissionWith(ev)
envBytes := envelope.Marshal(nil)
if err := <-client.Write(envBytes); err != nil {
log.E.F("worker %d: write error: %v", id, err)
errStr := err.Error()
if strings.Contains(errStr, "connection closed") {
for attempt := 0; attempt < 5; attempt++ {
if ctx.Err() != nil {
return
}
if err := rc.Reconnect(ctx); err == nil {
log.I.F("worker %d: reconnected to %s", id, rc.url)
break
}
select {
case <-time.After(200 * time.Millisecond):
case <-ctx.Done():
return
}
}
}
// back off briefly on error to avoid tight loop if relay misbehaves
select {
case <-time.After(100 * time.Millisecond):
case <-ctx.Done():
return
}
continue
}
atomic.AddUint64(stats, 1)
// Randomly fluctuate pacing: small random sleep 0..50ms plus occasional longer jitter
sleep := time.Duration(rng.Intn(50)) * time.Millisecond
if rng.Intn(10) == 0 { // 10% chance add extra 100..400ms
sleep += time.Duration(100+rng.Intn(300)) * time.Millisecond
}
select {
case <-time.After(sleep):
case <-ctx.Done():
return
}
}
}
func queryWorker(
ctx context.Context, rc *RelayConn, idx *CacheIndex, id int,
queries, results *uint64, subTimeout time.Duration,
minInterval, maxInterval time.Duration,
) {
rng := rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(id<<24)))
mask := 1
for {
select {
case <-ctx.Done():
return
default:
}
if len(idx.events) == 0 {
time.Sleep(200 * time.Millisecond)
continue
}
f := buildRandomFilter(idx, rng, mask)
mask++
if mask > 15 { // all combinations of 4 criteria (excluding 0)
mask = 1
}
client := rc.Get()
if client == nil {
_ = rc.Reconnect(ctx)
continue
}
ff := filter.S{f}
sCtx, cancel := context.WithTimeout(ctx, subTimeout)
sub, err := client.Subscribe(
sCtx, &ff, ws.WithLabel("stresstest-query"),
)
if err != nil {
cancel()
// reconnect on connection issues
errStr := err.Error()
if strings.Contains(errStr, "connection closed") {
_ = rc.Reconnect(ctx)
}
continue
}
atomic.AddUint64(queries, 1)
// read until EOSE or timeout
innerDone := false
for !innerDone {
select {
case <-sCtx.Done():
innerDone = true
case <-sub.EndOfStoredEvents:
innerDone = true
case ev, ok := <-sub.Events:
if !ok {
innerDone = true
break
}
if ev != nil {
atomic.AddUint64(results, 1)
}
}
}
sub.Unsub()
cancel()
// wait a random interval between queries
interval := minInterval
if maxInterval > minInterval {
delta := rng.Int63n(int64(maxInterval - minInterval))
interval += time.Duration(delta)
}
select {
case <-time.After(interval):
case <-ctx.Done():
return
}
}
}
func startReader(ctx context.Context, rl *ws.Client, received *uint64) error {
// Broad filter: subscribe to all text notes so we catch our own writes
f := filter.New()
f.Kinds = kind.NewS(kind.TextNote)
// We don't set authors to ensure we read all text notes coming in
ff := filter.S{f}
sub, err := rl.Subscribe(ctx, &ff, ws.WithLabel("stresstest-reader"))
if err != nil {
return err
}
go func() {
for {
select {
case <-ctx.Done():
return
case ev, ok := <-sub.Events:
if !ok {
return
}
if ev != nil {
atomic.AddUint64(received, 1)
}
}
}
}()
return nil
}
func main() {
var (
address string
port int
workers int
duration time.Duration
publishTimeout time.Duration
queryWorkers int
queryTimeout time.Duration
queryMinInt time.Duration
queryMaxInt time.Duration
skipCache bool
)
flag.StringVar(
&address, "address", "localhost", "relay address (host or IP)",
)
flag.IntVar(&port, "port", 3334, "relay port")
flag.IntVar(
&workers, "workers", 8, "number of concurrent publisher workers",
)
flag.DurationVar(
&duration, "duration", 60*time.Second,
"how long to run the stress test",
)
flag.DurationVar(
&publishTimeout, "publish-timeout", 15*time.Second,
"timeout waiting for OK per publish",
)
flag.IntVar(
&queryWorkers, "query-workers", 4, "number of concurrent query workers",
)
flag.DurationVar(
&queryTimeout, "query-timeout", 3*time.Second,
"subscription timeout for queries",
)
flag.DurationVar(
&queryMinInt, "query-min-interval", 50*time.Millisecond,
"minimum interval between queries per worker",
)
flag.DurationVar(
&queryMaxInt, "query-max-interval", 300*time.Millisecond,
"maximum interval between queries per worker",
)
flag.BoolVar(
&skipCache, "skip-cache", false,
"skip uploading examples.Cache before running",
)
flag.Parse()
relayURL := fmt.Sprintf("ws://%s:%d", address, port)
log.I.F("stresstest: connecting to %s", relayURL)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Handle Ctrl+C
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, os.Interrupt)
go func() {
select {
case <-sigc:
log.I.Ln("interrupt received, shutting down...")
cancel()
case <-ctx.Done():
}
}()
rl, err := ws.RelayConnect(ctx, relayURL)
if err != nil {
log.E.F("failed to connect to relay %s: %v", relayURL, err)
os.Exit(1)
}
defer rl.Close()
rc := &RelayConn{client: rl, url: relayURL}
// Load and publish cache events first (unless skipped)
idx, err := loadCacheAndIndex()
if err != nil {
log.E.F("failed to load examples.Cache: %v", err)
}
cacheSent := 0
if !skipCache && idx != nil && len(idx.events) > 0 {
log.I.F("sending %d events from examples.Cache...", len(idx.events))
cacheSent = publishCacheEvents(ctx, relayURL, idx)
log.I.F("sent %d/%d cache events", cacheSent, len(idx.events))
}
var pubOK uint64
var recvCount uint64
var qCount uint64
var qResults uint64
if err := startReader(ctx, rl, &recvCount); err != nil {
log.E.F("reader subscribe error: %v", err)
// continue anyway, we can still write
}
wg := sync.WaitGroup{}
// Start publisher workers
wg.Add(workers)
for i := 0; i < workers; i++ {
i := i
go func() {
defer wg.Done()
publisherWorker(ctx, rc, i, &pubOK)
}()
}
// Start query workers
if idx != nil && len(idx.events) > 0 && queryWorkers > 0 {
wg.Add(queryWorkers)
for i := 0; i < queryWorkers; i++ {
i := i
go func() {
defer wg.Done()
queryWorker(
ctx, rc, idx, i, &qCount, &qResults, queryTimeout,
queryMinInt, queryMaxInt,
)
}()
}
}
// Timer for duration and periodic stats
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
end := time.NewTimer(duration)
start := time.Now()
loop:
for {
select {
case <-ticker.C:
elapsed := time.Since(start).Seconds()
p := atomic.LoadUint64(&pubOK)
r := atomic.LoadUint64(&recvCount)
qc := atomic.LoadUint64(&qCount)
qr := atomic.LoadUint64(&qResults)
log.I.F(
"elapsed=%.1fs sent=%d (%.0f/s) received=%d cache_sent=%d queries=%d results=%d",
elapsed, p, float64(p)/elapsed, r, cacheSent, qc, qr,
)
case <-end.C:
break loop
case <-ctx.Done():
break loop
}
}
cancel()
wg.Wait()
p := atomic.LoadUint64(&pubOK)
r := atomic.LoadUint64(&recvCount)
qc := atomic.LoadUint64(&qCount)
qr := atomic.LoadUint64(&qResults)
log.I.F(
"stresstest complete: cache_sent=%d sent=%d received=%d queries=%d results=%d duration=%s",
cacheSent, p, r, qc, qr,
time.Since(start).Truncate(time.Millisecond),
)
}

116
debug-websocket.sh Executable file
View File

@@ -0,0 +1,116 @@
#!/bin/bash
# WebSocket Debug Script for Stella's Orly Relay
echo "🔍 Debugging WebSocket Connection for orly-relay.imwald.eu"
echo "=================================================="
echo ""
echo "📋 Step 1: Check if relay container is running"
echo "----------------------------------------------"
docker ps | grep -E "(stella|relay|orly)" || echo "❌ No relay containers found"
echo ""
echo "📋 Step 2: Test local relay connection"
echo "--------------------------------------"
if curl -s -I http://127.0.0.1:7777 | grep -q "426"; then
echo "✅ Local relay responding correctly (HTTP 426)"
else
echo "❌ Local relay not responding correctly"
curl -I http://127.0.0.1:7777
fi
echo ""
echo "📋 Step 3: Check Apache modules"
echo "------------------------------"
if apache2ctl -M 2>/dev/null | grep -q "proxy_wstunnel"; then
echo "✅ proxy_wstunnel module enabled"
else
echo "❌ proxy_wstunnel module NOT enabled"
echo "Run: sudo a2enmod proxy_wstunnel"
fi
if apache2ctl -M 2>/dev/null | grep -q "rewrite"; then
echo "✅ rewrite module enabled"
else
echo "❌ rewrite module NOT enabled"
echo "Run: sudo a2enmod rewrite"
fi
echo ""
echo "📋 Step 4: Check Plesk Apache configuration"
echo "------------------------------------------"
if [ -f "/etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf" ]; then
echo "✅ Plesk config file exists"
echo "Current proxy configuration:"
grep -E "(Proxy|Rewrite|proxy|rewrite)" /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf || echo "❌ No proxy/rewrite rules found"
else
echo "❌ Plesk config file not found"
fi
echo ""
echo "📋 Step 5: Test WebSocket connections"
echo "------------------------------------"
# Test with curl first (simpler)
echo "Testing HTTP upgrade request to local relay..."
if curl -s -I -H "Connection: Upgrade" -H "Upgrade: websocket" http://127.0.0.1:7777 | grep -q "426\|101"; then
echo "✅ Local relay accepts upgrade requests"
else
echo "❌ Local relay doesn't accept upgrade requests"
fi
echo "Testing HTTP upgrade request to remote relay..."
if curl -s -I -H "Connection: Upgrade" -H "Upgrade: websocket" https://orly-relay.imwald.eu | grep -q "426\|101"; then
echo "✅ Remote relay accepts upgrade requests"
else
echo "❌ Remote relay doesn't accept upgrade requests"
echo "This indicates Apache proxy issue"
fi
# Try to install websocat if not available
if ! command -v websocat >/dev/null 2>&1; then
echo ""
echo "📥 Installing websocat for proper WebSocket testing..."
if wget -q https://github.com/vi/websocat/releases/download/v1.12.0/websocat.x86_64-unknown-linux-musl -O websocat 2>/dev/null; then
chmod +x websocat
echo "✅ websocat installed"
else
echo "❌ Could not install websocat (no internet or wget issue)"
echo "Manual install: wget https://github.com/vi/websocat/releases/download/v1.12.0/websocat.x86_64-unknown-linux-musl -O websocat && chmod +x websocat"
fi
fi
# Test with websocat if available
if command -v ./websocat >/dev/null 2>&1; then
echo ""
echo "Testing actual WebSocket connection..."
echo "Local WebSocket test:"
timeout 3 bash -c 'echo "[\"REQ\",\"test\",{}]" | ./websocat ws://127.0.0.1:7777/' 2>/dev/null || echo "❌ Local WebSocket failed"
echo "Remote WebSocket test (ignoring SSL):"
timeout 3 bash -c 'echo "[\"REQ\",\"test\",{}]" | ./websocat --insecure wss://orly-relay.imwald.eu/' 2>/dev/null || echo "❌ Remote WebSocket failed"
fi
echo ""
echo "📋 Step 6: Check ports and connections"
echo "------------------------------------"
echo "Ports listening on 7777:"
netstat -tlnp 2>/dev/null | grep :7777 || ss -tlnp 2>/dev/null | grep :7777 || echo "❌ No process listening on port 7777"
echo ""
echo "📋 Step 7: Test SSL certificate"
echo "------------------------------"
echo "Certificate issuer:"
echo | openssl s_client -connect orly-relay.imwald.eu:443 -servername orly-relay.imwald.eu 2>/dev/null | openssl x509 -noout -issuer 2>/dev/null || echo "❌ SSL test failed"
echo ""
echo "🎯 RECOMMENDED NEXT STEPS:"
echo "========================="
echo "1. If proxy_wstunnel is missing: sudo a2enmod proxy_wstunnel && sudo systemctl restart apache2"
echo "2. If no proxy rules found: Add configuration in Plesk Apache & nginx Settings"
echo "3. If local WebSocket fails: Check if relay container is actually running"
echo "4. If remote WebSocket fails but local works: Apache proxy configuration issue"
echo ""
echo "🔧 Try this simple Plesk configuration:"
echo "ProxyPass / http://127.0.0.1:7777/"
echo "ProxyPassReverse / http://127.0.0.1:7777/"

93
docker-compose.yml Normal file
View File

@@ -0,0 +1,93 @@
# Docker Compose for Stella's Nostr Relay
# Owner: npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx
version: '3.8'
services:
stella-relay:
image: silberengel/orly-relay:latest
container_name: stella-nostr-relay
restart: unless-stopped
ports:
- "127.0.0.1:7777:7777"
volumes:
- relay_data:/data
- ./profiles:/profiles:ro
environment:
# Relay Configuration
- ORLY_DATA_DIR=/data
- ORLY_LISTEN=0.0.0.0
- ORLY_PORT=7777
- ORLY_LOG_LEVEL=info
- ORLY_MAX_CONNECTIONS=1000
- ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx
- ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z
# Performance Settings (based on v0.4.8 optimizations)
- ORLY_CONCURRENT_WORKERS=0 # 0 = auto-detect CPU cores
- ORLY_BATCH_SIZE=1000
- ORLY_CACHE_SIZE=10000
# Database Settings
- BADGER_LOG_LEVEL=ERROR
- BADGER_SYNC_WRITES=false # Better performance, slightly less durability
# Security Settings
- ORLY_REQUIRE_AUTH=false
- ORLY_MAX_EVENT_SIZE=65536
- ORLY_MAX_SUBSCRIPTIONS=20
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:7777"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
# Resource limits
deploy:
resources:
limits:
memory: 1G
cpus: '1.0'
reservations:
memory: 256M
cpus: '0.25'
# Logging configuration
logging:
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
# Optional: Nginx reverse proxy for SSL/domain setup
nginx:
image: nginx:alpine
container_name: stella-nginx
restart: unless-stopped
ports:
- "80:80"
- "443:443"
volumes:
- ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
- ./nginx/ssl:/etc/nginx/ssl:ro
- nginx_logs:/var/log/nginx
depends_on:
- stella-relay
profiles:
- proxy # Only start with: docker-compose --profile proxy up
volumes:
relay_data:
driver: local
driver_opts:
type: none
o: bind
device: ./data
nginx_logs:
driver: local
networks:
default:
name: stella-relay-network

Binary file not shown (image changed: 70 KiB before, 485 KiB after).

View File

@@ -0,0 +1,259 @@
# WebSocket REQ Handling Comparison: Khatru vs Next.orly.dev
## Overview
This document compares how two Nostr relay implementations handle WebSocket connections and REQ (subscription) messages:
1. **Khatru** - A popular Go-based Nostr relay library by fiatjaf
2. **Next.orly.dev** - A custom relay implementation with advanced features
## Architecture Comparison
### Khatru Architecture
- **Monolithic approach**: Single large `HandleWebsocket` method (~380 lines) processes all message types
- **Inline processing**: REQ handling is embedded within the main websocket handler
- **Hook-based extensibility**: Uses function slices for customizable behavior
- **Simple structure**: WebSocket struct with basic fields and mutex for thread safety
### Next.orly.dev Architecture
- **Modular approach**: Separate methods for each message type (`HandleReq`, `HandleEvent`, etc.)
- **Layered processing**: Message identification → envelope parsing → type-specific handling
- **Publisher-subscriber system**: Dedicated infrastructure for subscription management
- **Rich context**: Listener struct with detailed state tracking and metrics
## Connection Establishment
### Khatru
```go
// Simple websocket upgrade
conn, err := rl.upgrader.Upgrade(w, r, nil)
ws := &WebSocket{
conn: conn,
Request: r,
Challenge: hex.EncodeToString(challenge),
negentropySessions: xsync.NewMapOf[string, *NegentropySession](),
}
```
### Next.orly.dev
```go
// More sophisticated setup with IP whitelisting
conn, err = websocket.Accept(w, r, &websocket.AcceptOptions{OriginPatterns: []string{"*"}})
listener := &Listener{
ctx: ctx,
Server: s,
conn: conn,
remote: remote,
req: r,
}
// Immediate AUTH challenge if ACLs are configured
```
**Key Differences:**
- Next.orly.dev includes IP whitelisting and immediate authentication challenges
- Khatru uses the fasthttp/websocket library, while next.orly.dev uses coder/websocket
- Next.orly.dev has more detailed connection state tracking
## Message Processing
### Khatru
- Uses `nostr.MessageParser` for sequential parsing
- Switch statement on envelope type within goroutine
- Direct processing without intermediate validation layers
### Next.orly.dev
- Custom envelope identification system (`envelopes.Identify`)
- Separate validation and processing phases
- Extensive logging and error handling at each step
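A minimal sketch of the layered dispatch described above (the `Identify` signature and handler names here are assumptions for illustration, not the exact next.orly.dev API):
```go
// Sketch: identify the envelope label first, then hand off to a
// type-specific handler; validation happens inside each handler.
func (l *Listener) handleMessage(msg []byte) {
	label, rest, err := envelopes.Identify(msg) // assumed signature
	if err != nil {
		log.E.F("malformed message from %s: %v", l.remote, err)
		return
	}
	switch label {
	case "EVENT":
		l.HandleEvent(rest)
	case "REQ":
		l.HandleReq(rest)
	case "CLOSE":
		l.HandleClose(rest)
	default:
		log.W.F("unknown envelope type %q from %s", label, l.remote)
	}
}
```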
## REQ Message Handling
### Khatru REQ Processing
```go
case *nostr.ReqEnvelope:
eose := sync.WaitGroup{}
eose.Add(len(env.Filters))
// Handle each filter separately
for _, filter := range env.Filters {
err := srl.handleRequest(reqCtx, env.SubscriptionID, &eose, ws, filter)
if err != nil {
// Fail everything if any filter is rejected
ws.WriteJSON(nostr.ClosedEnvelope{SubscriptionID: env.SubscriptionID, Reason: reason})
return
} else {
rl.addListener(ws, env.SubscriptionID, srl, filter, cancelReqCtx)
}
}
go func() {
eose.Wait()
ws.WriteJSON(nostr.EOSEEnvelope(env.SubscriptionID))
}()
```
### Next.orly.dev REQ Processing
```go
// Comprehensive ACL and authentication checks first
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
switch accessLevel {
case "none":
return // Send auth-required response
}
// Process all filters and collect events
for _, f := range *env.Filters {
filterEvents, err = l.QueryEvents(queryCtx, f)
allEvents = append(allEvents, filterEvents...)
}
// Apply privacy and privilege checks
// Send all historical events
// Set up ongoing subscription only if needed
```
## Key Architectural Differences
### 1. **Filter Processing Strategy**
**Khatru:**
- Processes each filter independently and concurrently
- Uses WaitGroup to coordinate EOSE across all filters
- Immediately sets up listeners for ongoing subscriptions
- Fails entire subscription if any filter is rejected
**Next.orly.dev:**
- Processes all filters sequentially in a single context
- Collects all events before applying access control
- Only sets up subscriptions for filters that need ongoing updates
- Gracefully handles individual filter failures
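A compressed sketch of Khatru's per-filter coordination (simplified from the excerpt above; `streamStoredEvents` is a hypothetical stand-in for the real query-and-stream path):
```go
// Sketch: every filter scans stored events concurrently; EOSE is sent
// exactly once, after all scans complete.
func handleReq(ctx context.Context, ws *WebSocket, subID string, filters []nostr.Filter) {
	var eose sync.WaitGroup
	eose.Add(len(filters))
	for _, f := range filters {
		go func(f nostr.Filter) {
			defer eose.Done()
			streamStoredEvents(ctx, ws, subID, f) // hypothetical helper
		}(f)
	}
	go func() {
		eose.Wait()
		ws.WriteJSON(nostr.EOSEEnvelope(subID))
	}()
}
```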
### 2. **Access Control Integration**
**Khatru:**
- Basic NIP-42 authentication support
- Hook-based authorization via `RejectFilter` functions
- Limited built-in access control features
**Next.orly.dev:**
- Comprehensive ACL system with multiple access levels
- Built-in support for private events with npub authorization
- Privileged event filtering based on pubkey and p-tags
- Granular permission checking at multiple stages
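The p-tag privilege rule can be sketched roughly as follows (assumed semantics; the real check also handles hex/binary normalization and kind-specific rules):
```go
// Sketch: a privileged event is visible only to its author or to a
// pubkey listed in one of its "p" tags.
func canSeePrivileged(authed []byte, ev *event.E) bool {
	if bytes.Equal(ev.Pubkey, authed) {
		return true
	}
	for _, t := range *ev.Tags {
		if t.Len() >= 2 && bytes.Equal(t.Key(), []byte("p")) &&
			bytes.Equal(t.T[1], authed) { // value encoding normalization elided
			return true
		}
	}
	return false
}
```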
### 3. **Subscription Management**
**Khatru:**
```go
// Simple listener registration
type listenerSpec struct {
filter nostr.Filter
cancel context.CancelCauseFunc
subRelay *Relay
}
rl.addListener(ws, subscriptionID, relay, filter, cancel)
```
**Next.orly.dev:**
```go
// Publisher-subscriber system with rich metadata
type W struct {
Conn *websocket.Conn
remote string
Id string
Receiver event.C
Filters *filter.S
AuthedPubkey []byte
}
l.publishers.Receive(&W{...})
```
### 4. **Performance Optimizations**
**Khatru:**
- Concurrent filter processing
- Immediate streaming of events as they're found
- Memory-efficient with direct event streaming
**Next.orly.dev:**
- Batch processing with deduplication
- Memory management with explicit `ev.Free()` calls
- Smart subscription cancellation for ID-only queries
- Event result caching and seen-tracking
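For example, batch deduplication with seen-tracking can be done in place before results are sent — a sketch under the assumption that `ev.Free()` releases pooled buffers:
```go
// Sketch: drop duplicate IDs from the collected result set in place,
// releasing duplicates immediately rather than waiting for the GC.
func dedupe(allEvents []*event.E) []*event.E {
	seen := make(map[string]struct{}, len(allEvents))
	out := allEvents[:0]
	for _, ev := range allEvents {
		if _, ok := seen[string(ev.ID)]; ok {
			ev.Free() // release the duplicate immediately
			continue
		}
		seen[string(ev.ID)] = struct{}{}
		out = append(out, ev)
	}
	return out
}
```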
### 5. **Error Handling & Observability**
**Khatru:**
- Basic error logging
- Simple connection state management
- Limited metrics and observability
**Next.orly.dev:**
- Comprehensive error handling with context preservation
- Detailed logging at each processing stage
- Built-in metrics (message count, REQ count, event count)
- Graceful degradation on individual component failures
## Memory Management
### Khatru
- Relies on Go's garbage collector
- Simple WebSocket struct with minimal state
- Uses sync.Map for thread-safe operations
### Next.orly.dev
- Explicit memory management with `ev.Free()` calls
- Resource pooling and reuse patterns
- Detailed tracking of connection resources
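The lifecycle difference shows up at call sites. A hedged sketch of the explicit-release pattern (assuming pooled events that must not be used after `Free`):
```go
// Sketch: deliver, then release; the event must not be touched after Free.
for _, ev := range results {
	if err := send(ws, ev); err != nil { // hypothetical delivery helper
		log.E.F("delivery failed: %v", err)
	}
	ev.Free()
}
```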
## Concurrency Models
### Khatru
- Per-connection goroutine for message reading
- Additional goroutines for each message processing
- WaitGroup coordination for multi-filter EOSE
### Next.orly.dev
- Per-connection goroutine with single-threaded message processing
- Publisher-subscriber system handles concurrent event distribution
- Context-based cancellation throughout
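The context-based cancellation mentioned above follows the standard Go shape (illustrative only; coder/websocket reads accept a context directly):
```go
// Sketch: a per-connection read loop that exits as soon as the
// connection context is cancelled or the connection closes.
func (l *Listener) readLoop(ctx context.Context) {
	for {
		typ, data, err := l.conn.Read(ctx) // context-aware read
		if err != nil {
			return // covers both ctx cancellation and connection close
		}
		l.handleMessage(typ, data) // processed single-threaded, in order
	}
}
```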
## Trade-offs Analysis
### Khatru Advantages
- **Simplicity**: Easier to understand and modify
- **Performance**: Lower latency due to concurrent processing
- **Flexibility**: Hook-based architecture allows extensive customization
- **Streaming**: Events sent as soon as they're found
### Khatru Disadvantages
- **Monolithic**: Large methods harder to maintain
- **Limited ACL**: Basic authentication and authorization
- **Error handling**: Less graceful failure recovery
- **Resource usage**: No explicit memory management
### Next.orly.dev Advantages
- **Security**: Comprehensive ACL and privacy features
- **Observability**: Extensive logging and metrics
- **Resource management**: Explicit memory and connection lifecycle management
- **Modularity**: Easier to test and extend individual components
- **Robustness**: Graceful handling of edge cases and failures
### Next.orly.dev Disadvantages
- **Complexity**: Higher cognitive overhead and learning curve
- **Latency**: Sequential processing may be slower for some use cases
- **Resource overhead**: More memory usage due to batching and state tracking
- **Coupling**: Tighter integration between components
## Conclusion
Both implementations represent different philosophies:
- **Khatru** prioritizes simplicity, performance, and extensibility through a hook-based architecture
- **Next.orly.dev** prioritizes security, observability, and robustness through comprehensive built-in features
The choice between them depends on specific requirements:
- Choose **Khatru** for high-performance relays with custom business logic
- Choose **Next.orly.dev** for production relays requiring comprehensive access control and monitoring
Both approaches demonstrate mature understanding of Nostr protocol requirements while making different trade-offs in complexity vs. features.

6
go.mod
View File

@@ -15,11 +15,12 @@ require (
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b
go-simpler.org/env v0.12.0
go.uber.org/atomic v1.11.0
golang.org/x/crypto v0.41.0
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b
golang.org/x/lint v0.0.0-20241112194109-818c5a804067
golang.org/x/net v0.43.0
honnef.co/go/tools v0.6.1
lol.mleku.dev v1.0.2
lol.mleku.dev v1.0.3
lukechampine.com/frand v1.5.1
)
@@ -28,15 +29,12 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/felixge/fgprof v0.9.3 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/google/flatbuffers v25.2.10+incompatible // indirect
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/templexxx/cpu v0.0.1 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect

13
go.sum
View File

@@ -20,8 +20,6 @@ github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa5
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -46,10 +44,6 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -82,6 +76,8 @@ go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ=
@@ -101,7 +97,6 @@ golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -121,7 +116,7 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
lol.mleku.dev v1.0.2 h1:bSV1hHnkmt1hq+9nSvRwN6wgcI7itbM3XRZ4dMB438c=
lol.mleku.dev v1.0.2/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
lol.mleku.dev v1.0.3 h1:IrqLd/wFRghu6MX7mgyKh//3VQiId2AM4RdCbFqSLnY=
lol.mleku.dev v1.0.3/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
lukechampine.com/frand v1.5.1/go.mod h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q=

213
main.go
View File

@@ -4,7 +4,9 @@ import (
"context"
"fmt"
"net/http"
pp "net/http/pprof"
"os"
"os/exec"
"os/signal"
"runtime"
"time"
@@ -15,27 +17,157 @@ import (
"next.orly.dev/app"
"next.orly.dev/app/config"
"next.orly.dev/pkg/acl"
"next.orly.dev/pkg/crypto/keys"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/spider"
"next.orly.dev/pkg/version"
)
// openBrowser attempts to open the specified URL in the default browser.
// It supports multiple platforms including Linux, macOS, and Windows.
func openBrowser(url string) {
var err error
switch runtime.GOOS {
case "linux":
err = exec.Command("xdg-open", url).Start()
case "windows":
err = exec.Command(
"rundll32", "url.dll,FileProtocolHandler", url,
).Start()
case "darwin":
err = exec.Command("open", url).Start()
default:
log.W.F("unsupported platform for opening browser: %s", runtime.GOOS)
return
}
if err != nil {
log.E.F("failed to open browser: %v", err)
} else {
log.I.F("opened browser to %s", url)
}
}
func main() {
runtime.GOMAXPROCS(runtime.NumCPU() * 4)
var err error
var cfg *config.C
if cfg, err = config.New(); chk.T(err) {
}
log.I.F("starting %s %s", cfg.AppName, version.V)
// Handle 'identity' subcommand: print relay identity secret and pubkey and exit
if config.IdentityRequested() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
var db *database.D
if db, err = database.New(ctx, cancel, cfg.DataDir, cfg.DBLogLevel); chk.E(err) {
os.Exit(1)
}
defer db.Close()
skb, err := db.GetOrCreateRelayIdentitySecret()
if chk.E(err) {
os.Exit(1)
}
pk, err := keys.SecretBytesToPubKeyHex(skb)
if chk.E(err) {
os.Exit(1)
}
fmt.Printf("identity secret: %s\nidentity pubkey: %s\n", hex.Enc(skb), pk)
os.Exit(0)
}
// If OpenPprofWeb is true and profiling is enabled, we need to ensure HTTP profiling is also enabled
if cfg.OpenPprofWeb && cfg.Pprof != "" && !cfg.PprofHTTP {
log.I.F("enabling HTTP pprof server to support web viewer")
cfg.PprofHTTP = true
}
log.I.F("starting %s %s", cfg.AppName, version.V)
switch cfg.Pprof {
case "cpu":
prof := profile.Start(profile.CPUProfile)
defer prof.Stop()
if cfg.PprofPath != "" {
prof := profile.Start(
profile.CPUProfile, profile.ProfilePath(cfg.PprofPath),
)
defer prof.Stop()
} else {
prof := profile.Start(profile.CPUProfile)
defer prof.Stop()
}
case "memory":
prof := profile.Start(profile.MemProfile)
defer prof.Stop()
if cfg.PprofPath != "" {
prof := profile.Start(
profile.MemProfile, profile.MemProfileRate(32),
profile.ProfilePath(cfg.PprofPath),
)
defer prof.Stop()
} else {
prof := profile.Start(profile.MemProfile)
defer prof.Stop()
}
case "allocation":
prof := profile.Start(profile.MemProfileAllocs)
defer prof.Stop()
if cfg.PprofPath != "" {
prof := profile.Start(
profile.MemProfileAllocs, profile.MemProfileRate(32),
profile.ProfilePath(cfg.PprofPath),
)
defer prof.Stop()
} else {
prof := profile.Start(profile.MemProfileAllocs)
defer prof.Stop()
}
case "heap":
if cfg.PprofPath != "" {
prof := profile.Start(
profile.MemProfileHeap, profile.ProfilePath(cfg.PprofPath),
)
defer prof.Stop()
} else {
prof := profile.Start(profile.MemProfileHeap)
defer prof.Stop()
}
case "mutex":
if cfg.PprofPath != "" {
prof := profile.Start(
profile.MutexProfile, profile.ProfilePath(cfg.PprofPath),
)
defer prof.Stop()
} else {
prof := profile.Start(profile.MutexProfile)
defer prof.Stop()
}
case "threadcreate":
if cfg.PprofPath != "" {
prof := profile.Start(
profile.ThreadcreationProfile,
profile.ProfilePath(cfg.PprofPath),
)
defer prof.Stop()
} else {
prof := profile.Start(profile.ThreadcreationProfile)
defer prof.Stop()
}
case "goroutine":
if cfg.PprofPath != "" {
prof := profile.Start(
profile.GoroutineProfile, profile.ProfilePath(cfg.PprofPath),
)
defer prof.Stop()
} else {
prof := profile.Start(profile.GoroutineProfile)
defer prof.Stop()
}
case "block":
if cfg.PprofPath != "" {
prof := profile.Start(
profile.BlockProfile, profile.ProfilePath(cfg.PprofPath),
)
defer prof.Stop()
} else {
prof := profile.Start(profile.BlockProfile)
defer prof.Stop()
}
}
ctx, cancel := context.WithCancel(context.Background())
var db *database.D
@@ -50,6 +182,53 @@ func main() {
}
acl.Registry.Syncer()
// Initialize and start spider functionality if enabled
spiderCtx, spiderCancel := context.WithCancel(ctx)
spiderInstance := spider.New(db, cfg, spiderCtx, spiderCancel)
spiderInstance.Start()
defer spiderInstance.Stop()
// Start HTTP pprof server if enabled
if cfg.PprofHTTP {
pprofAddr := fmt.Sprintf("%s:%d", cfg.Listen, 6060)
pprofMux := http.NewServeMux()
pprofMux.HandleFunc("/debug/pprof/", pp.Index)
pprofMux.HandleFunc("/debug/pprof/cmdline", pp.Cmdline)
pprofMux.HandleFunc("/debug/pprof/profile", pp.Profile)
pprofMux.HandleFunc("/debug/pprof/symbol", pp.Symbol)
pprofMux.HandleFunc("/debug/pprof/trace", pp.Trace)
for _, p := range []string{
"allocs", "block", "goroutine", "heap", "mutex", "threadcreate",
} {
pprofMux.Handle("/debug/pprof/"+p, pp.Handler(p))
}
ppSrv := &http.Server{Addr: pprofAddr, Handler: pprofMux}
go func() {
log.I.F("pprof server listening on %s", pprofAddr)
if err := ppSrv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
log.E.F("pprof server error: %v", err)
}
}()
go func() {
<-ctx.Done()
shutdownCtx, cancelShutdown := context.WithTimeout(
context.Background(), 2*time.Second,
)
defer cancelShutdown()
_ = ppSrv.Shutdown(shutdownCtx)
}()
// Open the pprof web viewer if enabled
if cfg.OpenPprofWeb && cfg.Pprof != "" {
pprofURL := "http://localhost:6060/debug/pprof/"
go func() {
// Wait a moment for the server to start
time.Sleep(500 * time.Millisecond)
openBrowser(pprofURL)
}()
}
}
// Start health check HTTP server if configured
var healthSrv *http.Server
if cfg.HealthPort > 0 {
@@ -61,6 +240,20 @@ func main() {
log.I.F("health check ok")
},
)
// Optional shutdown endpoint to gracefully stop the process so profiling defers run
if cfg.EnableShutdown {
mux.HandleFunc(
"/shutdown", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
_, _ = w.Write([]byte("shutting down"))
log.I.F("shutdown requested via /shutdown; sending SIGINT to self")
go func() {
p, _ := os.FindProcess(os.Getpid())
_ = p.Signal(os.Interrupt)
}()
},
)
}
healthSrv = &http.Server{
Addr: fmt.Sprintf(
"%s:%d", cfg.Listen, cfg.HealthPort,
@@ -91,12 +284,14 @@ func main() {
fmt.Printf("\r")
cancel()
chk.E(db.Close())
log.I.F("exiting")
return
case <-quit:
cancel()
chk.E(db.Close())
log.I.F("exiting")
return
}
}
log.I.F("exiting")
}

89
manage-relay.sh Executable file
View File

@@ -0,0 +1,89 @@
#!/bin/bash
# Stella's Orly Relay Management Script
set -e
RELAY_SERVICE="stella-relay"
RELAY_URL="ws://127.0.0.1:7777"
case "${1:-}" in
"start")
echo "🚀 Starting Stella's Orly Relay..."
sudo systemctl start $RELAY_SERVICE
echo "✅ Relay started!"
;;
"stop")
echo "⏹️ Stopping Stella's Orly Relay..."
sudo systemctl stop $RELAY_SERVICE
echo "✅ Relay stopped!"
;;
"restart")
echo "🔄 Restarting Stella's Orly Relay..."
sudo systemctl restart $RELAY_SERVICE
echo "✅ Relay restarted!"
;;
"status")
echo "📊 Stella's Orly Relay Status:"
sudo systemctl status $RELAY_SERVICE --no-pager
;;
"logs")
echo "📜 Stella's Orly Relay Logs:"
sudo journalctl -u $RELAY_SERVICE -f --no-pager
;;
"test")
echo "🧪 Testing relay connection..."
if curl -s -I http://127.0.0.1:7777 | grep -q "426 Upgrade Required"; then
echo "✅ Relay is responding correctly!"
echo "📡 WebSocket URL: $RELAY_URL"
else
echo "❌ Relay is not responding correctly"
exit 1
fi
;;
"enable")
echo "🔧 Enabling relay to start at boot..."
sudo systemctl enable $RELAY_SERVICE
echo "✅ Relay will start automatically at boot!"
;;
"disable")
echo "🔧 Disabling relay auto-start..."
sudo systemctl disable $RELAY_SERVICE
echo "✅ Relay will not start automatically at boot!"
;;
"info")
echo "📋 Stella's Orly Relay Information:"
echo " Service: $RELAY_SERVICE"
echo " WebSocket URL: $RELAY_URL"
echo " HTTP URL: http://127.0.0.1:7777"
echo " Data Directory: /home/madmin/.local/share/orly-relay"
echo " Config Directory: $(pwd)"
echo ""
echo "🔑 Admin NPubs:"
echo " Stella: npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx"
echo " Admin2: npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z"
;;
*)
echo "🌲 Stella's Orly Relay Management Script"
echo ""
echo "Usage: $0 [COMMAND]"
echo ""
echo "Commands:"
echo " start Start the relay"
echo " stop Stop the relay"
echo " restart Restart the relay"
echo " status Show relay status"
echo " logs Show relay logs (follow mode)"
echo " test Test relay connection"
echo " enable Enable auto-start at boot"
echo " disable Disable auto-start at boot"
echo " info Show relay information"
echo ""
echo "Examples:"
echo " $0 start # Start the relay"
echo " $0 status # Check if it's running"
echo " $0 test # Test WebSocket connection"
echo " $0 logs # Watch real-time logs"
echo ""
echo "🌲 Crafted in the digital forest by Stella ✨"
;;
esac

View File

@@ -66,3 +66,15 @@ func (s *S) Type() (typ string) {
}
return
}
// AddFollow forwards a pubkey to the active ACL if it supports dynamic follows
func (s *S) AddFollow(pub []byte) {
for _, i := range s.ACL {
if i.Type() == s.Active.Load() {
if f, ok := i.(*Follows); ok {
f.AddFollow(pub)
}
break
}
}
}

View File

@@ -1,6 +1,7 @@
package acl
import (
"bytes"
"context"
"reflect"
"strings"
@@ -208,6 +209,7 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
return
}
urls := f.adminRelays()
log.I.S(urls)
if len(urls) == 0 {
log.W.F("follows syncer: no admin relays found in DB (kind 10002)")
return
@@ -359,6 +361,42 @@ func (f *Follows) Syncer() {
f.updated <- struct{}{}
}
// GetFollowedPubkeys returns a copy of the followed pubkeys list
func (f *Follows) GetFollowedPubkeys() [][]byte {
f.followsMx.RLock()
defer f.followsMx.RUnlock()
followedPubkeys := make([][]byte, len(f.follows))
copy(followedPubkeys, f.follows)
return followedPubkeys
}
// AddFollow appends a pubkey to the in-memory follows list if not already present
// and signals the syncer to refresh subscriptions.
func (f *Follows) AddFollow(pub []byte) {
if len(pub) == 0 {
return
}
f.followsMx.Lock()
defer f.followsMx.Unlock()
for _, p := range f.follows {
if bytes.Equal(p, pub) {
return
}
}
b := make([]byte, len(pub))
copy(b, pub)
f.follows = append(f.follows, b)
// notify syncer if initialized
if f.updated != nil {
select {
case f.updated <- struct{}{}:
default:
// if channel is full or not yet listened to, ignore
}
}
}
func init() {
log.T.F("registering follows ACL")
Registry.Register(new(Follows))

View File

@@ -0,0 +1 @@
Code copied from https://github.com/paulmillr/nip44/tree/e7aed61aaf77240ac10c325683eed14b22e7950f/go.

View File

@@ -0,0 +1,3 @@
// Package encryption contains the message encryption schemes defined in NIP-04
// and NIP-44, used for encrypting the content of nostr messages.
package encryption

View File

@@ -0,0 +1,88 @@
package encryption
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"encoding/base64"
"lol.mleku.dev/chk"
"lol.mleku.dev/errorf"
"lukechampine.com/frand"
)
// EncryptNip4 encrypts message with key using aes-256-cbc. key should be the shared secret generated by
// ComputeSharedSecret.
//
// Returns: base64(encrypted_bytes) + "?iv=" + base64(initialization_vector).
func EncryptNip4(msg, key []byte) (ct []byte, err error) {
// block size is 16 bytes
iv := make([]byte, 16)
if _, err = frand.Read(iv); chk.E(err) {
err = errorf.E("error creating initialization vector: %w", err)
return
}
// automatically picks aes-256 based on key length (32 bytes)
var block cipher.Block
if block, err = aes.NewCipher(key); chk.E(err) {
err = errorf.E("error creating block cipher: %w", err)
return
}
mode := cipher.NewCBCEncrypter(block, iv)
plaintext := []byte(msg)
// add padding
base := len(plaintext)
// this will be a number between 1 and 16 (inclusive), never 0
bs := block.BlockSize()
padding := bs - base%bs
// encode the padding in all the padding bytes themselves
padText := bytes.Repeat([]byte{byte(padding)}, padding)
paddedMsgBytes := append(plaintext, padText...)
ciphertext := make([]byte, len(paddedMsgBytes))
mode.CryptBlocks(ciphertext, paddedMsgBytes)
return []byte(base64.StdEncoding.EncodeToString(ciphertext) + "?iv=" +
base64.StdEncoding.EncodeToString(iv)), nil
}
// DecryptNip4 decrypts a content string using the shared secret key. The inverse operation to message ->
// EncryptNip4(message, key).
func DecryptNip4(content, key []byte) (msg []byte, err error) {
parts := bytes.Split(content, []byte("?iv="))
if len(parts) < 2 {
return nil, errorf.E(
"error parsing encrypted message: no initialization vector",
)
}
// DecodedLen (not EncodedLen) sizes the output buffers; slice to the
// actual number of bytes written so CBC sees only real ciphertext.
var n int
ciphertext := make([]byte, base64.StdEncoding.DecodedLen(len(parts[0])))
if n, err = base64.StdEncoding.Decode(ciphertext, parts[0]); chk.E(err) {
err = errorf.E("error decoding ciphertext from base64: %w", err)
return
}
ciphertext = ciphertext[:n]
iv := make([]byte, base64.StdEncoding.DecodedLen(len(parts[1])))
if n, err = base64.StdEncoding.Decode(iv, parts[1]); chk.E(err) {
err = errorf.E("error decoding iv from base64: %w", err)
return
}
iv = iv[:n]
var block cipher.Block
if block, err = aes.NewCipher(key); chk.E(err) {
err = errorf.E("error creating block cipher: %w", err)
return
}
mode := cipher.NewCBCDecrypter(block, iv)
msg = make([]byte, len(ciphertext))
mode.CryptBlocks(msg, ciphertext)
// remove padding
var (
plaintextLen = len(msg)
)
if plaintextLen > 0 {
// the padding amount is encoded in the padding bytes themselves
padding := int(msg[plaintextLen-1])
if padding > plaintextLen {
err = errorf.E("invalid padding amount: %d", padding)
return
}
msg = msg[0 : plaintextLen-padding]
}
return msg, nil
}

View File

@@ -0,0 +1,260 @@
package encryption
import (
"crypto/hmac"
"crypto/rand"
"encoding/base64"
"encoding/binary"
"io"
"math"
"golang.org/x/crypto/chacha20"
"golang.org/x/crypto/hkdf"
"lol.mleku.dev/chk"
"lol.mleku.dev/errorf"
"next.orly.dev/pkg/crypto/p256k"
"next.orly.dev/pkg/crypto/sha256"
"next.orly.dev/pkg/interfaces/signer"
"next.orly.dev/pkg/utils"
)
const (
version byte = 2
MinPlaintextSize = 0x0001 // 1b msg => padded to 32b
MaxPlaintextSize = 0xffff // 65535 (64kb-1) => padded to 64kb
)
type Opts struct {
err error
nonce []byte
}
// Deprecated: use WithCustomNonce instead of WithCustomSalt, so the naming is less confusing
var WithCustomSalt = WithCustomNonce
// WithCustomNonce enables using a custom nonce (salt) instead of using the
// system crypto/rand entropy source.
func WithCustomNonce(salt []byte) func(opts *Opts) {
return func(opts *Opts) {
if len(salt) != 32 {
opts.err = errorf.E("salt must be 32 bytes, got %d", len(salt))
}
opts.nonce = salt
}
}
// Encrypt data using a provided symmetric conversation key using NIP-44
// encryption (chacha20 cipher stream and sha256 HMAC).
func Encrypt(
plaintext, conversationKey []byte, applyOptions ...func(opts *Opts),
) (
cipherString []byte, err error,
) {
var o Opts
for _, apply := range applyOptions {
apply(&o)
}
if chk.E(o.err) {
err = o.err
return
}
if o.nonce == nil {
o.nonce = make([]byte, 32)
if _, err = rand.Read(o.nonce); chk.E(err) {
return
}
}
var enc, cc20nonce, auth []byte
if enc, cc20nonce, auth, err = getKeys(
conversationKey, o.nonce,
); chk.E(err) {
return
}
plain := plaintext
size := len(plain)
if size < MinPlaintextSize || size > MaxPlaintextSize {
err = errorf.E("plaintext should be between 1b and 64kB")
return
}
padding := CalcPadding(size)
padded := make([]byte, 2+padding)
binary.BigEndian.PutUint16(padded, uint16(size))
copy(padded[2:], plain)
var cipher []byte
if cipher, err = encrypt(enc, cc20nonce, padded); chk.E(err) {
return
}
var mac []byte
if mac, err = sha256Hmac(auth, cipher, o.nonce); chk.E(err) {
return
}
ct := make([]byte, 0, 1+32+len(cipher)+32)
ct = append(ct, version)
ct = append(ct, o.nonce...)
ct = append(ct, cipher...)
ct = append(ct, mac...)
cipherString = make([]byte, base64.StdEncoding.EncodedLen(len(ct)))
base64.StdEncoding.Encode(cipherString, ct)
return
}
// Decrypt data that has been encoded using a provided symmetric conversation
// key using NIP-44 encryption (chacha20 cipher stream and sha256 HMAC).
func Decrypt(b64ciphertextWrapped, conversationKey []byte) (
plaintext []byte,
err error,
) {
cLen := len(b64ciphertextWrapped)
if cLen < 132 || cLen > 87472 {
err = errorf.E("invalid payload length: %d", cLen)
return
}
if len(b64ciphertextWrapped) > 0 && b64ciphertextWrapped[0] == '#' {
err = errorf.E("unknown version")
return
}
var decoded []byte
if decoded, err = base64.StdEncoding.DecodeString(string(b64ciphertextWrapped)); chk.E(err) {
return
}
if decoded[0] != version {
err = errorf.E("unknown version %d", decoded[0])
return
}
dLen := len(decoded)
if dLen < 99 || dLen > 65603 {
err = errorf.E("invalid data length: %d", dLen)
return
}
nonce, ciphertext, givenMac := decoded[1:33], decoded[33:dLen-32], decoded[dLen-32:]
var enc, cc20nonce, auth []byte
if enc, cc20nonce, auth, err = getKeys(conversationKey, nonce); chk.E(err) {
return
}
var expectedMac []byte
if expectedMac, err = sha256Hmac(auth, ciphertext, nonce); chk.E(err) {
return
}
if !utils.FastEqual(givenMac, expectedMac) {
err = errorf.E("invalid hmac")
return
}
var padded []byte
if padded, err = encrypt(enc, cc20nonce, ciphertext); chk.E(err) {
return
}
unpaddedLen := binary.BigEndian.Uint16(padded[0:2])
if unpaddedLen < uint16(MinPlaintextSize) || unpaddedLen > uint16(MaxPlaintextSize) ||
len(padded) != 2+CalcPadding(int(unpaddedLen)) {
err = errorf.E("invalid padding")
return
}
unpadded := padded[2:][:unpaddedLen]
if len(unpadded) == 0 || len(unpadded) != int(unpaddedLen) {
err = errorf.E("invalid padding")
return
}
plaintext = unpadded
return
}
// GenerateConversationKeyFromHex performs ECDH between a hex-encoded secret and public key, then derives the NIP-44 v2 conversation key via HKDF-extract with the "nip44-v2" salt.
func GenerateConversationKeyFromHex(pkh, skh string) (ck []byte, err error) {
if skh >= "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141" ||
skh == "0000000000000000000000000000000000000000000000000000000000000000" {
err = errorf.E(
"invalid private key: x coordinate %s is not on the secp256k1 curve",
skh,
)
return
}
var sign signer.I
if sign, err = p256k.NewSecFromHex(skh); chk.E(err) {
return
}
var pk []byte
if pk, err = p256k.HexToBin(pkh); chk.E(err) {
return
}
var shared []byte
if shared, err = sign.ECDH(pk); chk.E(err) {
return
}
ck = hkdf.Extract(sha256.New, shared, []byte("nip44-v2"))
return
}
func GenerateConversationKeyWithSigner(sign signer.I, pk []byte) (
ck []byte, err error,
) {
var shared []byte
if shared, err = sign.ECDH(pk); chk.E(err) {
return
}
ck = hkdf.Extract(sha256.New, shared, []byte("nip44-v2"))
return
}
func encrypt(key, nonce, message []byte) (dst []byte, err error) {
var cipher *chacha20.Cipher
if cipher, err = chacha20.NewUnauthenticatedCipher(key, nonce); chk.E(err) {
return
}
dst = make([]byte, len(message))
cipher.XORKeyStream(dst, message)
return
}
func sha256Hmac(key, ciphertext, nonce []byte) (h []byte, err error) {
if len(nonce) != sha256.Size {
err = errorf.E("nonce aad must be 32 bytes")
return
}
hm := hmac.New(sha256.New, key)
hm.Write(nonce)
hm.Write(ciphertext)
h = hm.Sum(nil)
return
}
func getKeys(conversationKey, nonce []byte) (
enc, cc20nonce, auth []byte, err error,
) {
if len(conversationKey) != 32 {
err = errorf.E("conversation key must be 32 bytes")
return
}
if len(nonce) != 32 {
err = errorf.E("nonce must be 32 bytes")
return
}
r := hkdf.Expand(sha256.New, conversationKey, nonce)
enc = make([]byte, 32)
if _, err = io.ReadFull(r, enc); chk.E(err) {
return
}
cc20nonce = make([]byte, 12)
if _, err = io.ReadFull(r, cc20nonce); chk.E(err) {
return
}
auth = make([]byte, 32)
if _, err = io.ReadFull(r, auth); chk.E(err) {
return
}
return
}
// CalcPadding creates padding for the message payload that is precisely a power
// of two in order to reduce the chances of plaintext attack. This is arguably
// wasteful, since it can inflate the message size considerably when a few
// random bytes and a length prefix would achieve the same result.
func CalcPadding(sLen int) (l int) {
if sLen <= 32 {
return 32
}
nextPower := 1 << int(math.Floor(math.Log2(float64(sLen-1)))+1)
chunk := int(math.Max(32, float64(nextPower/8)))
l = chunk * int(math.Floor(float64((sLen-1)/chunk))+1)
return
}
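// Worked examples (values follow from the formula above; an illustrative
// sanity check, not part of the upstream file):
//   CalcPadding(1)   == 32  // anything up to 32 bytes pads to 32
//   CalcPadding(33)  == 64  // nextPower=64, chunk=32
//   CalcPadding(100) == 128 // nextPower=128, chunk=32
//   CalcPadding(320) == 320 // nextPower=512, chunk=64, already a multiple
//   CalcPadding(500) == 512 // nextPower=512, chunk=64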

File diff suppressed because it is too large.

83
pkg/crypto/keys/keys.go Normal file
View File

@@ -0,0 +1,83 @@
// Package keys is a set of helpers for generating and converting public/secret
// keys to hex and back to binary.
package keys
import (
"bytes"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/crypto/ec/schnorr"
"next.orly.dev/pkg/crypto/p256k"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/utils"
)
// GeneratePrivateKey - deprecated, use GenerateSecretKeyHex
var GeneratePrivateKey = func() string { return GenerateSecretKeyHex() }
// GenerateSecretKey creates a new secret key and returns the bytes of the secret.
func GenerateSecretKey() (skb []byte, err error) {
signer := &p256k.Signer{}
if err = signer.Generate(); chk.E(err) {
return
}
skb = signer.Sec()
return
}
// GenerateSecretKeyHex generates a secret key and encodes the bytes as hex.
func GenerateSecretKeyHex() (sks string) {
skb, err := GenerateSecretKey()
if chk.E(err) {
return
}
return hex.Enc(skb)
}
// GetPublicKeyHex generates a public key from a hex encoded secret key.
func GetPublicKeyHex(sk string) (pk string, err error) {
var b []byte
if b, err = hex.Dec(sk); chk.E(err) {
return
}
signer := &p256k.Signer{}
if err = signer.InitSec(b); chk.E(err) {
return
}
return hex.Enc(signer.Pub()), nil
}
// SecretBytesToPubKeyHex generates a public key from secret key bytes.
func SecretBytesToPubKeyHex(skb []byte) (pk string, err error) {
signer := &p256k.Signer{}
if err = signer.InitSec(skb); chk.E(err) {
return
}
return hex.Enc(signer.Pub()), nil
}
// IsValid32ByteHex checks that a hex string is a valid 32-byte lower-case hex
// encoded value as per the nostr NIP-01 spec.
func IsValid32ByteHex[V []byte | string](pk V) bool {
if !utils.FastEqual(bytes.ToLower([]byte(pk)), []byte(pk)) {
// reject anything containing upper-case hex digits
return false
}
if len(pk) != 64 {
return false
}
dec := make([]byte, 32)
if _, err := hex.DecBytes(dec, []byte(pk)); chk.E(err) {
return false
}
return true
}
// IsValidPublicKey checks that a hex encoded public key is a valid BIP-340 public key.
func IsValidPublicKey[V []byte | string](pk V) bool {
v, _ := hex.Dec(string(pk))
_, err := schnorr.ParsePubKey(v)
return err == nil
}
// HexPubkeyToBytes decodes a pubkey from hex encoded string/bytes.
func HexPubkeyToBytes[V []byte | string](hpk V) (pkb []byte, err error) {
return hex.DecAppend(nil, []byte(hpk))
}
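A typical round trip through this package might look like the following sketch (assuming the next.orly.dev module is on the import path):

package main

import (
	"fmt"

	"next.orly.dev/pkg/crypto/keys"
)

func main() {
	// generate a fresh secret key
	skb, err := keys.GenerateSecretKey()
	if err != nil {
		panic(err)
	}
	// derive the BIP-340 public key for it
	pk, err := keys.SecretBytesToPubKeyHex(skb)
	if err != nil {
		panic(err)
	}
	fmt.Println("pubkey:", pk)
	// the validators used elsewhere in the relay should both accept it
	fmt.Println("valid 32-byte hex:", keys.IsValid32ByteHex(pk))
	fmt.Println("valid BIP-340 point:", keys.IsValidPublicKey(pk))
}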

View File

@@ -52,8 +52,18 @@ func New(
}
opts := badger.DefaultOptions(d.dataDir)
-opts.BlockCacheSize = int64(units.Gb)
-opts.BlockSize = units.Gb
+// Use sane defaults to avoid excessive memory usage during startup.
+// Badger's default BlockSize is small (e.g., 4KB). Overriding it to very large values
+// can cause massive allocations and OOM panics during deployments.
+// Set BlockCacheSize to a moderate value and keep BlockSize small.
+opts.BlockCacheSize = int64(256 * units.Mb) // 256 MB cache
+opts.BlockSize = 4 * units.Kb // 4 KB block size
+// Prevent huge allocations during table building and memtable flush.
+// Badger's TableBuilder buffer is sized by BaseTableSize; ensure it's small.
+opts.BaseTableSize = 64 * units.Mb // 64 MB per table (default ~2MB, increased for fewer files but safe)
+opts.MemTableSize = 64 * units.Mb // 64 MB memtable to match table size
+// Keep value log files to a moderate size as well
+opts.ValueLogFileSize = 256 * units.Mb // 256 MB value log files
opts.CompactL0OnClose = true
opts.LmaxCompaction = true
opts.Compression = options.None
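For reference, the same tuning expressed against the stock badger v4 builder API would look roughly like this (a sketch using plain byte counts, since the units package above is repo-local):

package config

import "github.com/dgraph-io/badger/v4"

// moderateOptions mirrors the settings above using badger's With* builders.
func moderateOptions(dir string) badger.Options {
	return badger.DefaultOptions(dir).
		WithBlockCacheSize(256 << 20).   // 256 MB block cache
		WithBlockSize(4 << 10).          // 4 KB blocks (badger's default)
		WithBaseTableSize(64 << 20).     // 64 MB SST tables
		WithMemTableSize(64 << 20).      // 64 MB memtables to match
		WithValueLogFileSize(256 << 20). // 256 MB value log files
		WithCompactL0OnClose(true)
}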

View File

@@ -2,6 +2,7 @@ package database
import (
"bytes"
"fmt"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
@@ -25,8 +26,23 @@ func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
if v, err = item.ValueCopy(nil); chk.E(err) {
return
}
// Check if we have valid data before attempting to unmarshal
if len(v) < 32+32+1+2+1+1+64 { // ID + Pubkey + min varint fields + Sig
err = fmt.Errorf(
"incomplete event data: got %d bytes, expected at least %d",
len(v), 32+32+1+2+1+1+64,
)
return
}
ev = new(event.E)
-if err = ev.UnmarshalBinary(bytes.NewBuffer(v)); chk.E(err) {
+if err = ev.UnmarshalBinary(bytes.NewBuffer(v)); err != nil {
// Add more context to EOF errors for debugging
if err.Error() == "EOF" {
err = fmt.Errorf(
"EOF while unmarshaling event (serial=%v, data_len=%d): %w",
ser, len(v), err,
)
}
return
}
return

View File

@@ -0,0 +1,68 @@
package database
import (
"bytes"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/event"
)
// FetchEventsBySerials fetches multiple events by their serials in a single database transaction.
// Returns a map of serial uint64 value to event, only including successfully fetched events.
func (d *D) FetchEventsBySerials(serials []*types.Uint40) (events map[uint64]*event.E, err error) {
events = make(map[uint64]*event.E)
if len(serials) == 0 {
return events, nil
}
if err = d.View(
func(txn *badger.Txn) (err error) {
for _, ser := range serials {
buf := new(bytes.Buffer)
if err = indexes.EventEnc(ser).MarshalWrite(buf); chk.E(err) {
// Skip this serial on error but continue with others
continue
}
var item *badger.Item
if item, err = txn.Get(buf.Bytes()); err != nil {
// Skip this serial if not found but continue with others
err = nil
continue
}
var v []byte
if v, err = item.ValueCopy(nil); chk.E(err) {
// Skip this serial on error but continue with others
err = nil
continue
}
// Check if we have valid data before attempting to unmarshal
if len(v) < 32+32+1+2+1+1+64 { // ID + Pubkey + min varint fields + Sig
// Skip this serial - incomplete data
continue
}
ev := new(event.E)
if err = ev.UnmarshalBinary(bytes.NewBuffer(v)); err != nil {
// Skip this serial on unmarshal error but continue with others
err = nil
continue
}
// Successfully unmarshaled event, add to results
events[ser.Get()] = ev
}
return nil
},
); err != nil {
return
}
return events, nil
}
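The intended call pattern is to resolve index matches to serials first and then batch-fetch them in one transaction, something like this sketch (written as if it lived in the database package; "fmt" import assumed):

// printEvents batch-fetches the given serials; serials that were missing or
// corrupt are simply absent from the returned map rather than failing the call.
func printEvents(d *D, serials []*types.Uint40) error {
	events, err := d.FetchEventsBySerials(serials)
	if err != nil {
		return err
	}
	for ser, ev := range events {
		fmt.Printf("serial %d -> event %x\n", ser, ev.ID)
	}
	return nil
}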

View File

@@ -153,5 +153,35 @@ func GetIndexesForEvent(ev *event.E, serial uint64) (
if err = appendIndexBytes(&idxs, kindPubkeyIndex); chk.E(err) {
return
}
// Word token indexes (from content)
if len(ev.Content) > 0 {
for _, h := range TokenHashes(ev.Content) {
w := new(Word)
w.FromWord(h) // 8-byte truncated hash
wIdx := indexes.WordEnc(w, ser)
if err = appendIndexBytes(&idxs, wIdx); chk.E(err) {
return
}
}
}
// Extend full-text search to include all fields of all tags
if ev.Tags != nil && ev.Tags.Len() > 0 {
for _, t := range *ev.Tags {
for _, field := range t.T { // include key and all values
if len(field) == 0 {
continue
}
for _, h := range TokenHashes(field) {
w := new(Word)
w.FromWord(h)
wIdx := indexes.WordEnc(w, ser)
if err = appendIndexBytes(&idxs, wIdx); chk.E(err) {
return
}
}
}
}
}
return
}
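TokenHashes itself is not shown in this diff, but the wrd index only ever stores a fixed 8-byte truncated hash per token. A plausible stdlib sketch of that idea (the function name and hash choice here are hypothetical, not the repo's actual implementation):

package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// tokenHash is a hypothetical stand-in for the per-word hash used by the
// wrd index: hash the lower-cased token and keep the first 8 bytes.
func tokenHash(word string) []byte {
	h := sha256.Sum256([]byte(strings.ToLower(word)))
	return h[:8]
}

func main() {
	for _, w := range []string{"alpha", "ALPHA", "beta"} {
		// identical tokens hash identically regardless of case
		fmt.Printf("%-5s -> %x\n", w, tokenHash(w))
	}
}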

View File

@@ -113,6 +113,27 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
return
}
// Word search: if Search field is present, generate word index ranges
if len(f.Search) > 0 {
for _, h := range TokenHashes(f.Search) {
w := new(types2.Word)
w.FromWord(h)
buf := new(bytes.Buffer)
idx := indexes.WordEnc(w, nil)
if err = idx.MarshalWrite(buf); chk.E(err) {
return
}
b := buf.Bytes()
end := make([]byte, len(b))
copy(end, b)
for i := 0; i < 5; i++ { // match any serial
end = append(end, 0xff)
}
idxs = append(idxs, Range{b, end})
}
return
}
caStart := new(types2.Uint64)
caEnd := new(types2.Uint64)
@@ -362,7 +383,6 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
if f.Authors != nil && f.Authors.Len() > 0 {
for _, author := range f.Authors.T {
var p *types2.PubHash
-log.I.S(author)
if p, err = CreatePubHashFromData(author); chk.E(err) {
return
}
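The five 0xff bytes appended above widen a word-index prefix into a range that covers every possible 5-byte serial suffix. A small stdlib sketch of the same trick:

package main

import (
	"bytes"
	"fmt"
)

// searchRange widens an index prefix into [start, end] so that any key made
// of the prefix plus a 5-byte serial sorts inside the range.
func searchRange(prefix []byte) (start, end []byte) {
	start = prefix
	end = append(append([]byte{}, prefix...), 0xff, 0xff, 0xff, 0xff, 0xff)
	return
}

func main() {
	prefix := []byte{'w', 'r', 'd', 0xde, 0xad}
	start, end := searchRange(prefix)
	key := append(append([]byte{}, prefix...), 0, 0, 0, 0, 7) // prefix + serial 7
	ok := bytes.Compare(key, start) >= 0 && bytes.Compare(key, end) <= 0
	fmt.Println("key falls inside the range:", ok) // true
}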

View File

@@ -8,6 +8,7 @@ import (
"lol.mleku.dev/errorf"
"lol.mleku.dev/log"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/tag"
@@ -64,6 +65,99 @@ func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
return
}
// GetSerialsByIds takes a tag.T containing multiple IDs and returns a map of
// IDs (keyed by the raw ID bytes as a string) to their corresponding serial
// numbers. It directly queries the IdPrefix index for matching IDs, which is
// more efficient than using GetIndexesFromFilter.
func (d *D) GetSerialsByIds(ids *tag.T) (
serials map[string]*types.Uint40, err error,
) {
return d.GetSerialsByIdsWithFilter(ids, nil)
}
// GetSerialsByIdsWithFilter takes a tag.T containing multiple IDs and returns a
// map of IDs to their corresponding serial numbers, applying a filter function
// to each event. The function directly creates ID index prefixes for efficient querying.
func (d *D) GetSerialsByIdsWithFilter(
ids *tag.T, fn func(ev *event.E, ser *types.Uint40) bool,
) (serials map[string]*types.Uint40, err error) {
log.T.F("GetSerialsByIdsWithFilter: input ids count=%d", ids.Len())
// Initialize the result map
serials = make(map[string]*types.Uint40)
// Return early if no IDs are provided
if ids.Len() == 0 {
return
}
// Process all IDs in a single transaction
if err = d.View(
func(txn *badger.Txn) (err error) {
it := txn.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
// Process each ID sequentially
for _, id := range ids.T {
// idHex := hex.Enc(id)
// Get the index prefix for this ID
var idxs []Range
if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.NewFromBytesSlice(id)}); chk.E(err) {
// Skip this ID if we can't create its index
continue
}
// Skip if no index was created
if len(idxs) == 0 {
continue
}
// Seek to the start of this ID's range in the database
it.Seek(idxs[0].Start)
if it.ValidForPrefix(idxs[0].Start) {
// Found an entry for this ID
item := it.Item()
key := item.Key()
// Extract the serial number from the key
ser := new(types.Uint40)
buf := bytes.NewBuffer(key[len(key)-5:])
if err = ser.UnmarshalRead(buf); chk.E(err) {
continue
}
// If a filter function is provided, fetch the event and apply the filter
if fn != nil {
var ev *event.E
if ev, err = d.FetchEventBySerial(ser); err != nil {
// Skip this event if we can't fetch it
continue
}
// Apply the filter
if !fn(ev, ser) {
// Skip this event if it doesn't pass the filter
continue
}
}
// Store the serial in the result map, keyed by the raw ID bytes
serials[string(id)] = ser
}
}
return
},
); chk.E(err) {
return
}
log.T.F(
"GetSerialsByIdsWithFilter: found %d serials out of %d requested ids",
len(serials), ids.Len(),
)
return
}
// func (d *D) GetSerialBytesById(id []byte) (ser []byte, err error) {
// var idxs []Range
// if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.New(id)}); chk.E(err) {

View File

@@ -48,9 +48,11 @@ func TestGetSerialById(t *testing.T) {
// Unmarshal the event
if _, err = ev.Unmarshal(b); chk.E(err) {
+ev.Free()
t.Fatal(err)
}
+ev.Free()
events = append(events, ev)
// Save the event to the database

View File

@@ -55,8 +55,10 @@ func TestGetSerialsByRange(t *testing.T) {
// Unmarshal the event
if _, err = ev.Unmarshal(b); chk.E(err) {
+ev.Free()
t.Fatal(err)
}
+ev.Free()
events = append(events, ev)

81
pkg/database/identity.go Normal file
View File

@@ -0,0 +1,81 @@
package database
import (
"errors"
"fmt"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/crypto/keys"
"next.orly.dev/pkg/encoders/hex"
)
const relayIdentitySecretKey = "relay:identity:sk"
// GetRelayIdentitySecret returns the relay identity secret key bytes if present.
// If the key is not found, returns (nil, badger.ErrKeyNotFound).
func (d *D) GetRelayIdentitySecret() (skb []byte, err error) {
err = d.DB.View(func(txn *badger.Txn) error {
item, err := txn.Get([]byte(relayIdentitySecretKey))
if err != nil {
// propagates badger.ErrKeyNotFound so callers can detect absence
return err
}
return item.Value(func(val []byte) error {
// value stored as hex string
b, err := hex.Dec(string(val))
if err != nil {
return err
}
skb = make([]byte, len(b))
copy(skb, b)
return nil
})
})
return
}
// SetRelayIdentitySecret stores the relay identity secret key bytes (expects 32 bytes).
func (d *D) SetRelayIdentitySecret(skb []byte) (err error) {
if len(skb) != 32 {
return fmt.Errorf("invalid secret key length: %d", len(skb))
}
val := []byte(hex.Enc(skb))
return d.DB.Update(func(txn *badger.Txn) error {
return txn.Set([]byte(relayIdentitySecretKey), val)
})
}
// GetOrCreateRelayIdentitySecret retrieves the existing relay identity secret
// key or creates and stores a new one if none exists.
func (d *D) GetOrCreateRelayIdentitySecret() (skb []byte, err error) {
// Fast path: return the existing key if present
if skb, err = d.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
return skb, nil
}
if err != nil && !errors.Is(err, badger.ErrKeyNotFound) {
return nil, err
}
// Create new key and store atomically
var gen []byte
if gen, err = keys.GenerateSecretKey(); chk.E(err) {
return nil, err
}
if err = d.SetRelayIdentitySecret(gen); chk.E(err) {
return nil, err
}
log.I.F("generated new relay identity key (pub=%s)", mustPub(gen))
return gen, nil
}
func mustPub(skb []byte) string {
pk, err := keys.SecretBytesToPubKeyHex(skb)
if err != nil {
return ""
}
return pk
}
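Startup code can pin the relay's identity with a few lines along these lines (a sketch, as if in the database package; d is an open *D):

// initIdentity loads or creates the persistent relay key and returns its
// public key as hex.
func initIdentity(d *D) (pub string, err error) {
	var skb []byte
	if skb, err = d.GetOrCreateRelayIdentitySecret(); err != nil {
		return
	}
	return keys.SecretBytesToPubKeyHex(skb)
}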

View File

@@ -69,6 +69,7 @@ const (
TagPubkeyPrefix = I("tpc") // tag, pubkey, created at
TagKindPubkeyPrefix = I("tkp") // tag, kind, pubkey, created at
WordPrefix = I("wrd") // word hash, serial
ExpirationPrefix = I("exp") // timestamp of expiration
VersionPrefix = I("ver") // database version number, for triggering reindexes when new keys are added (policy is add-only).
)
@@ -106,6 +107,8 @@ func Prefix(prf int) (i I) {
return ExpirationPrefix
case Version:
return VersionPrefix
case Word:
return WordPrefix
}
return
}
@@ -147,6 +150,8 @@ func Identify(r io.Reader) (i int, err error) {
case ExpirationPrefix:
i = Expiration
case WordPrefix:
i = Word
}
return
}
@@ -233,6 +238,21 @@ func FullIdPubkeyDec(
return New(NewPrefix(), ser, fid, p, ca)
}
// Word index for tokenized search terms
//
// 3 prefix|8 word-hash|5 serial
var Word = next()
func WordVars() (w *types.Word, ser *types.Uint40) {
return new(types.Word), new(types.Uint40)
}
func WordEnc(w *types.Word, ser *types.Uint40) (enc *T) {
return New(NewPrefix(Word), w, ser)
}
func WordDec(w *types.Word, ser *types.Uint40) (enc *T) {
return New(NewPrefix(), w, ser)
}
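The resulting wrd keys are a fixed 16 bytes: 3-byte prefix, 8-byte word hash, 5-byte serial. A standalone sketch of the layout, assuming the Uint40 serial is big-endian (which the 0xff range trick above requires for lexicographic order to match numeric order); the real encoder is indexes.WordEnc:

package main

import (
	"encoding/binary"
	"fmt"
)

// wordKey lays out a wrd index key by hand: prefix | word hash | serial.
func wordKey(hash [8]byte, serial uint64) []byte {
	key := make([]byte, 0, 16)
	key = append(key, 'w', 'r', 'd')
	key = append(key, hash[:]...)
	var ser [8]byte
	binary.BigEndian.PutUint64(ser[:], serial)
	return append(key, ser[3:]...) // keep the low 5 bytes
}

func main() {
	fmt.Printf("% x\n", wordKey([8]byte{0xde, 0xad, 0xbe, 0xef}, 42))
}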
// CreatedAt is an index that allows search for the timestamp on the event.
//
// 3 prefix|8 timestamp|5 serial

62
pkg/database/markers.go Normal file
View File

@@ -0,0 +1,62 @@
package database
import (
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
)
const (
markerPrefix = "MARKER:"
)
// SetMarker stores an arbitrary marker in the database
func (d *D) SetMarker(key string, value []byte) (err error) {
markerKey := []byte(markerPrefix + key)
err = d.Update(func(txn *badger.Txn) error {
return txn.Set(markerKey, value)
})
return
}
// GetMarker retrieves an arbitrary marker from the database
func (d *D) GetMarker(key string) (value []byte, err error) {
markerKey := []byte(markerPrefix + key)
err = d.View(func(txn *badger.Txn) error {
item, err := txn.Get(markerKey)
if err != nil {
return err
}
value, err = item.ValueCopy(nil)
return err
})
return
}
// HasMarker checks if a marker exists in the database
func (d *D) HasMarker(key string) (exists bool) {
markerKey := []byte(markerPrefix + key)
err := d.View(func(txn *badger.Txn) error {
_, err := txn.Get(markerKey)
return err
})
exists = err == nil // a missing marker is expected, not an error worth logging
return
}
// DeleteMarker removes a marker from the database
func (d *D) DeleteMarker(key string) (err error) {
markerKey := []byte(markerPrefix + key)
err = d.Update(func(txn *badger.Txn) error {
return txn.Delete(markerKey)
})
return
}
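Markers work well as idempotency flags for one-off maintenance jobs, e.g. this sketch (as if in the database package; the marker name is hypothetical):

// ensureWordBackfill runs the word-index backfill at most once per database.
func ensureWordBackfill(d *D) error {
	const marker = "wordindex:backfilled"
	if d.HasMarker(marker) {
		return nil
	}
	d.UpdateWordIndexes()
	return d.SetMarker(marker, []byte("done"))
}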

View File

@@ -14,7 +14,7 @@ import (
)
const (
-currentVersion uint32 = 1
+currentVersion uint32 = 2
)
func (d *D) RunMigrations() {
@@ -56,22 +56,8 @@ func (d *D) RunMigrations() {
}
if dbVersion == 0 {
log.D.F("no version tag found, creating...")
-// write the version tag now
-if err = d.Update(
-func(txn *badger.Txn) (err error) {
-buf := new(bytes.Buffer)
-vv := new(types.Uint32)
-vv.Set(currentVersion)
-log.I.S(vv)
-if err = indexes.VersionEnc(vv).MarshalWrite(buf); chk.E(err) {
-return
-}
-if err = txn.Set(buf.Bytes(), nil); chk.E(err) {
-return
-}
-return
-},
-); chk.E(err) {
+// write the version tag now (ensure any old tags are removed first)
+if err = d.writeVersionTag(currentVersion); chk.E(err) {
return
}
}
@@ -79,7 +65,136 @@ func (d *D) RunMigrations() {
log.I.F("migrating to version 1...")
// the first migration is expiration tags
d.UpdateExpirationTags()
// bump to version 1
_ = d.writeVersionTag(1)
}
if dbVersion < 2 {
log.I.F("migrating to version 2...")
// backfill word indexes
d.UpdateWordIndexes()
// bump to version 2
_ = d.writeVersionTag(2)
}
}
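The gate pattern above stays add-only and extends one step per version; a hypothetical step 3 would follow the same shape:

if dbVersion < 3 {
	log.I.F("migrating to version 3...")
	// d.BackfillSomeNewIndex() // hypothetical future migration
	_ = d.writeVersionTag(3)
}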
// writeVersionTag writes a new version tag key to the database (no value)
func (d *D) writeVersionTag(ver uint32) (err error) {
return d.Update(
func(txn *badger.Txn) (err error) {
// delete any existing version keys first (there should only be one, but be safe)
verPrf := new(bytes.Buffer)
if _, err = indexes.VersionPrefix.Write(verPrf); chk.E(err) {
return
}
it := txn.NewIterator(badger.IteratorOptions{Prefix: verPrf.Bytes()})
defer it.Close()
for it.Rewind(); it.Valid(); it.Next() {
item := it.Item()
key := item.KeyCopy(nil)
if err = txn.Delete(key); chk.E(err) {
return
}
}
// now write the new version key
buf := new(bytes.Buffer)
vv := new(types.Uint32)
vv.Set(ver)
if err = indexes.VersionEnc(vv).MarshalWrite(buf); chk.E(err) {
return
}
return txn.Set(buf.Bytes(), nil)
},
)
}
func (d *D) UpdateWordIndexes() {
log.T.F("updating word indexes...")
var err error
var wordIndexes [][]byte
// iterate all events and generate word index keys from content and tags
if err = d.View(
func(txn *badger.Txn) (err error) {
prf := new(bytes.Buffer)
if err = indexes.EventEnc(nil).MarshalWrite(prf); chk.E(err) {
return
}
it := txn.NewIterator(badger.IteratorOptions{Prefix: prf.Bytes()})
defer it.Close()
for it.Rewind(); it.Valid(); it.Next() {
item := it.Item()
var val []byte
if val, err = item.ValueCopy(nil); chk.E(err) {
err = nil // skip this event; don't let a stale error escape the scan
continue
}
// decode the event
ev := new(event.E)
if err = ev.UnmarshalBinary(bytes.NewBuffer(val)); chk.E(err) {
err = nil
continue
}
// log.I.F("updating word indexes for event: %s", ev.Serialize())
// read serial from key
key := item.Key()
ser := indexes.EventVars()
if err = indexes.EventDec(ser).UnmarshalRead(bytes.NewBuffer(key)); chk.E(err) {
err = nil
continue
}
// collect unique word hashes for this event
seen := make(map[string]struct{})
// from content
if len(ev.Content) > 0 {
for _, h := range TokenHashes(ev.Content) {
seen[string(h)] = struct{}{}
}
}
// from all tag fields (key and values)
if ev.Tags != nil && ev.Tags.Len() > 0 {
for _, t := range *ev.Tags {
for _, field := range t.T {
if len(field) == 0 {
continue
}
for _, h := range TokenHashes(field) {
seen[string(h)] = struct{}{}
}
}
}
}
// build keys
for k := range seen {
w := new(types.Word)
w.FromWord([]byte(k))
buf := new(bytes.Buffer)
if err = indexes.WordEnc(
w, ser,
).MarshalWrite(buf); chk.E(err) {
continue
}
wordIndexes = append(wordIndexes, buf.Bytes())
}
}
return
},
); chk.E(err) {
return
}
// sort the indexes for ordered writes
sort.Slice(
wordIndexes, func(i, j int) bool {
return bytes.Compare(
wordIndexes[i], wordIndexes[j],
) < 0
},
)
// write in a batch
batch := d.NewWriteBatch()
for _, v := range wordIndexes {
if err = batch.Set(v, nil); chk.E(err) {
continue
}
}
chk.E(batch.Flush()) // surface (but tolerate) a failed flush in the log
log.T.F("finished updating word indexes...")
}
func (d *D) UpdateExpirationTags() {

View File

@@ -0,0 +1,194 @@
package database
import (
"context"
"os"
"testing"
"time"
"lol.mleku.dev/chk"
"next.orly.dev/pkg/crypto/p256k"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/encoders/timestamp"
)
// helper to create a fresh DB
func newTestDB(t *testing.T) (*D, context.Context, context.CancelFunc, string) {
t.Helper()
tempDir, err := os.MkdirTemp("", "search-db-*")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
ctx, cancel := context.WithCancel(context.Background())
db, err := New(ctx, cancel, tempDir, "error")
if err != nil {
cancel()
os.RemoveAll(tempDir)
t.Fatalf("Failed to init DB: %v", err)
}
return db, ctx, cancel, tempDir
}
// TestQueryEventsBySearchTerms creates a small set of events with content and tags,
// saves them, then queries using filter.Search to ensure the word index works.
func TestQueryEventsBySearchTerms(t *testing.T) {
db, ctx, cancel, tempDir := newTestDB(t)
defer func() {
// cancel context first to stop background routines cleanly
cancel()
db.Close()
os.RemoveAll(tempDir)
}()
// signer for all events
sign := new(p256k.Signer)
if err := sign.Generate(); chk.E(err) {
t.Fatalf("signer generate: %v", err)
}
now := timestamp.Now().V
// Events to cover tokenizer rules:
// - regular words
// - URLs ignored
// - 64-char hex ignored
// - nostr: URIs ignored
// - #[n] mentions ignored
// - tag fields included in search
// 1. Contains words: "alpha beta", plus URL and hex (ignored)
ev1 := event.New()
ev1.Kind = kind.TextNote.K
ev1.Pubkey = sign.Pub()
ev1.CreatedAt = now - 5
ev1.Content = []byte("Alpha beta visit https://example.com deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
ev1.Tags = tag.NewS()
ev1.Sign(sign)
if _, _, err := db.SaveEvent(ctx, ev1); err != nil {
t.Fatalf("save ev1: %v", err)
}
// 2. Contains overlap word "beta" and unique "gamma" and nostr: URI ignored
ev2 := event.New()
ev2.Kind = kind.TextNote.K
ev2.Pubkey = sign.Pub()
ev2.CreatedAt = now - 4
ev2.Content = []byte("beta and GAMMA with nostr:nevent1qqqqq")
ev2.Tags = tag.NewS()
ev2.Sign(sign)
if _, _, err := db.SaveEvent(ctx, ev2); err != nil {
t.Fatalf("save ev2: %v", err)
}
// 3. Contains only a URL (should not create word tokens) and mention #[1] (ignored)
ev3 := event.New()
ev3.Kind = kind.TextNote.K
ev3.Pubkey = sign.Pub()
ev3.CreatedAt = now - 3
ev3.Content = []byte("see www.example.org #[1]")
ev3.Tags = tag.NewS()
ev3.Sign(sign)
if _, _, err := db.SaveEvent(ctx, ev3); err != nil {
t.Fatalf("save ev3: %v", err)
}
// 4. No content words, but tag value has searchable words: "delta epsilon"
ev4 := event.New()
ev4.Kind = kind.TextNote.K
ev4.Pubkey = sign.Pub()
ev4.CreatedAt = now - 2
ev4.Content = []byte("")
ev4.Tags = tag.NewS()
*ev4.Tags = append(*ev4.Tags, tag.NewFromAny("t", "delta epsilon"))
ev4.Sign(sign)
if _, _, err := db.SaveEvent(ctx, ev4); err != nil {
t.Fatalf("save ev4: %v", err)
}
// 5. Another event with both content and tag tokens for ordering checks
ev5 := event.New()
ev5.Kind = kind.TextNote.K
ev5.Pubkey = sign.Pub()
ev5.CreatedAt = now - 1
ev5.Content = []byte("alpha DELTA mixed-case and link http://foo.bar")
ev5.Tags = tag.NewS()
*ev5.Tags = append(*ev5.Tags, tag.NewFromAny("t", "zeta"))
ev5.Sign(sign)
if _, _, err := db.SaveEvent(ctx, ev5); err != nil {
t.Fatalf("save ev5: %v", err)
}
// Small sleep to ensure created_at ordering is the only factor
time.Sleep(5 * time.Millisecond)
// Helper to run a search and return IDs
run := func(q string) ([]*event.E, error) {
f := &filter.F{Search: []byte(q)}
return db.QueryEvents(ctx, f)
}
// Single-term search: alpha -> should match ev1 and ev5 ordered by created_at desc (ev5 newer)
if evs, err := run("alpha"); err != nil {
t.Fatalf("search alpha: %v", err)
} else {
if len(evs) != 2 {
t.Fatalf("alpha expected 2 results, got %d", len(evs))
}
if !(evs[0].CreatedAt >= evs[1].CreatedAt) {
t.Fatalf("results not ordered by created_at desc")
}
}
// Overlap term beta -> ev1 and ev2
if evs, err := run("beta"); err != nil {
t.Fatalf("search beta: %v", err)
} else if len(evs) != 2 {
t.Fatalf("beta expected 2 results, got %d", len(evs))
}
// Unique term gamma -> only ev2
if evs, err := run("gamma"); err != nil {
t.Fatalf("search gamma: %v", err)
} else if len(evs) != 1 {
t.Fatalf("gamma expected 1 result, got %d", len(evs))
}
// URL terms should be ignored: example -> appears only as URL in ev1/ev3/ev5; tokenizer ignores URLs so expect 0
if evs, err := run("example"); err != nil {
t.Fatalf("search example: %v", err)
} else if len(evs) != 0 {
t.Fatalf("example expected 0 results (URL tokens ignored), got %d", len(evs))
}
// Tag words searchable: delta should match ev4 and ev5 (delta in tag for ev4, in content for ev5)
if evs, err := run("delta"); err != nil {
t.Fatalf("search delta: %v", err)
} else if len(evs) != 2 {
t.Fatalf("delta expected 2 results, got %d", len(evs))
}
// Very short token ignored: single-letter should yield 0
if evs, err := run("a"); err != nil {
t.Fatalf("search short token: %v", err)
} else if len(evs) != 0 {
t.Fatalf("single-letter expected 0 results, got %d", len(evs))
}
// 64-char hex should be ignored
hex64 := "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
if evs, err := run(hex64); err != nil {
t.Fatalf("search hex64: %v", err)
} else if len(evs) != 0 {
t.Fatalf("hex64 expected 0 results, got %d", len(evs))
}
// nostr: scheme ignored
if evs, err := run("nostr:nevent1qqqqq"); err != nil {
t.Fatalf("search nostr: %v", err)
} else if len(evs) != 0 {
t.Fatalf("nostr: expected 0 results, got %d", len(evs))
}
}

View File

@@ -5,7 +5,6 @@ import (
"context"
"sort"
"strconv"
"strings"
"time"
"lol.mleku.dev/chk"
@@ -43,73 +42,66 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
var expDeletes types.Uint40s
var expEvs event.S
if f.Ids != nil && f.Ids.Len() > 0 {
-// for _, id := range f.Ids.T {
-// log.T.F("QueryEvents: looking for ID=%s", hex.Enc(id))
-// }
-// log.T.F("QueryEvents: ids path, count=%d", f.Ids.Len())
-for _, idx := range f.Ids.T {
-// log.T.F("QueryEvents: lookup id=%s", hex.Enc(idx))
-// we know there is only Ids in this, so run the ID query and fetch.
-var ser *types.Uint40
-var idErr error
-if ser, idErr = d.GetSerialById(idx); idErr != nil {
-// Check if this is a "not found" error which is expected for IDs we don't have
-if strings.Contains(idErr.Error(), "id not found in database") {
-// log.T.F(
-// "QueryEvents: ID not found in database: %s",
-// hex.Enc(idx),
-// )
-} else {
-// Log unexpected errors but continue processing other IDs
-// log.E.F(
-// "QueryEvents: error looking up id=%s err=%v",
-// hex.Enc(idx), idErr,
-// )
-}
+// Get all serials for the requested IDs in a single batch operation
+log.T.F("QueryEvents: ids path, count=%d", f.Ids.Len())
+// Use GetSerialsByIds to batch process all IDs at once
+serials, idErr := d.GetSerialsByIds(f.Ids)
+if idErr != nil {
+log.E.F("QueryEvents: error looking up ids: %v", idErr)
+// Continue with whatever IDs we found
+}
+// Convert serials map to slice for batch fetch
+var serialsSlice []*types.Uint40
+idHexToSerial := make(map[uint64]string) // map serial value back to the original ID (raw bytes)
+for idHex, ser := range serials {
+serialsSlice = append(serialsSlice, ser)
+idHexToSerial[ser.Get()] = idHex
+}
+// Fetch all events in a single batch operation
+var fetchedEvents map[uint64]*event.E
+if fetchedEvents, err = d.FetchEventsBySerials(serialsSlice); err != nil {
+log.E.F("QueryEvents: batch fetch failed: %v", err)
+return
+}
+// Process each successfully fetched event and apply filters
+for serialValue, ev := range fetchedEvents {
+idHex := idHexToSerial[serialValue]
+// Convert serial value back to Uint40 for expiration handling
+ser := new(types.Uint40)
+if err = ser.Set(serialValue); err != nil {
+log.T.F(
+"QueryEvents: error converting serial %d: %v", serialValue,
+err,
+)
+err = nil // don't carry a stale error out of the loop
+continue
+}
-// Check if the serial is nil, which indicates the ID wasn't found
-if ser == nil {
-// log.T.F("QueryEvents: Serial is nil for ID: %s", hex.Enc(idx))
-continue
-}
-// fetch the events
-var ev *event.E
-if ev, err = d.FetchEventBySerial(ser); err != nil {
-// log.T.F(
-// "QueryEvents: fetch by serial failed for id=%s ser=%v err=%v",
-// hex.Enc(idx), ser, err,
-// )
-continue
-}
-// log.T.F(
-// "QueryEvents: found id=%s kind=%d created_at=%d",
-// hex.Enc(ev.ID), ev.Kind, ev.CreatedAt,
-// )
// check for an expiration tag and delete after returning the result
if CheckExpiration(ev) {
log.T.F(
"QueryEvents: id=%s filtered out due to expiration",
hex.Enc(ev.ID),
"QueryEvents: id=%s filtered out due to expiration", idHex,
)
expDeletes = append(expDeletes, ser)
expEvs = append(expEvs, ev)
continue
}
// skip events that have been deleted by a proper deletion event
if derr := d.CheckForDeleted(ev, nil); derr != nil {
-// log.T.F(
-// "QueryEvents: id=%s filtered out due to deletion: %v",
-// hex.Enc(ev.ID), derr,
-// )
+// log.T.F("QueryEvents: id=%s filtered out due to deletion: %v", idHex, derr)
continue
}
-// log.T.F(
-// "QueryEvents: id=%s SUCCESSFULLY FOUND, adding to results",
-// hex.Enc(ev.ID),
-// )
+// Add the event to the results
evs = append(evs, ev)
+// log.T.F("QueryEvents: id=%s SUCCESSFULLY FOUND, adding to results", idHex)
}
// sort the events by timestamp
sort.Slice(
evs, func(i, j int) bool {
@@ -159,16 +151,33 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
// Add deletion events to the list of events to process
idPkTs = append(idPkTs, deletionIdPkTs...)
}
// First pass: collect all deletion events
// Prepare serials for batch fetch
var allSerials []*types.Uint40
serialToIdPk := make(map[uint64]*store.IdPkTs)
for _, idpk := range idPkTs {
var ev *event.E
ser := new(types.Uint40)
-if err = ser.Set(idpk.Ser); chk.E(err) {
+if err = ser.Set(idpk.Ser); err != nil {
continue
}
if ev, err = d.FetchEventBySerial(ser); err != nil {
allSerials = append(allSerials, ser)
serialToIdPk[ser.Get()] = idpk
}
// Fetch all events in batch
var allEvents map[uint64]*event.E
if allEvents, err = d.FetchEventsBySerials(allSerials); err != nil {
log.E.F("QueryEvents: batch fetch failed in non-IDs path: %v", err)
return
}
// First pass: collect all deletion events
for serialValue, ev := range allEvents {
// Convert serial value back to Uint40 for expiration handling
ser := new(types.Uint40)
if err = ser.Set(serialValue); err != nil {
continue
}
// check for an expiration tag and delete after returning the result
if CheckExpiration(ev) {
expDeletes = append(expDeletes, ser)
@@ -235,7 +244,7 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
// For replaceable events, we need to check if there are any
// e-tags that reference events with the same kind and pubkey
for _, eTag := range eTags {
if eTag.Len() < 2 {
if eTag.Len() != 64 {
continue
}
// Get the event ID from the e-tag
@@ -292,15 +301,7 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
}
}
// Second pass: process all events, filtering out deleted ones
-for _, idpk := range idPkTs {
-var ev *event.E
-ser := new(types.Uint40)
-if err = ser.Set(idpk.Ser); chk.E(err) {
-continue
-}
-if ev, err = d.FetchEventBySerial(ser); err != nil {
-continue
-}
+for _, ev := range allEvents {
// Add logging for tag filter debugging
if f.Tags != nil && f.Tags.Len() > 0 {
// var eventTags []string

View File

@@ -56,8 +56,10 @@ func setupTestDB(t *testing.T) (
// Unmarshal the event
if _, err = ev.Unmarshal(b); chk.E(err) {
+ev.Free()
t.Fatal(err)
}
+ev.Free()
events = append(events, ev)

View File

@@ -173,9 +173,9 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
}
}
if ev.CreatedAt < maxTs {
-err = fmt.Errorf(
-"blocked: was deleted by address %s: event is older than the delete: event: %d delete: %d",
-at, ev.CreatedAt, maxTs,
+err = errorf.E(
+"blocked: %0x was deleted by address %s because it is older than the delete: event: %d delete: %d",
+ev.ID, at, ev.CreatedAt, maxTs,
)
return
}
@@ -203,22 +203,14 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
return
}
if len(s) > 0 {
-// For e-tag deletions (delete by ID), any deletion event means the event cannot be resubmitted
-// regardless of timestamp, since it's a specific deletion of this exact event
-err = errorf.E(
-"blocked: was deleted by ID and cannot be resubmitted",
-// ev.ID,
-)
+// Any e-tag deletion found means the exact event was deleted and cannot be resubmitted
+err = errorf.E("blocked: %0x has been deleted", ev.ID)
return
}
}
if len(sers) > 0 {
-// For e-tag deletions (delete by ID), any deletion event means the event cannot be resubmitted
-// regardless of timestamp, since it's a specific deletion of this exact event
-err = errorf.E(
-"blocked: was deleted by ID and cannot be resubmitted",
-// ev.ID,
-)
+// Any e-tag deletion found means the exact event was deleted and cannot be resubmitted
+err = errorf.E("blocked: %0x has been deleted", ev.ID)
return
}

View File

@@ -13,7 +13,9 @@ import (
// QueryForIds retrieves a list of IdPkTs based on the provided filter.
// It supports filtering by ranges and tags but disallows filtering by Ids.
-// Results are sorted by timestamp in reverse chronological order.
+// Results are sorted by timestamp in reverse chronological order by default.
+// When a search query is present, results are ranked by a 50/50 blend of
+// match count (how many distinct search terms matched) and recency.
// Returns an error if the filter contains Ids or if any operation fails.
func (d *D) QueryForIds(c context.Context, f *filter.F) (
idPkTs []*store.IdPkTs, err error,
@@ -29,6 +31,9 @@ func (d *D) QueryForIds(c context.Context, f *filter.F) (
}
var results []*store.IdPkTs
var founds []*types.Uint40
// When searching, we want to count how many index ranges (search terms)
// matched each note. We'll track counts by serial.
counts := make(map[uint64]int)
for _, idx := range idxs {
if founds, err = d.GetSerialsByRange(idx); chk.E(err) {
return
@@ -37,6 +42,12 @@ func (d *D) QueryForIds(c context.Context, f *filter.F) (
if tmp, err = d.GetFullIdPubkeyBySerials(founds); chk.E(err) {
return
}
// If this query is driven by Search terms, increment count per serial
if len(f.Search) > 0 {
for _, v := range tmp {
counts[v.Ser]++
}
}
results = append(results, tmp...)
}
// deduplicate in case this somehow happened (such as two or more
@@ -48,12 +59,58 @@ func (d *D) QueryForIds(c context.Context, f *filter.F) (
idPkTs = append(idPkTs, idpk)
}
}
// sort results by timestamp in reverse chronological order
sort.Slice(
idPkTs, func(i, j int) bool {
return idPkTs[i].Ts > idPkTs[j].Ts
},
)
if len(f.Search) == 0 {
// No search query: sort by timestamp in reverse chronological order
sort.Slice(
idPkTs, func(i, j int) bool {
return idPkTs[i].Ts > idPkTs[j].Ts
},
)
} else {
// Search query present: blend match count relevance with recency (50/50)
// Normalize both match count and timestamp to [0,1] and compute score.
var maxCount int
var minTs, maxTs int64
if len(idPkTs) > 0 {
minTs, maxTs = idPkTs[0].Ts, idPkTs[0].Ts
}
for _, v := range idPkTs {
if c := counts[v.Ser]; c > maxCount {
maxCount = c
}
if v.Ts < minTs {
minTs = v.Ts
}
if v.Ts > maxTs {
maxTs = v.Ts
}
}
// Precompute denominator to avoid div-by-zero
tsSpan := maxTs - minTs
if tsSpan <= 0 {
tsSpan = 1
}
if maxCount <= 0 {
maxCount = 1
}
sort.Slice(
idPkTs, func(i, j int) bool {
ci := float64(counts[idPkTs[i].Ser]) / float64(maxCount)
cj := float64(counts[idPkTs[j].Ser]) / float64(maxCount)
ai := float64(idPkTs[i].Ts-minTs) / float64(tsSpan)
aj := float64(idPkTs[j].Ts-minTs) / float64(tsSpan)
si := 0.5*ci + 0.5*ai
sj := 0.5*cj + 0.5*aj
if si == sj {
// tie-break by recency
return idPkTs[i].Ts > idPkTs[j].Ts
}
return si > sj
},
)
}
if f.Limit != nil && len(idPkTs) > int(*f.Limit) {
idPkTs = idPkTs[:*f.Limit]
}
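The blend is easiest to see in isolation. This self-contained sketch reproduces the scoring and shows an older note with more matched terms outranking a newer one:

package main

import (
	"fmt"
	"sort"
)

type hit struct {
	ser     uint64
	ts      int64
	matches int // distinct search terms that matched this note
}

// rank orders hits by 0.5*normalized(matches) + 0.5*normalized(ts),
// mirroring the blend above, with ties broken by recency.
func rank(hits []hit) {
	if len(hits) == 0 {
		return
	}
	maxCount, minTs, maxTs := 1, hits[0].ts, hits[0].ts
	for _, h := range hits {
		if h.matches > maxCount {
			maxCount = h.matches
		}
		if h.ts < minTs {
			minTs = h.ts
		}
		if h.ts > maxTs {
			maxTs = h.ts
		}
	}
	span := maxTs - minTs
	if span <= 0 {
		span = 1
	}
	score := func(h hit) float64 {
		return 0.5*float64(h.matches)/float64(maxCount) +
			0.5*float64(h.ts-minTs)/float64(span)
	}
	sort.Slice(hits, func(i, j int) bool {
		si, sj := score(hits[i]), score(hits[j])
		if si == sj {
			return hits[i].ts > hits[j].ts
		}
		return si > sj
	})
}

func main() {
	hits := []hit{{1, 100, 1}, {2, 50, 3}, {3, 90, 2}}
	rank(hits)
	fmt.Println(hits) // [{3 90 2} {1 100 1} {2 50 3}]
}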

View File

@@ -17,11 +17,12 @@ func (d *D) QueryForSerials(c context.Context, f *filter.F) (
var founds []*types.Uint40
var idPkTs []*store.IdPkTs
if f.Ids != nil && f.Ids.Len() > 0 {
-for _, id := range f.Ids.T {
-var ser *types.Uint40
-if ser, err = d.GetSerialById(id); chk.E(err) {
-return
-}
+// Use batch lookup to minimize transactions when resolving IDs to serials
+var serialMap map[string]*types.Uint40
+if serialMap, err = d.GetSerialsByIds(f.Ids); chk.E(err) {
+return
+}
+for _, ser := range serialMap {
+founds = append(founds, ser)
+}
var tmp []*store.IdPkTs

View File

@@ -9,10 +9,12 @@ import (
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/tag"
)
@@ -230,10 +232,10 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
return
},
)
-// log.T.F(
-// "total data written: %d bytes keys %d bytes values for event ID %s", kc,
-// vc, hex.Enc(ev.ID),
-// )
+log.T.F(
+"total data written: %d bytes keys %d bytes values for event ID %s", kc,
+vc, hex.Enc(ev.ID),
+)
// log.T.C(
// func() string {
// return fmt.Sprintf("event:\n%s\n", ev.Serialize())

View File

@@ -188,3 +188,30 @@ func (d *D) GetPaymentHistory(pubkey []byte) ([]Payment, error) {
return payments, err
}
// IsFirstTimeUser checks if a user is logging in for the first time and marks them as seen
func (d *D) IsFirstTimeUser(pubkey []byte) (bool, error) {
key := fmt.Sprintf("firstlogin:%s", hex.EncodeToString(pubkey))
isFirstTime := false
err := d.DB.Update(
func(txn *badger.Txn) error {
_, err := txn.Get([]byte(key))
if errors.Is(err, badger.ErrKeyNotFound) {
// First time - record the login
isFirstTime = true
now := time.Now()
data, err := json.Marshal(map[string]interface{}{
"first_login": now,
})
if err != nil {
return err
}
return txn.Set([]byte(key), data)
}
return err // Return any other error as-is
},
)
return isFirstTime, err
}
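A caller might use the first-login marker like this sketch (as if in the database package; welcome is a hypothetical placeholder):

// welcome stands in for whatever first-contact action the relay takes.
func welcome(pubkey []byte) {}

// onLogin applies first-time handling for a pubkey.
func onLogin(d *D, pubkey []byte) error {
	first, err := d.IsFirstTimeUser(pubkey)
	if err != nil {
		return err
	}
	if first {
		welcome(pubkey) // e.g. send an introductory notice
	}
	return nil
}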

Some files were not shown because too many files have changed in this diff.