Refactor export functionality in App.svelte to support both GET and POST methods for event exports, giving users at different permission levels one flexible endpoint. Update server-side handling to accept pubkey filtering and improve response handling for file downloads. Adjust UI components to reflect these changes, keeping the user experience seamless.

2025-10-09 14:55:29 +01:00
parent f85a8b99a3
commit 9f39ca8a62
36 changed files with 948 additions and 1091 deletions

View File

@@ -38,7 +38,7 @@ describing how the item is used.
For documentation on a package, summarise in up to 3 sentences the functions and
purpose of the package.
Do not use markdown ** or __ or any similar things in initial words of a bullet
Do not use markdown \*\* or \_\_ or any similar things in initial words of a bullet
point, instead use standard godoc style # prefix for header sections
ALWAYS separate each bullet point with an empty line, and ALWAYS indent them
@@ -90,10 +90,10 @@ A good typical example:
```
use the source of the relay-tester to help guide what expectations the test has,
and use context7 for information about the nostr protocol, and use additional
log statements to help locate the cause of bugs
always use Go v1.25.1 for everything involving Go
always use the nips repository also for information, found at ../github.com/nostr-protocol/nips attached to the project

View File

@@ -16,10 +16,9 @@ name: Go
on:
push:
tags:
- 'v[0-9]+.[0-9]+.[0-9]+'
- "v[0-9]+.[0-9]+.[0-9]+"
jobs:
build:
runs-on: ubuntu-latest
steps:
@@ -28,26 +27,25 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.25'
go-version: "1.25"
- name: Install libsecp256k1
run: ./scripts/ubuntu_install_libsecp256k1.sh
- name: Build with cgo
run: go build -v ./...
- name: Test with cgo
run: go test -v ./...
- name: Set CGO off
run: echo "CGO_ENABLED=0" >> $GITHUB_ENV
- name: Build
run: go build -v ./...
- name: Test
run: go test -v ./...
# release:
# needs: build
# runs-on: ubuntu-latest
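
The workflow builds and tests twice, once with cgo enabled for the libsecp256k1 bindings and once with CGO_ENABLED=0. A minimal sketch of how a Go package can select a cgo-backed implementation or a pure-Go fallback with build tags; the file and function names here are hypothetical, not the repository's actual layout:

```go
//go:build cgo

// verify_cgo.go: compiled only when cgo is available.
package sig

// VerifySchnorr delegates to a hypothetical libsecp256k1 binding.
func VerifySchnorr(pubkey, msg, sig []byte) bool {
	return secp256k1Verify(pubkey, msg, sig) // assumed cgo-backed helper
}
```

```go
//go:build !cgo

// verify_purego.go: pure-Go fallback used when CGO_ENABLED=0.
package sig

// VerifySchnorr uses a hypothetical pure-Go implementation instead.
func VerifySchnorr(pubkey, msg, sig []byte) bool {
	return pureGoVerify(pubkey, msg, sig) // assumed pure-Go helper
}
```

A plain `go build` picks the first file and `CGO_ENABLED=0 go build` picks the second, which is why the workflow runs both configurations.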

View File

@@ -186,10 +186,8 @@ func (s *Server) UserInterface() {
s.mux.HandleFunc("/api/auth/status", s.handleAuthStatus)
s.mux.HandleFunc("/api/auth/logout", s.handleAuthLogout)
s.mux.HandleFunc("/api/permissions/", s.handlePermissions)
// Export endpoints
// Export endpoint
s.mux.HandleFunc("/api/export", s.handleExport)
s.mux.HandleFunc("/api/export/mine", s.handleExportMine)
s.mux.HandleFunc("/export", s.handleExportAll)
// Events endpoints
s.mux.HandleFunc("/api/events/mine", s.handleEventsMine)
// Import endpoint (admin only)
@@ -442,9 +440,10 @@ func (s *Server) handlePermissions(w http.ResponseWriter, r *http.Request) {
w.Write(jsonData)
}
// handleExport streams all events as JSONL (NDJSON) using NIP-98 authentication. Admins only.
// handleExport streams events as JSONL (NDJSON) using NIP-98 authentication.
// Supports both GET (query params) and POST (JSON body) for pubkey filtering.
func (s *Server) handleExport(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
if r.Method != http.MethodGet && r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
@@ -467,95 +466,57 @@ func (s *Server) handleExport(w http.ResponseWriter, r *http.Request) {
return
}
// Optional filtering by pubkey(s)
var pks [][]byte
q := r.URL.Query()
for _, pkHex := range q["pubkey"] {
    if pkHex == "" {
        continue
    }
    if pk, err := hex.Dec(pkHex); !chk.E(err) {
        pks = append(pks, pk)
    }
}
w.Header().Set("Content-Type", "application/x-ndjson")
filename := "events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
w.Header().Set(
    "Content-Disposition", "attachment; filename=\""+filename+"\"",
)
// Parse pubkeys from request
var pks [][]byte
if r.Method == http.MethodPost {
    // Parse JSON body for pubkeys
    var requestBody struct {
        Pubkeys []string `json:"pubkeys"`
    }
    if err := json.NewDecoder(r.Body).Decode(&requestBody); err == nil {
        // If JSON parsing succeeds, use pubkeys from body
        for _, pkHex := range requestBody.Pubkeys {
            if pkHex == "" {
                continue
            }
            if pk, err := hex.Dec(pkHex); !chk.E(err) {
                pks = append(pks, pk)
            }
        }
    }
    // If JSON parsing fails, fall back to empty pubkeys (export all)
} else {
    // GET method - parse query parameters
    q := r.URL.Query()
    for _, pkHex := range q["pubkey"] {
        if pkHex == "" {
            continue
        }
        if pk, err := hex.Dec(pkHex); !chk.E(err) {
            pks = append(pks, pk)
        }
    }
}
// Determine filename based on whether filtering by pubkeys
var filename string
if len(pks) == 0 {
    filename = "all-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
} else if len(pks) == 1 {
    filename = "my-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
} else {
    filename = "filtered-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
}
w.Header().Set("Content-Type", "application/x-ndjson")
w.Header().Set("Content-Disposition", "attachment; filename=\""+filename+"\"")
// Stream export
s.D.Export(s.Ctx, w, pks...)
}
// handleExportMine streams only the authenticated user's events as JSONL (NDJSON) using NIP-98 authentication.
func (s *Server) handleExportMine(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Validate NIP-98 authentication
valid, pubkey, err := httpauth.CheckAuth(r)
if chk.E(err) || !valid {
errorMsg := "NIP-98 authentication validation failed"
if err != nil {
errorMsg = err.Error()
}
http.Error(w, errorMsg, http.StatusUnauthorized)
return
}
w.Header().Set("Content-Type", "application/x-ndjson")
filename := "my-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
w.Header().Set(
"Content-Disposition", "attachment; filename=\""+filename+"\"",
)
// Stream export for this user's pubkey only
s.D.Export(s.Ctx, w, pubkey)
}
// handleExportAll streams all events as JSONL (NDJSON) using NIP-98 authentication. Owner only.
func (s *Server) handleExportAll(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Validate NIP-98 authentication
valid, pubkey, err := httpauth.CheckAuth(r)
if chk.E(err) || !valid {
errorMsg := "NIP-98 authentication validation failed"
if err != nil {
errorMsg = err.Error()
}
http.Error(w, errorMsg, http.StatusUnauthorized)
return
}
// Check if user has owner permission
accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
if accessLevel != "owner" {
http.Error(w, "Owner permission required", http.StatusForbidden)
return
}
// Set response headers for file download
w.Header().Set("Content-Type", "application/x-ndjson")
filename := "all-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
w.Header().Set("Content-Disposition", "attachment; filename=\""+filename+"\"")
// Disable write timeouts for this operation
if flusher, ok := w.(http.Flusher); ok {
flusher.Flush()
}
// Stream export of all events
s.D.Export(s.Ctx, w)
}
// handleEventsMine returns the authenticated user's events in JSON format with pagination using NIP-98 authentication.
func (s *Server) handleEventsMine(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
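
For reference, a sketch of how a client might call the unified endpoint with a POST body and stream the NDJSON response to disk. The relay URL is a placeholder and the Authorization value stands in for a real signed NIP-98 event:

```go
package main

import (
	"bytes"
	"encoding/json"
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// Hypothetical filter; an empty list requests all events (admin/owner only).
	body, _ := json.Marshal(map[string][]string{
		"pubkeys": {"<hex pubkey>"}, // placeholder
	})
	req, err := http.NewRequest(http.MethodPost,
		"http://127.0.0.1:7777/api/export", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Placeholder: must be a base64 NIP-98 event signed for this URL and method.
	req.Header.Set("Authorization", "Nostr <nip98-event>")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	out, err := os.Create("events.jsonl")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	// The server streams NDJSON; copy it straight to the file.
	if _, err := io.Copy(out, resp.Body); err != nil {
		log.Fatal(err)
	}
}
```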

View File

@@ -1,63 +1,69 @@
html, body {
position: relative;
width: 100%;
height: 100%;
html,
body {
position: relative;
width: 100%;
height: 100%;
}
body {
color: #333;
margin: 0;
padding: 8px;
box-sizing: border-box;
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen-Sans, Ubuntu, Cantarell, "Helvetica Neue", sans-serif;
color: #333;
margin: 0;
padding: 8px;
box-sizing: border-box;
font-family:
-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen-Sans, Ubuntu,
Cantarell, "Helvetica Neue", sans-serif;
}
a {
color: rgb(0,100,200);
text-decoration: none;
color: rgb(0, 100, 200);
text-decoration: none;
}
a:hover {
text-decoration: underline;
text-decoration: underline;
}
a:visited {
color: rgb(0,80,160);
color: rgb(0, 80, 160);
}
label {
display: block;
display: block;
}
input, button, select, textarea {
font-family: inherit;
font-size: inherit;
-webkit-padding: 0.4em 0;
padding: 0.4em;
margin: 0 0 0.5em 0;
box-sizing: border-box;
border: 1px solid #ccc;
border-radius: 2px;
input,
button,
select,
textarea {
font-family: inherit;
font-size: inherit;
-webkit-padding: 0.4em 0;
padding: 0.4em;
margin: 0 0 0.5em 0;
box-sizing: border-box;
border: 1px solid #ccc;
border-radius: 2px;
}
input:disabled {
color: #ccc;
color: #ccc;
}
button {
color: #333;
background-color: #f4f4f4;
outline: none;
color: #333;
background-color: #f4f4f4;
outline: none;
}
button:disabled {
color: #999;
color: #999;
}
button:not(:disabled):active {
background-color: #ddd;
background-color: #ddd;
}
button:focus {
border-color: #666;
border-color: #666;
}

View File

@@ -1,18 +1,17 @@
<!DOCTYPE html>
<!doctype html>
<html lang="en">
<head>
<meta charset='utf-8'>
<meta name='viewport' content='width=device-width,initial-scale=1'>
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width,initial-scale=1" />
<title>ORLY?</title>
<link rel='icon' type='image/png' href='/orly.png'>
<link rel='stylesheet' href='/global.css'>
<link rel='stylesheet' href='/build/bundle.css'>
<link rel="icon" type="image/png" href="/orly.png" />
<link rel="stylesheet" href="/global.css" />
<link rel="stylesheet" href="/build/bundle.css" />
<script defer src='/build/bundle.js'></script>
</head>
<script defer src="/build/bundle.js"></script>
</head>
<body>
</body>
<body></body>
</html>

View File

@@ -1,78 +1,78 @@
import { spawn } from 'child_process';
import svelte from 'rollup-plugin-svelte';
import commonjs from '@rollup/plugin-commonjs';
import terser from '@rollup/plugin-terser';
import resolve from '@rollup/plugin-node-resolve';
import livereload from 'rollup-plugin-livereload';
import css from 'rollup-plugin-css-only';
import { spawn } from "child_process";
import svelte from "rollup-plugin-svelte";
import commonjs from "@rollup/plugin-commonjs";
import terser from "@rollup/plugin-terser";
import resolve from "@rollup/plugin-node-resolve";
import livereload from "rollup-plugin-livereload";
import css from "rollup-plugin-css-only";
const production = !process.env.ROLLUP_WATCH;
function serve() {
let server;
let server;
function toExit() {
if (server) server.kill(0);
}
function toExit() {
if (server) server.kill(0);
}
return {
writeBundle() {
if (server) return;
server = spawn('npm', ['run', 'start', '--', '--dev'], {
stdio: ['ignore', 'inherit', 'inherit'],
shell: true
});
return {
writeBundle() {
if (server) return;
server = spawn("npm", ["run", "start", "--", "--dev"], {
stdio: ["ignore", "inherit", "inherit"],
shell: true,
});
process.on('SIGTERM', toExit);
process.on('exit', toExit);
}
};
process.on("SIGTERM", toExit);
process.on("exit", toExit);
},
};
}
export default {
input: 'src/main.js',
output: {
sourcemap: true,
format: 'iife',
name: 'app',
file: 'dist/bundle.js'
},
plugins: [
svelte({
compilerOptions: {
// enable run-time checks when not in production
dev: !production
}
}),
// we'll extract any component CSS out into
// a separate file - better for performance
css({ output: 'bundle.css' }),
input: "src/main.js",
output: {
sourcemap: true,
format: "iife",
name: "app",
file: "dist/bundle.js",
},
plugins: [
svelte({
compilerOptions: {
// enable run-time checks when not in production
dev: !production,
},
}),
// we'll extract any component CSS out into
// a separate file - better for performance
css({ output: "bundle.css" }),
// If you have external dependencies installed from
// npm, you'll most likely need these plugins. In
// some cases you'll need additional configuration -
// consult the documentation for details:
// https://github.com/rollup/plugins/tree/master/packages/commonjs
resolve({
browser: true,
dedupe: ['svelte'],
exportConditions: ['svelte']
}),
commonjs(),
// If you have external dependencies installed from
// npm, you'll most likely need these plugins. In
// some cases you'll need additional configuration -
// consult the documentation for details:
// https://github.com/rollup/plugins/tree/master/packages/commonjs
resolve({
browser: true,
dedupe: ["svelte"],
exportConditions: ["svelte"],
}),
commonjs(),
// In dev mode, call `npm run start` once
// the bundle has been generated
!production && serve(),
// In dev mode, call `npm run start` once
// the bundle has been generated
!production && serve(),
// Watch the `public` directory and refresh the
// browser on changes when not in production
!production && livereload('public'),
// Watch the `public` directory and refresh the
// browser on changes when not in production
!production && livereload("public"),
// If we're building for production (npm run build
// instead of npm run dev), minify
production && terser()
],
watch: {
clearScreen: false
}
// If we're building for production (npm run build
// instead of npm run dev), minify
production && terser(),
],
watch: {
clearScreen: false,
},
};

View File

@@ -13,70 +13,78 @@
rm -rf test-template template && git clone sveltejs/template test-template && node scripts/setupTypeScript.js test-template
*/
import fs from "fs"
import path from "path"
import { argv } from "process"
import url from 'url';
import fs from "fs";
import path from "path";
import { argv } from "process";
import url from "url";
const __filename = url.fileURLToPath(import.meta.url);
const __dirname = url.fileURLToPath(new URL('.', import.meta.url));
const projectRoot = argv[2] || path.join(__dirname, "..")
const __dirname = url.fileURLToPath(new URL(".", import.meta.url));
const projectRoot = argv[2] || path.join(__dirname, "..");
// Add deps to pkg.json
const packageJSON = JSON.parse(fs.readFileSync(path.join(projectRoot, "package.json"), "utf8"))
const packageJSON = JSON.parse(
fs.readFileSync(path.join(projectRoot, "package.json"), "utf8"),
);
packageJSON.devDependencies = Object.assign(packageJSON.devDependencies, {
"svelte-check": "^3.0.0",
"svelte-preprocess": "^5.0.0",
"@rollup/plugin-typescript": "^11.0.0",
"typescript": "^4.9.0",
"tslib": "^2.5.0",
"@tsconfig/svelte": "^3.0.0"
})
typescript: "^4.9.0",
tslib: "^2.5.0",
"@tsconfig/svelte": "^3.0.0",
});
// Add script for checking
packageJSON.scripts = Object.assign(packageJSON.scripts, {
"check": "svelte-check"
})
check: "svelte-check",
});
// Write the package JSON
fs.writeFileSync(path.join(projectRoot, "package.json"), JSON.stringify(packageJSON, null, " "))
fs.writeFileSync(
path.join(projectRoot, "package.json"),
JSON.stringify(packageJSON, null, " "),
);
// mv src/main.js to main.ts - note, we need to edit rollup.config.js for this too
const beforeMainJSPath = path.join(projectRoot, "src", "main.js")
const afterMainTSPath = path.join(projectRoot, "src", "main.ts")
fs.renameSync(beforeMainJSPath, afterMainTSPath)
const beforeMainJSPath = path.join(projectRoot, "src", "main.js");
const afterMainTSPath = path.join(projectRoot, "src", "main.ts");
fs.renameSync(beforeMainJSPath, afterMainTSPath);
// Switch the app.svelte file to use TS
const appSveltePath = path.join(projectRoot, "src", "App.svelte")
let appFile = fs.readFileSync(appSveltePath, "utf8")
appFile = appFile.replace("<script>", '<script lang="ts">')
appFile = appFile.replace("export let name;", 'export let name: string;')
fs.writeFileSync(appSveltePath, appFile)
const appSveltePath = path.join(projectRoot, "src", "App.svelte");
let appFile = fs.readFileSync(appSveltePath, "utf8");
appFile = appFile.replace("<script>", '<script lang="ts">');
appFile = appFile.replace("export let name;", "export let name: string;");
fs.writeFileSync(appSveltePath, appFile);
// Edit rollup config
const rollupConfigPath = path.join(projectRoot, "rollup.config.js")
let rollupConfig = fs.readFileSync(rollupConfigPath, "utf8")
const rollupConfigPath = path.join(projectRoot, "rollup.config.js");
let rollupConfig = fs.readFileSync(rollupConfigPath, "utf8");
// Edit imports
rollupConfig = rollupConfig.replace(`'rollup-plugin-css-only';`, `'rollup-plugin-css-only';
rollupConfig = rollupConfig.replace(
`'rollup-plugin-css-only';`,
`'rollup-plugin-css-only';
import sveltePreprocess from 'svelte-preprocess';
import typescript from '@rollup/plugin-typescript';`)
import typescript from '@rollup/plugin-typescript';`,
);
// Replace name of entry point
rollupConfig = rollupConfig.replace(`'src/main.js'`, `'src/main.ts'`)
rollupConfig = rollupConfig.replace(`'src/main.js'`, `'src/main.ts'`);
// Add preprocessor
rollupConfig = rollupConfig.replace(
'compilerOptions:',
'preprocess: sveltePreprocess({ sourceMap: !production }),\n\t\t\tcompilerOptions:'
"compilerOptions:",
"preprocess: sveltePreprocess({ sourceMap: !production }),\n\t\t\tcompilerOptions:",
);
// Add TypeScript
rollupConfig = rollupConfig.replace(
'commonjs(),',
'commonjs(),\n\t\ttypescript({\n\t\t\tsourceMap: !production,\n\t\t\tinlineSources: !production\n\t\t}),'
"commonjs(),",
"commonjs(),\n\t\ttypescript({\n\t\t\tsourceMap: !production,\n\t\t\tinlineSources: !production\n\t\t}),",
);
fs.writeFileSync(rollupConfigPath, rollupConfig)
fs.writeFileSync(rollupConfigPath, rollupConfig);
// Add svelte.config.js
const tsconfig = `{
@@ -84,9 +92,9 @@ const tsconfig = `{
"include": ["src/**/*"],
"exclude": ["node_modules/*", "__sapper__/*", "public/*"]
}`
const tsconfigPath = path.join(projectRoot, "tsconfig.json")
fs.writeFileSync(tsconfigPath, tsconfig)
}`;
const tsconfigPath = path.join(projectRoot, "tsconfig.json");
fs.writeFileSync(tsconfigPath, tsconfig);
// Add TSConfig
const svelteConfig = `import sveltePreprocess from 'svelte-preprocess';
@@ -94,41 +102,46 @@ const svelteConfig = `import sveltePreprocess from 'svelte-preprocess';
export default {
preprocess: sveltePreprocess()
};
`
const svelteConfigPath = path.join(projectRoot, "svelte.config.js")
fs.writeFileSync(svelteConfigPath, svelteConfig)
`;
const svelteConfigPath = path.join(projectRoot, "svelte.config.js");
fs.writeFileSync(svelteConfigPath, svelteConfig);
// Add global.d.ts
const dtsPath = path.join(projectRoot, "src", "global.d.ts")
fs.writeFileSync(dtsPath, `/// <reference types="svelte" />`)
const dtsPath = path.join(projectRoot, "src", "global.d.ts");
fs.writeFileSync(dtsPath, `/// <reference types="svelte" />`);
// Delete this script, but not during testing
if (!argv[2]) {
// Remove the script
fs.unlinkSync(path.join(__filename))
fs.unlinkSync(path.join(__filename));
// Check for Mac's DS_store file, and if it's the only one left remove it
const remainingFiles = fs.readdirSync(path.join(__dirname))
if (remainingFiles.length === 1 && remainingFiles[0] === '.DS_store') {
fs.unlinkSync(path.join(__dirname, '.DS_store'))
const remainingFiles = fs.readdirSync(path.join(__dirname));
if (remainingFiles.length === 1 && remainingFiles[0] === ".DS_store") {
fs.unlinkSync(path.join(__dirname, ".DS_store"));
}
// Check if the scripts folder is empty
if (fs.readdirSync(path.join(__dirname)).length === 0) {
// Remove the scripts folder
fs.rmdirSync(path.join(__dirname))
fs.rmdirSync(path.join(__dirname));
}
}
// Adds the extension recommendation
fs.mkdirSync(path.join(projectRoot, ".vscode"), { recursive: true })
fs.writeFileSync(path.join(projectRoot, ".vscode", "extensions.json"), `{
fs.mkdirSync(path.join(projectRoot, ".vscode"), { recursive: true });
fs.writeFileSync(
path.join(projectRoot, ".vscode", "extensions.json"),
`{
"recommendations": ["svelte.svelte-vscode"]
}
`)
`,
);
console.log("Converted to TypeScript.")
console.log("Converted to TypeScript.");
if (fs.existsSync(path.join(projectRoot, "node_modules"))) {
console.log("\nYou will need to re-run your dependency manager to get started.")
console.log(
"\nYou will need to re-run your dependency manager to get started.",
);
}

View File

@@ -230,19 +230,27 @@
}
// Export functionality
async function exportAllEvents() {
if (!isLoggedIn || (userRole !== 'admin' && userRole !== 'owner')) {
alert('Admin or owner permission required');
async function exportEvents(pubkeys = []) {
if (!isLoggedIn) {
alert('Please log in first');
return;
}
// Check permissions for exporting all events
if (pubkeys.length === 0 && userRole !== 'admin' && userRole !== 'owner') {
alert('Admin or owner permission required to export all events');
return;
}
try {
const authHeader = await createNIP98AuthHeader('/api/export', 'GET');
const authHeader = await createNIP98AuthHeader('/api/export', 'POST');
const response = await fetch('/api/export', {
method: 'GET',
method: 'POST',
headers: {
'Authorization': authHeader
}
'Authorization': authHeader,
'Content-Type': 'application/json'
},
body: JSON.stringify({ pubkeys })
});
if (!response.ok) {
@@ -253,7 +261,18 @@
const url = window.URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = `all-events-${new Date().toISOString().slice(0, 19).replace(/:/g, '-')}.jsonl`;
// Get filename from response headers or use default
const contentDisposition = response.headers.get('Content-Disposition');
let filename = 'events.jsonl';
if (contentDisposition) {
const filenameMatch = contentDisposition.match(/filename="([^"]+)"/);
if (filenameMatch) {
filename = filenameMatch[1];
}
}
a.download = filename;
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
@@ -264,38 +283,12 @@
}
}
async function exportAllEvents() {
await exportEvents([]); // Empty array means export all events
}
async function exportMyEvents() {
if (!isLoggedIn) {
alert('Please log in first');
return;
}
try {
const authHeader = await createNIP98AuthHeader('/api/export/mine', 'GET');
const response = await fetch('/api/export/mine', {
method: 'GET',
headers: {
'Authorization': authHeader
}
});
if (!response.ok) {
throw new Error(`Export failed: ${response.status} ${response.statusText}`);
}
const blob = await response.blob();
const url = window.URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = `my-events-${new Date().toISOString().slice(0, 19).replace(/:/g, '-')}.jsonl`;
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
window.URL.revokeObjectURL(url);
} catch (error) {
console.error('Export failed:', error);
alert('Export failed: ' + error.message);
}
await exportEvents([userPubkey]); // Export only current user's events
}
// Import functionality
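
The filename extraction above uses a regex on Content-Disposition; for comparison, Go's standard library parses the same header with mime.ParseMediaType. A small sketch against the header shape the server emits:

```go
package main

import (
	"fmt"
	"mime"
)

func main() {
	// Same shape as the handler's Content-Disposition header.
	cd := `attachment; filename="my-events-20251009-145529Z.jsonl"`
	_, params, err := mime.ParseMediaType(cd)
	if err != nil {
		fmt.Println("could not parse header, using default name")
		return
	}
	fmt.Println(params["filename"]) // my-events-20251009-145529Z.jsonl
}
```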

View File

@@ -1,11 +1,11 @@
// Default Nostr relays for searching
export const DEFAULT_RELAYS = [
'wss://relay.damus.io',
'wss://relay.nostr.band',
'wss://nos.lol',
'wss://relay.nostr.net',
'wss://relay.minibits.cash',
'wss://relay.coinos.io/',
'wss://nwc.primal.net',
'wss://relay.orly.dev',
];
"wss://relay.damus.io",
"wss://relay.nostr.band",
"wss://nos.lol",
"wss://relay.nostr.net",
"wss://relay.minibits.cash",
"wss://relay.coinos.io/",
"wss://nwc.primal.net",
"wss://relay.orly.dev",
];

View File

@@ -1,11 +1,11 @@
import App from './App.svelte';
import '../public/global.css';
import App from "./App.svelte";
import "../public/global.css";
const app = new App({
target: document.body,
props: {
name: 'world'
}
target: document.body,
props: {
name: "world",
},
});
export default app;

View File

@@ -1,316 +1,359 @@
import { DEFAULT_RELAYS } from './constants.js';
import { DEFAULT_RELAYS } from "./constants.js";
// Simple WebSocket relay manager
class NostrClient {
constructor() {
this.relays = new Map();
this.subscriptions = new Map();
}
constructor() {
this.relays = new Map();
this.subscriptions = new Map();
}
async connect() {
console.log('Starting connection to', DEFAULT_RELAYS.length, 'relays...');
const connectionPromises = DEFAULT_RELAYS.map(relayUrl => {
return new Promise((resolve) => {
try {
console.log(`Attempting to connect to ${relayUrl}`);
const ws = new WebSocket(relayUrl);
ws.onopen = () => {
console.log(`✓ Successfully connected to ${relayUrl}`);
resolve(true);
};
ws.onerror = (error) => {
console.error(`✗ Error connecting to ${relayUrl}:`, error);
resolve(false);
};
ws.onclose = (event) => {
console.warn(`Connection closed to ${relayUrl}:`, event.code, event.reason);
};
ws.onmessage = (event) => {
console.log(`Message from ${relayUrl}:`, event.data);
try {
this.handleMessage(relayUrl, JSON.parse(event.data));
} catch (error) {
console.error(`Failed to parse message from ${relayUrl}:`, error, event.data);
}
};
this.relays.set(relayUrl, ws);
// Timeout after 5 seconds
setTimeout(() => {
if (ws.readyState !== WebSocket.OPEN) {
console.warn(`Connection timeout for ${relayUrl}`);
resolve(false);
}
}, 5000);
} catch (error) {
console.error(`Failed to create WebSocket for ${relayUrl}:`, error);
resolve(false);
}
});
});
const results = await Promise.all(connectionPromises);
const successfulConnections = results.filter(Boolean).length;
console.log(`Connected to ${successfulConnections}/${DEFAULT_RELAYS.length} relays`);
// Wait a bit more for connections to stabilize
await new Promise(resolve => setTimeout(resolve, 1000));
}
  handleMessage(relayUrl, message) {
    console.log(`Processing message from ${relayUrl}:`, message);
    const [type, subscriptionId, event, ...rest] = message;
    console.log(`Message type: ${type}, subscriptionId: ${subscriptionId}`);
    if (type === 'EVENT') {
      console.log(`Received EVENT for subscription ${subscriptionId}:`, event);
      if (this.subscriptions.has(subscriptionId)) {
        console.log(`Found callback for subscription ${subscriptionId}, executing...`);
        const callback = this.subscriptions.get(subscriptionId);
        callback(event);
      } else {
        console.warn(`No callback found for subscription ${subscriptionId}`);
      }
    } else if (type === 'EOSE') {
      console.log(`End of stored events for subscription ${subscriptionId} from ${relayUrl}`);
    } else if (type === 'NOTICE') {
      console.warn(`Notice from ${relayUrl}:`, subscriptionId);
    } else {
      console.log(`Unknown message type ${type} from ${relayUrl}:`, message);
    }
  }

  subscribe(filters, callback) {
    const subscriptionId = Math.random().toString(36).substring(7);
    console.log(`Creating subscription ${subscriptionId} with filters:`, filters);
    this.subscriptions.set(subscriptionId, callback);
    const subscription = ['REQ', subscriptionId, filters];
    console.log(`Subscription message:`, JSON.stringify(subscription));
    let sentCount = 0;
    for (const [relayUrl, ws] of this.relays) {
      console.log(`Checking relay ${relayUrl}, readyState: ${ws.readyState} (${ws.readyState === WebSocket.OPEN ? 'OPEN' : 'NOT OPEN'})`);
      if (ws.readyState === WebSocket.OPEN) {
        try {
          ws.send(JSON.stringify(subscription));
          console.log(`✓ Sent subscription to ${relayUrl}`);
          sentCount++;
        } catch (error) {
          console.error(`✗ Failed to send subscription to ${relayUrl}:`, error);
        }
      } else {
        console.warn(`✗ Cannot send to ${relayUrl}, connection not ready`);
      }
    }
    console.log(`Subscription ${subscriptionId} sent to ${sentCount}/${this.relays.size} relays`);
    return subscriptionId;
  }

  unsubscribe(subscriptionId) {
    this.subscriptions.delete(subscriptionId);
    const closeMessage = ['CLOSE', subscriptionId];
    for (const [relayUrl, ws] of this.relays) {
      if (ws.readyState === WebSocket.OPEN) {
        ws.send(JSON.stringify(closeMessage));
      }
    }
  }

  disconnect() {
    for (const [relayUrl, ws] of this.relays) {
      ws.close();
    }
    this.relays.clear();
    this.subscriptions.clear();
  }

  async connect() {
    console.log("Starting connection to", DEFAULT_RELAYS.length, "relays...");
    const connectionPromises = DEFAULT_RELAYS.map((relayUrl) => {
      return new Promise((resolve) => {
        try {
          console.log(`Attempting to connect to ${relayUrl}`);
          const ws = new WebSocket(relayUrl);
          ws.onopen = () => {
            console.log(`✓ Successfully connected to ${relayUrl}`);
            resolve(true);
          };
          ws.onerror = (error) => {
            console.error(`✗ Error connecting to ${relayUrl}:`, error);
            resolve(false);
          };
          ws.onclose = (event) => {
            console.warn(
              `Connection closed to ${relayUrl}:`,
              event.code,
              event.reason,
            );
          };
          ws.onmessage = (event) => {
            console.log(`Message from ${relayUrl}:`, event.data);
            try {
              this.handleMessage(relayUrl, JSON.parse(event.data));
            } catch (error) {
              console.error(
                `Failed to parse message from ${relayUrl}:`,
                error,
                event.data,
              );
            }
          };
          this.relays.set(relayUrl, ws);
          // Timeout after 5 seconds
          setTimeout(() => {
            if (ws.readyState !== WebSocket.OPEN) {
              console.warn(`Connection timeout for ${relayUrl}`);
              resolve(false);
            }
          }, 5000);
        } catch (error) {
          console.error(`Failed to create WebSocket for ${relayUrl}:`, error);
          resolve(false);
        }
      });
    });
    const results = await Promise.all(connectionPromises);
    const successfulConnections = results.filter(Boolean).length;
    console.log(
      `Connected to ${successfulConnections}/${DEFAULT_RELAYS.length} relays`,
    );
    // Wait a bit more for connections to stabilize
    await new Promise((resolve) => setTimeout(resolve, 1000));
  }

  handleMessage(relayUrl, message) {
    console.log(`Processing message from ${relayUrl}:`, message);
    const [type, subscriptionId, event, ...rest] = message;
    console.log(`Message type: ${type}, subscriptionId: ${subscriptionId}`);
    if (type === "EVENT") {
      console.log(`Received EVENT for subscription ${subscriptionId}:`, event);
      if (this.subscriptions.has(subscriptionId)) {
        console.log(
          `Found callback for subscription ${subscriptionId}, executing...`,
        );
        const callback = this.subscriptions.get(subscriptionId);
        callback(event);
      } else {
        console.warn(`No callback found for subscription ${subscriptionId}`);
      }
    } else if (type === "EOSE") {
      console.log(
        `End of stored events for subscription ${subscriptionId} from ${relayUrl}`,
      );
    } else if (type === "NOTICE") {
      console.warn(`Notice from ${relayUrl}:`, subscriptionId);
    } else {
      console.log(`Unknown message type ${type} from ${relayUrl}:`, message);
    }
  }

  subscribe(filters, callback) {
    const subscriptionId = Math.random().toString(36).substring(7);
    console.log(
      `Creating subscription ${subscriptionId} with filters:`,
      filters,
    );
    this.subscriptions.set(subscriptionId, callback);
    const subscription = ["REQ", subscriptionId, filters];
    console.log(`Subscription message:`, JSON.stringify(subscription));
    let sentCount = 0;
    for (const [relayUrl, ws] of this.relays) {
      console.log(
        `Checking relay ${relayUrl}, readyState: ${ws.readyState} (${ws.readyState === WebSocket.OPEN ? "OPEN" : "NOT OPEN"})`,
      );
      if (ws.readyState === WebSocket.OPEN) {
        try {
          ws.send(JSON.stringify(subscription));
          console.log(`✓ Sent subscription to ${relayUrl}`);
          sentCount++;
        } catch (error) {
          console.error(`✗ Failed to send subscription to ${relayUrl}:`, error);
        }
      } else {
        console.warn(`✗ Cannot send to ${relayUrl}, connection not ready`);
      }
    }
    console.log(
      `Subscription ${subscriptionId} sent to ${sentCount}/${this.relays.size} relays`,
    );
    return subscriptionId;
  }

  unsubscribe(subscriptionId) {
    this.subscriptions.delete(subscriptionId);
    const closeMessage = ["CLOSE", subscriptionId];
    for (const [relayUrl, ws] of this.relays) {
      if (ws.readyState === WebSocket.OPEN) {
        ws.send(JSON.stringify(closeMessage));
      }
    }
  }

  disconnect() {
    for (const [relayUrl, ws] of this.relays) {
      ws.close();
    }
    this.relays.clear();
    this.subscriptions.clear();
  }
}
// Create a global client instance
export const nostrClient = new NostrClient();
// IndexedDB helpers for caching events (kind 0 profiles)
const DB_NAME = 'nostrCache';
const DB_NAME = "nostrCache";
const DB_VERSION = 1;
const STORE_EVENTS = 'events';
const STORE_EVENTS = "events";
function openDB() {
  return new Promise((resolve, reject) => {
    try {
      const req = indexedDB.open(DB_NAME, DB_VERSION);
      req.onupgradeneeded = () => {
        const db = req.result;
        if (!db.objectStoreNames.contains(STORE_EVENTS)) {
          const store = db.createObjectStore(STORE_EVENTS, { keyPath: 'id' });
          store.createIndex('byKindAuthor', ['kind', 'pubkey'], { unique: false });
          store.createIndex('byKindAuthorCreated', ['kind', 'pubkey', 'created_at'], { unique: false });
        }
      };
      req.onsuccess = () => resolve(req.result);
      req.onerror = () => reject(req.error);
    } catch (e) {
      reject(e);
    }
  });
  return new Promise((resolve, reject) => {
    try {
      const req = indexedDB.open(DB_NAME, DB_VERSION);
      req.onupgradeneeded = () => {
        const db = req.result;
        if (!db.objectStoreNames.contains(STORE_EVENTS)) {
          const store = db.createObjectStore(STORE_EVENTS, { keyPath: "id" });
          store.createIndex("byKindAuthor", ["kind", "pubkey"], {
            unique: false,
          });
          store.createIndex(
            "byKindAuthorCreated",
            ["kind", "pubkey", "created_at"],
            { unique: false },
          );
        }
      };
      req.onsuccess = () => resolve(req.result);
      req.onerror = () => reject(req.error);
    } catch (e) {
      reject(e);
    }
  });
}
async function getLatestProfileEvent(pubkey) {
try {
const db = await openDB();
return await new Promise((resolve, reject) => {
const tx = db.transaction(STORE_EVENTS, 'readonly');
const idx = tx.objectStore(STORE_EVENTS).index('byKindAuthorCreated');
const range = IDBKeyRange.bound([0, pubkey, -Infinity], [0, pubkey, Infinity]);
const req = idx.openCursor(range, 'prev'); // newest first
req.onsuccess = () => {
const cursor = req.result;
resolve(cursor ? cursor.value : null);
};
req.onerror = () => reject(req.error);
});
} catch (e) {
console.warn('IDB getLatestProfileEvent failed', e);
return null;
}
try {
const db = await openDB();
return await new Promise((resolve, reject) => {
const tx = db.transaction(STORE_EVENTS, "readonly");
const idx = tx.objectStore(STORE_EVENTS).index("byKindAuthorCreated");
const range = IDBKeyRange.bound(
[0, pubkey, -Infinity],
[0, pubkey, Infinity],
);
const req = idx.openCursor(range, "prev"); // newest first
req.onsuccess = () => {
const cursor = req.result;
resolve(cursor ? cursor.value : null);
};
req.onerror = () => reject(req.error);
});
} catch (e) {
console.warn("IDB getLatestProfileEvent failed", e);
return null;
}
}
async function putEvent(event) {
try {
const db = await openDB();
await new Promise((resolve, reject) => {
const tx = db.transaction(STORE_EVENTS, 'readwrite');
tx.oncomplete = () => resolve();
tx.onerror = () => reject(tx.error);
tx.objectStore(STORE_EVENTS).put(event);
});
} catch (e) {
console.warn('IDB putEvent failed', e);
}
try {
const db = await openDB();
await new Promise((resolve, reject) => {
const tx = db.transaction(STORE_EVENTS, "readwrite");
tx.oncomplete = () => resolve();
tx.onerror = () => reject(tx.error);
tx.objectStore(STORE_EVENTS).put(event);
});
} catch (e) {
console.warn("IDB putEvent failed", e);
}
}
function parseProfileFromEvent(event) {
try {
const profile = JSON.parse(event.content || '{}');
return {
name: profile.name || profile.display_name || '',
picture: profile.picture || '',
banner: profile.banner || '',
about: profile.about || '',
nip05: profile.nip05 || '',
lud16: profile.lud16 || profile.lud06 || ''
};
} catch (e) {
return { name: '', picture: '', banner: '', about: '', nip05: '', lud16: '' };
}
try {
const profile = JSON.parse(event.content || "{}");
return {
name: profile.name || profile.display_name || "",
picture: profile.picture || "",
banner: profile.banner || "",
about: profile.about || "",
nip05: profile.nip05 || "",
lud16: profile.lud16 || profile.lud06 || "",
};
} catch (e) {
return {
name: "",
picture: "",
banner: "",
about: "",
nip05: "",
lud16: "",
};
}
}
// Fetch user profile metadata (kind 0)
export async function fetchUserProfile(pubkey) {
  return new Promise(async (resolve, reject) => {
    console.log(`Starting profile fetch for pubkey: ${pubkey}`);

    let resolved = false;
    let newestEvent = null;
    let debounceTimer = null;
    let overallTimer = null;
    let subscriptionId = null;

    function cleanup() {
      if (subscriptionId) {
        try { nostrClient.unsubscribe(subscriptionId); } catch {}
      }
      if (debounceTimer) clearTimeout(debounceTimer);
      if (overallTimer) clearTimeout(overallTimer);
    }

    // 1) Try cached profile first and resolve immediately if present
    try {
      const cachedEvent = await getLatestProfileEvent(pubkey);
      if (cachedEvent) {
        console.log('Using cached profile event');
        const profile = parseProfileFromEvent(cachedEvent);
        resolved = true; // resolve immediately with cache
        resolve(profile);
      }
    } catch (e) {
      console.warn('Failed to load cached profile', e);
    }

    // 2) Set overall timeout
    overallTimer = setTimeout(() => {
      if (!newestEvent) {
        console.log('Profile fetch timeout reached');
        if (!resolved) reject(new Error('Profile fetch timeout'));
      } else if (!resolved) {
        resolve(parseProfileFromEvent(newestEvent));
      }
      cleanup();
    }, 15000);

    // 3) Wait a bit to ensure connections are ready and then subscribe without limit
    setTimeout(() => {
      console.log('Starting subscription after connection delay...');
      subscriptionId = nostrClient.subscribe(
        {
          kinds: [0],
          authors: [pubkey]
        },
        (event) => {
          // Collect all kind 0 events and pick the newest by created_at
          if (!event || event.kind !== 0) return;
          console.log('Profile event received:', event);
          if (!newestEvent || (event.created_at || 0) > (newestEvent.created_at || 0)) {
            newestEvent = event;
          }
          // Debounce to wait for more relays; then finalize selection
          if (debounceTimer) clearTimeout(debounceTimer);
          debounceTimer = setTimeout(async () => {
            try {
              if (newestEvent) {
                await putEvent(newestEvent); // cache newest only
                const profile = parseProfileFromEvent(newestEvent);
                // Notify listeners that an updated profile is available
                try {
                  if (typeof window !== 'undefined' && window.dispatchEvent) {
                    window.dispatchEvent(new CustomEvent('profile-updated', {
                      detail: { pubkey, profile, event: newestEvent }
                    }));
                  }
                } catch (e) {
                  console.warn('Failed to dispatch profile-updated event', e);
                }
                if (!resolved) {
                  resolve(profile);
                  resolved = true;
                }
              }
            } finally {
              cleanup();
            }
          }, 800);
        }
      );
    }, 2000);
  });
  return new Promise(async (resolve, reject) => {
    console.log(`Starting profile fetch for pubkey: ${pubkey}`);

    let resolved = false;
    let newestEvent = null;
    let debounceTimer = null;
    let overallTimer = null;
    let subscriptionId = null;

    function cleanup() {
      if (subscriptionId) {
        try {
          nostrClient.unsubscribe(subscriptionId);
        } catch {}
      }
      if (debounceTimer) clearTimeout(debounceTimer);
      if (overallTimer) clearTimeout(overallTimer);
    }

    // 1) Try cached profile first and resolve immediately if present
    try {
      const cachedEvent = await getLatestProfileEvent(pubkey);
      if (cachedEvent) {
        console.log("Using cached profile event");
        const profile = parseProfileFromEvent(cachedEvent);
        resolved = true; // resolve immediately with cache
        resolve(profile);
      }
    } catch (e) {
      console.warn("Failed to load cached profile", e);
    }

    // 2) Set overall timeout
    overallTimer = setTimeout(() => {
      if (!newestEvent) {
        console.log("Profile fetch timeout reached");
        if (!resolved) reject(new Error("Profile fetch timeout"));
      } else if (!resolved) {
        resolve(parseProfileFromEvent(newestEvent));
      }
      cleanup();
    }, 15000);

    // 3) Wait a bit to ensure connections are ready and then subscribe without limit
    setTimeout(() => {
      console.log("Starting subscription after connection delay...");
      subscriptionId = nostrClient.subscribe(
        {
          kinds: [0],
          authors: [pubkey],
        },
        (event) => {
          // Collect all kind 0 events and pick the newest by created_at
          if (!event || event.kind !== 0) return;
          console.log("Profile event received:", event);
          if (
            !newestEvent ||
            (event.created_at || 0) > (newestEvent.created_at || 0)
          ) {
            newestEvent = event;
          }
          // Debounce to wait for more relays; then finalize selection
          if (debounceTimer) clearTimeout(debounceTimer);
          debounceTimer = setTimeout(async () => {
            try {
              if (newestEvent) {
                await putEvent(newestEvent); // cache newest only
                const profile = parseProfileFromEvent(newestEvent);
                // Notify listeners that an updated profile is available
                try {
                  if (typeof window !== "undefined" && window.dispatchEvent) {
                    window.dispatchEvent(
                      new CustomEvent("profile-updated", {
                        detail: { pubkey, profile, event: newestEvent },
                      }),
                    );
                  }
                } catch (e) {
                  console.warn("Failed to dispatch profile-updated event", e);
                }
                if (!resolved) {
                  resolve(profile);
                  resolved = true;
                }
              }
            } finally {
              cleanup();
            }
          }, 800);
        },
      );
    }, 2000);
});
}
// Initialize client connection
export async function initializeNostrClient() {
await nostrClient.connect();
}
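
The subscribe/REQ/EOSE flow implemented above in JavaScript looks like this from Go; a minimal sketch assuming the gorilla/websocket package as the client library:

```go
package main

import (
	"encoding/json"
	"log"

	"github.com/gorilla/websocket" // assumed dependency for this sketch
)

func main() {
	conn, _, err := websocket.DefaultDialer.Dial("wss://relay.damus.io", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// REQ with a kind-0 filter, mirroring the subscribe() call above.
	req := []interface{}{"REQ", "sub1", map[string]interface{}{
		"kinds":   []int{0},
		"authors": []string{"<hex pubkey>"}, // placeholder
	}}
	if err := conn.WriteJSON(req); err != nil {
		log.Fatal(err)
	}
	for {
		var msg []json.RawMessage
		if err := conn.ReadJSON(&msg); err != nil {
			log.Fatal(err)
		}
		if len(msg) == 0 {
			continue
		}
		var typ string
		_ = json.Unmarshal(msg[0], &typ)
		log.Println("received:", typ)
		if typ == "EOSE" { // end of stored events: close the subscription
			_ = conn.WriteJSON([]string{"CLOSE", "sub1"})
			return
		}
	}
}
```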

View File

@@ -54,6 +54,7 @@ cd cmd/benchmark
```
This will:
- Clone all external relay repositories
- Create Docker configurations for each relay
- Set up configuration files
@@ -68,6 +69,7 @@ docker compose up --build
```
The system will:
- Build and start all relay containers
- Wait for all relays to become healthy
- Run benchmarks against each relay sequentially
@@ -89,15 +91,15 @@ ls reports/run_YYYYMMDD_HHMMSS/
### Docker Compose Services
| Service          | Port | Description                                |
| ---------------- | ---- | ------------------------------------------ |
| next-orly        | 8001 | This repository's BadgerDB relay           |
| khatru-sqlite    | 8002 | Khatru with SQLite backend                 |
| khatru-badger    | 8003 | Khatru with Badger backend                 |
| relayer-basic    | 8004 | Basic relayer example                      |
| strfry           | 8005 | Strfry C++ LMDB relay                      |
| nostr-rs-relay   | 8006 | Rust SQLite relay                          |
| benchmark-runner | -    | Orchestrates tests and aggregates results  |
### File Structure
@@ -130,16 +132,16 @@ The benchmark can be configured via environment variables in `docker-compose.yml
```yaml
environment:
- BENCHMARK_EVENTS=10000 # Number of events per test
- BENCHMARK_WORKERS=8 # Concurrent workers
- BENCHMARK_DURATION=60s # Test duration
- BENCHMARK_TARGETS=... # Relay endpoints to test
```
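
A sketch of how a runner might read these variables in Go with the same defaults; the helper names are illustrative, not the benchmark's actual code:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

// envInt reads an integer environment variable, falling back to def.
func envInt(key string, def int) int {
	if v := os.Getenv(key); v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			return n
		}
	}
	return def
}

// envDuration reads a Go-style duration such as "60s", falling back to def.
func envDuration(key string, def time.Duration) time.Duration {
	if v := os.Getenv(key); v != "" {
		if d, err := time.ParseDuration(v); err == nil {
			return d
		}
	}
	return def
}

func main() {
	events := envInt("BENCHMARK_EVENTS", 10000)
	workers := envInt("BENCHMARK_WORKERS", 8)
	duration := envDuration("BENCHMARK_DURATION", 60*time.Second)
	fmt.Println(events, workers, duration)
}
```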
### Custom Configuration
1. **Modify test parameters**: Edit environment variables in `docker-compose.yml`
2. **Add new relays**:
- Add service to `docker-compose.yml`
- Create appropriate Dockerfile
- Update `BENCHMARK_TARGETS` environment variable
@@ -174,16 +176,19 @@ go build -o benchmark main.go
## Benchmark Results Interpretation
### Peak Throughput Test
- **High events/sec**: Good write performance
- **Low latency**: Efficient event processing
- **High success rate**: Stable under load
### Burst Pattern Test
- **Consistent performance**: Good handling of variable loads
- **Low P95/P99 latency**: Predictable response times
- **No errors during bursts**: Robust queuing/buffering
### Mixed Read/Write Test
- **Balanced throughput**: Good concurrent operation handling
- **Low read latency**: Efficient query processing
- **Stable write performance**: Queries don't significantly impact writes
@@ -200,6 +205,7 @@ go build -o benchmark main.go
### Modifying Relay Configurations
Each relay's Dockerfile and configuration can be customized:
- **Resource limits**: Adjust memory/CPU limits in docker-compose.yml
- **Database settings**: Modify configuration files in `configs/`
- **Network settings**: Update port mappings and health checks
@@ -257,4 +263,4 @@ To add support for new relay implementations:
## License
This benchmark suite is part of the next.orly.dev project and follows the same licensing terms.

View File

@@ -1,4 +1,4 @@
version: '3.8'
version: "3.8"
services:
# Next.orly.dev relay (this repository)
@@ -19,7 +19,11 @@ services:
networks:
- benchmark-net
healthcheck:
test: ["CMD-SHELL", "code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null"]
test:
[
"CMD-SHELL",
"code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null",
]
interval: 30s
timeout: 10s
retries: 3
@@ -41,7 +45,11 @@ services:
networks:
- benchmark-net
healthcheck:
test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null"]
test:
[
"CMD-SHELL",
"wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null",
]
interval: 30s
timeout: 10s
retries: 3
@@ -63,7 +71,11 @@ services:
networks:
- benchmark-net
healthcheck:
test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null"]
test:
[
"CMD-SHELL",
"wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null",
]
interval: 30s
timeout: 10s
retries: 3
@@ -87,7 +99,11 @@ services:
postgres:
condition: service_healthy
healthcheck:
test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://localhost:7447 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null"]
test:
[
"CMD-SHELL",
"wget --quiet --server-response --tries=1 http://localhost:7447 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null",
]
interval: 30s
timeout: 10s
retries: 3
@@ -108,7 +124,11 @@ services:
networks:
- benchmark-net
healthcheck:
test: ["CMD-SHELL", "wget --quiet --server-response --tries=1 http://127.0.0.1:8080 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404|426)' >/dev/null"]
test:
[
"CMD-SHELL",
"wget --quiet --server-response --tries=1 http://127.0.0.1:8080 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404|426)' >/dev/null",
]
interval: 30s
timeout: 10s
retries: 3
@@ -130,7 +150,15 @@ services:
networks:
- benchmark-net
healthcheck:
test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080"]
test:
[
"CMD",
"wget",
"--quiet",
"--tries=1",
"--spider",
"http://localhost:8080",
]
interval: 30s
timeout: 10s
retries: 3
@@ -197,4 +225,4 @@ networks:
volumes:
benchmark-data:
driver: local
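
The healthchecks above accept several status codes (101/200/400/404/426) because a WebSocket-first relay may answer a plain HTTP GET with an upgrade error rather than 200. The same probe as a small Go program, as a sketch:

```go
package main

import (
	"net/http"
	"os"
)

func main() {
	resp, err := http.Get("http://localhost:8080") // port varies per service
	if err != nil {
		os.Exit(1) // no HTTP answer at all: unhealthy
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case 101, 200, 400, 404, 426:
		os.Exit(0) // server answered, even if it only wants WebSocket upgrades
	default:
		os.Exit(1)
	}
}
```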

View File

@@ -4,6 +4,7 @@
**Updated with real-world troubleshooting solutions and latest Orly relay improvements**
## 🎯 **What This Solves**
- WebSocket connection failures (`NS_ERROR_WEBSOCKET_CONNECTION_REFUSED`)
- Nostr relay connectivity issues (`HTTP 426` instead of WebSocket upgrade)
- Docker container proxy configuration
@@ -16,6 +17,7 @@
## 🐳 **Step 1: Deploy Your Docker Application**
### **For Stella's Orly Relay (Latest Version with Proxy Improvements):**
```bash
# Pull and run the relay with enhanced proxy support
docker run -d \
@@ -39,6 +41,7 @@ curl -I http://127.0.0.1:7777
```
### **For Web Apps (like Jumble):**
```bash
# Run with fixed port for easier proxy setup
docker run -d \
@@ -61,34 +64,34 @@ curl -I http://127.0.0.1:3000
```apache
<VirtualHost *:443>
ServerName your-domain.com
# SSL Configuration (Let's Encrypt)
SSLEngine on
SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem
# Enable required modules first:
# sudo a2enmod proxy proxy_http proxy_wstunnel rewrite headers ssl
# Proxy settings
ProxyPreserveHost On
ProxyRequests Off
# WebSocket upgrade handling - CRITICAL for apps with WebSockets
RewriteEngine On
RewriteCond %{HTTP:Upgrade} websocket [NC]
RewriteCond %{HTTP:Connection} upgrade [NC]
RewriteRule ^/?(.*) "ws://127.0.0.1:PORT/$1" [P,L]
# Regular HTTP proxy
ProxyPass / http://127.0.0.1:PORT/
ProxyPassReverse / http://127.0.0.1:PORT/
# Headers for modern web apps
Header always set X-Forwarded-Proto "https"
Header always set X-Forwarded-Port "443"
Header always set X-Forwarded-For %{REMOTE_ADDR}s
# Security headers
Header always set Strict-Transport-Security "max-age=63072000; includeSubDomains"
Header always set X-Content-Type-Options nosniff
@@ -103,6 +106,7 @@ curl -I http://127.0.0.1:3000
```
**Then enable it:**
```bash
sudo a2ensite domain.conf
sudo systemctl reload apache2
@@ -121,6 +125,7 @@ sudo systemctl reload apache2
5. **In HTTPS section, add:**
**For Nostr Relay (port 7777):**
```apache
ProxyRequests Off
ProxyPreserveHost On
@@ -142,23 +147,23 @@ sudo tee /etc/apache2/conf-available/relay-override.conf << 'EOF'
ServerName your-domain.com
ServerAlias www.your-domain.com
ServerAlias ipv4.your-domain.com
SSLEngine on
SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem
DocumentRoot /var/www/relay
# For Nostr relay - proxy everything to WebSocket
ProxyRequests Off
ProxyPreserveHost On
ProxyPass / ws://127.0.0.1:7777/
ProxyPassReverse / ws://127.0.0.1:7777/
# CORS headers
Header always set Access-Control-Allow-Origin "*"
Header always set Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"
# Logging
ErrorLog /var/log/apache2/relay-error.log
CustomLog /var/log/apache2/relay-access.log combined
@@ -190,6 +195,7 @@ apache2ctl -M | grep -E "(proxy|rewrite)"
```
#### **For Web Apps (port 3000 or 32768):**
```apache
ProxyPreserveHost On
ProxyRequests Off
@@ -221,22 +227,22 @@ sudo tee /etc/apache2/conf-available/relay-override.conf << 'EOF'
ServerName your-domain.com
ServerAlias www.your-domain.com
ServerAlias ipv4.your-domain.com
SSLEngine on
SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem
DocumentRoot /var/www/relay
# For Nostr relay - proxy everything to WebSocket
ProxyRequests Off
ProxyPreserveHost On
ProxyPass / ws://127.0.0.1:7777/
ProxyPassReverse / ws://127.0.0.1:7777/
# CORS headers
Header always set Access-Control-Allow-Origin "*"
# Logging
ErrorLog /var/log/apache2/relay-error.log
CustomLog /var/log/apache2/relay-access.log combined
@@ -269,6 +275,7 @@ sudo systemctl restart apache2
## 🆕 **Step 4: Latest Orly Relay Improvements**
### **Enhanced Proxy Support**
The latest Orly relay includes several proxy improvements:
1. **Flexible WebSocket Scheme Handling**: Accepts both `ws://` and `wss://` schemes for authentication
@@ -277,6 +284,7 @@ The latest Orly relay includes several proxy improvements:
4. **Proxy-Aware Logging**: Better debugging information for proxy setups
### **Key Environment Variables**
```bash
# Essential for proxy setups
ORLY_RELAY_URL=wss://your-domain.com # Must match your public URL
@@ -286,6 +294,7 @@ ORLY_SUBSCRIPTION_ENABLED=false # Disable payment requirements
```
### **Testing the Enhanced Relay**
```bash
# Test local connectivity
curl -I http://127.0.0.1:7777
@@ -338,32 +347,38 @@ After making changes:
## 🚨 **Real-World Troubleshooting Guide**
*Based on actual deployment experience with Plesk and WebSocket issues*
_Based on actual deployment experience with Plesk and WebSocket issues_
### **Critical Issues & Solutions:**
#### **🔴 HTTP 503 Service Unavailable**
- **Cause**: Docker container not running
- **Check**: `docker ps | grep relay`
- **Fix**: `docker start container-name`
#### **🔴 HTTP 426 Instead of WebSocket Upgrade**
- **Cause**: Apache using `http://` proxy instead of `ws://`
- **Fix**: Use `ProxyPass / ws://127.0.0.1:7777/` (not `http://`)
#### **🔴 Plesk Configuration Not Applied**
- **Symptom**: Config not in `/etc/apache2/plesk.conf.d/vhosts/domain.conf`
- **Solution**: Use Direct Apache Override method (bypass Plesk interface)
#### **🔴 Virtual Host Conflicts**
- **Check**: `apache2ctl -S | grep domain.com`
- **Fix**: Remove Plesk config: `sudo rm /etc/apache2/plesk.conf.d/vhosts/domain.conf`
#### **🔴 Nginx Intercepting (Plesk)**
- **Symptom**: Response shows `Server: nginx`
- **Fix**: Disable nginx in Plesk settings
### **Debug Commands:**
```bash
# Essential debugging
docker ps | grep relay # Container running?
@@ -383,9 +398,11 @@ docker logs relay-name | grep -i "websocket connection"
## 🚨 **Latest Troubleshooting Solutions**
### **WebSocket Scheme Validation Errors**
**Problem**: `"HTTP Scheme incorrect: expected 'ws' got 'wss'"`
**Solution**: Use the latest Orly relay image with enhanced proxy support:
```bash
# Pull the latest image with proxy improvements
docker pull silberengel/next-orly:latest
@@ -396,17 +413,21 @@ docker stop orly-relay && docker rm orly-relay
```
### **Malformed Client Data Errors**
**Problem**: `"invalid hex array size, got 2 expect 64"`
**Solution**: These are client-side issues, not server problems. The latest relay handles them gracefully:
- The relay now sends helpful error messages to clients
- Malformed requests are logged but don't crash the relay
- Normal operations continue despite client errors
### **Follows ACL Not Working**
**Problem**: Only owners can write, admins can't write
**Solution**: Ensure proper configuration:
```bash
# Check ACL configuration
docker exec orly-relay env | grep ACL
@@ -416,9 +437,11 @@ docker exec orly-relay env | grep ACL
```
### **Spider Not Syncing Content**
**Problem**: Spider enabled but not pulling events
**Solution**: Check for relay lists and follow events:
```bash
# Check spider status
docker logs orly-relay | grep -i spider
@@ -431,6 +454,7 @@ docker logs orly-relay | grep -i "kind.*3"
```
### **Working Solution (Proven):**
```apache
<VirtualHost SERVER_IP:443>
ServerName domain.com
@@ -438,20 +462,21 @@ docker logs orly-relay | grep -i "kind.*3"
SSLCertificateFile /etc/letsencrypt/live/domain.com/fullchain.pem
SSLCertificateKeyFile /etc/letsencrypt/live/domain.com/privkey.pem
DocumentRoot /var/www/relay
# Direct WebSocket proxy - this is the key!
ProxyRequests Off
ProxyPreserveHost On
ProxyPass / ws://127.0.0.1:7777/
ProxyPassReverse / ws://127.0.0.1:7777/
Header always set Access-Control-Allow-Origin "*"
</VirtualHost>
```
---
**Key Lessons**:
1. Plesk interface often fails to apply Apache directives
2. Use `ws://` proxy for Nostr relays, not `http://`
3. Direct Apache config files are more reliable than Plesk interface
@@ -464,17 +489,20 @@ docker logs orly-relay | grep -i "kind.*3"
## 🎉 **Summary of Latest Improvements**
### **Enhanced Proxy Support**
- ✅ Flexible WebSocket scheme validation (accepts both `ws://` and `wss://`)
- ✅ Enhanced CORS headers for better web app compatibility
- ✅ Improved error handling for malformed client data
- ✅ Proxy-aware logging for better debugging
### **Spider and ACL Features**
- ✅ Follows-based access control (`ORLY_ACL_MODE=follows`)
- ✅ Content syncing from other relays (`ORLY_SPIDER_MODE=follows`)
- ✅ No payment requirements (`ORLY_SUBSCRIPTION_ENABLED=false`)
### **Production Ready**
- ✅ Robust error handling
- ✅ Enhanced logging and debugging
- ✅ Better client compatibility
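
For comparison with the Apache ws:// pass-through, Go's httputil.ReverseProxy forwards Connection: Upgrade requests since Go 1.12, so one proxy can serve both HTTP and WebSocket traffic. A minimal sketch, assuming the relay listens on 127.0.0.1:7777 and the certificate paths from the config above:

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	target, err := url.Parse("http://127.0.0.1:7777")
	if err != nil {
		log.Fatal(err)
	}
	// ReverseProxy passes WebSocket upgrades through to the backend,
	// so wss:// clients terminate TLS here and speak ws:// to the relay.
	proxy := httputil.NewSingleHostReverseProxy(target)
	http.Handle("/", proxy)
	log.Fatal(http.ListenAndServeTLS(":443",
		"/etc/letsencrypt/live/your-domain.com/fullchain.pem",
		"/etc/letsencrypt/live/your-domain.com/privkey.pem", nil))
}
```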

View File

@@ -37,6 +37,7 @@ cp env.example .env
```
Key settings:
- `ORLY_OWNERS`: Owner npubs (comma-separated, full control)
- `ORLY_ADMINS`: Admin npubs (comma-separated, deletion permissions)
- `ORLY_PORT`: Port to listen on (default: 7777)
@@ -50,6 +51,7 @@ The relay data is stored in `./data` directory which is mounted as a volume.
### Performance Tuning
Based on the v0.4.8 optimizations:
- Concurrent event publishing using all CPU cores
- Optimized BadgerDB access patterns
- Configurable batch sizes and cache settings
@@ -105,12 +107,14 @@ go run ./cmd/stresstest -relay ws://localhost:7777
### Common Issues (Real-World Experience)
#### **Container Issues:**
1. **Port already in use**: Change `ORLY_PORT` in docker-compose.yml
2. **Permission denied**: Ensure `./data` directory is writable
3. **Container won't start**: Check logs with `docker logs container-name`
#### **WebSocket Issues:**
4. **HTTP 426 instead of WebSocket upgrade**:
- Use `ws://127.0.0.1:7777` in proxy config, not `http://`
- Ensure `proxy_wstunnel` module is enabled
5. **Connection refused in browser but works with websocat**:
@@ -119,6 +123,7 @@ go run ./cmd/stresstest -relay ws://localhost:7777
- Add CORS headers to Apache/nginx config
#### **Plesk-Specific Issues:**
6. **Plesk not applying Apache directives**:
- Check if config appears in `/etc/apache2/plesk.conf.d/vhosts/domain.conf`
- Use direct Apache override if Plesk interface fails
@@ -127,6 +132,7 @@ go run ./cmd/stresstest -relay ws://localhost:7777
- Remove conflicting Plesk configs if needed
#### **SSL Certificate Issues:**
8. **Self-signed certificate after Let's Encrypt**:
- Plesk might not be using the correct certificate
- Import Let's Encrypt certs into Plesk or use direct Apache config
@@ -166,23 +172,24 @@ sudo tail -f /var/log/apache2/domain-error.log
### Working Reverse Proxy Config
**For Apache (direct config file):**
```apache
<VirtualHost SERVER_IP:443>
ServerName domain.com
SSLEngine on
SSLCertificateFile /etc/letsencrypt/live/domain.com/fullchain.pem
SSLCertificateKeyFile /etc/letsencrypt/live/domain.com/privkey.pem
# Direct WebSocket proxy for Nostr relay
ProxyRequests Off
ProxyPreserveHost On
ProxyPass / ws://127.0.0.1:7777/
ProxyPassReverse / ws://127.0.0.1:7777/
Header always set Access-Control-Allow-Origin "*"
</VirtualHost>
```
---
_Crafted for Stella's digital forest_ 🌲

View File

@@ -1,26 +1,28 @@
# Service Worker Certificate Caching Fix
## 🚨 **Problem**
When accessing Jumble from the ImWald landing page, the service worker serves responses that were cached while the site still used the self-signed certificate, instead of fetching fresh content over the new Let's Encrypt certificate.
## ⚡ **Solutions**
### **Option 1: Force Service Worker Update**
Add this to your Jumble app's service worker or main JavaScript:
```javascript
// Force service worker update and certificate refresh
if ("serviceWorker" in navigator) {
navigator.serviceWorker.getRegistrations().then(function (registrations) {
for (let registration of registrations) {
registration.update(); // Force update
}
});
}
// Clear all caches on certificate update
if ("caches" in window) {
caches.keys().then(function (names) {
for (let name of names) {
caches.delete(name);
}
@@ -29,49 +31,52 @@ if ('caches' in window) {
```
### **Option 2: Update Service Worker Cache Strategy**
In your service worker file, add cache busting for SSL-sensitive requests:
```javascript
// In your service worker
self.addEventListener("fetch", function (event) {
// Don't cache HTTPS requests that might have certificate issues
if (
event.request.url.startsWith("https://") &&
event.request.url.includes("imwald.eu")
) {
event.respondWith(fetch(event.request, { cache: "no-store" }));
return;
}
// Your existing fetch handling...
});
```
### **Option 3: Version Your Service Worker**
Update your service worker with a new version number:
```javascript
// At the top of your service worker
const CACHE_VERSION = "v2.0.1"; // Increment this when certificates change
const CACHE_NAME = `jumble-cache-${CACHE_VERSION}`;
// Clear old caches
self.addEventListener("activate", function (event) {
event.waitUntil(
caches.keys().then(function (cacheNames) {
return Promise.all(
cacheNames.map(function (cacheName) {
if (cacheName !== CACHE_NAME) {
return caches.delete(cacheName);
}
}),
);
}),
);
});
```
### **Option 4: Add Cache Headers**
In your Plesk Apache config for Jumble, add:
```apache

View File

@@ -1,11 +1,13 @@
# WebSocket Connection Debug Guide
## 🚨 **Current Issue**
`wss://orly-relay.imwald.eu/` returns `NS_ERROR_WEBSOCKET_CONNECTION_REFUSED`
## 🔍 **Debug Steps**
### **Step 1: Verify Relay is Running**
```bash
# On your server
curl -I http://127.0.0.1:7777
@@ -16,6 +18,7 @@ docker ps | grep stella
```
### **Step 2: Test Apache Modules**
```bash
# Check if WebSocket modules are enabled
apache2ctl -M | grep -E "(proxy|rewrite)"
@@ -30,6 +33,7 @@ sudo systemctl restart apache2
```
### **Step 3: Check Apache Configuration**
```bash
# Check what Plesk generated
sudo cat /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf
@@ -39,6 +43,7 @@ grep -E "(Proxy|Rewrite)" /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.
```
### **Step 4: Test Direct WebSocket Connection**
```bash
# Test if the issue is Apache or the relay itself
echo '["REQ","test",{}]' | websocat ws://127.0.0.1:7777/
@@ -48,6 +53,7 @@ echo '["REQ","test",{}]' | websocat ws://127.0.0.1:7777/
```
### **Step 5: Check Apache Error Logs**
```bash
# Watch Apache errors in real-time
sudo tail -f /var/log/apache2/error.log
@@ -83,6 +89,7 @@ ProxyAddHeaders On
```
### **Alternative Simpler Version:**
If the above doesn't work, try just:
```apache

View File

@@ -4,9 +4,9 @@
services:
orly-relay:
build:
context: ../..
dockerfile: Dockerfile
image: silberengel/next-orly:latest
container_name: orly-relay
restart: unless-stopped
ports:
@@ -23,40 +23,40 @@ services:
- ORLY_DB_LOG_LEVEL=error
- ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx
- ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1m4ny6hjqzepn4rxknuq94c2gpqzr29ufkkw7ttcxyak7v43n6vvsajc2jl,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z
# ACL and Spider Configuration
- ORLY_ACL_MODE=follows
- ORLY_SPIDER_MODE=follows
# Bootstrap relay URLs for initial sync
- ORLY_BOOTSTRAP_RELAYS=wss://profiles.nostr1.com,wss://purplepag.es,wss://relay.nostr.band,wss://relay.damus.io
# Subscription Settings (optional)
- ORLY_SUBSCRIPTION_ENABLED=false
- ORLY_MONTHLY_PRICE_SATS=0
# Performance Settings
- ORLY_MAX_CONNECTIONS=1000
- ORLY_MAX_EVENT_SIZE=65536
- ORLY_MAX_SUBSCRIPTIONS=20
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:7777"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
# Resource limits
deploy:
resources:
limits:
memory: 1G
cpus: "1.0"
reservations:
memory: 256M
cpus: "0.25"
# Logging configuration
logging:
driver: "json-file"
@@ -79,7 +79,7 @@ services:
depends_on:
- orly-relay
profiles:
- proxy # Only start with: docker-compose --profile proxy up
volumes:
relay_data:

View File

@@ -10,12 +10,14 @@ This document compares how two Nostr relay implementations handle WebSocket conn
## Architecture Comparison
### Khatru Architecture
- **Monolithic approach**: Single large `HandleWebsocket` method (~380 lines) processes all message types
- **Inline processing**: REQ handling is embedded within the main websocket handler
- **Hook-based extensibility**: Uses function slices for customizable behavior
- **Simple structure**: WebSocket struct with basic fields and mutex for thread safety
### Next.orly.dev Architecture
- **Modular approach**: Separate methods for each message type (`HandleReq`, `HandleEvent`, etc.)
- **Layered processing**: Message identification → envelope parsing → type-specific handling
- **Publisher-subscriber system**: Dedicated infrastructure for subscription management
@@ -24,6 +26,7 @@ This document compares how two Nostr relay implementations handle WebSocket conn
## Connection Establishment
### Khatru
```go
// Simple websocket upgrade
conn, err := rl.upgrader.Upgrade(w, r, nil)
@@ -36,6 +39,7 @@ ws := &WebSocket{
```
### Next.orly.dev
```go
// More sophisticated setup with IP whitelisting
conn, err = websocket.Accept(w, r, &websocket.AcceptOptions{OriginPatterns: []string{"*"}})
@@ -50,6 +54,7 @@ listener := &Listener{
```
**Key Differences:**
- Next.orly.dev includes IP whitelisting and immediate authentication challenges
- Khatru uses the fasthttp/websocket library, while next.orly.dev uses coder/websocket
- Next.orly.dev has more detailed connection state tracking
@@ -57,11 +62,13 @@ listener := &Listener{
## Message Processing
### Khatru
- Uses `nostr.MessageParser` for sequential parsing
- Switch statement on envelope type within goroutine
- Direct processing without intermediate validation layers
### Next.orly.dev
- Custom envelope identification system (`envelopes.Identify`)
- Separate validation and processing phases
- Extensive logging and error handling at each step
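As a rough sketch of that identify-then-dispatch flow (every name and signature here is an assumption standing in for `envelopes.Identify` and the per-type handlers):
```go
package main

import "log"

// identify is a stub for the envelope identification phase described above;
// real code parses the JSON array header to find the label.
func identify(msg []byte) (label string, rest []byte, err error) {
	return "REQ", msg, nil
}

func handle(msg []byte) {
	label, rest, err := identify(msg)
	if err != nil {
		log.Printf("drop malformed message: %v", err)
		return
	}
	// Separate validation and processing phases per message type.
	switch label {
	case "REQ":
		log.Printf("handle REQ: %d bytes", len(rest))
	case "EVENT":
		log.Printf("handle EVENT: %d bytes", len(rest))
	default:
		log.Printf("unknown envelope %q", label)
	}
}

func main() { handle([]byte(`["REQ","sub",{}]`)) }
```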
@@ -69,11 +76,12 @@ listener := &Listener{
## REQ Message Handling
### Khatru REQ Processing
```go
case *nostr.ReqEnvelope:
eose := sync.WaitGroup{}
eose.Add(len(env.Filters))
// Handle each filter separately
for _, filter := range env.Filters {
err := srl.handleRequest(reqCtx, env.SubscriptionID, &eose, ws, filter)
@@ -85,7 +93,7 @@ case *nostr.ReqEnvelope:
rl.addListener(ws, env.SubscriptionID, srl, filter, cancelReqCtx)
}
}
go func() {
eose.Wait()
ws.WriteJSON(nostr.EOSEEnvelope(env.SubscriptionID))
@@ -93,6 +101,7 @@ case *nostr.ReqEnvelope:
```
### Next.orly.dev REQ Processing
```go
// Comprehensive ACL and authentication checks first
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
@@ -117,12 +126,14 @@ for _, f := range *env.Filters {
### 1. **Filter Processing Strategy**
**Khatru:**
- Processes each filter independently and concurrently
- Uses WaitGroup to coordinate EOSE across all filters
- Immediately sets up listeners for ongoing subscriptions
- Fails entire subscription if any filter is rejected
**Next.orly.dev:**
- Processes all filters sequentially in a single context
- Collects all events before applying access control
- Only sets up subscriptions for filters that need ongoing updates
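A minimal sketch of that sequential strategy, with deduplication and a single EOSE after all filters complete; the types and the query stub are illustrative assumptions, not next.orly.dev's actual API.
```go
package main

import "fmt"

type filter struct{ kinds []int }
type event struct{ id string }

// query stands in for the database lookup for one filter.
func query(f filter) []event {
	return []event{{id: "a"}, {id: "b"}}
}

func handleReq(subID string, filters []filter) {
	seen := map[string]bool{}
	var results []event
	for _, f := range filters { // sequential, single context
		for _, ev := range query(f) {
			if !seen[ev.id] { // batch dedup before delivery
				seen[ev.id] = true
				results = append(results, ev)
			}
		}
	}
	for _, ev := range results {
		fmt.Println("EVENT", subID, ev.id)
	}
	fmt.Println("EOSE", subID) // one EOSE once every filter has run
}

func main() { handleReq("sub1", []filter{{}, {}}) }
```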
@@ -131,11 +142,13 @@ for _, f := range *env.Filters {
### 2. **Access Control Integration**
**Khatru:**
- Basic NIP-42 authentication support
- Hook-based authorization via `RejectFilter` functions
- Limited built-in access control features
**Next.orly.dev:**
- Comprehensive ACL system with multiple access levels
- Built-in support for private events with npub authorization
- Privileged event filtering based on pubkey and p-tags
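The access-level gate described above might look roughly like this; the levels and the stubbed registry lookup are assumptions modelled on the `acl.Registry.GetAccessLevel` call shown earlier.
```go
package main

import "fmt"

type accessLevel int

const (
	levelNone accessLevel = iota
	levelRead
	levelWrite
)

// gate stands in for the ACL registry: resolve a level for the connection,
// then reject REQs below read access.
func gate(pubkey, remote string) accessLevel {
	if pubkey == "" {
		return levelNone // unauthenticated connections get no access
	}
	return levelRead
}

func main() {
	if gate("", "203.0.113.7") < levelRead {
		fmt.Println(`CLOSED sub1 "auth-required: read access requires authentication"`)
	}
}
```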
@@ -144,6 +157,7 @@ for _, f := range *env.Filters {
### 3. **Subscription Management**
**Khatru:**
```go
// Simple listener registration
type listenerSpec struct {
@@ -155,6 +169,7 @@ rl.addListener(ws, subscriptionID, relay, filter, cancel)
```
**Next.orly.dev:**
```go
// Publisher-subscriber system with rich metadata
type W struct {
@@ -171,11 +186,13 @@ l.publishers.Receive(&W{...})
### 4. **Performance Optimizations**
**Khatru:**
- Concurrent filter processing
- Immediate streaming of events as they're found
- Memory-efficient with direct event streaming
**Next.orly.dev:**
- Batch processing with deduplication
- Memory management with explicit `ev.Free()` calls
- Smart subscription cancellation for ID-only queries
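A sketch of the explicit release pattern: serialize, deliver, then return buffers promptly. `ev.Free()` is the name used in the comparison; the pooling mechanics here are assumptions.
```go
package main

import "fmt"

// event is a stand-in with the Serialize/Free lifecycle described above.
type event struct{ buf []byte }

func (e *event) Serialize() []byte { return e.buf }
func (e *event) Free()             { e.buf = nil /* returned to a pool in real code */ }

func main() {
	results := []*event{{buf: []byte(`{"id":"a"}`)}}
	for _, ev := range results {
		fmt.Printf("send %s\n", ev.Serialize())
		ev.Free() // release promptly instead of waiting for GC
	}
}
```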
@@ -184,11 +201,13 @@ l.publishers.Receive(&W{...})
### 5. **Error Handling & Observability**
**Khatru:**
- Basic error logging
- Simple connection state management
- Limited metrics and observability
**Next.orly.dev:**
- Comprehensive error handling with context preservation
- Detailed logging at each processing stage
- Built-in metrics (message count, REQ count, event count)
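Those built-in counters can be as simple as standard-library atomics; the metric names below are assumptions.
```go
package main

import (
	"fmt"
	"sync/atomic"
)

type metrics struct {
	messages atomic.Int64 // total messages received
	reqs     atomic.Int64 // REQ envelopes handled
	events   atomic.Int64 // EVENT envelopes handled
}

func main() {
	var m metrics
	m.messages.Add(1)
	m.reqs.Add(1)
	fmt.Printf("messages=%d reqs=%d events=%d\n",
		m.messages.Load(), m.reqs.Load(), m.events.Load())
}
```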
@@ -197,11 +216,13 @@ l.publishers.Receive(&W{...})
## Memory Management
### Khatru
- Relies on Go's garbage collector
- Simple WebSocket struct with minimal state
- Uses sync.Map for thread-safe operations
### Next.orly.dev
- Explicit memory management with `ev.Free()` calls
- Resource pooling and reuse patterns
- Detailed tracking of connection resources
@@ -209,11 +230,13 @@ l.publishers.Receive(&W{...})
## Concurrency Models
### Khatru
- Per-connection goroutine for message reading
- Additional goroutines for each message processing
- WaitGroup coordination for multi-filter EOSE
### Next.orly.dev
- Per-connection goroutine with single-threaded message processing
- Publisher-subscriber system handles concurrent event distribution
- Context-based cancellation throughout
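A sketch of that context-based model: one goroutine processes messages single-threaded, and cancelling the connection context tears everything down. The message source is a stub.
```go
package main

import (
	"context"
	"fmt"
	"time"
)

func serve(ctx context.Context, msgs <-chan string) {
	for {
		select {
		case <-ctx.Done(): // connection closed or relay shutting down
			fmt.Println("listener done:", ctx.Err())
			return
		case m := <-msgs:
			fmt.Println("process:", m) // single-threaded processing
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	msgs := make(chan string, 1)
	msgs <- `["REQ","sub",{}]`
	go serve(ctx, msgs)
	time.Sleep(10 * time.Millisecond)
	cancel() // context-based cancellation throughout
	time.Sleep(10 * time.Millisecond)
}
```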
@@ -221,18 +244,21 @@ l.publishers.Receive(&W{...})
## Trade-offs Analysis
### Khatru Advantages
- **Simplicity**: Easier to understand and modify
- **Performance**: Lower latency due to concurrent processing
- **Flexibility**: Hook-based architecture allows extensive customization
- **Streaming**: Events sent as soon as they're found
### Khatru Disadvantages
- **Monolithic**: Large methods harder to maintain
- **Limited ACL**: Basic authentication and authorization
- **Error handling**: Less graceful failure recovery
- **Resource usage**: No explicit memory management
### Next.orly.dev Advantages
- **Security**: Comprehensive ACL and privacy features
- **Observability**: Extensive logging and metrics
- **Resource management**: Explicit memory and connection lifecycle management
@@ -240,6 +266,7 @@ l.publishers.Receive(&W{...})
- **Robustness**: Graceful handling of edge cases and failures
### Next.orly.dev Disadvantages
- **Complexity**: Higher cognitive overhead and learning curve
- **Latency**: Sequential processing may be slower for some use cases
- **Resource overhead**: More memory usage due to batching and state tracking
@@ -253,7 +280,8 @@ Both implementations represent different philosophies:
- **Next.orly.dev** prioritizes security, observability, and robustness through comprehensive built-in features
The choice between them depends on specific requirements:
- Choose **Khatru** for high-performance relays with custom business logic
- Choose **Next.orly.dev** for production relays requiring comprehensive access control and monitoring
Both approaches demonstrate mature understanding of Nostr protocol requirements while making different trade-offs in complexity vs. features.

View File

@@ -1,5 +1,4 @@
# realy.lol/pkg/ec
This is a full drop-in replacement for
[github.com/btcsuite/btcd/btcec](https://github.com/btcsuite/btcd/tree/master/btcec)
@@ -20,7 +19,7 @@ message signing with the extra test vectors present and passing.
The remainder of this document is from the original README.md.
---
Package `ec` implements elliptic curve cryptography needed for working with
Bitcoin. It is designed so that it may be used with the standard

View File

@@ -1,8 +1,6 @@
# chainhash
# [![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
chainhash provides a generic hash type and associated functions that allows the
specific hash algorithm to be abstracted.

View File

@@ -1,5 +1,4 @@
# ecdsa
[![ISC License](https://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/mleku.online/git/ec/secp/ecdsa)

View File

@@ -14,45 +14,25 @@
],
"valid_test_cases": [
{
"key_indices": [
0,
1,
2
],
"key_indices": [0, 1, 2],
"expected": "90539EEDE565F5D054F32CC0C220126889ED1E5D193BAF15AEF344FE59D4610C"
},
{
"key_indices": [
2,
1,
0
],
"key_indices": [2, 1, 0],
"expected": "6204DE8B083426DC6EAF9502D27024D53FC826BF7D2012148A0575435DF54B2B"
},
{
"key_indices": [
0,
0,
0
],
"key_indices": [0, 0, 0],
"expected": "B436E3BAD62B8CD409969A224731C193D051162D8C5AE8B109306127DA3AA935"
},
{
"key_indices": [
0,
0,
1,
1
],
"key_indices": [0, 0, 1, 1],
"expected": "69BC22BFA5D106306E48A20679DE1D7389386124D07571D0D872686028C26A3E"
}
],
"error_test_cases": [
{
"key_indices": [
0,
3
],
"key_indices": [0, 3],
"tweak_indices": [],
"is_xonly": [],
"error": {
@@ -63,10 +43,7 @@
"comment": "Invalid public key"
},
{
"key_indices": [
0,
4
],
"key_indices": [0, 4],
"tweak_indices": [],
"is_xonly": [],
"error": {
@@ -77,10 +54,7 @@
"comment": "Public key exceeds field size"
},
{
"key_indices": [
5,
0
],
"key_indices": [5, 0],
"tweak_indices": [],
"is_xonly": [],
"error": {
@@ -91,16 +65,9 @@
"comment": "First byte of public key is not 2 or 3"
},
{
"key_indices": [
0,
1
],
"tweak_indices": [
0
],
"is_xonly": [
true
],
"key_indices": [0, 1],
"tweak_indices": [0],
"is_xonly": [true],
"error": {
"type": "value",
"message": "The tweak must be less than n."
@@ -108,15 +75,9 @@
"comment": "Tweak is out of range"
},
{
"key_indices": [
6
],
"tweak_indices": [
1
],
"is_xonly": [
false
],
"key_indices": [6],
"tweak_indices": [1],
"is_xonly": [false],
"error": {
"type": "value",
"message": "The result of tweaking cannot be infinity."

View File

@@ -10,27 +10,18 @@
],
"valid_test_cases": [
{
"pnonce_indices": [
0,
1
],
"pnonce_indices": [0, 1],
"expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B024725377345BDE0E9C33AF3C43C0A29A9249F2F2956FA8CFEB55C8573D0262DC8"
},
{
"pnonce_indices": [
2,
3
],
"pnonce_indices": [2, 3],
"expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B000000000000000000000000000000000000000000000000000000000000000000",
"comment": "Sum of second points encoded in the nonces is point at infinity which is serialized as 33 zero bytes"
}
],
"error_test_cases": [
{
"pnonce_indices": [
0,
4
],
"pnonce_indices": [0, 4],
"error": {
"type": "invalid_contribution",
"signer": 1,
@@ -40,10 +31,7 @@
"btcec_err": "invalid public key: unsupported format: 4"
},
{
"pnonce_indices": [
5,
1
],
"pnonce_indices": [5, 1],
"error": {
"type": "invalid_contribution",
"signer": 0,
@@ -53,10 +41,7 @@
"btcec_err": "invalid public key: x coordinate 48c264cdd57d3c24d79990b0f865674eb62a0f9018277a95011b41bfc193b831 is not on the secp256k1 curve"
},
{
"pnonce_indices": [
6,
1
],
"pnonce_indices": [6, 1],
"error": {
"type": "invalid_contribution",
"signer": 0,

View File

@@ -37,4 +37,4 @@
"expected": "890E83616A3BC4640AB9B6374F21C81FF89CDDDBAFAA7475AE2A102A92E3EDB29FD7E874E23342813A60D9646948242646B7951CA046B4B36D7D6078506D3C9402F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9"
}
]
}

View File

@@ -33,114 +33,49 @@
"valid_test_cases": [
{
"aggnonce": "0341432722C5CD0268D829C702CF0D1CBCE57033EED201FD335191385227C3210C03D377F2D258B64AADC0E16F26462323D701D286046A2EA93365656AFD9875982B",
"nonce_indices": [
0,
1
],
"key_indices": [
0,
1
],
"nonce_indices": [0, 1],
"key_indices": [0, 1],
"tweak_indices": [],
"is_xonly": [],
"psig_indices": [
0,
1
],
"psig_indices": [0, 1],
"expected": "041DA22223CE65C92C9A0D6C2CAC828AAF1EEE56304FEC371DDF91EBB2B9EF0912F1038025857FEDEB3FF696F8B99FA4BB2C5812F6095A2E0004EC99CE18DE1E"
},
{
"aggnonce": "0224AFD36C902084058B51B5D36676BBA4DC97C775873768E58822F87FE437D792028CB15929099EEE2F5DAE404CD39357591BA32E9AF4E162B8D3E7CB5EFE31CB20",
"nonce_indices": [
0,
2
],
"key_indices": [
0,
2
],
"nonce_indices": [0, 2],
"key_indices": [0, 2],
"tweak_indices": [],
"is_xonly": [],
"psig_indices": [
2,
3
],
"psig_indices": [2, 3],
"expected": "1069B67EC3D2F3C7C08291ACCB17A9C9B8F2819A52EB5DF8726E17E7D6B52E9F01800260A7E9DAC450F4BE522DE4CE12BA91AEAF2B4279219EF74BE1D286ADD9"
},
{
"aggnonce": "0208C5C438C710F4F96A61E9FF3C37758814B8C3AE12BFEA0ED2C87FF6954FF186020B1816EA104B4FCA2D304D733E0E19CEAD51303FF6420BFD222335CAA402916D",
"nonce_indices": [
0,
3
],
"key_indices": [
0,
2
],
"tweak_indices": [
0
],
"is_xonly": [
false
],
"psig_indices": [
4,
5
],
"nonce_indices": [0, 3],
"key_indices": [0, 2],
"tweak_indices": [0],
"is_xonly": [false],
"psig_indices": [4, 5],
"expected": "5C558E1DCADE86DA0B2F02626A512E30A22CF5255CAEA7EE32C38E9A71A0E9148BA6C0E6EC7683B64220F0298696F1B878CD47B107B81F7188812D593971E0CC"
},
{
"aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
"nonce_indices": [
0,
4
],
"key_indices": [
0,
3
],
"tweak_indices": [
0,
1,
2
],
"is_xonly": [
true,
false,
true
],
"psig_indices": [
6,
7
],
"nonce_indices": [0, 4],
"key_indices": [0, 3],
"tweak_indices": [0, 1, 2],
"is_xonly": [true, false, true],
"psig_indices": [6, 7],
"expected": "839B08820B681DBA8DAF4CC7B104E8F2638F9388F8D7A555DC17B6E6971D7426CE07BF6AB01F1DB50E4E33719295F4094572B79868E440FB3DEFD3FAC1DB589E"
}
],
"error_test_cases": [
{
"aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
"nonce_indices": [
0,
4
],
"key_indices": [
0,
3
],
"tweak_indices": [
0,
1,
2
],
"is_xonly": [
true,
false,
true
],
"psig_indices": [
7,
8
],
"nonce_indices": [0, 4],
"key_indices": [0, 3],
"tweak_indices": [0, 1, 2],
"is_xonly": [true, false, true],
"psig_indices": [7, 8],
"error": {
"type": "invalid_contribution",
"signer": 1
@@ -148,4 +83,4 @@
"comment": "Partial signature is invalid because it exceeds group size"
}
]
}

View File

@@ -31,62 +31,32 @@
],
"valid_test_cases": [
{
"key_indices": [
0,
1,
2
],
"nonce_indices": [
0,
1,
2
],
"key_indices": [0, 1, 2],
"nonce_indices": [0, 1, 2],
"aggnonce_index": 0,
"msg_index": 0,
"signer_index": 0,
"expected": "012ABBCB52B3016AC03AD82395A1A415C48B93DEF78718E62A7A90052FE224FB"
},
{
"key_indices": [
1,
0,
2
],
"nonce_indices": [
1,
0,
2
],
"key_indices": [1, 0, 2],
"nonce_indices": [1, 0, 2],
"aggnonce_index": 0,
"msg_index": 0,
"signer_index": 1,
"expected": "9FF2F7AAA856150CC8819254218D3ADEEB0535269051897724F9DB3789513A52"
},
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"aggnonce_index": 0,
"msg_index": 0,
"signer_index": 2,
"expected": "FA23C359F6FAC4E7796BB93BC9F0532A95468C539BA20FF86D7C76ED92227900"
},
{
"key_indices": [
0,
1
],
"nonce_indices": [
0,
3
],
"key_indices": [0, 1],
"nonce_indices": [0, 3],
"aggnonce_index": 1,
"msg_index": 0,
"signer_index": 0,
@@ -96,10 +66,7 @@
],
"sign_error_test_cases": [
{
"key_indices": [
1,
2
],
"key_indices": [1, 2],
"aggnonce_index": 0,
"msg_index": 0,
"secnonce_index": 0,
@@ -110,11 +77,7 @@
"comment": "The signers pubkey is not in the list of pubkeys"
},
{
"key_indices": [
1,
0,
3
],
"key_indices": [1, 0, 3],
"aggnonce_index": 0,
"msg_index": 0,
"secnonce_index": 0,
@@ -126,11 +89,7 @@
"comment": "Signer 2 provided an invalid public key"
},
{
"key_indices": [
1,
2,
0
],
"key_indices": [1, 2, 0],
"aggnonce_index": 2,
"msg_index": 0,
"secnonce_index": 0,
@@ -142,11 +101,7 @@
"comment": "Aggregate nonce is invalid due wrong tag, 0x04, in the first half"
},
{
"key_indices": [
1,
2,
0
],
"key_indices": [1, 2, 0],
"aggnonce_index": 3,
"msg_index": 0,
"secnonce_index": 0,
@@ -158,11 +113,7 @@
"comment": "Aggregate nonce is invalid because the second half does not correspond to an X coordinate"
},
{
"key_indices": [
1,
2,
0
],
"key_indices": [1, 2, 0],
"aggnonce_index": 4,
"msg_index": 0,
"secnonce_index": 0,
@@ -174,11 +125,7 @@
"comment": "Aggregate nonce is invalid because second half exceeds field size"
},
{
"key_indices": [
0,
1,
2
],
"key_indices": [0, 1, 2],
"aggnonce_index": 0,
"msg_index": 0,
"signer_index": 0,
@@ -193,48 +140,24 @@
"verify_fail_test_cases": [
{
"sig": "97AC833ADCB1AFA42EBF9E0725616F3C9A0D5B614F6FE283CEAAA37A8FFAF406",
"key_indices": [
0,
1,
2
],
"nonce_indices": [
0,
1,
2
],
"key_indices": [0, 1, 2],
"nonce_indices": [0, 1, 2],
"msg_index": 0,
"signer_index": 0,
"comment": "Wrong signature (which is equal to the negation of valid signature)"
},
{
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
"key_indices": [
0,
1,
2
],
"nonce_indices": [
0,
1,
2
],
"key_indices": [0, 1, 2],
"nonce_indices": [0, 1, 2],
"msg_index": 0,
"signer_index": 1,
"comment": "Wrong signer"
},
{
"sig": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
"key_indices": [
0,
1,
2
],
"nonce_indices": [
0,
1,
2
],
"key_indices": [0, 1, 2],
"nonce_indices": [0, 1, 2],
"msg_index": 0,
"signer_index": 0,
"comment": "Signature exceeds group size"
@@ -243,16 +166,8 @@
"verify_error_test_cases": [
{
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
"key_indices": [
0,
1,
2
],
"nonce_indices": [
4,
1,
2
],
"key_indices": [0, 1, 2],
"nonce_indices": [4, 1, 2],
"msg_index": 0,
"signer_index": 0,
"error": {
@@ -264,16 +179,8 @@
},
{
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
"key_indices": [
3,
1,
2
],
"nonce_indices": [
0,
1,
2
],
"key_indices": [3, 1, 2],
"nonce_indices": [0, 1, 2],
"msg_index": 0,
"signer_index": 0,
"error": {

View File

@@ -22,120 +22,46 @@
"msg": "F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
"valid_test_cases": [
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"tweak_indices": [
0
],
"is_xonly": [
true
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"tweak_indices": [0],
"is_xonly": [true],
"signer_index": 2,
"expected": "E28A5C66E61E178C2BA19DB77B6CF9F7E2F0F56C17918CD13135E60CC848FE91",
"comment": "A single x-only tweak"
},
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"tweak_indices": [
0
],
"is_xonly": [
false
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"tweak_indices": [0],
"is_xonly": [false],
"signer_index": 2,
"expected": "38B0767798252F21BF5702C48028B095428320F73A4B14DB1E25DE58543D2D2D",
"comment": "A single plain tweak"
},
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"tweak_indices": [
0,
1
],
"is_xonly": [
false,
true
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"tweak_indices": [0, 1],
"is_xonly": [false, true],
"signer_index": 2,
"expected": "408A0A21C4A0F5DACAF9646AD6EB6FECD7F7A11F03ED1F48DFFF2185BC2C2408",
"comment": "A plain tweak followed by an x-only tweak"
},
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"tweak_indices": [
0,
1,
2,
3
],
"is_xonly": [
false,
false,
true,
true
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"tweak_indices": [0, 1, 2, 3],
"is_xonly": [false, false, true, true],
"signer_index": 2,
"expected": "45ABD206E61E3DF2EC9E264A6FEC8292141A633C28586388235541F9ADE75435",
"comment": "Four tweaks: plain, plain, x-only, x-only."
},
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"tweak_indices": [
0,
1,
2,
3
],
"is_xonly": [
true,
false,
true,
false
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"tweak_indices": [0, 1, 2, 3],
"is_xonly": [true, false, true, false],
"signer_index": 2,
"expected": "B255FDCAC27B40C7CE7848E2D3B7BF5EA0ED756DA81565AC804CCCA3E1D5D239",
"comment": "Four tweaks: x-only, plain, x-only, plain. If an implementation prohibits applying plain tweaks after x-only tweaks, it can skip this test vector or return an error."
@@ -143,22 +69,10 @@
],
"error_test_cases": [
{
"key_indices": [
1,
2,
0
],
"nonce_indices": [
1,
2,
0
],
"tweak_indices": [
4
],
"is_xonly": [
false
],
"key_indices": [1, 2, 0],
"nonce_indices": [1, 2, 0],
"tweak_indices": [4],
"is_xonly": [false],
"signer_index": 2,
"error": {
"type": "value",

View File

@@ -25,16 +25,16 @@ An overview of the features provided by this package are as follows:
- Secret key generation, serialization, and parsing
- Public key generation, serialization and parsing per ANSI X9.62-1998
- Parses uncompressed, compressed, and hybrid public keys
- Serializes uncompressed and compressed public keys
- Specialized types for performing optimized and constant time field operations
- `FieldVal` type for working modulo the secp256k1 field prime
- `ModNScalar` type for working modulo the secp256k1 group order
- Elliptic curve operations in Jacobian projective coordinates
- Point addition
- Point doubling
- Scalar multiplication with an arbitrary point
- Scalar multiplication with the base point (group generator)
- Point decompression from a given x coordinate
- Nonce generation via RFC6979 with support for extra data and version
information that can be used to prevent nonce reuse between signing algorithms
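To ground the feature list, here is a short sketch of key generation and serialization written against the btcec/decred-style secp256k1 API this package is a drop-in for; treat the import path as an assumption and substitute this package's path.
```go
package main

import (
	"fmt"

	"github.com/decred/dcrd/dcrec/secp256k1/v4"
)

func main() {
	// Secret key generation, then public key derivation and serialization.
	priv, err := secp256k1.GeneratePrivateKey()
	if err != nil {
		panic(err)
	}
	pub := priv.PubKey()
	fmt.Printf("compressed pubkey: %x\n", pub.SerializeCompressed())
}
```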

View File

@@ -25,7 +25,7 @@ it
For Ubuntu, you need these:
sudo apt -y install build-essential autoconf libtool
For other linux distributions, the process is the same but the dependencies are
likely different. The main thing is it requires make, gcc/++, autoconf and
@@ -65,4 +65,4 @@ coordinate and this is incorrect for nostr. It will be enabled soon... for now
it is done with the `btcec` fallback version. This is slower, however previous
tests have shown that this ECDH library is fast enough to enable 8mb/s
throughput per CPU thread when used to generate a distinct secret for TCP
packets. The C library will likely raise this to 20mb/s or more.

View File

@@ -95,9 +95,9 @@ Note that, because of the scheduling overhead, for small messages (< 1 MB) you
will be better off using the regular SHA256 hashing (but those are typically not
performance critical anyway). Some other tips to get the best performance:
- Have many go routines doing SHA256 calculations in parallel.
- Try to Write() messages in multiples of 64 bytes.
- Try to keep the overall length of messages to a roughly similar size, i.e. 5
MB (this way all 16 lanes in the AVX512 computations are contributing as
much as possible).
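A hedged sketch of those tips in practice, hashing several similarly sized messages from parallel goroutines with the drop-in package:
```go
package main

import (
	"fmt"
	"sync"

	sha256 "github.com/minio/sha256-simd"
)

func main() {
	msgs := make([][]byte, 8)
	for i := range msgs {
		msgs[i] = make([]byte, 5<<20) // roughly similar sizes, per the tips
	}
	sums := make([][]byte, len(msgs))
	var wg sync.WaitGroup
	for i, m := range msgs {
		wg.Add(1)
		go func(i int, m []byte) { // one goroutine per message
			defer wg.Done()
			h := sha256.New()
			h.Write(m) // ideally written in multiples of 64 bytes
			sums[i] = h.Sum(nil)
		}(i, m)
	}
	wg.Wait()
	fmt.Printf("first digest prefix: %x\n", sums[0][:8])
}
```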
@@ -128,7 +128,7 @@ Below is the speed in MB/s for a single core (ranked fast to slow) for blocks
larger than 1 MB.
| Processor | SIMD | Speed (MB/s) |
| --------------------------------- | ------- | -----------: |
| 3.0 GHz Intel Xeon Platinum 8124M | AVX512 | 3498 |
| 3.7 GHz AMD Ryzen 7 2700X | SHA Ext | 1979 |
| 1.2 GHz ARM Cortex-A53 | ARM64 | 638 |
@@ -160,18 +160,18 @@ Below you can see a small excerpt highlighting one of the rounds as is done for
the SHA256 calculation process (for full code
see [sha256block_arm64.s](https://github.com/minio/sha256-simd/blob/master/sha256block_arm64.s)).
```
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v5.4s, v6.4s
rev32 v8.16b, v8.16b
add v9.4s, v7.4s, v18.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v6.4s, v7.4s
sha256su1 v5.4s, v7.4s, v8.4s
```
### Detailed benchmarks

View File

@@ -28,7 +28,7 @@ err = client.Request(ctx, "make_invoice", params, &invoice)
## Methods
- `get_info` - Get wallet info
- `get_balance` - Get wallet balance
- `make_invoice` - Create invoice
- `lookup_invoice` - Check invoice status
- `pay_invoice` - Pay invoice
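Continuing the `client.Request` pattern from the snippet above, a hedged example of calling `get_balance`; whether `Request` accepts nil params and the response field name (millisats per NIP-47) are assumptions.
```go
// Hypothetical result shape based on NIP-47's get_balance response.
var balance struct {
	Balance int64 `json:"balance"` // amount in millisats
}
if err := client.Request(ctx, "get_balance", nil, &balance); err != nil {
	log.Fatal(err)
}
fmt.Printf("balance: %d msats\n", balance.Balance)
```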
@@ -53,4 +53,4 @@ err = client.SubscribeNotifications(ctx, func(notificationType string, notificat
- Event signing
- Relay communication
- Payment notifications
- Error handling

View File

@@ -4,14 +4,15 @@ coverage:
precision: 2
status:
project: # measuring the overall project coverage
default: # context, you can create multiple ones with custom titles
enabled: yes # must be yes|true to enable this status
target:
100 # specify the target coverage for each commit status
# option: "auto" (must increase from parent commit or pull request base)
# option: "X%" a static target percentage to hit
if_not_found: success # if parent is not found report status as success, error, or failure
if_ci_failed: error # if ci fails report status as success, error, or failure
# Also update COVER_IGNORE_PKGS in the Makefile.
ignore:

View File

@@ -1,24 +1,31 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Unreleased
- No changes yet.
## [1.11.0] - 2023-05-02
### Fixed
- Fix `Swap` and `CompareAndSwap` for `Value` wrappers without initialization.
### Added
- Add `String` method to `atomic.Pointer[T]` type allowing users to safely print
underlying values of pointers.
[1.11.0]: https://github.com/uber-go/atomic/compare/v1.10.0...v1.11.0
## [1.10.0] - 2022-08-11
### Added
- Add `atomic.Float32` type for atomic operations on `float32`.
- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`,
and `atomic.Value`.
@@ -27,6 +34,7 @@ underlying values of pointers.
replacement for the standard library's `sync/atomic.Pointer` type.
### Changed
- Deprecate `CAS` methods on all types in favor of corresponding
`CompareAndSwap` methods.
@@ -35,46 +43,59 @@ Thanks to @eNV25 and @icpd for their contributions to this release.
[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0
## [1.9.0] - 2021-07-15
### Added
- Add `Float64.Swap` to match int atomic operations.
- Add `atomic.Time` type for atomic operations on `time.Time` values.
[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0
## [1.8.0] - 2021-06-09
### Added
- Add `atomic.Uintptr` type for atomic operations on `uintptr` values.
- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values.
[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0
## [1.7.0] - 2020-09-14
### Added
- Support JSON serialization and deserialization of primitive atomic types.
- Support Text marshalling and unmarshalling for string atomics.
### Changed
- Disallow incorrect comparison of atomic values in a non-atomic way.
### Removed
- Remove dependency on `golang.org/x/{lint, tools}`.
[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
## [1.6.0] - 2020-02-24
### Changed
- Drop library dependency on `golang.org/x/{lint, tools}`.
[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
## [1.5.1] - 2019-11-19
- Fix bug where `Bool.CAS` and `Bool.Toggle` did not work correctly together,
causing `CAS` to fail even though the old value matched.
[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
## [1.5.0] - 2019-10-29
### Changed
- With Go modules, only the `go.uber.org/atomic` import path is supported now.
If you need to use the old import path, please add a `replace` directive to
your `go.mod`.
@@ -82,43 +103,57 @@ Thanks to @eNV25 and @icpd for their contributions to this release.
[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
## [1.4.0] - 2019-05-01
### Added
- Add `atomic.Error` type for atomic operations on `error` values.
[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
## [1.3.2] - 2018-05-02
### Added
- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
## [1.3.1] - 2017-11-14
### Fixed
- Revert optimization for `atomic.String.Store("")` which caused data races.
[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
## [1.3.0] - 2017-11-13
### Added
- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
### Changed
- Optimize `atomic.String.Store("")` by avoiding an allocation.
[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
## [1.2.0] - 2017-04-12
### Added
- Shadow `atomic.Value` from `sync/atomic`.
[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
## [1.1.0] - 2017-03-10
### Added
- Add atomic `Float64` type.
### Changed
- Support new `go.uber.org/atomic` import path.
[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0

View File

@@ -30,4 +30,4 @@ Stable.
---
Released under the [MIT License](LICENSE.txt).

View File

@@ -1,2 +1,3 @@
# interrupt
Handle shutdowns cleanly and enable hot reload