Compare commits
26 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
83c27a52b0
|
|||
|
1e9c447fe6
|
|||
|
6b98c23606
|
|||
|
8dbc19ee9e
|
|||
|
290fcbf8f0
|
|||
|
54ead81791
|
|||
|
746523ea78
|
|||
|
52189633d9
|
|||
|
59247400dc
|
|||
|
7a27c44bc9
|
|||
|
6bd56a30c9
|
|||
|
880772cab1
|
|||
|
1851ba39fa
|
|||
|
de290aeb25
|
|||
|
0a61f274d5
|
|||
|
c8fac06f24
|
|||
|
64c6bd8bdd
|
|||
|
58d75bfc5a
|
|||
|
69e2c873d8
|
|||
|
6c7d55ff7e
|
|||
|
3c17e975df
|
|||
|
feae79af1a
|
|||
|
ebef8605eb
|
|||
|
c5db0abf73
|
|||
|
016e97925a
|
|||
|
042b47a4d9
|
@@ -153,7 +153,35 @@
|
||||
"Bash(git check-ignore:*)",
|
||||
"Bash(git commit:*)",
|
||||
"WebFetch(domain:www.npmjs.com)",
|
||||
"Bash(git stash:*)"
|
||||
"Bash(git stash:*)",
|
||||
"WebFetch(domain:arxiv.org)",
|
||||
"WebFetch(domain:hal.science)",
|
||||
"WebFetch(domain:pkg.go.dev)",
|
||||
"Bash(GOOS=js GOARCH=wasm CGO_ENABLED=0 go build:*)",
|
||||
"Bash(GOOS=js GOARCH=wasm go doc:*)",
|
||||
"Bash(GOOS=js GOARCH=wasm CGO_ENABLED=0 go test:*)",
|
||||
"Bash(node --version:*)",
|
||||
"Bash(npm install)",
|
||||
"Bash(node run_wasm_tests.mjs:*)",
|
||||
"Bash(go env:*)",
|
||||
"Bash(GOROOT=/home/mleku/go node run_wasm_tests.mjs:*)",
|
||||
"Bash(./orly:*)",
|
||||
"Bash(./orly -version:*)",
|
||||
"Bash(./orly --version:*)",
|
||||
"Bash(GOOS=js GOARCH=wasm go test:*)",
|
||||
"Bash(ls:*)",
|
||||
"Bash(GOROOT=/home/mleku/go node:*)",
|
||||
"Bash(GOOS=js GOARCH=wasm go build:*)",
|
||||
"Bash(go mod graph:*)",
|
||||
"Bash(xxd:*)",
|
||||
"Bash(CGO_ENABLED=0 go mod tidy:*)",
|
||||
"WebFetch(domain:git.mleku.dev)",
|
||||
"Bash(CGO_ENABLED=0 LOG_LEVEL=trace go test:*)",
|
||||
"Bash(go vet:*)",
|
||||
"Bash(gofmt:*)",
|
||||
"Skill(cypher)",
|
||||
"Bash(git mv:*)",
|
||||
"Bash(CGO_ENABLED=0 go run:*)"
|
||||
],
|
||||
"deny": [],
|
||||
"ask": []
|
||||
|
||||
395
.claude/skills/cypher/SKILL.md
Normal file
395
.claude/skills/cypher/SKILL.md
Normal file
@@ -0,0 +1,395 @@
|
||||
---
|
||||
name: cypher
|
||||
description: This skill should be used when writing, debugging, or discussing Neo4j Cypher queries. Provides comprehensive knowledge of Cypher syntax, query patterns, performance optimization, and common mistakes. Particularly useful for translating between domain models and graph queries.
|
||||
---
|
||||
|
||||
# Neo4j Cypher Query Language
|
||||
|
||||
## Purpose
|
||||
|
||||
This skill provides expert-level guidance for writing Neo4j Cypher queries, including syntax, patterns, performance optimization, and common pitfalls. It is particularly tuned for the patterns used in this ORLY Nostr relay codebase.
|
||||
|
||||
## When to Use
|
||||
|
||||
Activate this skill when:
|
||||
- Writing Cypher queries for Neo4j
|
||||
- Debugging Cypher syntax errors
|
||||
- Optimizing query performance
|
||||
- Translating Nostr filter queries to Cypher
|
||||
- Working with graph relationships and traversals
|
||||
- Creating or modifying schema (indexes, constraints)
|
||||
|
||||
## Core Cypher Syntax
|
||||
|
||||
### Clause Order (CRITICAL)
|
||||
|
||||
Cypher requires clauses in a specific order. Violating this causes syntax errors:
|
||||
|
||||
```cypher
|
||||
// CORRECT order of clauses
|
||||
MATCH (n:Label) // 1. Pattern matching
|
||||
WHERE n.prop = value // 2. Filtering
|
||||
WITH n, count(*) AS cnt // 3. Intermediate results (resets scope)
|
||||
OPTIONAL MATCH (n)-[r]-() // 4. Optional patterns
|
||||
CREATE (m:NewNode) // 5. Node/relationship creation
|
||||
SET n.prop = value // 6. Property updates
|
||||
DELETE r // 7. Deletions
|
||||
RETURN n.prop AS result // 8. Return clause
|
||||
ORDER BY result DESC // 9. Ordering
|
||||
SKIP 10 LIMIT 20 // 10. Pagination
|
||||
```
|
||||
|
||||
### The WITH Clause (CRITICAL)
|
||||
|
||||
The `WITH` clause is required to transition between certain operations:
|
||||
|
||||
**Rule: Cannot use MATCH after CREATE without WITH**
|
||||
|
||||
```cypher
|
||||
// WRONG - MATCH after CREATE without WITH
|
||||
CREATE (e:Event {id: $id})
|
||||
MATCH (ref:Event {id: $refId}) // ERROR!
|
||||
CREATE (e)-[:REFERENCES]->(ref)
|
||||
|
||||
// CORRECT - Use WITH to carry variables forward
|
||||
CREATE (e:Event {id: $id})
|
||||
WITH e
|
||||
MATCH (ref:Event {id: $refId})
|
||||
CREATE (e)-[:REFERENCES]->(ref)
|
||||
```
|
||||
|
||||
**Rule: WITH resets the scope**
|
||||
|
||||
Variables not included in WITH are no longer accessible:
|
||||
|
||||
```cypher
|
||||
// WRONG - 'a' is lost after WITH
|
||||
MATCH (a:Author), (e:Event)
|
||||
WITH e
|
||||
WHERE a.pubkey = $pubkey // ERROR: 'a' not defined
|
||||
|
||||
// CORRECT - Include all needed variables
|
||||
MATCH (a:Author), (e:Event)
|
||||
WITH a, e
|
||||
WHERE a.pubkey = $pubkey
|
||||
```
|
||||
|
||||
### Node and Relationship Patterns
|
||||
|
||||
```cypher
|
||||
// Nodes
|
||||
(n) // Anonymous node
|
||||
(n:Label) // Labeled node
|
||||
(n:Label {prop: value}) // Node with properties
|
||||
(n:Label:OtherLabel) // Multiple labels
|
||||
|
||||
// Relationships
|
||||
-[r]-> // Directed, anonymous
|
||||
-[r:TYPE]-> // Typed relationship
|
||||
-[r:TYPE {prop: value}]-> // With properties
|
||||
-[r:TYPE|OTHER]-> // Multiple types (OR)
|
||||
-[*1..3]-> // Variable length (1 to 3 hops)
|
||||
-[*]-> // Any number of hops
|
||||
```
|
||||
|
||||
### MERGE vs CREATE
|
||||
|
||||
**CREATE**: Always creates new nodes/relationships (may create duplicates)
|
||||
|
||||
```cypher
|
||||
CREATE (n:Event {id: $id}) // Creates even if id exists
|
||||
```
|
||||
|
||||
**MERGE**: Finds or creates (idempotent)
|
||||
|
||||
```cypher
|
||||
MERGE (n:Event {id: $id}) // Finds existing or creates new
|
||||
ON CREATE SET n.created = timestamp()
|
||||
ON MATCH SET n.accessed = timestamp()
|
||||
```
|
||||
|
||||
**Best Practice**: Use MERGE for reference nodes, CREATE for unique events
|
||||
|
||||
```cypher
|
||||
// Reference nodes - use MERGE (idempotent)
|
||||
MERGE (author:Author {pubkey: $pubkey})
|
||||
|
||||
// Unique events - use CREATE (after checking existence)
|
||||
CREATE (e:Event {id: $eventId, ...})
|
||||
```
|
||||
|
||||
### OPTIONAL MATCH
|
||||
|
||||
Returns NULL for non-matching patterns (like LEFT JOIN):
|
||||
|
||||
```cypher
|
||||
// Find events, with or without tags
|
||||
MATCH (e:Event)
|
||||
OPTIONAL MATCH (e)-[:TAGGED_WITH]->(t:Tag)
|
||||
RETURN e.id, collect(t.value) AS tags
|
||||
```
|
||||
|
||||
### Conditional Creation with FOREACH
|
||||
|
||||
To conditionally create relationships:
|
||||
|
||||
```cypher
|
||||
// FOREACH trick for conditional operations
|
||||
OPTIONAL MATCH (ref:Event {id: $refId})
|
||||
FOREACH (ignoreMe IN CASE WHEN ref IS NOT NULL THEN [1] ELSE [] END |
|
||||
CREATE (e)-[:REFERENCES]->(ref)
|
||||
)
|
||||
```
|
||||
|
||||
### Aggregation Functions
|
||||
|
||||
```cypher
|
||||
count(*) // Count all rows
|
||||
count(n) // Count non-null values
|
||||
count(DISTINCT n) // Count unique values
|
||||
collect(n) // Collect into list
|
||||
collect(DISTINCT n) // Collect unique values
|
||||
sum(n.value) // Sum values
|
||||
avg(n.value) // Average
|
||||
min(n.value), max(n.value) // Min/max
|
||||
```
|
||||
|
||||
### String Operations
|
||||
|
||||
```cypher
|
||||
// String matching
|
||||
WHERE n.name STARTS WITH 'prefix'
|
||||
WHERE n.name ENDS WITH 'suffix'
|
||||
WHERE n.name CONTAINS 'substring'
|
||||
WHERE n.name =~ 'regex.*pattern' // Regex
|
||||
|
||||
// String functions
|
||||
toLower(str), toUpper(str)
|
||||
trim(str), ltrim(str), rtrim(str)
|
||||
substring(str, start, length)
|
||||
replace(str, search, replacement)
|
||||
```
|
||||
|
||||
### List Operations
|
||||
|
||||
```cypher
|
||||
// IN clause
|
||||
WHERE n.kind IN [1, 7, 30023]
|
||||
WHERE n.pubkey IN $pubkeyList
|
||||
|
||||
// List comprehension
|
||||
[x IN list WHERE x > 0 | x * 2]
|
||||
|
||||
// UNWIND - expand list into rows
|
||||
UNWIND $pubkeys AS pubkey
|
||||
MERGE (u:User {pubkey: pubkey})
|
||||
```
|
||||
|
||||
### Parameters
|
||||
|
||||
Always use parameters for values (security + performance):
|
||||
|
||||
```cypher
|
||||
// CORRECT - parameterized
|
||||
MATCH (e:Event {id: $eventId})
|
||||
WHERE e.kind IN $kinds
|
||||
|
||||
// WRONG - string interpolation (SQL injection risk!)
|
||||
MATCH (e:Event {id: '" + eventId + "'})
|
||||
```
|
||||
|
||||
## Schema Management
|
||||
|
||||
### Constraints
|
||||
|
||||
```cypher
|
||||
// Uniqueness constraint (also creates index)
|
||||
CREATE CONSTRAINT event_id_unique IF NOT EXISTS
|
||||
FOR (e:Event) REQUIRE e.id IS UNIQUE
|
||||
|
||||
// Composite uniqueness
|
||||
CREATE CONSTRAINT card_unique IF NOT EXISTS
|
||||
FOR (c:Card) REQUIRE (c.customer_id, c.observee_pubkey) IS UNIQUE
|
||||
|
||||
// Drop constraint
|
||||
DROP CONSTRAINT event_id_unique IF EXISTS
|
||||
```
|
||||
|
||||
### Indexes
|
||||
|
||||
```cypher
|
||||
// Single property index
|
||||
CREATE INDEX event_kind IF NOT EXISTS FOR (e:Event) ON (e.kind)
|
||||
|
||||
// Composite index
|
||||
CREATE INDEX event_kind_created IF NOT EXISTS
|
||||
FOR (e:Event) ON (e.kind, e.created_at)
|
||||
|
||||
// Drop index
|
||||
DROP INDEX event_kind IF EXISTS
|
||||
```
|
||||
|
||||
## Common Query Patterns
|
||||
|
||||
### Find with Filter
|
||||
|
||||
```cypher
|
||||
// Multiple conditions with OR
|
||||
MATCH (e:Event)
|
||||
WHERE e.kind IN $kinds
|
||||
AND (e.id = $id1 OR e.id = $id2)
|
||||
AND e.created_at >= $since
|
||||
RETURN e
|
||||
ORDER BY e.created_at DESC
|
||||
LIMIT $limit
|
||||
```
|
||||
|
||||
### Graph Traversal
|
||||
|
||||
```cypher
|
||||
// Find events by author
|
||||
MATCH (e:Event)-[:AUTHORED_BY]->(a:Author {pubkey: $pubkey})
|
||||
RETURN e
|
||||
|
||||
// Find followers of a user
|
||||
MATCH (follower:NostrUser)-[:FOLLOWS]->(user:NostrUser {pubkey: $pubkey})
|
||||
RETURN follower.pubkey
|
||||
|
||||
// Find mutual follows (friends)
|
||||
MATCH (a:NostrUser {pubkey: $pubkeyA})-[:FOLLOWS]->(b:NostrUser)
|
||||
WHERE (b)-[:FOLLOWS]->(a)
|
||||
RETURN b.pubkey AS mutual_friend
|
||||
```
|
||||
|
||||
### Upsert Pattern
|
||||
|
||||
```cypher
|
||||
MERGE (n:Node {key: $key})
|
||||
ON CREATE SET
|
||||
n.created_at = timestamp(),
|
||||
n.value = $value
|
||||
ON MATCH SET
|
||||
n.updated_at = timestamp(),
|
||||
n.value = $value
|
||||
RETURN n
|
||||
```
|
||||
|
||||
### Batch Processing with UNWIND
|
||||
|
||||
```cypher
|
||||
// Create multiple nodes from list
|
||||
UNWIND $items AS item
|
||||
CREATE (n:Node {id: item.id, value: item.value})
|
||||
|
||||
// Create relationships from list
|
||||
UNWIND $follows AS followed_pubkey
|
||||
MERGE (followed:NostrUser {pubkey: followed_pubkey})
|
||||
MERGE (author)-[:FOLLOWS]->(followed)
|
||||
```
|
||||
|
||||
## Performance Optimization
|
||||
|
||||
### Index Usage
|
||||
|
||||
1. **Start with indexed properties** - Begin MATCH with most selective indexed field
|
||||
2. **Use composite indexes** - For queries filtering on multiple properties
|
||||
3. **Profile queries** - Use `PROFILE` prefix to see execution plan
|
||||
|
||||
```cypher
|
||||
PROFILE MATCH (e:Event {kind: 1})
|
||||
WHERE e.created_at > $since
|
||||
RETURN e LIMIT 100
|
||||
```
|
||||
|
||||
### Query Optimization Tips
|
||||
|
||||
1. **Filter early** - Put WHERE conditions close to MATCH
|
||||
2. **Limit early** - Use LIMIT as early as possible
|
||||
3. **Avoid Cartesian products** - Connect patterns or use WITH
|
||||
4. **Use parameters** - Enables query plan caching
|
||||
|
||||
```cypher
|
||||
// GOOD - Filter and limit early
|
||||
MATCH (e:Event)
|
||||
WHERE e.kind IN $kinds AND e.created_at >= $since
|
||||
WITH e ORDER BY e.created_at DESC LIMIT 100
|
||||
OPTIONAL MATCH (e)-[:TAGGED_WITH]->(t:Tag)
|
||||
RETURN e, collect(t)
|
||||
|
||||
// BAD - Late filtering
|
||||
MATCH (e:Event), (t:Tag)
|
||||
WHERE e.kind IN $kinds
|
||||
RETURN e, t LIMIT 100
|
||||
```
|
||||
|
||||
## Reference Materials
|
||||
|
||||
For detailed information, consult the reference files:
|
||||
|
||||
- **references/syntax-reference.md** - Complete Cypher syntax guide with all clause types, operators, and functions
|
||||
- **references/common-patterns.md** - Project-specific patterns for ORLY Nostr relay including event storage, tag queries, and social graph traversals
|
||||
- **references/common-mistakes.md** - Frequent Cypher errors and how to avoid them
|
||||
|
||||
## ORLY-Specific Patterns
|
||||
|
||||
This codebase uses these specific Cypher patterns:
|
||||
|
||||
### Event Storage Pattern
|
||||
|
||||
```cypher
|
||||
// Create event with author relationship
|
||||
MERGE (a:Author {pubkey: $pubkey})
|
||||
CREATE (e:Event {
|
||||
id: $eventId,
|
||||
serial: $serial,
|
||||
kind: $kind,
|
||||
created_at: $createdAt,
|
||||
content: $content,
|
||||
sig: $sig,
|
||||
pubkey: $pubkey,
|
||||
tags: $tags
|
||||
})
|
||||
CREATE (e)-[:AUTHORED_BY]->(a)
|
||||
```
|
||||
|
||||
### Tag Query Pattern
|
||||
|
||||
```cypher
|
||||
// Query events by tag (Nostr #<tag> filter)
|
||||
MATCH (e:Event)-[:TAGGED_WITH]->(t:Tag {type: $tagType})
|
||||
WHERE t.value IN $tagValues
|
||||
RETURN e
|
||||
ORDER BY e.created_at DESC
|
||||
LIMIT $limit
|
||||
```
|
||||
|
||||
### Social Graph Pattern
|
||||
|
||||
```cypher
|
||||
// Process contact list with diff-based updates
|
||||
// Mark old as superseded
|
||||
OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
|
||||
SET old.superseded_by = $new_event_id
|
||||
|
||||
// Create tracking node
|
||||
CREATE (new:ProcessedSocialEvent {
|
||||
event_id: $new_event_id,
|
||||
event_kind: 3,
|
||||
pubkey: $author_pubkey,
|
||||
created_at: $created_at,
|
||||
processed_at: timestamp()
|
||||
})
|
||||
|
||||
// Update relationships
|
||||
MERGE (author:NostrUser {pubkey: $author_pubkey})
|
||||
WITH author
|
||||
UNWIND $added_follows AS followed_pubkey
|
||||
MERGE (followed:NostrUser {pubkey: followed_pubkey})
|
||||
MERGE (author)-[:FOLLOWS]->(followed)
|
||||
```
|
||||
|
||||
## Official Resources
|
||||
|
||||
- Neo4j Cypher Manual: https://neo4j.com/docs/cypher-manual/current/
|
||||
- Cypher Cheat Sheet: https://neo4j.com/docs/cypher-cheat-sheet/current/
|
||||
- Query Tuning: https://neo4j.com/docs/cypher-manual/current/query-tuning/
|
||||
381
.claude/skills/cypher/references/common-mistakes.md
Normal file
381
.claude/skills/cypher/references/common-mistakes.md
Normal file
@@ -0,0 +1,381 @@
|
||||
# Common Cypher Mistakes and How to Avoid Them
|
||||
|
||||
## Clause Ordering Errors
|
||||
|
||||
### MATCH After CREATE Without WITH
|
||||
|
||||
**Error**: `Invalid input 'MATCH': expected ... WITH`
|
||||
|
||||
```cypher
|
||||
// WRONG
|
||||
CREATE (e:Event {id: $id})
|
||||
MATCH (ref:Event {id: $refId}) // ERROR!
|
||||
CREATE (e)-[:REFERENCES]->(ref)
|
||||
|
||||
// CORRECT - Use WITH to transition
|
||||
CREATE (e:Event {id: $id})
|
||||
WITH e
|
||||
MATCH (ref:Event {id: $refId})
|
||||
CREATE (e)-[:REFERENCES]->(ref)
|
||||
```
|
||||
|
||||
**Rule**: After CREATE, you must use WITH before MATCH.
|
||||
|
||||
### WHERE After WITH Without Carrying Variables
|
||||
|
||||
**Error**: `Variable 'x' not defined`
|
||||
|
||||
```cypher
|
||||
// WRONG - 'a' is lost
|
||||
MATCH (a:Author), (e:Event)
|
||||
WITH e
|
||||
WHERE a.pubkey = $pubkey // ERROR: 'a' not in scope
|
||||
|
||||
// CORRECT - Include all needed variables
|
||||
MATCH (a:Author), (e:Event)
|
||||
WITH a, e
|
||||
WHERE a.pubkey = $pubkey
|
||||
```
|
||||
|
||||
**Rule**: WITH resets the scope. Include all variables you need.
|
||||
|
||||
### ORDER BY Without Aliased Return
|
||||
|
||||
**Error**: `Invalid input 'ORDER': expected ... AS`
|
||||
|
||||
```cypher
|
||||
// WRONG in some contexts
|
||||
RETURN n.name
|
||||
ORDER BY n.name
|
||||
|
||||
// SAFER - Use alias
|
||||
RETURN n.name AS name
|
||||
ORDER BY name
|
||||
```
|
||||
|
||||
## MERGE Mistakes
|
||||
|
||||
### MERGE on Complex Pattern Creates Duplicates
|
||||
|
||||
```cypher
|
||||
// DANGEROUS - May create duplicate nodes
|
||||
MERGE (a:Person {name: 'Alice'})-[:KNOWS]->(b:Person {name: 'Bob'})
|
||||
|
||||
// CORRECT - MERGE nodes separately first
|
||||
MERGE (a:Person {name: 'Alice'})
|
||||
MERGE (b:Person {name: 'Bob'})
|
||||
MERGE (a)-[:KNOWS]->(b)
|
||||
```
|
||||
|
||||
**Rule**: MERGE simple patterns, not complex ones.
|
||||
|
||||
### MERGE Without Unique Property
|
||||
|
||||
```cypher
|
||||
// DANGEROUS - Will keep creating nodes
|
||||
MERGE (p:Person) // No unique identifier!
|
||||
SET p.name = 'Alice'
|
||||
|
||||
// CORRECT - Provide unique key
|
||||
MERGE (p:Person {email: $email})
|
||||
SET p.name = 'Alice'
|
||||
```
|
||||
|
||||
**Rule**: MERGE must have properties that uniquely identify the node.
|
||||
|
||||
### Missing ON CREATE/ON MATCH
|
||||
|
||||
```cypher
|
||||
// LOSES context of whether new or existing
|
||||
MERGE (p:Person {id: $id})
|
||||
SET p.updated_at = timestamp() // Always runs
|
||||
|
||||
// BETTER - Handle each case
|
||||
MERGE (p:Person {id: $id})
|
||||
ON CREATE SET p.created_at = timestamp()
|
||||
ON MATCH SET p.updated_at = timestamp()
|
||||
```
|
||||
|
||||
## NULL Handling Errors
|
||||
|
||||
### Comparing with NULL
|
||||
|
||||
```cypher
|
||||
// WRONG - NULL = NULL is NULL, not true
|
||||
WHERE n.email = null // Never matches!
|
||||
|
||||
// CORRECT
|
||||
WHERE n.email IS NULL
|
||||
WHERE n.email IS NOT NULL
|
||||
```
|
||||
|
||||
### NULL in Aggregations
|
||||
|
||||
```cypher
|
||||
// count(NULL) returns 0, collect(NULL) includes NULL
|
||||
MATCH (n:Person)
|
||||
OPTIONAL MATCH (n)-[:BOUGHT]->(p:Product)
|
||||
RETURN n.name, count(p) // count ignores NULL
|
||||
```
|
||||
|
||||
### NULL Propagation in Expressions
|
||||
|
||||
```cypher
|
||||
// Any operation with NULL returns NULL
|
||||
WHERE n.age + 1 > 21 // If n.age is NULL, whole expression is NULL (falsy)
|
||||
|
||||
// Handle with coalesce
|
||||
WHERE coalesce(n.age, 0) + 1 > 21
|
||||
```
|
||||
|
||||
## List and IN Clause Errors
|
||||
|
||||
### Empty List in IN
|
||||
|
||||
```cypher
|
||||
// An empty list never matches
|
||||
WHERE n.kind IN [] // Always false
|
||||
|
||||
// Check for empty list in application code before query
|
||||
// Or use CASE:
|
||||
WHERE CASE WHEN size($kinds) > 0 THEN n.kind IN $kinds ELSE true END
|
||||
```
|
||||
|
||||
### IN with NULL Values
|
||||
|
||||
```cypher
|
||||
// NULL in the list causes issues
|
||||
WHERE n.id IN [1, NULL, 3] // NULL is never equal to anything
|
||||
|
||||
// Filter NULLs in application code
|
||||
```
|
||||
|
||||
## Relationship Pattern Errors
|
||||
|
||||
### Forgetting Direction
|
||||
|
||||
```cypher
|
||||
// WRONG - Creates both directions
|
||||
MATCH (a)-[:FOLLOWS]-(b) // Undirected!
|
||||
|
||||
// CORRECT - Specify direction
|
||||
MATCH (a)-[:FOLLOWS]->(b) // a follows b
|
||||
MATCH (a)<-[:FOLLOWS]-(b) // b follows a
|
||||
```
|
||||
|
||||
### Variable-Length Without Bounds
|
||||
|
||||
```cypher
|
||||
// DANGEROUS - Potentially explosive
|
||||
MATCH (a)-[*]->(b) // Any length path!
|
||||
|
||||
// SAFE - Set bounds
|
||||
MATCH (a)-[*1..3]->(b) // 1 to 3 hops max
|
||||
```
|
||||
|
||||
### Creating Duplicate Relationships
|
||||
|
||||
```cypher
|
||||
// May create duplicates
|
||||
CREATE (a)-[:KNOWS]->(b)
|
||||
|
||||
// Idempotent
|
||||
MERGE (a)-[:KNOWS]->(b)
|
||||
```
|
||||
|
||||
## Performance Mistakes
|
||||
|
||||
### Cartesian Products
|
||||
|
||||
```cypher
|
||||
// WRONG - Cartesian product
|
||||
MATCH (a:Person), (b:Product)
|
||||
WHERE a.id = $personId AND b.id = $productId
|
||||
CREATE (a)-[:BOUGHT]->(b)
|
||||
|
||||
// CORRECT - Single pattern or sequential
|
||||
MATCH (a:Person {id: $personId})
|
||||
MATCH (b:Product {id: $productId})
|
||||
CREATE (a)-[:BOUGHT]->(b)
|
||||
```
|
||||
|
||||
### Late Filtering
|
||||
|
||||
```cypher
|
||||
// SLOW - Filters after collecting everything
|
||||
MATCH (e:Event)
|
||||
WITH e
|
||||
WHERE e.kind = 1 // Should be in MATCH or right after
|
||||
|
||||
// FAST - Filter early
|
||||
MATCH (e:Event)
|
||||
WHERE e.kind = 1
|
||||
```
|
||||
|
||||
### Missing LIMIT with ORDER BY
|
||||
|
||||
```cypher
|
||||
// SLOW - Sorts all results
|
||||
MATCH (e:Event)
|
||||
RETURN e
|
||||
ORDER BY e.created_at DESC
|
||||
|
||||
// FAST - Limits result set
|
||||
MATCH (e:Event)
|
||||
RETURN e
|
||||
ORDER BY e.created_at DESC
|
||||
LIMIT 100
|
||||
```
|
||||
|
||||
### Unparameterized Queries
|
||||
|
||||
```cypher
|
||||
// WRONG - No query plan caching, injection risk
|
||||
MATCH (e:Event {id: '" + eventId + "'})
|
||||
|
||||
// CORRECT - Use parameters
|
||||
MATCH (e:Event {id: $eventId})
|
||||
```
|
||||
|
||||
## String Comparison Errors
|
||||
|
||||
### Case Sensitivity
|
||||
|
||||
```cypher
|
||||
// Cypher strings are case-sensitive
|
||||
WHERE n.name = 'alice' // Won't match 'Alice'
|
||||
|
||||
// Use toLower/toUpper for case-insensitive
|
||||
WHERE toLower(n.name) = toLower($name)
|
||||
|
||||
// Or use regex with (?i)
|
||||
WHERE n.name =~ '(?i)alice'
|
||||
```
|
||||
|
||||
### LIKE vs CONTAINS
|
||||
|
||||
```cypher
|
||||
// There's no LIKE in Cypher
|
||||
WHERE n.name LIKE '%alice%' // ERROR!
|
||||
|
||||
// Use CONTAINS, STARTS WITH, ENDS WITH
|
||||
WHERE n.name CONTAINS 'alice'
|
||||
WHERE n.name STARTS WITH 'ali'
|
||||
WHERE n.name ENDS WITH 'ice'
|
||||
|
||||
// Or regex for complex patterns
|
||||
WHERE n.name =~ '.*ali.*ce.*'
|
||||
```
|
||||
|
||||
## Index Mistakes
|
||||
|
||||
### Constraint vs Index
|
||||
|
||||
```cypher
|
||||
// Constraint (also creates index, enforces uniqueness)
|
||||
CREATE CONSTRAINT foo IF NOT EXISTS FOR (n:Node) REQUIRE n.id IS UNIQUE
|
||||
|
||||
// Index only (no uniqueness enforcement)
|
||||
CREATE INDEX bar IF NOT EXISTS FOR (n:Node) ON (n.id)
|
||||
```
|
||||
|
||||
### Index Not Used
|
||||
|
||||
```cypher
|
||||
// Index on n.id won't help here
|
||||
WHERE toLower(n.id) = $id // Function applied to indexed property!
|
||||
|
||||
// Store lowercase if needed, or create computed property
|
||||
```
|
||||
|
||||
### Wrong Composite Index Order
|
||||
|
||||
```cypher
|
||||
// Index on (kind, created_at) won't help query by created_at alone
|
||||
MATCH (e:Event) WHERE e.created_at > $since // Index not used
|
||||
|
||||
// Either create single-property index or query by kind too
|
||||
CREATE INDEX event_created_at FOR (e:Event) ON (e.created_at)
|
||||
```
|
||||
|
||||
## Transaction Errors
|
||||
|
||||
### Read After Write in Same Transaction
|
||||
|
||||
```cypher
|
||||
// In Neo4j, reads in a transaction see the writes
|
||||
// But be careful with external processes
|
||||
CREATE (n:Node {id: 'new'})
|
||||
WITH n
|
||||
MATCH (m:Node {id: 'new'}) // Will find 'n'
|
||||
```
|
||||
|
||||
### Locks and Deadlocks
|
||||
|
||||
```cypher
|
||||
// MERGE takes locks; avoid complex patterns that might deadlock
|
||||
// Bad: two MERGEs on same labels in different order
|
||||
Session 1: MERGE (a:Person {id: 1}) MERGE (b:Person {id: 2})
|
||||
Session 2: MERGE (b:Person {id: 2}) MERGE (a:Person {id: 1}) // Potential deadlock
|
||||
|
||||
// Good: consistent ordering
|
||||
Session 1: MERGE (a:Person {id: 1}) MERGE (b:Person {id: 2})
|
||||
Session 2: MERGE (a:Person {id: 1}) MERGE (b:Person {id: 2})
|
||||
```
|
||||
|
||||
## Type Coercion Issues
|
||||
|
||||
### Integer vs String
|
||||
|
||||
```cypher
|
||||
// Types must match
|
||||
WHERE n.id = 123 // Won't match if n.id is "123"
|
||||
WHERE n.id = '123' // Won't match if n.id is 123
|
||||
|
||||
// Use appropriate parameter types from Go
|
||||
params["id"] = int64(123) // For integer
|
||||
params["id"] = "123" // For string
|
||||
```
|
||||
|
||||
### Boolean Handling
|
||||
|
||||
```cypher
|
||||
// Neo4j booleans vs strings
|
||||
WHERE n.active = true // Boolean
|
||||
WHERE n.active = 'true' // String - different!
|
||||
```
|
||||
|
||||
## Delete Errors
|
||||
|
||||
### Delete Node With Relationships
|
||||
|
||||
```cypher
|
||||
// ERROR - Node still has relationships
|
||||
MATCH (n:Person {id: $id})
|
||||
DELETE n
|
||||
|
||||
// CORRECT - Delete relationships first
|
||||
MATCH (n:Person {id: $id})
|
||||
DETACH DELETE n
|
||||
```
|
||||
|
||||
### Optional Match and Delete
|
||||
|
||||
```cypher
|
||||
// WRONG - DELETE NULL causes no error but also doesn't help
|
||||
OPTIONAL MATCH (n:Node {id: $id})
|
||||
DELETE n // If n is NULL, nothing happens silently
|
||||
|
||||
// Better - Check existence first or handle in application
|
||||
MATCH (n:Node {id: $id})
|
||||
DELETE n
|
||||
```
|
||||
|
||||
## Debugging Tips
|
||||
|
||||
1. **Use EXPLAIN** to see query plan without executing
|
||||
2. **Use PROFILE** to see actual execution metrics
|
||||
3. **Break complex queries** into smaller parts to isolate issues
|
||||
4. **Check parameter types** - mismatched types are a common issue
|
||||
5. **Verify indexes exist** with `SHOW INDEXES`
|
||||
6. **Check constraints** with `SHOW CONSTRAINTS`
|
||||
397
.claude/skills/cypher/references/common-patterns.md
Normal file
397
.claude/skills/cypher/references/common-patterns.md
Normal file
@@ -0,0 +1,397 @@
|
||||
# Common Cypher Patterns for ORLY Nostr Relay
|
||||
|
||||
This reference contains project-specific Cypher patterns used in the ORLY Nostr relay's Neo4j backend.
|
||||
|
||||
## Schema Overview
|
||||
|
||||
### Node Types
|
||||
|
||||
| Label | Purpose | Key Properties |
|
||||
|-------|---------|----------------|
|
||||
| `Event` | Nostr events (NIP-01) | `id`, `kind`, `pubkey`, `created_at`, `content`, `sig`, `tags`, `serial` |
|
||||
| `Author` | Event authors (for NIP-01 queries) | `pubkey` |
|
||||
| `Tag` | Generic tags | `type`, `value` |
|
||||
| `NostrUser` | Social graph users (WoT) | `pubkey`, `name`, `about`, `picture`, `nip05` |
|
||||
| `ProcessedSocialEvent` | Social event tracking | `event_id`, `event_kind`, `pubkey`, `superseded_by` |
|
||||
| `Marker` | Internal state markers | `key`, `value` |
|
||||
|
||||
### Relationship Types
|
||||
|
||||
| Type | From | To | Purpose |
|
||||
|------|------|-----|---------|
|
||||
| `AUTHORED_BY` | Event | Author | Links event to author |
|
||||
| `TAGGED_WITH` | Event | Tag | Links event to tags |
|
||||
| `REFERENCES` | Event | Event | e-tag references |
|
||||
| `MENTIONS` | Event | Author | p-tag mentions |
|
||||
| `FOLLOWS` | NostrUser | NostrUser | Contact list (kind 3) |
|
||||
| `MUTES` | NostrUser | NostrUser | Mute list (kind 10000) |
|
||||
| `REPORTS` | NostrUser | NostrUser | Reports (kind 1984) |
|
||||
|
||||
## Event Storage Patterns
|
||||
|
||||
### Create Event with Full Relationships
|
||||
|
||||
This pattern creates an event and all related nodes/relationships atomically:
|
||||
|
||||
```cypher
|
||||
// 1. Create or get author
|
||||
MERGE (a:Author {pubkey: $pubkey})
|
||||
|
||||
// 2. Create event node
|
||||
CREATE (e:Event {
|
||||
id: $eventId,
|
||||
serial: $serial,
|
||||
kind: $kind,
|
||||
created_at: $createdAt,
|
||||
content: $content,
|
||||
sig: $sig,
|
||||
pubkey: $pubkey,
|
||||
tags: $tagsJson // JSON string for full tag data
|
||||
})
|
||||
|
||||
// 3. Link to author
|
||||
CREATE (e)-[:AUTHORED_BY]->(a)
|
||||
|
||||
// 4. Process e-tags (event references)
|
||||
WITH e, a
|
||||
OPTIONAL MATCH (ref0:Event {id: $eTag_0})
|
||||
FOREACH (_ IN CASE WHEN ref0 IS NOT NULL THEN [1] ELSE [] END |
|
||||
CREATE (e)-[:REFERENCES]->(ref0)
|
||||
)
|
||||
|
||||
// 5. Process p-tags (mentions)
|
||||
WITH e, a
|
||||
MERGE (mentioned0:Author {pubkey: $pTag_0})
|
||||
CREATE (e)-[:MENTIONS]->(mentioned0)
|
||||
|
||||
// 6. Process other tags
|
||||
WITH e, a
|
||||
MERGE (tag0:Tag {type: $tagType_0, value: $tagValue_0})
|
||||
CREATE (e)-[:TAGGED_WITH]->(tag0)
|
||||
|
||||
RETURN e.id AS id
|
||||
```
|
||||
|
||||
### Check Event Existence
|
||||
|
||||
```cypher
|
||||
MATCH (e:Event {id: $id})
|
||||
RETURN e.id AS id
|
||||
LIMIT 1
|
||||
```
|
||||
|
||||
### Get Next Serial Number
|
||||
|
||||
```cypher
|
||||
MERGE (m:Marker {key: 'serial'})
|
||||
ON CREATE SET m.value = 1
|
||||
ON MATCH SET m.value = m.value + 1
|
||||
RETURN m.value AS serial
|
||||
```
|
||||
|
||||
## Query Patterns
|
||||
|
||||
### Basic Filter Query (NIP-01)
|
||||
|
||||
```cypher
|
||||
MATCH (e:Event)
|
||||
WHERE e.kind IN $kinds
|
||||
AND e.pubkey IN $authors
|
||||
AND e.created_at >= $since
|
||||
AND e.created_at <= $until
|
||||
RETURN e.id AS id,
|
||||
e.kind AS kind,
|
||||
e.created_at AS created_at,
|
||||
e.content AS content,
|
||||
e.sig AS sig,
|
||||
e.pubkey AS pubkey,
|
||||
e.tags AS tags,
|
||||
e.serial AS serial
|
||||
ORDER BY e.created_at DESC
|
||||
LIMIT $limit
|
||||
```
|
||||
|
||||
### Query by Event ID (with prefix support)
|
||||
|
||||
```cypher
|
||||
// Exact match
|
||||
MATCH (e:Event {id: $id})
|
||||
RETURN e
|
||||
|
||||
// Prefix match
|
||||
MATCH (e:Event)
|
||||
WHERE e.id STARTS WITH $idPrefix
|
||||
RETURN e
|
||||
```
|
||||
|
||||
### Query by Tag (#<tag> filter)
|
||||
|
||||
```cypher
|
||||
MATCH (e:Event)
|
||||
OPTIONAL MATCH (e)-[:TAGGED_WITH]->(t:Tag)
|
||||
WHERE t.type = $tagType AND t.value IN $tagValues
|
||||
RETURN DISTINCT e
|
||||
ORDER BY e.created_at DESC
|
||||
LIMIT $limit
|
||||
```
|
||||
|
||||
### Count Events
|
||||
|
||||
```cypher
|
||||
MATCH (e:Event)
|
||||
WHERE e.kind IN $kinds
|
||||
RETURN count(e) AS count
|
||||
```
|
||||
|
||||
### Query Delete Events Targeting an Event
|
||||
|
||||
```cypher
|
||||
MATCH (target:Event {id: $targetId})
|
||||
MATCH (e:Event {kind: 5})-[:REFERENCES]->(target)
|
||||
RETURN e
|
||||
ORDER BY e.created_at DESC
|
||||
```
|
||||
|
||||
### Replaceable Event Check (kinds 0, 3, 10000-19999)
|
||||
|
||||
```cypher
|
||||
MATCH (e:Event {kind: $kind, pubkey: $pubkey})
|
||||
WHERE e.created_at < $newCreatedAt
|
||||
RETURN e.serial AS serial
|
||||
ORDER BY e.created_at DESC
|
||||
```
|
||||
|
||||
### Parameterized Replaceable Event Check (kinds 30000-39999)
|
||||
|
||||
```cypher
|
||||
MATCH (e:Event {kind: $kind, pubkey: $pubkey})-[:TAGGED_WITH]->(t:Tag {type: 'd', value: $dValue})
|
||||
WHERE e.created_at < $newCreatedAt
|
||||
RETURN e.serial AS serial
|
||||
ORDER BY e.created_at DESC
|
||||
```
|
||||
|
||||
## Social Graph Patterns
|
||||
|
||||
### Update Profile (Kind 0)
|
||||
|
||||
```cypher
|
||||
MERGE (user:NostrUser {pubkey: $pubkey})
|
||||
ON CREATE SET
|
||||
user.created_at = timestamp(),
|
||||
user.first_seen_event = $event_id
|
||||
ON MATCH SET
|
||||
user.last_profile_update = $created_at
|
||||
SET
|
||||
user.name = $name,
|
||||
user.about = $about,
|
||||
user.picture = $picture,
|
||||
user.nip05 = $nip05,
|
||||
user.lud16 = $lud16,
|
||||
user.display_name = $display_name
|
||||
```
|
||||
|
||||
### Contact List Update (Kind 3) - Diff-Based
|
||||
|
||||
```cypher
|
||||
// Mark old event as superseded
|
||||
OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
|
||||
SET old.superseded_by = $new_event_id
|
||||
|
||||
// Create new event tracking
|
||||
CREATE (new:ProcessedSocialEvent {
|
||||
event_id: $new_event_id,
|
||||
event_kind: 3,
|
||||
pubkey: $author_pubkey,
|
||||
created_at: $created_at,
|
||||
processed_at: timestamp(),
|
||||
relationship_count: $total_follows,
|
||||
superseded_by: null
|
||||
})
|
||||
|
||||
// Get or create author
|
||||
MERGE (author:NostrUser {pubkey: $author_pubkey})
|
||||
|
||||
// Update unchanged relationships to new event
|
||||
WITH author
|
||||
OPTIONAL MATCH (author)-[unchanged:FOLLOWS]->(followed:NostrUser)
|
||||
WHERE unchanged.created_by_event = $old_event_id
|
||||
AND NOT followed.pubkey IN $removed_follows
|
||||
SET unchanged.created_by_event = $new_event_id,
|
||||
unchanged.created_at = $created_at
|
||||
|
||||
// Remove old relationships for removed follows
|
||||
WITH author
|
||||
OPTIONAL MATCH (author)-[old_follows:FOLLOWS]->(followed:NostrUser)
|
||||
WHERE old_follows.created_by_event = $old_event_id
|
||||
AND followed.pubkey IN $removed_follows
|
||||
DELETE old_follows
|
||||
|
||||
// Create new relationships for added follows
|
||||
WITH author
|
||||
UNWIND $added_follows AS followed_pubkey
|
||||
MERGE (followed:NostrUser {pubkey: followed_pubkey})
|
||||
MERGE (author)-[new_follows:FOLLOWS]->(followed)
|
||||
ON CREATE SET
|
||||
new_follows.created_by_event = $new_event_id,
|
||||
new_follows.created_at = $created_at,
|
||||
new_follows.relay_received_at = timestamp()
|
||||
ON MATCH SET
|
||||
new_follows.created_by_event = $new_event_id,
|
||||
new_follows.created_at = $created_at
|
||||
```
|
||||
|
||||
### Create Report (Kind 1984)
|
||||
|
||||
```cypher
|
||||
// Create tracking node
|
||||
CREATE (evt:ProcessedSocialEvent {
|
||||
event_id: $event_id,
|
||||
event_kind: 1984,
|
||||
pubkey: $reporter_pubkey,
|
||||
created_at: $created_at,
|
||||
processed_at: timestamp(),
|
||||
relationship_count: 1,
|
||||
superseded_by: null
|
||||
})
|
||||
|
||||
// Create users and relationship
|
||||
MERGE (reporter:NostrUser {pubkey: $reporter_pubkey})
|
||||
MERGE (reported:NostrUser {pubkey: $reported_pubkey})
|
||||
CREATE (reporter)-[:REPORTS {
|
||||
created_by_event: $event_id,
|
||||
created_at: $created_at,
|
||||
relay_received_at: timestamp(),
|
||||
report_type: $report_type
|
||||
}]->(reported)
|
||||
```
|
||||
|
||||
### Get Latest Social Event for Pubkey
|
||||
|
||||
```cypher
|
||||
MATCH (evt:ProcessedSocialEvent {pubkey: $pubkey, event_kind: $kind})
|
||||
WHERE evt.superseded_by IS NULL
|
||||
RETURN evt.event_id AS event_id,
|
||||
evt.created_at AS created_at,
|
||||
evt.relationship_count AS relationship_count
|
||||
ORDER BY evt.created_at DESC
|
||||
LIMIT 1
|
||||
```
|
||||
|
||||
### Get Follows for Event
|
||||
|
||||
```cypher
|
||||
MATCH (author:NostrUser)-[f:FOLLOWS]->(followed:NostrUser)
|
||||
WHERE f.created_by_event = $event_id
|
||||
RETURN collect(followed.pubkey) AS pubkeys
|
||||
```
|
||||
|
||||
## WoT Query Patterns
|
||||
|
||||
### Find Mutual Follows
|
||||
|
||||
```cypher
|
||||
MATCH (a:NostrUser {pubkey: $pubkeyA})-[:FOLLOWS]->(b:NostrUser)
|
||||
WHERE (b)-[:FOLLOWS]->(a)
|
||||
RETURN b.pubkey AS mutual_friend
|
||||
```
|
||||
|
||||
### Find Followers
|
||||
|
||||
```cypher
|
||||
MATCH (follower:NostrUser)-[:FOLLOWS]->(user:NostrUser {pubkey: $pubkey})
|
||||
RETURN follower.pubkey, follower.name
|
||||
```
|
||||
|
||||
### Find Following
|
||||
|
||||
```cypher
|
||||
MATCH (user:NostrUser {pubkey: $pubkey})-[:FOLLOWS]->(following:NostrUser)
|
||||
RETURN following.pubkey, following.name
|
||||
```
|
||||
|
||||
### Hop Distance (Trust Path)
|
||||
|
||||
```cypher
|
||||
MATCH (start:NostrUser {pubkey: $startPubkey})
|
||||
MATCH (end:NostrUser {pubkey: $endPubkey})
|
||||
MATCH path = shortestPath((start)-[:FOLLOWS*..6]->(end))
|
||||
RETURN length(path) AS hops, [n IN nodes(path) | n.pubkey] AS path
|
||||
```
|
||||
|
||||
### Second-Degree Connections
|
||||
|
||||
```cypher
|
||||
MATCH (me:NostrUser {pubkey: $myPubkey})-[:FOLLOWS]->(:NostrUser)-[:FOLLOWS]->(suggested:NostrUser)
|
||||
WHERE NOT (me)-[:FOLLOWS]->(suggested)
|
||||
AND suggested.pubkey <> $myPubkey
|
||||
RETURN suggested.pubkey, count(*) AS commonFollows
|
||||
ORDER BY commonFollows DESC
|
||||
LIMIT 20
|
||||
```
|
||||
|
||||
## Schema Management Patterns
|
||||
|
||||
### Create Constraint
|
||||
|
||||
```cypher
|
||||
CREATE CONSTRAINT event_id_unique IF NOT EXISTS
|
||||
FOR (e:Event) REQUIRE e.id IS UNIQUE
|
||||
```
|
||||
|
||||
### Create Index
|
||||
|
||||
```cypher
|
||||
CREATE INDEX event_kind IF NOT EXISTS
|
||||
FOR (e:Event) ON (e.kind)
|
||||
```
|
||||
|
||||
### Create Composite Index
|
||||
|
||||
```cypher
|
||||
CREATE INDEX event_kind_created_at IF NOT EXISTS
|
||||
FOR (e:Event) ON (e.kind, e.created_at)
|
||||
```
|
||||
|
||||
### Drop All Data (Testing Only)
|
||||
|
||||
```cypher
|
||||
MATCH (n) DETACH DELETE n
|
||||
```
|
||||
|
||||
## Performance Patterns
|
||||
|
||||
### Use EXPLAIN/PROFILE
|
||||
|
||||
```cypher
|
||||
// See query plan without running
|
||||
EXPLAIN MATCH (e:Event) WHERE e.kind = 1 RETURN e
|
||||
|
||||
// Run and see actual metrics
|
||||
PROFILE MATCH (e:Event) WHERE e.kind = 1 RETURN e
|
||||
```
|
||||
|
||||
### Batch Import with UNWIND
|
||||
|
||||
```cypher
|
||||
UNWIND $events AS evt
|
||||
CREATE (e:Event {
|
||||
id: evt.id,
|
||||
kind: evt.kind,
|
||||
pubkey: evt.pubkey,
|
||||
created_at: evt.created_at,
|
||||
content: evt.content,
|
||||
sig: evt.sig,
|
||||
tags: evt.tags
|
||||
})
|
||||
```
|
||||
|
||||
### Efficient Pagination
|
||||
|
||||
```cypher
|
||||
// Use indexed ORDER BY with WHERE for cursor-based pagination
|
||||
MATCH (e:Event)
|
||||
WHERE e.kind = 1 AND e.created_at < $cursor
|
||||
RETURN e
|
||||
ORDER BY e.created_at DESC
|
||||
LIMIT 20
|
||||
```
|
||||
540
.claude/skills/cypher/references/syntax-reference.md
Normal file
540
.claude/skills/cypher/references/syntax-reference.md
Normal file
@@ -0,0 +1,540 @@
|
||||
# Cypher Syntax Reference
|
||||
|
||||
Complete syntax reference for Neo4j Cypher query language.
|
||||
|
||||
## Clause Reference
|
||||
|
||||
### Reading Clauses
|
||||
|
||||
#### MATCH
|
||||
|
||||
Finds patterns in the graph.
|
||||
|
||||
```cypher
|
||||
// Basic node match
|
||||
MATCH (n:Label)
|
||||
|
||||
// Match with properties
|
||||
MATCH (n:Label {key: value})
|
||||
|
||||
// Match relationships
|
||||
MATCH (a)-[r:RELATES_TO]->(b)
|
||||
|
||||
// Match path
|
||||
MATCH path = (a)-[*1..3]->(b)
|
||||
```
|
||||
|
||||
#### OPTIONAL MATCH
|
||||
|
||||
Like MATCH but returns NULL for non-matches (LEFT OUTER JOIN).
|
||||
|
||||
```cypher
|
||||
MATCH (a:Person)
|
||||
OPTIONAL MATCH (a)-[:KNOWS]->(b:Person)
|
||||
RETURN a.name, b.name // b.name may be NULL
|
||||
```
|
||||
|
||||
#### WHERE
|
||||
|
||||
Filters results.
|
||||
|
||||
```cypher
|
||||
// Comparison operators
|
||||
WHERE n.age > 21
|
||||
WHERE n.age >= 21
|
||||
WHERE n.age < 65
|
||||
WHERE n.age <= 65
|
||||
WHERE n.name = 'Alice'
|
||||
WHERE n.name <> 'Bob'
|
||||
|
||||
// Boolean operators
|
||||
WHERE n.age > 21 AND n.active = true
|
||||
WHERE n.age < 18 OR n.age > 65
|
||||
WHERE NOT n.deleted
|
||||
|
||||
// NULL checks
|
||||
WHERE n.email IS NULL
|
||||
WHERE n.email IS NOT NULL
|
||||
|
||||
// Pattern predicates
|
||||
WHERE (n)-[:KNOWS]->(:Person)
|
||||
WHERE NOT (n)-[:BLOCKED]->()
|
||||
WHERE exists((n)-[:FOLLOWS]->())
|
||||
|
||||
// String predicates
|
||||
WHERE n.name STARTS WITH 'A'
|
||||
WHERE n.name ENDS WITH 'son'
|
||||
WHERE n.name CONTAINS 'li'
|
||||
WHERE n.name =~ '(?i)alice.*' // Case-insensitive regex
|
||||
|
||||
// List predicates
|
||||
WHERE n.status IN ['active', 'pending']
|
||||
WHERE any(x IN n.tags WHERE x = 'important')
|
||||
WHERE all(x IN n.scores WHERE x > 50)
|
||||
WHERE none(x IN n.errors WHERE x IS NOT NULL)
|
||||
WHERE single(x IN n.items WHERE x.primary = true)
|
||||
```
|
||||
|
||||
### Writing Clauses
|
||||
|
||||
#### CREATE
|
||||
|
||||
Creates nodes and relationships.
|
||||
|
||||
```cypher
|
||||
// Create node
|
||||
CREATE (n:Label {key: value})
|
||||
|
||||
// Create multiple nodes
|
||||
CREATE (a:Person {name: 'Alice'}), (b:Person {name: 'Bob'})
|
||||
|
||||
// Create relationship
|
||||
CREATE (a)-[r:KNOWS {since: 2020}]->(b)
|
||||
|
||||
// Create path
|
||||
CREATE p = (a)-[:KNOWS]->(b)-[:KNOWS]->(c)
|
||||
```
|
||||
|
||||
#### MERGE
|
||||
|
||||
Find or create pattern. **Critical for idempotency**.
|
||||
|
||||
```cypher
|
||||
// MERGE node
|
||||
MERGE (n:Label {key: $uniqueKey})
|
||||
|
||||
// MERGE with ON CREATE / ON MATCH
|
||||
MERGE (n:Person {email: $email})
|
||||
ON CREATE SET n.created = timestamp(), n.name = $name
|
||||
ON MATCH SET n.accessed = timestamp()
|
||||
|
||||
// MERGE relationship (both nodes must exist or be in scope)
|
||||
MERGE (a)-[r:KNOWS]->(b)
|
||||
ON CREATE SET r.since = date()
|
||||
```
|
||||
|
||||
**MERGE Gotcha**: MERGE on a pattern locks the entire pattern. For relationships, MERGE each node first:
|
||||
|
||||
```cypher
|
||||
// CORRECT
|
||||
MERGE (a:Person {id: $id1})
|
||||
MERGE (b:Person {id: $id2})
|
||||
MERGE (a)-[:KNOWS]->(b)
|
||||
|
||||
// RISKY - may create duplicate nodes
|
||||
MERGE (a:Person {id: $id1})-[:KNOWS]->(b:Person {id: $id2})
|
||||
```
|
||||
|
||||
#### SET
|
||||
|
||||
Updates properties.
|
||||
|
||||
```cypher
|
||||
// Set single property
|
||||
SET n.name = 'Alice'
|
||||
|
||||
// Set multiple properties
|
||||
SET n.name = 'Alice', n.age = 30
|
||||
|
||||
// Set from map (replaces all properties)
|
||||
SET n = {name: 'Alice', age: 30}
|
||||
|
||||
// Set from map (adds/updates, keeps existing)
|
||||
SET n += {name: 'Alice'}
|
||||
|
||||
// Set label
|
||||
SET n:NewLabel
|
||||
|
||||
// Remove property
|
||||
SET n.obsolete = null
|
||||
```
|
||||
|
||||
#### DELETE / DETACH DELETE
|
||||
|
||||
Removes nodes and relationships.
|
||||
|
||||
```cypher
|
||||
// Delete relationship
|
||||
MATCH (a)-[r:KNOWS]->(b)
|
||||
DELETE r
|
||||
|
||||
// Delete node (must have no relationships)
|
||||
MATCH (n:Orphan)
|
||||
DELETE n
|
||||
|
||||
// Delete node and all relationships
|
||||
MATCH (n:Person {name: 'Bob'})
|
||||
DETACH DELETE n
|
||||
```
|
||||
|
||||
#### REMOVE
|
||||
|
||||
Removes properties and labels.
|
||||
|
||||
```cypher
|
||||
// Remove property
|
||||
REMOVE n.temporary
|
||||
|
||||
// Remove label
|
||||
REMOVE n:OldLabel
|
||||
```
|
||||
|
||||
### Projection Clauses
|
||||
|
||||
#### RETURN
|
||||
|
||||
Specifies output.
|
||||
|
||||
```cypher
|
||||
// Return nodes
|
||||
RETURN n
|
||||
|
||||
// Return properties
|
||||
RETURN n.name, n.age
|
||||
|
||||
// Return with alias
|
||||
RETURN n.name AS name, n.age AS age
|
||||
|
||||
// Return all
|
||||
RETURN *
|
||||
|
||||
// Return distinct
|
||||
RETURN DISTINCT n.category
|
||||
|
||||
// Return expression
|
||||
RETURN n.price * n.quantity AS total
|
||||
```
|
||||
|
||||
#### WITH
|
||||
|
||||
Passes results between query parts. **Critical for multi-part queries**.
|
||||
|
||||
```cypher
|
||||
// Filter and pass
|
||||
MATCH (n:Person)
|
||||
WITH n WHERE n.age > 21
|
||||
RETURN n
|
||||
|
||||
// Aggregate and continue
|
||||
MATCH (n:Person)-[:BOUGHT]->(p:Product)
|
||||
WITH n, count(p) AS purchases
|
||||
WHERE purchases > 5
|
||||
RETURN n.name, purchases
|
||||
|
||||
// Order and limit mid-query
|
||||
MATCH (n:Person)
|
||||
WITH n ORDER BY n.age DESC LIMIT 10
|
||||
MATCH (n)-[:LIVES_IN]->(c:City)
|
||||
RETURN n.name, c.name
|
||||
```
|
||||
|
||||
**WITH resets scope**: Variables not listed in WITH are no longer available.
|
||||
|
||||
#### ORDER BY
|
||||
|
||||
Sorts results.
|
||||
|
||||
```cypher
|
||||
ORDER BY n.name // Ascending (default)
|
||||
ORDER BY n.name ASC // Explicit ascending
|
||||
ORDER BY n.name DESC // Descending
|
||||
ORDER BY n.lastName, n.firstName // Multiple fields
|
||||
ORDER BY n.priority DESC, n.name // Mixed
|
||||
```
|
||||
|
||||
#### SKIP and LIMIT
|
||||
|
||||
Pagination.
|
||||
|
||||
```cypher
|
||||
// Skip first 10
|
||||
SKIP 10
|
||||
|
||||
// Return only 20
|
||||
LIMIT 20
|
||||
|
||||
// Pagination
|
||||
ORDER BY n.created_at DESC
|
||||
SKIP $offset LIMIT $pageSize
|
||||
```
|
||||
|
||||
### Sub-queries
|
||||
|
||||
#### CALL (Subquery)
|
||||
|
||||
Execute subquery for each row.
|
||||
|
||||
```cypher
|
||||
MATCH (p:Person)
|
||||
CALL {
|
||||
WITH p
|
||||
MATCH (p)-[:BOUGHT]->(prod:Product)
|
||||
RETURN count(prod) AS purchaseCount
|
||||
}
|
||||
RETURN p.name, purchaseCount
|
||||
```
|
||||
|
||||
#### UNION
|
||||
|
||||
Combine results from multiple queries.
|
||||
|
||||
```cypher
|
||||
MATCH (n:Person) RETURN n.name AS name
|
||||
UNION
|
||||
MATCH (n:Company) RETURN n.name AS name
|
||||
|
||||
// UNION ALL keeps duplicates
|
||||
MATCH (n:Person) RETURN n.name AS name
|
||||
UNION ALL
|
||||
MATCH (n:Company) RETURN n.name AS name
|
||||
```
|
||||
|
||||
### Control Flow
|
||||
|
||||
#### FOREACH
|
||||
|
||||
Iterate over list, execute updates.
|
||||
|
||||
```cypher
|
||||
// Set property on path nodes
|
||||
MATCH path = (a)-[*]->(b)
|
||||
FOREACH (n IN nodes(path) | SET n.visited = true)
|
||||
|
||||
// Conditional operation (common pattern)
|
||||
OPTIONAL MATCH (target:Node {id: $id})
|
||||
FOREACH (_ IN CASE WHEN target IS NOT NULL THEN [1] ELSE [] END |
|
||||
CREATE (source)-[:LINKS_TO]->(target)
|
||||
)
|
||||
```
|
||||
|
||||
#### CASE
|
||||
|
||||
Conditional expressions.
|
||||
|
||||
```cypher
|
||||
// Simple CASE
|
||||
RETURN CASE n.status
|
||||
WHEN 'active' THEN 'A'
|
||||
WHEN 'pending' THEN 'P'
|
||||
ELSE 'X'
|
||||
END AS code
|
||||
|
||||
// Generic CASE
|
||||
RETURN CASE
|
||||
WHEN n.age < 18 THEN 'minor'
|
||||
WHEN n.age < 65 THEN 'adult'
|
||||
ELSE 'senior'
|
||||
END AS category
|
||||
```
|
||||
|
||||
## Operators
|
||||
|
||||
### Comparison
|
||||
|
||||
| Operator | Description |
|
||||
|----------|-------------|
|
||||
| `=` | Equal |
|
||||
| `<>` | Not equal |
|
||||
| `<` | Less than |
|
||||
| `>` | Greater than |
|
||||
| `<=` | Less than or equal |
|
||||
| `>=` | Greater than or equal |
|
||||
| `IS NULL` | Is null |
|
||||
| `IS NOT NULL` | Is not null |
|
||||
|
||||
### Boolean
|
||||
|
||||
| Operator | Description |
|
||||
|----------|-------------|
|
||||
| `AND` | Logical AND |
|
||||
| `OR` | Logical OR |
|
||||
| `NOT` | Logical NOT |
|
||||
| `XOR` | Exclusive OR |
|
||||
|
||||
### String
|
||||
|
||||
| Operator | Description |
|
||||
|----------|-------------|
|
||||
| `STARTS WITH` | Prefix match |
|
||||
| `ENDS WITH` | Suffix match |
|
||||
| `CONTAINS` | Substring match |
|
||||
| `=~` | Regex match |
|
||||
|
||||
### List
|
||||
|
||||
| Operator | Description |
|
||||
|----------|-------------|
|
||||
| `IN` | List membership |
|
||||
| `+` | List concatenation |
|
||||
|
||||
### Mathematical
|
||||
|
||||
| Operator | Description |
|
||||
|----------|-------------|
|
||||
| `+` | Addition |
|
||||
| `-` | Subtraction |
|
||||
| `*` | Multiplication |
|
||||
| `/` | Division |
|
||||
| `%` | Modulo |
|
||||
| `^` | Exponentiation |
|
||||
|
||||
## Functions
|
||||
|
||||
### Aggregation
|
||||
|
||||
```cypher
|
||||
count(*) // Count rows
|
||||
count(n) // Count non-null
|
||||
count(DISTINCT n) // Count unique
|
||||
sum(n.value) // Sum
|
||||
avg(n.value) // Average
|
||||
min(n.value) // Minimum
|
||||
max(n.value) // Maximum
|
||||
collect(n) // Collect to list
|
||||
collect(DISTINCT n) // Collect unique
|
||||
stDev(n.value) // Standard deviation
|
||||
percentileCont(n.value, 0.5) // Median
|
||||
```
|
||||
|
||||
### Scalar
|
||||
|
||||
```cypher
|
||||
// Type functions
|
||||
id(n) // Internal node ID (deprecated, use elementId)
|
||||
elementId(n) // Element ID string
|
||||
labels(n) // Node labels
|
||||
type(r) // Relationship type
|
||||
properties(n) // Property map
|
||||
|
||||
// Math
|
||||
abs(x)
|
||||
ceil(x)
|
||||
floor(x)
|
||||
round(x)
|
||||
sign(x)
|
||||
sqrt(x)
|
||||
rand() // Random 0-1
|
||||
|
||||
// String
|
||||
size(str) // String length
|
||||
toLower(str)
|
||||
toUpper(str)
|
||||
trim(str)
|
||||
ltrim(str)
|
||||
rtrim(str)
|
||||
replace(str, from, to)
|
||||
substring(str, start, len)
|
||||
left(str, len)
|
||||
right(str, len)
|
||||
split(str, delimiter)
|
||||
reverse(str)
|
||||
toString(val)
|
||||
|
||||
// Null handling
|
||||
coalesce(val1, val2, ...) // First non-null
|
||||
nullIf(val1, val2) // NULL if equal
|
||||
|
||||
// Type conversion
|
||||
toInteger(val)
|
||||
toFloat(val)
|
||||
toBoolean(val)
|
||||
toString(val)
|
||||
```
|
||||
|
||||
### List Functions
|
||||
|
||||
```cypher
|
||||
size(list) // List length
|
||||
head(list) // First element
|
||||
tail(list) // All but first
|
||||
last(list) // Last element
|
||||
range(start, end) // Create range [start..end]
|
||||
range(start, end, step)
|
||||
reverse(list)
|
||||
keys(map) // Map keys as list
|
||||
values(map) // Map values as list
|
||||
|
||||
// List predicates
|
||||
any(x IN list WHERE predicate)
|
||||
all(x IN list WHERE predicate)
|
||||
none(x IN list WHERE predicate)
|
||||
single(x IN list WHERE predicate)
|
||||
|
||||
// List manipulation
|
||||
[x IN list WHERE predicate] // Filter
|
||||
[x IN list | expression] // Map
|
||||
[x IN list WHERE pred | expr] // Filter and map
|
||||
reduce(s = initial, x IN list | s + x) // Reduce
|
||||
```
|
||||
|
||||
### Path Functions
|
||||
|
||||
```cypher
|
||||
nodes(path) // Nodes in path
|
||||
relationships(path) // Relationships in path
|
||||
length(path) // Number of relationships
|
||||
shortestPath((a)-[*]-(b))
|
||||
allShortestPaths((a)-[*]-(b))
|
||||
```
|
||||
|
||||
### Temporal Functions
|
||||
|
||||
```cypher
|
||||
timestamp() // Current Unix timestamp (ms)
|
||||
datetime() // Current datetime
|
||||
date() // Current date
|
||||
time() // Current time
|
||||
duration({days: 1, hours: 12})
|
||||
|
||||
// Components
|
||||
datetime().year
|
||||
datetime().month
|
||||
datetime().day
|
||||
datetime().hour
|
||||
|
||||
// Parsing
|
||||
date('2024-01-15')
|
||||
datetime('2024-01-15T10:30:00Z')
|
||||
```
|
||||
|
||||
### Spatial Functions
|
||||
|
||||
```cypher
|
||||
point({x: 1, y: 2})
|
||||
point({latitude: 37.5, longitude: -122.4})
|
||||
distance(point1, point2)
|
||||
```
|
||||
|
||||
## Comments
|
||||
|
||||
```cypher
|
||||
// Single line comment
|
||||
|
||||
/* Multi-line
|
||||
comment */
|
||||
```
|
||||
|
||||
## Transaction Control
|
||||
|
||||
```cypher
|
||||
// In procedures/transactions
|
||||
:begin
|
||||
:commit
|
||||
:rollback
|
||||
```
|
||||
|
||||
## Parameter Syntax
|
||||
|
||||
```cypher
|
||||
// Parameter reference
|
||||
$paramName
|
||||
|
||||
// In properties
|
||||
{key: $value}
|
||||
|
||||
// In WHERE
|
||||
WHERE n.id = $id
|
||||
|
||||
// In expressions
|
||||
RETURN $multiplier * n.value
|
||||
```
|
||||
1115
.claude/skills/distributed-systems/SKILL.md
Normal file
1115
.claude/skills/distributed-systems/SKILL.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,610 @@
|
||||
# Consensus Protocols - Detailed Reference
|
||||
|
||||
Complete specifications and implementation details for major consensus protocols.
|
||||
|
||||
## Paxos Complete Specification
|
||||
|
||||
### Proposal Numbers
|
||||
|
||||
Proposal numbers must be:
|
||||
- **Unique**: No two proposers use the same number
|
||||
- **Totally ordered**: Any two can be compared
|
||||
|
||||
**Implementation**: `(round_number, proposer_id)` where proposer_id breaks ties.
|
||||
|
||||
### Single-Decree Paxos State
|
||||
|
||||
**Proposer state**:
|
||||
```
|
||||
proposal_number: int
|
||||
value: any
|
||||
```
|
||||
|
||||
**Acceptor state (persistent)**:
|
||||
```
|
||||
highest_promised: int # Highest proposal number promised
|
||||
accepted_proposal: int # Number of accepted proposal (0 if none)
|
||||
accepted_value: any # Value of accepted proposal (null if none)
|
||||
```
|
||||
|
||||
### Message Format
|
||||
|
||||
**Prepare** (Phase 1a):
|
||||
```
|
||||
{
|
||||
type: "PREPARE",
|
||||
proposal_number: n
|
||||
}
|
||||
```
|
||||
|
||||
**Promise** (Phase 1b):
|
||||
```
|
||||
{
|
||||
type: "PROMISE",
|
||||
proposal_number: n,
|
||||
accepted_proposal: m, # null if nothing accepted
|
||||
accepted_value: v # null if nothing accepted
|
||||
}
|
||||
```
|
||||
|
||||
**Accept** (Phase 2a):
|
||||
```
|
||||
{
|
||||
type: "ACCEPT",
|
||||
proposal_number: n,
|
||||
value: v
|
||||
}
|
||||
```
|
||||
|
||||
**Accepted** (Phase 2b):
|
||||
```
|
||||
{
|
||||
type: "ACCEPTED",
|
||||
proposal_number: n,
|
||||
value: v
|
||||
}
|
||||
```
|
||||
|
||||
### Proposer Algorithm
|
||||
|
||||
```
|
||||
function propose(value):
|
||||
n = generate_proposal_number()
|
||||
|
||||
# Phase 1: Prepare
|
||||
promises = []
|
||||
for acceptor in acceptors:
|
||||
send PREPARE(n) to acceptor
|
||||
|
||||
wait until |promises| > |acceptors|/2 or timeout
|
||||
|
||||
if timeout:
|
||||
return FAILED
|
||||
|
||||
# Choose value
|
||||
highest = max(promises, key=p.accepted_proposal)
|
||||
if highest.accepted_value is not null:
|
||||
value = highest.accepted_value
|
||||
|
||||
# Phase 2: Accept
|
||||
accepts = []
|
||||
for acceptor in acceptors:
|
||||
send ACCEPT(n, value) to acceptor
|
||||
|
||||
wait until |accepts| > |acceptors|/2 or timeout
|
||||
|
||||
if timeout:
|
||||
return FAILED
|
||||
|
||||
return SUCCESS(value)
|
||||
```
|
||||
|
||||
### Acceptor Algorithm
|
||||
|
||||
```
|
||||
on receive PREPARE(n):
|
||||
if n > highest_promised:
|
||||
highest_promised = n
|
||||
persist(highest_promised)
|
||||
reply PROMISE(n, accepted_proposal, accepted_value)
|
||||
else:
|
||||
# Optionally reply NACK(highest_promised)
|
||||
ignore or reject
|
||||
|
||||
on receive ACCEPT(n, v):
|
||||
if n >= highest_promised:
|
||||
highest_promised = n
|
||||
accepted_proposal = n
|
||||
accepted_value = v
|
||||
persist(highest_promised, accepted_proposal, accepted_value)
|
||||
reply ACCEPTED(n, v)
|
||||
else:
|
||||
ignore or reject
|
||||
```
|
||||
|
||||
### Multi-Paxos Optimization
|
||||
|
||||
**Stable leader**:
|
||||
```
|
||||
# Leader election (using Paxos or other method)
|
||||
leader = elect_leader()
|
||||
|
||||
# Leader's Phase 1 for all future instances
|
||||
leader sends PREPARE(n) for instance range [i, ∞)
|
||||
|
||||
# For each command:
|
||||
function propose_as_leader(value, instance):
|
||||
# Skip Phase 1 if already leader
|
||||
for acceptor in acceptors:
|
||||
send ACCEPT(n, value, instance) to acceptor
|
||||
wait for majority ACCEPTED
|
||||
return SUCCESS
|
||||
```
|
||||
|
||||
### Paxos Safety Proof Sketch
|
||||
|
||||
**Invariant**: If a value v is chosen for instance i, no other value can be chosen.
|
||||
|
||||
**Proof**:
|
||||
1. Value chosen → accepted by majority with proposal n
|
||||
2. Any higher proposal n' must contact majority
|
||||
3. Majorities intersect → at least one acceptor has accepted v
|
||||
4. New proposer adopts v (or higher already-accepted value)
|
||||
5. By induction, all future proposals use v
|
||||
|
||||
## Raft Complete Specification
|
||||
|
||||
### State
|
||||
|
||||
**All servers (persistent)**:
|
||||
```
|
||||
currentTerm: int # Latest term seen
|
||||
votedFor: ServerId # Candidate voted for in current term (null if none)
|
||||
log[]: LogEntry # Log entries
|
||||
```
|
||||
|
||||
**All servers (volatile)**:
|
||||
```
|
||||
commitIndex: int # Highest log index known to be committed
|
||||
lastApplied: int # Highest log index applied to state machine
|
||||
```
|
||||
|
||||
**Leader (volatile, reinitialized after election)**:
|
||||
```
|
||||
nextIndex[]: int # For each server, next log index to send
|
||||
matchIndex[]: int # For each server, highest log index replicated
|
||||
```
|
||||
|
||||
**LogEntry**:
|
||||
```
|
||||
{
|
||||
term: int,
|
||||
command: any
|
||||
}
|
||||
```
|
||||
|
||||
### RequestVote RPC
|
||||
|
||||
**Request**:
|
||||
```
|
||||
{
|
||||
term: int, # Candidate's term
|
||||
candidateId: ServerId, # Candidate requesting vote
|
||||
lastLogIndex: int, # Index of candidate's last log entry
|
||||
lastLogTerm: int # Term of candidate's last log entry
|
||||
}
|
||||
```
|
||||
|
||||
**Response**:
|
||||
```
|
||||
{
|
||||
term: int, # currentTerm, for candidate to update itself
|
||||
voteGranted: bool # True if candidate received vote
|
||||
}
|
||||
```
|
||||
|
||||
**Receiver implementation**:
|
||||
```
|
||||
on receive RequestVote(term, candidateId, lastLogIndex, lastLogTerm):
|
||||
if term < currentTerm:
|
||||
return {term: currentTerm, voteGranted: false}
|
||||
|
||||
if term > currentTerm:
|
||||
currentTerm = term
|
||||
votedFor = null
|
||||
convert to follower
|
||||
|
||||
# Check if candidate's log is at least as up-to-date as ours
|
||||
ourLastTerm = log[len(log)-1].term if log else 0
|
||||
ourLastIndex = len(log) - 1
|
||||
|
||||
logOK = (lastLogTerm > ourLastTerm) or
|
||||
(lastLogTerm == ourLastTerm and lastLogIndex >= ourLastIndex)
|
||||
|
||||
if (votedFor is null or votedFor == candidateId) and logOK:
|
||||
votedFor = candidateId
|
||||
persist(currentTerm, votedFor)
|
||||
reset election timer
|
||||
return {term: currentTerm, voteGranted: true}
|
||||
|
||||
return {term: currentTerm, voteGranted: false}
|
||||
```
|
||||
|
||||
### AppendEntries RPC
|
||||
|
||||
**Request**:
|
||||
```
|
||||
{
|
||||
term: int, # Leader's term
|
||||
leaderId: ServerId, # For follower to redirect clients
|
||||
prevLogIndex: int, # Index of log entry preceding new ones
|
||||
prevLogTerm: int, # Term of prevLogIndex entry
|
||||
entries[]: LogEntry, # Log entries to store (empty for heartbeat)
|
||||
leaderCommit: int # Leader's commitIndex
|
||||
}
|
||||
```
|
||||
|
||||
**Response**:
|
||||
```
|
||||
{
|
||||
term: int, # currentTerm, for leader to update itself
|
||||
success: bool # True if follower had matching prevLog entry
|
||||
}
|
||||
```
|
||||
|
||||
**Receiver implementation**:
|
||||
```
|
||||
on receive AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit):
|
||||
if term < currentTerm:
|
||||
return {term: currentTerm, success: false}
|
||||
|
||||
reset election timer
|
||||
|
||||
if term > currentTerm:
|
||||
currentTerm = term
|
||||
votedFor = null
|
||||
|
||||
convert to follower
|
||||
|
||||
# Check log consistency
|
||||
if prevLogIndex >= len(log) or
|
||||
(prevLogIndex >= 0 and log[prevLogIndex].term != prevLogTerm):
|
||||
return {term: currentTerm, success: false}
|
||||
|
||||
# Append new entries (handling conflicts)
|
||||
for i, entry in enumerate(entries):
|
||||
index = prevLogIndex + 1 + i
|
||||
if index < len(log):
|
||||
if log[index].term != entry.term:
|
||||
# Delete conflicting entry and all following
|
||||
log = log[:index]
|
||||
log.append(entry)
|
||||
else:
|
||||
log.append(entry)
|
||||
|
||||
persist(currentTerm, votedFor, log)
|
||||
|
||||
# Update commit index
|
||||
if leaderCommit > commitIndex:
|
||||
commitIndex = min(leaderCommit, len(log) - 1)
|
||||
|
||||
return {term: currentTerm, success: true}
|
||||
```
|
||||
|
||||
### Leader Behavior
|
||||
|
||||
```
|
||||
on becoming leader:
|
||||
for each server:
|
||||
nextIndex[server] = len(log)
|
||||
matchIndex[server] = 0
|
||||
|
||||
start sending heartbeats
|
||||
|
||||
on receiving client command:
|
||||
append entry to local log
|
||||
persist log
|
||||
send AppendEntries to all followers
|
||||
|
||||
on receiving AppendEntries response from server:
|
||||
if response.success:
|
||||
matchIndex[server] = prevLogIndex + len(entries)
|
||||
nextIndex[server] = matchIndex[server] + 1
|
||||
|
||||
# Update commit index
|
||||
for N from commitIndex+1 to len(log)-1:
|
||||
if log[N].term == currentTerm and
|
||||
|{s : matchIndex[s] >= N}| > |servers|/2:
|
||||
commitIndex = N
|
||||
else:
|
||||
nextIndex[server] = max(1, nextIndex[server] - 1)
|
||||
retry AppendEntries with lower prevLogIndex
|
||||
|
||||
on commitIndex update:
|
||||
while lastApplied < commitIndex:
|
||||
lastApplied++
|
||||
apply log[lastApplied].command to state machine
|
||||
```
|
||||
|
||||
### Election Timeout
|
||||
|
||||
```
|
||||
on election timeout (follower or candidate):
|
||||
currentTerm++
|
||||
convert to candidate
|
||||
votedFor = self
|
||||
persist(currentTerm, votedFor)
|
||||
reset election timer
|
||||
votes = 1 # Vote for self
|
||||
|
||||
for each server except self:
|
||||
send RequestVote(currentTerm, self, lastLogIndex, lastLogTerm)
|
||||
|
||||
wait for responses or timeout:
|
||||
if received votes > |servers|/2:
|
||||
become leader
|
||||
if received AppendEntries from valid leader:
|
||||
become follower
|
||||
if timeout:
|
||||
start new election
|
||||
```
|
||||
|
||||
## PBFT Complete Specification
|
||||
|
||||
### Message Types
|
||||
|
||||
**REQUEST**:
|
||||
```
|
||||
{
|
||||
type: "REQUEST",
|
||||
operation: o, # Operation to execute
|
||||
timestamp: t, # Client timestamp (for reply matching)
|
||||
client: c # Client identifier
|
||||
}
|
||||
```
|
||||
|
||||
**PRE-PREPARE**:
|
||||
```
|
||||
{
|
||||
type: "PRE-PREPARE",
|
||||
view: v, # Current view number
|
||||
sequence: n, # Sequence number
|
||||
digest: d, # Hash of request
|
||||
request: m # The request message
|
||||
}
|
||||
signature(primary)
|
||||
```
|
||||
|
||||
**PREPARE**:
|
||||
```
|
||||
{
|
||||
type: "PREPARE",
|
||||
view: v,
|
||||
sequence: n,
|
||||
digest: d,
|
||||
replica: i # Sending replica
|
||||
}
|
||||
signature(replica_i)
|
||||
```
|
||||
|
||||
**COMMIT**:
|
||||
```
|
||||
{
|
||||
type: "COMMIT",
|
||||
view: v,
|
||||
sequence: n,
|
||||
digest: d,
|
||||
replica: i
|
||||
}
|
||||
signature(replica_i)
|
||||
```
|
||||
|
||||
**REPLY**:
|
||||
```
|
||||
{
|
||||
type: "REPLY",
|
||||
view: v,
|
||||
timestamp: t,
|
||||
client: c,
|
||||
replica: i,
|
||||
result: r # Execution result
|
||||
}
|
||||
signature(replica_i)
|
||||
```
|
||||
|
||||
### Replica State
|
||||
|
||||
```
|
||||
view: int # Current view
|
||||
sequence: int # Last assigned sequence number (primary)
|
||||
log[]: {request, prepares, commits, state} # Log of requests
|
||||
prepared_certificates: {} # Prepared certificates (2f+1 prepares)
|
||||
committed_certificates: {} # Committed certificates (2f+1 commits)
|
||||
h: int # Low water mark
|
||||
H: int # High water mark (h + L)
|
||||
```
|
||||
|
||||
### Normal Operation Protocol
|
||||
|
||||
**Primary (replica p = v mod n)**:
|
||||
```
|
||||
on receive REQUEST(m) from client:
|
||||
if not primary for current view:
|
||||
forward to primary
|
||||
return
|
||||
|
||||
n = assign_sequence_number()
|
||||
d = hash(m)
|
||||
|
||||
broadcast PRE-PREPARE(v, n, d, m) to all replicas
|
||||
add to log
|
||||
```
|
||||
|
||||
**All replicas**:
|
||||
```
|
||||
on receive PRE-PREPARE(v, n, d, m) from primary:
|
||||
if v != current_view:
|
||||
ignore
|
||||
if already accepted pre-prepare for (v, n) with different digest:
|
||||
ignore
|
||||
if not in_view_as_backup(v):
|
||||
ignore
|
||||
if not h < n <= H:
|
||||
ignore # Outside sequence window
|
||||
|
||||
# Valid pre-prepare
|
||||
add to log
|
||||
broadcast PREPARE(v, n, d, i) to all replicas
|
||||
|
||||
on receive PREPARE(v, n, d, j) from replica j:
|
||||
if v != current_view:
|
||||
ignore
|
||||
|
||||
add to log[n].prepares
|
||||
|
||||
if |log[n].prepares| >= 2f and not already_prepared(v, n, d):
|
||||
# Prepared certificate complete
|
||||
mark as prepared
|
||||
broadcast COMMIT(v, n, d, i) to all replicas
|
||||
|
||||
on receive COMMIT(v, n, d, j) from replica j:
|
||||
if v != current_view:
|
||||
ignore
|
||||
|
||||
add to log[n].commits
|
||||
|
||||
if |log[n].commits| >= 2f + 1 and prepared(v, n, d):
|
||||
# Committed certificate complete
|
||||
if all entries < n are committed:
|
||||
execute(m)
|
||||
send REPLY(v, t, c, i, result) to client
|
||||
```
|
||||
|
||||
### View Change Protocol
|
||||
|
||||
**Timeout trigger**:
|
||||
```
|
||||
on request timeout (no progress):
|
||||
view_change_timeout++
|
||||
broadcast VIEW-CHANGE(v+1, n, C, P, i)
|
||||
|
||||
where:
|
||||
n = last stable checkpoint sequence number
|
||||
C = checkpoint certificate (2f+1 checkpoint messages)
|
||||
P = set of prepared certificates for messages after n
|
||||
```
|
||||
|
||||
**VIEW-CHANGE**:
|
||||
```
|
||||
{
|
||||
type: "VIEW-CHANGE",
|
||||
view: v, # New view number
|
||||
sequence: n, # Checkpoint sequence
|
||||
checkpoints: C, # Checkpoint certificate
|
||||
prepared: P, # Set of prepared certificates
|
||||
replica: i
|
||||
}
|
||||
signature(replica_i)
|
||||
```
|
||||
|
||||
**New primary (p' = v mod n)**:
|
||||
```
|
||||
on receive 2f VIEW-CHANGE for view v:
|
||||
V = set of valid view-change messages
|
||||
|
||||
# Compute O: set of requests to re-propose
|
||||
O = {}
|
||||
for seq in max_checkpoint_seq(V) to max_seq(V):
|
||||
if exists prepared certificate for seq in V:
|
||||
O[seq] = request from certificate
|
||||
else:
|
||||
O[seq] = null-request # No-op
|
||||
|
||||
broadcast NEW-VIEW(v, V, O)
|
||||
|
||||
# Re-run protocol for requests in O
|
||||
for seq, request in O:
|
||||
if request != null:
|
||||
send PRE-PREPARE(v, seq, hash(request), request)
|
||||
```
|
||||
|
||||
**NEW-VIEW**:
|
||||
```
|
||||
{
|
||||
type: "NEW-VIEW",
|
||||
view: v,
|
||||
view_changes: V, # 2f+1 view-change messages
|
||||
pre_prepares: O # Set of pre-prepare messages
|
||||
}
|
||||
signature(primary)
|
||||
```
|
||||
|
||||
### Checkpointing
|
||||
|
||||
Periodic stable checkpoints to garbage collect logs:
|
||||
|
||||
```
|
||||
every K requests:
|
||||
state_hash = hash(state_machine_state)
|
||||
broadcast CHECKPOINT(n, state_hash, i)
|
||||
|
||||
on receive 2f+1 CHECKPOINT for (n, d):
|
||||
if all digests match:
|
||||
create stable checkpoint
|
||||
h = n # Move low water mark
|
||||
garbage_collect(entries < n)
|
||||
```
|
||||
|
||||
## HotStuff Protocol
|
||||
|
||||
Linear complexity BFT using threshold signatures.
|
||||
|
||||
### Key Innovation
|
||||
|
||||
- **Three-phase**: prepare → pre-commit → commit → decide
|
||||
- **Pipelining**: Next proposal starts before current finishes
|
||||
- **Threshold signatures**: O(n) total messages instead of O(n²)
|
||||
|
||||
### Message Flow
|
||||
|
||||
```
|
||||
Phase 1 (Prepare):
|
||||
Leader: broadcast PREPARE(v, node)
|
||||
Replicas: sign and send partial signature to leader
|
||||
Leader: aggregate into prepare certificate QC
|
||||
|
||||
Phase 2 (Pre-commit):
|
||||
Leader: broadcast PRE-COMMIT(v, QC_prepare)
|
||||
Replicas: sign and send partial signature
|
||||
Leader: aggregate into pre-commit certificate
|
||||
|
||||
Phase 3 (Commit):
|
||||
Leader: broadcast COMMIT(v, QC_precommit)
|
||||
Replicas: sign and send partial signature
|
||||
Leader: aggregate into commit certificate
|
||||
|
||||
Phase 4 (Decide):
|
||||
Leader: broadcast DECIDE(v, QC_commit)
|
||||
Replicas: execute and commit
|
||||
```
|
||||
|
||||
### Pipelining
|
||||
|
||||
```
|
||||
Block k: [prepare] [pre-commit] [commit] [decide]
|
||||
Block k+1: [prepare] [pre-commit] [commit] [decide]
|
||||
Block k+2: [prepare] [pre-commit] [commit] [decide]
|
||||
```
|
||||
|
||||
Each phase of block k+1 piggybacks on messages for block k.
|
||||
|
||||
## Protocol Comparison Matrix
|
||||
|
||||
| Feature | Paxos | Raft | PBFT | HotStuff |
|
||||
|---------|-------|------|------|----------|
|
||||
| Fault model | Crash | Crash | Byzantine | Byzantine |
|
||||
| Fault tolerance | f with 2f+1 | f with 2f+1 | f with 3f+1 | f with 3f+1 |
|
||||
| Message complexity | O(n) | O(n) | O(n²) | O(n) |
|
||||
| Leader required | No (helps) | Yes | Yes | Yes |
|
||||
| Phases | 2 | 2 | 3 | 3 |
|
||||
| View change | Complex | Simple | Complex | Simple |
|
||||
610
.claude/skills/distributed-systems/references/logical-clocks.md
Normal file
610
.claude/skills/distributed-systems/references/logical-clocks.md
Normal file
@@ -0,0 +1,610 @@
|
||||
# Logical Clocks - Implementation Reference
|
||||
|
||||
Detailed implementations and algorithms for causality tracking.
|
||||
|
||||
## Lamport Clock Implementation
|
||||
|
||||
### Data Structure
|
||||
|
||||
```go
|
||||
type LamportClock struct {
|
||||
counter uint64
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func NewLamportClock() *LamportClock {
|
||||
return &LamportClock{counter: 0}
|
||||
}
|
||||
```
|
||||
|
||||
### Operations
|
||||
|
||||
```go
|
||||
// Tick increments clock for local event
|
||||
func (c *LamportClock) Tick() uint64 {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.counter++
|
||||
return c.counter
|
||||
}
|
||||
|
||||
// Send returns timestamp for outgoing message
|
||||
func (c *LamportClock) Send() uint64 {
|
||||
return c.Tick()
|
||||
}
|
||||
|
||||
// Receive updates clock based on incoming message timestamp
|
||||
func (c *LamportClock) Receive(msgTime uint64) uint64 {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
if msgTime > c.counter {
|
||||
c.counter = msgTime
|
||||
}
|
||||
c.counter++
|
||||
return c.counter
|
||||
}
|
||||
|
||||
// Time returns current clock value without incrementing
|
||||
func (c *LamportClock) Time() uint64 {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.counter
|
||||
}
|
||||
```
|
||||
|
||||
### Usage Example
|
||||
|
||||
```go
|
||||
// Process A
|
||||
clockA := NewLamportClock()
|
||||
e1 := clockA.Tick() // Event 1: time=1
|
||||
msgTime := clockA.Send() // Send: time=2
|
||||
|
||||
// Process B
|
||||
clockB := NewLamportClock()
|
||||
e2 := clockB.Tick() // Event 2: time=1
|
||||
e3 := clockB.Receive(msgTime) // Receive: time=3 (max(1,2)+1)
|
||||
```
|
||||
|
||||
## Vector Clock Implementation
|
||||
|
||||
### Data Structure
|
||||
|
||||
```go
|
||||
type VectorClock struct {
|
||||
clocks map[string]uint64 // processID -> logical time
|
||||
self string // this process's ID
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewVectorClock(processID string, allProcesses []string) *VectorClock {
|
||||
clocks := make(map[string]uint64)
|
||||
for _, p := range allProcesses {
|
||||
clocks[p] = 0
|
||||
}
|
||||
return &VectorClock{
|
||||
clocks: clocks,
|
||||
self: processID,
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Operations
|
||||
|
||||
```go
|
||||
// Tick increments own clock
|
||||
func (vc *VectorClock) Tick() map[string]uint64 {
|
||||
vc.mu.Lock()
|
||||
defer vc.mu.Unlock()
|
||||
|
||||
vc.clocks[vc.self]++
|
||||
return vc.copy()
|
||||
}
|
||||
|
||||
// Send returns copy of vector for message
|
||||
func (vc *VectorClock) Send() map[string]uint64 {
|
||||
return vc.Tick()
|
||||
}
|
||||
|
||||
// Receive merges incoming vector and increments
|
||||
func (vc *VectorClock) Receive(incoming map[string]uint64) map[string]uint64 {
|
||||
vc.mu.Lock()
|
||||
defer vc.mu.Unlock()
|
||||
|
||||
// Merge: take max of each component
|
||||
for pid, time := range incoming {
|
||||
if time > vc.clocks[pid] {
|
||||
vc.clocks[pid] = time
|
||||
}
|
||||
}
|
||||
|
||||
// Increment own clock
|
||||
vc.clocks[vc.self]++
|
||||
return vc.copy()
|
||||
}
|
||||
|
||||
// copy returns a copy of the vector
|
||||
func (vc *VectorClock) copy() map[string]uint64 {
|
||||
result := make(map[string]uint64)
|
||||
for k, v := range vc.clocks {
|
||||
result[k] = v
|
||||
}
|
||||
return result
|
||||
}
|
||||
```
|
||||
|
||||
### Comparison Functions
|
||||
|
||||
```go
|
||||
// Compare returns ordering relationship between two vectors
|
||||
type Ordering int
|
||||
|
||||
const (
|
||||
Equal Ordering = iota // V1 == V2
|
||||
HappenedBefore // V1 < V2
|
||||
HappenedAfter // V1 > V2
|
||||
Concurrent // V1 || V2
|
||||
)
|
||||
|
||||
func Compare(v1, v2 map[string]uint64) Ordering {
|
||||
less := false
|
||||
greater := false
|
||||
|
||||
// Get all keys
|
||||
allKeys := make(map[string]bool)
|
||||
for k := range v1 {
|
||||
allKeys[k] = true
|
||||
}
|
||||
for k := range v2 {
|
||||
allKeys[k] = true
|
||||
}
|
||||
|
||||
for k := range allKeys {
|
||||
t1 := v1[k] // 0 if not present
|
||||
t2 := v2[k]
|
||||
|
||||
if t1 < t2 {
|
||||
less = true
|
||||
}
|
||||
if t1 > t2 {
|
||||
greater = true
|
||||
}
|
||||
}
|
||||
|
||||
if !less && !greater {
|
||||
return Equal
|
||||
}
|
||||
if less && !greater {
|
||||
return HappenedBefore
|
||||
}
|
||||
if greater && !less {
|
||||
return HappenedAfter
|
||||
}
|
||||
return Concurrent
|
||||
}
|
||||
|
||||
// IsConcurrent checks if two events are concurrent
|
||||
func IsConcurrent(v1, v2 map[string]uint64) bool {
|
||||
return Compare(v1, v2) == Concurrent
|
||||
}
|
||||
|
||||
// HappenedBefore checks if v1 -> v2 (v1 causally precedes v2)
|
||||
func HappenedBefore(v1, v2 map[string]uint64) bool {
|
||||
return Compare(v1, v2) == HappenedBefore
|
||||
}
|
||||
```
|
||||
|
||||
## Interval Tree Clock Implementation
|
||||
|
||||
### Data Structures
|
||||
|
||||
```go
|
||||
// ID represents the identity tree
|
||||
type ID struct {
|
||||
IsLeaf bool
|
||||
Value int // 0 or 1 for leaves
|
||||
Left *ID // nil for leaves
|
||||
Right *ID
|
||||
}
|
||||
|
||||
// Stamp represents the event tree
|
||||
type Stamp struct {
|
||||
Base int
|
||||
Left *Stamp // nil for leaf stamps
|
||||
Right *Stamp
|
||||
}
|
||||
|
||||
// ITC combines ID and Stamp
|
||||
type ITC struct {
|
||||
ID *ID
|
||||
Stamp *Stamp
|
||||
}
|
||||
```
|
||||
|
||||
### ID Operations
|
||||
|
||||
```go
|
||||
// NewSeedID creates initial full ID (1)
|
||||
func NewSeedID() *ID {
|
||||
return &ID{IsLeaf: true, Value: 1}
|
||||
}
|
||||
|
||||
// Fork splits an ID into two
|
||||
func (id *ID) Fork() (*ID, *ID) {
|
||||
if id.IsLeaf {
|
||||
if id.Value == 0 {
|
||||
// Cannot fork zero ID
|
||||
return &ID{IsLeaf: true, Value: 0},
|
||||
&ID{IsLeaf: true, Value: 0}
|
||||
}
|
||||
// Split full ID into left and right halves
|
||||
return &ID{
|
||||
IsLeaf: false,
|
||||
Left: &ID{IsLeaf: true, Value: 1},
|
||||
Right: &ID{IsLeaf: true, Value: 0},
|
||||
},
|
||||
&ID{
|
||||
IsLeaf: false,
|
||||
Left: &ID{IsLeaf: true, Value: 0},
|
||||
Right: &ID{IsLeaf: true, Value: 1},
|
||||
}
|
||||
}
|
||||
|
||||
// Fork from non-leaf: give half to each
|
||||
if id.Left.IsLeaf && id.Left.Value == 0 {
|
||||
// Left is zero, fork right
|
||||
newRight1, newRight2 := id.Right.Fork()
|
||||
return &ID{IsLeaf: false, Left: id.Left, Right: newRight1},
|
||||
&ID{IsLeaf: false, Left: &ID{IsLeaf: true, Value: 0}, Right: newRight2}
|
||||
}
|
||||
if id.Right.IsLeaf && id.Right.Value == 0 {
|
||||
// Right is zero, fork left
|
||||
newLeft1, newLeft2 := id.Left.Fork()
|
||||
return &ID{IsLeaf: false, Left: newLeft1, Right: id.Right},
|
||||
&ID{IsLeaf: false, Left: newLeft2, Right: &ID{IsLeaf: true, Value: 0}}
|
||||
}
|
||||
|
||||
// Both have IDs, split
|
||||
return &ID{IsLeaf: false, Left: id.Left, Right: &ID{IsLeaf: true, Value: 0}},
|
||||
&ID{IsLeaf: false, Left: &ID{IsLeaf: true, Value: 0}, Right: id.Right}
|
||||
}
|
||||
|
||||
// Join merges two IDs
|
||||
func Join(id1, id2 *ID) *ID {
|
||||
if id1.IsLeaf && id1.Value == 0 {
|
||||
return id2
|
||||
}
|
||||
if id2.IsLeaf && id2.Value == 0 {
|
||||
return id1
|
||||
}
|
||||
if id1.IsLeaf && id2.IsLeaf && id1.Value == 1 && id2.Value == 1 {
|
||||
return &ID{IsLeaf: true, Value: 1}
|
||||
}
|
||||
|
||||
// Normalize to non-leaf
|
||||
left1 := id1.Left
|
||||
right1 := id1.Right
|
||||
left2 := id2.Left
|
||||
right2 := id2.Right
|
||||
|
||||
if id1.IsLeaf {
|
||||
left1 = id1
|
||||
right1 = id1
|
||||
}
|
||||
if id2.IsLeaf {
|
||||
left2 = id2
|
||||
right2 = id2
|
||||
}
|
||||
|
||||
newLeft := Join(left1, left2)
|
||||
newRight := Join(right1, right2)
|
||||
|
||||
return normalize(&ID{IsLeaf: false, Left: newLeft, Right: newRight})
|
||||
}
|
||||
|
||||
func normalize(id *ID) *ID {
|
||||
if !id.IsLeaf {
|
||||
if id.Left.IsLeaf && id.Right.IsLeaf &&
|
||||
id.Left.Value == id.Right.Value {
|
||||
return &ID{IsLeaf: true, Value: id.Left.Value}
|
||||
}
|
||||
}
|
||||
return id
|
||||
}
|
||||
```
|
||||
|
||||
### Stamp Operations
|
||||
|
||||
```go
|
||||
// NewStamp creates initial stamp (0)
|
||||
func NewStamp() *Stamp {
|
||||
return &Stamp{Base: 0}
|
||||
}
|
||||
|
||||
// Event increments the stamp for the given ID
|
||||
func Event(id *ID, stamp *Stamp) *Stamp {
|
||||
if id.IsLeaf {
|
||||
if id.Value == 1 {
|
||||
return &Stamp{Base: stamp.Base + 1}
|
||||
}
|
||||
return stamp // Cannot increment with zero ID
|
||||
}
|
||||
|
||||
// Non-leaf ID: fill where we have ID
|
||||
if id.Left.IsLeaf && id.Left.Value == 1 {
|
||||
// Have left ID, increment left
|
||||
newLeft := Event(&ID{IsLeaf: true, Value: 1}, getLeft(stamp))
|
||||
return normalizeStamp(&Stamp{
|
||||
Base: stamp.Base,
|
||||
Left: newLeft,
|
||||
Right: getRight(stamp),
|
||||
})
|
||||
}
|
||||
if id.Right.IsLeaf && id.Right.Value == 1 {
|
||||
newRight := Event(&ID{IsLeaf: true, Value: 1}, getRight(stamp))
|
||||
return normalizeStamp(&Stamp{
|
||||
Base: stamp.Base,
|
||||
Left: getLeft(stamp),
|
||||
Right: newRight,
|
||||
})
|
||||
}
|
||||
|
||||
// Both non-zero, choose lower side
|
||||
leftMax := maxStamp(getLeft(stamp))
|
||||
rightMax := maxStamp(getRight(stamp))
|
||||
|
||||
if leftMax <= rightMax {
|
||||
return normalizeStamp(&Stamp{
|
||||
Base: stamp.Base,
|
||||
Left: Event(id.Left, getLeft(stamp)),
|
||||
Right: getRight(stamp),
|
||||
})
|
||||
}
|
||||
return normalizeStamp(&Stamp{
|
||||
Base: stamp.Base,
|
||||
Left: getLeft(stamp),
|
||||
Right: Event(id.Right, getRight(stamp)),
|
||||
})
|
||||
}
|
||||
|
||||
func getLeft(s *Stamp) *Stamp {
|
||||
if s.Left == nil {
|
||||
return &Stamp{Base: 0}
|
||||
}
|
||||
return s.Left
|
||||
}
|
||||
|
||||
func getRight(s *Stamp) *Stamp {
|
||||
if s.Right == nil {
|
||||
return &Stamp{Base: 0}
|
||||
}
|
||||
return s.Right
|
||||
}
|
||||
|
||||
func maxStamp(s *Stamp) int {
|
||||
if s.Left == nil && s.Right == nil {
|
||||
return s.Base
|
||||
}
|
||||
left := 0
|
||||
right := 0
|
||||
if s.Left != nil {
|
||||
left = maxStamp(s.Left)
|
||||
}
|
||||
if s.Right != nil {
|
||||
right = maxStamp(s.Right)
|
||||
}
|
||||
max := left
|
||||
if right > max {
|
||||
max = right
|
||||
}
|
||||
return s.Base + max
|
||||
}
|
||||
|
||||
// JoinStamps merges two stamps
|
||||
func JoinStamps(s1, s2 *Stamp) *Stamp {
|
||||
// Take max at each level
|
||||
base := s1.Base
|
||||
if s2.Base > base {
|
||||
base = s2.Base
|
||||
}
|
||||
|
||||
// Adjust for base difference
|
||||
adj1 := s1.Base
|
||||
adj2 := s2.Base
|
||||
|
||||
return normalizeStamp(&Stamp{
|
||||
Base: base,
|
||||
Left: joinStampsRecursive(s1.Left, s2.Left, adj1-base, adj2-base),
|
||||
Right: joinStampsRecursive(s1.Right, s2.Right, adj1-base, adj2-base),
|
||||
})
|
||||
}
|
||||
|
||||
func normalizeStamp(s *Stamp) *Stamp {
|
||||
if s.Left == nil && s.Right == nil {
|
||||
return s
|
||||
}
|
||||
if s.Left != nil && s.Right != nil {
|
||||
if s.Left.Base > 0 && s.Right.Base > 0 {
|
||||
min := s.Left.Base
|
||||
if s.Right.Base < min {
|
||||
min = s.Right.Base
|
||||
}
|
||||
return &Stamp{
|
||||
Base: s.Base + min,
|
||||
Left: &Stamp{Base: s.Left.Base - min, Left: s.Left.Left, Right: s.Left.Right},
|
||||
Right: &Stamp{Base: s.Right.Base - min, Left: s.Right.Left, Right: s.Right.Right},
|
||||
}
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
```
|
||||
|
||||
## Hybrid Logical Clock Implementation
|
||||
|
||||
```go
|
||||
type HLC struct {
|
||||
l int64 // logical component (physical time)
|
||||
c int64 // counter
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func NewHLC() *HLC {
|
||||
return &HLC{l: 0, c: 0}
|
||||
}
|
||||
|
||||
type HLCTimestamp struct {
|
||||
L int64
|
||||
C int64
|
||||
}
|
||||
|
||||
func (hlc *HLC) physicalTime() int64 {
|
||||
return time.Now().UnixNano()
|
||||
}
|
||||
|
||||
// Now returns current HLC timestamp for local/send event
|
||||
func (hlc *HLC) Now() HLCTimestamp {
|
||||
hlc.mu.Lock()
|
||||
defer hlc.mu.Unlock()
|
||||
|
||||
pt := hlc.physicalTime()
|
||||
|
||||
if pt > hlc.l {
|
||||
hlc.l = pt
|
||||
hlc.c = 0
|
||||
} else {
|
||||
hlc.c++
|
||||
}
|
||||
|
||||
return HLCTimestamp{L: hlc.l, C: hlc.c}
|
||||
}
|
||||
|
||||
// Update updates HLC based on received timestamp
|
||||
func (hlc *HLC) Update(received HLCTimestamp) HLCTimestamp {
|
||||
hlc.mu.Lock()
|
||||
defer hlc.mu.Unlock()
|
||||
|
||||
pt := hlc.physicalTime()
|
||||
|
||||
if pt > hlc.l && pt > received.L {
|
||||
hlc.l = pt
|
||||
hlc.c = 0
|
||||
} else if received.L > hlc.l {
|
||||
hlc.l = received.L
|
||||
hlc.c = received.C + 1
|
||||
} else if hlc.l > received.L {
|
||||
hlc.c++
|
||||
} else { // hlc.l == received.L
|
||||
if received.C > hlc.c {
|
||||
hlc.c = received.C + 1
|
||||
} else {
|
||||
hlc.c++
|
||||
}
|
||||
}
|
||||
|
||||
return HLCTimestamp{L: hlc.l, C: hlc.c}
|
||||
}
|
||||
|
||||
// Compare compares two HLC timestamps
|
||||
func (t1 HLCTimestamp) Compare(t2 HLCTimestamp) int {
|
||||
if t1.L < t2.L {
|
||||
return -1
|
||||
}
|
||||
if t1.L > t2.L {
|
||||
return 1
|
||||
}
|
||||
if t1.C < t2.C {
|
||||
return -1
|
||||
}
|
||||
if t1.C > t2.C {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
```
|
||||
|
||||
## Causal Broadcast Implementation
|
||||
|
||||
```go
|
||||
type CausalBroadcast struct {
|
||||
vc *VectorClock
|
||||
pending []PendingMessage
|
||||
deliver func(Message)
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
type PendingMessage struct {
|
||||
Msg Message
|
||||
Timestamp map[string]uint64
|
||||
}
|
||||
|
||||
func NewCausalBroadcast(processID string, processes []string, deliver func(Message)) *CausalBroadcast {
|
||||
return &CausalBroadcast{
|
||||
vc: NewVectorClock(processID, processes),
|
||||
pending: make([]PendingMessage, 0),
|
||||
deliver: deliver,
|
||||
}
|
||||
}
|
||||
|
||||
// Broadcast sends a message to all processes
|
||||
func (cb *CausalBroadcast) Broadcast(msg Message) map[string]uint64 {
|
||||
cb.mu.Lock()
|
||||
defer cb.mu.Unlock()
|
||||
|
||||
timestamp := cb.vc.Send()
|
||||
// Actual network broadcast would happen here
|
||||
return timestamp
|
||||
}
|
||||
|
||||
// Receive handles an incoming message
|
||||
func (cb *CausalBroadcast) Receive(msg Message, sender string, timestamp map[string]uint64) {
|
||||
cb.mu.Lock()
|
||||
defer cb.mu.Unlock()
|
||||
|
||||
// Add to pending
|
||||
cb.pending = append(cb.pending, PendingMessage{Msg: msg, Timestamp: timestamp})
|
||||
|
||||
// Try to deliver pending messages
|
||||
cb.tryDeliver()
|
||||
}
|
||||
|
||||
func (cb *CausalBroadcast) tryDeliver() {
|
||||
changed := true
|
||||
for changed {
|
||||
changed = false
|
||||
|
||||
for i, pending := range cb.pending {
|
||||
if cb.canDeliver(pending.Timestamp) {
|
||||
// Deliver message
|
||||
cb.vc.Receive(pending.Timestamp)
|
||||
cb.deliver(pending.Msg)
|
||||
|
||||
// Remove from pending
|
||||
cb.pending = append(cb.pending[:i], cb.pending[i+1:]...)
|
||||
changed = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cb *CausalBroadcast) canDeliver(msgVC map[string]uint64) bool {
|
||||
currentVC := cb.vc.clocks
|
||||
|
||||
for pid, msgTime := range msgVC {
|
||||
if pid == cb.vc.self {
|
||||
// Must be next expected from sender
|
||||
if msgTime != currentVC[pid]+1 {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
// All other dependencies must be satisfied
|
||||
if msgTime > currentVC[pid] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
```
|
||||
369
.claude/skills/elliptic-curves/SKILL.md
Normal file
369
.claude/skills/elliptic-curves/SKILL.md
Normal file
@@ -0,0 +1,369 @@
|
||||
---
|
||||
name: elliptic-curves
|
||||
description: This skill should be used when working with elliptic curve cryptography, implementing or debugging secp256k1 operations, understanding modular arithmetic and finite fields, or implementing signature schemes like ECDSA and Schnorr. Provides comprehensive knowledge of group theory foundations, curve mathematics, point multiplication algorithms, and cryptographic optimizations.
|
||||
---
|
||||
|
||||
# Elliptic Curve Cryptography
|
||||
|
||||
This skill provides deep knowledge of elliptic curve cryptography (ECC), with particular focus on the secp256k1 curve used in Bitcoin and Nostr, including the mathematical foundations and implementation considerations.
|
||||
|
||||
## When to Use This Skill
|
||||
|
||||
- Implementing or debugging elliptic curve operations
|
||||
- Working with secp256k1, ECDSA, or Schnorr signatures
|
||||
- Understanding modular arithmetic and finite field operations
|
||||
- Optimizing cryptographic code for performance
|
||||
- Analyzing security properties of curve-based cryptography
|
||||
|
||||
## Mathematical Foundations
|
||||
|
||||
### Groups in Cryptography
|
||||
|
||||
A **group** is a set G with a binary operation (often denoted · or +) satisfying:
|
||||
|
||||
1. **Closure**: For all a, b ∈ G, the result a · b is also in G
|
||||
2. **Associativity**: (a · b) · c = a · (b · c)
|
||||
3. **Identity**: There exists e ∈ G such that e · a = a · e = a
|
||||
4. **Inverse**: For each a ∈ G, there exists a⁻¹ such that a · a⁻¹ = e
|
||||
|
||||
A **cyclic group** is generated by repeatedly applying the operation to a single element (the generator). The **order** of a group is the number of elements.
|
||||
|
||||
**Why groups matter in cryptography**: The discrete logarithm problem—given g and gⁿ, find n—is computationally hard in certain groups, forming the security basis for ECC.
|
||||
|
||||
### Modular Arithmetic
|
||||
|
||||
Modular arithmetic constrains calculations to a finite range [0, p-1] for some modulus p:
|
||||
|
||||
```
|
||||
a ≡ b (mod p) means p divides (a - b)
|
||||
|
||||
Operations:
|
||||
- Addition: (a + b) mod p
|
||||
- Subtraction: (a - b + p) mod p
|
||||
- Multiplication: (a × b) mod p
|
||||
- Inverse: a⁻¹ where (a × a⁻¹) ≡ 1 (mod p)
|
||||
```
|
||||
|
||||
**Computing modular inverse**:
|
||||
- **Fermat's Little Theorem**: If p is prime, a⁻¹ ≡ a^(p-2) (mod p)
|
||||
- **Extended Euclidean Algorithm**: More efficient for general cases
|
||||
- **SafeGCD Algorithm**: Constant-time, used in libsecp256k1
|
||||
|
||||
### Finite Fields (Galois Fields)
|
||||
|
||||
A **finite field** GF(p) or 𝔽ₚ is a field with a finite number of elements where:
|
||||
- p must be prime (or a prime power for extension fields)
|
||||
- All arithmetic operations are defined and produce elements within the field
|
||||
- Every non-zero element has a multiplicative inverse
|
||||
|
||||
For cryptographic curves like secp256k1, the field is 𝔽ₚ where p is a 256-bit prime.
|
||||
|
||||
**Key property**: The non-zero elements of a finite field form a cyclic group under multiplication.
|
||||
|
||||
## Elliptic Curves
|
||||
|
||||
### The Curve Equation
|
||||
|
||||
An elliptic curve over a finite field 𝔽ₚ is defined by the Weierstrass equation:
|
||||
|
||||
```
|
||||
y² = x³ + ax + b (mod p)
|
||||
```
|
||||
|
||||
The curve must satisfy the non-singularity condition: 4a³ + 27b² ≠ 0
|
||||
|
||||
### Points on the Curve
|
||||
|
||||
A point P = (x, y) is on the curve if it satisfies the equation. The set of all points, plus a special "point at infinity" O (the identity element), forms an abelian group.
|
||||
|
||||
### Point Operations
|
||||
|
||||
**Point Addition (P + Q where P ≠ Q)**:
|
||||
```
|
||||
λ = (y₂ - y₁) / (x₂ - x₁) (mod p)
|
||||
x₃ = λ² - x₁ - x₂ (mod p)
|
||||
y₃ = λ(x₁ - x₃) - y₁ (mod p)
|
||||
```
|
||||
|
||||
**Point Doubling (P + P = 2P)**:
|
||||
```
|
||||
λ = (3x₁² + a) / (2y₁) (mod p)
|
||||
x₃ = λ² - 2x₁ (mod p)
|
||||
y₃ = λ(x₁ - x₃) - y₁ (mod p)
|
||||
```
|
||||
|
||||
**Point at Infinity**: Acts as the identity element; P + O = P for all P.
|
||||
|
||||
**Point Negation**: -P = (x, -y) = (x, p - y)
|
||||
|
||||
## The secp256k1 Curve
|
||||
|
||||
### Parameters
|
||||
|
||||
secp256k1 is defined by SECG (Standards for Efficient Cryptography Group):
|
||||
|
||||
```
|
||||
Curve equation: y² = x³ + 7 (a = 0, b = 7)
|
||||
|
||||
Prime modulus p:
|
||||
0xFFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F
|
||||
= 2²⁵⁶ - 2³² - 977
|
||||
|
||||
Group order n:
|
||||
0xFFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE BAAEDCE6 AF48A03B BFD25E8C D0364141
|
||||
|
||||
Generator point G:
|
||||
Gx = 0x79BE667E F9DCBBAC 55A06295 CE870B07 029BFCDB 2DCE28D9 59F2815B 16F81798
|
||||
Gy = 0x483ADA77 26A3C465 5DA4FBFC 0E1108A8 FD17B448 A6855419 9C47D08F FB10D4B8
|
||||
|
||||
Cofactor h = 1
|
||||
```
|
||||
|
||||
### Why secp256k1?
|
||||
|
||||
1. **Koblitz curve**: a = 0 enables faster computation (no ax term)
|
||||
2. **Special prime**: p = 2²⁵⁶ - 2³² - 977 allows efficient modular reduction
|
||||
3. **Deterministic construction**: Not randomly generated, reducing backdoor concerns
|
||||
4. **~30% faster** than random curves when fully optimized
|
||||
|
||||
### Efficient Modular Reduction
|
||||
|
||||
The special form of p enables fast reduction without general division:
|
||||
|
||||
```
|
||||
For p = 2²⁵⁶ - 2³² - 977:
|
||||
To reduce a 512-bit number c = c_high × 2²⁵⁶ + c_low:
|
||||
c ≡ c_low + c_high × 2³² + c_high × 977 (mod p)
|
||||
```
|
||||
|
||||
## Point Multiplication Algorithms
|
||||
|
||||
Scalar multiplication kP (computing P + P + ... + P, k times) is the core operation.
|
||||
|
||||
### Double-and-Add (Binary Method)
|
||||
|
||||
```
|
||||
Input: k (scalar), P (point)
|
||||
Output: kP
|
||||
|
||||
R = O (point at infinity)
|
||||
for i from bit_length(k)-1 down to 0:
|
||||
R = 2R # Point doubling
|
||||
if bit i of k is 1:
|
||||
R = R + P # Point addition
|
||||
return R
|
||||
```
|
||||
|
||||
**Complexity**: O(log k) point operations
|
||||
**Vulnerability**: Timing side-channels (different branches for 0/1 bits)
|
||||
|
||||
### Montgomery Ladder
|
||||
|
||||
Constant-time algorithm that performs the same operations regardless of bit values:
|
||||
|
||||
```
|
||||
Input: k (scalar), P (point)
|
||||
Output: kP
|
||||
|
||||
R0 = O
|
||||
R1 = P
|
||||
for i from bit_length(k)-1 down to 0:
|
||||
if bit i of k is 0:
|
||||
R1 = R0 + R1
|
||||
R0 = 2R0
|
||||
else:
|
||||
R0 = R0 + R1
|
||||
R1 = 2R1
|
||||
return R0
|
||||
```
|
||||
|
||||
**Advantage**: Resistant to simple power analysis and timing attacks.
|
||||
|
||||
### Window Methods (w-NAF)
|
||||
|
||||
Precompute small multiples of P, then process w bits at a time:
|
||||
|
||||
```
|
||||
w-NAF representation reduces additions by ~1/3 compared to binary
|
||||
Precomputation table: [P, 3P, 5P, 7P, ...] for w=4
|
||||
```
|
||||
|
||||
### Endomorphism Optimization (GLV Method)
|
||||
|
||||
secp256k1 has an efficiently computable endomorphism φ where:
|
||||
```
|
||||
φ(x, y) = (βx, y) where β³ ≡ 1 (mod p)
|
||||
φ(P) = λP where λ³ ≡ 1 (mod n)
|
||||
```
|
||||
|
||||
This allows splitting scalar k into k₁ + k₂λ with smaller k₁, k₂, reducing operations by ~33-50%.
|
||||
|
||||
### Multi-Scalar Multiplication (Strauss-Shamir)
|
||||
|
||||
For computing k₁P₁ + k₂P₂ (common in signature verification):
|
||||
|
||||
```
|
||||
Process both scalars simultaneously, combining operations
|
||||
Reduces work compared to separate multiplications
|
||||
```
|
||||
|
||||
## Coordinate Systems
|
||||
|
||||
### Affine Coordinates
|
||||
|
||||
Standard (x, y) representation. Requires modular inversion for each operation.
|
||||
|
||||
### Projective Coordinates
|
||||
|
||||
Represent (X:Y:Z) where x = X/Z, y = Y/Z:
|
||||
- Avoids inversions during intermediate computations
|
||||
- Only one inversion at the end to convert back to affine
|
||||
|
||||
### Jacobian Coordinates
|
||||
|
||||
Represent (X:Y:Z) where x = X/Z², y = Y/Z³:
|
||||
- Fastest for point doubling
|
||||
- Used extensively in libsecp256k1
|
||||
|
||||
### López-Dahab Coordinates
|
||||
|
||||
For curves over GF(2ⁿ), optimized for binary field arithmetic.
|
||||
|
||||
## Signature Schemes
|
||||
|
||||
### ECDSA (Elliptic Curve Digital Signature Algorithm)
|
||||
|
||||
**Key Generation**:
|
||||
```
|
||||
Private key: d (random integer in [1, n-1])
|
||||
Public key: Q = dG
|
||||
```
|
||||
|
||||
**Signing message m**:
|
||||
```
|
||||
1. Hash: e = H(m) truncated to curve order bit length
|
||||
2. Random: k ∈ [1, n-1]
|
||||
3. Compute: (x, y) = kG
|
||||
4. Calculate: r = x mod n (if r = 0, restart with new k)
|
||||
5. Calculate: s = k⁻¹(e + rd) mod n (if s = 0, restart)
|
||||
6. Signature: (r, s)
|
||||
```
|
||||
|
||||
**Verification of signature (r, s) on message m**:
|
||||
```
|
||||
1. Check: r, s ∈ [1, n-1]
|
||||
2. Hash: e = H(m)
|
||||
3. Compute: w = s⁻¹ mod n
|
||||
4. Compute: u₁ = ew mod n, u₂ = rw mod n
|
||||
5. Compute: (x, y) = u₁G + u₂Q
|
||||
6. Valid if: r ≡ x (mod n)
|
||||
```
|
||||
|
||||
**Security considerations**:
|
||||
- k MUST be unique per signature (reuse leaks private key)
|
||||
- Use RFC 6979 for deterministic k derivation
|
||||
|
||||
### Schnorr Signatures (BIP-340)
|
||||
|
||||
Simpler, more efficient, with provable security.
|
||||
|
||||
**Signing message m**:
|
||||
```
|
||||
1. Random: k ∈ [1, n-1]
|
||||
2. Compute: R = kG
|
||||
3. Challenge: e = H(R || Q || m)
|
||||
4. Response: s = k + ed mod n
|
||||
5. Signature: (R, s) or (r_x, s) where r_x is x-coordinate of R
|
||||
```
|
||||
|
||||
**Verification**:
|
||||
```
|
||||
1. Compute: e = H(R || Q || m)
|
||||
2. Check: sG = R + eQ
|
||||
```
|
||||
|
||||
**Advantages over ECDSA**:
|
||||
- Linear: enables signature aggregation (MuSig)
|
||||
- Simpler verification (no modular inverse)
|
||||
- Batch verification support
|
||||
- Provably secure in Random Oracle Model
|
||||
|
||||
## Implementation Considerations
|
||||
|
||||
### Constant-Time Operations
|
||||
|
||||
To prevent timing attacks:
|
||||
- Avoid branches dependent on secret data
|
||||
- Use constant-time comparison functions
|
||||
- Mask operations to hide data-dependent timing
|
||||
|
||||
```go
|
||||
// BAD: Timing leak
|
||||
if secretBit == 1 {
|
||||
doOperation()
|
||||
}
|
||||
|
||||
// GOOD: Constant-time conditional
|
||||
result = conditionalSelect(secretBit, value1, value0)
|
||||
```
|
||||
|
||||
### Memory Safety
|
||||
|
||||
- Zeroize sensitive data after use
|
||||
- Avoid leaving secrets in registers or cache
|
||||
- Use secure memory allocation when available
|
||||
|
||||
### Side-Channel Protections
|
||||
|
||||
- **Timing attacks**: Use constant-time algorithms
|
||||
- **Power analysis**: Montgomery ladder, point blinding
|
||||
- **Cache attacks**: Avoid table lookups indexed by secrets
|
||||
|
||||
### Random Number Generation
|
||||
|
||||
- Use cryptographically secure RNG for k in ECDSA
|
||||
- Consider deterministic k (RFC 6979) for reproducibility
|
||||
- Validate output is in valid range [1, n-1]
|
||||
|
||||
## libsecp256k1 Optimizations
|
||||
|
||||
The Bitcoin Core library includes:
|
||||
|
||||
1. **Field arithmetic**: 5×52-bit limbs for 64-bit platforms
|
||||
2. **Scalar arithmetic**: 4×64-bit representation
|
||||
3. **Endomorphism**: GLV decomposition enabled by default
|
||||
4. **Batch inversion**: Amortizes expensive inversions
|
||||
5. **SafeGCD**: Constant-time modular inverse
|
||||
6. **Precomputed tables**: For generator point multiplications
|
||||
|
||||
## Security Properties
|
||||
|
||||
### Discrete Logarithm Problem (DLP)
|
||||
|
||||
Given P and Q = kP, finding k is computationally infeasible.
|
||||
|
||||
**Best known attacks**:
|
||||
- Generic: Baby-step Giant-step, Pollard's rho: O(√n) operations
|
||||
- For secp256k1: ~2¹²⁸ operations (128-bit security)
|
||||
|
||||
### Curve Security Criteria
|
||||
|
||||
- Large prime order subgroup
|
||||
- Cofactor 1 (no small subgroup attacks)
|
||||
- Resistant to MOV attack (embedding degree)
|
||||
- Not anomalous (n ≠ p)
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
1. **k reuse in ECDSA**: Immediately leaks private key
|
||||
2. **Weak random k**: Partially leaks key over multiple signatures
|
||||
3. **Invalid curve points**: Validate points are on curve
|
||||
4. **Small subgroup attacks**: Check point order (cofactor = 1 helps)
|
||||
5. **Timing leaks**: Non-constant-time scalar multiplication
|
||||
|
||||
## References
|
||||
|
||||
For detailed implementations, see:
|
||||
- `references/secp256k1-parameters.md` - Full curve parameters
|
||||
- `references/algorithms.md` - Detailed algorithm pseudocode
|
||||
- `references/security.md` - Security analysis and attack vectors
|
||||
513
.claude/skills/elliptic-curves/references/algorithms.md
Normal file
513
.claude/skills/elliptic-curves/references/algorithms.md
Normal file
@@ -0,0 +1,513 @@
|
||||
# Elliptic Curve Algorithms
|
||||
|
||||
Detailed pseudocode for core elliptic curve operations.
|
||||
|
||||
## Field Arithmetic
|
||||
|
||||
### Modular Addition
|
||||
|
||||
```
|
||||
function mod_add(a, b, p):
|
||||
result = a + b
|
||||
if result >= p:
|
||||
result = result - p
|
||||
return result
|
||||
```
|
||||
|
||||
### Modular Subtraction
|
||||
|
||||
```
|
||||
function mod_sub(a, b, p):
|
||||
if a >= b:
|
||||
return a - b
|
||||
else:
|
||||
return p - b + a
|
||||
```
|
||||
|
||||
### Modular Multiplication
|
||||
|
||||
For general case:
|
||||
```
|
||||
function mod_mul(a, b, p):
|
||||
return (a * b) mod p
|
||||
```
|
||||
|
||||
For secp256k1 optimized (Barrett reduction):
|
||||
```
|
||||
function mod_mul_secp256k1(a, b):
|
||||
# Compute full 512-bit product
|
||||
product = a * b
|
||||
|
||||
# Split into high and low 256-bit parts
|
||||
low = product & ((1 << 256) - 1)
|
||||
high = product >> 256
|
||||
|
||||
# Reduce: result ≡ low + high * (2³² + 977) (mod p)
|
||||
result = low + high * (1 << 32) + high * 977
|
||||
|
||||
# May need additional reduction
|
||||
while result >= p:
|
||||
result = result - p
|
||||
|
||||
return result
|
||||
```
|
||||
|
||||
### Modular Inverse
|
||||
|
||||
**Extended Euclidean Algorithm**:
|
||||
```
|
||||
function mod_inverse(a, p):
|
||||
if a == 0:
|
||||
error "No inverse exists for 0"
|
||||
|
||||
old_r, r = p, a
|
||||
old_s, s = 0, 1
|
||||
|
||||
while r != 0:
|
||||
quotient = old_r / r
|
||||
old_r, r = r, old_r - quotient * r
|
||||
old_s, s = s, old_s - quotient * s
|
||||
|
||||
if old_r != 1:
|
||||
error "No inverse exists"
|
||||
|
||||
if old_s < 0:
|
||||
old_s = old_s + p
|
||||
|
||||
return old_s
|
||||
```
|
||||
|
||||
**Fermat's Little Theorem** (for prime p):
|
||||
```
|
||||
function mod_inverse_fermat(a, p):
|
||||
return mod_exp(a, p - 2, p)
|
||||
```
|
||||
|
||||
### Modular Exponentiation (Square-and-Multiply)
|
||||
|
||||
```
|
||||
function mod_exp(base, exp, p):
|
||||
result = 1
|
||||
base = base mod p
|
||||
|
||||
while exp > 0:
|
||||
if exp & 1: # exp is odd
|
||||
result = (result * base) mod p
|
||||
exp = exp >> 1
|
||||
base = (base * base) mod p
|
||||
|
||||
return result
|
||||
```
|
||||
|
||||
### Modular Square Root (Tonelli-Shanks)
|
||||
|
||||
For secp256k1 where p ≡ 3 (mod 4):
|
||||
```
|
||||
function mod_sqrt(a, p):
|
||||
# For p ≡ 3 (mod 4), sqrt(a) = a^((p+1)/4)
|
||||
return mod_exp(a, (p + 1) / 4, p)
|
||||
```
|
||||
|
||||
## Point Operations
|
||||
|
||||
### Point Validation
|
||||
|
||||
```
|
||||
function is_on_curve(P, a, b, p):
|
||||
if P is infinity:
|
||||
return true
|
||||
|
||||
x, y = P
|
||||
left = (y * y) mod p
|
||||
right = (x * x * x + a * x + b) mod p
|
||||
|
||||
return left == right
|
||||
```
|
||||
|
||||
### Point Addition (Affine Coordinates)
|
||||
|
||||
```
|
||||
function point_add(P, Q, a, p):
|
||||
if P is infinity:
|
||||
return Q
|
||||
if Q is infinity:
|
||||
return P
|
||||
|
||||
x1, y1 = P
|
||||
x2, y2 = Q
|
||||
|
||||
if x1 == x2:
|
||||
if y1 == mod_neg(y2, p): # P = -Q
|
||||
return infinity
|
||||
else: # P == Q
|
||||
return point_double(P, a, p)
|
||||
|
||||
# λ = (y2 - y1) / (x2 - x1)
|
||||
numerator = mod_sub(y2, y1, p)
|
||||
denominator = mod_sub(x2, x1, p)
|
||||
λ = mod_mul(numerator, mod_inverse(denominator, p), p)
|
||||
|
||||
# x3 = λ² - x1 - x2
|
||||
x3 = mod_sub(mod_sub(mod_mul(λ, λ, p), x1, p), x2, p)
|
||||
|
||||
# y3 = λ(x1 - x3) - y1
|
||||
y3 = mod_sub(mod_mul(λ, mod_sub(x1, x3, p), p), y1, p)
|
||||
|
||||
return (x3, y3)
|
||||
```
|
||||
|
||||
### Point Doubling (Affine Coordinates)
|
||||
|
||||
```
|
||||
function point_double(P, a, p):
|
||||
if P is infinity:
|
||||
return infinity
|
||||
|
||||
x, y = P
|
||||
|
||||
if y == 0:
|
||||
return infinity
|
||||
|
||||
# λ = (3x² + a) / (2y)
|
||||
numerator = mod_add(mod_mul(3, mod_mul(x, x, p), p), a, p)
|
||||
denominator = mod_mul(2, y, p)
|
||||
λ = mod_mul(numerator, mod_inverse(denominator, p), p)
|
||||
|
||||
# x3 = λ² - 2x
|
||||
x3 = mod_sub(mod_mul(λ, λ, p), mod_mul(2, x, p), p)
|
||||
|
||||
# y3 = λ(x - x3) - y
|
||||
y3 = mod_sub(mod_mul(λ, mod_sub(x, x3, p), p), y, p)
|
||||
|
||||
return (x3, y3)
|
||||
```
|
||||
|
||||
### Point Negation
|
||||
|
||||
```
|
||||
function point_negate(P, p):
|
||||
if P is infinity:
|
||||
return infinity
|
||||
|
||||
x, y = P
|
||||
return (x, p - y)
|
||||
```
|
||||
|
||||
## Scalar Multiplication
|
||||
|
||||
### Double-and-Add (Left-to-Right)
|
||||
|
||||
```
|
||||
function scalar_mult_double_add(k, P, a, p):
|
||||
if k == 0 or P is infinity:
|
||||
return infinity
|
||||
|
||||
if k < 0:
|
||||
k = -k
|
||||
P = point_negate(P, p)
|
||||
|
||||
R = infinity
|
||||
bits = binary_representation(k) # MSB first
|
||||
|
||||
for bit in bits:
|
||||
R = point_double(R, a, p)
|
||||
if bit == 1:
|
||||
R = point_add(R, P, a, p)
|
||||
|
||||
return R
|
||||
```
|
||||
|
||||
### Montgomery Ladder (Constant-Time)
|
||||
|
||||
```
|
||||
function scalar_mult_montgomery(k, P, a, p):
|
||||
R0 = infinity
|
||||
R1 = P
|
||||
|
||||
bits = binary_representation(k) # MSB first
|
||||
|
||||
for bit in bits:
|
||||
if bit == 0:
|
||||
R1 = point_add(R0, R1, a, p)
|
||||
R0 = point_double(R0, a, p)
|
||||
else:
|
||||
R0 = point_add(R0, R1, a, p)
|
||||
R1 = point_double(R1, a, p)
|
||||
|
||||
return R0
|
||||
```
|
||||
|
||||
### w-NAF Scalar Multiplication
|
||||
|
||||
```
|
||||
function compute_wNAF(k, w):
|
||||
# Convert scalar to width-w Non-Adjacent Form
|
||||
naf = []
|
||||
|
||||
while k > 0:
|
||||
if k & 1: # k is odd
|
||||
# Get w-bit window
|
||||
digit = k mod (1 << w)
|
||||
if digit >= (1 << (w-1)):
|
||||
digit = digit - (1 << w)
|
||||
naf.append(digit)
|
||||
k = k - digit
|
||||
else:
|
||||
naf.append(0)
|
||||
k = k >> 1
|
||||
|
||||
return naf
|
||||
|
||||
function scalar_mult_wNAF(k, P, w, a, p):
|
||||
# Precompute odd multiples: [P, 3P, 5P, ..., (2^(w-1)-1)P]
|
||||
precomp = [P]
|
||||
P2 = point_double(P, a, p)
|
||||
for i in range(1, 1 << (w-1)):
|
||||
precomp.append(point_add(precomp[-1], P2, a, p))
|
||||
|
||||
# Convert k to w-NAF
|
||||
naf = compute_wNAF(k, w)
|
||||
|
||||
# Compute scalar multiplication
|
||||
R = infinity
|
||||
for i in range(len(naf) - 1, -1, -1):
|
||||
R = point_double(R, a, p)
|
||||
digit = naf[i]
|
||||
if digit > 0:
|
||||
R = point_add(R, precomp[(digit - 1) / 2], a, p)
|
||||
elif digit < 0:
|
||||
R = point_add(R, point_negate(precomp[(-digit - 1) / 2], p), a, p)
|
||||
|
||||
return R
|
||||
```
|
||||
|
||||
### Shamir's Trick (Multi-Scalar)
|
||||
|
||||
For computing k₁P + k₂Q efficiently:
|
||||
|
||||
```
|
||||
function multi_scalar_mult(k1, P, k2, Q, a, p):
|
||||
# Precompute P + Q
|
||||
PQ = point_add(P, Q, a, p)
|
||||
|
||||
# Get binary representations (same length, padded)
|
||||
bits1 = binary_representation(k1)
|
||||
bits2 = binary_representation(k2)
|
||||
max_len = max(len(bits1), len(bits2))
|
||||
bits1 = pad_left(bits1, max_len)
|
||||
bits2 = pad_left(bits2, max_len)
|
||||
|
||||
R = infinity
|
||||
|
||||
for i in range(max_len):
|
||||
R = point_double(R, a, p)
|
||||
|
||||
b1, b2 = bits1[i], bits2[i]
|
||||
|
||||
if b1 == 1 and b2 == 1:
|
||||
R = point_add(R, PQ, a, p)
|
||||
elif b1 == 1:
|
||||
R = point_add(R, P, a, p)
|
||||
elif b2 == 1:
|
||||
R = point_add(R, Q, a, p)
|
||||
|
||||
return R
|
||||
```
|
||||
|
||||
## Jacobian Coordinates
|
||||
|
||||
More efficient for repeated operations.
|
||||
|
||||
### Conversion
|
||||
|
||||
```
|
||||
# Affine to Jacobian
|
||||
function affine_to_jacobian(P):
|
||||
if P is infinity:
|
||||
return (1, 1, 0) # Jacobian infinity
|
||||
x, y = P
|
||||
return (x, y, 1)
|
||||
|
||||
# Jacobian to Affine
|
||||
function jacobian_to_affine(P, p):
|
||||
X, Y, Z = P
|
||||
if Z == 0:
|
||||
return infinity
|
||||
|
||||
Z_inv = mod_inverse(Z, p)
|
||||
Z_inv2 = mod_mul(Z_inv, Z_inv, p)
|
||||
Z_inv3 = mod_mul(Z_inv2, Z_inv, p)
|
||||
|
||||
x = mod_mul(X, Z_inv2, p)
|
||||
y = mod_mul(Y, Z_inv3, p)
|
||||
|
||||
return (x, y)
|
||||
```
|
||||
|
||||
### Point Doubling (Jacobian)
|
||||
|
||||
For curve y² = x³ + 7 (a = 0):
|
||||
|
||||
```
|
||||
function jacobian_double(P, p):
|
||||
X, Y, Z = P
|
||||
|
||||
if Y == 0:
|
||||
return (1, 1, 0) # infinity
|
||||
|
||||
# For a = 0: M = 3*X²
|
||||
S = mod_mul(4, mod_mul(X, mod_mul(Y, Y, p), p), p)
|
||||
M = mod_mul(3, mod_mul(X, X, p), p)
|
||||
|
||||
X3 = mod_sub(mod_mul(M, M, p), mod_mul(2, S, p), p)
|
||||
Y3 = mod_sub(mod_mul(M, mod_sub(S, X3, p), p),
|
||||
mod_mul(8, mod_mul(Y, Y, mod_mul(Y, Y, p), p), p), p)
|
||||
Z3 = mod_mul(2, mod_mul(Y, Z, p), p)
|
||||
|
||||
return (X3, Y3, Z3)
|
||||
```
|
||||
|
||||
### Point Addition (Jacobian + Affine)
|
||||
|
||||
Mixed addition is faster when one point is in affine:
|
||||
|
||||
```
|
||||
function jacobian_add_affine(P, Q, p):
|
||||
# P in Jacobian (X1, Y1, Z1), Q in affine (x2, y2)
|
||||
X1, Y1, Z1 = P
|
||||
x2, y2 = Q
|
||||
|
||||
if Z1 == 0:
|
||||
return affine_to_jacobian(Q)
|
||||
|
||||
Z1Z1 = mod_mul(Z1, Z1, p)
|
||||
U2 = mod_mul(x2, Z1Z1, p)
|
||||
S2 = mod_mul(y2, mod_mul(Z1, Z1Z1, p), p)
|
||||
|
||||
H = mod_sub(U2, X1, p)
|
||||
HH = mod_mul(H, H, p)
|
||||
I = mod_mul(4, HH, p)
|
||||
J = mod_mul(H, I, p)
|
||||
r = mod_mul(2, mod_sub(S2, Y1, p), p)
|
||||
V = mod_mul(X1, I, p)
|
||||
|
||||
X3 = mod_sub(mod_sub(mod_mul(r, r, p), J, p), mod_mul(2, V, p), p)
|
||||
Y3 = mod_sub(mod_mul(r, mod_sub(V, X3, p), p), mod_mul(2, mod_mul(Y1, J, p), p), p)
|
||||
Z3 = mod_mul(mod_sub(mod_mul(mod_add(Z1, H, p), mod_add(Z1, H, p), p),
|
||||
mod_add(Z1Z1, HH, p), p), 1, p)
|
||||
|
||||
return (X3, Y3, Z3)
|
||||
```
|
||||
|
||||
## GLV Endomorphism (secp256k1)
|
||||
|
||||
### Scalar Decomposition
|
||||
|
||||
```
|
||||
# Constants for secp256k1
|
||||
LAMBDA = 0x5363AD4CC05C30E0A5261C028812645A122E22EA20816678DF02967C1B23BD72
|
||||
BETA = 0x7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE
|
||||
|
||||
# Decomposition coefficients
|
||||
A1 = 0x3086D221A7D46BCDE86C90E49284EB15
|
||||
B1 = 0x114CA50F7A8E2F3F657C1108D9D44CFD8
|
||||
A2 = 0xE4437ED6010E88286F547FA90ABFE4C3
|
||||
B2 = A1
|
||||
|
||||
function glv_decompose(k, n):
|
||||
# Compute c1 = round(b2 * k / n)
|
||||
# Compute c2 = round(-b1 * k / n)
|
||||
c1 = (B2 * k + n // 2) // n
|
||||
c2 = (-B1 * k + n // 2) // n
|
||||
|
||||
# k1 = k - c1*A1 - c2*A2
|
||||
# k2 = -c1*B1 - c2*B2
|
||||
k1 = k - c1 * A1 - c2 * A2
|
||||
k2 = -c1 * B1 - c2 * B2
|
||||
|
||||
return (k1, k2)
|
||||
|
||||
function glv_scalar_mult(k, P, p, n):
|
||||
k1, k2 = glv_decompose(k, n)
|
||||
|
||||
# Compute endomorphism: φ(P) = (β*x, y)
|
||||
x, y = P
|
||||
phi_P = (mod_mul(BETA, x, p), y)
|
||||
|
||||
# Use Shamir's trick: k1*P + k2*φ(P)
|
||||
return multi_scalar_mult(k1, P, k2, phi_P, 0, p)
|
||||
```
|
||||
|
||||
## Batch Inversion
|
||||
|
||||
Amortize expensive inversions over multiple points:
|
||||
|
||||
```
|
||||
function batch_invert(values, p):
|
||||
n = len(values)
|
||||
if n == 0:
|
||||
return []
|
||||
|
||||
# Compute cumulative products
|
||||
products = [values[0]]
|
||||
for i in range(1, n):
|
||||
products.append(mod_mul(products[-1], values[i], p))
|
||||
|
||||
# Invert the final product
|
||||
inv = mod_inverse(products[-1], p)
|
||||
|
||||
# Compute individual inverses
|
||||
inverses = [0] * n
|
||||
for i in range(n - 1, 0, -1):
|
||||
inverses[i] = mod_mul(inv, products[i - 1], p)
|
||||
inv = mod_mul(inv, values[i], p)
|
||||
inverses[0] = inv
|
||||
|
||||
return inverses
|
||||
```
|
||||
|
||||
## Key Generation
|
||||
|
||||
```
|
||||
function generate_keypair(G, n, p):
|
||||
# Generate random private key
|
||||
d = random_integer(1, n - 1)
|
||||
|
||||
# Compute public key
|
||||
Q = scalar_mult(d, G)
|
||||
|
||||
return (d, Q)
|
||||
```
|
||||
|
||||
## Point Compression/Decompression
|
||||
|
||||
```
|
||||
function compress_point(P, p):
|
||||
if P is infinity:
|
||||
return bytes([0x00])
|
||||
|
||||
x, y = P
|
||||
prefix = 0x02 if (y % 2 == 0) else 0x03
|
||||
return bytes([prefix]) + x.to_bytes(32, 'big')
|
||||
|
||||
function decompress_point(compressed, a, b, p):
|
||||
prefix = compressed[0]
|
||||
|
||||
if prefix == 0x00:
|
||||
return infinity
|
||||
|
||||
x = int.from_bytes(compressed[1:], 'big')
|
||||
|
||||
# Compute y² = x³ + ax + b
|
||||
y_squared = mod_add(mod_add(mod_mul(x, mod_mul(x, x, p), p),
|
||||
mod_mul(a, x, p), p), b, p)
|
||||
|
||||
# Compute y = sqrt(y²)
|
||||
y = mod_sqrt(y_squared, p)
|
||||
|
||||
# Select correct y based on prefix
|
||||
if (prefix == 0x02) != (y % 2 == 0):
|
||||
y = p - y
|
||||
|
||||
return (x, y)
|
||||
```
|
||||
@@ -0,0 +1,194 @@
|
||||
# secp256k1 Complete Parameters
|
||||
|
||||
## Curve Definition
|
||||
|
||||
**Name**: secp256k1 (Standards for Efficient Cryptography, prime field, 256-bit, Koblitz curve #1)
|
||||
|
||||
**Equation**: y² = x³ + 7 (mod p)
|
||||
|
||||
This is the short Weierstrass form with coefficients a = 0, b = 7.
|
||||
|
||||
## Field Parameters
|
||||
|
||||
### Prime Modulus p
|
||||
|
||||
```
|
||||
Decimal:
|
||||
115792089237316195423570985008687907853269984665640564039457584007908834671663
|
||||
|
||||
Hexadecimal:
|
||||
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
|
||||
|
||||
Binary representation:
|
||||
2²⁵⁶ - 2³² - 2⁹ - 2⁸ - 2⁷ - 2⁶ - 2⁴ - 1
|
||||
= 2²⁵⁶ - 2³² - 977
|
||||
```
|
||||
|
||||
**Special form benefits**:
|
||||
- Efficient modular reduction using: c mod p = c_low + c_high × (2³² + 977)
|
||||
- Near-Mersenne prime enables fast arithmetic
|
||||
|
||||
### Group Order n
|
||||
|
||||
```
|
||||
Decimal:
|
||||
115792089237316195423570985008687907852837564279074904382605163141518161494337
|
||||
|
||||
Hexadecimal:
|
||||
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
|
||||
```
|
||||
|
||||
The number of points on the curve, including the point at infinity.
|
||||
|
||||
### Cofactor h
|
||||
|
||||
```
|
||||
h = 1
|
||||
```
|
||||
|
||||
Cofactor 1 means the group order n equals the curve order, simplifying security analysis and eliminating small subgroup attacks.
|
||||
|
||||
## Generator Point G
|
||||
|
||||
### Compressed Form
|
||||
|
||||
```
|
||||
02 79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
|
||||
```
|
||||
|
||||
The 02 prefix indicates the y-coordinate is even.
|
||||
|
||||
### Uncompressed Form
|
||||
|
||||
```
|
||||
04 79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
|
||||
483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
|
||||
```
|
||||
|
||||
### Individual Coordinates
|
||||
|
||||
**Gx**:
|
||||
```
|
||||
Decimal:
|
||||
55066263022277343669578718895168534326250603453777594175500187360389116729240
|
||||
|
||||
Hexadecimal:
|
||||
0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
|
||||
```
|
||||
|
||||
**Gy**:
|
||||
```
|
||||
Decimal:
|
||||
32670510020758816978083085130507043184471273380659243275938904335757337482424
|
||||
|
||||
Hexadecimal:
|
||||
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
|
||||
```
|
||||
|
||||
## Endomorphism Parameters
|
||||
|
||||
secp256k1 has an efficiently computable endomorphism φ: (x, y) → (βx, y).
|
||||
|
||||
### β (Beta)
|
||||
|
||||
```
|
||||
Hexadecimal:
|
||||
0x7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE
|
||||
|
||||
Property: β³ ≡ 1 (mod p)
|
||||
```
|
||||
|
||||
### λ (Lambda)
|
||||
|
||||
```
|
||||
Hexadecimal:
|
||||
0x5363AD4CC05C30E0A5261C028812645A122E22EA20816678DF02967C1B23BD72
|
||||
|
||||
Property: λ³ ≡ 1 (mod n)
|
||||
Relationship: φ(P) = λP for all points P
|
||||
```
|
||||
|
||||
### GLV Decomposition Constants
|
||||
|
||||
For splitting scalar k into k₁ + k₂λ:
|
||||
|
||||
```
|
||||
a₁ = 0x3086D221A7D46BCDE86C90E49284EB15
|
||||
b₁ = -0xE4437ED6010E88286F547FA90ABFE4C3
|
||||
a₂ = 0x114CA50F7A8E2F3F657C1108D9D44CFD8
|
||||
b₂ = a₁
|
||||
```
|
||||
|
||||
## Derived Constants
|
||||
|
||||
### Field Characteristics
|
||||
|
||||
```
|
||||
(p + 1) / 4 = 0x3FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFBFFFFF0C
|
||||
Used for computing modular square roots via Tonelli-Shanks shortcut
|
||||
```
|
||||
|
||||
### Order Characteristics
|
||||
|
||||
```
|
||||
(n - 1) / 2 = 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0
|
||||
Used in low-S normalization for ECDSA signatures
|
||||
```
|
||||
|
||||
## Validation Formulas
|
||||
|
||||
### Point on Curve Check
|
||||
|
||||
For point (x, y), verify:
|
||||
```
|
||||
y² ≡ x³ + 7 (mod p)
|
||||
```
|
||||
|
||||
### Generator Verification
|
||||
|
||||
Verify G is on curve:
|
||||
```
|
||||
Gy² mod p = 0x9C47D08FFB10D4B8 ... (truncated for display)
|
||||
Gx³ + 7 mod p = same value
|
||||
```
|
||||
|
||||
### Order Verification
|
||||
|
||||
Verify nG = O (point at infinity):
|
||||
```
|
||||
Computing n × G should yield the identity element
|
||||
```
|
||||
|
||||
## Bit Lengths
|
||||
|
||||
| Parameter | Bits | Bytes |
|
||||
|-----------|------|-------|
|
||||
| p (prime) | 256 | 32 |
|
||||
| n (order) | 256 | 32 |
|
||||
| Private key | 256 | 32 |
|
||||
| Public key (compressed) | 257 | 33 |
|
||||
| Public key (uncompressed) | 513 | 65 |
|
||||
| ECDSA signature | 512 | 64 |
|
||||
| Schnorr signature | 512 | 64 |
|
||||
|
||||
## Security Level
|
||||
|
||||
- **Equivalent symmetric key strength**: 128 bits
|
||||
- **Best known attack complexity**: ~2¹²⁸ operations (Pollard's rho)
|
||||
- **Safe until**: Quantum computers with ~1500+ logical qubits
|
||||
|
||||
## ASN.1 OID
|
||||
|
||||
```
|
||||
1.3.132.0.10
|
||||
iso(1) identified-organization(3) certicom(132) curve(0) secp256k1(10)
|
||||
```
|
||||
|
||||
## Comparison with Other Curves
|
||||
|
||||
| Curve | Field Size | Security | Speed | Use Case |
|
||||
|-------|------------|----------|-------|----------|
|
||||
| secp256k1 | 256-bit | 128-bit | Fast (Koblitz) | Bitcoin, Nostr |
|
||||
| secp256r1 (P-256) | 256-bit | 128-bit | Moderate | TLS, general |
|
||||
| Curve25519 | 255-bit | ~128-bit | Very fast | Modern crypto |
|
||||
| secp384r1 (P-384) | 384-bit | 192-bit | Slower | High security |
|
||||
291
.claude/skills/elliptic-curves/references/security.md
Normal file
291
.claude/skills/elliptic-curves/references/security.md
Normal file
@@ -0,0 +1,291 @@
|
||||
# Elliptic Curve Security Analysis
|
||||
|
||||
Security properties, attack vectors, and mitigations for elliptic curve cryptography.
|
||||
|
||||
## The Discrete Logarithm Problem (ECDLP)
|
||||
|
||||
### Definition
|
||||
|
||||
Given points P and Q = kP on an elliptic curve, find the scalar k.
|
||||
|
||||
**Security assumption**: For properly chosen curves, this problem is computationally infeasible.
|
||||
|
||||
### Best Known Attacks
|
||||
|
||||
#### Generic Attacks (Work on Any Group)
|
||||
|
||||
| Attack | Complexity | Notes |
|
||||
|--------|------------|-------|
|
||||
| Baby-step Giant-step | O(√n) space and time | Requires √n storage |
|
||||
| Pollard's rho | O(√n) time, O(1) space | Practical for large groups |
|
||||
| Pollard's lambda | O(√n) | When k is in known range |
|
||||
| Pohlig-Hellman | O(√p) where p is largest prime factor | Exploits factorization of n |
|
||||
|
||||
For secp256k1 (n ≈ 2²⁵⁶):
|
||||
- Generic attack complexity: ~2¹²⁸ operations
|
||||
- Equivalent to 128-bit symmetric security
|
||||
|
||||
#### Curve-Specific Attacks
|
||||
|
||||
| Attack | Applicable When | Mitigation |
|
||||
|--------|-----------------|------------|
|
||||
| MOV/FR reduction | Low embedding degree | Use curves with high embedding degree |
|
||||
| Anomalous curve attack | n = p | Ensure n ≠ p |
|
||||
| GHS attack | Extension field curves | Use prime field curves |
|
||||
|
||||
**secp256k1 is immune to all known curve-specific attacks**.
|
||||
|
||||
## Side-Channel Attacks
|
||||
|
||||
### Timing Attacks
|
||||
|
||||
**Vulnerability**: Execution time varies based on secret data.
|
||||
|
||||
**Examples**:
|
||||
- Conditional branches on secret bits
|
||||
- Early exit conditions
|
||||
- Variable-time modular operations
|
||||
|
||||
**Mitigations**:
|
||||
- Constant-time algorithms (Montgomery ladder)
|
||||
- Fixed execution paths
|
||||
- Dummy operations to equalize timing
|
||||
|
||||
### Power Analysis
|
||||
|
||||
**Simple Power Analysis (SPA)**: Single trace reveals operations.
|
||||
- Double-and-add visible as different power signatures
|
||||
- Mitigation: Montgomery ladder (uniform operations)
|
||||
|
||||
**Differential Power Analysis (DPA)**: Statistical analysis of many traces.
|
||||
- Mitigation: Point blinding, scalar blinding
|
||||
|
||||
### Cache Attacks
|
||||
|
||||
**FLUSH+RELOAD Attack**:
|
||||
```
|
||||
1. Attacker flushes cache line containing lookup table
|
||||
2. Victim performs table lookup based on secret
|
||||
3. Attacker measures reload time to determine which entry was accessed
|
||||
```
|
||||
|
||||
**Mitigations**:
|
||||
- Avoid secret-dependent table lookups
|
||||
- Use constant-time table access patterns
|
||||
- Scatter tables to prevent cache line sharing
|
||||
|
||||
### Electromagnetic (EM) Attacks
|
||||
|
||||
Similar to power analysis but captures electromagnetic emissions.
|
||||
|
||||
**Mitigations**:
|
||||
- Shielding
|
||||
- Same algorithmic protections as power analysis
|
||||
|
||||
## Implementation Vulnerabilities
|
||||
|
||||
### k-Reuse in ECDSA
|
||||
|
||||
**The Sony PS3 Hack (2010)**:
|
||||
|
||||
If the same k is used for two signatures (r₁, s₁) and (r₂, s₂) on messages m₁ and m₂:
|
||||
|
||||
```
|
||||
s₁ = k⁻¹(e₁ + rd) mod n
|
||||
s₂ = k⁻¹(e₂ + rd) mod n
|
||||
|
||||
Since k is the same:
|
||||
s₁ - s₂ = k⁻¹(e₁ - e₂) mod n
|
||||
k = (e₁ - e₂)(s₁ - s₂)⁻¹ mod n
|
||||
|
||||
Once k is known:
|
||||
d = (s₁k - e₁)r⁻¹ mod n
|
||||
```
|
||||
|
||||
**Mitigation**: Use deterministic k (RFC 6979).
|
||||
|
||||
### Weak Random k
|
||||
|
||||
Even with unique k values, if the RNG is biased:
|
||||
- Lattice-based attacks can recover private key
|
||||
- Only ~1% bias in k can be exploitable with enough signatures
|
||||
|
||||
**Mitigations**:
|
||||
- Use cryptographically secure RNG
|
||||
- Use deterministic k (RFC 6979)
|
||||
- Verify k is in valid range [1, n-1]
|
||||
|
||||
### Invalid Curve Attacks
|
||||
|
||||
**Attack**: Attacker provides point not on the curve.
|
||||
- Point may be on a weaker curve
|
||||
- Operations may leak information
|
||||
|
||||
**Mitigation**: Always validate points are on curve:
|
||||
```
|
||||
Verify: y² ≡ x³ + ax + b (mod p)
|
||||
```
|
||||
|
||||
### Small Subgroup Attacks
|
||||
|
||||
**Attack**: If cofactor h > 1, points of small order exist.
|
||||
- Attacker sends point of small order
|
||||
- Response reveals private key mod (small order)
|
||||
|
||||
**Mitigation**:
|
||||
- Use curves with cofactor 1 (secp256k1 has h = 1)
|
||||
- Multiply received points by cofactor
|
||||
- Validate point order
|
||||
|
||||
### Fault Attacks
|
||||
|
||||
**Attack**: Induce computational errors (voltage glitches, radiation).
|
||||
- Corrupted intermediate values may leak information
|
||||
- Differential fault analysis can recover keys
|
||||
|
||||
**Mitigations**:
|
||||
- Redundant computations with comparison
|
||||
- Verify final results
|
||||
- Hardware protections
|
||||
|
||||
## Signature Malleability
|
||||
|
||||
### ECDSA Malleability
|
||||
|
||||
Given valid signature (r, s), signature (r, n - s) is also valid for the same message.
|
||||
|
||||
**Impact**: Transaction ID malleability (historical Bitcoin issue)
|
||||
|
||||
**Mitigation**: Enforce low-S normalization:
|
||||
```
|
||||
if s > n/2:
|
||||
s = n - s
|
||||
```
|
||||
|
||||
### Schnorr Non-Malleability
|
||||
|
||||
BIP-340 Schnorr signatures are non-malleable by design:
|
||||
- Use x-only public keys
|
||||
- Deterministic nonce derivation
|
||||
|
||||
## Quantum Threats
|
||||
|
||||
### Shor's Algorithm
|
||||
|
||||
**Threat**: Polynomial-time discrete log on quantum computers.
|
||||
- Requires ~1500-2000 logical qubits for secp256k1
|
||||
- Current quantum computers: <100 noisy qubits
|
||||
|
||||
**Timeline**: Estimated 10-20+ years for cryptographically relevant quantum computers.
|
||||
|
||||
### Migration Strategy
|
||||
|
||||
1. **Monitor**: Track quantum computing progress
|
||||
2. **Prepare**: Develop post-quantum alternatives
|
||||
3. **Hybrid**: Use classical + post-quantum in transition
|
||||
4. **Migrate**: Full transition when necessary
|
||||
|
||||
### Post-Quantum Alternatives
|
||||
|
||||
- Lattice-based signatures (CRYSTALS-Dilithium)
|
||||
- Hash-based signatures (SPHINCS+)
|
||||
- Code-based cryptography
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Key Generation
|
||||
|
||||
```
|
||||
DO:
|
||||
- Use cryptographically secure RNG
|
||||
- Validate private key is in [1, n-1]
|
||||
- Verify public key is on curve
|
||||
- Verify public key is not point at infinity
|
||||
|
||||
DON'T:
|
||||
- Use predictable seeds
|
||||
- Use truncated random values
|
||||
- Skip validation
|
||||
```
|
||||
|
||||
### Signature Generation
|
||||
|
||||
```
|
||||
DO:
|
||||
- Use RFC 6979 for deterministic k
|
||||
- Validate all inputs
|
||||
- Use constant-time operations
|
||||
- Clear sensitive memory after use
|
||||
|
||||
DON'T:
|
||||
- Reuse k values
|
||||
- Use weak/biased RNG
|
||||
- Skip low-S normalization (ECDSA)
|
||||
```
|
||||
|
||||
### Signature Verification
|
||||
|
||||
```
|
||||
DO:
|
||||
- Validate r, s are in [1, n-1]
|
||||
- Validate public key is on curve
|
||||
- Validate public key is not infinity
|
||||
- Use batch verification when possible
|
||||
|
||||
DON'T:
|
||||
- Skip any validation steps
|
||||
- Accept malformed signatures
|
||||
```
|
||||
|
||||
### Public Key Handling
|
||||
|
||||
```
|
||||
DO:
|
||||
- Validate received points are on curve
|
||||
- Check point is not infinity
|
||||
- Prefer compressed format for storage
|
||||
|
||||
DON'T:
|
||||
- Accept unvalidated points
|
||||
- Skip curve membership check
|
||||
```
|
||||
|
||||
## Security Checklist
|
||||
|
||||
### Implementation Review
|
||||
|
||||
- [ ] All scalar multiplications are constant-time
|
||||
- [ ] No secret-dependent branches
|
||||
- [ ] No secret-indexed table lookups
|
||||
- [ ] Memory is zeroized after use
|
||||
- [ ] Random k uses CSPRNG or RFC 6979
|
||||
- [ ] All received points are validated
|
||||
- [ ] Private keys are in valid range
|
||||
- [ ] Signatures use low-S normalization
|
||||
|
||||
### Operational Security
|
||||
|
||||
- [ ] Private keys stored securely (HSM, secure enclave)
|
||||
- [ ] Key derivation uses proper KDF
|
||||
- [ ] Backups are encrypted
|
||||
- [ ] Key rotation policy exists
|
||||
- [ ] Audit logging enabled
|
||||
- [ ] Incident response plan exists
|
||||
|
||||
## Security Levels Comparison
|
||||
|
||||
| Curve | Bits | Symmetric Equivalent | RSA Equivalent |
|
||||
|-------|------|---------------------|----------------|
|
||||
| secp192r1 | 192 | 96 | 1536 |
|
||||
| secp224r1 | 224 | 112 | 2048 |
|
||||
| secp256k1 | 256 | 128 | 3072 |
|
||||
| secp384r1 | 384 | 192 | 7680 |
|
||||
| secp521r1 | 521 | 256 | 15360 |
|
||||
|
||||
## References
|
||||
|
||||
- NIST SP 800-57: Recommendation for Key Management
|
||||
- SEC 1: Elliptic Curve Cryptography
|
||||
- RFC 6979: Deterministic Usage of DSA and ECDSA
|
||||
- BIP-340: Schnorr Signatures for secp256k1
|
||||
- SafeCurves: Choosing Safe Curves for Elliptic-Curve Cryptography
|
||||
@@ -82,6 +82,49 @@ func (f *File) Read(p []byte) (n int, err error) {
|
||||
}
|
||||
```
|
||||
|
||||
### Interface Design - CRITICAL RULES
|
||||
|
||||
**Rule 1: Define interfaces in a dedicated package (e.g., `pkg/interfaces/<name>/`)**
|
||||
- Interfaces provide isolation between packages and enable dependency inversion
|
||||
- Keeping interfaces in a dedicated package prevents circular dependencies
|
||||
- Each interface package should be minimal (just the interface, no implementations)
|
||||
|
||||
**Rule 2: NEVER use type assertions with interface literals**
|
||||
- **NEVER** write `.(interface{ Method() Type })` - this is non-idiomatic and unmaintainable
|
||||
- Interface literals cannot be documented, tested for satisfaction, or reused
|
||||
|
||||
```go
|
||||
// BAD - interface literal in type assertion (NEVER DO THIS)
|
||||
if checker, ok := obj.(interface{ Check() bool }); ok {
|
||||
checker.Check()
|
||||
}
|
||||
|
||||
// GOOD - use defined interface from dedicated package
|
||||
import "myproject/pkg/interfaces/checker"
|
||||
|
||||
if c, ok := obj.(checker.Checker); ok {
|
||||
c.Check()
|
||||
}
|
||||
```
|
||||
|
||||
**Rule 3: Resolving Circular Dependencies**
|
||||
- If a circular dependency occurs, move the interface to `pkg/interfaces/`
|
||||
- The implementing type stays in its original package
|
||||
- The consuming code imports only the interface package
|
||||
- Pattern:
|
||||
```
|
||||
pkg/interfaces/foo/ <- interface definition (no dependencies)
|
||||
↑ ↑
|
||||
pkg/bar/ pkg/baz/
|
||||
(implements) (consumes via interface)
|
||||
```
|
||||
|
||||
**Rule 4: Verify interface satisfaction at compile time**
|
||||
```go
|
||||
// Add this line to ensure *MyType implements MyInterface
|
||||
var _ MyInterface = (*MyType)(nil)
|
||||
```
|
||||
|
||||
### Concurrency
|
||||
|
||||
Use goroutines and channels for concurrent programming:
|
||||
@@ -178,6 +221,26 @@ For detailed information, consult the reference files:
|
||||
- Start comments with the name being described
|
||||
- Use godoc format
|
||||
|
||||
6. **Configuration - CRITICAL**
|
||||
- **NEVER** use `os.Getenv()` scattered throughout packages
|
||||
- **ALWAYS** centralize environment variable parsing in a single config package (e.g., `app/config/`)
|
||||
- Pass configuration via structs, not by reading environment directly
|
||||
- This ensures discoverability, documentation, and testability of all config options
|
||||
|
||||
7. **Constants - CRITICAL**
|
||||
- **ALWAYS** define named constants for values used more than a few times
|
||||
- **ALWAYS** define named constants if multiple packages depend on the same value
|
||||
- Constants shared across packages belong in a dedicated package (e.g., `pkg/constants/`)
|
||||
- Magic numbers and strings are forbidden
|
||||
```go
|
||||
// BAD - magic number
|
||||
if size > 1024 {
|
||||
|
||||
// GOOD - named constant
|
||||
const MaxBufferSize = 1024
|
||||
if size > MaxBufferSize {
|
||||
```
|
||||
|
||||
## Common Commands
|
||||
|
||||
```bash
|
||||
|
||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -10,8 +10,6 @@
|
||||
# Especially these
|
||||
.vscode/
|
||||
**/.vscode/
|
||||
node_modules/
|
||||
**/node_modules/
|
||||
/test*
|
||||
.idea/
|
||||
# and others
|
||||
@@ -98,6 +96,10 @@ cmd/benchmark/data
|
||||
# Re-ignore IDE directories (must come after !*/)
|
||||
.idea/
|
||||
**/.idea/
|
||||
|
||||
# Re-ignore node_modules everywhere (must come after !*/)
|
||||
node_modules/
|
||||
**/node_modules/
|
||||
/blocklist.json
|
||||
/gui/gui/main.wasm
|
||||
/gui/gui/index.html
|
||||
@@ -105,7 +107,6 @@ pkg/database/testrealy
|
||||
/ctxproxy.config.yml
|
||||
cmd/benchmark/external/**
|
||||
private*
|
||||
pkg/protocol/directory-client/node_modules
|
||||
|
||||
# Build outputs
|
||||
build/orly-*
|
||||
|
||||
@@ -1,319 +0,0 @@
|
||||
# Badger Database Migration Guide
|
||||
|
||||
## Overview
|
||||
|
||||
This guide covers migrating your ORLY relay database when changing Badger configuration parameters, specifically for the VLogPercentile and table size optimizations.
|
||||
|
||||
## When Migration is Needed
|
||||
|
||||
Based on research of Badger v4 source code and documentation:
|
||||
|
||||
### Configuration Changes That DON'T Require Migration
|
||||
|
||||
The following options can be changed **without migration**:
|
||||
- `BlockCacheSize` - Only affects in-memory cache
|
||||
- `IndexCacheSize` - Only affects in-memory cache
|
||||
- `NumCompactors` - Runtime setting
|
||||
- `NumLevelZeroTables` - Affects compaction timing
|
||||
- `NumMemtables` - Affects write buffering
|
||||
- `DetectConflicts` - Runtime conflict detection
|
||||
- `Compression` - New data uses new compression, old data remains as-is
|
||||
- `BlockSize` - Explicitly stated in Badger source: "Changing BlockSize across DB runs will not break badger"
|
||||
|
||||
### Configuration Changes That BENEFIT from Migration
|
||||
|
||||
The following options apply to **new writes only** - existing data gradually adopts new settings through compaction:
|
||||
- `VLogPercentile` - Affects where **new** values are stored (LSM vs vlog)
|
||||
- `BaseTableSize` - **New** SST files use new size
|
||||
- `MemTableSize` - Affects new write buffering
|
||||
- `BaseLevelSize` - Affects new LSM tree structure
|
||||
- `ValueLogFileSize` - New vlog files use new size
|
||||
|
||||
**Migration Impact:** Without migration, existing data remains in its current location (LSM tree or value log). The database will **gradually** adapt through normal compaction, which may take days or weeks depending on write volume.
|
||||
|
||||
## Migration Options
|
||||
|
||||
### Option 1: No Migration (Let Natural Compaction Handle It)
|
||||
|
||||
**Best for:** Low-traffic relays, testing environments
|
||||
|
||||
**Pros:**
|
||||
- No downtime required
|
||||
- No manual intervention
|
||||
- Zero risk of data loss
|
||||
|
||||
**Cons:**
|
||||
- Benefits take time to materialize (days/weeks)
|
||||
- Old data layout persists until natural compaction
|
||||
- Cache tuning benefits delayed
|
||||
|
||||
**Steps:**
|
||||
1. Update Badger configuration in `pkg/database/database.go`
|
||||
2. Restart ORLY relay
|
||||
3. Monitor performance over several days
|
||||
4. Optionally run manual GC: `db.RunValueLogGC(0.5)` periodically
|
||||
|
||||
### Option 2: Manual Value Log Garbage Collection
|
||||
|
||||
**Best for:** Medium-traffic relays wanting faster optimization
|
||||
|
||||
**Pros:**
|
||||
- Faster than natural compaction
|
||||
- Still safe (no export/import)
|
||||
- Can run while relay is online
|
||||
|
||||
**Cons:**
|
||||
- Still gradual (hours instead of days)
|
||||
- CPU/disk intensive during GC
|
||||
- Partial benefit until GC completes
|
||||
|
||||
**Steps:**
|
||||
1. Update Badger configuration
|
||||
2. Restart ORLY relay
|
||||
3. Monitor logs for compaction activity
|
||||
4. Manually trigger GC if needed (future feature - not currently exposed)
|
||||
|
||||
### Option 3: Full Export/Import Migration (RECOMMENDED for Production)
|
||||
|
||||
**Best for:** Production relays, large databases, maximum performance
|
||||
|
||||
**Pros:**
|
||||
- Immediate full benefit of new configuration
|
||||
- Clean database structure
|
||||
- Predictable migration time
|
||||
- Reclaims all disk space
|
||||
|
||||
**Cons:**
|
||||
- Requires relay downtime (several hours for large DBs)
|
||||
- Requires 2x disk space temporarily
|
||||
- More complex procedure
|
||||
|
||||
**Steps:** See detailed procedure below
|
||||
|
||||
## Full Migration Procedure (Option 3)
|
||||
|
||||
### Prerequisites
|
||||
|
||||
1. **Disk space:** At minimum 2.5x current database size
|
||||
- 1x for current database
|
||||
- 1x for JSONL export
|
||||
- 0.5x for new database (will be smaller with compression)
|
||||
|
||||
2. **Time estimate:**
|
||||
- Export: ~100-500 MB/s depending on disk speed
|
||||
- Import: ~50-200 MB/s with indexing overhead
|
||||
- Example: 10 GB database = ~10-30 minutes total
|
||||
|
||||
3. **Backup:** Ensure you have a recent backup before proceeding
|
||||
|
||||
### Step-by-Step Migration
|
||||
|
||||
#### 1. Prepare Migration Script
|
||||
|
||||
Use the provided `scripts/migrate-badger-config.sh` script (see below).
|
||||
|
||||
#### 2. Stop the Relay
|
||||
|
||||
```bash
|
||||
# If using systemd
|
||||
sudo systemctl stop orly
|
||||
|
||||
# If running manually
|
||||
pkill orly
|
||||
```
|
||||
|
||||
#### 3. Run Migration
|
||||
|
||||
```bash
|
||||
cd ~/src/next.orly.dev
|
||||
chmod +x scripts/migrate-badger-config.sh
|
||||
./scripts/migrate-badger-config.sh
|
||||
```
|
||||
|
||||
The script will:
|
||||
- Export all events to JSONL format
|
||||
- Move old database to backup location
|
||||
- Create new database with updated configuration
|
||||
- Import all events (rebuilds indexes automatically)
|
||||
- Verify event count matches
|
||||
|
||||
#### 4. Verify Migration
|
||||
|
||||
```bash
|
||||
# Check that events were migrated
|
||||
echo "Old event count:"
|
||||
cat ~/.local/share/ORLY-backup-*/migration.log | grep "exported.*events"
|
||||
|
||||
echo "New event count:"
|
||||
cat ~/.local/share/ORLY/migration.log | grep "saved.*events"
|
||||
```
|
||||
|
||||
#### 5. Restart Relay
|
||||
|
||||
```bash
|
||||
# If using systemd
|
||||
sudo systemctl start orly
|
||||
sudo journalctl -u orly -f
|
||||
|
||||
# If running manually
|
||||
./orly
|
||||
```
|
||||
|
||||
#### 6. Monitor Performance
|
||||
|
||||
Watch for improvements in:
|
||||
- Cache hit ratio (should be >85% with new config)
|
||||
- Average query latency (should be <3ms for cached events)
|
||||
- No "Block cache too small" warnings in logs
|
||||
|
||||
#### 7. Clean Up (After Verification)
|
||||
|
||||
```bash
|
||||
# Once you confirm everything works (wait 24-48 hours)
|
||||
rm -rf ~/.local/share/ORLY-backup-*
|
||||
rm ~/.local/share/ORLY/events-export.jsonl
|
||||
```
|
||||
|
||||
## Migration Script
|
||||
|
||||
The migration script is located at `scripts/migrate-badger-config.sh` and handles:
|
||||
- Automatic export of all events to JSONL
|
||||
- Safe backup of existing database
|
||||
- Creation of new database with updated config
|
||||
- Import and indexing of all events
|
||||
- Verification of event counts
|
||||
|
||||
## Rollback Procedure
|
||||
|
||||
If migration fails or performance degrades:
|
||||
|
||||
```bash
|
||||
# Stop the relay
|
||||
sudo systemctl stop orly # or pkill orly
|
||||
|
||||
# Restore old database
|
||||
rm -rf ~/.local/share/ORLY
|
||||
mv ~/.local/share/ORLY-backup-$(date +%Y%m%d)* ~/.local/share/ORLY
|
||||
|
||||
# Restart with old configuration
|
||||
sudo systemctl start orly
|
||||
```
|
||||
|
||||
## Configuration Changes Summary
|
||||
|
||||
### Changes Applied in pkg/database/database.go
|
||||
|
||||
```go
|
||||
// Cache sizes (can change without migration)
|
||||
opts.BlockCacheSize = 16384 MB (was 512 MB)
|
||||
opts.IndexCacheSize = 4096 MB (was 256 MB)
|
||||
|
||||
// Table sizes (benefits from migration)
|
||||
opts.BaseTableSize = 8 MB (was 64 MB)
|
||||
opts.MemTableSize = 16 MB (was 64 MB)
|
||||
opts.ValueLogFileSize = 128 MB (was 256 MB)
|
||||
|
||||
// Inline event optimization (CRITICAL - benefits from migration)
|
||||
opts.VLogPercentile = 0.99 (was 0.0 - default)
|
||||
|
||||
// LSM structure (benefits from migration)
|
||||
opts.BaseLevelSize = 64 MB (was 10 MB - default)
|
||||
|
||||
// Performance settings (no migration needed)
|
||||
opts.DetectConflicts = false (was true)
|
||||
opts.Compression = options.ZSTD (was options.None)
|
||||
opts.NumCompactors = 8 (was 4)
|
||||
opts.NumMemtables = 8 (was 5)
|
||||
```
|
||||
|
||||
## Expected Improvements
|
||||
|
||||
### Before Migration
|
||||
- Cache hit ratio: 33%
|
||||
- Average latency: 9.35ms
|
||||
- P95 latency: 34.48ms
|
||||
- Block cache warnings: Yes
|
||||
|
||||
### After Migration
|
||||
- Cache hit ratio: 85-95%
|
||||
- Average latency: <3ms
|
||||
- P95 latency: <8ms
|
||||
- Block cache warnings: No
|
||||
- Inline events: 3-5x faster reads
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Migration Script Fails
|
||||
|
||||
**Error:** "Not enough disk space"
|
||||
- Free up space or use Option 1 (natural compaction)
|
||||
- Ensure you have 2.5x current DB size available
|
||||
|
||||
**Error:** "Export failed"
|
||||
- Check database is not corrupted
|
||||
- Ensure ORLY is stopped
|
||||
- Check file permissions
|
||||
|
||||
**Error:** "Import count mismatch"
|
||||
- This is informational - some events may be duplicates
|
||||
- Check logs for specific errors
|
||||
- Verify core events are present via relay queries
|
||||
|
||||
### Performance Not Improved
|
||||
|
||||
**After migration, performance is the same:**
|
||||
1. Verify configuration was actually applied:
|
||||
```bash
|
||||
# Check running relay logs for config output
|
||||
sudo journalctl -u orly | grep -i "block.*cache\|vlog"
|
||||
```
|
||||
|
||||
2. Wait for cache to warm up (2-5 minutes after start)
|
||||
|
||||
3. Check if workload changed (different query patterns)
|
||||
|
||||
4. Verify disk I/O is not bottleneck:
|
||||
```bash
|
||||
iostat -x 5
|
||||
```
|
||||
|
||||
### High CPU During Migration
|
||||
|
||||
- This is normal - import rebuilds all indexes
|
||||
- Migration is single-threaded by design (data consistency)
|
||||
- Expect 30-60% CPU usage on one core
|
||||
|
||||
## Additional Notes
|
||||
|
||||
### Compression Impact
|
||||
|
||||
The `Compression = options.ZSTD` setting:
|
||||
- Only compresses **new** data
|
||||
- Old data remains uncompressed until rewritten by compaction
|
||||
- Migration forces all data to be rewritten → immediate compression benefit
|
||||
- Expect 2-3x compression ratio for event data
|
||||
|
||||
### VLogPercentile Behavior
|
||||
|
||||
With `VLogPercentile = 0.99`:
|
||||
- **99% of values** stored in LSM tree (fast access)
|
||||
- **1% of values** stored in value log (large events >100 KB)
|
||||
- Threshold dynamically adjusted based on value size distribution
|
||||
- Perfect for ORLY's inline event optimization
|
||||
|
||||
### Production Considerations
|
||||
|
||||
For production relays:
|
||||
1. Schedule migration during low-traffic period
|
||||
2. Notify users of maintenance window
|
||||
3. Have rollback plan ready
|
||||
4. Monitor closely for 24-48 hours after migration
|
||||
5. Keep backup for at least 1 week
|
||||
|
||||
## References
|
||||
|
||||
- Badger v4 Documentation: https://pkg.go.dev/github.com/dgraph-io/badger/v4
|
||||
- ORLY Database Package: `pkg/database/database.go`
|
||||
- Export/Import Implementation: `pkg/database/{export,import}.go`
|
||||
- Cache Optimization Analysis: `cmd/benchmark/CACHE_OPTIMIZATION_STRATEGY.md`
|
||||
- Inline Event Optimization: `cmd/benchmark/INLINE_EVENT_OPTIMIZATION.md`
|
||||
400
CLAUDE.md
400
CLAUDE.md
@@ -8,11 +8,12 @@ ORLY is a high-performance Nostr relay written in Go, designed for personal rela
|
||||
|
||||
**Key Technologies:**
|
||||
- **Language**: Go 1.25.3+
|
||||
- **Database**: Badger v4 (embedded key-value store) or DGraph (distributed graph database)
|
||||
- **Database**: Badger v4 (embedded) or Neo4j (social graph)
|
||||
- **Cryptography**: Custom p8k library using purego for secp256k1 operations (no CGO)
|
||||
- **Web UI**: Svelte frontend embedded in the binary
|
||||
- **WebSocket**: gorilla/websocket for Nostr protocol
|
||||
- **Performance**: SIMD-accelerated SHA256 and hex encoding, query result caching with zstd compression
|
||||
- **Social Graph**: Neo4j backend with Web of Trust (WoT) extensions for trust metrics
|
||||
|
||||
## Build Commands
|
||||
|
||||
@@ -139,9 +140,13 @@ export ORLY_SPROCKET_ENABLED=true
|
||||
# Enable policy system
|
||||
export ORLY_POLICY_ENABLED=true
|
||||
|
||||
# Database backend selection (badger or dgraph)
|
||||
# Database backend selection (badger or neo4j)
|
||||
export ORLY_DB_TYPE=badger
|
||||
export ORLY_DGRAPH_URL=localhost:9080 # Only for dgraph backend
|
||||
|
||||
# Neo4j configuration (only when ORLY_DB_TYPE=neo4j)
|
||||
export ORLY_NEO4J_URI=bolt://localhost:7687
|
||||
export ORLY_NEO4J_USER=neo4j
|
||||
export ORLY_NEO4J_PASSWORD=password
|
||||
|
||||
# Query cache configuration (improves REQ response times)
|
||||
export ORLY_QUERY_CACHE_SIZE_MB=512 # Default: 512MB
|
||||
@@ -150,6 +155,24 @@ export ORLY_QUERY_CACHE_MAX_AGE=5m # Cache expiry time
|
||||
# Database cache tuning (for Badger backend)
|
||||
export ORLY_DB_BLOCK_CACHE_MB=512 # Block cache size
|
||||
export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
|
||||
export ORLY_DB_ZSTD_LEVEL=1 # ZSTD level: 0=off, 1=fast, 3=default, 9=best
|
||||
|
||||
# Serial cache for compact event storage (Badger backend)
|
||||
export ORLY_SERIAL_CACHE_PUBKEYS=100000 # Max pubkeys to cache (~3.2MB memory)
|
||||
export ORLY_SERIAL_CACHE_EVENT_IDS=500000 # Max event IDs to cache (~16MB memory)
|
||||
|
||||
# Directory Spider (metadata sync from other relays)
|
||||
export ORLY_DIRECTORY_SPIDER=true # Enable directory spider
|
||||
export ORLY_DIRECTORY_SPIDER_INTERVAL=24h # How often to run
|
||||
export ORLY_DIRECTORY_SPIDER_HOPS=3 # Max hops for relay discovery
|
||||
|
||||
# NIP-43 Relay Access Metadata
|
||||
export ORLY_NIP43_ENABLED=true # Enable invite system
|
||||
export ORLY_NIP43_INVITE_EXPIRY=24h # Invite code validity
|
||||
|
||||
# Authentication modes
|
||||
export ORLY_AUTH_REQUIRED=false # Require auth for all requests
|
||||
export ORLY_AUTH_TO_WRITE=false # Require auth only for writes
|
||||
```
|
||||
|
||||
## Code Architecture
|
||||
@@ -177,7 +200,7 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
|
||||
|
||||
**`pkg/database/`** - Database abstraction layer with multiple backend support
|
||||
- `interface.go` - Database interface definition for pluggable backends
|
||||
- `factory.go` - Database backend selection (Badger or DGraph)
|
||||
- `factory.go` - Database backend selection (Badger or Neo4j)
|
||||
- `database.go` - Badger implementation with cache tuning and query cache
|
||||
- `save-event.go` - Event storage with index updates
|
||||
- `query-events.go` - Main query execution engine with filter normalization
|
||||
@@ -188,6 +211,15 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
|
||||
- `identity.go` - Relay identity key management
|
||||
- `migrations.go` - Database schema migration runner
|
||||
|
||||
**`pkg/neo4j/`** - Neo4j graph database backend with social graph support
|
||||
- `neo4j.go` - Main database implementation
|
||||
- `schema.go` - Graph schema and index definitions (includes WoT extensions)
|
||||
- `query-events.go` - REQ filter to Cypher translation
|
||||
- `save-event.go` - Event storage with relationship creation
|
||||
- `social-event-processor.go` - Processes kinds 0, 3, 1984, 10000 for social graph
|
||||
- `WOT_SPEC.md` - Web of Trust data model specification (NostrUser nodes, trust metrics)
|
||||
- `MODIFYING_SCHEMA.md` - Guide for schema modifications
|
||||
|
||||
**`pkg/protocol/`** - Nostr protocol implementation
|
||||
- `ws/` - WebSocket message framing and parsing
|
||||
- `auth/` - NIP-42 authentication challenge/response
|
||||
@@ -223,6 +255,9 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
|
||||
**`pkg/policy/`** - Event filtering and validation policies
|
||||
- Policy configuration loaded from `~/.config/ORLY/policy.json`
|
||||
- Per-kind size limits, age restrictions, custom scripts
|
||||
- **Write-Only Validation**: Size, age, tag, and expiry validations apply ONLY to write operations
|
||||
- **Read-Only Filtering**: `read_allow`, `read_deny`, `privileged` apply ONLY to read operations
|
||||
- See `docs/POLICY_CONFIGURATION_REFERENCE.md` for authoritative read vs write applicability
|
||||
- **Dynamic Policy Hot Reload via Kind 12345 Events:**
|
||||
- Policy admins can update policy configuration without relay restart
|
||||
- Kind 12345 events contain JSON policy in content field
|
||||
@@ -231,12 +266,16 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
|
||||
- Policy admin follow lists (kind 3) trigger immediate cache refresh
|
||||
- `WriteAllowFollows` rule grants both read+write access to admin follows
|
||||
- Tag validation supports regex patterns per tag type
|
||||
- **New Policy Rule Fields:**
|
||||
- **Policy Rule Fields:**
|
||||
- `max_expiry_duration`: ISO-8601 duration format (e.g., "P7D", "PT1H30M") for event expiry limits
|
||||
- `protected_required`: Requires NIP-70 protected events (must have "-" tag)
|
||||
- `identifier_regex`: Regex pattern for validating "d" tag identifiers
|
||||
- `follows_whitelist_admins`: Per-rule admin pubkeys whose follows are whitelisted
|
||||
- `write_allow` / `write_deny`: Pubkey whitelist/blacklist for writing (write-only)
|
||||
- `read_allow` / `read_deny`: Pubkey whitelist/blacklist for reading (read-only)
|
||||
- `privileged`: Party-involved access control (read-only)
|
||||
- See `docs/POLICY_USAGE_GUIDE.md` for configuration examples
|
||||
- See `pkg/policy/README.md` for quick reference
|
||||
|
||||
**`pkg/sync/`** - Distributed synchronization
|
||||
- `cluster_manager.go` - Active replication between relay peers
|
||||
@@ -246,6 +285,12 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
|
||||
**`pkg/spider/`** - Event syncing from other relays
|
||||
- `spider.go` - Spider manager for "follows" mode
|
||||
- Fetches events from admin relays for followed pubkeys
|
||||
- **Directory Spider** (`directory.go`):
|
||||
- Discovers relays by crawling kind 10002 (relay list) events
|
||||
- Expands outward from seed pubkeys (whitelisted users) via hop distance
|
||||
- Fetches metadata events (kinds 0, 3, 10000, 10002) from discovered relays
|
||||
- Self-detection prevents querying own relay
|
||||
- Configurable interval and max hops via `ORLY_DIRECTORY_SPIDER_*` env vars
|
||||
|
||||
**`pkg/utils/`** - Shared utilities
|
||||
- `atomic/` - Extended atomic operations
|
||||
@@ -278,7 +323,11 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
|
||||
**Database Backend Selection:**
|
||||
- Supports multiple backends via `ORLY_DB_TYPE` environment variable
|
||||
- **Badger** (default): Embedded key-value store with custom indexing, ideal for single-instance deployments
|
||||
- **DGraph**: Distributed graph database for larger, multi-node deployments
|
||||
- **Neo4j**: Graph database with social graph and Web of Trust (WoT) extensions
|
||||
- Processes kinds 0 (profile), 3 (contacts), 1984 (reports), 10000 (mute list) for social graph
|
||||
- NostrUser nodes with trust metrics (influence, PageRank)
|
||||
- FOLLOWS, MUTES, REPORTS relationships for WoT analysis
|
||||
- See `pkg/neo4j/WOT_SPEC.md` for full schema specification
|
||||
- Backend selected via factory pattern in `pkg/database/factory.go`
|
||||
- All backends implement the same `Database` interface defined in `pkg/database/interface.go`
|
||||
|
||||
@@ -297,11 +346,33 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
|
||||
4. Events stored via `database.SaveEvent()`
|
||||
5. Active subscriptions notified via `publishers.Publish()`
|
||||
|
||||
**Configuration System:**
|
||||
**Configuration System - CRITICAL RULES:**
|
||||
- Uses `go-simpler.org/env` for struct tags
|
||||
- All config in `app/config/config.go` with `ORLY_` prefix
|
||||
- **ALL environment variables MUST be defined in `app/config/config.go`**
|
||||
- **NEVER** use `os.Getenv()` directly in packages - always pass config via structs
|
||||
- **NEVER** parse environment variables outside of `app/config/`
|
||||
- This ensures all config options appear in `./orly help` output
|
||||
- Database backends receive config via `database.DatabaseConfig` struct
|
||||
- Use `GetDatabaseConfigValues()` helper to extract DB config from app config
|
||||
- All config fields use `ORLY_` prefix with struct tags defining defaults and usage
|
||||
- Supports XDG directories via `github.com/adrg/xdg`
|
||||
- Default data directory: `~/.local/share/ORLY`
|
||||
- Database-specific config (Neo4j, Badger) is passed via `DatabaseConfig` struct in `pkg/database/factory.go`
|
||||
|
||||
**Constants - CRITICAL RULES:**
|
||||
- **ALWAYS** define named constants for values used more than a few times
|
||||
- **ALWAYS** define named constants if multiple packages depend on the same value
|
||||
- Constants shared across packages should be in a dedicated package (e.g., `pkg/constants/`)
|
||||
- Magic numbers and strings are forbidden - use named constants with clear documentation
|
||||
- Example:
|
||||
```go
|
||||
// BAD - magic number
|
||||
if timeout > 30 {
|
||||
|
||||
// GOOD - named constant
|
||||
const DefaultTimeoutSeconds = 30
|
||||
if timeout > DefaultTimeoutSeconds {
|
||||
```
|
||||
|
||||
**Event Publishing:**
|
||||
- `pkg/protocol/publish/` manages publisher registry
|
||||
@@ -322,22 +393,120 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
|
||||
- External packages (e.g., `app/`) should ONLY use public API methods, never access internal fields
|
||||
- **DO NOT** change unexported fields to exported when fixing bugs - this breaks the domain boundary
|
||||
|
||||
**Binary-Optimized Tag Storage (IMPORTANT):**
|
||||
- The nostr library (`git.mleku.dev/mleku/nostr/encoders/tag`) uses binary optimization for `e` and `p` tags
|
||||
- When events are unmarshaled from JSON, 64-character hex values in e/p tags are converted to 33-byte binary format (32 bytes hash + null terminator)
|
||||
- **DO NOT** use `tag.Value()` directly for e/p tags - it returns raw bytes which may be binary, not hex
|
||||
- **ALWAYS** use these methods instead:
|
||||
- `tag.ValueHex()` - Returns hex string regardless of storage format (handles both binary and hex)
|
||||
- `tag.ValueBinary()` - Returns 32-byte binary if stored in binary format, nil otherwise
|
||||
- Example pattern for comparing pubkeys:
|
||||
```go
|
||||
// CORRECT: Use ValueHex() for hex decoding
|
||||
pt, err := hex.Dec(string(pTag.ValueHex()))
|
||||
**Binary-Optimized Tag Storage (CRITICAL - Read Carefully):**
|
||||
|
||||
// WRONG: Value() may return binary bytes, not hex
|
||||
pt, err := hex.Dec(string(pTag.Value())) // Will fail for binary-encoded tags!
|
||||
The nostr library (`git.mleku.dev/mleku/nostr/encoders/tag`) uses binary optimization for `e` and `p` tags. This is a common source of bugs when working with pubkeys and event IDs.
|
||||
|
||||
**How Binary Encoding Works:**
|
||||
- When events are unmarshaled from JSON, 64-character hex values in e/p tags are converted to 33-byte binary format (32 bytes hash + null terminator)
|
||||
- The `tag.T` field contains `[][]byte` where each element may be binary or hex depending on tag type
|
||||
- `event.E.ID`, `event.E.Pubkey`, and `event.E.Sig` are always stored as fixed-size byte arrays (`[32]byte` or `[64]byte`)
|
||||
|
||||
**NEVER Do This:**
|
||||
```go
|
||||
// WRONG: tag.T[1] may be 33-byte binary, not 64-char hex!
|
||||
pubkey := string(tag.T[1]) // Results in garbage for binary-encoded tags
|
||||
|
||||
// WRONG: Will fail for binary-encoded e/p tags
|
||||
pt, err := hex.Dec(string(pTag.Value()))
|
||||
```
|
||||
|
||||
**ALWAYS Do This:**
|
||||
```go
|
||||
// CORRECT: Use ValueHex() which handles both binary and hex formats
|
||||
pubkey := string(pTag.ValueHex()) // Always returns lowercase hex
|
||||
|
||||
// CORRECT: For decoding to bytes
|
||||
pt, err := hex.Dec(string(pTag.ValueHex()))
|
||||
|
||||
// CORRECT: For event.E fields (always binary, use hex.Enc)
|
||||
pubkeyHex := hex.Enc(ev.Pubkey[:]) // Always produces lowercase hex
|
||||
eventIDHex := hex.Enc(ev.ID[:])
|
||||
sigHex := hex.Enc(ev.Sig[:])
|
||||
```
|
||||
|
||||
**Tag Methods Reference:**
|
||||
- `tag.ValueHex()` - Returns hex string regardless of storage format (handles both binary and hex)
|
||||
- `tag.ValueBinary()` - Returns 32-byte binary if stored in binary format, nil otherwise
|
||||
- `tag.Value()` - Returns raw bytes **DANGEROUS for e/p tags** - may be binary
|
||||
|
||||
**Hex Case Sensitivity:**
|
||||
- The hex encoder (`git.mleku.dev/mleku/nostr/encoders/hex`) **always produces lowercase hex**
|
||||
- External sources may send uppercase hex (e.g., `"ABCD..."` instead of `"abcd..."`)
|
||||
- When storing pubkeys/event IDs (especially in Neo4j), **always normalize to lowercase**
|
||||
- Mixed case causes duplicate entities in graph databases
|
||||
|
||||
**Neo4j-Specific Helpers (pkg/neo4j/hex_utils.go):**
|
||||
```go
|
||||
// ExtractPTagValue handles binary encoding and normalizes to lowercase
|
||||
pubkey := ExtractPTagValue(pTag)
|
||||
|
||||
// ExtractETagValue handles binary encoding and normalizes to lowercase
|
||||
eventID := ExtractETagValue(eTag)
|
||||
|
||||
// NormalizePubkeyHex handles both binary and uppercase hex
|
||||
normalized := NormalizePubkeyHex(rawValue)
|
||||
|
||||
// IsValidHexPubkey validates 64-char hex
|
||||
if IsValidHexPubkey(pubkey) { ... }
|
||||
```
|
||||
|
||||
**Files Most Affected by These Rules:**
|
||||
- `pkg/neo4j/save-event.go` - Event storage with e/p tag handling
|
||||
- `pkg/neo4j/social-event-processor.go` - Social graph with p-tag extraction
|
||||
- `pkg/neo4j/query-events.go` - Filter queries with tag matching
|
||||
- `pkg/database/save-event.go` - Badger event storage
|
||||
- `pkg/database/filter_utils.go` - Tag normalization utilities
|
||||
- `pkg/find/parser.go` - FIND protocol parser with p-tag extraction
|
||||
|
||||
This optimization saves memory and enables faster comparisons in the database layer.
|
||||
|
||||
**Interface Design - CRITICAL RULES:**
|
||||
|
||||
**Rule 1: ALL interfaces MUST be defined in `pkg/interfaces/<name>/`**
|
||||
- Interfaces provide isolation between packages and enable dependency inversion
|
||||
- Keeping interfaces in a dedicated package prevents circular dependencies
|
||||
- Each interface package should be minimal (just the interface, no implementations)
|
||||
|
||||
**Rule 2: NEVER use type assertions with interface literals**
|
||||
- **NEVER** write `.(interface{ Method() Type })` - this is non-idiomatic and unmaintainable
|
||||
- Interface literals cannot be documented, tested for satisfaction, or reused
|
||||
- Example of WRONG approach:
|
||||
```go
|
||||
// BAD - interface literal in type assertion
|
||||
if checker, ok := obj.(interface{ Check() bool }); ok {
|
||||
checker.Check()
|
||||
}
|
||||
```
|
||||
- This optimization saves memory and enables faster comparisons in the database layer
|
||||
- Example of CORRECT approach:
|
||||
```go
|
||||
// GOOD - use defined interface from pkg/interfaces/
|
||||
import "next.orly.dev/pkg/interfaces/checker"
|
||||
|
||||
if c, ok := obj.(checker.Checker); ok {
|
||||
c.Check()
|
||||
}
|
||||
```
|
||||
|
||||
**Rule 3: Resolving Circular Dependencies**
|
||||
- If a circular dependency occurs when adding an interface, move the interface to `pkg/interfaces/`
|
||||
- The implementing type stays in its original package
|
||||
- The consuming code imports only the interface package
|
||||
- This pattern:
|
||||
```
|
||||
pkg/interfaces/foo/ <- interface definition (no dependencies)
|
||||
↑ ↑
|
||||
pkg/bar/ pkg/baz/
|
||||
(implements) (consumes via interface)
|
||||
```
|
||||
|
||||
**Existing interfaces in `pkg/interfaces/`:**
|
||||
- `acl/` - ACL and PolicyChecker interfaces
|
||||
- `neterr/` - TimeoutError interface for network errors
|
||||
- `resultiter/` - Neo4jResultIterator for database results
|
||||
- `store/` - Storage-related interfaces
|
||||
- `publisher/` - Event publishing interfaces
|
||||
- `typer/` - Type identification interface
|
||||
|
||||
## Development Workflow
|
||||
|
||||
@@ -436,7 +605,76 @@ sudo journalctl -u orly -f
|
||||
- `github.com/templexxx/xhex` - SIMD hex encoding
|
||||
- `github.com/ebitengine/purego` - CGO-free C library loading
|
||||
- `go-simpler.org/env` - Environment variable configuration
|
||||
- `lol.mleku.dev` - Custom logging library
|
||||
- `lol.mleku.dev` - Custom logging library (see Logging section below)
|
||||
|
||||
## Logging (lol.mleku.dev)
|
||||
|
||||
The project uses `lol.mleku.dev` (Log Of Location), a simple logging library that prints timestamps and source code locations.
|
||||
|
||||
### Log Levels (lowest to highest verbosity)
|
||||
| Level | Constant | Emoji | Usage |
|
||||
|-------|----------|-------|-------|
|
||||
| Off | `Off` | (none) | Disables all logging |
|
||||
| Fatal | `Fatal` | ☠️ | Unrecoverable errors, program exits |
|
||||
| Error | `Error` | 🚨 | Errors that need attention |
|
||||
| Warn | `Warn` | ⚠️ | Warnings, non-critical issues |
|
||||
| Info | `Info` | ℹ️ | General information (default) |
|
||||
| Debug | `Debug` | 🔎 | Debug information for development |
|
||||
| Trace | `Trace` | 👻 | Very detailed tracing, most verbose |
|
||||
|
||||
### Environment Variable
|
||||
Set log level via `LOG_LEVEL` environment variable:
|
||||
```bash
|
||||
export LOG_LEVEL=trace # Most verbose
|
||||
export LOG_LEVEL=debug # Development debugging
|
||||
export LOG_LEVEL=info # Default
|
||||
export LOG_LEVEL=warn # Only warnings and errors
|
||||
export LOG_LEVEL=error # Only errors
|
||||
export LOG_LEVEL=off # Silent
|
||||
```
|
||||
|
||||
**Note**: ORLY uses `ORLY_LOG_LEVEL` which is mapped to the underlying `LOG_LEVEL`.
|
||||
|
||||
### Usage in Code
|
||||
Import and use the log package:
|
||||
```go
|
||||
import "lol.mleku.dev/log"
|
||||
|
||||
// Log methods (each has .Ln, .F, .S, .C variants)
|
||||
log.T.F("trace: %s", msg) // Trace level - very detailed
|
||||
log.D.F("debug: %s", msg) // Debug level
|
||||
log.I.F("info: %s", msg) // Info level
|
||||
log.W.F("warn: %s", msg) // Warning level
|
||||
log.E.F("error: %s", msg) // Error level
|
||||
log.F.F("fatal: %s", msg) // Fatal level
|
||||
|
||||
// Check errors (prints if error is not nil, returns bool)
|
||||
import "lol.mleku.dev/chk"
|
||||
if chk.E(err) { // chk.E = Error level check
|
||||
return // Error was logged
|
||||
}
|
||||
if chk.D(err) { // chk.D = Debug level check
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
### Log Printer Variants
|
||||
Each level has these printer types:
|
||||
- `.Ln(a...)` - Print items with spaces between
|
||||
- `.F(format, a...)` - Printf-style formatting
|
||||
- `.S(a...)` - Spew dump (detailed struct output)
|
||||
- `.C(func() string)` - Lazy evaluation (only runs closure if level is enabled)
|
||||
- `.Chk(error) bool` - Returns true if error is not nil, logs if so
|
||||
- `.Err(format, a...) error` - Logs and returns an error
|
||||
|
||||
### Output Format
|
||||
```
|
||||
1764783029014485👻 message text /path/to/file.go:123
|
||||
```
|
||||
- Unix microsecond timestamp
|
||||
- Level emoji
|
||||
- Message text
|
||||
- Source file:line location
|
||||
|
||||
## Testing Guidelines
|
||||
|
||||
@@ -468,6 +706,14 @@ ORLY has received several significant performance improvements in recent updates
|
||||
- Dramatically reduces database load for repeated queries (common in Nostr clients)
|
||||
- Cache key includes normalized filter representation for optimal hit rate
|
||||
|
||||
### Compact Event Storage (Latest)
|
||||
- Events stored with 5-byte serial references instead of 32-byte IDs/pubkeys
|
||||
- Achieves up to 40% space savings on event data
|
||||
- Serial cache for fast lookups (configurable via `ORLY_SERIAL_CACHE_PUBKEYS` and `ORLY_SERIAL_CACHE_EVENT_IDS`)
|
||||
- Automatic migration from legacy format (version 6)
|
||||
- Cleanup removes redundant legacy storage after migration
|
||||
- Storage stats available via `db.CompactStorageStats()` and `db.LogCompactSavings()`
|
||||
|
||||
### Badger Cache Tuning
|
||||
- Optimized block cache (default 512MB, tune via `ORLY_DB_BLOCK_CACHE_MB`)
|
||||
- Optimized index cache (default 256MB, tune via `ORLY_DB_INDEX_CACHE_MB`)
|
||||
@@ -524,3 +770,111 @@ Files modified:
|
||||
```
|
||||
3. GitHub Actions workflow builds binaries for multiple platforms
|
||||
4. Release created automatically with binaries and checksums
|
||||
|
||||
## Recent Features (v0.31.x)
|
||||
|
||||
### Directory Spider
|
||||
The directory spider (`pkg/spider/directory.go`) automatically discovers and syncs metadata from other relays:
|
||||
- Crawls kind 10002 (relay list) events to discover relays
|
||||
- Expands outward from seed pubkeys (whitelisted users) via configurable hop distance
|
||||
- Fetches essential metadata events (kinds 0, 3, 10000, 10002)
|
||||
- Self-detection prevents querying own relay
|
||||
- Enable with `ORLY_DIRECTORY_SPIDER=true`
|
||||
|
||||
### Neo4j Social Graph Backend
|
||||
The Neo4j backend (`pkg/neo4j/`) includes Web of Trust (WoT) extensions:
|
||||
- **Social Event Processor**: Handles kinds 0, 3, 1984, 10000 for social graph management
|
||||
- **NostrUser nodes**: Store profile data and trust metrics (influence, PageRank)
|
||||
- **Relationships**: FOLLOWS, MUTES, REPORTS for social graph analysis
|
||||
- **WoT Schema**: See `pkg/neo4j/WOT_SPEC.md` for full specification
|
||||
- **Schema Modifications**: See `pkg/neo4j/MODIFYING_SCHEMA.md` for how to update
|
||||
|
||||
### Policy System Enhancements
|
||||
- **Default-Permissive Model**: Read and write are allowed by default unless restrictions are configured
|
||||
- **Write-Only Validation**: Size, age, tag validations apply ONLY to writes
|
||||
- **Read-Only Filtering**: `read_allow`, `read_follows_whitelist`, `privileged` apply ONLY to reads
|
||||
- **Separate Follows Whitelists**: `read_follows_whitelist` and `write_follows_whitelist` for fine-grained control
|
||||
- **Scripts**: Policy scripts execute ONLY for write operations
|
||||
- **Reference Documentation**: `docs/POLICY_CONFIGURATION_REFERENCE.md` provides authoritative read vs write applicability
|
||||
- See also: `pkg/policy/README.md` for quick reference
|
||||
|
||||
### Policy JSON Configuration Quick Reference
|
||||
|
||||
```json
|
||||
{
|
||||
"default_policy": "allow|deny",
|
||||
"kind": {
|
||||
"whitelist": [1, 3, 4], // Only these kinds allowed
|
||||
"blacklist": [4] // These kinds denied (ignored if whitelist set)
|
||||
},
|
||||
"global": {
|
||||
// Rule fields applied to ALL events
|
||||
"size_limit": 100000, // Max event size (bytes)
|
||||
"content_limit": 50000, // Max content size (bytes)
|
||||
"max_age_of_event": 86400, // Max age (seconds)
|
||||
"max_age_event_in_future": 300, // Max future time (seconds)
|
||||
"max_expiry_duration": "P7D", // ISO-8601 expiry limit
|
||||
"must_have_tags": ["d", "t"], // Required tag keys
|
||||
"protected_required": false, // Require NIP-70 "-" tag
|
||||
"identifier_regex": "^[a-z0-9-]{1,64}$", // Regex for "d" tags
|
||||
"tag_validation": {"t": "^[a-z0-9]+$"}, // Regex for any tag
|
||||
"privileged": false, // READ-ONLY: party-involved check
|
||||
"write_allow": ["pubkey_hex"], // Pubkeys allowed to write
|
||||
"write_deny": ["pubkey_hex"], // Pubkeys denied from writing
|
||||
"read_allow": ["pubkey_hex"], // Pubkeys allowed to read
|
||||
"read_deny": ["pubkey_hex"], // Pubkeys denied from reading
|
||||
"read_follows_whitelist": ["pubkey_hex"], // Pubkeys whose follows can read
|
||||
"write_follows_whitelist": ["pubkey_hex"], // Pubkeys whose follows can write
|
||||
"script": "/path/to/script.sh" // External validation script
|
||||
},
|
||||
"rules": {
|
||||
"1": { /* Same fields as global, for kind 1 */ },
|
||||
"30023": { /* Same fields as global, for kind 30023 */ }
|
||||
},
|
||||
"policy_admins": ["pubkey_hex"], // Can update via kind 12345
|
||||
"owners": ["pubkey_hex"], // Full policy control
|
||||
"policy_follow_whitelist_enabled": false // Enable legacy write_allow_follows
|
||||
}
|
||||
```
|
||||
|
||||
**Access Control Summary:**
|
||||
| Restriction Field | Applies To | When Set |
|
||||
|-------------------|------------|----------|
|
||||
| `read_allow` | READ | Only listed pubkeys can read |
|
||||
| `read_deny` | READ | Listed pubkeys denied (if no read_allow) |
|
||||
| `read_follows_whitelist` | READ | Named pubkeys + their follows can read |
|
||||
| `write_allow` | WRITE | Only listed pubkeys can write |
|
||||
| `write_deny` | WRITE | Listed pubkeys denied (if no write_allow) |
|
||||
| `write_follows_whitelist` | WRITE | Named pubkeys + their follows can write |
|
||||
| `privileged` | READ | Only author + p-tag recipients can read |
|
||||
|
||||
**Nil Policy Error Handling:**
|
||||
- If `ORLY_POLICY_ENABLED=true` but the policy fails to load (nil policy), the relay will:
|
||||
- Log a FATAL error message indicating misconfiguration
|
||||
- Return an error for all `CheckPolicy` calls
|
||||
- Deny all events until the configuration is fixed
|
||||
- This is a safety measure - a nil policy with policy enabled indicates configuration error
|
||||
|
||||
### Authentication Modes
|
||||
- `ORLY_AUTH_REQUIRED=true`: Require authentication for ALL requests
|
||||
- `ORLY_AUTH_TO_WRITE=true`: Require authentication only for writes (allow anonymous reads)
|
||||
|
||||
### NIP-43 Relay Access Metadata
|
||||
Invite-based access control system:
|
||||
- `ORLY_NIP43_ENABLED=true`: Enable invite system
|
||||
- Publishes kind 8000/8001 events for member changes
|
||||
- Publishes kind 13534 membership list events
|
||||
- Configurable invite expiry via `ORLY_NIP43_INVITE_EXPIRY`
|
||||
|
||||
## Documentation Index
|
||||
|
||||
| Document | Purpose |
|
||||
|----------|---------|
|
||||
| `docs/POLICY_CONFIGURATION_REFERENCE.md` | Authoritative policy config reference with read/write applicability |
|
||||
| `docs/POLICY_USAGE_GUIDE.md` | Comprehensive policy system user guide |
|
||||
| `pkg/policy/README.md` | Policy system quick reference |
|
||||
| `pkg/neo4j/README.md` | Neo4j backend overview |
|
||||
| `pkg/neo4j/WOT_SPEC.md` | Web of Trust schema specification |
|
||||
| `pkg/neo4j/MODIFYING_SCHEMA.md` | How to modify Neo4j schema |
|
||||
| `pkg/neo4j/TESTING.md` | Neo4j testing guide |
|
||||
| `readme.adoc` | Project README with feature overview |
|
||||
|
||||
@@ -1,387 +0,0 @@
|
||||
# Dgraph Database Implementation Status
|
||||
|
||||
## Overview
|
||||
|
||||
This document tracks the implementation of Dgraph as an alternative database backend for ORLY. The implementation allows switching between Badger (default) and Dgraph via the `ORLY_DB_TYPE` environment variable.
|
||||
|
||||
## Completion Status: ✅ STEP 1 COMPLETE - DGRAPH SERVER INTEGRATION + TESTS
|
||||
|
||||
**Build Status:** ✅ Successfully compiles with `CGO_ENABLED=0`
|
||||
**Binary Test:** ✅ ORLY v0.29.0 starts and runs successfully
|
||||
**Database Backend:** Uses badger by default, dgraph client integration complete
|
||||
**Dgraph Integration:** ✅ Real dgraph client connection via dgo library
|
||||
**Test Suite:** ✅ Comprehensive test suite mirroring badger tests
|
||||
|
||||
### ✅ Completed Components
|
||||
|
||||
1. **Core Infrastructure**
|
||||
- Database interface abstraction (`pkg/database/interface.go`)
|
||||
- Database factory with `ORLY_DB_TYPE` configuration
|
||||
- Dgraph package structure (`pkg/dgraph/`)
|
||||
- Schema definition for Nostr events, authors, tags, and markers
|
||||
- Lifecycle management (initialization, shutdown)
|
||||
|
||||
2. **Serial Number Generation**
|
||||
- Atomic counter using Dgraph markers (`pkg/dgraph/serial.go`)
|
||||
- Automatic initialization on startup
|
||||
- Thread-safe increment with mutex protection
|
||||
- Serial numbers assigned during SaveEvent
|
||||
|
||||
3. **Event Operations**
|
||||
- `SaveEvent`: Store events with graph relationships
|
||||
- `QueryEvents`: DQL query generation from Nostr filters
|
||||
- `QueryEventsWithOptions`: Support for delete events and versions
|
||||
- `CountEvents`: Event counting
|
||||
- `FetchEventBySerial`: Retrieve by serial number
|
||||
- `DeleteEvent`: Event deletion by ID
|
||||
- `Delete EventBySerial`: Event deletion by serial
|
||||
- `ProcessDelete`: Kind 5 deletion processing
|
||||
|
||||
4. **Metadata Storage (Marker-based)**
|
||||
- `SetMarker`/`GetMarker`/`HasMarker`/`DeleteMarker`: Key-value storage
|
||||
- Relay identity storage (using markers)
|
||||
- All metadata stored as special Marker nodes in graph
|
||||
|
||||
5. **Subscriptions & Payments**
|
||||
- `GetSubscription`/`IsSubscriptionActive`/`ExtendSubscription`
|
||||
- `RecordPayment`/`GetPaymentHistory`
|
||||
- `ExtendBlossomSubscription`/`GetBlossomStorageQuota`
|
||||
- `IsFirstTimeUser`
|
||||
- All implemented using JSON-encoded markers
|
||||
|
||||
6. **NIP-43 Invite System**
|
||||
- `AddNIP43Member`/`RemoveNIP43Member`/`IsNIP43Member`
|
||||
- `GetNIP43Membership`/`GetAllNIP43Members`
|
||||
- `StoreInviteCode`/`ValidateInviteCode`/`DeleteInviteCode`
|
||||
- All implemented using JSON-encoded markers
|
||||
|
||||
7. **Import/Export**
|
||||
- `Import`/`ImportEventsFromReader`/`ImportEventsFromStrings`
|
||||
- JSONL format support
|
||||
- Basic `Export` stub
|
||||
|
||||
8. **Configuration**
|
||||
- `ORLY_DB_TYPE` environment variable added
|
||||
- Factory pattern for database instantiation
|
||||
- main.go updated to use database.Database interface
|
||||
|
||||
9. **Compilation Fixes (Completed)**
|
||||
- ✅ All interface signatures matched to badger implementation
|
||||
- ✅ Fixed 100+ type errors in pkg/dgraph package
|
||||
- ✅ Updated app layer to use database interface instead of concrete types
|
||||
- ✅ Added type assertions for compatibility with existing managers
|
||||
- ✅ Project compiles successfully with both badger and dgraph implementations
|
||||
|
||||
10. **Dgraph Server Integration (✅ STEP 1 COMPLETE)**
|
||||
- ✅ Added dgo client library (v230.0.1)
|
||||
- ✅ Implemented gRPC connection to external dgraph instance
|
||||
- ✅ Real Query() and Mutate() methods using dgraph client
|
||||
- ✅ Schema definition and automatic application on startup
|
||||
- ✅ ORLY_DGRAPH_URL configuration (default: localhost:9080)
|
||||
- ✅ Proper connection lifecycle management
|
||||
- ✅ Badger metadata store for local key-value storage
|
||||
- ✅ Dual-storage architecture: dgraph for events, badger for metadata
|
||||
|
||||
11. **Test Suite (✅ COMPLETE)**
|
||||
- ✅ Test infrastructure (testmain_test.go, helpers_test.go)
|
||||
- ✅ Comprehensive save-event tests
|
||||
- ✅ Comprehensive query-events tests
|
||||
- ✅ Docker-compose setup for dgraph server
|
||||
- ✅ Automated test scripts (test-dgraph.sh, dgraph-start.sh)
|
||||
- ✅ Test documentation (DGRAPH_TESTING.md)
|
||||
- ✅ All tests compile successfully
|
||||
- ⏳ Tests require running dgraph server to execute
|
||||
|
||||
### ⚠️ Remaining Work (For Production Use)
|
||||
|
||||
1. **Unimplemented Methods** (Stubs - Not Critical)
|
||||
- `GetSerialsFromFilter`: Returns "not implemented" error
|
||||
- `GetSerialsByRange`: Returns "not implemented" error
|
||||
- `EventIdsBySerial`: Returns "not implemented" error
|
||||
- These are helper methods that may not be critical for basic operation
|
||||
|
||||
2. **📝 STEP 2: DQL Implementation** (Next Priority)
|
||||
- Update save-event.go to use real Mutate() calls with RDF N-Quads
|
||||
- Update query-events.go to parse actual DQL responses
|
||||
- Implement proper event JSON unmarshaling from dgraph responses
|
||||
- Add error handling for dgraph-specific errors
|
||||
- Optimize DQL queries for performance
|
||||
|
||||
3. **Schema Optimizations**
|
||||
- Current tag queries are simplified
|
||||
- Complex tag filters may need refinement
|
||||
- Consider using Dgraph facets for better tag indexing
|
||||
|
||||
4. **📝 STEP 3: Testing** (After DQL Implementation)
|
||||
- Set up local dgraph instance for testing
|
||||
- Integration testing with relay-tester
|
||||
- Performance comparison with Badger
|
||||
- Memory usage profiling
|
||||
- Test with actual dgraph server instance
|
||||
|
||||
### 📦 Dependencies Added
|
||||
|
||||
```bash
|
||||
go get github.com/dgraph-io/dgo/v230@v230.0.1
|
||||
go get google.golang.org/grpc@latest
|
||||
go get github.com/dgraph-io/badger/v4 # For metadata storage
|
||||
```
|
||||
|
||||
All dependencies have been added and `go mod tidy` completed successfully.
|
||||
|
||||
### 🔌 Dgraph Server Integration Details
|
||||
|
||||
The implementation uses a **client-server architecture**:
|
||||
|
||||
1. **Dgraph Server** (External)
|
||||
- Runs as a separate process (via docker or standalone)
|
||||
- Default gRPC endpoint: `localhost:9080`
|
||||
- Configured via `ORLY_DGRAPH_URL` environment variable
|
||||
|
||||
2. **ORLY Dgraph Client** (Integrated)
|
||||
- Uses dgo library for gRPC communication
|
||||
- Connects on startup, applies Nostr schema automatically
|
||||
- Query and Mutate methods communicate with dgraph server
|
||||
|
||||
3. **Dual Storage Architecture**
|
||||
- **Dgraph**: Event graph storage (events, authors, tags, relationships)
|
||||
- **Badger**: Metadata storage (markers, counters, relay identity)
|
||||
- This hybrid approach leverages strengths of both databases
|
||||
|
||||
## Implementation Approach
|
||||
|
||||
### Marker-Based Storage
|
||||
|
||||
For metadata that doesn't fit the graph model (subscriptions, NIP-43, identity), we use a marker-based approach:
|
||||
|
||||
1. **Markers** are special graph nodes with type "Marker"
|
||||
2. Each marker has:
|
||||
- `marker.key`: String index for lookup
|
||||
- `marker.value`: Hex-encoded or JSON-encoded data
|
||||
3. This provides key-value storage within the graph database
|
||||
|
||||
### Serial Number Management
|
||||
|
||||
Serial numbers are critical for event ordering. Implementation:
|
||||
|
||||
```go
|
||||
// Serial counter stored as a special marker
|
||||
const serialCounterKey = "serial_counter"
|
||||
|
||||
// Atomic increment with mutex protection
|
||||
func (d *D) getNextSerial() (uint64, error) {
|
||||
serialMutex.Lock()
|
||||
defer serialMutex.Unlock()
|
||||
|
||||
// Query current value, increment, save
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
### Event Storage
|
||||
|
||||
Events are stored as graph nodes with relationships:
|
||||
|
||||
- **Event nodes**: ID, serial, kind, created_at, content, sig, pubkey, tags
|
||||
- **Author nodes**: Pubkey with reverse edges to events
|
||||
- **Tag nodes**: Tag type and value with reverse edges
|
||||
- **Relationships**: `authored_by`, `references`, `mentions`, `tagged_with`
|
||||
|
||||
## Files Created/Modified
|
||||
|
||||
### New Files (`pkg/dgraph/`)
|
||||
- `dgraph.go`: Main implementation, initialization, schema
|
||||
- `save-event.go`: Event storage with RDF triple generation
|
||||
- `query-events.go`: Nostr filter to DQL translation
|
||||
- `fetch-event.go`: Event retrieval methods
|
||||
- `delete.go`: Event deletion
|
||||
- `markers.go`: Key-value metadata storage
|
||||
- `identity.go`: Relay identity management
|
||||
- `serial.go`: Serial number generation
|
||||
- `subscriptions.go`: Subscription/payment methods
|
||||
- `nip43.go`: NIP-43 invite system
|
||||
- `import-export.go`: Import/export operations
|
||||
- `logger.go`: Logging adapter
|
||||
- `utils.go`: Helper functions
|
||||
- `README.md`: Documentation
|
||||
|
||||
### Modified Files
|
||||
- `pkg/database/interface.go`: Database interface definition
|
||||
- `pkg/database/factory.go`: Database factory
|
||||
- `pkg/database/database.go`: Badger compile-time check
|
||||
- `app/config/config.go`: Added `ORLY_DB_TYPE` config
|
||||
- `app/server.go`: Changed to use Database interface
|
||||
- `app/main.go`: Updated to use Database interface
|
||||
- `main.go`: Added dgraph import and factory usage
|
||||
|
||||
## Usage
|
||||
|
||||
### Setting Up Dgraph Server
|
||||
|
||||
Before using dgraph mode, start a dgraph server:
|
||||
|
||||
```bash
|
||||
# Using docker (recommended)
|
||||
docker run -d -p 8080:8080 -p 9080:9080 -p 8000:8000 \
|
||||
-v ~/dgraph:/dgraph \
|
||||
dgraph/standalone:latest
|
||||
|
||||
# Or using docker-compose (see docs/dgraph-docker-compose.yml)
|
||||
docker-compose up -d dgraph
|
||||
```
|
||||
|
||||
### Environment Configuration
|
||||
|
||||
```bash
|
||||
# Use Badger (default)
|
||||
./orly
|
||||
|
||||
# Use Dgraph with default localhost connection
|
||||
export ORLY_DB_TYPE=dgraph
|
||||
./orly
|
||||
|
||||
# Use Dgraph with custom server
|
||||
export ORLY_DB_TYPE=dgraph
|
||||
export ORLY_DGRAPH_URL=remote.dgraph.server:9080
|
||||
./orly
|
||||
|
||||
# With full configuration
|
||||
export ORLY_DB_TYPE=dgraph
|
||||
export ORLY_DGRAPH_URL=localhost:9080
|
||||
export ORLY_DATA_DIR=/path/to/data
|
||||
./orly
|
||||
```
|
||||
|
||||
### Data Storage
|
||||
|
||||
#### Badger
|
||||
- Single directory with SST files
|
||||
- Typical size: 100-500MB for moderate usage
|
||||
|
||||
#### Dgraph
|
||||
- Three subdirectories:
|
||||
- `p/`: Postings (main data)
|
||||
- `w/`: Write-ahead log
|
||||
- Typical size: 500MB-2GB overhead + event data
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
### Memory Usage
|
||||
- **Badger**: ~100-200MB baseline
|
||||
- **Dgraph**: ~500MB-1GB baseline
|
||||
|
||||
### Query Performance
|
||||
- **Simple queries** (by ID, kind, author): Dgraph may be slower than Badger
|
||||
- **Graph traversals** (follows-of-follows): Dgraph significantly faster
|
||||
- **Full-text search**: Dgraph has built-in support
|
||||
|
||||
### Recommendations
|
||||
1. Use Badger for simple, high-performance relays
|
||||
2. Use Dgraph for relays needing complex graph queries
|
||||
3. Consider hybrid approach: Badger primary + Dgraph secondary
|
||||
|
||||
## Next Steps to Complete
|
||||
|
||||
### ✅ STEP 1: Dgraph Server Integration (COMPLETED)
|
||||
- ✅ Added dgo client library
|
||||
- ✅ Implemented gRPC connection
|
||||
- ✅ Real Query/Mutate methods
|
||||
- ✅ Schema application
|
||||
- ✅ Configuration added
|
||||
|
||||
### 📝 STEP 2: DQL Implementation (Next Priority)
|
||||
|
||||
1. **Update SaveEvent Implementation** (2-3 hours)
|
||||
- Replace RDF string building with actual Mutate() calls
|
||||
- Use dgraph's SetNquads for event insertion
|
||||
- Handle UIDs and references properly
|
||||
- Add error handling and transaction rollback
|
||||
|
||||
2. **Update QueryEvents Implementation** (2-3 hours)
|
||||
- Parse actual JSON responses from dgraph Query()
|
||||
- Implement proper event deserialization
|
||||
- Handle pagination with DQL offset/limit
|
||||
- Add query optimization for common patterns
|
||||
|
||||
3. **Implement Helper Methods** (1-2 hours)
|
||||
- FetchEventBySerial using DQL
|
||||
- GetSerialsByIds using DQL
|
||||
- CountEvents using DQL aggregation
|
||||
- DeleteEvent using dgraph mutations
|
||||
|
||||
### 📝 STEP 3: Testing (After DQL)
|
||||
|
||||
1. **Setup Dgraph Test Instance** (30 minutes)
|
||||
```bash
|
||||
# Start dgraph server
|
||||
docker run -d -p 9080:9080 dgraph/standalone:latest
|
||||
|
||||
# Test connection
|
||||
ORLY_DB_TYPE=dgraph ORLY_DGRAPH_URL=localhost:9080 ./orly
|
||||
```
|
||||
|
||||
2. **Basic Functional Testing** (1 hour)
|
||||
```bash
|
||||
# Start with dgraph
|
||||
ORLY_DB_TYPE=dgraph ./orly
|
||||
|
||||
# Test with relay-tester
|
||||
go run cmd/relay-tester/main.go -url ws://localhost:3334
|
||||
```
|
||||
|
||||
3. **Performance Testing** (2 hours)
|
||||
```bash
|
||||
# Compare query performance
|
||||
# Memory profiling
|
||||
# Load testing
|
||||
```
|
||||
|
||||
## Known Limitations
|
||||
|
||||
1. **Subscription Storage**: Uses simple JSON encoding in markers rather than proper graph nodes
|
||||
2. **Tag Queries**: Simplified implementation may not handle all complex tag filter combinations
|
||||
3. **Export**: Basic stub - needs full implementation for production use
|
||||
4. **Migrations**: Not implemented (Dgraph schema changes require manual updates)
|
||||
|
||||
## Conclusion
|
||||
|
||||
The Dgraph implementation has completed **✅ STEP 1: DGRAPH SERVER INTEGRATION** successfully.
|
||||
|
||||
### What Works Now (Step 1 Complete)
|
||||
- ✅ Full database interface implementation
|
||||
- ✅ All method signatures match badger implementation
|
||||
- ✅ Project compiles successfully with `CGO_ENABLED=0`
|
||||
- ✅ Binary runs and starts successfully
|
||||
- ✅ Real dgraph client connection via dgo library
|
||||
- ✅ gRPC communication with external dgraph server
|
||||
- ✅ Schema application on startup
|
||||
- ✅ Query() and Mutate() methods implemented
|
||||
- ✅ ORLY_DGRAPH_URL configuration
|
||||
- ✅ Dual-storage architecture (dgraph + badger metadata)
|
||||
|
||||
### Implementation Status
|
||||
- **Step 1: Dgraph Server Integration** ✅ COMPLETE
|
||||
- **Step 2: DQL Implementation** 📝 Next (save-event.go and query-events.go need updates)
|
||||
- **Step 3: Testing** 📝 After Step 2 (relay-tester, performance benchmarks)
|
||||
|
||||
### Architecture Summary
|
||||
|
||||
The implementation uses a **client-server architecture** with dual storage:
|
||||
|
||||
1. **Dgraph Client** (ORLY)
|
||||
- Connects to external dgraph via gRPC (default: localhost:9080)
|
||||
- Applies Nostr schema automatically on startup
|
||||
- Query/Mutate methods ready for DQL operations
|
||||
|
||||
2. **Dgraph Server** (External)
|
||||
- Run separately via docker or standalone binary
|
||||
- Stores event graph data (events, authors, tags, relationships)
|
||||
- Handles all graph queries and mutations
|
||||
|
||||
3. **Badger Metadata Store** (Local)
|
||||
- Stores markers, counters, relay identity
|
||||
- Provides fast key-value access for non-graph data
|
||||
- Complements dgraph for hybrid storage benefits
|
||||
|
||||
The abstraction layer is complete and the dgraph client integration is functional. Next step is implementing actual DQL query/mutation logic in save-event.go and query-events.go.
|
||||
|
||||
@@ -1,197 +0,0 @@
|
||||
# Migration to git.mleku.dev/mleku/nostr Library
|
||||
|
||||
## Overview
|
||||
|
||||
Successfully migrated the ORLY relay codebase to use the external `git.mleku.dev/mleku/nostr` library instead of maintaining duplicate protocol code internally.
|
||||
|
||||
## Migration Statistics
|
||||
|
||||
- **Files Changed**: 449
|
||||
- **Lines Added**: 624
|
||||
- **Lines Removed**: 65,132
|
||||
- **Net Reduction**: **64,508 lines of code** (~30-40% of the codebase)
|
||||
|
||||
## Packages Migrated
|
||||
|
||||
### Removed from next.orly.dev/pkg/
|
||||
|
||||
The following packages were completely removed as they now come from the nostr library:
|
||||
|
||||
#### Encoders (`pkg/encoders/`)
|
||||
- `encoders/event/` → `git.mleku.dev/mleku/nostr/encoders/event`
|
||||
- `encoders/filter/` → `git.mleku.dev/mleku/nostr/encoders/filter`
|
||||
- `encoders/tag/` → `git.mleku.dev/mleku/nostr/encoders/tag`
|
||||
- `encoders/kind/` → `git.mleku.dev/mleku/nostr/encoders/kind`
|
||||
- `encoders/timestamp/` → `git.mleku.dev/mleku/nostr/encoders/timestamp`
|
||||
- `encoders/hex/` → `git.mleku.dev/mleku/nostr/encoders/hex`
|
||||
- `encoders/text/` → `git.mleku.dev/mleku/nostr/encoders/text`
|
||||
- `encoders/ints/` → `git.mleku.dev/mleku/nostr/encoders/ints`
|
||||
- `encoders/bech32encoding/` → `git.mleku.dev/mleku/nostr/encoders/bech32encoding`
|
||||
- `encoders/reason/` → `git.mleku.dev/mleku/nostr/encoders/reason`
|
||||
- `encoders/varint/` → `git.mleku.dev/mleku/nostr/encoders/varint`
|
||||
|
||||
#### Envelopes (`pkg/encoders/envelopes/`)
|
||||
- `envelopes/eventenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/eventenvelope`
|
||||
- `envelopes/reqenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/reqenvelope`
|
||||
- `envelopes/okenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/okenvelope`
|
||||
- `envelopes/noticeenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/noticeenvelope`
|
||||
- `envelopes/eoseenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/eoseenvelope`
|
||||
- `envelopes/closedenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/closedenvelope`
|
||||
- `envelopes/closeenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/closeenvelope`
|
||||
- `envelopes/countenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/countenvelope`
|
||||
- `envelopes/authenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/authenvelope`
|
||||
|
||||
#### Cryptography (`pkg/crypto/`)
|
||||
- `crypto/p8k/` → `git.mleku.dev/mleku/nostr/crypto/p8k`
|
||||
- `crypto/ec/schnorr/` → `git.mleku.dev/mleku/nostr/crypto/ec/schnorr`
|
||||
- `crypto/ec/secp256k1/` → `git.mleku.dev/mleku/nostr/crypto/ec/secp256k1`
|
||||
- `crypto/ec/bech32/` → `git.mleku.dev/mleku/nostr/crypto/ec/bech32`
|
||||
- `crypto/ec/musig2/` → `git.mleku.dev/mleku/nostr/crypto/ec/musig2`
|
||||
- `crypto/ec/base58/` → `git.mleku.dev/mleku/nostr/crypto/ec/base58`
|
||||
- `crypto/ec/ecdsa/` → `git.mleku.dev/mleku/nostr/crypto/ec/ecdsa`
|
||||
- `crypto/ec/taproot/` → `git.mleku.dev/mleku/nostr/crypto/ec/taproot`
|
||||
- `crypto/keys/` → `git.mleku.dev/mleku/nostr/crypto/keys`
|
||||
- `crypto/encryption/` → `git.mleku.dev/mleku/nostr/crypto/encryption`
|
||||
|
||||
#### Interfaces (`pkg/interfaces/`)
|
||||
- `interfaces/signer/` → `git.mleku.dev/mleku/nostr/interfaces/signer`
|
||||
- `interfaces/signer/p8k/` → `git.mleku.dev/mleku/nostr/interfaces/signer/p8k`
|
||||
- `interfaces/codec/` → `git.mleku.dev/mleku/nostr/interfaces/codec`
|
||||
|
||||
#### Protocol (`pkg/protocol/`)
|
||||
- `protocol/ws/` → `git.mleku.dev/mleku/nostr/ws` (note: moved to root level in library)
|
||||
- `protocol/auth/` → `git.mleku.dev/mleku/nostr/protocol/auth`
|
||||
- `protocol/relayinfo/` → `git.mleku.dev/mleku/nostr/relayinfo`
|
||||
- `protocol/httpauth/` → `git.mleku.dev/mleku/nostr/httpauth`
|
||||
|
||||
#### Utilities (`pkg/utils/`)
|
||||
- `utils/bufpool/` → `git.mleku.dev/mleku/nostr/utils/bufpool`
|
||||
- `utils/normalize/` → `git.mleku.dev/mleku/nostr/utils/normalize`
|
||||
- `utils/constraints/` → `git.mleku.dev/mleku/nostr/utils/constraints`
|
||||
- `utils/number/` → `git.mleku.dev/mleku/nostr/utils/number`
|
||||
- `utils/pointers/` → `git.mleku.dev/mleku/nostr/utils/pointers`
|
||||
- `utils/units/` → `git.mleku.dev/mleku/nostr/utils/units`
|
||||
- `utils/values/` → `git.mleku.dev/mleku/nostr/utils/values`
|
||||
|
||||
### Packages Kept in ORLY (Relay-Specific)
|
||||
|
||||
The following packages remain in the ORLY codebase as they are relay-specific:
|
||||
|
||||
- `pkg/database/` - Database abstraction layer (Badger, DGraph backends)
|
||||
- `pkg/acl/` - Access control systems (follows, managed, none)
|
||||
- `pkg/policy/` - Event filtering and validation policies
|
||||
- `pkg/spider/` - Event syncing from other relays
|
||||
- `pkg/sync/` - Distributed relay synchronization
|
||||
- `pkg/protocol/blossom/` - Blossom blob storage protocol implementation
|
||||
- `pkg/protocol/directory/` - Directory service
|
||||
- `pkg/protocol/nwc/` - Nostr Wallet Connect
|
||||
- `pkg/protocol/nip43/` - NIP-43 relay management
|
||||
- `pkg/protocol/publish/` - Event publisher for WebSocket subscriptions
|
||||
- `pkg/interfaces/publisher/` - Publisher interface
|
||||
- `pkg/interfaces/store/` - Storage interface
|
||||
- `pkg/interfaces/acl/` - ACL interface
|
||||
- `pkg/interfaces/typer/` - Type identification interface (not in nostr library)
|
||||
- `pkg/utils/atomic/` - Extended atomic operations
|
||||
- `pkg/utils/interrupt/` - Signal handling
|
||||
- `pkg/utils/apputil/` - Application utilities
|
||||
- `pkg/utils/qu/` - Queue utilities
|
||||
- `pkg/utils/fastequal.go` - Fast byte comparison
|
||||
- `pkg/utils/subscription.go` - Subscription utilities
|
||||
- `pkg/run/` - Run utilities
|
||||
- `pkg/version/` - Version information
|
||||
- `app/` - All relay server code
|
||||
|
||||
## Migration Process
|
||||
|
||||
### 1. Added Dependency
|
||||
```bash
|
||||
go get git.mleku.dev/mleku/nostr@latest
|
||||
```
|
||||
|
||||
### 2. Updated Imports
|
||||
Created automated migration script to update all import paths from:
|
||||
- `next.orly.dev/pkg/encoders/*` → `git.mleku.dev/mleku/nostr/encoders/*`
|
||||
- `next.orly.dev/pkg/crypto/*` → `git.mleku.dev/mleku/nostr/crypto/*`
|
||||
- etc.
|
||||
|
||||
Processed **240+ files** with encoder imports, **74 files** with crypto imports, and **9 files** with WebSocket client imports.
|
||||
|
||||
### 3. Special Cases
|
||||
- **pkg/interfaces/typer/**: Restored from git as it's not in the nostr library (relay-specific)
|
||||
- **pkg/protocol/ws/**: Mapped to root-level `ws/` in the nostr library
|
||||
- **Test helpers**: Updated to use `git.mleku.dev/mleku/nostr/encoders/event/examples`
|
||||
- **atag package**: Migrated to `git.mleku.dev/mleku/nostr/encoders/tag/atag`
|
||||
|
||||
### 4. Removed Redundant Code
|
||||
```bash
|
||||
rm -rf pkg/encoders pkg/crypto pkg/interfaces/signer pkg/interfaces/codec \
|
||||
pkg/protocol/ws pkg/protocol/auth pkg/protocol/relayinfo \
|
||||
pkg/protocol/httpauth pkg/utils/bufpool pkg/utils/normalize \
|
||||
pkg/utils/constraints pkg/utils/number pkg/utils/pointers \
|
||||
pkg/utils/units pkg/utils/values
|
||||
```
|
||||
|
||||
### 5. Fixed Dependencies
|
||||
- Ran `go mod tidy` to clean up go.mod
|
||||
- Rebuilt with `CGO_ENABLED=0 GOFLAGS=-mod=mod go build -o orly .`
|
||||
- Verified tests pass
|
||||
|
||||
## Benefits
|
||||
|
||||
### 1. Code Reduction
|
||||
- **64,508 fewer lines** of code to maintain
|
||||
- Simplified codebase focused on relay-specific functionality
|
||||
- Reduced maintenance burden
|
||||
|
||||
### 2. Code Reuse
|
||||
- Nostr protocol code can be shared across multiple projects
|
||||
- Clients and other tools can use the same library
|
||||
- Consistent implementation across the ecosystem
|
||||
|
||||
### 3. Separation of Concerns
|
||||
- Clear boundary between general Nostr protocol code (library) and relay-specific code (ORLY)
|
||||
- Easier to understand which code is protocol-level vs. application-level
|
||||
|
||||
### 4. Improved Development
|
||||
- Protocol improvements benefit all projects using the library
|
||||
- Bug fixes are centralized
|
||||
- Testing is consolidated
|
||||
|
||||
## Verification
|
||||
|
||||
### Build Status
|
||||
✅ **Build successful**: Binary builds without errors
|
||||
|
||||
### Test Status
|
||||
✅ **App tests passed**: All application-level tests pass
|
||||
⏳ **Database tests**: Run extensively (timing out due to comprehensive query tests, but functionally working)
|
||||
|
||||
### Binary Output
|
||||
```
|
||||
$ ./orly version
|
||||
ℹ️ starting ORLY v0.29.14
|
||||
✅ Successfully initialized with nostr library
|
||||
```
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. **Commit Changes**: Review and commit the migration
|
||||
2. **Update Documentation**: Update CLAUDE.md to reflect the new architecture
|
||||
3. **CI/CD**: Ensure CI pipeline works with the new dependency
|
||||
4. **Testing**: Run full test suite to verify all functionality
|
||||
|
||||
## Notes
|
||||
|
||||
- The migration maintains full compatibility with existing ORLY functionality
|
||||
- No changes to relay behavior or API
|
||||
- All relay-specific features remain intact
|
||||
- The nostr library is actively maintained at `git.mleku.dev/mleku/nostr`
|
||||
- Library version: **v1.0.2**
|
||||
|
||||
## Migration Scripts
|
||||
|
||||
Created helper scripts (can be removed after commit):
|
||||
- `migrate-imports.sh` - Original comprehensive migration script
|
||||
- `migrate-fast.sh` - Fast sed-based migration script (used)
|
||||
|
||||
These scripts can be deleted after the migration is committed.
|
||||
@@ -1,5 +1,13 @@
|
||||
// Package config provides a go-simpler.org/env configuration table and helpers
|
||||
// for working with the list of key/value lists stored in .env files.
|
||||
//
|
||||
// IMPORTANT: This file is the SINGLE SOURCE OF TRUTH for all environment variables.
|
||||
// All configuration options MUST be defined here with proper `env` struct tags.
|
||||
// Never use os.Getenv() directly in other packages - pass configuration via structs.
|
||||
// This ensures all options appear in `./orly help` output and are documented.
|
||||
//
|
||||
// For database backends, use GetDatabaseConfigValues() to extract database-specific
|
||||
// settings, then construct a database.DatabaseConfig in the caller (e.g., main.go).
|
||||
package config
|
||||
|
||||
import (
|
||||
@@ -33,6 +41,7 @@ type C struct {
|
||||
DBLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
|
||||
DBBlockCacheMB int `env:"ORLY_DB_BLOCK_CACHE_MB" default:"512" usage:"Badger block cache size in MB (higher improves read hit ratio)"`
|
||||
DBIndexCacheMB int `env:"ORLY_DB_INDEX_CACHE_MB" default:"256" usage:"Badger index cache size in MB (improves index lookup performance)"`
|
||||
DBZSTDLevel int `env:"ORLY_DB_ZSTD_LEVEL" default:"1" usage:"Badger ZSTD compression level (1=fast/500MB/s, 3=default, 9=best ratio, 0=disable)"`
|
||||
LogToStdout bool `env:"ORLY_LOG_TO_STDOUT" default:"false" usage:"log to stdout instead of stderr"`
|
||||
Pprof string `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation,heap,block,goroutine,threadcreate,mutex"`
|
||||
PprofPath string `env:"ORLY_PPROF_PATH" usage:"optional directory to write pprof profiles into (inside container); default is temporary dir"`
|
||||
@@ -82,11 +91,19 @@ type C struct {
|
||||
NIP43InviteExpiry time.Duration `env:"ORLY_NIP43_INVITE_EXPIRY" default:"24h" usage:"how long invite codes remain valid"`
|
||||
|
||||
// Database configuration
|
||||
DBType string `env:"ORLY_DB_TYPE" default:"badger" usage:"database backend to use: badger or dgraph"`
|
||||
DgraphURL string `env:"ORLY_DGRAPH_URL" default:"localhost:9080" usage:"dgraph gRPC endpoint address (only used when ORLY_DB_TYPE=dgraph)"`
|
||||
QueryCacheSizeMB int `env:"ORLY_QUERY_CACHE_SIZE_MB" default:"512" usage:"query cache size in MB (caches database query results for faster REQ responses)"`
|
||||
DBType string `env:"ORLY_DB_TYPE" default:"badger" usage:"database backend to use: badger or neo4j"`
|
||||
QueryCacheSizeMB int `env:"ORLY_QUERY_CACHE_SIZE_MB" default:"512" usage:"query cache size in MB (caches database query results for faster REQ responses)"`
|
||||
QueryCacheMaxAge string `env:"ORLY_QUERY_CACHE_MAX_AGE" default:"5m" usage:"maximum age for cached query results (e.g., 5m, 10m, 1h)"`
|
||||
|
||||
// Neo4j configuration (only used when ORLY_DB_TYPE=neo4j)
|
||||
Neo4jURI string `env:"ORLY_NEO4J_URI" default:"bolt://localhost:7687" usage:"Neo4j bolt URI (only used when ORLY_DB_TYPE=neo4j)"`
|
||||
Neo4jUser string `env:"ORLY_NEO4J_USER" default:"neo4j" usage:"Neo4j authentication username (only used when ORLY_DB_TYPE=neo4j)"`
|
||||
Neo4jPassword string `env:"ORLY_NEO4J_PASSWORD" default:"password" usage:"Neo4j authentication password (only used when ORLY_DB_TYPE=neo4j)"`
|
||||
|
||||
// Advanced database tuning
|
||||
SerialCachePubkeys int `env:"ORLY_SERIAL_CACHE_PUBKEYS" default:"100000" usage:"max pubkeys to cache for compact event storage (default: 100000, ~3.2MB memory)"`
|
||||
SerialCacheEventIds int `env:"ORLY_SERIAL_CACHE_EVENT_IDS" default:"500000" usage:"max event IDs to cache for compact event storage (default: 500000, ~16MB memory)"`
|
||||
|
||||
// TLS configuration
|
||||
TLSDomains []string `env:"ORLY_TLS_DOMAINS" usage:"comma-separated list of domains to respond to for TLS"`
|
||||
Certs []string `env:"ORLY_CERTS" usage:"comma-separated list of paths to certificate root names (e.g., /path/to/cert will load /path/to/cert.pem and /path/to/cert.key)"`
|
||||
@@ -217,6 +234,21 @@ func ServeRequested() (requested bool) {
|
||||
return
|
||||
}
|
||||
|
||||
// VersionRequested checks if the first command line argument is "version" and returns
|
||||
// whether the version should be printed and the program should exit.
|
||||
//
|
||||
// Return Values
|
||||
// - requested: true if the 'version' subcommand was provided, false otherwise.
|
||||
func VersionRequested() (requested bool) {
|
||||
if len(os.Args) > 1 {
|
||||
switch strings.ToLower(os.Args[1]) {
|
||||
case "version", "-v", "--v", "-version", "--version":
|
||||
requested = true
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// KV is a key/value pair.
|
||||
type KV struct{ Key, Value string }
|
||||
|
||||
@@ -348,7 +380,7 @@ func PrintHelp(cfg *C, printer io.Writer) {
|
||||
)
|
||||
_, _ = fmt.Fprintf(
|
||||
printer,
|
||||
`Usage: %s [env|help|identity|serve]
|
||||
`Usage: %s [env|help|identity|serve|version]
|
||||
|
||||
- env: print environment variables configuring %s
|
||||
- help: print this help text
|
||||
@@ -356,6 +388,7 @@ func PrintHelp(cfg *C, printer io.Writer) {
|
||||
- serve: start ephemeral relay with RAM-based storage at /dev/shm/orlyserve
|
||||
listening on 0.0.0.0:10547 with 'none' ACL mode (open relay)
|
||||
useful for testing and benchmarking
|
||||
- version: print version and exit (also: -v, --v, -version, --version)
|
||||
|
||||
`,
|
||||
cfg.AppName, cfg.AppName,
|
||||
@@ -369,3 +402,30 @@ func PrintHelp(cfg *C, printer io.Writer) {
|
||||
PrintEnv(cfg, printer)
|
||||
fmt.Fprintln(printer)
|
||||
}
|
||||
|
||||
// GetDatabaseConfigValues returns the database configuration values as individual fields.
|
||||
// This avoids circular imports with pkg/database while allowing main.go to construct
|
||||
// a database.DatabaseConfig with the correct type.
|
||||
func (cfg *C) GetDatabaseConfigValues() (
|
||||
dataDir, logLevel string,
|
||||
blockCacheMB, indexCacheMB, queryCacheSizeMB int,
|
||||
queryCacheMaxAge time.Duration,
|
||||
serialCachePubkeys, serialCacheEventIds int,
|
||||
zstdLevel int,
|
||||
neo4jURI, neo4jUser, neo4jPassword string,
|
||||
) {
|
||||
// Parse query cache max age from string to duration
|
||||
queryCacheMaxAge = 5 * time.Minute // Default
|
||||
if cfg.QueryCacheMaxAge != "" {
|
||||
if duration, err := time.ParseDuration(cfg.QueryCacheMaxAge); err == nil {
|
||||
queryCacheMaxAge = duration
|
||||
}
|
||||
}
|
||||
|
||||
return cfg.DataDir, cfg.DBLogLevel,
|
||||
cfg.DBBlockCacheMB, cfg.DBIndexCacheMB, cfg.QueryCacheSizeMB,
|
||||
queryCacheMaxAge,
|
||||
cfg.SerialCachePubkeys, cfg.SerialCacheEventIds,
|
||||
cfg.DBZSTDLevel,
|
||||
cfg.Neo4jURI, cfg.Neo4jUser, cfg.Neo4jPassword
|
||||
}
|
||||
|
||||
@@ -25,7 +25,15 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
|
||||
log.I.F("HandleDelete: processing delete event %0x from pubkey %0x", env.E.ID, env.E.Pubkey)
|
||||
log.I.F("HandleDelete: delete event tags: %d tags", len(*env.E.Tags))
|
||||
for i, t := range *env.E.Tags {
|
||||
log.I.F("HandleDelete: tag %d: %s = %s", i, string(t.Key()), string(t.Value()))
|
||||
// Use ValueHex() for e/p tags to properly display binary-encoded values
|
||||
key := string(t.Key())
|
||||
var val string
|
||||
if key == "e" || key == "p" {
|
||||
val = string(t.ValueHex()) // Properly converts binary to hex
|
||||
} else {
|
||||
val = string(t.Value())
|
||||
}
|
||||
log.I.F("HandleDelete: tag %d: %s = %s", i, key, val)
|
||||
}
|
||||
|
||||
// Debug: log admin and owner lists
|
||||
@@ -142,27 +150,21 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
|
||||
// if e tags are found, delete them if the author is signer, or one of
|
||||
// the owners is signer
|
||||
if utils.FastEqual(t.Key(), []byte("e")) {
|
||||
// First try binary format (optimized storage for e-tags)
|
||||
var dst []byte
|
||||
if binVal := t.ValueBinary(); binVal != nil {
|
||||
dst = binVal
|
||||
log.I.F("HandleDelete: processing binary e-tag event ID: %0x", dst)
|
||||
} else {
|
||||
// Fall back to hex decoding for non-binary values
|
||||
val := t.Value()
|
||||
if len(val) == 0 {
|
||||
log.W.F("HandleDelete: empty e-tag value")
|
||||
continue
|
||||
}
|
||||
log.I.F("HandleDelete: processing e-tag with value: %s", string(val))
|
||||
if b, e := hex.Dec(string(val)); chk.E(e) {
|
||||
log.E.F("HandleDelete: failed to decode hex event ID %s: %v", string(val), e)
|
||||
continue
|
||||
} else {
|
||||
dst = b
|
||||
log.I.F("HandleDelete: decoded event ID: %0x", dst)
|
||||
}
|
||||
// Use ValueHex() which properly handles both binary-encoded and hex string formats
|
||||
hexVal := t.ValueHex()
|
||||
if len(hexVal) == 0 {
|
||||
log.W.F("HandleDelete: empty e-tag value")
|
||||
continue
|
||||
}
|
||||
log.I.F("HandleDelete: processing e-tag event ID: %s", string(hexVal))
|
||||
|
||||
// Decode hex to binary for filter
|
||||
dst, e := hex.Dec(string(hexVal))
|
||||
if chk.E(e) {
|
||||
log.E.F("HandleDelete: failed to decode event ID %s: %v", string(hexVal), e)
|
||||
continue
|
||||
}
|
||||
|
||||
f := &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(dst),
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
@@ -11,6 +12,7 @@ import (
|
||||
"next.orly.dev/pkg/acl"
|
||||
"git.mleku.dev/mleku/nostr/encoders/envelopes/authenvelope"
|
||||
"git.mleku.dev/mleku/nostr/encoders/envelopes/eventenvelope"
|
||||
"git.mleku.dev/mleku/nostr/encoders/envelopes/noticeenvelope"
|
||||
"git.mleku.dev/mleku/nostr/encoders/envelopes/okenvelope"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
"git.mleku.dev/mleku/nostr/encoders/kind"
|
||||
@@ -19,8 +21,185 @@ import (
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
// validateLowercaseHexInJSON checks that all hex-encoded fields in the raw JSON are lowercase.
|
||||
// NIP-01 specifies that hex encoding must be lowercase.
|
||||
// This must be called on the raw message BEFORE unmarshaling, since unmarshal converts
|
||||
// hex strings to binary and loses case information.
|
||||
// Returns an error message if validation fails, or empty string if valid.
|
||||
func validateLowercaseHexInJSON(msg []byte) string {
|
||||
// Find and validate "id" field (64 hex chars)
|
||||
if err := validateJSONHexField(msg, `"id"`); err != "" {
|
||||
return err + " (id)"
|
||||
}
|
||||
|
||||
// Find and validate "pubkey" field (64 hex chars)
|
||||
if err := validateJSONHexField(msg, `"pubkey"`); err != "" {
|
||||
return err + " (pubkey)"
|
||||
}
|
||||
|
||||
// Find and validate "sig" field (128 hex chars)
|
||||
if err := validateJSONHexField(msg, `"sig"`); err != "" {
|
||||
return err + " (sig)"
|
||||
}
|
||||
|
||||
// Validate e and p tags in the tags array
|
||||
// Tags format: ["e", "hexvalue", ...] or ["p", "hexvalue", ...]
|
||||
if err := validateEPTagsInJSON(msg); err != "" {
|
||||
return err
|
||||
}
|
||||
|
||||
return "" // Valid
|
||||
}
|
||||
|
||||
// validateJSONHexField finds a JSON field and checks if its hex value contains uppercase.
|
||||
func validateJSONHexField(msg []byte, fieldName string) string {
|
||||
// Find the field name
|
||||
idx := bytes.Index(msg, []byte(fieldName))
|
||||
if idx == -1 {
|
||||
return "" // Field not found, skip
|
||||
}
|
||||
|
||||
// Find the colon after the field name
|
||||
colonIdx := bytes.Index(msg[idx:], []byte(":"))
|
||||
if colonIdx == -1 {
|
||||
return ""
|
||||
}
|
||||
|
||||
// Find the opening quote of the value
|
||||
valueStart := idx + colonIdx + 1
|
||||
for valueStart < len(msg) && (msg[valueStart] == ' ' || msg[valueStart] == '\t' || msg[valueStart] == '\n' || msg[valueStart] == '\r') {
|
||||
valueStart++
|
||||
}
|
||||
if valueStart >= len(msg) || msg[valueStart] != '"' {
|
||||
return ""
|
||||
}
|
||||
valueStart++ // Skip the opening quote
|
||||
|
||||
// Find the closing quote
|
||||
valueEnd := valueStart
|
||||
for valueEnd < len(msg) && msg[valueEnd] != '"' {
|
||||
valueEnd++
|
||||
}
|
||||
|
||||
// Extract the hex value and check for uppercase
|
||||
hexValue := msg[valueStart:valueEnd]
|
||||
if containsUppercaseHex(hexValue) {
|
||||
return "blocked: hex fields may only be lower case, see NIP-01"
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// validateEPTagsInJSON checks e and p tags in the JSON for uppercase hex.
|
||||
func validateEPTagsInJSON(msg []byte) string {
|
||||
// Find the tags array
|
||||
tagsIdx := bytes.Index(msg, []byte(`"tags"`))
|
||||
if tagsIdx == -1 {
|
||||
return "" // No tags
|
||||
}
|
||||
|
||||
// Find the opening bracket of the tags array
|
||||
bracketIdx := bytes.Index(msg[tagsIdx:], []byte("["))
|
||||
if bracketIdx == -1 {
|
||||
return ""
|
||||
}
|
||||
|
||||
tagsStart := tagsIdx + bracketIdx
|
||||
|
||||
// Scan through to find ["e", ...] and ["p", ...] patterns
|
||||
// This is a simplified parser that looks for specific patterns
|
||||
pos := tagsStart
|
||||
for pos < len(msg) {
|
||||
// Look for ["e" or ["p" pattern
|
||||
eTagPattern := bytes.Index(msg[pos:], []byte(`["e"`))
|
||||
pTagPattern := bytes.Index(msg[pos:], []byte(`["p"`))
|
||||
|
||||
var tagType string
|
||||
var nextIdx int
|
||||
|
||||
if eTagPattern == -1 && pTagPattern == -1 {
|
||||
break // No more e or p tags
|
||||
} else if eTagPattern == -1 {
|
||||
nextIdx = pos + pTagPattern
|
||||
tagType = "p"
|
||||
} else if pTagPattern == -1 {
|
||||
nextIdx = pos + eTagPattern
|
||||
tagType = "e"
|
||||
} else if eTagPattern < pTagPattern {
|
||||
nextIdx = pos + eTagPattern
|
||||
tagType = "e"
|
||||
} else {
|
||||
nextIdx = pos + pTagPattern
|
||||
tagType = "p"
|
||||
}
|
||||
|
||||
// Find the hex value after the tag type
|
||||
// Pattern: ["e", "hexvalue" or ["p", "hexvalue"
|
||||
commaIdx := bytes.Index(msg[nextIdx:], []byte(","))
|
||||
if commaIdx == -1 {
|
||||
pos = nextIdx + 4
|
||||
continue
|
||||
}
|
||||
|
||||
// Find the opening quote of the hex value
|
||||
valueStart := nextIdx + commaIdx + 1
|
||||
for valueStart < len(msg) && (msg[valueStart] == ' ' || msg[valueStart] == '\t' || msg[valueStart] == '"') {
|
||||
if msg[valueStart] == '"' {
|
||||
valueStart++
|
||||
break
|
||||
}
|
||||
valueStart++
|
||||
}
|
||||
|
||||
// Find the closing quote
|
||||
valueEnd := valueStart
|
||||
for valueEnd < len(msg) && msg[valueEnd] != '"' {
|
||||
valueEnd++
|
||||
}
|
||||
|
||||
// Check if this looks like a hex value (64 chars for pubkey/event ID)
|
||||
hexValue := msg[valueStart:valueEnd]
|
||||
if len(hexValue) == 64 && containsUppercaseHex(hexValue) {
|
||||
return fmt.Sprintf("blocked: hex fields may only be lower case, see NIP-01 (%s tag)", tagType)
|
||||
}
|
||||
|
||||
pos = valueEnd + 1
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// containsUppercaseHex checks if a byte slice (representing hex) contains uppercase letters A-F.
|
||||
func containsUppercaseHex(b []byte) bool {
|
||||
for _, c := range b {
|
||||
if c >= 'A' && c <= 'F' {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (l *Listener) HandleEvent(msg []byte) (err error) {
|
||||
log.D.F("HandleEvent: START handling event: %s", msg)
|
||||
|
||||
// Validate that all hex fields are lowercase BEFORE unmarshaling
|
||||
// (unmarshal converts hex to binary and loses case information)
|
||||
if errMsg := validateLowercaseHexInJSON(msg); errMsg != "" {
|
||||
log.W.F("HandleEvent: rejecting event with uppercase hex: %s", errMsg)
|
||||
// Send NOTICE to alert client developers about the issue
|
||||
if noticeErr := noticeenvelope.NewFrom(errMsg).Write(l); noticeErr != nil {
|
||||
log.E.F("failed to send NOTICE for uppercase hex: %v", noticeErr)
|
||||
}
|
||||
// Send OK false with the error message
|
||||
if err = okenvelope.NewFrom(
|
||||
nil, false,
|
||||
reason.Blocked.F(errMsg),
|
||||
).Write(l); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// decode the envelope
|
||||
env := eventenvelope.NewSubmission()
|
||||
log.I.F("HandleEvent: received event message length: %d", len(msg))
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
"git.mleku.dev/mleku/nostr/encoders/reason"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
"next.orly.dev/pkg/policy"
|
||||
"next.orly.dev/pkg/protocol/graph"
|
||||
"next.orly.dev/pkg/protocol/nip43"
|
||||
"git.mleku.dev/mleku/nostr/utils/normalize"
|
||||
"git.mleku.dev/mleku/nostr/utils/pointers"
|
||||
@@ -142,6 +143,71 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
// Check for NIP-XX graph queries in filters
|
||||
// Graph queries use the _graph filter extension to traverse the social graph
|
||||
for _, f := range *env.Filters {
|
||||
if f != nil && graph.IsGraphQuery(f) {
|
||||
graphQuery, graphErr := graph.ExtractFromFilter(f)
|
||||
if graphErr != nil {
|
||||
log.W.F("invalid _graph query from %s: %v", l.remote, graphErr)
|
||||
if err = closedenvelope.NewFrom(
|
||||
env.Subscription,
|
||||
reason.Error.F("invalid _graph query: %s", graphErr.Error()),
|
||||
).Write(l); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
if graphQuery != nil {
|
||||
log.I.F("graph query from %s: method=%s seed=%s depth=%d",
|
||||
l.remote, graphQuery.Method, graphQuery.Seed, graphQuery.Depth)
|
||||
|
||||
// Check if graph executor is available
|
||||
if l.graphExecutor == nil {
|
||||
log.W.F("graph query received but executor not initialized")
|
||||
if err = closedenvelope.NewFrom(
|
||||
env.Subscription,
|
||||
reason.Error.F("graph queries not supported on this relay"),
|
||||
).Write(l); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Execute the graph query
|
||||
resultEvent, execErr := l.graphExecutor.Execute(graphQuery)
|
||||
if execErr != nil {
|
||||
log.W.F("graph query execution failed from %s: %v", l.remote, execErr)
|
||||
if err = closedenvelope.NewFrom(
|
||||
env.Subscription,
|
||||
reason.Error.F("graph query failed: %s", execErr.Error()),
|
||||
).Write(l); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Send the result event
|
||||
var res *eventenvelope.Result
|
||||
if res, err = eventenvelope.NewResultWith(env.Subscription, resultEvent); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if err = res.Write(l); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
// Send EOSE to signal completion
|
||||
if err = eoseenvelope.NewFrom(env.Subscription).Write(l); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
log.I.F("graph query completed for %s: method=%s, returned event kind %d",
|
||||
l.remote, graphQuery.Method, resultEvent.Kind)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Filter out policy config events (kind 12345) for non-policy-admin users
|
||||
// Policy config events should only be visible to policy administrators
|
||||
if l.policyManager != nil && l.policyManager.IsEnabled() {
|
||||
|
||||
18
app/main.go
18
app/main.go
@@ -18,6 +18,7 @@ import (
|
||||
"next.orly.dev/pkg/database"
|
||||
"git.mleku.dev/mleku/nostr/encoders/bech32encoding"
|
||||
"next.orly.dev/pkg/policy"
|
||||
"next.orly.dev/pkg/protocol/graph"
|
||||
"next.orly.dev/pkg/protocol/nip43"
|
||||
"next.orly.dev/pkg/protocol/publish"
|
||||
"next.orly.dev/pkg/spider"
|
||||
@@ -120,6 +121,23 @@ func Run(
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize graph query executor (only for Badger backend)
|
||||
if badgerDB, ok := db.(*database.D); ok {
|
||||
// Get relay identity key for signing graph query responses
|
||||
relaySecretKey, err := badgerDB.GetOrCreateRelayIdentitySecret()
|
||||
if err != nil {
|
||||
log.E.F("failed to get relay identity key for graph executor: %v", err)
|
||||
} else {
|
||||
// Create the graph adapter and executor
|
||||
graphAdapter := database.NewGraphAdapter(badgerDB)
|
||||
if l.graphExecutor, err = graph.NewExecutor(graphAdapter, relaySecretKey); err != nil {
|
||||
log.E.F("failed to create graph executor: %v", err)
|
||||
} else {
|
||||
log.I.F("graph query executor initialized")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize spider manager based on mode (only for Badger backend)
|
||||
if badgerDB, ok := db.(*database.D); ok && cfg.SpiderMode != "none" {
|
||||
if l.spiderManager, err = spider.New(ctx, badgerDB, l.publishers, cfg.SpiderMode); chk.E(err) {
|
||||
|
||||
@@ -26,6 +26,7 @@ import (
|
||||
"next.orly.dev/pkg/policy"
|
||||
"git.mleku.dev/mleku/nostr/protocol/auth"
|
||||
"git.mleku.dev/mleku/nostr/httpauth"
|
||||
"next.orly.dev/pkg/protocol/graph"
|
||||
"next.orly.dev/pkg/protocol/nip43"
|
||||
"next.orly.dev/pkg/protocol/publish"
|
||||
"next.orly.dev/pkg/spider"
|
||||
@@ -62,6 +63,7 @@ type Server struct {
|
||||
clusterManager *dsync.ClusterManager
|
||||
blossomServer *blossom.Server
|
||||
InviteManager *nip43.InviteManager
|
||||
graphExecutor *graph.Executor
|
||||
cfg *config.C
|
||||
db database.Database // Changed from *database.D to interface
|
||||
}
|
||||
|
||||
24
cmd/benchmark/.claude/settings.local.json
Normal file
24
cmd/benchmark/.claude/settings.local.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"permissions": {
|
||||
"allow": [
|
||||
"Bash(ls:*)",
|
||||
"Bash(go build:*)",
|
||||
"Bash(export LD_LIBRARY_PATH:*)",
|
||||
"Bash(/tmp/benchmark_test:*)",
|
||||
"Bash(grep:*)",
|
||||
"Bash(go doc:*)",
|
||||
"Bash(CGO_ENABLED=0 go build:*)",
|
||||
"Bash(docker compose:*)",
|
||||
"Bash(./run-benchmark.sh:*)",
|
||||
"Bash(tee:*)",
|
||||
"Bash(sudo rm:*)",
|
||||
"Bash(go mod tidy:*)",
|
||||
"Bash(docker run --rm -v \"/home/mleku/src/next.orly.dev/cmd/benchmark/data:/data\" --user root alpine sh -c \"rm -rf /data/* /data/.[!.]*\")",
|
||||
"Bash(head:*)",
|
||||
"Bash(cat:*)",
|
||||
"Bash(chmod:*)"
|
||||
],
|
||||
"deny": [],
|
||||
"ask": []
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
# Dockerfile for benchmark runner
|
||||
# Uses pure Go build with purego for dynamic libsecp256k1 loading
|
||||
# Fetches latest tag from git repository for stable builds
|
||||
|
||||
# Use Debian-based Go image to match runtime stage (avoids musl/glibc linker mismatch)
|
||||
FROM golang:1.25-bookworm AS builder
|
||||
@@ -10,12 +11,18 @@ RUN apt-get update && apt-get install -y --no-install-recommends git ca-certific
|
||||
# Set working directory
|
||||
WORKDIR /build
|
||||
|
||||
# Copy go modules
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
# Clone the repository and checkout the latest tag
|
||||
# Using git.nostrdev.com (primary repo, most up-to-date)
|
||||
RUN git clone https://git.nostrdev.com/mleku/next.orly.dev.git . && \
|
||||
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "main") && \
|
||||
echo "Building benchmark from ORLY version: ${LATEST_TAG}" && \
|
||||
git checkout "${LATEST_TAG}"
|
||||
|
||||
# Copy source code
|
||||
COPY . .
|
||||
# Remove local replace directives and update to released version, then download dependencies
|
||||
RUN sed -i '/^replace .* => \/home/d' go.mod && \
|
||||
sed -i 's/git.mleku.dev\/mleku\/nostr v1.0.7/git.mleku.dev\/mleku\/nostr v1.0.8/' go.mod && \
|
||||
go mod tidy && \
|
||||
go mod download
|
||||
|
||||
# Build the benchmark tool with CGO disabled (uses purego for crypto)
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o benchmark ./cmd/benchmark
|
||||
@@ -36,8 +43,8 @@ WORKDIR /app
|
||||
# Copy benchmark binary (libsecp256k1.so.1 is already installed via apt)
|
||||
COPY --from=builder /build/benchmark /app/benchmark
|
||||
|
||||
# Copy benchmark runner script
|
||||
COPY cmd/benchmark/benchmark-runner.sh /app/benchmark-runner
|
||||
# Copy benchmark runner script from the local code
|
||||
COPY --from=builder /build/cmd/benchmark/benchmark-runner.sh /app/benchmark-runner
|
||||
|
||||
# Make scripts executable
|
||||
RUN chmod +x /app/benchmark-runner
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
# Dockerfile for next.orly.dev relay (benchmark version)
|
||||
# Uses pure Go build with purego for dynamic libsecp256k1 loading
|
||||
# Fetches latest tag from git repository instead of local code
|
||||
|
||||
# Stage 1: Build stage
|
||||
# Use Debian-based Go image to match runtime stage (avoids musl/glibc linker mismatch)
|
||||
@@ -11,12 +12,18 @@ RUN apt-get update && apt-get install -y --no-install-recommends git make && rm
|
||||
# Set working directory
|
||||
WORKDIR /build
|
||||
|
||||
# Copy go mod files first for better layer caching
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
# Clone the repository and checkout the latest tag
|
||||
# Using git.nostrdev.com (primary repo, most up-to-date)
|
||||
RUN git clone https://git.nostrdev.com/mleku/next.orly.dev.git . && \
|
||||
LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "main") && \
|
||||
echo "Building ORLY version: ${LATEST_TAG}" && \
|
||||
git checkout "${LATEST_TAG}"
|
||||
|
||||
# Copy source code
|
||||
COPY . .
|
||||
# Remove local replace directives and update to released version, then download dependencies
|
||||
RUN sed -i '/^replace .* => \/home/d' go.mod && \
|
||||
sed -i 's/git.mleku.dev\/mleku\/nostr v1.0.7/git.mleku.dev\/mleku\/nostr v1.0.8/' go.mod && \
|
||||
go mod tidy && \
|
||||
go mod download
|
||||
|
||||
# Build the relay with CGO disabled (uses purego for crypto)
|
||||
# Include debug symbols for profiling
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
A comprehensive benchmarking system for testing and comparing the performance of multiple Nostr relay implementations, including:
|
||||
|
||||
- **next.orly.dev** (this repository) - Badger, DGraph, and Neo4j backend variants
|
||||
- **next.orly.dev** (this repository) - Badger and Neo4j backend variants
|
||||
- **Khatru** - SQLite and Badger variants
|
||||
- **Relayer** - Basic example implementation
|
||||
- **Strfry** - C++ LMDB-based relay
|
||||
@@ -94,10 +94,7 @@ ls reports/run_YYYYMMDD_HHMMSS/
|
||||
| Service | Port | Description |
|
||||
| ------------------ | ---- | ----------------------------------------- |
|
||||
| next-orly-badger | 8001 | This repository's Badger relay |
|
||||
| next-orly-dgraph | 8007 | This repository's DGraph relay |
|
||||
| next-orly-neo4j | 8008 | This repository's Neo4j relay |
|
||||
| dgraph-zero | 5080 | DGraph cluster coordinator |
|
||||
| dgraph-alpha | 9080 | DGraph data node |
|
||||
| neo4j | 7474/7687 | Neo4j graph database |
|
||||
| khatru-sqlite | 8002 | Khatru with SQLite backend |
|
||||
| khatru-badger | 8003 | Khatru with Badger backend |
|
||||
@@ -180,7 +177,7 @@ go build -o benchmark main.go
|
||||
|
||||
## Database Backend Comparison
|
||||
|
||||
The benchmark suite includes **next.orly.dev** with three different database backends to compare architectural approaches:
|
||||
The benchmark suite includes **next.orly.dev** with two different database backends to compare architectural approaches:
|
||||
|
||||
### Badger Backend (next-orly-badger)
|
||||
- **Type**: Embedded key-value store
|
||||
@@ -192,16 +189,6 @@ The benchmark suite includes **next.orly.dev** with three different database bac
|
||||
- Simpler deployment
|
||||
- Limited to single-node scaling
|
||||
|
||||
### DGraph Backend (next-orly-dgraph)
|
||||
- **Type**: Distributed graph database
|
||||
- **Architecture**: Client-server with dgraph-zero (coordinator) and dgraph-alpha (data node)
|
||||
- **Best for**: Distributed deployments, horizontal scaling
|
||||
- **Characteristics**:
|
||||
- Network overhead from gRPC communication
|
||||
- Supports multi-node clustering
|
||||
- Built-in replication and sharding
|
||||
- More complex deployment
|
||||
|
||||
### Neo4j Backend (next-orly-neo4j)
|
||||
- **Type**: Native graph database
|
||||
- **Architecture**: Client-server with Neo4j Community Edition
|
||||
@@ -218,10 +205,10 @@ The benchmark suite includes **next.orly.dev** with three different database bac
|
||||
### Comparing the Backends
|
||||
|
||||
The benchmark results will show:
|
||||
- **Latency differences**: Embedded vs. distributed overhead, graph traversal efficiency
|
||||
- **Throughput trade-offs**: Single-process optimization vs. distributed scalability vs. graph query optimization
|
||||
- **Latency differences**: Embedded vs. client-server overhead, graph traversal efficiency
|
||||
- **Throughput trade-offs**: Single-process optimization vs. graph query optimization
|
||||
- **Resource usage**: Memory and CPU patterns for different architectures
|
||||
- **Query performance**: Graph queries (Neo4j) vs. key-value lookups (Badger) vs. distributed queries (DGraph)
|
||||
- **Query performance**: Graph queries (Neo4j) vs. key-value lookups (Badger)
|
||||
|
||||
This comparison helps determine which backend is appropriate for different deployment scenarios and workload patterns.
|
||||
|
||||
|
||||
@@ -27,6 +27,7 @@ echo "Timestamp: $(date)"
|
||||
echo "Events per test: ${BENCHMARK_EVENTS}"
|
||||
echo "Concurrent workers: ${BENCHMARK_WORKERS}"
|
||||
echo "Test duration: ${BENCHMARK_DURATION}"
|
||||
echo "Graph traversal: ${BENCHMARK_GRAPH_TRAVERSAL:-false}"
|
||||
echo "Output directory: ${RUN_DIR}"
|
||||
echo "=================================================="
|
||||
|
||||
@@ -70,12 +71,12 @@ run_benchmark() {
|
||||
local relay_name="$1"
|
||||
local relay_url="$2"
|
||||
local output_file="$3"
|
||||
|
||||
|
||||
echo ""
|
||||
echo "=================================================="
|
||||
echo "Testing ${relay_name} at ws://${relay_url}"
|
||||
echo "=================================================="
|
||||
|
||||
|
||||
# Wait for relay to be ready
|
||||
if ! wait_for_relay "${relay_name}" "${relay_url}"; then
|
||||
echo "ERROR: ${relay_name} is not responding, skipping..."
|
||||
@@ -84,14 +85,14 @@ run_benchmark() {
|
||||
echo "ERROR: Connection failed" >> "${output_file}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
# Run the benchmark
|
||||
echo "Running benchmark against ${relay_name}..."
|
||||
|
||||
|
||||
# Create temporary directory for this relay's data
|
||||
TEMP_DATA_DIR="/tmp/benchmark_${relay_name}_$$"
|
||||
mkdir -p "${TEMP_DATA_DIR}"
|
||||
|
||||
|
||||
# Run benchmark and capture both stdout and stderr
|
||||
if /app/benchmark \
|
||||
-datadir="${TEMP_DATA_DIR}" \
|
||||
@@ -99,9 +100,9 @@ run_benchmark() {
|
||||
-workers="${BENCHMARK_WORKERS}" \
|
||||
-duration="${BENCHMARK_DURATION}" \
|
||||
> "${output_file}" 2>&1; then
|
||||
|
||||
|
||||
echo "✓ Benchmark completed successfully for ${relay_name}"
|
||||
|
||||
|
||||
# Add relay identification to the report
|
||||
echo "" >> "${output_file}"
|
||||
echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
|
||||
@@ -111,7 +112,7 @@ run_benchmark() {
|
||||
echo " Events: ${BENCHMARK_EVENTS}" >> "${output_file}"
|
||||
echo " Workers: ${BENCHMARK_WORKERS}" >> "${output_file}"
|
||||
echo " Duration: ${BENCHMARK_DURATION}" >> "${output_file}"
|
||||
|
||||
|
||||
else
|
||||
echo "✗ Benchmark failed for ${relay_name}"
|
||||
echo "" >> "${output_file}"
|
||||
@@ -120,7 +121,67 @@ run_benchmark() {
|
||||
echo "STATUS: FAILED" >> "${output_file}"
|
||||
echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
|
||||
fi
|
||||
|
||||
|
||||
# Clean up temporary data
|
||||
rm -rf "${TEMP_DATA_DIR}"
|
||||
}
|
||||
|
||||
# Function to run network graph traversal benchmark against a specific relay
|
||||
run_graph_traversal_benchmark() {
|
||||
local relay_name="$1"
|
||||
local relay_url="$2"
|
||||
local output_file="$3"
|
||||
|
||||
echo ""
|
||||
echo "=================================================="
|
||||
echo "Graph Traversal Benchmark: ${relay_name} at ws://${relay_url}"
|
||||
echo "=================================================="
|
||||
|
||||
# Wait for relay to be ready
|
||||
if ! wait_for_relay "${relay_name}" "${relay_url}"; then
|
||||
echo "ERROR: ${relay_name} is not responding, skipping graph traversal..."
|
||||
echo "RELAY: ${relay_name}" > "${output_file}"
|
||||
echo "STATUS: FAILED - Relay not responding" >> "${output_file}"
|
||||
echo "ERROR: Connection failed" >> "${output_file}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Run the network graph traversal benchmark
|
||||
echo "Running network graph traversal benchmark against ${relay_name}..."
|
||||
|
||||
# Create temporary directory for this relay's data
|
||||
TEMP_DATA_DIR="/tmp/graph_benchmark_${relay_name}_$$"
|
||||
mkdir -p "${TEMP_DATA_DIR}"
|
||||
|
||||
# Run graph traversal benchmark via WebSocket
|
||||
if /app/benchmark \
|
||||
-graph-network \
|
||||
-relay-url="ws://${relay_url}" \
|
||||
-datadir="${TEMP_DATA_DIR}" \
|
||||
-workers="${BENCHMARK_WORKERS}" \
|
||||
> "${output_file}" 2>&1; then
|
||||
|
||||
echo "✓ Graph traversal benchmark completed successfully for ${relay_name}"
|
||||
|
||||
# Add relay identification to the report
|
||||
echo "" >> "${output_file}"
|
||||
echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
|
||||
echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
|
||||
echo "TEST_TYPE: Graph Traversal (100k pubkeys, 3-degree follows)" >> "${output_file}"
|
||||
echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
|
||||
echo "BENCHMARK_CONFIG:" >> "${output_file}"
|
||||
echo " Workers: ${BENCHMARK_WORKERS}" >> "${output_file}"
|
||||
|
||||
else
|
||||
echo "✗ Graph traversal benchmark failed for ${relay_name}"
|
||||
echo "" >> "${output_file}"
|
||||
echo "RELAY_NAME: ${relay_name}" >> "${output_file}"
|
||||
echo "RELAY_URL: ws://${relay_url}" >> "${output_file}"
|
||||
echo "TEST_TYPE: Graph Traversal" >> "${output_file}"
|
||||
echo "STATUS: FAILED" >> "${output_file}"
|
||||
echo "TEST_TIMESTAMP: $(date -Iseconds)" >> "${output_file}"
|
||||
fi
|
||||
|
||||
# Clean up temporary data
|
||||
rm -rf "${TEMP_DATA_DIR}"
|
||||
}
|
||||
@@ -234,22 +295,50 @@ EOF
|
||||
# Main execution
|
||||
echo "Starting relay benchmark suite..."
|
||||
|
||||
# Check if graph traversal mode is enabled
|
||||
BENCHMARK_GRAPH_TRAVERSAL="${BENCHMARK_GRAPH_TRAVERSAL:-false}"
|
||||
|
||||
# Parse targets and run benchmarks
|
||||
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
|
||||
if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
|
||||
echo "WARNING: Skipping invalid target: ${relay_name}:${relay_port}"
|
||||
continue
|
||||
fi
|
||||
|
||||
|
||||
relay_url="${relay_name}:${relay_port}"
|
||||
output_file="${RUN_DIR}/${relay_name}_results.txt"
|
||||
|
||||
|
||||
run_benchmark "${relay_name}" "${relay_url}" "${output_file}"
|
||||
|
||||
|
||||
# Small delay between tests
|
||||
sleep 5
|
||||
done
|
||||
|
||||
# Run graph traversal benchmarks if enabled
|
||||
if [ "${BENCHMARK_GRAPH_TRAVERSAL}" = "true" ]; then
|
||||
echo ""
|
||||
echo "=================================================="
|
||||
echo "Starting Graph Traversal Benchmark Suite"
|
||||
echo "=================================================="
|
||||
echo "This tests 100k pubkeys with 1-1000 follows each"
|
||||
echo "and performs 3-degree traversal queries"
|
||||
echo "=================================================="
|
||||
|
||||
echo "${BENCHMARK_TARGETS}" | tr ',' '\n' | while IFS=':' read -r relay_name relay_port; do
|
||||
if [ -z "${relay_name}" ] || [ -z "${relay_port}" ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
relay_url="${relay_name}:${relay_port}"
|
||||
output_file="${RUN_DIR}/${relay_name}_graph_traversal_results.txt"
|
||||
|
||||
run_graph_traversal_benchmark "${relay_name}" "${relay_url}" "${output_file}"
|
||||
|
||||
# Longer delay between graph traversal tests (they're more intensive)
|
||||
sleep 10
|
||||
done
|
||||
fi
|
||||
|
||||
# Generate aggregate report
|
||||
generate_aggregate_report
|
||||
|
||||
|
||||
@@ -1,130 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"next.orly.dev/pkg/database"
|
||||
_ "next.orly.dev/pkg/dgraph" // Import to register dgraph factory
|
||||
)
|
||||
|
||||
// DgraphBenchmark wraps a Benchmark with dgraph-specific setup
|
||||
type DgraphBenchmark struct {
|
||||
config *BenchmarkConfig
|
||||
docker *DgraphDocker
|
||||
database database.Database
|
||||
bench *BenchmarkAdapter
|
||||
}
|
||||
|
||||
// NewDgraphBenchmark creates a new dgraph benchmark instance
|
||||
func NewDgraphBenchmark(config *BenchmarkConfig) (*DgraphBenchmark, error) {
|
||||
// Create Docker manager
|
||||
docker := NewDgraphDocker()
|
||||
|
||||
// Start dgraph containers
|
||||
ctx := context.Background()
|
||||
if err := docker.Start(ctx); err != nil {
|
||||
return nil, fmt.Errorf("failed to start dgraph: %w", err)
|
||||
}
|
||||
|
||||
// Set environment variable for dgraph connection
|
||||
os.Setenv("ORLY_DGRAPH_URL", docker.GetGRPCEndpoint())
|
||||
|
||||
// Create database instance using dgraph backend
|
||||
cancel := func() {}
|
||||
db, err := database.NewDatabase(ctx, cancel, "dgraph", config.DataDir, "warn")
|
||||
if err != nil {
|
||||
docker.Stop()
|
||||
return nil, fmt.Errorf("failed to create dgraph database: %w", err)
|
||||
}
|
||||
|
||||
// Wait for database to be ready
|
||||
fmt.Println("Waiting for dgraph database to be ready...")
|
||||
select {
|
||||
case <-db.Ready():
|
||||
fmt.Println("Dgraph database is ready")
|
||||
case <-time.After(30 * time.Second):
|
||||
db.Close()
|
||||
docker.Stop()
|
||||
return nil, fmt.Errorf("dgraph database failed to become ready")
|
||||
}
|
||||
|
||||
// Create adapter to use Database interface with Benchmark
|
||||
adapter := NewBenchmarkAdapter(config, db)
|
||||
|
||||
dgraphBench := &DgraphBenchmark{
|
||||
config: config,
|
||||
docker: docker,
|
||||
database: db,
|
||||
bench: adapter,
|
||||
}
|
||||
|
||||
return dgraphBench, nil
|
||||
}
|
||||
|
||||
// Close closes the dgraph benchmark and stops Docker containers
|
||||
func (dgb *DgraphBenchmark) Close() {
|
||||
fmt.Println("Closing dgraph benchmark...")
|
||||
|
||||
if dgb.database != nil {
|
||||
dgb.database.Close()
|
||||
}
|
||||
|
||||
if dgb.docker != nil {
|
||||
if err := dgb.docker.Stop(); err != nil {
|
||||
log.Printf("Error stopping dgraph Docker: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RunSuite runs the benchmark suite on dgraph
|
||||
func (dgb *DgraphBenchmark) RunSuite() {
|
||||
fmt.Println("\n╔════════════════════════════════════════════════════════╗")
|
||||
fmt.Println("║ DGRAPH BACKEND BENCHMARK SUITE ║")
|
||||
fmt.Println("╚════════════════════════════════════════════════════════╝")
|
||||
|
||||
// Run only one round for dgraph to keep benchmark time reasonable
|
||||
fmt.Printf("\n=== Starting dgraph benchmark ===\n")
|
||||
|
||||
fmt.Printf("RunPeakThroughputTest (dgraph)..\n")
|
||||
dgb.bench.RunPeakThroughputTest()
|
||||
fmt.Println("Wiping database between tests...")
|
||||
dgb.database.Wipe()
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
fmt.Printf("RunBurstPatternTest (dgraph)..\n")
|
||||
dgb.bench.RunBurstPatternTest()
|
||||
fmt.Println("Wiping database between tests...")
|
||||
dgb.database.Wipe()
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
fmt.Printf("RunMixedReadWriteTest (dgraph)..\n")
|
||||
dgb.bench.RunMixedReadWriteTest()
|
||||
fmt.Println("Wiping database between tests...")
|
||||
dgb.database.Wipe()
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
fmt.Printf("RunQueryTest (dgraph)..\n")
|
||||
dgb.bench.RunQueryTest()
|
||||
fmt.Println("Wiping database between tests...")
|
||||
dgb.database.Wipe()
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
fmt.Printf("RunConcurrentQueryStoreTest (dgraph)..\n")
|
||||
dgb.bench.RunConcurrentQueryStoreTest()
|
||||
|
||||
fmt.Printf("\n=== Dgraph benchmark completed ===\n\n")
|
||||
}
|
||||
|
||||
// GenerateReport generates the benchmark report
|
||||
func (dgb *DgraphBenchmark) GenerateReport() {
|
||||
dgb.bench.GenerateReport()
|
||||
}
|
||||
|
||||
// GenerateAsciidocReport generates asciidoc format report
|
||||
func (dgb *DgraphBenchmark) GenerateAsciidocReport() {
|
||||
dgb.bench.GenerateAsciidocReport()
|
||||
}
|
||||
@@ -1,160 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DgraphDocker manages a dgraph instance via Docker Compose
|
||||
type DgraphDocker struct {
|
||||
composeFile string
|
||||
projectName string
|
||||
running bool
|
||||
}
|
||||
|
||||
// NewDgraphDocker creates a new dgraph Docker manager
|
||||
func NewDgraphDocker() *DgraphDocker {
|
||||
// Try to find the docker-compose file in the current directory first
|
||||
composeFile := "docker-compose-dgraph.yml"
|
||||
|
||||
// If not found, try the cmd/benchmark directory (for running from project root)
|
||||
if _, err := os.Stat(composeFile); os.IsNotExist(err) {
|
||||
composeFile = filepath.Join("cmd", "benchmark", "docker-compose-dgraph.yml")
|
||||
}
|
||||
|
||||
return &DgraphDocker{
|
||||
composeFile: composeFile,
|
||||
projectName: "orly-benchmark-dgraph",
|
||||
running: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the dgraph Docker containers
|
||||
func (d *DgraphDocker) Start(ctx context.Context) error {
|
||||
fmt.Println("Starting dgraph Docker containers...")
|
||||
|
||||
// Stop any existing containers first
|
||||
d.Stop()
|
||||
|
||||
// Start containers
|
||||
cmd := exec.CommandContext(
|
||||
ctx,
|
||||
"docker-compose",
|
||||
"-f", d.composeFile,
|
||||
"-p", d.projectName,
|
||||
"up", "-d",
|
||||
)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to start dgraph containers: %w", err)
|
||||
}
|
||||
|
||||
fmt.Println("Waiting for dgraph to be healthy...")
|
||||
|
||||
// Wait for health checks to pass
|
||||
if err := d.waitForHealthy(ctx, 60*time.Second); err != nil {
|
||||
d.Stop() // Clean up on failure
|
||||
return err
|
||||
}
|
||||
|
||||
d.running = true
|
||||
fmt.Println("Dgraph is ready!")
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitForHealthy waits for dgraph to become healthy
|
||||
func (d *DgraphDocker) waitForHealthy(ctx context.Context, timeout time.Duration) error {
|
||||
deadline := time.Now().Add(timeout)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
// Check if alpha is healthy by checking docker health status
|
||||
cmd := exec.CommandContext(
|
||||
ctx,
|
||||
"docker",
|
||||
"inspect",
|
||||
"--format={{.State.Health.Status}}",
|
||||
"orly-benchmark-dgraph-alpha",
|
||||
)
|
||||
|
||||
output, err := cmd.Output()
|
||||
if err == nil && string(output) == "healthy\n" {
|
||||
// Additional short wait to ensure full readiness
|
||||
time.Sleep(2 * time.Second)
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(2 * time.Second):
|
||||
// Continue waiting
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("dgraph failed to become healthy within %v", timeout)
|
||||
}
|
||||
|
||||
// Stop stops and removes the dgraph Docker containers
|
||||
func (d *DgraphDocker) Stop() error {
|
||||
if !d.running {
|
||||
// Try to stop anyway in case of untracked state
|
||||
cmd := exec.Command(
|
||||
"docker-compose",
|
||||
"-f", d.composeFile,
|
||||
"-p", d.projectName,
|
||||
"down", "-v",
|
||||
)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
_ = cmd.Run() // Ignore errors
|
||||
return nil
|
||||
}
|
||||
|
||||
fmt.Println("Stopping dgraph Docker containers...")
|
||||
|
||||
cmd := exec.Command(
|
||||
"docker-compose",
|
||||
"-f", d.composeFile,
|
||||
"-p", d.projectName,
|
||||
"down", "-v",
|
||||
)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to stop dgraph containers: %w", err)
|
||||
}
|
||||
|
||||
d.running = false
|
||||
fmt.Println("Dgraph containers stopped")
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetGRPCEndpoint returns the dgraph gRPC endpoint
|
||||
func (d *DgraphDocker) GetGRPCEndpoint() string {
|
||||
return "localhost:9080"
|
||||
}
|
||||
|
||||
// IsRunning returns whether dgraph is running
|
||||
func (d *DgraphDocker) IsRunning() bool {
|
||||
return d.running
|
||||
}
|
||||
|
||||
// Logs returns the logs from dgraph containers
|
||||
func (d *DgraphDocker) Logs() error {
|
||||
cmd := exec.Command(
|
||||
"docker-compose",
|
||||
"-f", d.composeFile,
|
||||
"-p", d.projectName,
|
||||
"logs",
|
||||
)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
dgraph-zero:
|
||||
image: dgraph/dgraph:v23.1.0
|
||||
container_name: orly-benchmark-dgraph-zero
|
||||
working_dir: /data/zero
|
||||
ports:
|
||||
- "5080:5080"
|
||||
- "6080:6080"
|
||||
command: dgraph zero --my=dgraph-zero:5080
|
||||
networks:
|
||||
- orly-benchmark
|
||||
healthcheck:
|
||||
test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 3
|
||||
start_period: 5s
|
||||
|
||||
dgraph-alpha:
|
||||
image: dgraph/dgraph:v23.1.0
|
||||
container_name: orly-benchmark-dgraph-alpha
|
||||
working_dir: /data/alpha
|
||||
ports:
|
||||
- "8080:8080"
|
||||
- "9080:9080"
|
||||
command: dgraph alpha --my=dgraph-alpha:7080 --zero=dgraph-zero:5080 --security whitelist=0.0.0.0/0
|
||||
networks:
|
||||
- orly-benchmark
|
||||
depends_on:
|
||||
dgraph-zero:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 6
|
||||
start_period: 10s
|
||||
|
||||
networks:
|
||||
orly-benchmark:
|
||||
name: orly-benchmark-network
|
||||
driver: bridge
|
||||
@@ -13,21 +13,6 @@ services:
|
||||
volumes:
|
||||
- /dev/shm/benchmark/next-orly-badger:/data
|
||||
|
||||
# Next.orly.dev relay with DGraph
|
||||
next-orly-dgraph:
|
||||
volumes:
|
||||
- /dev/shm/benchmark/next-orly-dgraph:/data
|
||||
|
||||
# DGraph Zero - cluster coordinator
|
||||
dgraph-zero:
|
||||
volumes:
|
||||
- /dev/shm/benchmark/dgraph-zero:/data
|
||||
|
||||
# DGraph Alpha - data node
|
||||
dgraph-alpha:
|
||||
volumes:
|
||||
- /dev/shm/benchmark/dgraph-alpha:/data
|
||||
|
||||
# Next.orly.dev relay with Neo4j
|
||||
next-orly-neo4j:
|
||||
volumes:
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
version: "3.8"
|
||||
|
||||
services:
|
||||
# Next.orly.dev relay with Badger (this repository)
|
||||
# Next.orly.dev relay with Badger (fetches latest tag from git)
|
||||
next-orly-badger:
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: cmd/benchmark/Dockerfile.next-orly
|
||||
context: .
|
||||
dockerfile: Dockerfile.next-orly
|
||||
container_name: benchmark-next-orly-badger
|
||||
environment:
|
||||
- ORLY_DATA_DIR=/data
|
||||
@@ -26,83 +26,11 @@ services:
|
||||
retries: 3
|
||||
start_period: 40s
|
||||
|
||||
# Next.orly.dev relay with DGraph (this repository)
|
||||
next-orly-dgraph:
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: cmd/benchmark/Dockerfile.next-orly
|
||||
container_name: benchmark-next-orly-dgraph
|
||||
environment:
|
||||
- ORLY_DATA_DIR=/data
|
||||
- ORLY_LISTEN=0.0.0.0
|
||||
- ORLY_PORT=8080
|
||||
- ORLY_LOG_LEVEL=off
|
||||
- ORLY_DB_TYPE=dgraph
|
||||
- ORLY_DGRAPH_URL=dgraph-alpha:9080
|
||||
volumes:
|
||||
- ./data/next-orly-dgraph:/data
|
||||
ports:
|
||||
- "8007:8080"
|
||||
networks:
|
||||
- benchmark-net
|
||||
depends_on:
|
||||
dgraph-alpha:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8080/"]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
start_period: 60s
|
||||
|
||||
# DGraph Zero - cluster coordinator
|
||||
dgraph-zero:
|
||||
image: dgraph/dgraph:v23.1.0
|
||||
container_name: benchmark-dgraph-zero
|
||||
working_dir: /data/zero
|
||||
ports:
|
||||
- "5080:5080"
|
||||
- "6080:6080"
|
||||
volumes:
|
||||
- ./data/dgraph-zero:/data
|
||||
command: dgraph zero --my=dgraph-zero:5080
|
||||
networks:
|
||||
- benchmark-net
|
||||
healthcheck:
|
||||
test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 3
|
||||
start_period: 5s
|
||||
|
||||
# DGraph Alpha - data node
|
||||
dgraph-alpha:
|
||||
image: dgraph/dgraph:v23.1.0
|
||||
container_name: benchmark-dgraph-alpha
|
||||
working_dir: /data/alpha
|
||||
ports:
|
||||
- "8088:8080"
|
||||
- "9080:9080"
|
||||
volumes:
|
||||
- ./data/dgraph-alpha:/data
|
||||
command: dgraph alpha --my=dgraph-alpha:7080 --zero=dgraph-zero:5080 --security whitelist=0.0.0.0/0
|
||||
networks:
|
||||
- benchmark-net
|
||||
depends_on:
|
||||
dgraph-zero:
|
||||
condition: service_healthy
|
||||
healthcheck:
|
||||
test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
|
||||
interval: 5s
|
||||
timeout: 3s
|
||||
retries: 6
|
||||
start_period: 10s
|
||||
|
||||
# Next.orly.dev relay with Neo4j (this repository)
|
||||
# Next.orly.dev relay with Neo4j (fetches latest tag from git)
|
||||
next-orly-neo4j:
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: cmd/benchmark/Dockerfile.next-orly
|
||||
context: .
|
||||
dockerfile: Dockerfile.next-orly
|
||||
container_name: benchmark-next-orly-neo4j
|
||||
environment:
|
||||
- ORLY_DATA_DIR=/data
|
||||
@@ -291,17 +219,15 @@ services:
|
||||
retries: 10
|
||||
start_period: 30s
|
||||
|
||||
# Benchmark runner
|
||||
# Benchmark runner (fetches latest tag from git)
|
||||
benchmark-runner:
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: cmd/benchmark/Dockerfile.benchmark
|
||||
context: .
|
||||
dockerfile: Dockerfile.benchmark
|
||||
container_name: benchmark-runner
|
||||
depends_on:
|
||||
next-orly-badger:
|
||||
condition: service_healthy
|
||||
next-orly-dgraph:
|
||||
condition: service_healthy
|
||||
next-orly-neo4j:
|
||||
condition: service_healthy
|
||||
khatru-sqlite:
|
||||
@@ -317,10 +243,11 @@ services:
|
||||
rely-sqlite:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- BENCHMARK_TARGETS=rely-sqlite:3334,next-orly-badger:8080,next-orly-dgraph:8080,next-orly-neo4j:8080,khatru-sqlite:3334,khatru-badger:3334,relayer-basic:7447,strfry:8080,nostr-rs-relay:8080
|
||||
- BENCHMARK_TARGETS=rely-sqlite:3334,next-orly-badger:8080,next-orly-neo4j:8080,khatru-sqlite:3334,khatru-badger:3334,relayer-basic:7447,strfry:8080,nostr-rs-relay:8080
|
||||
- BENCHMARK_EVENTS=50000
|
||||
- BENCHMARK_WORKERS=24
|
||||
- BENCHMARK_DURATION=60s
|
||||
- BENCHMARK_GRAPH_TRAVERSAL=${BENCHMARK_GRAPH_TRAVERSAL:-false}
|
||||
volumes:
|
||||
- ./reports:/reports
|
||||
networks:
|
||||
|
||||
520
cmd/benchmark/graph_traversal_benchmark.go
Normal file
520
cmd/benchmark/graph_traversal_benchmark.go
Normal file
@@ -0,0 +1,520 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/filter"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
"git.mleku.dev/mleku/nostr/encoders/kind"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
"lukechampine.com/frand"
|
||||
"next.orly.dev/pkg/database"
|
||||
)
|
||||
|
||||
const (
|
||||
// GraphBenchNumPubkeys is the number of pubkeys to generate for graph benchmark
|
||||
GraphBenchNumPubkeys = 100000
|
||||
// GraphBenchMinFollows is the minimum number of follows per pubkey
|
||||
GraphBenchMinFollows = 1
|
||||
// GraphBenchMaxFollows is the maximum number of follows per pubkey
|
||||
GraphBenchMaxFollows = 1000
|
||||
// GraphBenchSeed is the deterministic seed for frand PRNG (fits in uint64)
|
||||
GraphBenchSeed uint64 = 0x4E6F737472 // "Nostr" in hex
|
||||
// GraphBenchTraversalDepth is the depth of graph traversal (3 = third degree)
|
||||
GraphBenchTraversalDepth = 3
|
||||
)
|
||||
|
||||
// GraphTraversalBenchmark benchmarks graph traversal using NIP-01 style queries
|
||||
type GraphTraversalBenchmark struct {
|
||||
config *BenchmarkConfig
|
||||
db *database.D
|
||||
results []*BenchmarkResult
|
||||
mu sync.RWMutex
|
||||
|
||||
// Cached data for the benchmark
|
||||
pubkeys [][]byte // 100k pubkeys as 32-byte arrays
|
||||
signers []*p8k.Signer // signers for each pubkey
|
||||
follows [][]int // follows[i] = list of indices that pubkey[i] follows
|
||||
rng *frand.RNG // deterministic PRNG
|
||||
}
|
||||
|
||||
// NewGraphTraversalBenchmark creates a new graph traversal benchmark
|
||||
func NewGraphTraversalBenchmark(config *BenchmarkConfig, db *database.D) *GraphTraversalBenchmark {
|
||||
return &GraphTraversalBenchmark{
|
||||
config: config,
|
||||
db: db,
|
||||
results: make([]*BenchmarkResult, 0),
|
||||
rng: frand.NewCustom(make([]byte, 32), 1024, 12), // ChaCha12 with seed buffer
|
||||
}
|
||||
}
|
||||
|
||||
// initializeDeterministicRNG initializes the PRNG with deterministic seed
|
||||
func (g *GraphTraversalBenchmark) initializeDeterministicRNG() {
|
||||
// Create seed buffer from GraphBenchSeed (uint64 spread across 8 bytes)
|
||||
seedBuf := make([]byte, 32)
|
||||
seed := GraphBenchSeed
|
||||
seedBuf[0] = byte(seed >> 56)
|
||||
seedBuf[1] = byte(seed >> 48)
|
||||
seedBuf[2] = byte(seed >> 40)
|
||||
seedBuf[3] = byte(seed >> 32)
|
||||
seedBuf[4] = byte(seed >> 24)
|
||||
seedBuf[5] = byte(seed >> 16)
|
||||
seedBuf[6] = byte(seed >> 8)
|
||||
seedBuf[7] = byte(seed)
|
||||
g.rng = frand.NewCustom(seedBuf, 1024, 12)
|
||||
}
|
||||
|
||||
// generatePubkeys generates deterministic pubkeys using frand
|
||||
func (g *GraphTraversalBenchmark) generatePubkeys() {
|
||||
fmt.Printf("Generating %d deterministic pubkeys...\n", GraphBenchNumPubkeys)
|
||||
start := time.Now()
|
||||
|
||||
g.initializeDeterministicRNG()
|
||||
g.pubkeys = make([][]byte, GraphBenchNumPubkeys)
|
||||
g.signers = make([]*p8k.Signer, GraphBenchNumPubkeys)
|
||||
|
||||
for i := 0; i < GraphBenchNumPubkeys; i++ {
|
||||
// Generate deterministic 32-byte secret key from PRNG
|
||||
secretKey := make([]byte, 32)
|
||||
g.rng.Read(secretKey)
|
||||
|
||||
// Create signer from secret key
|
||||
signer := p8k.MustNew()
|
||||
if err := signer.InitSec(secretKey); err != nil {
|
||||
panic(fmt.Sprintf("failed to init signer %d: %v", i, err))
|
||||
}
|
||||
|
||||
g.signers[i] = signer
|
||||
g.pubkeys[i] = make([]byte, 32)
|
||||
copy(g.pubkeys[i], signer.Pub())
|
||||
|
||||
if (i+1)%10000 == 0 {
|
||||
fmt.Printf(" Generated %d/%d pubkeys...\n", i+1, GraphBenchNumPubkeys)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Generated %d pubkeys in %v\n", GraphBenchNumPubkeys, time.Since(start))
|
||||
}
|
||||
|
||||
// generateFollowGraph generates the random follow graph with deterministic PRNG
|
||||
func (g *GraphTraversalBenchmark) generateFollowGraph() {
|
||||
fmt.Printf("Generating follow graph (1-%d follows per pubkey)...\n", GraphBenchMaxFollows)
|
||||
start := time.Now()
|
||||
|
||||
// Reset RNG to ensure deterministic follow graph
|
||||
g.initializeDeterministicRNG()
|
||||
// Skip the bytes used for pubkey generation
|
||||
skipBuf := make([]byte, 32*GraphBenchNumPubkeys)
|
||||
g.rng.Read(skipBuf)
|
||||
|
||||
g.follows = make([][]int, GraphBenchNumPubkeys)
|
||||
|
||||
totalFollows := 0
|
||||
for i := 0; i < GraphBenchNumPubkeys; i++ {
|
||||
// Determine number of follows for this pubkey (1 to 1000)
|
||||
numFollows := int(g.rng.Uint64n(uint64(GraphBenchMaxFollows-GraphBenchMinFollows+1))) + GraphBenchMinFollows
|
||||
|
||||
// Generate random follow indices (excluding self)
|
||||
followSet := make(map[int]struct{})
|
||||
for len(followSet) < numFollows {
|
||||
followIdx := int(g.rng.Uint64n(uint64(GraphBenchNumPubkeys)))
|
||||
if followIdx != i {
|
||||
followSet[followIdx] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to slice
|
||||
g.follows[i] = make([]int, 0, numFollows)
|
||||
for idx := range followSet {
|
||||
g.follows[i] = append(g.follows[i], idx)
|
||||
}
|
||||
totalFollows += numFollows
|
||||
|
||||
if (i+1)%10000 == 0 {
|
||||
fmt.Printf(" Generated follow lists for %d/%d pubkeys...\n", i+1, GraphBenchNumPubkeys)
|
||||
}
|
||||
}
|
||||
|
||||
avgFollows := float64(totalFollows) / float64(GraphBenchNumPubkeys)
|
||||
fmt.Printf("Generated follow graph in %v (avg %.1f follows/pubkey, total %d follows)\n",
|
||||
time.Since(start), avgFollows, totalFollows)
|
||||
}
|
||||
|
||||
// createFollowListEvents creates kind 3 follow list events in the database
|
||||
func (g *GraphTraversalBenchmark) createFollowListEvents() {
|
||||
fmt.Println("Creating follow list events in database...")
|
||||
start := time.Now()
|
||||
|
||||
ctx := context.Background()
|
||||
baseTime := time.Now().Unix()
|
||||
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
var successCount, errorCount int64
|
||||
latencies := make([]time.Duration, 0, GraphBenchNumPubkeys)
|
||||
|
||||
// Use worker pool for parallel event creation
|
||||
numWorkers := g.config.ConcurrentWorkers
|
||||
if numWorkers < 1 {
|
||||
numWorkers = 4
|
||||
}
|
||||
|
||||
workChan := make(chan int, numWorkers*2)
|
||||
|
||||
// Rate limiter: cap at 20,000 events/second
|
||||
perWorkerRate := 20000.0 / float64(numWorkers)
|
||||
|
||||
for w := 0; w < numWorkers; w++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
workerLimiter := NewRateLimiter(perWorkerRate)
|
||||
|
||||
for i := range workChan {
|
||||
workerLimiter.Wait()
|
||||
|
||||
ev := event.New()
|
||||
ev.Kind = kind.FollowList.K
|
||||
ev.CreatedAt = baseTime + int64(i)
|
||||
ev.Content = []byte("")
|
||||
ev.Tags = tag.NewS()
|
||||
|
||||
// Add p tags for all follows
|
||||
for _, followIdx := range g.follows[i] {
|
||||
pubkeyHex := hex.Enc(g.pubkeys[followIdx])
|
||||
ev.Tags.Append(tag.NewFromAny("p", pubkeyHex))
|
||||
}
|
||||
|
||||
// Sign the event
|
||||
if err := ev.Sign(g.signers[i]); err != nil {
|
||||
mu.Lock()
|
||||
errorCount++
|
||||
mu.Unlock()
|
||||
ev.Free()
|
||||
continue
|
||||
}
|
||||
|
||||
// Save to database
|
||||
eventStart := time.Now()
|
||||
_, err := g.db.SaveEvent(ctx, ev)
|
||||
latency := time.Since(eventStart)
|
||||
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
errorCount++
|
||||
} else {
|
||||
successCount++
|
||||
latencies = append(latencies, latency)
|
||||
}
|
||||
mu.Unlock()
|
||||
|
||||
ev.Free()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Send work
|
||||
for i := 0; i < GraphBenchNumPubkeys; i++ {
|
||||
workChan <- i
|
||||
if (i+1)%10000 == 0 {
|
||||
fmt.Printf(" Queued %d/%d follow list events...\n", i+1, GraphBenchNumPubkeys)
|
||||
}
|
||||
}
|
||||
close(workChan)
|
||||
wg.Wait()
|
||||
|
||||
duration := time.Since(start)
|
||||
eventsPerSec := float64(successCount) / duration.Seconds()
|
||||
|
||||
// Calculate latency stats
|
||||
var avgLatency, p95Latency, p99Latency time.Duration
|
||||
if len(latencies) > 0 {
|
||||
sort.Slice(latencies, func(i, j int) bool { return latencies[i] < latencies[j] })
|
||||
avgLatency = calculateAvgLatency(latencies)
|
||||
p95Latency = calculatePercentileLatency(latencies, 0.95)
|
||||
p99Latency = calculatePercentileLatency(latencies, 0.99)
|
||||
}
|
||||
|
||||
fmt.Printf("Created %d follow list events in %v (%.2f events/sec, errors: %d)\n",
|
||||
successCount, duration, eventsPerSec, errorCount)
|
||||
fmt.Printf(" Avg latency: %v, P95: %v, P99: %v\n", avgLatency, p95Latency, p99Latency)
|
||||
|
||||
// Record result for event creation phase
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Graph Setup (Follow Lists)",
|
||||
Duration: duration,
|
||||
TotalEvents: int(successCount),
|
||||
EventsPerSecond: eventsPerSec,
|
||||
AvgLatency: avgLatency,
|
||||
P95Latency: p95Latency,
|
||||
P99Latency: p99Latency,
|
||||
ConcurrentWorkers: numWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
SuccessRate: float64(successCount) / float64(GraphBenchNumPubkeys) * 100,
|
||||
}
|
||||
|
||||
g.mu.Lock()
|
||||
g.results = append(g.results, result)
|
||||
g.mu.Unlock()
|
||||
}
|
||||
|
||||
// runThirdDegreeTraversal runs the third-degree graph traversal benchmark
|
||||
func (g *GraphTraversalBenchmark) runThirdDegreeTraversal() {
|
||||
fmt.Printf("\n=== Third-Degree Graph Traversal Benchmark ===\n")
|
||||
fmt.Printf("Traversing 3 degrees of follows for each of %d pubkeys...\n", GraphBenchNumPubkeys)
|
||||
|
||||
start := time.Now()
|
||||
ctx := context.Background()
|
||||
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
var totalQueries int64
|
||||
var totalPubkeysFound int64
|
||||
queryLatencies := make([]time.Duration, 0, GraphBenchNumPubkeys*3)
|
||||
traversalLatencies := make([]time.Duration, 0, GraphBenchNumPubkeys)
|
||||
|
||||
// Sample a subset for detailed traversal (full 100k would take too long)
|
||||
sampleSize := 1000
|
||||
if sampleSize > GraphBenchNumPubkeys {
|
||||
sampleSize = GraphBenchNumPubkeys
|
||||
}
|
||||
|
||||
// Deterministic sampling
|
||||
g.initializeDeterministicRNG()
|
||||
sampleIndices := make([]int, sampleSize)
|
||||
for i := 0; i < sampleSize; i++ {
|
||||
sampleIndices[i] = int(g.rng.Uint64n(uint64(GraphBenchNumPubkeys)))
|
||||
}
|
||||
|
||||
fmt.Printf("Sampling %d pubkeys for traversal...\n", sampleSize)
|
||||
|
||||
numWorkers := g.config.ConcurrentWorkers
|
||||
if numWorkers < 1 {
|
||||
numWorkers = 4
|
||||
}
|
||||
|
||||
workChan := make(chan int, numWorkers*2)
|
||||
|
||||
for w := 0; w < numWorkers; w++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
for startIdx := range workChan {
|
||||
traversalStart := time.Now()
|
||||
foundPubkeys := make(map[string]struct{})
|
||||
|
||||
// Start with the initial pubkey
|
||||
currentLevel := [][]byte{g.pubkeys[startIdx]}
|
||||
startPubkeyHex := hex.Enc(g.pubkeys[startIdx])
|
||||
foundPubkeys[startPubkeyHex] = struct{}{}
|
||||
|
||||
// Traverse 3 degrees
|
||||
for depth := 0; depth < GraphBenchTraversalDepth; depth++ {
|
||||
if len(currentLevel) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
nextLevel := make([][]byte, 0)
|
||||
|
||||
// Query follow lists for all pubkeys at current level
|
||||
// Batch queries for efficiency
|
||||
batchSize := 100
|
||||
for batchStart := 0; batchStart < len(currentLevel); batchStart += batchSize {
|
||||
batchEnd := batchStart + batchSize
|
||||
if batchEnd > len(currentLevel) {
|
||||
batchEnd = len(currentLevel)
|
||||
}
|
||||
|
||||
batch := currentLevel[batchStart:batchEnd]
|
||||
|
||||
// Build filter for kind 3 events from these pubkeys
|
||||
f := filter.New()
|
||||
f.Kinds = kind.NewS(kind.FollowList)
|
||||
f.Authors = tag.NewWithCap(len(batch))
|
||||
for _, pk := range batch {
|
||||
// Authors.T expects raw byte slices (pubkeys)
|
||||
f.Authors.T = append(f.Authors.T, pk)
|
||||
}
|
||||
|
||||
queryStart := time.Now()
|
||||
events, err := g.db.QueryEvents(ctx, f)
|
||||
queryLatency := time.Since(queryStart)
|
||||
|
||||
mu.Lock()
|
||||
totalQueries++
|
||||
queryLatencies = append(queryLatencies, queryLatency)
|
||||
mu.Unlock()
|
||||
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract followed pubkeys from p tags
|
||||
for _, ev := range events {
|
||||
for _, t := range *ev.Tags {
|
||||
if len(t.T) >= 2 && string(t.T[0]) == "p" {
|
||||
pubkeyHex := string(t.ValueHex())
|
||||
if _, exists := foundPubkeys[pubkeyHex]; !exists {
|
||||
foundPubkeys[pubkeyHex] = struct{}{}
|
||||
// Decode hex to bytes for next level
|
||||
if pkBytes, err := hex.Dec(pubkeyHex); err == nil {
|
||||
nextLevel = append(nextLevel, pkBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
ev.Free()
|
||||
}
|
||||
}
|
||||
|
||||
currentLevel = nextLevel
|
||||
}
|
||||
|
||||
traversalLatency := time.Since(traversalStart)
|
||||
|
||||
mu.Lock()
|
||||
totalPubkeysFound += int64(len(foundPubkeys))
|
||||
traversalLatencies = append(traversalLatencies, traversalLatency)
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Send work
|
||||
for _, idx := range sampleIndices {
|
||||
workChan <- idx
|
||||
}
|
||||
close(workChan)
|
||||
wg.Wait()
|
||||
|
||||
duration := time.Since(start)
|
||||
|
||||
// Calculate statistics
|
||||
var avgQueryLatency, p95QueryLatency, p99QueryLatency time.Duration
|
||||
if len(queryLatencies) > 0 {
|
||||
sort.Slice(queryLatencies, func(i, j int) bool { return queryLatencies[i] < queryLatencies[j] })
|
||||
avgQueryLatency = calculateAvgLatency(queryLatencies)
|
||||
p95QueryLatency = calculatePercentileLatency(queryLatencies, 0.95)
|
||||
p99QueryLatency = calculatePercentileLatency(queryLatencies, 0.99)
|
||||
}
|
||||
|
||||
var avgTraversalLatency, p95TraversalLatency, p99TraversalLatency time.Duration
|
||||
if len(traversalLatencies) > 0 {
|
||||
sort.Slice(traversalLatencies, func(i, j int) bool { return traversalLatencies[i] < traversalLatencies[j] })
|
||||
avgTraversalLatency = calculateAvgLatency(traversalLatencies)
|
||||
p95TraversalLatency = calculatePercentileLatency(traversalLatencies, 0.95)
|
||||
p99TraversalLatency = calculatePercentileLatency(traversalLatencies, 0.99)
|
||||
}
|
||||
|
||||
avgPubkeysPerTraversal := float64(totalPubkeysFound) / float64(sampleSize)
|
||||
traversalsPerSec := float64(sampleSize) / duration.Seconds()
|
||||
queriesPerSec := float64(totalQueries) / duration.Seconds()
|
||||
|
||||
fmt.Printf("\n=== Graph Traversal Results ===\n")
|
||||
fmt.Printf("Traversals completed: %d\n", sampleSize)
|
||||
fmt.Printf("Total queries: %d (%.2f queries/sec)\n", totalQueries, queriesPerSec)
|
||||
fmt.Printf("Avg pubkeys found per traversal: %.1f\n", avgPubkeysPerTraversal)
|
||||
fmt.Printf("Total duration: %v\n", duration)
|
||||
fmt.Printf("\nQuery Latencies:\n")
|
||||
fmt.Printf(" Avg: %v, P95: %v, P99: %v\n", avgQueryLatency, p95QueryLatency, p99QueryLatency)
|
||||
fmt.Printf("\nFull Traversal Latencies (3 degrees):\n")
|
||||
fmt.Printf(" Avg: %v, P95: %v, P99: %v\n", avgTraversalLatency, p95TraversalLatency, p99TraversalLatency)
|
||||
fmt.Printf("Traversals/sec: %.2f\n", traversalsPerSec)
|
||||
|
||||
// Record result for traversal phase
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Graph Traversal (3 Degrees)",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalQueries),
|
||||
EventsPerSecond: traversalsPerSec,
|
||||
AvgLatency: avgTraversalLatency,
|
||||
P90Latency: calculatePercentileLatency(traversalLatencies, 0.90),
|
||||
P95Latency: p95TraversalLatency,
|
||||
P99Latency: p99TraversalLatency,
|
||||
Bottom10Avg: calculateBottom10Avg(traversalLatencies),
|
||||
ConcurrentWorkers: numWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
SuccessRate: 100.0,
|
||||
}
|
||||
|
||||
g.mu.Lock()
|
||||
g.results = append(g.results, result)
|
||||
g.mu.Unlock()
|
||||
|
||||
// Also record query performance separately
|
||||
queryResult := &BenchmarkResult{
|
||||
TestName: "Graph Queries (Follow Lists)",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalQueries),
|
||||
EventsPerSecond: queriesPerSec,
|
||||
AvgLatency: avgQueryLatency,
|
||||
P90Latency: calculatePercentileLatency(queryLatencies, 0.90),
|
||||
P95Latency: p95QueryLatency,
|
||||
P99Latency: p99QueryLatency,
|
||||
Bottom10Avg: calculateBottom10Avg(queryLatencies),
|
||||
ConcurrentWorkers: numWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
SuccessRate: 100.0,
|
||||
}
|
||||
|
||||
g.mu.Lock()
|
||||
g.results = append(g.results, queryResult)
|
||||
g.mu.Unlock()
|
||||
}
|
||||
|
||||
// RunSuite runs the complete graph traversal benchmark suite
|
||||
func (g *GraphTraversalBenchmark) RunSuite() {
|
||||
fmt.Println("\n╔════════════════════════════════════════════════════════╗")
|
||||
fmt.Println("║ GRAPH TRAVERSAL BENCHMARK (100k Pubkeys) ║")
|
||||
fmt.Println("╚════════════════════════════════════════════════════════╝")
|
||||
|
||||
// Step 1: Generate pubkeys
|
||||
g.generatePubkeys()
|
||||
|
||||
// Step 2: Generate follow graph
|
||||
g.generateFollowGraph()
|
||||
|
||||
// Step 3: Create follow list events in database
|
||||
g.createFollowListEvents()
|
||||
|
||||
// Step 4: Run third-degree traversal benchmark
|
||||
g.runThirdDegreeTraversal()
|
||||
|
||||
fmt.Printf("\n=== Graph Traversal Benchmark Complete ===\n\n")
|
||||
}
|
||||
|
||||
// GetResults returns the benchmark results
|
||||
func (g *GraphTraversalBenchmark) GetResults() []*BenchmarkResult {
|
||||
g.mu.RLock()
|
||||
defer g.mu.RUnlock()
|
||||
return g.results
|
||||
}
|
||||
|
||||
// PrintResults prints the benchmark results
|
||||
func (g *GraphTraversalBenchmark) PrintResults() {
|
||||
g.mu.RLock()
|
||||
defer g.mu.RUnlock()
|
||||
|
||||
for _, result := range g.results {
|
||||
fmt.Printf("\nTest: %s\n", result.TestName)
|
||||
fmt.Printf("Duration: %v\n", result.Duration)
|
||||
fmt.Printf("Total Events/Queries: %d\n", result.TotalEvents)
|
||||
fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
|
||||
fmt.Printf("Success Rate: %.1f%%\n", result.SuccessRate)
|
||||
fmt.Printf("Concurrent Workers: %d\n", result.ConcurrentWorkers)
|
||||
fmt.Printf("Memory Used: %d MB\n", result.MemoryUsed/(1024*1024))
|
||||
fmt.Printf("Avg Latency: %v\n", result.AvgLatency)
|
||||
fmt.Printf("P90 Latency: %v\n", result.P90Latency)
|
||||
fmt.Printf("P95 Latency: %v\n", result.P95Latency)
|
||||
fmt.Printf("P99 Latency: %v\n", result.P99Latency)
|
||||
fmt.Printf("Bottom 10%% Avg Latency: %v\n", result.Bottom10Avg)
|
||||
}
|
||||
}
|
||||
572
cmd/benchmark/graph_traversal_network.go
Normal file
572
cmd/benchmark/graph_traversal_network.go
Normal file
@@ -0,0 +1,572 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/envelopes/eventenvelope"
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/filter"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
"git.mleku.dev/mleku/nostr/encoders/kind"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
"git.mleku.dev/mleku/nostr/ws"
|
||||
"lukechampine.com/frand"
|
||||
)
|
||||
|
||||
// NetworkGraphTraversalBenchmark benchmarks graph traversal using NIP-01 queries over WebSocket
|
||||
type NetworkGraphTraversalBenchmark struct {
|
||||
relayURL string
|
||||
relay *ws.Client
|
||||
results []*BenchmarkResult
|
||||
mu sync.RWMutex
|
||||
workers int
|
||||
|
||||
// Cached data for the benchmark
|
||||
pubkeys [][]byte // 100k pubkeys as 32-byte arrays
|
||||
signers []*p8k.Signer // signers for each pubkey
|
||||
follows [][]int // follows[i] = list of indices that pubkey[i] follows
|
||||
rng *frand.RNG // deterministic PRNG
|
||||
}
|
||||
|
||||
// NewNetworkGraphTraversalBenchmark creates a new network graph traversal benchmark
|
||||
func NewNetworkGraphTraversalBenchmark(relayURL string, workers int) *NetworkGraphTraversalBenchmark {
|
||||
return &NetworkGraphTraversalBenchmark{
|
||||
relayURL: relayURL,
|
||||
workers: workers,
|
||||
results: make([]*BenchmarkResult, 0),
|
||||
rng: frand.NewCustom(make([]byte, 32), 1024, 12), // ChaCha12 with seed buffer
|
||||
}
|
||||
}
|
||||
|
||||
// Connect establishes WebSocket connection to the relay
|
||||
func (n *NetworkGraphTraversalBenchmark) Connect(ctx context.Context) error {
|
||||
var err error
|
||||
n.relay, err = ws.RelayConnect(ctx, n.relayURL)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect to relay %s: %w", n.relayURL, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the relay connection
|
||||
func (n *NetworkGraphTraversalBenchmark) Close() {
|
||||
if n.relay != nil {
|
||||
n.relay.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// initializeDeterministicRNG initializes the PRNG with deterministic seed
|
||||
func (n *NetworkGraphTraversalBenchmark) initializeDeterministicRNG() {
|
||||
// Create seed buffer from GraphBenchSeed (uint64 spread across 8 bytes)
|
||||
seedBuf := make([]byte, 32)
|
||||
seed := GraphBenchSeed
|
||||
seedBuf[0] = byte(seed >> 56)
|
||||
seedBuf[1] = byte(seed >> 48)
|
||||
seedBuf[2] = byte(seed >> 40)
|
||||
seedBuf[3] = byte(seed >> 32)
|
||||
seedBuf[4] = byte(seed >> 24)
|
||||
seedBuf[5] = byte(seed >> 16)
|
||||
seedBuf[6] = byte(seed >> 8)
|
||||
seedBuf[7] = byte(seed)
|
||||
n.rng = frand.NewCustom(seedBuf, 1024, 12)
|
||||
}
|
||||
|
||||
// generatePubkeys generates deterministic pubkeys using frand
|
||||
func (n *NetworkGraphTraversalBenchmark) generatePubkeys() {
|
||||
fmt.Printf("Generating %d deterministic pubkeys...\n", GraphBenchNumPubkeys)
|
||||
start := time.Now()
|
||||
|
||||
n.initializeDeterministicRNG()
|
||||
n.pubkeys = make([][]byte, GraphBenchNumPubkeys)
|
||||
n.signers = make([]*p8k.Signer, GraphBenchNumPubkeys)
|
||||
|
||||
for i := 0; i < GraphBenchNumPubkeys; i++ {
|
||||
// Generate deterministic 32-byte secret key from PRNG
|
||||
secretKey := make([]byte, 32)
|
||||
n.rng.Read(secretKey)
|
||||
|
||||
// Create signer from secret key
|
||||
signer := p8k.MustNew()
|
||||
if err := signer.InitSec(secretKey); err != nil {
|
||||
panic(fmt.Sprintf("failed to init signer %d: %v", i, err))
|
||||
}
|
||||
|
||||
n.signers[i] = signer
|
||||
n.pubkeys[i] = make([]byte, 32)
|
||||
copy(n.pubkeys[i], signer.Pub())
|
||||
|
||||
if (i+1)%10000 == 0 {
|
||||
fmt.Printf(" Generated %d/%d pubkeys...\n", i+1, GraphBenchNumPubkeys)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Generated %d pubkeys in %v\n", GraphBenchNumPubkeys, time.Since(start))
|
||||
}
|
||||
|
||||
// generateFollowGraph generates the random follow graph with deterministic PRNG
|
||||
func (n *NetworkGraphTraversalBenchmark) generateFollowGraph() {
|
||||
fmt.Printf("Generating follow graph (1-%d follows per pubkey)...\n", GraphBenchMaxFollows)
|
||||
start := time.Now()
|
||||
|
||||
// Reset RNG to ensure deterministic follow graph
|
||||
n.initializeDeterministicRNG()
|
||||
// Skip the bytes used for pubkey generation
|
||||
skipBuf := make([]byte, 32*GraphBenchNumPubkeys)
|
||||
n.rng.Read(skipBuf)
|
||||
|
||||
n.follows = make([][]int, GraphBenchNumPubkeys)
|
||||
|
||||
totalFollows := 0
|
||||
for i := 0; i < GraphBenchNumPubkeys; i++ {
|
||||
// Determine number of follows for this pubkey (1 to 1000)
|
||||
numFollows := int(n.rng.Uint64n(uint64(GraphBenchMaxFollows-GraphBenchMinFollows+1))) + GraphBenchMinFollows
|
||||
|
||||
// Generate random follow indices (excluding self)
|
||||
followSet := make(map[int]struct{})
|
||||
for len(followSet) < numFollows {
|
||||
followIdx := int(n.rng.Uint64n(uint64(GraphBenchNumPubkeys)))
|
||||
if followIdx != i {
|
||||
followSet[followIdx] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Convert to slice
|
||||
n.follows[i] = make([]int, 0, numFollows)
|
||||
for idx := range followSet {
|
||||
n.follows[i] = append(n.follows[i], idx)
|
||||
}
|
||||
totalFollows += numFollows
|
||||
|
||||
if (i+1)%10000 == 0 {
|
||||
fmt.Printf(" Generated follow lists for %d/%d pubkeys...\n", i+1, GraphBenchNumPubkeys)
|
||||
}
|
||||
}
|
||||
|
||||
avgFollows := float64(totalFollows) / float64(GraphBenchNumPubkeys)
|
||||
fmt.Printf("Generated follow graph in %v (avg %.1f follows/pubkey, total %d follows)\n",
|
||||
time.Since(start), avgFollows, totalFollows)
|
||||
}
|
||||
|
||||
// createFollowListEvents creates kind 3 follow list events via WebSocket
|
||||
func (n *NetworkGraphTraversalBenchmark) createFollowListEvents(ctx context.Context) {
|
||||
fmt.Println("Creating follow list events via WebSocket...")
|
||||
start := time.Now()
|
||||
|
||||
baseTime := time.Now().Unix()
|
||||
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
var successCount, errorCount int64
|
||||
latencies := make([]time.Duration, 0, GraphBenchNumPubkeys)
|
||||
|
||||
// Use worker pool for parallel event creation
|
||||
numWorkers := n.workers
|
||||
if numWorkers < 1 {
|
||||
numWorkers = 4
|
||||
}
|
||||
|
||||
workChan := make(chan int, numWorkers*2)
|
||||
|
||||
// Rate limiter: cap at 1000 events/second per relay (to avoid overwhelming)
|
||||
perWorkerRate := 1000.0 / float64(numWorkers)
|
||||
|
||||
for w := 0; w < numWorkers; w++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
workerLimiter := NewRateLimiter(perWorkerRate)
|
||||
|
||||
for i := range workChan {
|
||||
workerLimiter.Wait()
|
||||
|
||||
ev := event.New()
|
||||
ev.Kind = kind.FollowList.K
|
||||
ev.CreatedAt = baseTime + int64(i)
|
||||
ev.Content = []byte("")
|
||||
ev.Tags = tag.NewS()
|
||||
|
||||
// Add p tags for all follows
|
||||
for _, followIdx := range n.follows[i] {
|
||||
pubkeyHex := hex.Enc(n.pubkeys[followIdx])
|
||||
ev.Tags.Append(tag.NewFromAny("p", pubkeyHex))
|
||||
}
|
||||
|
||||
// Sign the event
|
||||
if err := ev.Sign(n.signers[i]); err != nil {
|
||||
mu.Lock()
|
||||
errorCount++
|
||||
mu.Unlock()
|
||||
ev.Free()
|
||||
continue
|
||||
}
|
||||
|
||||
// Publish via WebSocket
|
||||
eventStart := time.Now()
|
||||
errCh := n.relay.Write(eventenvelope.NewSubmissionWith(ev).Marshal(nil))
|
||||
|
||||
// Wait for write to complete
|
||||
select {
|
||||
case err := <-errCh:
|
||||
latency := time.Since(eventStart)
|
||||
mu.Lock()
|
||||
if err != nil {
|
||||
errorCount++
|
||||
} else {
|
||||
successCount++
|
||||
latencies = append(latencies, latency)
|
||||
}
|
||||
mu.Unlock()
|
||||
case <-ctx.Done():
|
||||
mu.Lock()
|
||||
errorCount++
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
ev.Free()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Send work
|
||||
for i := 0; i < GraphBenchNumPubkeys; i++ {
|
||||
workChan <- i
|
||||
if (i+1)%10000 == 0 {
|
||||
fmt.Printf(" Queued %d/%d follow list events...\n", i+1, GraphBenchNumPubkeys)
|
||||
}
|
||||
}
|
||||
close(workChan)
|
||||
wg.Wait()
|
||||
|
||||
duration := time.Since(start)
|
||||
eventsPerSec := float64(successCount) / duration.Seconds()
|
||||
|
||||
// Calculate latency stats
|
||||
var avgLatency, p90Latency, p95Latency, p99Latency time.Duration
|
||||
if len(latencies) > 0 {
|
||||
sort.Slice(latencies, func(i, j int) bool { return latencies[i] < latencies[j] })
|
||||
avgLatency = calculateAvgLatency(latencies)
|
||||
p90Latency = calculatePercentileLatency(latencies, 0.90)
|
||||
p95Latency = calculatePercentileLatency(latencies, 0.95)
|
||||
p99Latency = calculatePercentileLatency(latencies, 0.99)
|
||||
}
|
||||
|
||||
fmt.Printf("Created %d follow list events in %v (%.2f events/sec, errors: %d)\n",
|
||||
successCount, duration, eventsPerSec, errorCount)
|
||||
fmt.Printf(" Avg latency: %v, P95: %v, P99: %v\n", avgLatency, p95Latency, p99Latency)
|
||||
|
||||
// Record result for event creation phase
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Graph Setup (Follow Lists)",
|
||||
Duration: duration,
|
||||
TotalEvents: int(successCount),
|
||||
EventsPerSecond: eventsPerSec,
|
||||
AvgLatency: avgLatency,
|
||||
P90Latency: p90Latency,
|
||||
P95Latency: p95Latency,
|
||||
P99Latency: p99Latency,
|
||||
Bottom10Avg: calculateBottom10Avg(latencies),
|
||||
ConcurrentWorkers: numWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
SuccessRate: float64(successCount) / float64(GraphBenchNumPubkeys) * 100,
|
||||
}
|
||||
|
||||
n.mu.Lock()
|
||||
n.results = append(n.results, result)
|
||||
n.mu.Unlock()
|
||||
}
|
||||
|
||||
// runThirdDegreeTraversal runs the third-degree graph traversal benchmark via WebSocket
|
||||
func (n *NetworkGraphTraversalBenchmark) runThirdDegreeTraversal(ctx context.Context) {
|
||||
fmt.Printf("\n=== Third-Degree Graph Traversal Benchmark (Network) ===\n")
|
||||
fmt.Printf("Traversing 3 degrees of follows via WebSocket...\n")
|
||||
|
||||
start := time.Now()
|
||||
|
||||
var mu sync.Mutex
|
||||
var wg sync.WaitGroup
|
||||
var totalQueries int64
|
||||
var totalPubkeysFound int64
|
||||
queryLatencies := make([]time.Duration, 0, 10000)
|
||||
traversalLatencies := make([]time.Duration, 0, 1000)
|
||||
|
||||
// Sample a subset for detailed traversal
|
||||
sampleSize := 1000
|
||||
if sampleSize > GraphBenchNumPubkeys {
|
||||
sampleSize = GraphBenchNumPubkeys
|
||||
}
|
||||
|
||||
// Deterministic sampling
|
||||
n.initializeDeterministicRNG()
|
||||
sampleIndices := make([]int, sampleSize)
|
||||
for i := 0; i < sampleSize; i++ {
|
||||
sampleIndices[i] = int(n.rng.Uint64n(uint64(GraphBenchNumPubkeys)))
|
||||
}
|
||||
|
||||
fmt.Printf("Sampling %d pubkeys for traversal...\n", sampleSize)
|
||||
|
||||
numWorkers := n.workers
|
||||
if numWorkers < 1 {
|
||||
numWorkers = 4
|
||||
}
|
||||
|
||||
workChan := make(chan int, numWorkers*2)
|
||||
|
||||
for w := 0; w < numWorkers; w++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
for startIdx := range workChan {
|
||||
traversalStart := time.Now()
|
||||
foundPubkeys := make(map[string]struct{})
|
||||
|
||||
// Start with the initial pubkey
|
||||
currentLevel := [][]byte{n.pubkeys[startIdx]}
|
||||
startPubkeyHex := hex.Enc(n.pubkeys[startIdx])
|
||||
foundPubkeys[startPubkeyHex] = struct{}{}
|
||||
|
||||
// Traverse 3 degrees
|
||||
for depth := 0; depth < GraphBenchTraversalDepth; depth++ {
|
||||
if len(currentLevel) == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
nextLevel := make([][]byte, 0)
|
||||
|
||||
// Query follow lists for all pubkeys at current level
|
||||
// Batch queries for efficiency
|
||||
batchSize := 50
|
||||
for batchStart := 0; batchStart < len(currentLevel); batchStart += batchSize {
|
||||
batchEnd := batchStart + batchSize
|
||||
if batchEnd > len(currentLevel) {
|
||||
batchEnd = len(currentLevel)
|
||||
}
|
||||
|
||||
batch := currentLevel[batchStart:batchEnd]
|
||||
|
||||
// Build filter for kind 3 events from these pubkeys
|
||||
f := filter.New()
|
||||
f.Kinds = kind.NewS(kind.FollowList)
|
||||
f.Authors = tag.NewWithCap(len(batch))
|
||||
for _, pk := range batch {
|
||||
f.Authors.T = append(f.Authors.T, pk)
|
||||
}
|
||||
|
||||
queryStart := time.Now()
|
||||
|
||||
// Subscribe and collect results
|
||||
sub, err := n.relay.Subscribe(ctx, filter.NewS(f))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Collect events with timeout
|
||||
timeout := time.After(5 * time.Second)
|
||||
events := make([]*event.E, 0)
|
||||
collectLoop:
|
||||
for {
|
||||
select {
|
||||
case ev := <-sub.Events:
|
||||
if ev != nil {
|
||||
events = append(events, ev)
|
||||
}
|
||||
case <-sub.EndOfStoredEvents:
|
||||
break collectLoop
|
||||
case <-timeout:
|
||||
break collectLoop
|
||||
case <-ctx.Done():
|
||||
break collectLoop
|
||||
}
|
||||
}
|
||||
sub.Unsub()
|
||||
|
||||
queryLatency := time.Since(queryStart)
|
||||
|
||||
mu.Lock()
|
||||
totalQueries++
|
||||
queryLatencies = append(queryLatencies, queryLatency)
|
||||
mu.Unlock()
|
||||
|
||||
// Extract followed pubkeys from p tags
|
||||
for _, ev := range events {
|
||||
for _, t := range *ev.Tags {
|
||||
if len(t.T) >= 2 && string(t.T[0]) == "p" {
|
||||
pubkeyHex := string(t.ValueHex())
|
||||
if _, exists := foundPubkeys[pubkeyHex]; !exists {
|
||||
foundPubkeys[pubkeyHex] = struct{}{}
|
||||
// Decode hex to bytes for next level
|
||||
if pkBytes, err := hex.Dec(pubkeyHex); err == nil {
|
||||
nextLevel = append(nextLevel, pkBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
ev.Free()
|
||||
}
|
||||
}
|
||||
|
||||
currentLevel = nextLevel
|
||||
}
|
||||
|
||||
traversalLatency := time.Since(traversalStart)
|
||||
|
||||
mu.Lock()
|
||||
totalPubkeysFound += int64(len(foundPubkeys))
|
||||
traversalLatencies = append(traversalLatencies, traversalLatency)
|
||||
mu.Unlock()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Send work
|
||||
for _, idx := range sampleIndices {
|
||||
workChan <- idx
|
||||
}
|
||||
close(workChan)
|
||||
wg.Wait()
|
||||
|
||||
duration := time.Since(start)
|
||||
|
||||
// Calculate statistics
|
||||
var avgQueryLatency, p90QueryLatency, p95QueryLatency, p99QueryLatency time.Duration
|
||||
if len(queryLatencies) > 0 {
|
||||
sort.Slice(queryLatencies, func(i, j int) bool { return queryLatencies[i] < queryLatencies[j] })
|
||||
avgQueryLatency = calculateAvgLatency(queryLatencies)
|
||||
p90QueryLatency = calculatePercentileLatency(queryLatencies, 0.90)
|
||||
p95QueryLatency = calculatePercentileLatency(queryLatencies, 0.95)
|
||||
p99QueryLatency = calculatePercentileLatency(queryLatencies, 0.99)
|
||||
}
|
||||
|
||||
var avgTraversalLatency, p90TraversalLatency, p95TraversalLatency, p99TraversalLatency time.Duration
|
||||
if len(traversalLatencies) > 0 {
|
||||
sort.Slice(traversalLatencies, func(i, j int) bool { return traversalLatencies[i] < traversalLatencies[j] })
|
||||
avgTraversalLatency = calculateAvgLatency(traversalLatencies)
|
||||
p90TraversalLatency = calculatePercentileLatency(traversalLatencies, 0.90)
|
||||
p95TraversalLatency = calculatePercentileLatency(traversalLatencies, 0.95)
|
||||
p99TraversalLatency = calculatePercentileLatency(traversalLatencies, 0.99)
|
||||
}
|
||||
|
||||
avgPubkeysPerTraversal := float64(totalPubkeysFound) / float64(sampleSize)
|
||||
traversalsPerSec := float64(sampleSize) / duration.Seconds()
|
||||
queriesPerSec := float64(totalQueries) / duration.Seconds()
|
||||
|
||||
fmt.Printf("\n=== Graph Traversal Results (Network) ===\n")
|
||||
fmt.Printf("Traversals completed: %d\n", sampleSize)
|
||||
fmt.Printf("Total queries: %d (%.2f queries/sec)\n", totalQueries, queriesPerSec)
|
||||
fmt.Printf("Avg pubkeys found per traversal: %.1f\n", avgPubkeysPerTraversal)
|
||||
fmt.Printf("Total duration: %v\n", duration)
|
||||
fmt.Printf("\nQuery Latencies:\n")
|
||||
fmt.Printf(" Avg: %v, P95: %v, P99: %v\n", avgQueryLatency, p95QueryLatency, p99QueryLatency)
|
||||
fmt.Printf("\nFull Traversal Latencies (3 degrees):\n")
|
||||
fmt.Printf(" Avg: %v, P95: %v, P99: %v\n", avgTraversalLatency, p95TraversalLatency, p99TraversalLatency)
|
||||
fmt.Printf("Traversals/sec: %.2f\n", traversalsPerSec)
|
||||
|
||||
// Record result for traversal phase
|
||||
result := &BenchmarkResult{
|
||||
TestName: "Graph Traversal (3 Degrees)",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalQueries),
|
||||
EventsPerSecond: traversalsPerSec,
|
||||
AvgLatency: avgTraversalLatency,
|
||||
P90Latency: p90TraversalLatency,
|
||||
P95Latency: p95TraversalLatency,
|
||||
P99Latency: p99TraversalLatency,
|
||||
Bottom10Avg: calculateBottom10Avg(traversalLatencies),
|
||||
ConcurrentWorkers: numWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
SuccessRate: 100.0,
|
||||
}
|
||||
|
||||
n.mu.Lock()
|
||||
n.results = append(n.results, result)
|
||||
n.mu.Unlock()
|
||||
|
||||
// Also record query performance separately
|
||||
queryResult := &BenchmarkResult{
|
||||
TestName: "Graph Queries (Follow Lists)",
|
||||
Duration: duration,
|
||||
TotalEvents: int(totalQueries),
|
||||
EventsPerSecond: queriesPerSec,
|
||||
AvgLatency: avgQueryLatency,
|
||||
P90Latency: p90QueryLatency,
|
||||
P95Latency: p95QueryLatency,
|
||||
P99Latency: p99QueryLatency,
|
||||
Bottom10Avg: calculateBottom10Avg(queryLatencies),
|
||||
ConcurrentWorkers: numWorkers,
|
||||
MemoryUsed: getMemUsage(),
|
||||
SuccessRate: 100.0,
|
||||
}
|
||||
|
||||
n.mu.Lock()
|
||||
n.results = append(n.results, queryResult)
|
||||
n.mu.Unlock()
|
||||
}
|
||||
|
||||
// RunSuite runs the complete network graph traversal benchmark suite
|
||||
func (n *NetworkGraphTraversalBenchmark) RunSuite(ctx context.Context) error {
|
||||
fmt.Println("\n╔════════════════════════════════════════════════════════╗")
|
||||
fmt.Println("║ NETWORK GRAPH TRAVERSAL BENCHMARK (100k Pubkeys) ║")
|
||||
fmt.Printf("║ Relay: %-46s ║\n", n.relayURL)
|
||||
fmt.Println("╚════════════════════════════════════════════════════════╝")
|
||||
|
||||
// Step 1: Generate pubkeys
|
||||
n.generatePubkeys()
|
||||
|
||||
// Step 2: Generate follow graph
|
||||
n.generateFollowGraph()
|
||||
|
||||
// Step 3: Connect to relay
|
||||
fmt.Printf("\nConnecting to relay: %s\n", n.relayURL)
|
||||
if err := n.Connect(ctx); err != nil {
|
||||
return fmt.Errorf("failed to connect: %w", err)
|
||||
}
|
||||
defer n.Close()
|
||||
fmt.Println("Connected successfully!")
|
||||
|
||||
// Step 4: Create follow list events via WebSocket
|
||||
n.createFollowListEvents(ctx)
|
||||
|
||||
// Small delay to ensure events are processed
|
||||
fmt.Println("\nWaiting for events to be processed...")
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
// Step 5: Run third-degree traversal benchmark
|
||||
n.runThirdDegreeTraversal(ctx)
|
||||
|
||||
fmt.Printf("\n=== Network Graph Traversal Benchmark Complete ===\n\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetResults returns the benchmark results
|
||||
func (n *NetworkGraphTraversalBenchmark) GetResults() []*BenchmarkResult {
|
||||
n.mu.RLock()
|
||||
defer n.mu.RUnlock()
|
||||
return n.results
|
||||
}
|
||||
|
||||
// PrintResults prints the benchmark results
|
||||
func (n *NetworkGraphTraversalBenchmark) PrintResults() {
|
||||
n.mu.RLock()
|
||||
defer n.mu.RUnlock()
|
||||
|
||||
for _, result := range n.results {
|
||||
fmt.Printf("\nTest: %s\n", result.TestName)
|
||||
fmt.Printf("Duration: %v\n", result.Duration)
|
||||
fmt.Printf("Total Events/Queries: %d\n", result.TotalEvents)
|
||||
fmt.Printf("Events/sec: %.2f\n", result.EventsPerSecond)
|
||||
fmt.Printf("Success Rate: %.1f%%\n", result.SuccessRate)
|
||||
fmt.Printf("Concurrent Workers: %d\n", result.ConcurrentWorkers)
|
||||
fmt.Printf("Memory Used: %d MB\n", result.MemoryUsed/(1024*1024))
|
||||
fmt.Printf("Avg Latency: %v\n", result.AvgLatency)
|
||||
fmt.Printf("P90 Latency: %v\n", result.P90Latency)
|
||||
fmt.Printf("P95 Latency: %v\n", result.P95Latency)
|
||||
fmt.Printf("P99 Latency: %v\n", result.P99Latency)
|
||||
fmt.Printf("Bottom 10%% Avg Latency: %v\n", result.Bottom10Avg)
|
||||
}
|
||||
}
|
||||
@@ -42,9 +42,12 @@ type BenchmarkConfig struct {
|
||||
NetRate int // events/sec per worker
|
||||
|
||||
// Backend selection
|
||||
UseDgraph bool
|
||||
UseNeo4j bool
|
||||
UseRelySQLite bool
|
||||
|
||||
// Graph traversal benchmark
|
||||
UseGraphTraversal bool
|
||||
UseNetworkGraphTraversal bool // Network-mode graph traversal (for multi-relay testing)
|
||||
}
|
||||
|
||||
type BenchmarkResult struct {
|
||||
@@ -109,15 +112,18 @@ func main() {
|
||||
// lol.SetLogLevel("trace")
|
||||
config := parseFlags()
|
||||
|
||||
if config.RelayURL != "" {
|
||||
// Network mode: connect to relay and generate traffic
|
||||
runNetworkLoad(config)
|
||||
if config.UseNetworkGraphTraversal {
|
||||
// Network graph traversal mode: requires relay URL
|
||||
if config.RelayURL == "" {
|
||||
log.Fatal("Network graph traversal benchmark requires -relay-url flag")
|
||||
}
|
||||
runNetworkGraphTraversalBenchmark(config)
|
||||
return
|
||||
}
|
||||
|
||||
if config.UseDgraph {
|
||||
// Run dgraph benchmark
|
||||
runDgraphBenchmark(config)
|
||||
if config.RelayURL != "" {
|
||||
// Network mode: connect to relay and generate traffic
|
||||
runNetworkLoad(config)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -133,6 +139,12 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
if config.UseGraphTraversal {
|
||||
// Run graph traversal benchmark
|
||||
runGraphTraversalBenchmark(config)
|
||||
return
|
||||
}
|
||||
|
||||
// Run standard Badger benchmark
|
||||
fmt.Printf("Starting Nostr Relay Benchmark (Badger Backend)\n")
|
||||
fmt.Printf("Data Directory: %s\n", config.DataDir)
|
||||
@@ -152,28 +164,6 @@ func main() {
|
||||
benchmark.GenerateAsciidocReport()
|
||||
}
|
||||
|
||||
func runDgraphBenchmark(config *BenchmarkConfig) {
|
||||
fmt.Printf("Starting Nostr Relay Benchmark (Dgraph Backend)\n")
|
||||
fmt.Printf("Data Directory: %s\n", config.DataDir)
|
||||
fmt.Printf(
|
||||
"Events: %d, Workers: %d\n",
|
||||
config.NumEvents, config.ConcurrentWorkers,
|
||||
)
|
||||
|
||||
dgraphBench, err := NewDgraphBenchmark(config)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create dgraph benchmark: %v", err)
|
||||
}
|
||||
defer dgraphBench.Close()
|
||||
|
||||
// Run dgraph benchmark suite
|
||||
dgraphBench.RunSuite()
|
||||
|
||||
// Generate reports
|
||||
dgraphBench.GenerateReport()
|
||||
dgraphBench.GenerateAsciidocReport()
|
||||
}
|
||||
|
||||
func runNeo4jBenchmark(config *BenchmarkConfig) {
|
||||
fmt.Printf("Starting Nostr Relay Benchmark (Neo4j Backend)\n")
|
||||
fmt.Printf("Data Directory: %s\n", config.DataDir)
|
||||
@@ -218,6 +208,130 @@ func runRelySQLiteBenchmark(config *BenchmarkConfig) {
|
||||
relysqliteBench.GenerateAsciidocReport()
|
||||
}
|
||||
|
||||
func runGraphTraversalBenchmark(config *BenchmarkConfig) {
|
||||
fmt.Printf("Starting Graph Traversal Benchmark (Badger Backend)\n")
|
||||
fmt.Printf("Data Directory: %s\n", config.DataDir)
|
||||
fmt.Printf("Workers: %d\n", config.ConcurrentWorkers)
|
||||
fmt.Printf("Pubkeys: %d, Follows per pubkey: %d-%d\n",
|
||||
GraphBenchNumPubkeys, GraphBenchMinFollows, GraphBenchMaxFollows)
|
||||
|
||||
// Clean up existing data directory
|
||||
os.RemoveAll(config.DataDir)
|
||||
|
||||
ctx := context.Background()
|
||||
cancel := func() {}
|
||||
|
||||
db, err := database.New(ctx, cancel, config.DataDir, "warn")
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create and run graph traversal benchmark
|
||||
graphBench := NewGraphTraversalBenchmark(config, db)
|
||||
graphBench.RunSuite()
|
||||
|
||||
// Generate reports
|
||||
graphBench.PrintResults()
|
||||
generateGraphTraversalAsciidocReport(config, graphBench.GetResults())
|
||||
}
|
||||
|
||||
func generateGraphTraversalAsciidocReport(config *BenchmarkConfig, results []*BenchmarkResult) {
|
||||
path := filepath.Join(config.DataDir, "graph_traversal_report.adoc")
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
log.Printf("Failed to create report: %v", err)
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
file.WriteString("= Graph Traversal Benchmark Results\n\n")
|
||||
file.WriteString(
|
||||
fmt.Sprintf(
|
||||
"Generated: %s\n\n", time.Now().Format(time.RFC3339),
|
||||
),
|
||||
)
|
||||
file.WriteString(fmt.Sprintf("Pubkeys: %d\n", GraphBenchNumPubkeys))
|
||||
file.WriteString(fmt.Sprintf("Follows per pubkey: %d-%d\n", GraphBenchMinFollows, GraphBenchMaxFollows))
|
||||
file.WriteString(fmt.Sprintf("Traversal depth: %d degrees\n\n", GraphBenchTraversalDepth))
|
||||
|
||||
file.WriteString("[cols=\"1,^1,^1,^1,^1,^1,^1\",options=\"header\"]\n")
|
||||
file.WriteString("|===\n")
|
||||
file.WriteString("| Test | Events/sec | Avg Latency | P90 | P95 | P99 | Bottom 10% Avg\n")
|
||||
|
||||
for _, r := range results {
|
||||
file.WriteString(fmt.Sprintf("| %s\n", r.TestName))
|
||||
file.WriteString(fmt.Sprintf("| %.2f\n", r.EventsPerSecond))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.AvgLatency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.P90Latency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.P95Latency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.P99Latency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.Bottom10Avg))
|
||||
}
|
||||
file.WriteString("|===\n")
|
||||
|
||||
fmt.Printf("AsciiDoc report saved to: %s\n", path)
|
||||
}
|
||||
|
||||
func runNetworkGraphTraversalBenchmark(config *BenchmarkConfig) {
|
||||
fmt.Printf("Starting Network Graph Traversal Benchmark\n")
|
||||
fmt.Printf("Relay URL: %s\n", config.RelayURL)
|
||||
fmt.Printf("Workers: %d\n", config.ConcurrentWorkers)
|
||||
fmt.Printf("Pubkeys: %d, Follows per pubkey: %d-%d\n",
|
||||
GraphBenchNumPubkeys, GraphBenchMinFollows, GraphBenchMaxFollows)
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create and run network graph traversal benchmark
|
||||
netGraphBench := NewNetworkGraphTraversalBenchmark(config.RelayURL, config.ConcurrentWorkers)
|
||||
|
||||
if err := netGraphBench.RunSuite(ctx); err != nil {
|
||||
log.Fatalf("Network graph traversal benchmark failed: %v", err)
|
||||
}
|
||||
|
||||
// Generate reports
|
||||
netGraphBench.PrintResults()
|
||||
generateNetworkGraphTraversalAsciidocReport(config, netGraphBench.GetResults())
|
||||
}
|
||||
|
||||
func generateNetworkGraphTraversalAsciidocReport(config *BenchmarkConfig, results []*BenchmarkResult) {
|
||||
path := filepath.Join(config.DataDir, "network_graph_traversal_report.adoc")
|
||||
file, err := os.Create(path)
|
||||
if err != nil {
|
||||
log.Printf("Failed to create report: %v", err)
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
file.WriteString("= Network Graph Traversal Benchmark Results\n\n")
|
||||
file.WriteString(
|
||||
fmt.Sprintf(
|
||||
"Generated: %s\n\n", time.Now().Format(time.RFC3339),
|
||||
),
|
||||
)
|
||||
file.WriteString(fmt.Sprintf("Relay URL: %s\n", config.RelayURL))
|
||||
file.WriteString(fmt.Sprintf("Pubkeys: %d\n", GraphBenchNumPubkeys))
|
||||
file.WriteString(fmt.Sprintf("Follows per pubkey: %d-%d\n", GraphBenchMinFollows, GraphBenchMaxFollows))
|
||||
file.WriteString(fmt.Sprintf("Traversal depth: %d degrees\n\n", GraphBenchTraversalDepth))
|
||||
|
||||
file.WriteString("[cols=\"1,^1,^1,^1,^1,^1,^1\",options=\"header\"]\n")
|
||||
file.WriteString("|===\n")
|
||||
file.WriteString("| Test | Events/sec | Avg Latency | P90 | P95 | P99 | Bottom 10% Avg\n")
|
||||
|
||||
for _, r := range results {
|
||||
file.WriteString(fmt.Sprintf("| %s\n", r.TestName))
|
||||
file.WriteString(fmt.Sprintf("| %.2f\n", r.EventsPerSecond))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.AvgLatency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.P90Latency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.P95Latency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.P99Latency))
|
||||
file.WriteString(fmt.Sprintf("| %v\n", r.Bottom10Avg))
|
||||
}
|
||||
file.WriteString("|===\n")
|
||||
|
||||
fmt.Printf("AsciiDoc report saved to: %s\n", path)
|
||||
}
|
||||
|
||||
func parseFlags() *BenchmarkConfig {
|
||||
config := &BenchmarkConfig{}
|
||||
|
||||
@@ -254,10 +368,6 @@ func parseFlags() *BenchmarkConfig {
|
||||
flag.IntVar(&config.NetRate, "net-rate", 20, "Events per second per worker")
|
||||
|
||||
// Backend selection
|
||||
flag.BoolVar(
|
||||
&config.UseDgraph, "dgraph", false,
|
||||
"Use dgraph backend (requires Docker)",
|
||||
)
|
||||
flag.BoolVar(
|
||||
&config.UseNeo4j, "neo4j", false,
|
||||
"Use Neo4j backend (requires Docker)",
|
||||
@@ -267,6 +377,16 @@ func parseFlags() *BenchmarkConfig {
|
||||
"Use rely-sqlite backend",
|
||||
)
|
||||
|
||||
// Graph traversal benchmark
|
||||
flag.BoolVar(
|
||||
&config.UseGraphTraversal, "graph", false,
|
||||
"Run graph traversal benchmark (100k pubkeys, 3-degree follows)",
|
||||
)
|
||||
flag.BoolVar(
|
||||
&config.UseNetworkGraphTraversal, "graph-network", false,
|
||||
"Run network graph traversal benchmark against relay specified by -relay-url",
|
||||
)
|
||||
|
||||
flag.Parse()
|
||||
return config
|
||||
}
|
||||
|
||||
176
cmd/benchmark/reports/run_20251203_222024/aggregate_report.txt
Normal file
176
cmd/benchmark/reports/run_20251203_222024/aggregate_report.txt
Normal file
@@ -0,0 +1,176 @@
|
||||
================================================================
|
||||
NOSTR RELAY BENCHMARK AGGREGATE REPORT
|
||||
================================================================
|
||||
Generated: 2025-12-03T22:47:18+00:00
|
||||
Benchmark Configuration:
|
||||
Events per test: 50000
|
||||
Concurrent workers: 24
|
||||
Test duration: 60s
|
||||
|
||||
Relays tested: 8
|
||||
|
||||
================================================================
|
||||
SUMMARY BY RELAY
|
||||
================================================================
|
||||
|
||||
Relay: rely-sqlite
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 16694.18
|
||||
Events/sec: 6270.43
|
||||
Events/sec: 16694.18
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 1.339216ms
|
||||
Bottom 10% Avg Latency: 779.552µs
|
||||
Avg Latency: 1.281976ms
|
||||
P95 Latency: 1.894111ms
|
||||
P95 Latency: 2.087148ms
|
||||
P95 Latency: 910.529µs
|
||||
|
||||
Relay: next-orly-badger
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 17987.95
|
||||
Events/sec: 6246.39
|
||||
Events/sec: 17987.95
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 1.16914ms
|
||||
Bottom 10% Avg Latency: 675.419µs
|
||||
Avg Latency: 1.301155ms
|
||||
P95 Latency: 1.605171ms
|
||||
P95 Latency: 2.260728ms
|
||||
P95 Latency: 911.513µs
|
||||
|
||||
Relay: next-orly-neo4j
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 17437.04
|
||||
Events/sec: 6060.82
|
||||
Events/sec: 17437.04
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 1.250926ms
|
||||
Bottom 10% Avg Latency: 735.971µs
|
||||
Avg Latency: 1.493295ms
|
||||
P95 Latency: 1.757814ms
|
||||
P95 Latency: 2.404304ms
|
||||
P95 Latency: 896.796µs
|
||||
|
||||
Relay: khatru-sqlite
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 17531.15
|
||||
Events/sec: 6335.87
|
||||
Events/sec: 17531.15
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 1.233875ms
|
||||
Bottom 10% Avg Latency: 707.713µs
|
||||
Avg Latency: 1.239192ms
|
||||
P95 Latency: 1.713051ms
|
||||
P95 Latency: 1.880869ms
|
||||
P95 Latency: 918.848µs
|
||||
|
||||
Relay: khatru-badger
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 15958.89
|
||||
Events/sec: 6352.78
|
||||
Events/sec: 15958.89
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 1.414398ms
|
||||
Bottom 10% Avg Latency: 791.675µs
|
||||
Avg Latency: 1.183812ms
|
||||
P95 Latency: 2.191322ms
|
||||
P95 Latency: 1.80172ms
|
||||
P95 Latency: 903.25µs
|
||||
|
||||
Relay: relayer-basic
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 17757.23
|
||||
Events/sec: 6227.33
|
||||
Events/sec: 17757.23
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 1.193531ms
|
||||
Bottom 10% Avg Latency: 679.232µs
|
||||
Avg Latency: 1.304ms
|
||||
P95 Latency: 1.679267ms
|
||||
P95 Latency: 2.155365ms
|
||||
P95 Latency: 930.632µs
|
||||
|
||||
Relay: strfry
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 17794.50
|
||||
Events/sec: 6252.01
|
||||
Events/sec: 17794.50
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 1.192717ms
|
||||
Bottom 10% Avg Latency: 676.594µs
|
||||
Avg Latency: 1.270724ms
|
||||
P95 Latency: 1.645564ms
|
||||
P95 Latency: 2.251457ms
|
||||
P95 Latency: 915.623µs
|
||||
|
||||
Relay: nostr-rs-relay
|
||||
----------------------------------------
|
||||
Status: COMPLETED
|
||||
Events/sec: 17174.61
|
||||
Events/sec: 6311.06
|
||||
Events/sec: 17174.61
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Success Rate: 100.0%
|
||||
Avg Latency: 1.281647ms
|
||||
Bottom 10% Avg Latency: 742.249µs
|
||||
Avg Latency: 1.260479ms
|
||||
P95 Latency: 1.836808ms
|
||||
P95 Latency: 1.893887ms
|
||||
P95 Latency: 922.647µs
|
||||
|
||||
|
||||
================================================================
|
||||
DETAILED RESULTS
|
||||
================================================================
|
||||
|
||||
Individual relay reports are available in:
|
||||
- /reports/run_20251203_222024/khatru-badger_results.txt
|
||||
- /reports/run_20251203_222024/khatru-sqlite_results.txt
|
||||
- /reports/run_20251203_222024/next-orly-badger_results.txt
|
||||
- /reports/run_20251203_222024/next-orly-neo4j_results.txt
|
||||
- /reports/run_20251203_222024/nostr-rs-relay_results.txt
|
||||
- /reports/run_20251203_222024/relayer-basic_results.txt
|
||||
- /reports/run_20251203_222024/rely-sqlite_results.txt
|
||||
- /reports/run_20251203_222024/strfry_results.txt
|
||||
|
||||
================================================================
|
||||
BENCHMARK COMPARISON TABLE
|
||||
================================================================
|
||||
|
||||
Relay Status Peak Tput/s Avg Latency Success Rate
|
||||
---- ------ ----------- ----------- ------------
|
||||
rely-sqlite OK 16694.18 1.339216ms 100.0%
|
||||
next-orly-badger OK 17987.95 1.16914ms 100.0%
|
||||
next-orly-neo4j OK 17437.04 1.250926ms 100.0%
|
||||
khatru-sqlite OK 17531.15 1.233875ms 100.0%
|
||||
khatru-badger OK 15958.89 1.414398ms 100.0%
|
||||
relayer-basic OK 17757.23 1.193531ms 100.0%
|
||||
strfry OK 17794.50 1.192717ms 100.0%
|
||||
nostr-rs-relay OK 17174.61 1.281647ms 100.0%
|
||||
|
||||
================================================================
|
||||
End of Report
|
||||
================================================================
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_khatru-badger_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764801231368401ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764801231368486ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764801231368519ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764801231368527ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764801231368536ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764801231368548ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764801231368553ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764801231368563ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764801231368569ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764801231368582ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764801231368586ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764801231368621ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764801231368649ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764801231368698ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764801231368705ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764801231368720ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764801231368725ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/03 22:33:51 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 3.133049501s
|
||||
Events/sec: 15958.89
|
||||
Avg latency: 1.414398ms
|
||||
P90 latency: 1.890809ms
|
||||
P95 latency: 2.191322ms
|
||||
P99 latency: 3.903963ms
|
||||
Bottom 10% Avg latency: 791.675µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 324.488191ms
|
||||
Burst completed: 5000 events in 290.052978ms
|
||||
Burst completed: 5000 events in 282.1799ms
|
||||
Burst completed: 5000 events in 293.897308ms
|
||||
Burst completed: 5000 events in 291.897961ms
|
||||
Burst completed: 5000 events in 289.485539ms
|
||||
Burst completed: 5000 events in 274.266496ms
|
||||
Burst completed: 5000 events in 267.924152ms
|
||||
Burst completed: 5000 events in 270.096418ms
|
||||
Burst completed: 5000 events in 279.041228ms
|
||||
Burst test completed: 50000 events in 7.870569084s, errors: 0
|
||||
Events/sec: 6352.78
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.293101466s
|
||||
Combined ops/sec: 2058.20
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 414124 queries in 1m0.005028928s
|
||||
Queries/sec: 6901.49
|
||||
Avg query latency: 2.186993ms
|
||||
P95 query latency: 8.150708ms
|
||||
P99 query latency: 12.392244ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 315470 operations (265470 queries, 50000 writes) in 1m0.003725813s
|
||||
Operations/sec: 5257.51
|
||||
Avg latency: 1.463937ms
|
||||
Avg query latency: 1.521887ms
|
||||
Avg write latency: 1.156254ms
|
||||
P95 latency: 3.595486ms
|
||||
P99 latency: 9.347708ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 3.133049501s
|
||||
Total Events: 50000
|
||||
Events/sec: 15958.89
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 189 MB
|
||||
Avg Latency: 1.414398ms
|
||||
P90 Latency: 1.890809ms
|
||||
P95 Latency: 2.191322ms
|
||||
P99 Latency: 3.903963ms
|
||||
Bottom 10% Avg Latency: 791.675µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 7.870569084s
|
||||
Total Events: 50000
|
||||
Events/sec: 6352.78
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 172 MB
|
||||
Avg Latency: 1.183812ms
|
||||
P90 Latency: 1.587121ms
|
||||
P95 Latency: 1.80172ms
|
||||
P99 Latency: 3.298169ms
|
||||
Bottom 10% Avg Latency: 529.205µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.293101466s
|
||||
Total Events: 50000
|
||||
Events/sec: 2058.20
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 186 MB
|
||||
Avg Latency: 384.638µs
|
||||
P90 Latency: 808.86µs
|
||||
P95 Latency: 903.25µs
|
||||
P99 Latency: 1.120102ms
|
||||
Bottom 10% Avg Latency: 1.02527ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.005028928s
|
||||
Total Events: 414124
|
||||
Events/sec: 6901.49
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 198 MB
|
||||
Avg Latency: 2.186993ms
|
||||
P90 Latency: 6.624883ms
|
||||
P95 Latency: 8.150708ms
|
||||
P99 Latency: 12.392244ms
|
||||
Bottom 10% Avg Latency: 8.999206ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.003725813s
|
||||
Total Events: 315470
|
||||
Events/sec: 5257.51
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 188 MB
|
||||
Avg Latency: 1.463937ms
|
||||
P90 Latency: 2.911413ms
|
||||
P95 Latency: 3.595486ms
|
||||
P99 Latency: 9.347708ms
|
||||
Bottom 10% Avg Latency: 5.359325ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: khatru-badger
|
||||
RELAY_URL: ws://khatru-badger:3334
|
||||
TEST_TIMESTAMP: 2025-12-03T22:37:08+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_khatru-sqlite_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764801029566947ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764801029567062ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764801029567087ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764801029567092ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764801029567102ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764801029567115ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764801029567122ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764801029567131ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764801029567137ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764801029567154ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764801029567160ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764801029567169ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764801029567174ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764801029567187ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764801029567192ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764801029567211ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764801029567217ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/03 22:30:29 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 2.852065909s
|
||||
Events/sec: 17531.15
|
||||
Avg latency: 1.233875ms
|
||||
P90 latency: 1.547505ms
|
||||
P95 latency: 1.713051ms
|
||||
P99 latency: 3.452631ms
|
||||
Bottom 10% Avg latency: 707.713µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 297.229736ms
|
||||
Burst completed: 5000 events in 291.618719ms
|
||||
Burst completed: 5000 events in 295.763107ms
|
||||
Burst completed: 5000 events in 320.083794ms
|
||||
Burst completed: 5000 events in 306.954958ms
|
||||
Burst completed: 5000 events in 294.350551ms
|
||||
Burst completed: 5000 events in 266.492151ms
|
||||
Burst completed: 5000 events in 258.415169ms
|
||||
Burst completed: 5000 events in 279.574451ms
|
||||
Burst completed: 5000 events in 274.042853ms
|
||||
Burst test completed: 50000 events in 7.891576755s, errors: 0
|
||||
Events/sec: 6335.87
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.900476855s
|
||||
Combined ops/sec: 2007.99
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 410621 queries in 1m0.004293618s
|
||||
Queries/sec: 6843.19
|
||||
Avg query latency: 2.208463ms
|
||||
P95 query latency: 8.264ms
|
||||
P99 query latency: 12.52398ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 308780 operations (258780 queries, 50000 writes) in 1m0.002949862s
|
||||
Operations/sec: 5146.08
|
||||
Avg latency: 1.517721ms
|
||||
Avg query latency: 1.590458ms
|
||||
Avg write latency: 1.141264ms
|
||||
P95 latency: 3.798681ms
|
||||
P99 latency: 9.90181ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 2.852065909s
|
||||
Total Events: 50000
|
||||
Events/sec: 17531.15
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 288 MB
|
||||
Avg Latency: 1.233875ms
|
||||
P90 Latency: 1.547505ms
|
||||
P95 Latency: 1.713051ms
|
||||
P99 Latency: 3.452631ms
|
||||
Bottom 10% Avg Latency: 707.713µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 7.891576755s
|
||||
Total Events: 50000
|
||||
Events/sec: 6335.87
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 282 MB
|
||||
Avg Latency: 1.239192ms
|
||||
P90 Latency: 1.634621ms
|
||||
P95 Latency: 1.880869ms
|
||||
P99 Latency: 3.451389ms
|
||||
Bottom 10% Avg Latency: 633.361µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.900476855s
|
||||
Total Events: 50000
|
||||
Events/sec: 2007.99
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 179 MB
|
||||
Avg Latency: 632.275µs
|
||||
P90 Latency: 820.942µs
|
||||
P95 Latency: 918.848µs
|
||||
P99 Latency: 1.159557ms
|
||||
Bottom 10% Avg Latency: 3.463779ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.004293618s
|
||||
Total Events: 410621
|
||||
Events/sec: 6843.19
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 183 MB
|
||||
Avg Latency: 2.208463ms
|
||||
P90 Latency: 6.696276ms
|
||||
P95 Latency: 8.264ms
|
||||
P99 Latency: 12.52398ms
|
||||
Bottom 10% Avg Latency: 9.093268ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.002949862s
|
||||
Total Events: 308780
|
||||
Events/sec: 5146.08
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 137 MB
|
||||
Avg Latency: 1.517721ms
|
||||
P90 Latency: 3.086927ms
|
||||
P95 Latency: 3.798681ms
|
||||
P99 Latency: 9.90181ms
|
||||
Bottom 10% Avg Latency: 5.471135ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: khatru-sqlite
|
||||
RELAY_URL: ws://khatru-sqlite:3334
|
||||
TEST_TIMESTAMP: 2025-12-03T22:33:46+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_next-orly-badger_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764800626547123ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764800626547201ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764800626547221ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764800626547228ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764800626547238ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764800626547252ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764800626547257ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764800626547269ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764800626547275ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764800626547291ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764800626547296ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764800626547303ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764800626547308ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764800626547323ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764800626547329ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764800626547351ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764800626547357ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/03 22:23:46 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 2.779639238s
|
||||
Events/sec: 17987.95
|
||||
Avg latency: 1.16914ms
|
||||
P90 latency: 1.459324ms
|
||||
P95 latency: 1.605171ms
|
||||
P99 latency: 3.239809ms
|
||||
Bottom 10% Avg latency: 675.419µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 289.152592ms
|
||||
Burst completed: 5000 events in 284.512743ms
|
||||
Burst completed: 5000 events in 285.58317ms
|
||||
Burst completed: 5000 events in 283.486103ms
|
||||
Burst completed: 5000 events in 351.630471ms
|
||||
Burst completed: 5000 events in 410.422971ms
|
||||
Burst completed: 5000 events in 272.00462ms
|
||||
Burst completed: 5000 events in 258.981762ms
|
||||
Burst completed: 5000 events in 287.217917ms
|
||||
Burst completed: 5000 events in 274.519636ms
|
||||
Burst test completed: 50000 events in 8.004626821s, errors: 0
|
||||
Events/sec: 6246.39
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.365264731s
|
||||
Combined ops/sec: 2052.10
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 395932 queries in 1m0.004955045s
|
||||
Queries/sec: 6598.32
|
||||
Avg query latency: 2.330632ms
|
||||
P95 query latency: 8.751923ms
|
||||
P99 query latency: 13.223897ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 309032 operations (259032 queries, 50000 writes) in 1m0.003111764s
|
||||
Operations/sec: 5150.27
|
||||
Avg latency: 1.47962ms
|
||||
Avg query latency: 1.571532ms
|
||||
Avg write latency: 1.003456ms
|
||||
P95 latency: 3.833182ms
|
||||
P99 latency: 9.589651ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 2.779639238s
|
||||
Total Events: 50000
|
||||
Events/sec: 17987.95
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 217 MB
|
||||
Avg Latency: 1.16914ms
|
||||
P90 Latency: 1.459324ms
|
||||
P95 Latency: 1.605171ms
|
||||
P99 Latency: 3.239809ms
|
||||
Bottom 10% Avg Latency: 675.419µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 8.004626821s
|
||||
Total Events: 50000
|
||||
Events/sec: 6246.39
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 335 MB
|
||||
Avg Latency: 1.301155ms
|
||||
P90 Latency: 1.847122ms
|
||||
P95 Latency: 2.260728ms
|
||||
P99 Latency: 3.744669ms
|
||||
Bottom 10% Avg Latency: 634.48µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.365264731s
|
||||
Total Events: 50000
|
||||
Events/sec: 2052.10
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 167 MB
|
||||
Avg Latency: 386.252µs
|
||||
P90 Latency: 816.533µs
|
||||
P95 Latency: 911.513µs
|
||||
P99 Latency: 1.142853ms
|
||||
Bottom 10% Avg Latency: 1.03593ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.004955045s
|
||||
Total Events: 395932
|
||||
Events/sec: 6598.32
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 202 MB
|
||||
Avg Latency: 2.330632ms
|
||||
P90 Latency: 7.016915ms
|
||||
P95 Latency: 8.751923ms
|
||||
P99 Latency: 13.223897ms
|
||||
Bottom 10% Avg Latency: 9.659987ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.003111764s
|
||||
Total Events: 309032
|
||||
Events/sec: 5150.27
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 170 MB
|
||||
Avg Latency: 1.47962ms
|
||||
P90 Latency: 3.082944ms
|
||||
P95 Latency: 3.833182ms
|
||||
P99 Latency: 9.589651ms
|
||||
Bottom 10% Avg Latency: 5.322657ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: next-orly-badger
|
||||
RELAY_URL: ws://next-orly-badger:8080
|
||||
TEST_TIMESTAMP: 2025-12-03T22:27:02+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_next-orly-neo4j_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764800827923617ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764800827923722ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764800827923845ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764800827923903ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764800827923913ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764800827923932ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764800827923938ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764800827923950ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764800827923957ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764800827923975ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764800827923981ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764800827923992ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764800827923997ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764800827924024ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764800827924030ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764800827924051ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764800827924056ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/03 22:27:07 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 2.867458976s
|
||||
Events/sec: 17437.04
|
||||
Avg latency: 1.250926ms
|
||||
P90 latency: 1.579234ms
|
||||
P95 latency: 1.757814ms
|
||||
P99 latency: 3.529624ms
|
||||
Bottom 10% Avg latency: 735.971µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 325.40241ms
|
||||
Burst completed: 5000 events in 303.100725ms
|
||||
Burst completed: 5000 events in 322.600483ms
|
||||
Burst completed: 5000 events in 368.813118ms
|
||||
Burst completed: 5000 events in 344.272535ms
|
||||
Burst completed: 5000 events in 313.590737ms
|
||||
Burst completed: 5000 events in 285.903125ms
|
||||
Burst completed: 5000 events in 371.578395ms
|
||||
Burst completed: 5000 events in 304.71264ms
|
||||
Burst completed: 5000 events in 303.146753ms
|
||||
Burst test completed: 50000 events in 8.249715579s, errors: 0
|
||||
Events/sec: 6060.82
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.349183827s
|
||||
Combined ops/sec: 2053.46
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 401050 queries in 1m0.005784437s
|
||||
Queries/sec: 6683.52
|
||||
Avg query latency: 2.274359ms
|
||||
P95 query latency: 8.507568ms
|
||||
P99 query latency: 12.862634ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 306446 operations (256446 queries, 50000 writes) in 1m0.003089159s
|
||||
Operations/sec: 5107.17
|
||||
Avg latency: 1.529829ms
|
||||
Avg query latency: 1.613393ms
|
||||
Avg write latency: 1.101234ms
|
||||
P95 latency: 3.928746ms
|
||||
P99 latency: 10.421101ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 2.867458976s
|
||||
Total Events: 50000
|
||||
Events/sec: 17437.04
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 210 MB
|
||||
Avg Latency: 1.250926ms
|
||||
P90 Latency: 1.579234ms
|
||||
P95 Latency: 1.757814ms
|
||||
P99 Latency: 3.529624ms
|
||||
Bottom 10% Avg Latency: 735.971µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 8.249715579s
|
||||
Total Events: 50000
|
||||
Events/sec: 6060.82
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 207 MB
|
||||
Avg Latency: 1.493295ms
|
||||
P90 Latency: 2.04999ms
|
||||
P95 Latency: 2.404304ms
|
||||
P99 Latency: 4.303908ms
|
||||
Bottom 10% Avg Latency: 768.239µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.349183827s
|
||||
Total Events: 50000
|
||||
Events/sec: 2053.46
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 176 MB
|
||||
Avg Latency: 379.443µs
|
||||
P90 Latency: 799.27µs
|
||||
P95 Latency: 896.796µs
|
||||
P99 Latency: 1.119981ms
|
||||
Bottom 10% Avg Latency: 1.022564ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.005784437s
|
||||
Total Events: 401050
|
||||
Events/sec: 6683.52
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 176 MB
|
||||
Avg Latency: 2.274359ms
|
||||
P90 Latency: 6.877657ms
|
||||
P95 Latency: 8.507568ms
|
||||
P99 Latency: 12.862634ms
|
||||
Bottom 10% Avg Latency: 9.365763ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.003089159s
|
||||
Total Events: 306446
|
||||
Events/sec: 5107.17
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 218 MB
|
||||
Avg Latency: 1.529829ms
|
||||
P90 Latency: 3.147159ms
|
||||
P95 Latency: 3.928746ms
|
||||
P99 Latency: 10.421101ms
|
||||
Bottom 10% Avg Latency: 5.559413ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly-neo4j_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_next-orly-neo4j_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: next-orly-neo4j
|
||||
RELAY_URL: ws://next-orly-neo4j:8080
|
||||
TEST_TIMESTAMP: 2025-12-03T22:30:24+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_nostr-rs-relay_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764801836806913ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764801836806998ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764801836807017ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764801836807022ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764801836807029ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764801836807042ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764801836807046ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764801836807056ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764801836807061ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764801836807076ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764801836807081ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764801836807094ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764801836807124ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764801836807178ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764801836807185ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764801836807242ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764801836807283ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/03 22:43:56 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 2.911273959s
|
||||
Events/sec: 17174.61
|
||||
Avg latency: 1.281647ms
|
||||
P90 latency: 1.626819ms
|
||||
P95 latency: 1.836808ms
|
||||
P99 latency: 3.758588ms
|
||||
Bottom 10% Avg latency: 742.249µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 292.272251ms
|
||||
Burst completed: 5000 events in 287.701125ms
|
||||
Burst completed: 5000 events in 286.616429ms
|
||||
Burst completed: 5000 events in 293.830439ms
|
||||
Burst completed: 5000 events in 301.388252ms
|
||||
Burst completed: 5000 events in 287.188158ms
|
||||
Burst completed: 5000 events in 263.711266ms
|
||||
Burst completed: 5000 events in 318.616274ms
|
||||
Burst completed: 5000 events in 310.007309ms
|
||||
Burst completed: 5000 events in 273.375973ms
|
||||
Burst test completed: 50000 events in 7.922605603s, errors: 0
|
||||
Events/sec: 6311.06
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.449120282s
|
||||
Combined ops/sec: 2045.06
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 417600 queries in 1m0.004478253s
|
||||
Queries/sec: 6959.48
|
||||
Avg query latency: 2.151151ms
|
||||
P95 query latency: 8.027278ms
|
||||
P99 query latency: 12.154981ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 317282 operations (267282 queries, 50000 writes) in 1m0.002851777s
|
||||
Operations/sec: 5287.78
|
||||
Avg latency: 1.438019ms
|
||||
Avg query latency: 1.509815ms
|
||||
Avg write latency: 1.054223ms
|
||||
P95 latency: 3.604275ms
|
||||
P99 latency: 8.983541ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 2.911273959s
|
||||
Total Events: 50000
|
||||
Events/sec: 17174.61
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 127 MB
|
||||
Avg Latency: 1.281647ms
|
||||
P90 Latency: 1.626819ms
|
||||
P95 Latency: 1.836808ms
|
||||
P99 Latency: 3.758588ms
|
||||
Bottom 10% Avg Latency: 742.249µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 7.922605603s
|
||||
Total Events: 50000
|
||||
Events/sec: 6311.06
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 207 MB
|
||||
Avg Latency: 1.260479ms
|
||||
P90 Latency: 1.672696ms
|
||||
P95 Latency: 1.893887ms
|
||||
P99 Latency: 3.384445ms
|
||||
Bottom 10% Avg Latency: 627.786µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.449120282s
|
||||
Total Events: 50000
|
||||
Events/sec: 2045.06
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 160 MB
|
||||
Avg Latency: 393.867µs
|
||||
P90 Latency: 827.906µs
|
||||
P95 Latency: 922.647µs
|
||||
P99 Latency: 1.157515ms
|
||||
Bottom 10% Avg Latency: 1.038892ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.004478253s
|
||||
Total Events: 417600
|
||||
Events/sec: 6959.48
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 205 MB
|
||||
Avg Latency: 2.151151ms
|
||||
P90 Latency: 6.525088ms
|
||||
P95 Latency: 8.027278ms
|
||||
P99 Latency: 12.154981ms
|
||||
Bottom 10% Avg Latency: 8.836851ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.002851777s
|
||||
Total Events: 317282
|
||||
Events/sec: 5287.78
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 168 MB
|
||||
Avg Latency: 1.438019ms
|
||||
P90 Latency: 2.919631ms
|
||||
P95 Latency: 3.604275ms
|
||||
P99 Latency: 8.983541ms
|
||||
Bottom 10% Avg Latency: 5.142371ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: nostr-rs-relay
|
||||
RELAY_URL: ws://nostr-rs-relay:8080
|
||||
TEST_TIMESTAMP: 2025-12-03T22:47:13+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_relayer-basic_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764801433220018ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764801433220118ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764801433220142ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764801433220148ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764801433220157ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764801433220179ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764801433220185ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764801433220202ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764801433220208ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764801433220225ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764801433220231ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764801433220263ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764801433220299ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764801433220355ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764801433220361ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764801433220380ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764801433220384ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/03 22:37:13 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 2.81575396s
|
||||
Events/sec: 17757.23
|
||||
Avg latency: 1.193531ms
|
||||
P90 latency: 1.503923ms
|
||||
P95 latency: 1.679267ms
|
||||
P99 latency: 3.258063ms
|
||||
Bottom 10% Avg latency: 679.232µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 282.513648ms
|
||||
Burst completed: 5000 events in 298.930177ms
|
||||
Burst completed: 5000 events in 368.119471ms
|
||||
Burst completed: 5000 events in 344.305787ms
|
||||
Burst completed: 5000 events in 299.829461ms
|
||||
Burst completed: 5000 events in 328.253293ms
|
||||
Burst completed: 5000 events in 268.415756ms
|
||||
Burst completed: 5000 events in 258.778746ms
|
||||
Burst completed: 5000 events in 281.496082ms
|
||||
Burst completed: 5000 events in 291.526061ms
|
||||
Burst test completed: 50000 events in 8.029129461s, errors: 0
|
||||
Events/sec: 6227.33
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.368434728s
|
||||
Combined ops/sec: 2051.83
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 399766 queries in 1m0.004004537s
|
||||
Queries/sec: 6662.32
|
||||
Avg query latency: 2.299802ms
|
||||
P95 query latency: 8.583876ms
|
||||
P99 query latency: 12.879727ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 319713 operations (269713 queries, 50000 writes) in 1m0.004382807s
|
||||
Operations/sec: 5328.16
|
||||
Avg latency: 1.408648ms
|
||||
Avg query latency: 1.486258ms
|
||||
Avg write latency: 990µs
|
||||
P95 latency: 3.553437ms
|
||||
P99 latency: 8.491811ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 2.81575396s
|
||||
Total Events: 50000
|
||||
Events/sec: 17757.23
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 279 MB
|
||||
Avg Latency: 1.193531ms
|
||||
P90 Latency: 1.503923ms
|
||||
P95 Latency: 1.679267ms
|
||||
P99 Latency: 3.258063ms
|
||||
Bottom 10% Avg Latency: 679.232µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 8.029129461s
|
||||
Total Events: 50000
|
||||
Events/sec: 6227.33
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 314 MB
|
||||
Avg Latency: 1.304ms
|
||||
P90 Latency: 1.833241ms
|
||||
P95 Latency: 2.155365ms
|
||||
P99 Latency: 3.621225ms
|
||||
Bottom 10% Avg Latency: 577.319µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.368434728s
|
||||
Total Events: 50000
|
||||
Events/sec: 2051.83
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 302 MB
|
||||
Avg Latency: 393.704µs
|
||||
P90 Latency: 831.964µs
|
||||
P95 Latency: 930.632µs
|
||||
P99 Latency: 1.162684ms
|
||||
Bottom 10% Avg Latency: 1.038382ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.004004537s
|
||||
Total Events: 399766
|
||||
Events/sec: 6662.32
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 2.299802ms
|
||||
P90 Latency: 6.959824ms
|
||||
P95 Latency: 8.583876ms
|
||||
P99 Latency: 12.879727ms
|
||||
Bottom 10% Avg Latency: 9.428864ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.004382807s
|
||||
Total Events: 319713
|
||||
Events/sec: 5328.16
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 181 MB
|
||||
Avg Latency: 1.408648ms
|
||||
P90 Latency: 2.876838ms
|
||||
P95 Latency: 3.553437ms
|
||||
P99 Latency: 8.491811ms
|
||||
Bottom 10% Avg Latency: 4.993856ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: relayer-basic
|
||||
RELAY_URL: ws://relayer-basic:7447
|
||||
TEST_TIMESTAMP: 2025-12-03T22:40:30+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,202 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_rely-sqlite_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764800424570824ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764800424571528ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764800424571758ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764800424571773ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764800424571799ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764800424571834ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764800424571844ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764800424571863ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764800424571878ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764800424571916ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764800424571927ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764800424571949ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764800424571958ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764800424571993ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764800424572004ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764800424572070ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764800424572082ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/03 22:20:24 INFO: Extracted embedded libsecp256k1 to /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
2025/12/03 22:20:24 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 2.995055353s
|
||||
Events/sec: 16694.18
|
||||
Avg latency: 1.339216ms
|
||||
P90 latency: 1.68929ms
|
||||
P95 latency: 1.894111ms
|
||||
P99 latency: 3.956722ms
|
||||
Bottom 10% Avg latency: 779.552µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 287.657346ms
|
||||
Burst completed: 5000 events in 297.188663ms
|
||||
Burst completed: 5000 events in 367.265309ms
|
||||
Burst completed: 5000 events in 331.9274ms
|
||||
Burst completed: 5000 events in 304.066462ms
|
||||
Burst completed: 5000 events in 310.832609ms
|
||||
Burst completed: 5000 events in 268.076751ms
|
||||
Burst completed: 5000 events in 264.808751ms
|
||||
Burst completed: 5000 events in 267.153131ms
|
||||
Burst completed: 5000 events in 269.523097ms
|
||||
Burst test completed: 50000 events in 7.973932498s, errors: 0
|
||||
Events/sec: 6270.43
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.337287687s
|
||||
Combined ops/sec: 2054.46
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 406493 queries in 1m0.003860956s
|
||||
Queries/sec: 6774.45
|
||||
Avg query latency: 2.248129ms
|
||||
P95 query latency: 8.401333ms
|
||||
P99 query latency: 12.724368ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 318750 operations (268750 queries, 50000 writes) in 1m0.003885697s
|
||||
Operations/sec: 5312.16
|
||||
Avg latency: 1.416974ms
|
||||
Avg query latency: 1.494262ms
|
||||
Avg write latency: 1.001551ms
|
||||
P95 latency: 3.592498ms
|
||||
P99 latency: 8.935176ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 2.995055353s
|
||||
Total Events: 50000
|
||||
Events/sec: 16694.18
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 168 MB
|
||||
Avg Latency: 1.339216ms
|
||||
P90 Latency: 1.68929ms
|
||||
P95 Latency: 1.894111ms
|
||||
P99 Latency: 3.956722ms
|
||||
Bottom 10% Avg Latency: 779.552µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 7.973932498s
|
||||
Total Events: 50000
|
||||
Events/sec: 6270.43
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 200 MB
|
||||
Avg Latency: 1.281976ms
|
||||
P90 Latency: 1.779141ms
|
||||
P95 Latency: 2.087148ms
|
||||
P99 Latency: 3.70878ms
|
||||
Bottom 10% Avg Latency: 616.517µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.337287687s
|
||||
Total Events: 50000
|
||||
Events/sec: 2054.46
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 167 MB
|
||||
Avg Latency: 393.526µs
|
||||
P90 Latency: 818.886µs
|
||||
P95 Latency: 910.529µs
|
||||
P99 Latency: 1.137331ms
|
||||
Bottom 10% Avg Latency: 1.057702ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.003860956s
|
||||
Total Events: 406493
|
||||
Events/sec: 6774.45
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 218 MB
|
||||
Avg Latency: 2.248129ms
|
||||
P90 Latency: 6.801515ms
|
||||
P95 Latency: 8.401333ms
|
||||
P99 Latency: 12.724368ms
|
||||
Bottom 10% Avg Latency: 9.254973ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.003885697s
|
||||
Total Events: 318750
|
||||
Events/sec: 5312.16
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 145 MB
|
||||
Avg Latency: 1.416974ms
|
||||
P90 Latency: 2.911313ms
|
||||
P95 Latency: 3.592498ms
|
||||
P99 Latency: 8.935176ms
|
||||
Bottom 10% Avg Latency: 5.052685ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_rely-sqlite_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_rely-sqlite_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: rely-sqlite
|
||||
RELAY_URL: ws://rely-sqlite:3334
|
||||
TEST_TIMESTAMP: 2025-12-03T22:23:41+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
201
cmd/benchmark/reports/run_20251203_222024/strfry_results.txt
Normal file
201
cmd/benchmark/reports/run_20251203_222024/strfry_results.txt
Normal file
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_strfry_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764801635484762ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764801635484837ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764801635484863ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764801635484869ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764801635484879ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764801635484894ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764801635484899ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764801635484909ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764801635484915ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764801635484931ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764801635484936ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764801635484945ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764801635484951ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764801635484969ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764801635484974ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764801635484993ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764801635485001ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/03 22:40:35 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 2.809857459s
|
||||
Events/sec: 17794.50
|
||||
Avg latency: 1.192717ms
|
||||
P90 latency: 1.483896ms
|
||||
P95 latency: 1.645564ms
|
||||
P99 latency: 3.557014ms
|
||||
Bottom 10% Avg latency: 676.594µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 277.530017ms
|
||||
Burst completed: 5000 events in 298.041629ms
|
||||
Burst completed: 5000 events in 282.179602ms
|
||||
Burst completed: 5000 events in 291.10499ms
|
||||
Burst completed: 5000 events in 337.732105ms
|
||||
Burst completed: 5000 events in 419.28426ms
|
||||
Burst completed: 5000 events in 273.162241ms
|
||||
Burst completed: 5000 events in 266.443777ms
|
||||
Burst completed: 5000 events in 276.847799ms
|
||||
Burst completed: 5000 events in 268.986549ms
|
||||
Burst test completed: 50000 events in 7.997424399s, errors: 0
|
||||
Events/sec: 6252.01
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.336991819s
|
||||
Combined ops/sec: 2054.49
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 405590 queries in 1m0.005592222s
|
||||
Queries/sec: 6759.20
|
||||
Avg query latency: 2.253836ms
|
||||
P95 query latency: 8.385881ms
|
||||
P99 query latency: 12.734892ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 318685 operations (268685 queries, 50000 writes) in 1m0.004113189s
|
||||
Operations/sec: 5311.05
|
||||
Avg latency: 1.408868ms
|
||||
Avg query latency: 1.488688ms
|
||||
Avg write latency: 979.939µs
|
||||
P95 latency: 3.553949ms
|
||||
P99 latency: 8.372585ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 2.809857459s
|
||||
Total Events: 50000
|
||||
Events/sec: 17794.50
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 153 MB
|
||||
Avg Latency: 1.192717ms
|
||||
P90 Latency: 1.483896ms
|
||||
P95 Latency: 1.645564ms
|
||||
P99 Latency: 3.557014ms
|
||||
Bottom 10% Avg Latency: 676.594µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 7.997424399s
|
||||
Total Events: 50000
|
||||
Events/sec: 6252.01
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 285 MB
|
||||
Avg Latency: 1.270724ms
|
||||
P90 Latency: 1.812071ms
|
||||
P95 Latency: 2.251457ms
|
||||
P99 Latency: 3.733049ms
|
||||
Bottom 10% Avg Latency: 565.205µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.336991819s
|
||||
Total Events: 50000
|
||||
Events/sec: 2054.49
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 166 MB
|
||||
Avg Latency: 389.259µs
|
||||
P90 Latency: 819.049µs
|
||||
P95 Latency: 915.623µs
|
||||
P99 Latency: 1.128529ms
|
||||
Bottom 10% Avg Latency: 1.043578ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.005592222s
|
||||
Total Events: 405590
|
||||
Events/sec: 6759.20
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 211 MB
|
||||
Avg Latency: 2.253836ms
|
||||
P90 Latency: 6.794068ms
|
||||
P95 Latency: 8.385881ms
|
||||
P99 Latency: 12.734892ms
|
||||
Bottom 10% Avg Latency: 9.272721ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.004113189s
|
||||
Total Events: 318685
|
||||
Events/sec: 5311.05
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 234 MB
|
||||
Avg Latency: 1.408868ms
|
||||
P90 Latency: 2.883582ms
|
||||
P95 Latency: 3.553949ms
|
||||
P99 Latency: 8.372585ms
|
||||
Bottom 10% Avg Latency: 4.976512ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_strfry_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: strfry
|
||||
RELAY_URL: ws://strfry:8080
|
||||
TEST_TIMESTAMP: 2025-12-03T22:43:51+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,74 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_khatru-badger_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764840830987179ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764840830987255ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764840830987278ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764840830987283ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764840830987292ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764840830987305ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764840830987310ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764840830987336ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764840830987364ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764840830987412ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764840830987419ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764840830987429ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764840830987435ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764840830987452ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764840830987458ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764840830987473ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764840830987479ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/04 09:33:50 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 3.213866224s
|
||||
Events/sec: 15557.59
|
||||
Avg latency: 1.456848ms
|
||||
P90 latency: 1.953553ms
|
||||
P95 latency: 2.322455ms
|
||||
P99 latency: 4.316566ms
|
||||
Bottom 10% Avg latency: 793.956µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 336.223018ms
|
||||
Burst completed: 5000 events in 314.023603ms
|
||||
Burst completed: 5000 events in 296.961158ms
|
||||
Burst completed: 5000 events in 313.470891ms
|
||||
Burst completed: 5000 events in 312.977339ms
|
||||
Burst completed: 5000 events in 304.290846ms
|
||||
Burst completed: 5000 events in 279.718158ms
|
||||
Burst completed: 5000 events in 351.360773ms
|
||||
Burst completed: 5000 events in 413.446584ms
|
||||
Burst completed: 5000 events in 412.074279ms
|
||||
Burst test completed: 50000 events in 8.341599033s, errors: 0
|
||||
Events/sec: 5994.05
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.442820936s
|
||||
Combined ops/sec: 2045.59
|
||||
Wiping database between tests...
|
||||
@@ -0,0 +1,8 @@
|
||||
|
||||
RELAY_NAME: khatru-sqlite
|
||||
RELAY_URL: ws://khatru-sqlite:3334
|
||||
TEST_TIMESTAMP: 2025-12-04T09:33:45+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_next-orly-badger_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764840226432341ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764840226432976ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764840226433077ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764840226433085ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764840226433100ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764840226433117ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764840226433122ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764840226433129ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764840226433135ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764840226433150ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764840226433155ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764840226433164ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764840226433169ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764840226433182ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764840226433186ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764840226433202ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764840226433206ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/04 09:23:46 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 2.949326718s
|
||||
Events/sec: 16953.02
|
||||
Avg latency: 1.296368ms
|
||||
P90 latency: 1.675853ms
|
||||
P95 latency: 1.934996ms
|
||||
P99 latency: 3.691782ms
|
||||
Bottom 10% Avg latency: 738.489µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 292.737912ms
|
||||
Burst completed: 5000 events in 295.756562ms
|
||||
Burst completed: 5000 events in 280.376675ms
|
||||
Burst completed: 5000 events in 283.027074ms
|
||||
Burst completed: 5000 events in 292.213914ms
|
||||
Burst completed: 5000 events in 292.804158ms
|
||||
Burst completed: 5000 events in 265.332637ms
|
||||
Burst completed: 5000 events in 262.359574ms
|
||||
Burst completed: 5000 events in 271.801669ms
|
||||
Burst completed: 5000 events in 270.594731ms
|
||||
Burst test completed: 50000 events in 7.813073176s, errors: 0
|
||||
Events/sec: 6399.53
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.337354042s
|
||||
Combined ops/sec: 2054.46
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 418759 queries in 1m0.009274332s
|
||||
Queries/sec: 6978.24
|
||||
Avg query latency: 2.156012ms
|
||||
P95 query latency: 8.060424ms
|
||||
P99 query latency: 12.213045ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 313770 operations (263770 queries, 50000 writes) in 1m0.003742319s
|
||||
Operations/sec: 5229.17
|
||||
Avg latency: 1.452263ms
|
||||
Avg query latency: 1.541956ms
|
||||
Avg write latency: 979.094µs
|
||||
P95 latency: 3.734524ms
|
||||
P99 latency: 9.585308ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 2.949326718s
|
||||
Total Events: 50000
|
||||
Events/sec: 16953.02
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 286 MB
|
||||
Avg Latency: 1.296368ms
|
||||
P90 Latency: 1.675853ms
|
||||
P95 Latency: 1.934996ms
|
||||
P99 Latency: 3.691782ms
|
||||
Bottom 10% Avg Latency: 738.489µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 7.813073176s
|
||||
Total Events: 50000
|
||||
Events/sec: 6399.53
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 275 MB
|
||||
Avg Latency: 1.179921ms
|
||||
P90 Latency: 1.527861ms
|
||||
P95 Latency: 1.722912ms
|
||||
P99 Latency: 3.6275ms
|
||||
Bottom 10% Avg Latency: 587.766µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.337354042s
|
||||
Total Events: 50000
|
||||
Events/sec: 2054.46
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 185 MB
|
||||
Avg Latency: 387.847µs
|
||||
P90 Latency: 809.663µs
|
||||
P95 Latency: 905.205µs
|
||||
P99 Latency: 1.133569ms
|
||||
Bottom 10% Avg Latency: 1.057923ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.009274332s
|
||||
Total Events: 418759
|
||||
Events/sec: 6978.24
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 199 MB
|
||||
Avg Latency: 2.156012ms
|
||||
P90 Latency: 6.536561ms
|
||||
P95 Latency: 8.060424ms
|
||||
P99 Latency: 12.213045ms
|
||||
Bottom 10% Avg Latency: 8.880182ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.003742319s
|
||||
Total Events: 313770
|
||||
Events/sec: 5229.17
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 177 MB
|
||||
Avg Latency: 1.452263ms
|
||||
P90 Latency: 3.028419ms
|
||||
P95 Latency: 3.734524ms
|
||||
P99 Latency: 9.585308ms
|
||||
Bottom 10% Avg Latency: 5.204062ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: next-orly-badger
|
||||
RELAY_URL: ws://next-orly-badger:8080
|
||||
TEST_TIMESTAMP: 2025-12-04T09:27:02+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,202 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_rely-sqlite_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764840025108837ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764840025108932ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764840025108958ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764840025108965ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764840025108976ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764840025108998ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764840025109005ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764840025109017ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764840025109023ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764840025109041ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764840025109047ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764840025109059ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764840025109087ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764840025109131ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764840025109138ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764840025109161ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764840025109166ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/04 09:20:25 INFO: Extracted embedded libsecp256k1 to /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
2025/12/04 09:20:25 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 2.916633222s
|
||||
Events/sec: 17143.05
|
||||
Avg latency: 1.278819ms
|
||||
P90 latency: 1.645294ms
|
||||
P95 latency: 1.861406ms
|
||||
P99 latency: 3.124622ms
|
||||
Bottom 10% Avg latency: 729.231µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 299.940949ms
|
||||
Burst completed: 5000 events in 320.651151ms
|
||||
Burst completed: 5000 events in 285.455745ms
|
||||
Burst completed: 5000 events in 309.502203ms
|
||||
Burst completed: 5000 events in 298.703461ms
|
||||
Burst completed: 5000 events in 298.785067ms
|
||||
Burst completed: 5000 events in 272.364406ms
|
||||
Burst completed: 5000 events in 264.606838ms
|
||||
Burst completed: 5000 events in 315.333631ms
|
||||
Burst completed: 5000 events in 290.913401ms
|
||||
Burst test completed: 50000 events in 7.960160876s, errors: 0
|
||||
Events/sec: 6281.28
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.295679369s
|
||||
Combined ops/sec: 2057.98
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 409908 queries in 1m0.005235789s
|
||||
Queries/sec: 6831.20
|
||||
Avg query latency: 2.219665ms
|
||||
P95 query latency: 8.253853ms
|
||||
P99 query latency: 12.450497ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 312694 operations (262694 queries, 50000 writes) in 1m0.003601943s
|
||||
Operations/sec: 5211.25
|
||||
Avg latency: 1.479337ms
|
||||
Avg query latency: 1.552934ms
|
||||
Avg write latency: 1.092669ms
|
||||
P95 latency: 3.715568ms
|
||||
P99 latency: 9.865884ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 2.916633222s
|
||||
Total Events: 50000
|
||||
Events/sec: 17143.05
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 199 MB
|
||||
Avg Latency: 1.278819ms
|
||||
P90 Latency: 1.645294ms
|
||||
P95 Latency: 1.861406ms
|
||||
P99 Latency: 3.124622ms
|
||||
Bottom 10% Avg Latency: 729.231µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 7.960160876s
|
||||
Total Events: 50000
|
||||
Events/sec: 6281.28
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 172 MB
|
||||
Avg Latency: 1.284949ms
|
||||
P90 Latency: 1.745856ms
|
||||
P95 Latency: 2.012483ms
|
||||
P99 Latency: 3.414064ms
|
||||
Bottom 10% Avg Latency: 603.349µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.295679369s
|
||||
Total Events: 50000
|
||||
Events/sec: 2057.98
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 188 MB
|
||||
Avg Latency: 386.608µs
|
||||
P90 Latency: 813.46µs
|
||||
P95 Latency: 908.982µs
|
||||
P99 Latency: 1.125173ms
|
||||
Bottom 10% Avg Latency: 1.033435ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.005235789s
|
||||
Total Events: 409908
|
||||
Events/sec: 6831.20
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 203 MB
|
||||
Avg Latency: 2.219665ms
|
||||
P90 Latency: 6.727054ms
|
||||
P95 Latency: 8.253853ms
|
||||
P99 Latency: 12.450497ms
|
||||
Bottom 10% Avg Latency: 9.092639ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.003601943s
|
||||
Total Events: 312694
|
||||
Events/sec: 5211.25
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 134 MB
|
||||
Avg Latency: 1.479337ms
|
||||
P90 Latency: 2.996278ms
|
||||
P95 Latency: 3.715568ms
|
||||
P99 Latency: 9.865884ms
|
||||
Bottom 10% Avg Latency: 5.322579ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_rely-sqlite_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_rely-sqlite_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: rely-sqlite
|
||||
RELAY_URL: ws://rely-sqlite:3334
|
||||
TEST_TIMESTAMP: 2025-12-04T09:23:41+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_khatru-badger_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764845904475025ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764845904475112ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764845904475134ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764845904475139ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764845904475152ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764845904475166ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764845904475171ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764845904475182ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764845904475187ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764845904475202ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764845904475207ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764845904475213ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764845904475218ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764845904475233ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764845904475238ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764845904475247ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764845904475252ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/04 10:58:24 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 4.536980771s
|
||||
Events/sec: 11020.54
|
||||
Avg latency: 2.141467ms
|
||||
P90 latency: 3.415814ms
|
||||
P95 latency: 4.218151ms
|
||||
P99 latency: 6.573395ms
|
||||
Bottom 10% Avg latency: 965.163µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 562.534206ms
|
||||
Burst completed: 5000 events in 495.672511ms
|
||||
Burst completed: 5000 events in 403.9333ms
|
||||
Burst completed: 5000 events in 406.633831ms
|
||||
Burst completed: 5000 events in 497.747932ms
|
||||
Burst completed: 5000 events in 375.06022ms
|
||||
Burst completed: 5000 events in 357.935146ms
|
||||
Burst completed: 5000 events in 354.7018ms
|
||||
Burst completed: 5000 events in 363.034284ms
|
||||
Burst completed: 5000 events in 369.648798ms
|
||||
Burst test completed: 50000 events in 9.192909424s, errors: 0
|
||||
Events/sec: 5438.97
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.759007602s
|
||||
Combined ops/sec: 2019.47
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 279947 queries in 1m0.0101769s
|
||||
Queries/sec: 4664.99
|
||||
Avg query latency: 3.577317ms
|
||||
P95 query latency: 13.542975ms
|
||||
P99 query latency: 20.687227ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 236582 operations (186582 queries, 50000 writes) in 1m0.004658961s
|
||||
Operations/sec: 3942.73
|
||||
Avg latency: 2.272206ms
|
||||
Avg query latency: 2.486915ms
|
||||
Avg write latency: 1.470991ms
|
||||
P95 latency: 6.629071ms
|
||||
P99 latency: 17.102632ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 4.536980771s
|
||||
Total Events: 50000
|
||||
Events/sec: 11020.54
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 233 MB
|
||||
Avg Latency: 2.141467ms
|
||||
P90 Latency: 3.415814ms
|
||||
P95 Latency: 4.218151ms
|
||||
P99 Latency: 6.573395ms
|
||||
Bottom 10% Avg Latency: 965.163µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.192909424s
|
||||
Total Events: 50000
|
||||
Events/sec: 5438.97
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 235 MB
|
||||
Avg Latency: 1.990208ms
|
||||
P90 Latency: 3.107457ms
|
||||
P95 Latency: 3.856432ms
|
||||
P99 Latency: 6.336835ms
|
||||
Bottom 10% Avg Latency: 900.221µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.759007602s
|
||||
Total Events: 50000
|
||||
Events/sec: 2019.47
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 174 MB
|
||||
Avg Latency: 450.921µs
|
||||
P90 Latency: 937.184µs
|
||||
P95 Latency: 1.10841ms
|
||||
P99 Latency: 1.666212ms
|
||||
Bottom 10% Avg Latency: 1.296193ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.0101769s
|
||||
Total Events: 279947
|
||||
Events/sec: 4664.99
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 142 MB
|
||||
Avg Latency: 3.577317ms
|
||||
P90 Latency: 10.560196ms
|
||||
P95 Latency: 13.542975ms
|
||||
P99 Latency: 20.687227ms
|
||||
Bottom 10% Avg Latency: 14.957911ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.004658961s
|
||||
Total Events: 236582
|
||||
Events/sec: 3942.73
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 161 MB
|
||||
Avg Latency: 2.272206ms
|
||||
P90 Latency: 4.975152ms
|
||||
P95 Latency: 6.629071ms
|
||||
P99 Latency: 17.102632ms
|
||||
Bottom 10% Avg Latency: 8.89611ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_khatru-badger_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: khatru-badger
|
||||
RELAY_URL: ws://khatru-badger:3334
|
||||
TEST_TIMESTAMP: 2025-12-04T11:01:44+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_khatru-sqlite_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764845699509026ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764845699509106ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764845699509128ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764845699509133ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764845699509146ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764845699509159ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764845699509164ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764845699509172ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764845699509178ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764845699509192ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764845699509197ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764845699509206ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764845699509211ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764845699509224ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764845699509228ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764845699509238ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764845699509242ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/04 10:54:59 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 4.109596583s
|
||||
Events/sec: 12166.64
|
||||
Avg latency: 1.93573ms
|
||||
P90 latency: 2.871977ms
|
||||
P95 latency: 3.44036ms
|
||||
P99 latency: 5.475515ms
|
||||
Bottom 10% Avg latency: 961.636µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 515.356224ms
|
||||
Burst completed: 5000 events in 399.9581ms
|
||||
Burst completed: 5000 events in 459.416277ms
|
||||
Burst completed: 5000 events in 428.20652ms
|
||||
Burst completed: 5000 events in 747.547021ms
|
||||
Burst completed: 5000 events in 647.984214ms
|
||||
Burst completed: 5000 events in 488.90592ms
|
||||
Burst completed: 5000 events in 377.505306ms
|
||||
Burst completed: 5000 events in 465.109125ms
|
||||
Burst completed: 5000 events in 429.364917ms
|
||||
Burst test completed: 50000 events in 9.965909051s, errors: 0
|
||||
Events/sec: 5017.10
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.612452482s
|
||||
Combined ops/sec: 2031.49
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 302291 queries in 1m0.005394665s
|
||||
Queries/sec: 5037.73
|
||||
Avg query latency: 3.277291ms
|
||||
P95 query latency: 12.307232ms
|
||||
P99 query latency: 18.488169ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 243436 operations (193436 queries, 50000 writes) in 1m0.00468811s
|
||||
Operations/sec: 4056.95
|
||||
Avg latency: 2.220391ms
|
||||
Avg query latency: 2.393422ms
|
||||
Avg write latency: 1.550983ms
|
||||
P95 latency: 6.295105ms
|
||||
P99 latency: 16.788623ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 4.109596583s
|
||||
Total Events: 50000
|
||||
Events/sec: 12166.64
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 243 MB
|
||||
Avg Latency: 1.93573ms
|
||||
P90 Latency: 2.871977ms
|
||||
P95 Latency: 3.44036ms
|
||||
P99 Latency: 5.475515ms
|
||||
Bottom 10% Avg Latency: 961.636µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.965909051s
|
||||
Total Events: 50000
|
||||
Events/sec: 5017.10
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 257 MB
|
||||
Avg Latency: 2.375602ms
|
||||
P90 Latency: 3.854368ms
|
||||
P95 Latency: 5.019226ms
|
||||
P99 Latency: 8.287248ms
|
||||
Bottom 10% Avg Latency: 1.013228ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.612452482s
|
||||
Total Events: 50000
|
||||
Events/sec: 2031.49
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 190 MB
|
||||
Avg Latency: 432.265µs
|
||||
P90 Latency: 913.499µs
|
||||
P95 Latency: 1.051763ms
|
||||
P99 Latency: 1.395767ms
|
||||
Bottom 10% Avg Latency: 1.160261ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.005394665s
|
||||
Total Events: 302291
|
||||
Events/sec: 5037.73
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 234 MB
|
||||
Avg Latency: 3.277291ms
|
||||
P90 Latency: 9.787032ms
|
||||
P95 Latency: 12.307232ms
|
||||
P99 Latency: 18.488169ms
|
||||
Bottom 10% Avg Latency: 13.509646ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.00468811s
|
||||
Total Events: 243436
|
||||
Events/sec: 4056.95
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 148 MB
|
||||
Avg Latency: 2.220391ms
|
||||
P90 Latency: 4.746928ms
|
||||
P95 Latency: 6.295105ms
|
||||
P99 Latency: 16.788623ms
|
||||
Bottom 10% Avg Latency: 8.681502ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_khatru-sqlite_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: khatru-sqlite
|
||||
RELAY_URL: ws://khatru-sqlite:3334
|
||||
TEST_TIMESTAMP: 2025-12-04T10:58:19+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,43 @@
|
||||
Starting Network Graph Traversal Benchmark
|
||||
Relay URL: ws://next-orly-badger:8080
|
||||
Workers: 24
|
||||
Pubkeys: 100000, Follows per pubkey: 1-1000
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ NETWORK GRAPH TRAVERSAL BENCHMARK (100k Pubkeys) ║
|
||||
║ Relay: ws://next-orly-badger:8080 ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
Generating 100000 deterministic pubkeys...
|
||||
2025/12/04 13:19:05 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Generated 10000/100000 pubkeys...
|
||||
Generated 20000/100000 pubkeys...
|
||||
Generated 30000/100000 pubkeys...
|
||||
Generated 40000/100000 pubkeys...
|
||||
Generated 50000/100000 pubkeys...
|
||||
Generated 60000/100000 pubkeys...
|
||||
Generated 70000/100000 pubkeys...
|
||||
Generated 80000/100000 pubkeys...
|
||||
Generated 90000/100000 pubkeys...
|
||||
Generated 100000/100000 pubkeys...
|
||||
Generated 100000 pubkeys in 2.473794335s
|
||||
Generating follow graph (1-1000 follows per pubkey)...
|
||||
Generated follow lists for 10000/100000 pubkeys...
|
||||
Generated follow lists for 20000/100000 pubkeys...
|
||||
Generated follow lists for 30000/100000 pubkeys...
|
||||
Generated follow lists for 40000/100000 pubkeys...
|
||||
Generated follow lists for 50000/100000 pubkeys...
|
||||
Generated follow lists for 60000/100000 pubkeys...
|
||||
Generated follow lists for 70000/100000 pubkeys...
|
||||
Generated follow lists for 80000/100000 pubkeys...
|
||||
Generated follow lists for 90000/100000 pubkeys...
|
||||
Generated follow lists for 100000/100000 pubkeys...
|
||||
Generated follow graph in 4.361425602s (avg 500.5 follows/pubkey, total 50048088 follows)
|
||||
|
||||
Connecting to relay: ws://next-orly-badger:8080
|
||||
Connected successfully!
|
||||
Creating follow list events via WebSocket...
|
||||
Queued 10000/100000 follow list events...
|
||||
Queued 20000/100000 follow list events...
|
||||
Queued 30000/100000 follow list events...
|
||||
1764854401568817🚨 NOTICE from ws://next-orly-badger:8080: 'EVENT processing failed' /go/pkg/mod/git.mleku.dev/mleku/nostr@v1.0.8/ws/client.go:326
|
||||
1764854402773843🚨 failed to write message: %!w(*net.OpError=&{write tcp 0xc0001b0f30 0xc0001b0f60 {}}) /go/pkg/mod/git.mleku.dev/mleku/nostr@v1.0.8/ws/connection.go:63
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_next-orly-badger_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764845290757888ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764845290758084ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764845290758119ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764845290758124ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764845290758135ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764845290758150ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764845290758155ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764845290758167ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764845290758173ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764845290758190ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764845290758195ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764845290758204ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764845290758210ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764845290758224ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764845290758229ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764845290758241ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764845290758247ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/04 10:48:10 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 4.113585513s
|
||||
Events/sec: 12154.85
|
||||
Avg latency: 1.935424ms
|
||||
P90 latency: 2.908617ms
|
||||
P95 latency: 3.52541ms
|
||||
P99 latency: 5.586614ms
|
||||
Bottom 10% Avg latency: 943.568µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 384.404827ms
|
||||
Burst completed: 5000 events in 366.066982ms
|
||||
Burst completed: 5000 events in 413.972961ms
|
||||
Burst completed: 5000 events in 540.992935ms
|
||||
Burst completed: 5000 events in 444.488278ms
|
||||
Burst completed: 5000 events in 342.979185ms
|
||||
Burst completed: 5000 events in 393.451489ms
|
||||
Burst completed: 5000 events in 530.328367ms
|
||||
Burst completed: 5000 events in 483.78923ms
|
||||
Burst completed: 5000 events in 356.248835ms
|
||||
Burst test completed: 50000 events in 9.263453685s, errors: 0
|
||||
Events/sec: 5397.55
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.809227197s
|
||||
Combined ops/sec: 2015.38
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 256384 queries in 1m0.005966351s
|
||||
Queries/sec: 4272.64
|
||||
Avg query latency: 3.92418ms
|
||||
P95 query latency: 14.841512ms
|
||||
P99 query latency: 22.768552ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 220975 operations (170975 queries, 50000 writes) in 1m0.003529193s
|
||||
Operations/sec: 3682.70
|
||||
Avg latency: 2.572587ms
|
||||
Avg query latency: 2.803798ms
|
||||
Avg write latency: 1.781959ms
|
||||
P95 latency: 7.618974ms
|
||||
P99 latency: 19.690393ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 4.113585513s
|
||||
Total Events: 50000
|
||||
Events/sec: 12154.85
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 127 MB
|
||||
Avg Latency: 1.935424ms
|
||||
P90 Latency: 2.908617ms
|
||||
P95 Latency: 3.52541ms
|
||||
P99 Latency: 5.586614ms
|
||||
Bottom 10% Avg Latency: 943.568µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.263453685s
|
||||
Total Events: 50000
|
||||
Events/sec: 5397.55
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 231 MB
|
||||
Avg Latency: 2.034536ms
|
||||
P90 Latency: 3.126682ms
|
||||
P95 Latency: 3.863975ms
|
||||
P99 Latency: 6.098539ms
|
||||
Bottom 10% Avg Latency: 935.662µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.809227197s
|
||||
Total Events: 50000
|
||||
Events/sec: 2015.38
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 184 MB
|
||||
Avg Latency: 438.529µs
|
||||
P90 Latency: 917.747µs
|
||||
P95 Latency: 1.086949ms
|
||||
P99 Latency: 1.523991ms
|
||||
Bottom 10% Avg Latency: 1.218802ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.005966351s
|
||||
Total Events: 256384
|
||||
Events/sec: 4272.64
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 151 MB
|
||||
Avg Latency: 3.92418ms
|
||||
P90 Latency: 11.560176ms
|
||||
P95 Latency: 14.841512ms
|
||||
P99 Latency: 22.768552ms
|
||||
Bottom 10% Avg Latency: 16.422096ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.003529193s
|
||||
Total Events: 220975
|
||||
Events/sec: 3682.70
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 207 MB
|
||||
Avg Latency: 2.572587ms
|
||||
P90 Latency: 5.5629ms
|
||||
P95 Latency: 7.618974ms
|
||||
P99 Latency: 19.690393ms
|
||||
Bottom 10% Avg Latency: 10.306482ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_next-orly-badger_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: next-orly-badger
|
||||
RELAY_URL: ws://next-orly-badger:8080
|
||||
TEST_TIMESTAMP: 2025-12-04T10:51:30+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_next-orly-neo4j_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764845495230040ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764845495230118ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764845495230154ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764845495230159ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764845495230168ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764845495230182ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764845495230187ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764845495230198ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764845495230204ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764845495230219ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764845495230224ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764845495230232ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764845495230237ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764845495230250ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764845495230255ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764845495230265ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764845495230269ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/04 10:51:35 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 3.737037757s
|
||||
Events/sec: 13379.58
|
||||
Avg latency: 1.744659ms
|
||||
P90 latency: 2.47401ms
|
||||
P95 latency: 2.895953ms
|
||||
P99 latency: 4.909556ms
|
||||
Bottom 10% Avg latency: 897.762µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 421.882059ms
|
||||
Burst completed: 5000 events in 412.531799ms
|
||||
Burst completed: 5000 events in 429.098267ms
|
||||
Burst completed: 5000 events in 390.670143ms
|
||||
Burst completed: 5000 events in 438.603112ms
|
||||
Burst completed: 5000 events in 366.944086ms
|
||||
Burst completed: 5000 events in 534.455064ms
|
||||
Burst completed: 5000 events in 559.621403ms
|
||||
Burst completed: 5000 events in 393.427363ms
|
||||
Burst completed: 5000 events in 371.875354ms
|
||||
Burst test completed: 50000 events in 9.324705477s, errors: 0
|
||||
Events/sec: 5362.10
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.924958418s
|
||||
Combined ops/sec: 2006.02
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 244167 queries in 1m0.008740456s
|
||||
Queries/sec: 4068.86
|
||||
Avg query latency: 4.157543ms
|
||||
P95 query latency: 15.724716ms
|
||||
P99 query latency: 24.284362ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 227664 operations (177664 queries, 50000 writes) in 1m0.005538199s
|
||||
Operations/sec: 3794.05
|
||||
Avg latency: 2.523997ms
|
||||
Avg query latency: 2.668863ms
|
||||
Avg write latency: 2.009247ms
|
||||
P95 latency: 7.235855ms
|
||||
P99 latency: 20.657306ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 3.737037757s
|
||||
Total Events: 50000
|
||||
Events/sec: 13379.58
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 217 MB
|
||||
Avg Latency: 1.744659ms
|
||||
P90 Latency: 2.47401ms
|
||||
P95 Latency: 2.895953ms
|
||||
P99 Latency: 4.909556ms
|
||||
Bottom 10% Avg Latency: 897.762µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.324705477s
|
||||
Total Events: 50000
|
||||
Events/sec: 5362.10
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 304 MB
|
||||
Avg Latency: 2.063122ms
|
||||
P90 Latency: 3.130188ms
|
||||
P95 Latency: 3.8975ms
|
||||
P99 Latency: 6.378352ms
|
||||
Bottom 10% Avg Latency: 954.959µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.924958418s
|
||||
Total Events: 50000
|
||||
Events/sec: 2006.02
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 272 MB
|
||||
Avg Latency: 475.177µs
|
||||
P90 Latency: 996.497µs
|
||||
P95 Latency: 1.205595ms
|
||||
P99 Latency: 1.873106ms
|
||||
Bottom 10% Avg Latency: 1.414397ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.008740456s
|
||||
Total Events: 244167
|
||||
Events/sec: 4068.86
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 148 MB
|
||||
Avg Latency: 4.157543ms
|
||||
P90 Latency: 12.228439ms
|
||||
P95 Latency: 15.724716ms
|
||||
P99 Latency: 24.284362ms
|
||||
Bottom 10% Avg Latency: 17.427943ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.005538199s
|
||||
Total Events: 227664
|
||||
Events/sec: 3794.05
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 211 MB
|
||||
Avg Latency: 2.523997ms
|
||||
P90 Latency: 5.269722ms
|
||||
P95 Latency: 7.235855ms
|
||||
P99 Latency: 20.657306ms
|
||||
Bottom 10% Avg Latency: 10.288906ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_next-orly-neo4j_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_next-orly-neo4j_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: next-orly-neo4j
|
||||
RELAY_URL: ws://next-orly-neo4j:8080
|
||||
TEST_TIMESTAMP: 2025-12-04T10:54:54+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_nostr-rs-relay_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764846517510492ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764846517510692ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764846517511210ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764846517511251ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764846517511274ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764846517511304ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764846517511317ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764846517511329ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764846517511340ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764846517511366ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764846517511373ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764846517511388ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764846517511394ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764846517511443ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764846517511452ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764846517511466ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764846517511472ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/04 11:08:37 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 4.118969633s
|
||||
Events/sec: 12138.96
|
||||
Avg latency: 1.937994ms
|
||||
P90 latency: 2.852802ms
|
||||
P95 latency: 3.444328ms
|
||||
P99 latency: 5.727836ms
|
||||
Bottom 10% Avg latency: 946.456µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 403.020917ms
|
||||
Burst completed: 5000 events in 372.371612ms
|
||||
Burst completed: 5000 events in 424.238707ms
|
||||
Burst completed: 5000 events in 385.317421ms
|
||||
Burst completed: 5000 events in 516.841571ms
|
||||
Burst completed: 5000 events in 591.703187ms
|
||||
Burst completed: 5000 events in 445.314485ms
|
||||
Burst completed: 5000 events in 374.011153ms
|
||||
Burst completed: 5000 events in 398.6942ms
|
||||
Burst completed: 5000 events in 365.965806ms
|
||||
Burst test completed: 50000 events in 9.28457886s, errors: 0
|
||||
Events/sec: 5385.27
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.684808581s
|
||||
Combined ops/sec: 2025.54
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 251672 queries in 1m0.006178379s
|
||||
Queries/sec: 4194.10
|
||||
Avg query latency: 4.01666ms
|
||||
P95 query latency: 15.051188ms
|
||||
P99 query latency: 22.451758ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 219001 operations (169001 queries, 50000 writes) in 1m0.004144652s
|
||||
Operations/sec: 3649.76
|
||||
Avg latency: 2.620549ms
|
||||
Avg query latency: 2.844617ms
|
||||
Avg write latency: 1.863195ms
|
||||
P95 latency: 7.482377ms
|
||||
P99 latency: 20.396275ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 4.118969633s
|
||||
Total Events: 50000
|
||||
Events/sec: 12138.96
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 150 MB
|
||||
Avg Latency: 1.937994ms
|
||||
P90 Latency: 2.852802ms
|
||||
P95 Latency: 3.444328ms
|
||||
P99 Latency: 5.727836ms
|
||||
Bottom 10% Avg Latency: 946.456µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.28457886s
|
||||
Total Events: 50000
|
||||
Events/sec: 5385.27
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 259 MB
|
||||
Avg Latency: 2.040218ms
|
||||
P90 Latency: 3.113648ms
|
||||
P95 Latency: 3.901749ms
|
||||
P99 Latency: 6.623842ms
|
||||
Bottom 10% Avg Latency: 930.455µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.684808581s
|
||||
Total Events: 50000
|
||||
Events/sec: 2025.54
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 170 MB
|
||||
Avg Latency: 435.806µs
|
||||
P90 Latency: 909.692µs
|
||||
P95 Latency: 1.063135ms
|
||||
P99 Latency: 1.414473ms
|
||||
Bottom 10% Avg Latency: 1.173081ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.006178379s
|
||||
Total Events: 251672
|
||||
Events/sec: 4194.10
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 159 MB
|
||||
Avg Latency: 4.01666ms
|
||||
P90 Latency: 11.874709ms
|
||||
P95 Latency: 15.051188ms
|
||||
P99 Latency: 22.451758ms
|
||||
Bottom 10% Avg Latency: 16.47537ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.004144652s
|
||||
Total Events: 219001
|
||||
Events/sec: 3649.76
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 154 MB
|
||||
Avg Latency: 2.620549ms
|
||||
P90 Latency: 5.591506ms
|
||||
P95 Latency: 7.482377ms
|
||||
P99 Latency: 20.396275ms
|
||||
Bottom 10% Avg Latency: 10.345145ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_nostr-rs-relay_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: nostr-rs-relay
|
||||
RELAY_URL: ws://nostr-rs-relay:8080
|
||||
TEST_TIMESTAMP: 2025-12-04T11:11:56+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_relayer-basic_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764846109277147ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764846109277265ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764846109277319ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764846109277325ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764846109277335ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764846109277350ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764846109277355ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764846109277363ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764846109277369ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764846109277389ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764846109277396ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764846109277405ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764846109277410ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764846109277424ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764846109277429ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764846109277439ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764846109277443ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/04 11:01:49 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 3.829064715s
|
||||
Events/sec: 13058.02
|
||||
Avg latency: 1.792879ms
|
||||
P90 latency: 2.621872ms
|
||||
P95 latency: 3.153103ms
|
||||
P99 latency: 4.914106ms
|
||||
Bottom 10% Avg latency: 919.64µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 406.089196ms
|
||||
Burst completed: 5000 events in 571.162214ms
|
||||
Burst completed: 5000 events in 417.21044ms
|
||||
Burst completed: 5000 events in 388.695149ms
|
||||
Burst completed: 5000 events in 448.68702ms
|
||||
Burst completed: 5000 events in 349.680067ms
|
||||
Burst completed: 5000 events in 352.379547ms
|
||||
Burst completed: 5000 events in 348.007743ms
|
||||
Burst completed: 5000 events in 396.819076ms
|
||||
Burst completed: 5000 events in 388.190088ms
|
||||
Burst test completed: 50000 events in 9.077665116s, errors: 0
|
||||
Events/sec: 5508.02
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.750507885s
|
||||
Combined ops/sec: 2020.16
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 272535 queries in 1m0.006407297s
|
||||
Queries/sec: 4541.76
|
||||
Avg query latency: 3.702484ms
|
||||
P95 query latency: 14.064278ms
|
||||
P99 query latency: 21.546984ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 236255 operations (186255 queries, 50000 writes) in 1m0.005350378s
|
||||
Operations/sec: 3937.23
|
||||
Avg latency: 2.284443ms
|
||||
Avg query latency: 2.471631ms
|
||||
Avg write latency: 1.58715ms
|
||||
P95 latency: 6.469447ms
|
||||
P99 latency: 17.551758ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 3.829064715s
|
||||
Total Events: 50000
|
||||
Events/sec: 13058.02
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 226 MB
|
||||
Avg Latency: 1.792879ms
|
||||
P90 Latency: 2.621872ms
|
||||
P95 Latency: 3.153103ms
|
||||
P99 Latency: 4.914106ms
|
||||
Bottom 10% Avg Latency: 919.64µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.077665116s
|
||||
Total Events: 50000
|
||||
Events/sec: 5508.02
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 263 MB
|
||||
Avg Latency: 1.938961ms
|
||||
P90 Latency: 2.872088ms
|
||||
P95 Latency: 3.585166ms
|
||||
P99 Latency: 6.443979ms
|
||||
Bottom 10% Avg Latency: 919.151µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.750507885s
|
||||
Total Events: 50000
|
||||
Events/sec: 2020.16
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 173 MB
|
||||
Avg Latency: 448.262µs
|
||||
P90 Latency: 942.865µs
|
||||
P95 Latency: 1.09768ms
|
||||
P99 Latency: 1.554199ms
|
||||
Bottom 10% Avg Latency: 1.241163ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.006407297s
|
||||
Total Events: 272535
|
||||
Events/sec: 4541.76
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 134 MB
|
||||
Avg Latency: 3.702484ms
|
||||
P90 Latency: 10.940029ms
|
||||
P95 Latency: 14.064278ms
|
||||
P99 Latency: 21.546984ms
|
||||
Bottom 10% Avg Latency: 15.564533ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.005350378s
|
||||
Total Events: 236255
|
||||
Events/sec: 3937.23
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 200 MB
|
||||
Avg Latency: 2.284443ms
|
||||
P90 Latency: 4.876796ms
|
||||
P95 Latency: 6.469447ms
|
||||
P99 Latency: 17.551758ms
|
||||
Bottom 10% Avg Latency: 8.957464ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_relayer-basic_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: relayer-basic
|
||||
RELAY_URL: ws://relayer-basic:7447
|
||||
TEST_TIMESTAMP: 2025-12-04T11:05:08+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -0,0 +1,63 @@
|
||||
Starting Network Graph Traversal Benchmark
|
||||
Relay URL: ws://rely-sqlite:3334
|
||||
Workers: 24
|
||||
Pubkeys: 100000, Follows per pubkey: 1-1000
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ NETWORK GRAPH TRAVERSAL BENCHMARK (100k Pubkeys) ║
|
||||
║ Relay: ws://rely-sqlite:3334 ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
Generating 100000 deterministic pubkeys...
|
||||
2025/12/04 11:12:01 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Generated 10000/100000 pubkeys...
|
||||
Generated 20000/100000 pubkeys...
|
||||
Generated 30000/100000 pubkeys...
|
||||
Generated 40000/100000 pubkeys...
|
||||
Generated 50000/100000 pubkeys...
|
||||
Generated 60000/100000 pubkeys...
|
||||
Generated 70000/100000 pubkeys...
|
||||
Generated 80000/100000 pubkeys...
|
||||
Generated 90000/100000 pubkeys...
|
||||
Generated 100000/100000 pubkeys...
|
||||
Generated 100000 pubkeys in 2.699112464s
|
||||
Generating follow graph (1-1000 follows per pubkey)...
|
||||
Generated follow lists for 10000/100000 pubkeys...
|
||||
Generated follow lists for 20000/100000 pubkeys...
|
||||
Generated follow lists for 30000/100000 pubkeys...
|
||||
Generated follow lists for 40000/100000 pubkeys...
|
||||
Generated follow lists for 50000/100000 pubkeys...
|
||||
Generated follow lists for 60000/100000 pubkeys...
|
||||
Generated follow lists for 70000/100000 pubkeys...
|
||||
Generated follow lists for 80000/100000 pubkeys...
|
||||
Generated follow lists for 90000/100000 pubkeys...
|
||||
Generated follow lists for 100000/100000 pubkeys...
|
||||
Generated follow graph in 5.172393834s (avg 500.5 follows/pubkey, total 50048088 follows)
|
||||
|
||||
Connecting to relay: ws://rely-sqlite:3334
|
||||
Connected successfully!
|
||||
Creating follow list events via WebSocket...
|
||||
Queued 10000/100000 follow list events...
|
||||
Queued 20000/100000 follow list events...
|
||||
Queued 30000/100000 follow list events...
|
||||
Queued 40000/100000 follow list events...
|
||||
Queued 50000/100000 follow list events...
|
||||
Queued 60000/100000 follow list events...
|
||||
Queued 70000/100000 follow list events...
|
||||
Queued 80000/100000 follow list events...
|
||||
Queued 90000/100000 follow list events...
|
||||
Queued 100000/100000 follow list events...
|
||||
Created 100000 follow list events in 1m47.750797847s (928.07 events/sec, errors: 0)
|
||||
Avg latency: 5.218453ms, P95: 30.619168ms, P99: 66.455368ms
|
||||
|
||||
Waiting for events to be processed...
|
||||
|
||||
=== Third-Degree Graph Traversal Benchmark (Network) ===
|
||||
Traversing 3 degrees of follows via WebSocket...
|
||||
Sampling 1000 pubkeys for traversal...
|
||||
Killed
|
||||
|
||||
RELAY_NAME: rely-sqlite
|
||||
RELAY_URL: ws://rely-sqlite:3334
|
||||
TEST_TYPE: Graph Traversal
|
||||
STATUS: FAILED
|
||||
TEST_TIMESTAMP: 2025-12-04T13:18:55+00:00
|
||||
@@ -0,0 +1,202 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_rely-sqlite_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764845084601162ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764845084601278ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764845084601338ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764845084601353ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764845084601368ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764845084601398ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764845084601404ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764845084601425ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764845084601432ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764845084601453ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764845084601459ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764845084601470ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764845084601476ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764845084601492ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764845084601498ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764845084601512ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764845084601518ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/04 10:44:44 INFO: Extracted embedded libsecp256k1 to /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
2025/12/04 10:44:44 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 4.863868097s
|
||||
Events/sec: 10279.88
|
||||
Avg latency: 2.303586ms
|
||||
P90 latency: 3.506294ms
|
||||
P95 latency: 4.26606ms
|
||||
P99 latency: 6.589692ms
|
||||
Bottom 10% Avg latency: 1.039748ms
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 490.290781ms
|
||||
Burst completed: 5000 events in 660.13017ms
|
||||
Burst completed: 5000 events in 395.417016ms
|
||||
Burst completed: 5000 events in 386.572933ms
|
||||
Burst completed: 5000 events in 453.417446ms
|
||||
Burst completed: 5000 events in 431.074552ms
|
||||
Burst completed: 5000 events in 425.56635ms
|
||||
Burst completed: 5000 events in 480.609672ms
|
||||
Burst completed: 5000 events in 491.483839ms
|
||||
Burst completed: 5000 events in 855.851556ms
|
||||
Burst test completed: 50000 events in 10.076554319s, errors: 0
|
||||
Events/sec: 4962.01
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.99725206s
|
||||
Combined ops/sec: 2000.22
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 248134 queries in 1m0.010897965s
|
||||
Queries/sec: 4134.82
|
||||
Avg query latency: 4.008215ms
|
||||
P95 query latency: 15.241611ms
|
||||
P99 query latency: 23.364071ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 223423 operations (173423 queries, 50000 writes) in 1m0.003723611s
|
||||
Operations/sec: 3723.49
|
||||
Avg latency: 2.490436ms
|
||||
Avg query latency: 2.752076ms
|
||||
Avg write latency: 1.582945ms
|
||||
P95 latency: 7.431916ms
|
||||
P99 latency: 18.31948ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 4.863868097s
|
||||
Total Events: 50000
|
||||
Events/sec: 10279.88
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 210 MB
|
||||
Avg Latency: 2.303586ms
|
||||
P90 Latency: 3.506294ms
|
||||
P95 Latency: 4.26606ms
|
||||
P99 Latency: 6.589692ms
|
||||
Bottom 10% Avg Latency: 1.039748ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 10.076554319s
|
||||
Total Events: 50000
|
||||
Events/sec: 4962.01
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 306 MB
|
||||
Avg Latency: 2.440058ms
|
||||
P90 Latency: 3.974234ms
|
||||
P95 Latency: 5.200288ms
|
||||
P99 Latency: 9.335708ms
|
||||
Bottom 10% Avg Latency: 1.00845ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.99725206s
|
||||
Total Events: 50000
|
||||
Events/sec: 2000.22
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 270 MB
|
||||
Avg Latency: 457.992µs
|
||||
P90 Latency: 957.983µs
|
||||
P95 Latency: 1.136012ms
|
||||
P99 Latency: 1.617368ms
|
||||
Bottom 10% Avg Latency: 1.292479ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.010897965s
|
||||
Total Events: 248134
|
||||
Events/sec: 4134.82
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 138 MB
|
||||
Avg Latency: 4.008215ms
|
||||
P90 Latency: 11.8477ms
|
||||
P95 Latency: 15.241611ms
|
||||
P99 Latency: 23.364071ms
|
||||
Bottom 10% Avg Latency: 16.87008ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.003723611s
|
||||
Total Events: 223423
|
||||
Events/sec: 3723.49
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 195 MB
|
||||
Avg Latency: 2.490436ms
|
||||
P90 Latency: 5.497334ms
|
||||
P95 Latency: 7.431916ms
|
||||
P99 Latency: 18.31948ms
|
||||
Bottom 10% Avg Latency: 9.827857ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_rely-sqlite_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_rely-sqlite_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: rely-sqlite
|
||||
RELAY_URL: ws://rely-sqlite:3334
|
||||
TEST_TIMESTAMP: 2025-12-04T10:48:05+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
201
cmd/benchmark/reports/run_20251204_104444/strfry_results.txt
Normal file
201
cmd/benchmark/reports/run_20251204_104444/strfry_results.txt
Normal file
@@ -0,0 +1,201 @@
|
||||
Starting Nostr Relay Benchmark (Badger Backend)
|
||||
Data Directory: /tmp/benchmark_strfry_8
|
||||
Events: 50000, Workers: 24, Duration: 1m0s
|
||||
1764846313173994ℹ️ migrating to version 1... /build/pkg/database/migrations.go:68
|
||||
1764846313174100ℹ️ migrating to version 2... /build/pkg/database/migrations.go:75
|
||||
1764846313174135ℹ️ migrating to version 3... /build/pkg/database/migrations.go:82
|
||||
1764846313174143ℹ️ cleaning up ephemeral events (kinds 20000-29999)... /build/pkg/database/migrations.go:304
|
||||
1764846313174154ℹ️ cleaned up 0 ephemeral events from database /build/pkg/database/migrations.go:349
|
||||
1764846313174172ℹ️ migrating to version 4... /build/pkg/database/migrations.go:89
|
||||
1764846313174177ℹ️ converting events to optimized inline storage (Reiser4 optimization)... /build/pkg/database/migrations.go:357
|
||||
1764846313174193ℹ️ found 0 events to convert (0 regular, 0 replaceable, 0 addressable) /build/pkg/database/migrations.go:446
|
||||
1764846313174199ℹ️ migration complete: converted 0 events to optimized inline storage, deleted 0 old keys /build/pkg/database/migrations.go:555
|
||||
1764846313174215ℹ️ migrating to version 5... /build/pkg/database/migrations.go:96
|
||||
1764846313174222ℹ️ re-encoding events with optimized tag binary format... /build/pkg/database/migrations.go:562
|
||||
1764846313174232ℹ️ found 0 events with e/p tags to re-encode /build/pkg/database/migrations.go:649
|
||||
1764846313174238ℹ️ no events need re-encoding /build/pkg/database/migrations.go:652
|
||||
1764846313174259ℹ️ migrating to version 6... /build/pkg/database/migrations.go:103
|
||||
1764846313174264ℹ️ converting events to compact serial-reference format... /build/pkg/database/migrations.go:706
|
||||
1764846313174274ℹ️ found 0 events to convert to compact format /build/pkg/database/migrations.go:846
|
||||
1764846313174282ℹ️ no events need conversion /build/pkg/database/migrations.go:849
|
||||
|
||||
╔════════════════════════════════════════════════════════╗
|
||||
║ BADGER BACKEND BENCHMARK SUITE ║
|
||||
╚════════════════════════════════════════════════════════╝
|
||||
|
||||
=== Starting Badger benchmark ===
|
||||
RunPeakThroughputTest (Badger)..
|
||||
|
||||
=== Peak Throughput Test ===
|
||||
2025/12/04 11:05:13 INFO: Successfully loaded embedded libsecp256k1 v5.0.0 from /tmp/orly-libsecp256k1/libsecp256k1.so
|
||||
Events saved: 50000/50000 (100.0%), errors: 0
|
||||
Duration: 3.876849434s
|
||||
Events/sec: 12897.07
|
||||
Avg latency: 1.815658ms
|
||||
P90 latency: 2.61564ms
|
||||
P95 latency: 3.107597ms
|
||||
P99 latency: 5.258081ms
|
||||
Bottom 10% Avg latency: 919.54µs
|
||||
Wiping database between tests...
|
||||
RunBurstPatternTest (Badger)..
|
||||
|
||||
=== Burst Pattern Test ===
|
||||
Burst completed: 5000 events in 399.187129ms
|
||||
Burst completed: 5000 events in 388.99822ms
|
||||
Burst completed: 5000 events in 402.825697ms
|
||||
Burst completed: 5000 events in 402.426226ms
|
||||
Burst completed: 5000 events in 509.746009ms
|
||||
Burst completed: 5000 events in 360.327121ms
|
||||
Burst completed: 5000 events in 354.620576ms
|
||||
Burst completed: 5000 events in 340.233233ms
|
||||
Burst completed: 5000 events in 484.991889ms
|
||||
Burst completed: 5000 events in 450.540384ms
|
||||
Burst test completed: 50000 events in 9.101582141s, errors: 0
|
||||
Events/sec: 5493.55
|
||||
Wiping database between tests...
|
||||
RunMixedReadWriteTest (Badger)..
|
||||
|
||||
=== Mixed Read/Write Test ===
|
||||
Generating 1000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 1000 events:
|
||||
Average content size: 312 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database for read tests...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Mixed test completed: 25000 writes, 25000 reads in 24.968859674s
|
||||
Combined ops/sec: 2002.49
|
||||
Wiping database between tests...
|
||||
RunQueryTest (Badger)..
|
||||
|
||||
=== Query Test ===
|
||||
Generating 10000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 10000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 10000 events for query tests...
|
||||
Query test completed: 261904 queries in 1m0.006069229s
|
||||
Queries/sec: 4364.63
|
||||
Avg query latency: 3.860709ms
|
||||
P95 query latency: 14.612102ms
|
||||
P99 query latency: 22.708667ms
|
||||
Wiping database between tests...
|
||||
RunConcurrentQueryStoreTest (Badger)..
|
||||
|
||||
=== Concurrent Query/Store Test ===
|
||||
Generating 5000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 5000 events:
|
||||
Average content size: 313 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Pre-populating database with 5000 events for concurrent query/store test...
|
||||
Generating 50000 unique synthetic events (minimum 300 bytes each)...
|
||||
Generated 50000 events:
|
||||
Average content size: 314 bytes
|
||||
All events are unique (incremental timestamps)
|
||||
All events are properly signed
|
||||
|
||||
Concurrent test completed: 230898 operations (180898 queries, 50000 writes) in 1m0.007085265s
|
||||
Operations/sec: 3847.85
|
||||
Avg latency: 2.400221ms
|
||||
Avg query latency: 2.609803ms
|
||||
Avg write latency: 1.641962ms
|
||||
P95 latency: 6.834352ms
|
||||
P99 latency: 18.125521ms
|
||||
|
||||
=== Badger benchmark completed ===
|
||||
|
||||
|
||||
================================================================================
|
||||
BENCHMARK REPORT
|
||||
================================================================================
|
||||
|
||||
Test: Peak Throughput
|
||||
Duration: 3.876849434s
|
||||
Total Events: 50000
|
||||
Events/sec: 12897.07
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 194 MB
|
||||
Avg Latency: 1.815658ms
|
||||
P90 Latency: 2.61564ms
|
||||
P95 Latency: 3.107597ms
|
||||
P99 Latency: 5.258081ms
|
||||
Bottom 10% Avg Latency: 919.54µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Burst Pattern
|
||||
Duration: 9.101582141s
|
||||
Total Events: 50000
|
||||
Events/sec: 5493.55
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 189 MB
|
||||
Avg Latency: 1.954573ms
|
||||
P90 Latency: 2.922786ms
|
||||
P95 Latency: 3.66591ms
|
||||
P99 Latency: 6.353176ms
|
||||
Bottom 10% Avg Latency: 904.101µs
|
||||
----------------------------------------
|
||||
|
||||
Test: Mixed Read/Write
|
||||
Duration: 24.968859674s
|
||||
Total Events: 50000
|
||||
Events/sec: 2002.49
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 188 MB
|
||||
Avg Latency: 443.895µs
|
||||
P90 Latency: 930.312µs
|
||||
P95 Latency: 1.08191ms
|
||||
P99 Latency: 1.476191ms
|
||||
Bottom 10% Avg Latency: 1.222569ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Query Performance
|
||||
Duration: 1m0.006069229s
|
||||
Total Events: 261904
|
||||
Events/sec: 4364.63
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 198 MB
|
||||
Avg Latency: 3.860709ms
|
||||
P90 Latency: 11.381821ms
|
||||
P95 Latency: 14.612102ms
|
||||
P99 Latency: 22.708667ms
|
||||
Bottom 10% Avg Latency: 16.28305ms
|
||||
----------------------------------------
|
||||
|
||||
Test: Concurrent Query/Store
|
||||
Duration: 1m0.007085265s
|
||||
Total Events: 230898
|
||||
Events/sec: 3847.85
|
||||
Success Rate: 100.0%
|
||||
Concurrent Workers: 24
|
||||
Memory Used: 178 MB
|
||||
Avg Latency: 2.400221ms
|
||||
P90 Latency: 5.16819ms
|
||||
P95 Latency: 6.834352ms
|
||||
P99 Latency: 18.125521ms
|
||||
Bottom 10% Avg Latency: 9.340478ms
|
||||
----------------------------------------
|
||||
|
||||
Report saved to: /tmp/benchmark_strfry_8/benchmark_report.txt
|
||||
AsciiDoc report saved to: /tmp/benchmark_strfry_8/benchmark_report.adoc
|
||||
|
||||
RELAY_NAME: strfry
|
||||
RELAY_URL: ws://strfry:8080
|
||||
TEST_TIMESTAMP: 2025-12-04T11:08:32+00:00
|
||||
BENCHMARK_CONFIG:
|
||||
Events: 50000
|
||||
Workers: 24
|
||||
Duration: 60s
|
||||
@@ -3,19 +3,25 @@
|
||||
# Wrapper script to run the benchmark suite and automatically shut down when complete
|
||||
#
|
||||
# Usage:
|
||||
# ./run-benchmark.sh # Use disk-based storage (default)
|
||||
# ./run-benchmark.sh --ramdisk # Use /dev/shm ramdisk for maximum performance
|
||||
# ./run-benchmark.sh # Use disk-based storage (default)
|
||||
# ./run-benchmark.sh --ramdisk # Use /dev/shm ramdisk for maximum performance
|
||||
# ./run-benchmark.sh --graph # Also run graph traversal benchmarks
|
||||
|
||||
set -e
|
||||
|
||||
# Parse command line arguments
|
||||
USE_RAMDISK=false
|
||||
USE_GRAPH_TRAVERSAL=false
|
||||
for arg in "$@"; do
|
||||
case $arg in
|
||||
--ramdisk)
|
||||
USE_RAMDISK=true
|
||||
shift
|
||||
;;
|
||||
--graph)
|
||||
USE_GRAPH_TRAVERSAL=true
|
||||
shift
|
||||
;;
|
||||
--help|-h)
|
||||
echo "Usage: $0 [OPTIONS]"
|
||||
echo ""
|
||||
@@ -23,6 +29,8 @@ for arg in "$@"; do
|
||||
echo " --ramdisk Use /dev/shm ramdisk storage instead of disk"
|
||||
echo " This eliminates disk I/O bottlenecks for accurate"
|
||||
echo " relay performance measurement."
|
||||
echo " --graph Run graph traversal benchmarks (100k pubkeys,"
|
||||
echo " 1-1000 follows each, 3-degree traversal)"
|
||||
echo " --help, -h Show this help message"
|
||||
echo ""
|
||||
echo "Requirements for --ramdisk:"
|
||||
@@ -39,6 +47,9 @@ for arg in "$@"; do
|
||||
esac
|
||||
done
|
||||
|
||||
# Export graph traversal setting for docker-compose
|
||||
export BENCHMARK_GRAPH_TRAVERSAL="${USE_GRAPH_TRAVERSAL}"
|
||||
|
||||
# Determine docker-compose command
|
||||
if docker compose version &> /dev/null 2>&1; then
|
||||
DOCKER_COMPOSE="docker compose"
|
||||
@@ -97,6 +108,17 @@ else
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# Show graph traversal status
|
||||
if [ "$USE_GRAPH_TRAVERSAL" = true ]; then
|
||||
echo "======================================================"
|
||||
echo " GRAPH TRAVERSAL BENCHMARK ENABLED"
|
||||
echo "======================================================"
|
||||
echo " Will test 100k pubkeys with 1-1000 follows each"
|
||||
echo " performing 3-degree graph traversal queries"
|
||||
echo "======================================================"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# Clean old data directories (may be owned by root from Docker)
|
||||
if [ -d "${DATA_BASE}" ]; then
|
||||
echo "Cleaning old data directories at ${DATA_BASE}..."
|
||||
@@ -136,17 +158,17 @@ echo "Preparing data directories at ${DATA_BASE}..."
|
||||
|
||||
if [ "$USE_RAMDISK" = true ]; then
|
||||
# Create ramdisk directories
|
||||
mkdir -p "${DATA_BASE}"/{next-orly-badger,next-orly-dgraph,next-orly-neo4j,dgraph-zero,dgraph-alpha,neo4j,neo4j-logs,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,rely-sqlite,postgres}
|
||||
chmod 777 "${DATA_BASE}"/{next-orly-badger,next-orly-dgraph,next-orly-neo4j,dgraph-zero,dgraph-alpha,neo4j,neo4j-logs,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,rely-sqlite,postgres}
|
||||
mkdir -p "${DATA_BASE}"/{next-orly-badger,next-orly-neo4j,neo4j,neo4j-logs,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,rely-sqlite,postgres}
|
||||
chmod 777 "${DATA_BASE}"/{next-orly-badger,next-orly-neo4j,neo4j,neo4j-logs,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,rely-sqlite,postgres}
|
||||
else
|
||||
# Create disk directories (relative path)
|
||||
mkdir -p data/{next-orly-badger,next-orly-dgraph,next-orly-neo4j,dgraph-zero,dgraph-alpha,neo4j,neo4j-logs,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,rely-sqlite,postgres}
|
||||
chmod 777 data/{next-orly-badger,next-orly-dgraph,next-orly-neo4j,dgraph-zero,dgraph-alpha,neo4j,neo4j-logs,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,rely-sqlite,postgres}
|
||||
mkdir -p data/{next-orly-badger,next-orly-neo4j,neo4j,neo4j-logs,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,rely-sqlite,postgres}
|
||||
chmod 777 data/{next-orly-badger,next-orly-neo4j,neo4j,neo4j-logs,khatru-sqlite,khatru-badger,relayer-basic,strfry,nostr-rs-relay,rely-sqlite,postgres}
|
||||
fi
|
||||
|
||||
echo "Building fresh Docker images..."
|
||||
# Force rebuild to pick up latest code changes
|
||||
$DOCKER_COMPOSE $COMPOSE_FILES build --no-cache benchmark-runner next-orly-badger next-orly-dgraph next-orly-neo4j rely-sqlite
|
||||
$DOCKER_COMPOSE $COMPOSE_FILES build --no-cache benchmark-runner next-orly-badger next-orly-neo4j rely-sqlite
|
||||
|
||||
echo ""
|
||||
echo "Starting benchmark suite..."
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
"next.orly.dev/pkg/interfaces/neterr"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -90,7 +91,7 @@ func main() {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
if netErr, ok := err.(interface{ Timeout() bool }); ok && netErr.Timeout() {
|
||||
if netErr, ok := err.(neterr.TimeoutError); ok && netErr.Timeout() {
|
||||
continue
|
||||
}
|
||||
log.Printf("Read error: %v", err)
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
"next.orly.dev/pkg/interfaces/neterr"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -123,7 +124,7 @@ func main() {
|
||||
}
|
||||
|
||||
// Check for timeout errors (these are expected during idle periods)
|
||||
if netErr, ok := err.(interface{ Timeout() bool }); ok && netErr.Timeout() {
|
||||
if netErr, ok := err.(neterr.TimeoutError); ok && netErr.Timeout() {
|
||||
consecutiveTimeouts++
|
||||
if consecutiveTimeouts >= maxConsecutiveTimeouts {
|
||||
log.Printf("Too many consecutive read timeouts (%d), connection may be dead", consecutiveTimeouts)
|
||||
|
||||
347
docs/GRAPH_IMPLEMENTATION_PHASES.md
Normal file
347
docs/GRAPH_IMPLEMENTATION_PHASES.md
Normal file
@@ -0,0 +1,347 @@
|
||||
# Graph Query Implementation Phases
|
||||
|
||||
This document provides a clear breakdown of implementation phases for NIP-XX Graph Queries.
|
||||
|
||||
---
|
||||
|
||||
## Phase 0: Filter Extension Parsing (Foundation) ✅ COMPLETE
|
||||
|
||||
**Goal**: Enable the nostr library to correctly "ignore" unknown filter fields per NIP-01, while preserving them for relay-level processing.
|
||||
|
||||
### Deliverables (Completed)
|
||||
- [x] Modified `filter.F` struct with `Extra` field
|
||||
- [x] Modified `Unmarshal()` to skip unknown keys
|
||||
- [x] `skipJSONValue()` helper function
|
||||
- [x] `graph.ExtractFromFilter()` function
|
||||
- [x] Integration in `handle-req.go`
|
||||
- [x] Rate limiter with token bucket for graph queries
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: E-Tag Graph Index ✅ COMPLETE
|
||||
|
||||
**Goal**: Create bidirectional indexes for event-to-event references (e-tags).
|
||||
|
||||
### Index Key Structure
|
||||
|
||||
```
|
||||
Event-Event Graph (Forward): eeg
|
||||
eeg|source_event_serial(5)|target_event_serial(5)|kind(2)|direction(1) = 16 bytes
|
||||
|
||||
Event-Event Graph (Reverse): gee
|
||||
gee|target_event_serial(5)|kind(2)|direction(1)|source_event_serial(5) = 16 bytes
|
||||
```
|
||||
|
||||
### Direction Constants
|
||||
- `EdgeDirectionETagOut = 0` - Event references another event (outbound)
|
||||
- `EdgeDirectionETagIn = 1` - Event is referenced by another (inbound)
|
||||
|
||||
### Deliverables (Completed)
|
||||
- [x] Index key definitions for eeg/gee (`pkg/database/indexes/keys.go`)
|
||||
- [x] Direction constants for e-tags (`pkg/database/indexes/types/letter.go`)
|
||||
- [x] E-tag graph creation in SaveEvent (`pkg/database/save-event.go`)
|
||||
- [x] Tests for e-tag graph creation (`pkg/database/etag-graph_test.go`)
|
||||
|
||||
**Key Bug Fix**: Buffer reuse in transaction required copying key bytes before writing second key to prevent overwrite.
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Graph Traversal Primitives ✅ COMPLETE
|
||||
|
||||
**Goal**: Implement pure index-based graph traversal functions.
|
||||
|
||||
### 2.1 Core traversal functions
|
||||
|
||||
**File**: `pkg/database/graph-traversal.go`
|
||||
|
||||
```go
|
||||
// Core primitives (no event decoding required)
|
||||
func (d *D) GetPTagsFromEventSerial(eventSerial *types.Uint40) ([]*types.Uint40, error)
|
||||
func (d *D) GetETagsFromEventSerial(eventSerial *types.Uint40) ([]*types.Uint40, error)
|
||||
func (d *D) GetReferencingEvents(targetSerial *types.Uint40, kinds []uint16) ([]*types.Uint40, error)
|
||||
func (d *D) GetFollowsFromPubkeySerial(pubkeySerial *types.Uint40) ([]*types.Uint40, error)
|
||||
func (d *D) GetFollowersOfPubkeySerial(pubkeySerial *types.Uint40) ([]*types.Uint40, error)
|
||||
func (d *D) GetPubkeyHexFromSerial(serial *types.Uint40) (string, error)
|
||||
func (d *D) GetEventIDFromSerial(serial *types.Uint40) (string, error)
|
||||
```
|
||||
|
||||
### 2.2 GraphResult struct
|
||||
|
||||
**File**: `pkg/database/graph-result.go`
|
||||
|
||||
```go
|
||||
// GraphResult contains depth-organized traversal results
|
||||
type GraphResult struct {
|
||||
PubkeysByDepth map[int][]string // depth -> pubkeys first discovered at that depth
|
||||
EventsByDepth map[int][]string // depth -> events discovered at that depth
|
||||
FirstSeenPubkey map[string]int // pubkey hex -> depth where first seen
|
||||
FirstSeenEvent map[string]int // event hex -> depth where first seen
|
||||
TotalPubkeys int
|
||||
TotalEvents int
|
||||
InboundRefs map[uint16]map[string][]string // kind -> target -> []referencing_ids
|
||||
OutboundRefs map[uint16]map[string][]string // kind -> source -> []referenced_ids
|
||||
}
|
||||
|
||||
func (r *GraphResult) ToDepthArrays() [][]string // For pubkey results
|
||||
func (r *GraphResult) ToEventDepthArrays() [][]string // For event results
|
||||
func (r *GraphResult) GetInboundRefsSorted(kind uint16) []RefAggregation
|
||||
func (r *GraphResult) GetOutboundRefsSorted(kind uint16) []RefAggregation
|
||||
```
|
||||
|
||||
### Deliverables (Completed)
|
||||
- [x] Core traversal functions in `graph-traversal.go`
|
||||
- [x] GraphResult struct with ToDepthArrays() and ToEventDepthArrays()
|
||||
- [x] RefAggregation struct with sorted accessors
|
||||
- [x] Tests in `graph-result_test.go` and `graph-traversal_test.go`
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: High-Level Traversals ✅ COMPLETE
|
||||
|
||||
**Goal**: Implement the graph query methods (follows, followers, mentions, thread).
|
||||
|
||||
### 3.1 Follow graph traversal
|
||||
|
||||
**File**: `pkg/database/graph-follows.go`
|
||||
|
||||
```go
|
||||
// TraverseFollows performs BFS traversal of the follow graph
|
||||
// Returns pubkeys grouped by first-discovered depth (no duplicates across depths)
|
||||
func (d *D) TraverseFollows(seedPubkey []byte, maxDepth int) (*GraphResult, error)
|
||||
|
||||
// TraverseFollowers performs BFS traversal to find who follows the seed pubkey
|
||||
func (d *D) TraverseFollowers(seedPubkey []byte, maxDepth int) (*GraphResult, error)
|
||||
|
||||
// Hex convenience wrappers
|
||||
func (d *D) TraverseFollowsFromHex(seedPubkeyHex string, maxDepth int) (*GraphResult, error)
|
||||
func (d *D) TraverseFollowersFromHex(seedPubkeyHex string, maxDepth int) (*GraphResult, error)
|
||||
```
|
||||
|
||||
### 3.2 Other traversals
|
||||
|
||||
**File**: `pkg/database/graph-mentions.go`
|
||||
```go
|
||||
func (d *D) FindMentions(pubkey []byte, kinds []uint16) (*GraphResult, error)
|
||||
func (d *D) FindMentionsFromHex(pubkeyHex string, kinds []uint16) (*GraphResult, error)
|
||||
func (d *D) FindMentionsByPubkeys(pubkeySerials []*types.Uint40, kinds []uint16) (*GraphResult, error)
|
||||
```
|
||||
|
||||
**File**: `pkg/database/graph-thread.go`
|
||||
```go
|
||||
func (d *D) TraverseThread(seedEventID []byte, maxDepth int, direction string) (*GraphResult, error)
|
||||
func (d *D) TraverseThreadFromHex(seedEventIDHex string, maxDepth int, direction string) (*GraphResult, error)
|
||||
func (d *D) GetThreadReplies(eventID []byte, kinds []uint16) (*GraphResult, error)
|
||||
func (d *D) GetThreadParents(eventID []byte) (*GraphResult, error)
|
||||
```
|
||||
|
||||
### 3.3 Ref aggregation
|
||||
|
||||
**File**: `pkg/database/graph-refs.go`
|
||||
```go
|
||||
func (d *D) AddInboundRefsToResult(result *GraphResult, depth int, kinds []uint16) error
|
||||
func (d *D) AddOutboundRefsToResult(result *GraphResult, depth int, kinds []uint16) error
|
||||
func (d *D) CollectRefsForPubkeys(pubkeySerials []*types.Uint40, refKinds []uint16, eventKinds []uint16) (*GraphResult, error)
|
||||
```
|
||||
|
||||
### Deliverables (Completed)
|
||||
- [x] TraverseFollows with early termination (2 consecutive empty depths)
|
||||
- [x] TraverseFollowers
|
||||
- [x] FindMentions and FindMentionsByPubkeys
|
||||
- [x] TraverseThread with bidirectional traversal
|
||||
- [x] Inbound/Outbound ref aggregation
|
||||
- [x] Tests in `graph-follows_test.go`
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: Graph Query Handler and Response Generation ✅ COMPLETE
|
||||
|
||||
**Goal**: Wire up the REQ handler to execute graph queries and generate relay-signed response events.
|
||||
|
||||
### 4.1 Response Event Generation
|
||||
|
||||
**Key Design Decision**: All graph query responses are returned as **relay-signed events**, enabling:
|
||||
- Standard client validation (no special handling)
|
||||
- Result caching and storage on relays
|
||||
- Cryptographic proof of origin
|
||||
|
||||
### 4.2 Response Kinds (Implemented)
|
||||
|
||||
| Kind | Name | Description |
|
||||
|------|------|-------------|
|
||||
| 39000 | Graph Follows | Response for follows/followers queries |
|
||||
| 39001 | Graph Mentions | Response for mentions queries |
|
||||
| 39002 | Graph Thread | Response for thread traversal queries |
|
||||
|
||||
### 4.3 Implementation Files
|
||||
|
||||
**New files:**
|
||||
- `pkg/protocol/graph/executor.go` - Executes graph queries and generates signed responses
|
||||
- `pkg/database/graph-adapter.go` - Adapts database to `graph.GraphDatabase` interface
|
||||
|
||||
**Modified files:**
|
||||
- `app/server.go` - Added `graphExecutor` field
|
||||
- `app/main.go` - Initialize graph executor on startup
|
||||
- `app/handle-req.go` - Execute graph queries and return results
|
||||
|
||||
### 4.4 Response Format (Implemented)
|
||||
|
||||
The response is a relay-signed event with JSON content:
|
||||
|
||||
```go
|
||||
type ResponseContent struct {
|
||||
PubkeysByDepth [][]string `json:"pubkeys_by_depth,omitempty"`
|
||||
EventsByDepth [][]string `json:"events_by_depth,omitempty"`
|
||||
TotalPubkeys int `json:"total_pubkeys,omitempty"`
|
||||
TotalEvents int `json:"total_events,omitempty"`
|
||||
}
|
||||
```
|
||||
|
||||
**Example response event:**
|
||||
```json
|
||||
{
|
||||
"kind": 39000,
|
||||
"pubkey": "<relay_identity_pubkey>",
|
||||
"created_at": 1704067200,
|
||||
"tags": [
|
||||
["method", "follows"],
|
||||
["seed", "<seed_pubkey_hex>"],
|
||||
["depth", "2"]
|
||||
],
|
||||
"content": "{\"pubkeys_by_depth\":[[\"pk1\",\"pk2\"],[\"pk3\",\"pk4\"]],\"total_pubkeys\":4}",
|
||||
"sig": "<relay_signature>"
|
||||
}
|
||||
|
||||
### Deliverables (Completed)
|
||||
- [x] Graph executor with query routing (`pkg/protocol/graph/executor.go`)
|
||||
- [x] Response event generation with relay signature
|
||||
- [x] GraphDatabase interface and adapter
|
||||
- [x] Integration in `handle-req.go`
|
||||
- [x] All tests passing
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Migration & Configuration
|
||||
|
||||
**Goal**: Enable backfilling and configuration.
|
||||
|
||||
### 5.1 E-tag graph backfill migration
|
||||
|
||||
**File**: `pkg/database/migrations.go`
|
||||
|
||||
```go
|
||||
func (d *D) MigrateETagGraph() error {
|
||||
// Iterate all events
|
||||
// Extract e-tags
|
||||
// Create eeg/gee edges for targets that exist
|
||||
}
|
||||
```
|
||||
|
||||
### 5.2 Configuration
|
||||
|
||||
**File**: `app/config/config.go`
|
||||
|
||||
Add:
|
||||
- `ORLY_GRAPH_QUERIES_ENABLED` - enable/disable feature
|
||||
- `ORLY_GRAPH_MAX_DEPTH` - maximum traversal depth (default 16)
|
||||
- `ORLY_GRAPH_RATE_LIMIT` - queries per minute per connection
|
||||
|
||||
### 5.3 NIP-11 advertisement
|
||||
|
||||
Update relay info document to advertise support and limits.
|
||||
|
||||
### Deliverables
|
||||
- [ ] Backfill migration
|
||||
- [ ] Configuration options
|
||||
- [ ] NIP-11 advertisement
|
||||
- [ ] Documentation updates
|
||||
|
||||
---
|
||||
|
||||
## Summary: Implementation Order
|
||||
|
||||
| Phase | Description | Status | Dependencies |
|
||||
|-------|-------------|--------|--------------|
|
||||
| **0** | Filter extension parsing | ✅ Complete | None |
|
||||
| **1** | E-tag graph index | ✅ Complete | Phase 0 |
|
||||
| **2** | Graph traversal primitives | ✅ Complete | Phase 1 |
|
||||
| **3** | High-level traversals | ✅ Complete | Phase 2 |
|
||||
| **4** | Graph query handler | ✅ Complete | Phase 3 |
|
||||
| **5** | Migration & configuration | Pending | Phase 4 |
|
||||
|
||||
---
|
||||
|
||||
## Response Format Summary
|
||||
|
||||
### Graph-Only Query (no kinds filter)
|
||||
|
||||
**Request:**
|
||||
```json
|
||||
["REQ", "sub", {"_graph": {"method": "follows", "seed": "abc...", "depth": 2}}]
|
||||
```
|
||||
|
||||
**Response:** Single signed event with depth arrays
|
||||
```json
|
||||
["EVENT", "sub", {
|
||||
"kind": 39000,
|
||||
"pubkey": "<relay_pubkey>",
|
||||
"content": "[[\"depth1_pk1\",\"depth1_pk2\"],[\"depth2_pk3\",\"depth2_pk4\"]]",
|
||||
"tags": [["d","follows:abc...:2"],["method","follows"],["seed","abc..."],["depth","2"]],
|
||||
"sig": "..."
|
||||
}]
|
||||
["EOSE", "sub"]
|
||||
```
|
||||
|
||||
### Query with Event Filters
|
||||
|
||||
**Request:**
|
||||
```json
|
||||
["REQ", "sub", {"_graph": {"method": "follows", "seed": "abc...", "depth": 2}, "kinds": [0]}]
|
||||
```
|
||||
|
||||
**Response:** Graph result + events in depth order
|
||||
```
|
||||
["EVENT", "sub", <kind-39000 graph result>]
|
||||
["EVENT", "sub", <kind-0 for depth-1 pubkey>]
|
||||
["EVENT", "sub", <kind-0 for depth-1 pubkey>]
|
||||
["EVENT", "sub", <kind-0 for depth-2 pubkey>]
|
||||
...
|
||||
["EOSE", "sub"]
|
||||
```
|
||||
|
||||
### Query with Reference Aggregation
|
||||
|
||||
**Request:**
|
||||
```json
|
||||
["REQ", "sub", {"_graph": {"method": "follows", "seed": "abc...", "depth": 1, "inbound_refs": [{"kinds": [7]}]}}]
|
||||
```
|
||||
|
||||
**Response:** Graph result + refs sorted by count (descending)
|
||||
```
|
||||
["EVENT", "sub", <kind-39000 with ref summaries>]
|
||||
["EVENT", "sub", <kind-39001 target with 523 refs>]
|
||||
["EVENT", "sub", <kind-39001 target with 312 refs>]
|
||||
["EVENT", "sub", <kind-39001 target with 1 ref>]
|
||||
["EOSE", "sub"]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
### Unit Tests
|
||||
- Filter parsing with unknown fields
|
||||
- Index key encoding/decoding
|
||||
- Traversal primitives
|
||||
- Result depth array generation
|
||||
- Reference sorting
|
||||
|
||||
### Integration Tests
|
||||
- Full graph query round-trip
|
||||
- Response format validation
|
||||
- Signature verification
|
||||
- Backward compatibility (non-graph REQs still work)
|
||||
|
||||
### Performance Tests
|
||||
- Traversal latency at various depths
|
||||
- Memory usage for large graphs
|
||||
- Comparison with event-decoding approach
|
||||
1753
docs/GRAPH_QUERY_IMPLEMENTATION_PLAN.md
Normal file
1753
docs/GRAPH_QUERY_IMPLEMENTATION_PLAN.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -177,6 +177,10 @@ LIMIT $limit
|
||||
|
||||
## Configuration
|
||||
|
||||
All configuration is centralized in `app/config/config.go` and visible via `./orly help`.
|
||||
|
||||
> **Important:** All environment variables must be defined in `app/config/config.go`. Do not use `os.Getenv()` directly in package code. Database backends receive configuration via the `database.DatabaseConfig` struct.
|
||||
|
||||
### Environment Variables
|
||||
|
||||
```bash
|
||||
|
||||
612
docs/NIP-XX-GRAPH-QUERIES.md
Normal file
612
docs/NIP-XX-GRAPH-QUERIES.md
Normal file
@@ -0,0 +1,612 @@
|
||||
# NIP-XX: Graph Queries
|
||||
|
||||
`draft` `optional`
|
||||
|
||||
This NIP defines an extension to the REQ message filter that enables efficient social graph traversal queries without requiring clients to fetch and decode large numbers of events.
|
||||
|
||||
## Motivation
|
||||
|
||||
Nostr's social graph is encoded in event tags:
|
||||
- **Follow relationships**: Kind-3 events with `p` tags listing followed pubkeys
|
||||
- **Event references**: `e` tags linking replies, reactions, reposts to their targets
|
||||
- **Mentions**: `p` tags in any event kind referencing other users
|
||||
|
||||
Clients building social features (timelines, notifications, discovery) must currently:
|
||||
1. Fetch kind-3 events for each user
|
||||
2. Decode JSON to extract `p` tags
|
||||
3. Recursively fetch more events for multi-hop queries
|
||||
4. Aggregate and count references client-side
|
||||
|
||||
This is inefficient, especially for:
|
||||
- **Multi-hop follow graphs** (friends-of-friends)
|
||||
- **Reaction/reply counts** on posts
|
||||
- **Thread traversal** for long conversations
|
||||
- **Follower discovery** (who follows this user?)
|
||||
|
||||
Relays with graph-indexed storage can answer these queries orders of magnitude faster by traversing indexes directly without event decoding.
|
||||
|
||||
## Protocol Extension
|
||||
|
||||
### Filter Extension: `_graph`
|
||||
|
||||
The `_graph` field is added to REQ filters. Per NIP-01, unknown fields are ignored by relays that don't support this extension, ensuring backward compatibility.
|
||||
|
||||
```json
|
||||
["REQ", "<subscription_id>", {
|
||||
"_graph": {
|
||||
"method": "<method>",
|
||||
"seed": "<hex>",
|
||||
"depth": <number>,
|
||||
"inbound_refs": [<ref_spec>, ...],
|
||||
"outbound_refs": [<ref_spec>, ...]
|
||||
},
|
||||
"kinds": [<kind>, ...]
|
||||
}]
|
||||
```
|
||||
|
||||
### Fields
|
||||
|
||||
#### `method` (required)
|
||||
|
||||
The graph traversal method to execute:
|
||||
|
||||
| Method | Seed Type | Description |
|
||||
|--------|-----------|-------------|
|
||||
| `follows` | pubkey | Traverse outbound follow relationships via kind-3 `p` tags |
|
||||
| `followers` | pubkey | Find pubkeys whose kind-3 events contain `p` tag to seed |
|
||||
| `mentions` | pubkey | Find events with `p` tag referencing seed pubkey |
|
||||
| `thread` | event ID | Traverse reply chain via `e` tags |
|
||||
|
||||
#### `seed` (required)
|
||||
|
||||
64-character hex string. Interpretation depends on `method`:
|
||||
- For `follows`, `followers`, `mentions`: pubkey hex
|
||||
- For `thread`: event ID hex
|
||||
|
||||
#### `depth` (optional)
|
||||
|
||||
Maximum traversal depth. Integer from 1-16. Default: 1.
|
||||
|
||||
- `depth: 1` returns direct connections only
|
||||
- `depth: 2` returns connections and their connections (friends-of-friends)
|
||||
- Higher depths expand the graph further
|
||||
|
||||
**Early termination**: Traversal stops before reaching `depth` if two consecutive depth levels yield no new pubkeys. This prevents unnecessary work when the graph is exhausted.
|
||||
|
||||
#### `inbound_refs` (optional)
|
||||
|
||||
Array of reference specifications for finding events that **reference** discovered events (via `e` tags). Used to find reactions, replies, reposts, zaps, etc.
|
||||
|
||||
```json
|
||||
"inbound_refs": [
|
||||
{"kinds": [7], "from_depth": 1},
|
||||
{"kinds": [1, 6], "from_depth": 0}
|
||||
]
|
||||
```
|
||||
|
||||
#### `outbound_refs` (optional)
|
||||
|
||||
Array of reference specifications for finding events **referenced by** discovered events (via `e` tags). Used to find what posts are being replied to, quoted, etc.
|
||||
|
||||
```json
|
||||
"outbound_refs": [
|
||||
{"kinds": [1], "from_depth": 1}
|
||||
]
|
||||
```
|
||||
|
||||
#### Reference Specification (`ref_spec`)
|
||||
|
||||
```json
|
||||
{
|
||||
"kinds": [<kind>, ...],
|
||||
"from_depth": <number>
|
||||
}
|
||||
```
|
||||
|
||||
- `kinds`: Event kinds to match (required, non-empty array)
|
||||
- `from_depth`: Only apply this filter from this depth onwards (optional, default: 0)
|
||||
|
||||
**Semantics:**
|
||||
- Multiple `ref_spec` objects in an array have **AND** semantics (all must match)
|
||||
- Multiple kinds within a single `ref_spec` have **OR** semantics (any kind matches)
|
||||
- `from_depth: 0` includes references to/from the seed itself
|
||||
- `from_depth: 1` starts from first-hop connections
|
||||
|
||||
#### `kinds` (standard filter field)
|
||||
|
||||
When present alongside `_graph`, specifies which event kinds to return for discovered pubkeys (e.g., kind-0 profiles, kind-1 notes).
|
||||
|
||||
## Response Format
|
||||
|
||||
### Relay-Signed Result Events
|
||||
|
||||
All graph query responses are returned as **signed Nostr events** created by the relay using its identity key. This design provides several benefits:
|
||||
|
||||
1. **Standard validation**: Clients validate the response like any normal event - no special handling needed
|
||||
2. **Caching**: Results can be stored on relays and retrieved later
|
||||
3. **Transparency**: The relay's pubkey identifies who produced the result
|
||||
4. **Cryptographic binding**: The signature proves the result came from a specific relay
|
||||
|
||||
### Response Kinds
|
||||
|
||||
| Kind | Name | Description |
|
||||
|------|------|-------------|
|
||||
| 39000 | Graph Follows | Response for follows/followers queries |
|
||||
| 39001 | Graph Mentions | Response for mentions queries |
|
||||
| 39002 | Graph Thread | Response for thread traversal queries |
|
||||
|
||||
These are application-specific kinds in the 39000-39999 range.
|
||||
|
||||
---
|
||||
|
||||
## Simple Query Response (graph-only filter)
|
||||
|
||||
When a REQ contains **only** the `_graph` field (no `kinds`, `authors`, or other filter fields), the relay returns a single signed event containing the graph traversal results organized by depth.
|
||||
|
||||
### Request Format
|
||||
|
||||
```json
|
||||
["REQ", "<sub>", {
|
||||
"_graph": {
|
||||
"method": "follows",
|
||||
"seed": "<pubkey_hex>",
|
||||
"depth": 3
|
||||
}
|
||||
}]
|
||||
```
|
||||
|
||||
### Response: Kind 39000 Graph Result Event
|
||||
|
||||
```json
|
||||
{
|
||||
"kind": 39000,
|
||||
"pubkey": "<relay_identity_pubkey>",
|
||||
"created_at": <timestamp>,
|
||||
"tags": [
|
||||
["method", "follows"],
|
||||
["seed", "<seed_hex>"],
|
||||
["depth", "3"]
|
||||
],
|
||||
"content": "{\"pubkeys_by_depth\":[[\"pubkey1\",\"pubkey2\"],[\"pubkey3\",\"pubkey4\"]],\"total_pubkeys\":4}",
|
||||
"id": "<event_id>",
|
||||
"sig": "<relay_signature>"
|
||||
}
|
||||
```
|
||||
|
||||
### Content Structure
|
||||
|
||||
The `content` field contains a JSON object with depth arrays:
|
||||
|
||||
```json
|
||||
{
|
||||
"pubkeys_by_depth": [
|
||||
["<pubkey_depth_1>", "<pubkey_depth_1>", ...],
|
||||
["<pubkey_depth_2>", "<pubkey_depth_2>", ...],
|
||||
["<pubkey_depth_3>", "<pubkey_depth_3>", ...]
|
||||
],
|
||||
"total_pubkeys": 150
|
||||
}
|
||||
```
|
||||
|
||||
For event-based queries (mentions, thread), the structure is:
|
||||
|
||||
```json
|
||||
{
|
||||
"events_by_depth": [
|
||||
["<event_id_depth_1>", ...],
|
||||
["<event_id_depth_2>", ...]
|
||||
],
|
||||
"total_events": 42
|
||||
}
|
||||
```
|
||||
|
||||
**Key properties:**
|
||||
- **Array index = depth - 1**: Index 0 contains depth-1 pubkeys (direct follows)
|
||||
- **Unique per depth**: Each pubkey/event appears only at the depth where it was **first discovered**
|
||||
- **No duplicates**: A pubkey in depth 1 will NOT appear in depth 2 or 3
|
||||
- **Hex format**: All pubkeys and event IDs are 64-character lowercase hex strings
|
||||
|
||||
### Example
|
||||
|
||||
Alice follows Bob and Carol. Bob follows Dave. Carol follows Dave and Eve.
|
||||
|
||||
Request:
|
||||
```json
|
||||
["REQ", "follow-net", {
|
||||
"_graph": {
|
||||
"method": "follows",
|
||||
"seed": "<alice_pubkey>",
|
||||
"depth": 2
|
||||
}
|
||||
}]
|
||||
```
|
||||
|
||||
Response:
|
||||
```json
|
||||
["EVENT", "follow-net", {
|
||||
"kind": 39000,
|
||||
"pubkey": "<relay_pubkey>",
|
||||
"created_at": 1704067200,
|
||||
"tags": [
|
||||
["method", "follows"],
|
||||
["seed", "<alice_pubkey>"],
|
||||
["depth", "2"]
|
||||
],
|
||||
"content": "{\"pubkeys_by_depth\":[[\"<bob_pubkey>\",\"<carol_pubkey>\"],[\"<dave_pubkey>\",\"<eve_pubkey>\"]],\"total_pubkeys\":4}",
|
||||
"sig": "<signature>"
|
||||
}]
|
||||
["EOSE", "follow-net"]
|
||||
```
|
||||
|
||||
**Interpretation:**
|
||||
- Depth 1 (index 0): Bob, Carol (Alice's direct follows)
|
||||
- Depth 2 (index 1): Dave, Eve (friends-of-friends, excluding Bob and Carol)
|
||||
- Note: Dave appears only once even though both Bob and Carol follow Dave
|
||||
|
||||
---
|
||||
|
||||
## Query with Additional Filters
|
||||
|
||||
When the REQ includes both `_graph` AND other filter fields (like `kinds`), the relay:
|
||||
|
||||
1. Executes the graph traversal to discover pubkeys
|
||||
2. Fetches the requested events for those pubkeys
|
||||
3. Returns events in **ascending depth order**
|
||||
|
||||
### Request Format
|
||||
|
||||
```json
|
||||
["REQ", "<sub>", {
|
||||
"_graph": {
|
||||
"method": "follows",
|
||||
"seed": "<pubkey_hex>",
|
||||
"depth": 2
|
||||
},
|
||||
"kinds": [0, 1]
|
||||
}]
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
```
|
||||
["EVENT", "<sub>", <kind-39000 graph result event>]
|
||||
["EVENT", "<sub>", <kind-0 profile for depth-1 pubkey>]
|
||||
["EVENT", "<sub>", <kind-1 note for depth-1 pubkey>]
|
||||
... (all depth-1 events)
|
||||
["EVENT", "<sub>", <kind-0 profile for depth-2 pubkey>]
|
||||
["EVENT", "<sub>", <kind-1 note for depth-2 pubkey>]
|
||||
... (all depth-2 events)
|
||||
["EOSE", "<sub>"]
|
||||
```
|
||||
|
||||
The graph result event (kind 39000) is sent first, allowing clients to know the complete graph structure before receiving individual events.
|
||||
|
||||
---
|
||||
|
||||
## Query with Reference Aggregation (Planned)
|
||||
|
||||
> **Note:** Reference aggregation is planned for a future implementation phase. The following describes the intended behavior.
|
||||
|
||||
When `inbound_refs` or `outbound_refs` are specified, the response will include aggregated reference data **sorted by count descending** (most referenced first).
|
||||
|
||||
### Request Format
|
||||
|
||||
```json
|
||||
["REQ", "popular-posts", {
|
||||
"_graph": {
|
||||
"method": "follows",
|
||||
"seed": "<pubkey_hex>",
|
||||
"depth": 1,
|
||||
"inbound_refs": [
|
||||
{"kinds": [7], "from_depth": 1}
|
||||
]
|
||||
}
|
||||
}]
|
||||
```
|
||||
|
||||
### Response (Planned)
|
||||
|
||||
```
|
||||
["EVENT", "popular-posts", <kind-39000 graph result with ref summaries>]
|
||||
["EVENT", "popular-posts", <aggregated ref event with 523 reactions>]
|
||||
["EVENT", "popular-posts", <aggregated ref event with 312 reactions>]
|
||||
...
|
||||
["EVENT", "popular-posts", <aggregated ref event with 1 reaction>]
|
||||
["EOSE", "popular-posts"]
|
||||
```
|
||||
|
||||
### Kind 39001: Graph Mentions Result
|
||||
|
||||
Used for `mentions` queries. Contains events that mention the seed pubkey:
|
||||
|
||||
```json
|
||||
{
|
||||
"kind": 39001,
|
||||
"pubkey": "<relay_pubkey>",
|
||||
"created_at": <timestamp>,
|
||||
"tags": [
|
||||
["method", "mentions"],
|
||||
["seed", "<seed_pubkey_hex>"],
|
||||
["depth", "1"]
|
||||
],
|
||||
"content": "{\"events_by_depth\":[[\"<event_id_1>\",\"<event_id_2>\",...]],\"total_events\":42}",
|
||||
"sig": "<signature>"
|
||||
}
|
||||
```
|
||||
|
||||
### Kind 39002: Graph Thread Result
|
||||
|
||||
Used for `thread` queries. Contains events in a reply thread:
|
||||
|
||||
```json
|
||||
{
|
||||
"kind": 39002,
|
||||
"pubkey": "<relay_pubkey>",
|
||||
"created_at": <timestamp>,
|
||||
"tags": [
|
||||
["method", "thread"],
|
||||
["seed", "<seed_event_id_hex>"],
|
||||
["depth", "10"]
|
||||
],
|
||||
"content": "{\"events_by_depth\":[[\"<reply_id_1>\",...],[\"<reply_id_2>\",...]],\"total_events\":156}",
|
||||
"sig": "<signature>"
|
||||
}
|
||||
```
|
||||
|
||||
### Reference Aggregation (Future)
|
||||
|
||||
When `inbound_refs` or `outbound_refs` are specified, the response includes aggregated reference data sorted by count descending. This feature is planned for a future implementation phase.
|
||||
|
||||
---
|
||||
|
||||
## Examples
|
||||
|
||||
### Example 1: Get Follow Network (Graph Only)
|
||||
|
||||
Get Alice's 2-hop follow network as a single signed event:
|
||||
|
||||
```json
|
||||
["REQ", "follow-network", {
|
||||
"_graph": {
|
||||
"method": "follows",
|
||||
"seed": "abc123...def456",
|
||||
"depth": 2
|
||||
}
|
||||
}]
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```json
|
||||
["EVENT", "follow-network", {
|
||||
"kind": 39000,
|
||||
"pubkey": "<relay_pubkey>",
|
||||
"tags": [
|
||||
["method", "follows"],
|
||||
["seed", "abc123...def456"],
|
||||
["depth", "2"]
|
||||
],
|
||||
"content": "{\"pubkeys_by_depth\":[[\"pub1\",\"pub2\",...150 pubkeys],[\"pub151\",\"pub152\",...3420 pubkeys]],\"total_pubkeys\":3570}",
|
||||
"sig": "<signature>"
|
||||
}]
|
||||
["EOSE", "follow-network"]
|
||||
```
|
||||
|
||||
The content JSON object contains:
|
||||
- `pubkeys_by_depth[0]`: 150 pubkeys (depth 1 - direct follows)
|
||||
- `pubkeys_by_depth[1]`: 3420 pubkeys (depth 2 - friends-of-friends, excluding depth 1)
|
||||
- `total_pubkeys`: 3570 (total unique pubkeys discovered)
|
||||
|
||||
### Example 2: Follow Network with Profiles
|
||||
|
||||
```json
|
||||
["REQ", "follow-profiles", {
|
||||
"_graph": {
|
||||
"method": "follows",
|
||||
"seed": "abc123...def456",
|
||||
"depth": 2
|
||||
},
|
||||
"kinds": [0]
|
||||
}]
|
||||
```
|
||||
|
||||
**Response:**
|
||||
```
|
||||
["EVENT", "follow-profiles", <kind-39000 graph result>]
|
||||
["EVENT", "follow-profiles", <kind-0 for depth-1 follow>]
|
||||
... (150 depth-1 profiles)
|
||||
["EVENT", "follow-profiles", <kind-0 for depth-2 follow>]
|
||||
... (3420 depth-2 profiles)
|
||||
["EOSE", "follow-profiles"]
|
||||
```
|
||||
|
||||
### Example 3: Popular Posts by Reactions
|
||||
|
||||
Find reactions to posts by Alice's follows, sorted by popularity:
|
||||
|
||||
```json
|
||||
["REQ", "popular-posts", {
|
||||
"_graph": {
|
||||
"method": "follows",
|
||||
"seed": "abc123...def456",
|
||||
"depth": 1,
|
||||
"inbound_refs": [
|
||||
{"kinds": [7], "from_depth": 1}
|
||||
]
|
||||
}
|
||||
}]
|
||||
```
|
||||
|
||||
**Response:** Most-reacted posts first, down to posts with only 1 reaction.
|
||||
|
||||
### Example 4: Thread Traversal
|
||||
|
||||
Fetch a complete reply thread:
|
||||
|
||||
```json
|
||||
["REQ", "thread", {
|
||||
"_graph": {
|
||||
"method": "thread",
|
||||
"seed": "root_event_id_hex",
|
||||
"depth": 10,
|
||||
"inbound_refs": [
|
||||
{"kinds": [1], "from_depth": 0}
|
||||
]
|
||||
}
|
||||
}]
|
||||
```
|
||||
|
||||
### Example 5: Who Follows Me?
|
||||
|
||||
Find pubkeys that follow Alice:
|
||||
|
||||
```json
|
||||
["REQ", "my-followers", {
|
||||
"_graph": {
|
||||
"method": "followers",
|
||||
"seed": "alice_pubkey_hex",
|
||||
"depth": 1
|
||||
}
|
||||
}]
|
||||
```
|
||||
|
||||
**Response:** Single kind-39000 event with follower pubkeys in content.
|
||||
|
||||
### Example 6: Reactions AND Reposts (AND semantics)
|
||||
|
||||
Find posts with both reactions and reposts:
|
||||
|
||||
```json
|
||||
["REQ", "engaged-posts", {
|
||||
"_graph": {
|
||||
"method": "follows",
|
||||
"seed": "abc123...def456",
|
||||
"depth": 1,
|
||||
"inbound_refs": [
|
||||
{"kinds": [7], "from_depth": 1},
|
||||
{"kinds": [6], "from_depth": 1}
|
||||
]
|
||||
}
|
||||
}]
|
||||
```
|
||||
|
||||
This returns only posts that have **both** kind-7 reactions AND kind-6 reposts.
|
||||
|
||||
### Example 7: Reactions OR Reposts (OR semantics)
|
||||
|
||||
Find posts with either reactions or reposts:
|
||||
|
||||
```json
|
||||
["REQ", "any-engagement", {
|
||||
"_graph": {
|
||||
"method": "follows",
|
||||
"seed": "abc123...def456",
|
||||
"depth": 1,
|
||||
"inbound_refs": [
|
||||
{"kinds": [6, 7], "from_depth": 1}
|
||||
]
|
||||
}
|
||||
}]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Client Implementation Notes
|
||||
|
||||
### Validating Graph Results
|
||||
|
||||
Graph result events are signed by the relay's identity key. Clients should:
|
||||
|
||||
1. Verify the signature as with any event
|
||||
2. Optionally verify the relay pubkey matches the connected relay
|
||||
3. Parse the `content` JSON to extract depth-organized results
|
||||
|
||||
### Caching Results
|
||||
|
||||
Because graph results are standard signed events, clients can:
|
||||
|
||||
1. Store results locally for offline access
|
||||
2. Optionally publish results to relays for sharing
|
||||
3. Use the `method`, `seed`, and `depth` tags to identify equivalent queries
|
||||
4. Compare `created_at` timestamps to determine freshness
|
||||
|
||||
### Trust Considerations
|
||||
|
||||
The relay is asserting "this is what the graph looks like from my perspective." Clients may want to:
|
||||
|
||||
1. Query multiple relays and compare results
|
||||
2. Prefer relays they trust for graph queries
|
||||
3. Use the response as a starting point and verify critical paths independently
|
||||
|
||||
---
|
||||
|
||||
## Relay Implementation Notes
|
||||
|
||||
### Index Requirements
|
||||
|
||||
Efficient implementation requires bidirectional graph indexes:
|
||||
|
||||
**Pubkey Graph:**
|
||||
- Event → Pubkey edges (author relationship, `p` tag references)
|
||||
- Pubkey → Event edges (reverse lookup)
|
||||
|
||||
**Event Graph:**
|
||||
- Event → Event edges (`e` tag references)
|
||||
- Event → Event reverse edges (what references this event)
|
||||
|
||||
Both indexes should include:
|
||||
- Event kind (for filtering)
|
||||
- Direction (author vs tag, inbound vs outbound)
|
||||
|
||||
### Query Execution
|
||||
|
||||
1. **Resolve seed**: Convert seed hex to internal identifier
|
||||
2. **BFS traversal**: Traverse graph to specified depth, tracking first-seen depth
|
||||
3. **Deduplication**: Each pubkey appears only at its first-discovered depth
|
||||
4. **Collect refs**: If `inbound_refs`/`outbound_refs` specified, scan reference indexes
|
||||
5. **Aggregate**: Group references by target/source, count occurrences
|
||||
6. **Sort**: Order by count descending (for refs)
|
||||
7. **Sign response**: Create and sign relay events with identity key
|
||||
|
||||
### Performance Considerations
|
||||
|
||||
- Use serial-based internal identifiers (5-byte) instead of full 32-byte IDs
|
||||
- Pre-compute common aggregations if possible
|
||||
- Set reasonable limits on depth (default max: 16) and result counts
|
||||
- Consider caching frequent queries
|
||||
- Use rate limiting to prevent abuse
|
||||
|
||||
---
|
||||
|
||||
## Backward Compatibility
|
||||
|
||||
- Relays not supporting this NIP will ignore the `_graph` field per NIP-01
|
||||
- Clients should detect support via NIP-11 relay information document
|
||||
- Response events (39000, 39001, 39002) are standard Nostr events
|
||||
|
||||
## NIP-11 Advertisement
|
||||
|
||||
Relays supporting this NIP should advertise it:
|
||||
|
||||
```json
|
||||
{
|
||||
"supported_nips": [1, "XX"],
|
||||
"limitation": {
|
||||
"graph_query_max_depth": 16
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
- **Rate limiting**: Graph queries can be expensive; relays should rate limit
|
||||
- **Depth limits**: Maximum depth should be capped (recommended: 16)
|
||||
- **Result limits**: Large follow graphs can return many results; consider size limits
|
||||
- **Authentication**: Relays may require NIP-42 auth for graph queries
|
||||
|
||||
## References
|
||||
|
||||
- [NIP-01](https://github.com/nostr-protocol/nips/blob/master/01.md): Basic protocol
|
||||
- [NIP-02](https://github.com/nostr-protocol/nips/blob/master/02.md): Follow lists (kind 3)
|
||||
- [NIP-11](https://github.com/nostr-protocol/nips/blob/master/11.md): Relay information
|
||||
- [NIP-33](https://github.com/nostr-protocol/nips/blob/master/33.md): Parameterized replaceable events
|
||||
- [NIP-42](https://github.com/nostr-protocol/nips/blob/master/42.md): Authentication
|
||||
615
docs/POLICY_CONFIGURATION_REFERENCE.md
Normal file
615
docs/POLICY_CONFIGURATION_REFERENCE.md
Normal file
@@ -0,0 +1,615 @@
|
||||
# ORLY Policy Configuration Reference
|
||||
|
||||
This document provides a definitive reference for all policy configuration options and when each rule applies. Use this as the authoritative source for understanding policy behavior.
|
||||
|
||||
## Quick Reference: Read vs Write Applicability
|
||||
|
||||
| Rule Field | Write (EVENT) | Read (REQ) | Notes |
|
||||
|------------|:-------------:|:----------:|-------|
|
||||
| `size_limit` | ✅ | ❌ | Validates incoming events only |
|
||||
| `content_limit` | ✅ | ❌ | Validates incoming events only |
|
||||
| `max_age_of_event` | ✅ | ❌ | Prevents replay attacks |
|
||||
| `max_age_event_in_future` | ✅ | ❌ | Prevents future-dated events |
|
||||
| `max_expiry_duration` | ✅ | ❌ | Requires expiration tag |
|
||||
| `must_have_tags` | ✅ | ❌ | Validates required tags |
|
||||
| `protected_required` | ✅ | ❌ | Requires NIP-70 "-" tag |
|
||||
| `identifier_regex` | ✅ | ❌ | Validates "d" tag format |
|
||||
| `tag_validation` | ✅ | ❌ | Validates tag values with regex |
|
||||
| `write_allow` | ✅ | ❌ | Pubkey whitelist for writing |
|
||||
| `write_deny` | ✅ | ❌ | Pubkey blacklist for writing |
|
||||
| `read_allow` | ❌ | ✅ | Pubkey whitelist for reading |
|
||||
| `read_deny` | ❌ | ✅ | Pubkey blacklist for reading |
|
||||
| `privileged` | ❌ | ✅ | Party-involved access control |
|
||||
| `write_allow_follows` | ✅ | ✅ | Grants **both** read AND write |
|
||||
| `follows_whitelist_admins` | ✅ | ✅ | Grants **both** read AND write |
|
||||
| `script` | ✅ | ❌ | Scripts only run for writes |
|
||||
|
||||
---
|
||||
|
||||
## Core Principle: Validation vs Filtering
|
||||
|
||||
The policy system has two distinct modes of operation:
|
||||
|
||||
### Write Operations (EVENT messages)
|
||||
- **Purpose**: Validate and accept/reject incoming events
|
||||
- **All rules apply** except `read_allow`, `read_deny`, and `privileged`
|
||||
- Events are checked **before storage**
|
||||
- Rejected events are never stored
|
||||
|
||||
### Read Operations (REQ messages)
|
||||
- **Purpose**: Filter which stored events a user can retrieve
|
||||
- **Only access control rules apply**: `read_allow`, `read_deny`, `privileged`, `write_allow_follows`, `follows_whitelist_admins`
|
||||
- Validation rules (size, age, tags) do NOT apply
|
||||
- Scripts are NOT executed for reads
|
||||
- Filtering happens **after database query**
|
||||
|
||||
---
|
||||
|
||||
## Configuration Structure
|
||||
|
||||
```json
|
||||
{
|
||||
"default_policy": "allow|deny",
|
||||
"kind": {
|
||||
"whitelist": [1, 3, 7],
|
||||
"blacklist": [4, 42]
|
||||
},
|
||||
"owners": ["hex_pubkey_64_chars"],
|
||||
"policy_admins": ["hex_pubkey_64_chars"],
|
||||
"policy_follow_whitelist_enabled": true,
|
||||
"global": { /* Rule object */ },
|
||||
"rules": {
|
||||
"1": { /* Rule object for kind 1 */ },
|
||||
"30023": { /* Rule object for kind 30023 */ }
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Top-Level Configuration Fields
|
||||
|
||||
### `default_policy`
|
||||
**Type**: `string`
|
||||
**Values**: `"allow"` (default) or `"deny"`
|
||||
**Applies to**: Both read and write
|
||||
|
||||
The fallback behavior when no specific rule makes a decision.
|
||||
|
||||
```json
|
||||
{
|
||||
"default_policy": "deny"
|
||||
}
|
||||
```
|
||||
|
||||
### `kind.whitelist` and `kind.blacklist`
|
||||
**Type**: `[]int`
|
||||
**Applies to**: Both read and write
|
||||
|
||||
Controls which event kinds are processed at all.
|
||||
|
||||
- **Whitelist** takes precedence: If present, ONLY whitelisted kinds are allowed
|
||||
- **Blacklist**: If no whitelist, these kinds are denied
|
||||
- **Neither**: Behavior depends on `default_policy` and whether rules exist
|
||||
|
||||
```json
|
||||
{
|
||||
"kind": {
|
||||
"whitelist": [0, 1, 3, 7, 30023]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### `owners`
|
||||
**Type**: `[]string` (64-character hex pubkeys)
|
||||
**Applies to**: Policy administration
|
||||
|
||||
Relay owners with full control. Merged with `ORLY_OWNERS` environment variable.
|
||||
|
||||
```json
|
||||
{
|
||||
"owners": ["4a93c5ac0c6f49d2c7e7a5b8d9e0f1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8"]
|
||||
}
|
||||
```
|
||||
|
||||
### `policy_admins`
|
||||
**Type**: `[]string` (64-character hex pubkeys)
|
||||
**Applies to**: Policy administration
|
||||
|
||||
Pubkeys that can update policy via kind 12345 events (with restrictions).
|
||||
|
||||
### `policy_follow_whitelist_enabled`
|
||||
**Type**: `boolean`
|
||||
**Default**: `false`
|
||||
**Applies to**: Both read and write (when `write_allow_follows` is true)
|
||||
|
||||
When enabled, allows `write_allow_follows` rules to grant access to policy admin follows.
|
||||
|
||||
---
|
||||
|
||||
## Rule Object Fields
|
||||
|
||||
Rules can be defined in `global` (applies to all events) or `rules[kind]` (applies to specific kind).
|
||||
|
||||
### Access Control Fields
|
||||
|
||||
#### `write_allow`
|
||||
**Type**: `[]string` (hex pubkeys)
|
||||
**Applies to**: Write only
|
||||
**Behavior**: Exclusive whitelist
|
||||
|
||||
When present with entries, ONLY these pubkeys can write events of this kind. All others are denied.
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"1": {
|
||||
"write_allow": ["pubkey1_hex", "pubkey2_hex"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Special case**: Empty array `[]` explicitly allows all writers.
|
||||
|
||||
#### `write_deny`
|
||||
**Type**: `[]string` (hex pubkeys)
|
||||
**Applies to**: Write only
|
||||
**Behavior**: Blacklist (highest priority)
|
||||
|
||||
These pubkeys cannot write events of this kind. **Checked before allow lists.**
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"1": {
|
||||
"write_deny": ["banned_pubkey_hex"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `read_allow`
|
||||
**Type**: `[]string` (hex pubkeys)
|
||||
**Applies to**: Read only
|
||||
**Behavior**: Exclusive whitelist (with OR logic for privileged)
|
||||
|
||||
When present with entries:
|
||||
- If `privileged: false`: ONLY these pubkeys can read
|
||||
- If `privileged: true`: These pubkeys OR parties involved can read
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"4": {
|
||||
"read_allow": ["trusted_pubkey_hex"],
|
||||
"privileged": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `read_deny`
|
||||
**Type**: `[]string` (hex pubkeys)
|
||||
**Applies to**: Read only
|
||||
**Behavior**: Blacklist (highest priority)
|
||||
|
||||
These pubkeys cannot read events of this kind. **Checked before allow lists.**
|
||||
|
||||
#### `privileged`
|
||||
**Type**: `boolean`
|
||||
**Default**: `false`
|
||||
**Applies to**: Read only
|
||||
|
||||
When `true`, events are only readable by "parties involved":
|
||||
- The event author (`event.pubkey`)
|
||||
- Users mentioned in `p` tags
|
||||
|
||||
**Interaction with `read_allow`**:
|
||||
- `read_allow` present + `privileged: true` = OR logic (in list OR party involved)
|
||||
- `read_allow` empty + `privileged: true` = Only parties involved
|
||||
- `privileged: true` alone = Only parties involved
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"4": {
|
||||
"description": "DMs - only sender and recipient can read",
|
||||
"privileged": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `write_allow_follows`
|
||||
**Type**: `boolean`
|
||||
**Default**: `false`
|
||||
**Applies to**: Both read AND write
|
||||
**Requires**: `policy_follow_whitelist_enabled: true` at top level
|
||||
|
||||
Grants **both read and write access** to pubkeys followed by policy admins.
|
||||
|
||||
> **Important**: Despite the name, this grants BOTH read and write access.
|
||||
|
||||
```json
|
||||
{
|
||||
"policy_follow_whitelist_enabled": true,
|
||||
"rules": {
|
||||
"1": {
|
||||
"write_allow_follows": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `follows_whitelist_admins`
|
||||
**Type**: `[]string` (hex pubkeys)
|
||||
**Applies to**: Both read AND write
|
||||
|
||||
Alternative to `write_allow_follows` that specifies which admin pubkeys' follows are whitelisted for this specific rule.
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"30023": {
|
||||
"follows_whitelist_admins": ["curator_pubkey_hex"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Validation Fields (Write-Only)
|
||||
|
||||
These fields validate incoming events and are **completely ignored for read operations**.
|
||||
|
||||
#### `size_limit`
|
||||
**Type**: `int64` (bytes)
|
||||
**Applies to**: Write only
|
||||
|
||||
Maximum total serialized event size.
|
||||
|
||||
```json
|
||||
{
|
||||
"global": {
|
||||
"size_limit": 100000
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `content_limit`
|
||||
**Type**: `int64` (bytes)
|
||||
**Applies to**: Write only
|
||||
|
||||
Maximum size of the `content` field.
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"1": {
|
||||
"content_limit": 10000
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `max_age_of_event`
|
||||
**Type**: `int64` (seconds)
|
||||
**Applies to**: Write only
|
||||
|
||||
Maximum age of events. Events with `created_at` older than `now - max_age_of_event` are rejected.
|
||||
|
||||
```json
|
||||
{
|
||||
"global": {
|
||||
"max_age_of_event": 86400
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `max_age_event_in_future`
|
||||
**Type**: `int64` (seconds)
|
||||
**Applies to**: Write only
|
||||
|
||||
Maximum time events can be dated in the future. Events with `created_at` later than `now + max_age_event_in_future` are rejected.
|
||||
|
||||
```json
|
||||
{
|
||||
"global": {
|
||||
"max_age_event_in_future": 300
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `max_expiry_duration`
|
||||
**Type**: `string` (ISO-8601 duration)
|
||||
**Applies to**: Write only
|
||||
|
||||
Maximum allowed expiry time from event creation. Events **must** have an `expiration` tag when this is set.
|
||||
|
||||
**Format**: `P[n]Y[n]M[n]W[n]DT[n]H[n]M[n]S`
|
||||
|
||||
**Examples**:
|
||||
- `P7D` = 7 days
|
||||
- `PT1H` = 1 hour
|
||||
- `P1DT12H` = 1 day 12 hours
|
||||
- `PT30M` = 30 minutes
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"20": {
|
||||
"description": "Ephemeral events must expire within 24 hours",
|
||||
"max_expiry_duration": "P1D"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `must_have_tags`
|
||||
**Type**: `[]string` (tag names)
|
||||
**Applies to**: Write only
|
||||
|
||||
Required tags that must be present on the event.
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"1": {
|
||||
"must_have_tags": ["p", "e"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `protected_required`
|
||||
**Type**: `boolean`
|
||||
**Default**: `false`
|
||||
**Applies to**: Write only
|
||||
|
||||
Requires events to have a `-` tag (NIP-70 protected events).
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"4": {
|
||||
"protected_required": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `identifier_regex`
|
||||
**Type**: `string` (regex pattern)
|
||||
**Applies to**: Write only
|
||||
|
||||
Regex pattern that `d` tag values must match. Events **must** have a `d` tag when this is set.
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"30023": {
|
||||
"identifier_regex": "^[a-z0-9-]{1,64}$"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### `tag_validation`
|
||||
**Type**: `map[string]string` (tag name → regex pattern)
|
||||
**Applies to**: Write only
|
||||
|
||||
Regex patterns for validating specific tag values. Only validates tags that are **present** on the event.
|
||||
|
||||
> **Note**: To require a tag to exist, use `must_have_tags`. `tag_validation` only validates format.
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"30023": {
|
||||
"tag_validation": {
|
||||
"t": "^[a-z0-9-]{1,32}$",
|
||||
"d": "^[a-z0-9-]+$"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Script Configuration
|
||||
|
||||
#### `script`
|
||||
**Type**: `string` (file path)
|
||||
**Applies to**: Write only
|
||||
|
||||
Path to a custom validation script. **Scripts are NOT executed for read operations.**
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"1": {
|
||||
"script": "/etc/orly/scripts/spam-filter.py"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Policy Evaluation Order
|
||||
|
||||
### For Write Operations
|
||||
|
||||
```
|
||||
1. Global Rule Check (all fields apply)
|
||||
├─ Universal constraints (size, tags, age, etc.)
|
||||
├─ write_deny check
|
||||
├─ write_allow_follows / follows_whitelist_admins check
|
||||
└─ write_allow check
|
||||
|
||||
2. Kind Filtering (whitelist/blacklist)
|
||||
|
||||
3. Kind-Specific Rule Check (same as global)
|
||||
├─ Universal constraints
|
||||
├─ write_deny check
|
||||
├─ write_allow_follows / follows_whitelist_admins check
|
||||
├─ write_allow check
|
||||
└─ Script execution (if configured)
|
||||
|
||||
4. Default Policy (if no rules matched)
|
||||
```
|
||||
|
||||
### For Read Operations
|
||||
|
||||
```
|
||||
1. Global Rule Check (access control only)
|
||||
├─ read_deny check
|
||||
├─ write_allow_follows / follows_whitelist_admins check
|
||||
├─ read_allow check
|
||||
└─ privileged check (party involved)
|
||||
|
||||
2. Kind Filtering (whitelist/blacklist)
|
||||
|
||||
3. Kind-Specific Rule Check (access control only)
|
||||
├─ read_deny check
|
||||
├─ write_allow_follows / follows_whitelist_admins check
|
||||
├─ read_allow + privileged (OR logic)
|
||||
└─ privileged-only check
|
||||
|
||||
4. Default Policy (if no rules matched)
|
||||
|
||||
NOTE: Scripts are NOT executed for read operations
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Common Configuration Patterns
|
||||
|
||||
### Private Relay (Whitelist Only)
|
||||
|
||||
```json
|
||||
{
|
||||
"default_policy": "deny",
|
||||
"global": {
|
||||
"write_allow": ["trusted_pubkey_1", "trusted_pubkey_2"],
|
||||
"read_allow": ["trusted_pubkey_1", "trusted_pubkey_2"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Open Relay with Spam Protection
|
||||
|
||||
```json
|
||||
{
|
||||
"default_policy": "allow",
|
||||
"global": {
|
||||
"size_limit": 100000,
|
||||
"max_age_of_event": 86400,
|
||||
"max_age_event_in_future": 300
|
||||
},
|
||||
"rules": {
|
||||
"1": {
|
||||
"script": "/etc/orly/scripts/spam-filter.sh"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Community Relay (Follows-Based)
|
||||
|
||||
```json
|
||||
{
|
||||
"default_policy": "deny",
|
||||
"policy_admins": ["community_admin_pubkey"],
|
||||
"policy_follow_whitelist_enabled": true,
|
||||
"global": {
|
||||
"write_allow_follows": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Encrypted DMs (Privileged Access)
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"4": {
|
||||
"description": "Encrypted DMs - only sender/recipient",
|
||||
"privileged": true,
|
||||
"protected_required": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Long-Form Content with Validation
|
||||
|
||||
```json
|
||||
{
|
||||
"rules": {
|
||||
"30023": {
|
||||
"description": "Long-form articles",
|
||||
"size_limit": 100000,
|
||||
"content_limit": 50000,
|
||||
"max_expiry_duration": "P30D",
|
||||
"identifier_regex": "^[a-z0-9-]{1,64}$",
|
||||
"tag_validation": {
|
||||
"t": "^[a-z0-9-]{1,32}$"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Important Behaviors
|
||||
|
||||
### Whitelist vs Blacklist Precedence
|
||||
|
||||
1. **Deny lists** (`write_deny`, `read_deny`) are checked **first** and have highest priority
|
||||
2. **Allow lists** are exclusive when populated - ONLY listed pubkeys are allowed
|
||||
3. **Deny-only configuration**: If only deny list exists (no allow list), all non-denied pubkeys are allowed
|
||||
|
||||
### Empty Arrays vs Null
|
||||
|
||||
- `[]` (empty array explicitly set) = Allow all
|
||||
- `null` or field omitted = No list configured, use other rules
|
||||
|
||||
### Global Rules Are Additive
|
||||
|
||||
Global rules are always evaluated **in addition to** kind-specific rules. They cannot be overridden at the kind level.
|
||||
|
||||
### Implicit Kind Whitelist
|
||||
|
||||
When rules are defined but no explicit `kind.whitelist`:
|
||||
- If `default_policy: "allow"`: All kinds allowed
|
||||
- If `default_policy: "deny"` or unset: Only kinds with rules allowed
|
||||
|
||||
---
|
||||
|
||||
## Debugging Policy Issues
|
||||
|
||||
Enable debug logging to see policy decisions:
|
||||
|
||||
```bash
|
||||
export ORLY_LOG_LEVEL=debug
|
||||
```
|
||||
|
||||
Log messages include:
|
||||
- Policy evaluation steps
|
||||
- Rule matching
|
||||
- Access decisions with reasons
|
||||
|
||||
---
|
||||
|
||||
## Source Code Reference
|
||||
|
||||
- Policy struct definition: `pkg/policy/policy.go:75-144` (Rule struct)
|
||||
- Policy struct definition: `pkg/policy/policy.go:380-412` (P struct)
|
||||
- Check evaluation: `pkg/policy/policy.go:1260-1595` (checkRulePolicy)
|
||||
- Write handler: `app/handle-event.go:114-138`
|
||||
- Read handler: `app/handle-req.go:420-438`
|
||||
10
go.mod
10
go.mod
@@ -3,11 +3,12 @@ module next.orly.dev
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
git.mleku.dev/mleku/nostr v1.0.7
|
||||
git.mleku.dev/mleku/nostr v1.0.8
|
||||
github.com/adrg/xdg v0.5.3
|
||||
github.com/aperturerobotics/go-indexeddb v0.2.3
|
||||
github.com/dgraph-io/badger/v4 v4.8.0
|
||||
github.com/dgraph-io/dgo/v230 v230.0.1
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
github.com/hack-pad/safejs v0.1.1
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
|
||||
github.com/klauspost/compress v1.18.2
|
||||
github.com/minio/sha256-simd v1.0.1
|
||||
@@ -21,7 +22,6 @@ require (
|
||||
go.uber.org/atomic v1.11.0
|
||||
golang.org/x/crypto v0.45.0
|
||||
golang.org/x/lint v0.0.0-20241112194109-818c5a804067
|
||||
google.golang.org/grpc v1.76.0
|
||||
honnef.co/go/tools v0.6.1
|
||||
lol.mleku.dev v1.0.5
|
||||
lukechampine.com/frand v1.5.1
|
||||
@@ -46,8 +46,6 @@ require (
|
||||
github.com/felixge/fgprof v0.9.5 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/flatbuffers v25.9.23+incompatible // indirect
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
@@ -57,7 +55,6 @@ require (
|
||||
github.com/mattn/go-sqlite3 v1.14.32 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
|
||||
github.com/templexxx/cpu v0.1.1 // indirect
|
||||
@@ -79,7 +76,6 @@ require (
|
||||
golang.org/x/sys v0.38.0 // indirect
|
||||
golang.org/x/text v0.31.0 // indirect
|
||||
golang.org/x/tools v0.39.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect
|
||||
google.golang.org/protobuf v1.36.10 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
p256k1.mleku.dev v1.0.3 // indirect
|
||||
|
||||
101
go.sum
101
go.sum
@@ -1,13 +1,13 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
git.mleku.dev/mleku/nostr v1.0.7 h1:BXWsAAiGu56JXR4rIn0kaVOE+RtMmA9MPvAs8y/BjnI=
|
||||
git.mleku.dev/mleku/nostr v1.0.7/go.mod h1:iYTlg2WKJXJ0kcsM6QBGOJ0UDiJidMgL/i64cHyPjZc=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
git.mleku.dev/mleku/nostr v1.0.8 h1:YYREdIxobEqYkzxQ7/5ALACPzLkiHW+CTira+VvSQZk=
|
||||
git.mleku.dev/mleku/nostr v1.0.8/go.mod h1:iYTlg2WKJXJ0kcsM6QBGOJ0UDiJidMgL/i64cHyPjZc=
|
||||
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNNZqGeYq4PnYOlwlOVIvSyNaIy0ykg=
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3/go.mod h1:we0YA5CsBbH5+/NUzC/AlMmxaDtWlXeNsqrwXjTzmzA=
|
||||
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
|
||||
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
|
||||
github.com/aperturerobotics/go-indexeddb v0.2.3 h1:DfquIk9YEZjWD/lJyBWZWGCtRga43/a96bx0Ulv9VhQ=
|
||||
github.com/aperturerobotics/go-indexeddb v0.2.3/go.mod h1:JV1XngOCCui7zrMSyRz+Wvz00nUSfotRKZqJzWpl5fQ=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
|
||||
@@ -17,7 +17,6 @@ github.com/bytedance/sonic v1.13.1/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1
|
||||
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
|
||||
github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY=
|
||||
github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
|
||||
@@ -29,7 +28,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
||||
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
|
||||
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
|
||||
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
|
||||
@@ -44,8 +42,6 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvw
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
|
||||
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
|
||||
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
|
||||
github.com/dgraph-io/dgo/v230 v230.0.1 h1:kR7gI7/ZZv0jtG6dnedNgNOCxe1cbSG8ekF+pNfReks=
|
||||
github.com/dgraph-io/dgo/v230 v230.0.1/go.mod h1:5FerO2h4LPOxR2XTkOAtqUUPaFdQ+5aBOHXPBJ3nT10=
|
||||
github.com/dgraph-io/ristretto/v2 v2.3.0 h1:qTQ38m7oIyd4GAed/QkUZyPFNMnvVWyazGXRwvOt5zk=
|
||||
github.com/dgraph-io/ristretto/v2 v2.3.0/go.mod h1:gpoRV3VzrEY1a9dWAYV6T1U7YzfgttXdd/ZzL1s9OZM=
|
||||
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
|
||||
@@ -55,8 +51,6 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m
|
||||
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
|
||||
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
|
||||
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
|
||||
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
|
||||
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
|
||||
@@ -68,26 +62,8 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/flatbuffers v25.9.23+incompatible h1:rGZKv+wOb6QPzIdkM2KxhBZCDrA0DeN6DNmRDrqIsQU=
|
||||
github.com/google/flatbuffers v25.9.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@@ -95,10 +71,10 @@ github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8I
|
||||
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hack-pad/safejs v0.1.1 h1:d5qPO0iQ7h2oVtpzGnLExE+Wn9AtytxIfltcS2b9KD8=
|
||||
github.com/hack-pad/safejs v0.1.1/go.mod h1:HdS+bKF1NrE72VoXZeWzxFOVQVUSqZJAG0xNCnb+Tio=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
@@ -107,8 +83,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
|
||||
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
@@ -137,13 +111,10 @@ github.com/nbd-wtf/go-nostr v0.52.0/go.mod h1:4avYoc9mDGZ9wHsvCOhHH9vPzKucCfuYBt
|
||||
github.com/neo4j/neo4j-go-driver/v5 v5.28.4 h1:7toxehVcYkZbyxV4W3Ib9VcnyRBQPucF+VwNNmtSXi4=
|
||||
github.com/neo4j/neo4j-go-driver/v5 v5.28.4/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k=
|
||||
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
|
||||
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
@@ -177,8 +148,6 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/vertex-lab/nostr-sqlite v0.3.2 h1:8nZYYIwiKnWLA446qA/wL/Gy+bU0kuaxdLfUyfeTt/E=
|
||||
github.com/vertex-lab/nostr-sqlite v0.3.2/go.mod h1:5bw1wMgJhSdrumsZAWxqy+P0u1g+q02PnlGQn15dnSM=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs=
|
||||
go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
@@ -187,10 +156,6 @@ go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
|
||||
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
|
||||
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
|
||||
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
|
||||
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
|
||||
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
|
||||
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
|
||||
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
|
||||
@@ -199,92 +164,40 @@ golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw=
|
||||
golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
|
||||
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
|
||||
golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 h1:HDjDiATsGqvuqvkDvgJjD1IgPrVekcSXVVE21JwvzGE=
|
||||
golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20241112194109-818c5a804067 h1:adDmSQyFTCiv19j015EGKJBoaa7ElV0Q1Wovb/4G7NA=
|
||||
golang.org/x/lint v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
|
||||
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
|
||||
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
||||
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
||||
golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
|
||||
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
|
||||
google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -293,8 +206,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
|
||||
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
|
||||
lol.mleku.dev v1.0.5 h1:irwfwz+Scv74G/2OXmv05YFKOzUNOVZ735EAkYgjgM8=
|
||||
|
||||
45
main.go
45
main.go
@@ -21,8 +21,7 @@ import (
|
||||
"next.orly.dev/pkg/acl"
|
||||
"git.mleku.dev/mleku/nostr/crypto/keys"
|
||||
"next.orly.dev/pkg/database"
|
||||
_ "next.orly.dev/pkg/dgraph" // Import to register dgraph factory
|
||||
_ "next.orly.dev/pkg/neo4j" // Import to register neo4j factory
|
||||
_ "next.orly.dev/pkg/neo4j" // Import to register neo4j factory
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
"next.orly.dev/pkg/utils/interrupt"
|
||||
"next.orly.dev/pkg/version"
|
||||
@@ -31,6 +30,13 @@ import (
|
||||
func main() {
|
||||
runtime.GOMAXPROCS(128)
|
||||
debug.SetGCPercent(10)
|
||||
|
||||
// Handle 'version' subcommand early, before any other initialization
|
||||
if config.VersionRequested() {
|
||||
fmt.Println(version.V)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
var err error
|
||||
var cfg *config.C
|
||||
if cfg, err = config.New(); chk.T(err) {
|
||||
@@ -42,8 +48,8 @@ func main() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
var db database.Database
|
||||
if db, err = database.NewDatabase(
|
||||
ctx, cancel, cfg.DBType, cfg.DataDir, cfg.DBLogLevel,
|
||||
if db, err = database.NewDatabaseWithConfig(
|
||||
ctx, cancel, cfg.DBType, makeDatabaseConfig(cfg),
|
||||
); chk.E(err) {
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -318,8 +324,8 @@ func main() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
var db database.Database
|
||||
log.I.F("initializing %s database at %s", cfg.DBType, cfg.DataDir)
|
||||
if db, err = database.NewDatabase(
|
||||
ctx, cancel, cfg.DBType, cfg.DataDir, cfg.DBLogLevel,
|
||||
if db, err = database.NewDatabaseWithConfig(
|
||||
ctx, cancel, cfg.DBType, makeDatabaseConfig(cfg),
|
||||
); chk.E(err) {
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -430,3 +436,30 @@ func main() {
|
||||
}
|
||||
// log.I.F("exiting")
|
||||
}
|
||||
|
||||
// makeDatabaseConfig creates a database.DatabaseConfig from the app config.
|
||||
// This helper function extracts all database-specific configuration values
|
||||
// and constructs the appropriate struct for the database package.
|
||||
func makeDatabaseConfig(cfg *config.C) *database.DatabaseConfig {
|
||||
dataDir, logLevel,
|
||||
blockCacheMB, indexCacheMB, queryCacheSizeMB,
|
||||
queryCacheMaxAge,
|
||||
serialCachePubkeys, serialCacheEventIds,
|
||||
zstdLevel,
|
||||
neo4jURI, neo4jUser, neo4jPassword := cfg.GetDatabaseConfigValues()
|
||||
|
||||
return &database.DatabaseConfig{
|
||||
DataDir: dataDir,
|
||||
LogLevel: logLevel,
|
||||
BlockCacheMB: blockCacheMB,
|
||||
IndexCacheMB: indexCacheMB,
|
||||
QueryCacheSizeMB: queryCacheSizeMB,
|
||||
QueryCacheMaxAge: queryCacheMaxAge,
|
||||
SerialCachePubkeys: serialCachePubkeys,
|
||||
SerialCacheEventIds: serialCacheEventIds,
|
||||
ZSTDLevel: zstdLevel,
|
||||
Neo4jURI: neo4jURI,
|
||||
Neo4jUser: neo4jUser,
|
||||
Neo4jPassword: neo4jPassword,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,20 +2,20 @@ package acl
|
||||
|
||||
import (
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"next.orly.dev/pkg/interfaces/acl"
|
||||
acliface "next.orly.dev/pkg/interfaces/acl"
|
||||
"next.orly.dev/pkg/utils/atomic"
|
||||
)
|
||||
|
||||
var Registry = &S{}
|
||||
|
||||
type S struct {
|
||||
ACL []acl.I
|
||||
ACL []acliface.I
|
||||
Active atomic.String
|
||||
}
|
||||
|
||||
type A struct{ S }
|
||||
|
||||
func (s *S) Register(i acl.I) {
|
||||
func (s *S) Register(i acliface.I) {
|
||||
(*s).ACL = append((*s).ACL, i)
|
||||
}
|
||||
|
||||
@@ -85,9 +85,7 @@ func (s *S) CheckPolicy(ev *event.E) (allowed bool, err error) {
|
||||
for _, i := range s.ACL {
|
||||
if i.Type() == s.Active.Load() {
|
||||
// Check if the ACL implementation has a CheckPolicy method
|
||||
if policyChecker, ok := i.(interface {
|
||||
CheckPolicy(ev *event.E) (allowed bool, err error)
|
||||
}); ok {
|
||||
if policyChecker, ok := i.(acliface.PolicyChecker); ok {
|
||||
return policyChecker.CheckPolicy(ev)
|
||||
}
|
||||
// If no CheckPolicy method, default to allowing
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
|
||||
@@ -19,11 +19,12 @@ import (
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
)
|
||||
|
||||
// TestInlineSmallEventStorage tests the Reiser4-inspired inline storage optimization
|
||||
// for small events (<=1024 bytes by default).
|
||||
func TestInlineSmallEventStorage(t *testing.T) {
|
||||
// TestCompactEventStorage tests the compact storage format (cmp prefix) which
|
||||
// replaced the old inline storage optimization (sev/evt prefixes).
|
||||
// All events are now stored in compact format regardless of size.
|
||||
func TestCompactEventStorage(t *testing.T) {
|
||||
// Create a temporary directory for the database
|
||||
tempDir, err := os.MkdirTemp("", "test-inline-db-*")
|
||||
tempDir, err := os.MkdirTemp("", "test-compact-db-*")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temporary directory: %v", err)
|
||||
}
|
||||
@@ -46,8 +47,8 @@ func TestInlineSmallEventStorage(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Test Case 1: Small event (should use inline storage)
|
||||
t.Run("SmallEventInlineStorage", func(t *testing.T) {
|
||||
// Test Case 1: Small event (should use compact storage)
|
||||
t.Run("SmallEventCompactStorage", func(t *testing.T) {
|
||||
smallEvent := event.New()
|
||||
smallEvent.Kind = kind.TextNote.K
|
||||
smallEvent.CreatedAt = timestamp.Now().V
|
||||
@@ -65,49 +66,27 @@ func TestInlineSmallEventStorage(t *testing.T) {
|
||||
t.Fatalf("Failed to save small event: %v", err)
|
||||
}
|
||||
|
||||
// Verify it was stored with sev prefix
|
||||
// Verify it was stored with cmp prefix
|
||||
serial, err := db.GetSerialById(smallEvent.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serial for small event: %v", err)
|
||||
}
|
||||
|
||||
// Check that sev key exists
|
||||
sevKeyExists := false
|
||||
// Check that cmp key exists (compact format)
|
||||
cmpKeyExists := false
|
||||
db.View(func(txn *badger.Txn) error {
|
||||
smallBuf := new(bytes.Buffer)
|
||||
indexes.SmallEventEnc(serial).MarshalWrite(smallBuf)
|
||||
cmpBuf := new(bytes.Buffer)
|
||||
indexes.CompactEventEnc(serial).MarshalWrite(cmpBuf)
|
||||
|
||||
opts := badger.DefaultIteratorOptions
|
||||
opts.Prefix = smallBuf.Bytes()
|
||||
it := txn.NewIterator(opts)
|
||||
defer it.Close()
|
||||
|
||||
it.Rewind()
|
||||
if it.Valid() {
|
||||
sevKeyExists = true
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if !sevKeyExists {
|
||||
t.Errorf("Small event was not stored with sev prefix")
|
||||
}
|
||||
|
||||
// Verify evt key does NOT exist for small event
|
||||
evtKeyExists := false
|
||||
db.View(func(txn *badger.Txn) error {
|
||||
buf := new(bytes.Buffer)
|
||||
indexes.EventEnc(serial).MarshalWrite(buf)
|
||||
|
||||
_, err := txn.Get(buf.Bytes())
|
||||
_, err := txn.Get(cmpBuf.Bytes())
|
||||
if err == nil {
|
||||
evtKeyExists = true
|
||||
cmpKeyExists = true
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if evtKeyExists {
|
||||
t.Errorf("Small event should not have evt key (should only use sev)")
|
||||
if !cmpKeyExists {
|
||||
t.Errorf("Small event was not stored with cmp prefix (compact format)")
|
||||
}
|
||||
|
||||
// Fetch and verify the event
|
||||
@@ -124,12 +103,12 @@ func TestInlineSmallEventStorage(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
// Test Case 2: Large event (should use traditional storage)
|
||||
t.Run("LargeEventTraditionalStorage", func(t *testing.T) {
|
||||
// Test Case 2: Large event (should also use compact storage)
|
||||
t.Run("LargeEventCompactStorage", func(t *testing.T) {
|
||||
largeEvent := event.New()
|
||||
largeEvent.Kind = kind.TextNote.K
|
||||
largeEvent.CreatedAt = timestamp.Now().V
|
||||
// Create content larger than 1024 bytes (the default inline storage threshold)
|
||||
// Create larger content
|
||||
largeContent := make([]byte, 1500)
|
||||
for i := range largeContent {
|
||||
largeContent[i] = 'x'
|
||||
@@ -148,27 +127,27 @@ func TestInlineSmallEventStorage(t *testing.T) {
|
||||
t.Fatalf("Failed to save large event: %v", err)
|
||||
}
|
||||
|
||||
// Verify it was stored with evt prefix
|
||||
// Verify it was stored with cmp prefix (compact format)
|
||||
serial, err := db.GetSerialById(largeEvent.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serial for large event: %v", err)
|
||||
}
|
||||
|
||||
// Check that evt key exists
|
||||
evtKeyExists := false
|
||||
// Check that cmp key exists
|
||||
cmpKeyExists := false
|
||||
db.View(func(txn *badger.Txn) error {
|
||||
buf := new(bytes.Buffer)
|
||||
indexes.EventEnc(serial).MarshalWrite(buf)
|
||||
cmpBuf := new(bytes.Buffer)
|
||||
indexes.CompactEventEnc(serial).MarshalWrite(cmpBuf)
|
||||
|
||||
_, err := txn.Get(buf.Bytes())
|
||||
_, err := txn.Get(cmpBuf.Bytes())
|
||||
if err == nil {
|
||||
evtKeyExists = true
|
||||
cmpKeyExists = true
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if !evtKeyExists {
|
||||
t.Errorf("Large event was not stored with evt prefix")
|
||||
if !cmpKeyExists {
|
||||
t.Errorf("Large event was not stored with cmp prefix (compact format)")
|
||||
}
|
||||
|
||||
// Fetch and verify the event
|
||||
@@ -399,9 +378,11 @@ func TestInlineStorageMigration(t *testing.T) {
|
||||
i, fetchedEvent.Content, ev.Content)
|
||||
}
|
||||
|
||||
// Verify it's now using inline storage
|
||||
sevKeyExists := false
|
||||
// Verify it's now using optimized storage (sev inline OR cmp compact format)
|
||||
// The migration may convert to sev (version 4) or cmp (version 6) depending on migration order
|
||||
optimizedStorageExists := false
|
||||
db.View(func(txn *badger.Txn) error {
|
||||
// Check for sev (small event inline) format
|
||||
smallBuf := new(bytes.Buffer)
|
||||
indexes.SmallEventEnc(serial).MarshalWrite(smallBuf)
|
||||
|
||||
@@ -412,21 +393,31 @@ func TestInlineStorageMigration(t *testing.T) {
|
||||
|
||||
it.Rewind()
|
||||
if it.Valid() {
|
||||
sevKeyExists = true
|
||||
t.Logf("Event %d (%s) successfully migrated to inline storage",
|
||||
optimizedStorageExists = true
|
||||
t.Logf("Event %d (%s) successfully migrated to inline (sev) storage",
|
||||
i, hex.Enc(ev.ID[:8]))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check for cmp (compact format) storage
|
||||
cmpBuf := new(bytes.Buffer)
|
||||
indexes.CompactEventEnc(serial).MarshalWrite(cmpBuf)
|
||||
if _, err := txn.Get(cmpBuf.Bytes()); err == nil {
|
||||
optimizedStorageExists = true
|
||||
t.Logf("Event %d (%s) successfully migrated to compact (cmp) storage",
|
||||
i, hex.Enc(ev.ID[:8]))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if !sevKeyExists {
|
||||
t.Errorf("Event %d was not migrated to inline storage", i)
|
||||
if !optimizedStorageExists {
|
||||
t.Errorf("Event %d was not migrated to optimized storage (sev or cmp)", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkInlineVsTraditionalStorage compares performance of inline vs traditional storage
|
||||
func BenchmarkInlineVsTraditionalStorage(b *testing.B) {
|
||||
// BenchmarkCompactStorage benchmarks the compact storage format performance
|
||||
func BenchmarkCompactStorage(b *testing.B) {
|
||||
// Create a temporary directory for the database
|
||||
tempDir, err := os.MkdirTemp("", "bench-inline-db-*")
|
||||
if err != nil {
|
||||
@@ -489,7 +480,7 @@ func BenchmarkInlineVsTraditionalStorage(b *testing.B) {
|
||||
}
|
||||
}
|
||||
|
||||
b.Run("FetchSmallEventsInline", func(b *testing.B) {
|
||||
b.Run("FetchSmallEventsCompact", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
idx := i % len(smallSerials)
|
||||
@@ -497,7 +488,7 @@ func BenchmarkInlineVsTraditionalStorage(b *testing.B) {
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("FetchLargeEventsTraditional", func(b *testing.B) {
|
||||
b.Run("FetchLargeEventsCompact", func(b *testing.B) {
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
idx := i % len(largeSerials)
|
||||
421
pkg/database/compact_event.go
Normal file
421
pkg/database/compact_event.go
Normal file
@@ -0,0 +1,421 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/crypto/ec/schnorr"
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
"git.mleku.dev/mleku/nostr/encoders/varint"
|
||||
"lol.mleku.dev/chk"
|
||||
)
|
||||
|
||||
// CompactEventFormat defines the binary format for compact event storage.
|
||||
// This format uses 5-byte serial references instead of 32-byte IDs/pubkeys,
|
||||
// dramatically reducing storage requirements.
|
||||
//
|
||||
// Format:
|
||||
// - 1 byte: Version (currently 1)
|
||||
// - 5 bytes: Author pubkey serial (reference to spk table)
|
||||
// - varint: CreatedAt timestamp
|
||||
// - 2 bytes: Kind (uint16 big-endian)
|
||||
// - varint: Number of tags
|
||||
// - For each tag:
|
||||
// - varint: Number of elements in tag
|
||||
// - For each element:
|
||||
// - 1 byte: Element type flag
|
||||
// - 0x00 = raw bytes (followed by varint length + data)
|
||||
// - 0x01 = pubkey serial reference (followed by 5-byte serial)
|
||||
// - 0x02 = event ID serial reference (followed by 5-byte serial)
|
||||
// - 0x03 = unknown event ID (followed by 32-byte full ID)
|
||||
// - Element data based on type
|
||||
// - varint: Content length
|
||||
// - Content bytes
|
||||
// - 64 bytes: Signature
|
||||
//
|
||||
// Space savings example (event with 3 p-tags, 1 e-tag):
|
||||
// - Original: 32 (ID) + 32 (pubkey) + 32*4 (tags) = 192 bytes
|
||||
// - Compact: 5 (pubkey serial) + 5*4 (tag serials) = 25 bytes
|
||||
// - Savings: 167 bytes per event (87%)
|
||||
|
||||
const (
|
||||
CompactFormatVersion = 1
|
||||
|
||||
// Tag element type flags
|
||||
TagElementRaw = 0x00 // Raw bytes (varint length + data)
|
||||
TagElementPubkeySerial = 0x01 // Pubkey serial reference (5 bytes)
|
||||
TagElementEventSerial = 0x02 // Event ID serial reference (5 bytes)
|
||||
TagElementEventIdFull = 0x03 // Full event ID (32 bytes) - for unknown refs
|
||||
)
|
||||
|
||||
// SerialResolver is an interface for resolving serials during compact encoding/decoding.
|
||||
// This allows the encoder/decoder to look up or create serial mappings.
|
||||
type SerialResolver interface {
|
||||
// GetOrCreatePubkeySerial returns the serial for a pubkey, creating one if needed.
|
||||
GetOrCreatePubkeySerial(pubkey []byte) (serial uint64, err error)
|
||||
|
||||
// GetPubkeyBySerial returns the full pubkey for a serial.
|
||||
GetPubkeyBySerial(serial uint64) (pubkey []byte, err error)
|
||||
|
||||
// GetEventSerialById returns the serial for an event ID, or 0 if not found.
|
||||
GetEventSerialById(eventId []byte) (serial uint64, found bool, err error)
|
||||
|
||||
// GetEventIdBySerial returns the full event ID for a serial.
|
||||
GetEventIdBySerial(serial uint64) (eventId []byte, err error)
|
||||
}
|
||||
|
||||
// MarshalCompactEvent encodes an event using compact serial references.
|
||||
// The resolver is used to look up/create serial mappings for pubkeys and event IDs.
|
||||
func MarshalCompactEvent(ev *event.E, resolver SerialResolver) (data []byte, err error) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
// Version byte
|
||||
buf.WriteByte(CompactFormatVersion)
|
||||
|
||||
// Author pubkey serial (5 bytes)
|
||||
var authorSerial uint64
|
||||
if authorSerial, err = resolver.GetOrCreatePubkeySerial(ev.Pubkey); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
writeUint40(buf, authorSerial)
|
||||
|
||||
// CreatedAt (varint)
|
||||
varint.Encode(buf, uint64(ev.CreatedAt))
|
||||
|
||||
// Kind (2 bytes big-endian)
|
||||
binary.Write(buf, binary.BigEndian, ev.Kind)
|
||||
|
||||
// Tags
|
||||
if ev.Tags == nil || ev.Tags.Len() == 0 {
|
||||
varint.Encode(buf, 0)
|
||||
} else {
|
||||
varint.Encode(buf, uint64(ev.Tags.Len()))
|
||||
for _, t := range *ev.Tags {
|
||||
if err = encodeCompactTag(buf, t, resolver); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Content
|
||||
varint.Encode(buf, uint64(len(ev.Content)))
|
||||
buf.Write(ev.Content)
|
||||
|
||||
// Signature (64 bytes)
|
||||
buf.Write(ev.Sig)
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// encodeCompactTag encodes a single tag with serial references for e/p tags.
|
||||
func encodeCompactTag(w io.Writer, t *tag.T, resolver SerialResolver) (err error) {
|
||||
if t == nil || t.Len() == 0 {
|
||||
varint.Encode(w, 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
varint.Encode(w, uint64(t.Len()))
|
||||
|
||||
// Get tag key to determine if we should use serial references
|
||||
key := t.Key()
|
||||
isPTag := len(key) == 1 && key[0] == 'p'
|
||||
isETag := len(key) == 1 && key[0] == 'e'
|
||||
|
||||
for i, elem := range t.T {
|
||||
if i == 0 {
|
||||
// First element is always the tag key - store as raw
|
||||
writeTagElement(w, TagElementRaw, elem)
|
||||
continue
|
||||
}
|
||||
|
||||
if i == 1 {
|
||||
// Second element is the value - potentially a serial reference
|
||||
if isPTag && len(elem) == 32 {
|
||||
// Binary pubkey - look up serial
|
||||
serial, serErr := resolver.GetOrCreatePubkeySerial(elem)
|
||||
if serErr == nil {
|
||||
writeTagElementSerial(w, TagElementPubkeySerial, serial)
|
||||
continue
|
||||
}
|
||||
// Fall through to raw encoding on error
|
||||
} else if isPTag && len(elem) == 64 {
|
||||
// Hex pubkey - decode and look up serial
|
||||
var pubkey []byte
|
||||
if pubkey, err = hexDecode(elem); err == nil && len(pubkey) == 32 {
|
||||
serial, serErr := resolver.GetOrCreatePubkeySerial(pubkey)
|
||||
if serErr == nil {
|
||||
writeTagElementSerial(w, TagElementPubkeySerial, serial)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Fall through to raw encoding on error
|
||||
} else if isETag && len(elem) == 32 {
|
||||
// Binary event ID - look up serial if exists
|
||||
serial, found, serErr := resolver.GetEventSerialById(elem)
|
||||
if serErr == nil && found {
|
||||
writeTagElementSerial(w, TagElementEventSerial, serial)
|
||||
continue
|
||||
}
|
||||
// Event not found - store full ID
|
||||
writeTagElement(w, TagElementEventIdFull, elem)
|
||||
continue
|
||||
} else if isETag && len(elem) == 64 {
|
||||
// Hex event ID - decode and look up serial
|
||||
var eventId []byte
|
||||
if eventId, err = hexDecode(elem); err == nil && len(eventId) == 32 {
|
||||
serial, found, serErr := resolver.GetEventSerialById(eventId)
|
||||
if serErr == nil && found {
|
||||
writeTagElementSerial(w, TagElementEventSerial, serial)
|
||||
continue
|
||||
}
|
||||
// Event not found - store full ID
|
||||
writeTagElement(w, TagElementEventIdFull, eventId)
|
||||
continue
|
||||
}
|
||||
// Fall through to raw encoding on error
|
||||
}
|
||||
}
|
||||
|
||||
// Default: raw encoding
|
||||
writeTagElement(w, TagElementRaw, elem)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeTagElement writes a tag element with type flag.
|
||||
func writeTagElement(w io.Writer, typeFlag byte, data []byte) {
|
||||
w.Write([]byte{typeFlag})
|
||||
if typeFlag == TagElementEventIdFull {
|
||||
// Full event ID - no length prefix, always 32 bytes
|
||||
w.Write(data)
|
||||
} else {
|
||||
// Raw data - length prefix
|
||||
varint.Encode(w, uint64(len(data)))
|
||||
w.Write(data)
|
||||
}
|
||||
}
|
||||
|
||||
// writeTagElementSerial writes a serial reference tag element.
|
||||
func writeTagElementSerial(w io.Writer, typeFlag byte, serial uint64) {
|
||||
w.Write([]byte{typeFlag})
|
||||
writeUint40(w, serial)
|
||||
}
|
||||
|
||||
// writeUint40 writes a 5-byte big-endian unsigned integer.
|
||||
func writeUint40(w io.Writer, value uint64) {
|
||||
buf := []byte{
|
||||
byte((value >> 32) & 0xFF),
|
||||
byte((value >> 24) & 0xFF),
|
||||
byte((value >> 16) & 0xFF),
|
||||
byte((value >> 8) & 0xFF),
|
||||
byte(value & 0xFF),
|
||||
}
|
||||
w.Write(buf)
|
||||
}
|
||||
|
||||
// readUint40 reads a 5-byte big-endian unsigned integer.
|
||||
func readUint40(r io.Reader) (value uint64, err error) {
|
||||
buf := make([]byte, 5)
|
||||
if _, err = io.ReadFull(r, buf); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
value = (uint64(buf[0]) << 32) |
|
||||
(uint64(buf[1]) << 24) |
|
||||
(uint64(buf[2]) << 16) |
|
||||
(uint64(buf[3]) << 8) |
|
||||
uint64(buf[4])
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// UnmarshalCompactEvent decodes a compact event back to a full event.E.
|
||||
// The resolver is used to look up pubkeys and event IDs from serials.
|
||||
// The eventId parameter is the full 32-byte event ID (from SerialEventId table).
|
||||
func UnmarshalCompactEvent(data []byte, eventId []byte, resolver SerialResolver) (ev *event.E, err error) {
|
||||
r := bytes.NewReader(data)
|
||||
ev = new(event.E)
|
||||
|
||||
// Version byte
|
||||
version, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if version != CompactFormatVersion {
|
||||
return nil, errors.New("unsupported compact event format version")
|
||||
}
|
||||
|
||||
// Set the event ID (passed separately from SerialEventId lookup)
|
||||
ev.ID = make([]byte, 32)
|
||||
copy(ev.ID, eventId)
|
||||
|
||||
// Author pubkey serial (5 bytes) -> full pubkey
|
||||
authorSerial, err := readUint40(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ev.Pubkey, err = resolver.GetPubkeyBySerial(authorSerial); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// CreatedAt (varint)
|
||||
var ca uint64
|
||||
if ca, err = varint.Decode(r); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
ev.CreatedAt = int64(ca)
|
||||
|
||||
// Kind (2 bytes big-endian)
|
||||
if err = binary.Read(r, binary.BigEndian, &ev.Kind); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Tags
|
||||
var nTags uint64
|
||||
if nTags, err = varint.Decode(r); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
if nTags > 0 {
|
||||
ev.Tags = tag.NewSWithCap(int(nTags))
|
||||
for i := uint64(0); i < nTags; i++ {
|
||||
var t *tag.T
|
||||
if t, err = decodeCompactTag(r, resolver); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
*ev.Tags = append(*ev.Tags, t)
|
||||
}
|
||||
}
|
||||
|
||||
// Content
|
||||
var contentLen uint64
|
||||
if contentLen, err = varint.Decode(r); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
ev.Content = make([]byte, contentLen)
|
||||
if _, err = io.ReadFull(r, ev.Content); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Signature (64 bytes)
|
||||
ev.Sig = make([]byte, schnorr.SignatureSize)
|
||||
if _, err = io.ReadFull(r, ev.Sig); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
// decodeCompactTag decodes a single tag from compact format.
|
||||
func decodeCompactTag(r io.Reader, resolver SerialResolver) (t *tag.T, err error) {
|
||||
var nElems uint64
|
||||
if nElems, err = varint.Decode(r); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
t = tag.NewWithCap(int(nElems))
|
||||
|
||||
for i := uint64(0); i < nElems; i++ {
|
||||
var elem []byte
|
||||
if elem, err = decodeTagElement(r, resolver); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
t.T = append(t.T, elem)
|
||||
}
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// decodeTagElement decodes a single tag element from compact format.
|
||||
func decodeTagElement(r io.Reader, resolver SerialResolver) (elem []byte, err error) {
|
||||
// Read type flag
|
||||
typeBuf := make([]byte, 1)
|
||||
if _, err = io.ReadFull(r, typeBuf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
typeFlag := typeBuf[0]
|
||||
|
||||
switch typeFlag {
|
||||
case TagElementRaw:
|
||||
// Raw bytes: varint length + data
|
||||
var length uint64
|
||||
if length, err = varint.Decode(r); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
elem = make([]byte, length)
|
||||
if _, err = io.ReadFull(r, elem); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return elem, nil
|
||||
|
||||
case TagElementPubkeySerial:
|
||||
// Pubkey serial: 5 bytes -> lookup full pubkey -> return as 32-byte binary
|
||||
serial, err := readUint40(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pubkey, err := resolver.GetPubkeyBySerial(serial)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Return as 32-byte binary (nostr library optimized format)
|
||||
return pubkey, nil
|
||||
|
||||
case TagElementEventSerial:
|
||||
// Event serial: 5 bytes -> lookup full event ID -> return as 32-byte binary
|
||||
serial, err := readUint40(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eventId, err := resolver.GetEventIdBySerial(serial)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Return as 32-byte binary
|
||||
return eventId, nil
|
||||
|
||||
case TagElementEventIdFull:
|
||||
// Full event ID: 32 bytes (for unknown/forward references)
|
||||
elem = make([]byte, 32)
|
||||
if _, err = io.ReadFull(r, elem); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return elem, nil
|
||||
|
||||
default:
|
||||
return nil, errors.New("unknown tag element type flag")
|
||||
}
|
||||
}
|
||||
|
||||
// hexDecode decodes hex bytes to binary.
|
||||
// This is a simple implementation - the real one uses the optimized hex package.
|
||||
func hexDecode(src []byte) (dst []byte, err error) {
|
||||
if len(src)%2 != 0 {
|
||||
return nil, errors.New("hex string has odd length")
|
||||
}
|
||||
dst = make([]byte, len(src)/2)
|
||||
for i := 0; i < len(dst); i++ {
|
||||
a := unhex(src[i*2])
|
||||
b := unhex(src[i*2+1])
|
||||
if a == 0xFF || b == 0xFF {
|
||||
return nil, errors.New("invalid hex character")
|
||||
}
|
||||
dst[i] = (a << 4) | b
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
func unhex(c byte) byte {
|
||||
switch {
|
||||
case '0' <= c && c <= '9':
|
||||
return c - '0'
|
||||
case 'a' <= c && c <= 'f':
|
||||
return c - 'a' + 10
|
||||
case 'A' <= c && c <= 'F':
|
||||
return c - 'A' + 10
|
||||
}
|
||||
return 0xFF
|
||||
}
|
||||
195
pkg/database/compact_stats.go
Normal file
195
pkg/database/compact_stats.go
Normal file
@@ -0,0 +1,195 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/database/indexes"
|
||||
)
|
||||
|
||||
// CompactStorageStats holds statistics about compact vs legacy storage.
|
||||
type CompactStorageStats struct {
|
||||
// Event counts
|
||||
CompactEvents int64 // Number of events in compact format (cmp prefix)
|
||||
LegacyEvents int64 // Number of events in legacy format (evt/sev prefixes)
|
||||
TotalEvents int64 // Total events
|
||||
|
||||
// Storage sizes
|
||||
CompactBytes int64 // Total bytes used by compact format
|
||||
LegacyBytes int64 // Total bytes used by legacy format (would be used without compact)
|
||||
|
||||
// Savings
|
||||
BytesSaved int64 // Bytes saved by using compact format
|
||||
PercentSaved float64 // Percentage of space saved
|
||||
AverageCompact float64 // Average compact event size
|
||||
AverageLegacy float64 // Average legacy event size (estimated)
|
||||
|
||||
// Serial mappings
|
||||
SerialEventIdEntries int64 // Number of sei (serial -> event ID) mappings
|
||||
SerialEventIdBytes int64 // Bytes used by sei mappings
|
||||
}
|
||||
|
||||
// CompactStorageStats calculates storage statistics for compact event storage.
|
||||
// This scans the database to provide accurate metrics on space savings.
|
||||
func (d *D) CompactStorageStats() (stats CompactStorageStats, err error) {
|
||||
if err = d.View(func(txn *badger.Txn) error {
|
||||
// Count compact events (cmp prefix)
|
||||
cmpPrf := new(bytes.Buffer)
|
||||
if err = indexes.CompactEventEnc(nil).MarshalWrite(cmpPrf); chk.E(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
it := txn.NewIterator(badger.IteratorOptions{Prefix: cmpPrf.Bytes()})
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
item := it.Item()
|
||||
stats.CompactEvents++
|
||||
stats.CompactBytes += int64(len(item.Key())) + int64(item.ValueSize())
|
||||
}
|
||||
it.Close()
|
||||
|
||||
// Count legacy evt entries
|
||||
evtPrf := new(bytes.Buffer)
|
||||
if err = indexes.EventEnc(nil).MarshalWrite(evtPrf); chk.E(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
it = txn.NewIterator(badger.IteratorOptions{Prefix: evtPrf.Bytes()})
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
item := it.Item()
|
||||
stats.LegacyEvents++
|
||||
stats.LegacyBytes += int64(len(item.Key())) + int64(item.ValueSize())
|
||||
}
|
||||
it.Close()
|
||||
|
||||
// Count legacy sev entries
|
||||
sevPrf := new(bytes.Buffer)
|
||||
if err = indexes.SmallEventEnc(nil).MarshalWrite(sevPrf); chk.E(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
it = txn.NewIterator(badger.IteratorOptions{Prefix: sevPrf.Bytes()})
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
item := it.Item()
|
||||
stats.LegacyEvents++
|
||||
stats.LegacyBytes += int64(len(item.Key())) // sev stores data in key
|
||||
}
|
||||
it.Close()
|
||||
|
||||
// Count SerialEventId mappings (sei prefix)
|
||||
seiPrf := new(bytes.Buffer)
|
||||
if err = indexes.SerialEventIdEnc(nil).MarshalWrite(seiPrf); chk.E(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
it = txn.NewIterator(badger.IteratorOptions{Prefix: seiPrf.Bytes()})
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
item := it.Item()
|
||||
stats.SerialEventIdEntries++
|
||||
stats.SerialEventIdBytes += int64(len(item.Key())) + int64(item.ValueSize())
|
||||
}
|
||||
it.Close()
|
||||
|
||||
return nil
|
||||
}); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
stats.TotalEvents = stats.CompactEvents + stats.LegacyEvents
|
||||
|
||||
// Calculate averages
|
||||
if stats.CompactEvents > 0 {
|
||||
stats.AverageCompact = float64(stats.CompactBytes) / float64(stats.CompactEvents)
|
||||
}
|
||||
if stats.LegacyEvents > 0 {
|
||||
stats.AverageLegacy = float64(stats.LegacyBytes) / float64(stats.LegacyEvents)
|
||||
}
|
||||
|
||||
// Estimate savings: compare compact size to what legacy size would be
|
||||
// For events that are in compact format, estimate legacy size based on typical ratios
|
||||
// A typical event has:
|
||||
// - 32 bytes event ID (saved in compact: stored separately in sei)
|
||||
// - 32 bytes pubkey (saved: replaced by 5-byte serial)
|
||||
// - For e-tags: 32 bytes each (saved: replaced by 5-byte serial when known)
|
||||
// - For p-tags: 32 bytes each (saved: replaced by 5-byte serial)
|
||||
// Conservative estimate: compact format is ~60% of legacy size for typical events
|
||||
if stats.CompactEvents > 0 && stats.AverageCompact > 0 {
|
||||
// Estimate what the legacy size would have been
|
||||
estimatedLegacyForCompact := float64(stats.CompactBytes) / 0.60 // 60% compression ratio
|
||||
stats.BytesSaved = int64(estimatedLegacyForCompact) - stats.CompactBytes - stats.SerialEventIdBytes
|
||||
if stats.BytesSaved < 0 {
|
||||
stats.BytesSaved = 0
|
||||
}
|
||||
totalWithoutCompact := estimatedLegacyForCompact + float64(stats.LegacyBytes)
|
||||
totalWithCompact := float64(stats.CompactBytes + stats.LegacyBytes + stats.SerialEventIdBytes)
|
||||
if totalWithoutCompact > 0 {
|
||||
stats.PercentSaved = (1.0 - totalWithCompact/totalWithoutCompact) * 100.0
|
||||
}
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
}
|
||||
|
||||
// compactSaveCounter tracks cumulative bytes saved by compact format
|
||||
var compactSaveCounter atomic.Int64
|
||||
|
||||
// LogCompactSavings logs the storage savings achieved by compact format.
|
||||
// Call this periodically or after significant operations.
|
||||
func (d *D) LogCompactSavings() {
|
||||
stats, err := d.CompactStorageStats()
|
||||
if err != nil {
|
||||
log.W.F("failed to get compact storage stats: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if stats.TotalEvents == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
log.I.F("📊 Compact storage stats: %d compact events, %d legacy events",
|
||||
stats.CompactEvents, stats.LegacyEvents)
|
||||
log.I.F(" Compact size: %.2f MB, Legacy size: %.2f MB",
|
||||
float64(stats.CompactBytes)/(1024.0*1024.0),
|
||||
float64(stats.LegacyBytes)/(1024.0*1024.0))
|
||||
log.I.F(" Serial mappings (sei): %d entries, %.2f KB",
|
||||
stats.SerialEventIdEntries,
|
||||
float64(stats.SerialEventIdBytes)/1024.0)
|
||||
|
||||
if stats.CompactEvents > 0 {
|
||||
log.I.F(" Average compact event: %.0f bytes, estimated legacy: %.0f bytes",
|
||||
stats.AverageCompact, stats.AverageCompact/0.60)
|
||||
log.I.F(" Estimated savings: %.2f MB (%.1f%%)",
|
||||
float64(stats.BytesSaved)/(1024.0*1024.0),
|
||||
stats.PercentSaved)
|
||||
}
|
||||
|
||||
// Also log serial cache stats
|
||||
cacheStats := d.SerialCacheStats()
|
||||
log.I.F(" Serial cache: %d/%d pubkeys, %d/%d event IDs, ~%.2f MB memory",
|
||||
cacheStats.PubkeysCached, cacheStats.PubkeysMaxSize,
|
||||
cacheStats.EventIdsCached, cacheStats.EventIdsMaxSize,
|
||||
float64(cacheStats.TotalMemoryBytes)/(1024.0*1024.0))
|
||||
}
|
||||
|
||||
// TrackCompactSaving records bytes saved for a single event.
|
||||
// Call this during event save to track cumulative savings.
|
||||
func TrackCompactSaving(legacySize, compactSize int) {
|
||||
saved := legacySize - compactSize
|
||||
if saved > 0 {
|
||||
compactSaveCounter.Add(int64(saved))
|
||||
}
|
||||
}
|
||||
|
||||
// GetCumulativeCompactSavings returns total bytes saved across all compact saves.
|
||||
func GetCumulativeCompactSavings() int64 {
|
||||
return compactSaveCounter.Load()
|
||||
}
|
||||
|
||||
// ResetCompactSavingsCounter resets the cumulative savings counter.
|
||||
func ResetCompactSavingsCounter() {
|
||||
compactSaveCounter.Store(0)
|
||||
}
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
@@ -5,7 +7,6 @@ import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
@@ -21,77 +22,117 @@ import (
|
||||
|
||||
// D implements the Database interface using Badger as the storage backend
|
||||
type D struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
dataDir string
|
||||
Logger *logger
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
dataDir string
|
||||
Logger *logger
|
||||
*badger.DB
|
||||
seq *badger.Sequence
|
||||
pubkeySeq *badger.Sequence // Sequence for pubkey serials
|
||||
ready chan struct{} // Closed when database is ready to serve requests
|
||||
queryCache *querycache.EventCache
|
||||
|
||||
// Serial cache for compact event storage
|
||||
// Caches pubkey and event ID serial mappings for fast compact event decoding
|
||||
serialCache *SerialCache
|
||||
}
|
||||
|
||||
// Ensure D implements Database interface at compile time
|
||||
var _ Database = (*D)(nil)
|
||||
|
||||
// New creates a new Badger database instance with default configuration.
|
||||
// This is provided for backward compatibility with existing callers.
|
||||
// For full configuration control, use NewWithConfig instead.
|
||||
func New(
|
||||
ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
|
||||
) (
|
||||
d *D, err error,
|
||||
) {
|
||||
// Initialize query cache with configurable size (default 512MB)
|
||||
queryCacheSize := int64(512 * 1024 * 1024) // 512 MB
|
||||
if v := os.Getenv("ORLY_QUERY_CACHE_SIZE_MB"); v != "" {
|
||||
if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
|
||||
queryCacheSize = int64(n * 1024 * 1024)
|
||||
}
|
||||
// Create a default config for backward compatibility
|
||||
cfg := &DatabaseConfig{
|
||||
DataDir: dataDir,
|
||||
LogLevel: logLevel,
|
||||
BlockCacheMB: 1024, // Default 1024 MB
|
||||
IndexCacheMB: 512, // Default 512 MB
|
||||
QueryCacheSizeMB: 512, // Default 512 MB
|
||||
QueryCacheMaxAge: 5 * time.Minute, // Default 5 minutes
|
||||
}
|
||||
queryCacheMaxAge := 5 * time.Minute // Default 5 minutes
|
||||
if v := os.Getenv("ORLY_QUERY_CACHE_MAX_AGE"); v != "" {
|
||||
if duration, perr := time.ParseDuration(v); perr == nil {
|
||||
queryCacheMaxAge = duration
|
||||
}
|
||||
return NewWithConfig(ctx, cancel, cfg)
|
||||
}
|
||||
|
||||
// NewWithConfig creates a new Badger database instance with full configuration.
|
||||
// This is the preferred method when you have access to DatabaseConfig.
|
||||
func NewWithConfig(
|
||||
ctx context.Context, cancel context.CancelFunc, cfg *DatabaseConfig,
|
||||
) (
|
||||
d *D, err error,
|
||||
) {
|
||||
// Apply defaults for zero values (backward compatibility)
|
||||
blockCacheMB := cfg.BlockCacheMB
|
||||
if blockCacheMB == 0 {
|
||||
blockCacheMB = 1024 // Default 1024 MB
|
||||
}
|
||||
indexCacheMB := cfg.IndexCacheMB
|
||||
if indexCacheMB == 0 {
|
||||
indexCacheMB = 512 // Default 512 MB
|
||||
}
|
||||
queryCacheSizeMB := cfg.QueryCacheSizeMB
|
||||
if queryCacheSizeMB == 0 {
|
||||
queryCacheSizeMB = 512 // Default 512 MB
|
||||
}
|
||||
queryCacheMaxAge := cfg.QueryCacheMaxAge
|
||||
if queryCacheMaxAge == 0 {
|
||||
queryCacheMaxAge = 5 * time.Minute // Default 5 minutes
|
||||
}
|
||||
|
||||
// Serial cache configuration for compact event storage
|
||||
serialCachePubkeys := cfg.SerialCachePubkeys
|
||||
if serialCachePubkeys == 0 {
|
||||
serialCachePubkeys = 100000 // Default 100k pubkeys (~3.2MB memory)
|
||||
}
|
||||
serialCacheEventIds := cfg.SerialCacheEventIds
|
||||
if serialCacheEventIds == 0 {
|
||||
serialCacheEventIds = 500000 // Default 500k event IDs (~16MB memory)
|
||||
}
|
||||
|
||||
// ZSTD compression level configuration
|
||||
// Level 0 = disabled, 1 = fast (~500 MB/s), 3 = default, 9 = best ratio
|
||||
zstdLevel := cfg.ZSTDLevel
|
||||
if zstdLevel < 0 {
|
||||
zstdLevel = 0
|
||||
} else if zstdLevel > 19 {
|
||||
zstdLevel = 19 // ZSTD maximum level
|
||||
}
|
||||
|
||||
queryCacheSize := int64(queryCacheSizeMB * 1024 * 1024)
|
||||
|
||||
d = &D{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
dataDir: dataDir,
|
||||
Logger: NewLogger(lol.GetLogLevel(logLevel), dataDir),
|
||||
DB: nil,
|
||||
seq: nil,
|
||||
ready: make(chan struct{}),
|
||||
queryCache: querycache.NewEventCache(queryCacheSize, queryCacheMaxAge),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
dataDir: cfg.DataDir,
|
||||
Logger: NewLogger(lol.GetLogLevel(cfg.LogLevel), cfg.DataDir),
|
||||
DB: nil,
|
||||
seq: nil,
|
||||
ready: make(chan struct{}),
|
||||
queryCache: querycache.NewEventCache(queryCacheSize, queryCacheMaxAge),
|
||||
serialCache: NewSerialCache(serialCachePubkeys, serialCacheEventIds),
|
||||
}
|
||||
|
||||
// Ensure the data directory exists
|
||||
if err = os.MkdirAll(dataDir, 0755); chk.E(err) {
|
||||
if err = os.MkdirAll(cfg.DataDir, 0755); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
// Also ensure the directory exists using apputil.EnsureDir for any
|
||||
// potential subdirectories
|
||||
dummyFile := filepath.Join(dataDir, "dummy.sst")
|
||||
dummyFile := filepath.Join(cfg.DataDir, "dummy.sst")
|
||||
if err = apputil.EnsureDir(dummyFile); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
opts := badger.DefaultOptions(d.dataDir)
|
||||
// Configure caches based on environment to better match workload.
|
||||
// Configure caches based on config to better match workload.
|
||||
// Defaults aim for higher hit ratios under read-heavy workloads while remaining safe.
|
||||
var blockCacheMB = 1024 // default 512 MB
|
||||
var indexCacheMB = 512 // default 256 MB
|
||||
if v := os.Getenv("ORLY_DB_BLOCK_CACHE_MB"); v != "" {
|
||||
if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
|
||||
blockCacheMB = n
|
||||
}
|
||||
}
|
||||
if v := os.Getenv("ORLY_DB_INDEX_CACHE_MB"); v != "" {
|
||||
if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
|
||||
indexCacheMB = n
|
||||
}
|
||||
}
|
||||
opts.BlockCacheSize = int64(blockCacheMB * units.Mb)
|
||||
opts.IndexCacheSize = int64(indexCacheMB * units.Mb)
|
||||
opts.BlockSize = 4 * units.Kb // 4 KB block size
|
||||
@@ -117,8 +158,13 @@ func New(
|
||||
opts.LmaxCompaction = true
|
||||
|
||||
// Enable compression to reduce cache cost
|
||||
opts.Compression = options.ZSTD
|
||||
opts.ZSTDCompressionLevel = 1 // Fast compression (500+ MB/s)
|
||||
// Level 0 disables compression, 1 = fast (~500 MB/s), 3 = default, 9 = best ratio
|
||||
if zstdLevel == 0 {
|
||||
opts.Compression = options.None
|
||||
} else {
|
||||
opts.Compression = options.ZSTD
|
||||
opts.ZSTDCompressionLevel = zstdLevel
|
||||
}
|
||||
|
||||
// Disable conflict detection for write-heavy relay workloads
|
||||
// Nostr events are immutable, no need for transaction conflict checks
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
|
||||
460
pkg/database/etag-graph_test.go
Normal file
460
pkg/database/etag-graph_test.go
Normal file
@@ -0,0 +1,460 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"next.orly.dev/pkg/database/indexes"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
)
|
||||
|
||||
func TestETagGraphEdgeCreation(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create a parent event (the post being replied to)
|
||||
parentPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
parentID := make([]byte, 32)
|
||||
parentID[0] = 0x10
|
||||
parentSig := make([]byte, 64)
|
||||
parentSig[0] = 0x10
|
||||
|
||||
parentEvent := &event.E{
|
||||
ID: parentID,
|
||||
Pubkey: parentPubkey,
|
||||
CreatedAt: 1234567890,
|
||||
Kind: 1,
|
||||
Content: []byte("This is the parent post"),
|
||||
Sig: parentSig,
|
||||
Tags: &tag.S{},
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, parentEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save parent event: %v", err)
|
||||
}
|
||||
|
||||
// Create a reply event with e-tag pointing to parent
|
||||
replyPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000002")
|
||||
replyID := make([]byte, 32)
|
||||
replyID[0] = 0x20
|
||||
replySig := make([]byte, 64)
|
||||
replySig[0] = 0x20
|
||||
|
||||
replyEvent := &event.E{
|
||||
ID: replyID,
|
||||
Pubkey: replyPubkey,
|
||||
CreatedAt: 1234567891,
|
||||
Kind: 1,
|
||||
Content: []byte("This is a reply"),
|
||||
Sig: replySig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("e", hex.Enc(parentID)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, replyEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save reply event: %v", err)
|
||||
}
|
||||
|
||||
// Get serials for both events
|
||||
parentSerial, err := db.GetSerialById(parentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get parent serial: %v", err)
|
||||
}
|
||||
replySerial, err := db.GetSerialById(replyID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get reply serial: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Parent serial: %d, Reply serial: %d", parentSerial.Get(), replySerial.Get())
|
||||
|
||||
// Verify forward edge exists (reply -> parent)
|
||||
forwardFound := false
|
||||
prefix := []byte(indexes.EventEventGraphPrefix)
|
||||
|
||||
err = db.View(func(txn *badger.Txn) error {
|
||||
it := txn.NewIterator(badger.DefaultIteratorOptions)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
|
||||
item := it.Item()
|
||||
key := item.KeyCopy(nil)
|
||||
|
||||
// Decode the key
|
||||
srcSer, tgtSer, kind, direction := indexes.EventEventGraphVars()
|
||||
keyReader := bytes.NewReader(key)
|
||||
if err := indexes.EventEventGraphDec(srcSer, tgtSer, kind, direction).UnmarshalRead(keyReader); err != nil {
|
||||
t.Logf("Failed to decode key: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if this is our edge
|
||||
if srcSer.Get() == replySerial.Get() && tgtSer.Get() == parentSerial.Get() {
|
||||
forwardFound = true
|
||||
if direction.Letter() != types.EdgeDirectionETagOut {
|
||||
t.Errorf("Expected direction %d, got %d", types.EdgeDirectionETagOut, direction.Letter())
|
||||
}
|
||||
if kind.Get() != 1 {
|
||||
t.Errorf("Expected kind 1, got %d", kind.Get())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("View failed: %v", err)
|
||||
}
|
||||
if !forwardFound {
|
||||
t.Error("Forward edge (reply -> parent) should exist")
|
||||
}
|
||||
|
||||
// Verify reverse edge exists (parent <- reply)
|
||||
reverseFound := false
|
||||
prefix = []byte(indexes.GraphEventEventPrefix)
|
||||
|
||||
err = db.View(func(txn *badger.Txn) error {
|
||||
it := txn.NewIterator(badger.DefaultIteratorOptions)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
|
||||
item := it.Item()
|
||||
key := item.KeyCopy(nil)
|
||||
|
||||
// Decode the key
|
||||
tgtSer, kind, direction, srcSer := indexes.GraphEventEventVars()
|
||||
keyReader := bytes.NewReader(key)
|
||||
if err := indexes.GraphEventEventDec(tgtSer, kind, direction, srcSer).UnmarshalRead(keyReader); err != nil {
|
||||
t.Logf("Failed to decode key: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
t.Logf("Found gee edge: tgt=%d kind=%d dir=%d src=%d",
|
||||
tgtSer.Get(), kind.Get(), direction.Letter(), srcSer.Get())
|
||||
|
||||
// Check if this is our edge
|
||||
if tgtSer.Get() == parentSerial.Get() && srcSer.Get() == replySerial.Get() {
|
||||
reverseFound = true
|
||||
if direction.Letter() != types.EdgeDirectionETagIn {
|
||||
t.Errorf("Expected direction %d, got %d", types.EdgeDirectionETagIn, direction.Letter())
|
||||
}
|
||||
if kind.Get() != 1 {
|
||||
t.Errorf("Expected kind 1, got %d", kind.Get())
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("View failed: %v", err)
|
||||
}
|
||||
if !reverseFound {
|
||||
t.Error("Reverse edge (parent <- reply) should exist")
|
||||
}
|
||||
}
|
||||
|
||||
func TestETagGraphMultipleReplies(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create a parent event
|
||||
parentPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
parentID := make([]byte, 32)
|
||||
parentID[0] = 0x10
|
||||
parentSig := make([]byte, 64)
|
||||
parentSig[0] = 0x10
|
||||
|
||||
parentEvent := &event.E{
|
||||
ID: parentID,
|
||||
Pubkey: parentPubkey,
|
||||
CreatedAt: 1234567890,
|
||||
Kind: 1,
|
||||
Content: []byte("Parent post"),
|
||||
Sig: parentSig,
|
||||
Tags: &tag.S{},
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, parentEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save parent: %v", err)
|
||||
}
|
||||
|
||||
// Create multiple replies
|
||||
numReplies := 5
|
||||
for i := 0; i < numReplies; i++ {
|
||||
replyPubkey := make([]byte, 32)
|
||||
replyPubkey[0] = byte(i + 0x20)
|
||||
replyID := make([]byte, 32)
|
||||
replyID[0] = byte(i + 0x30)
|
||||
replySig := make([]byte, 64)
|
||||
replySig[0] = byte(i + 0x30)
|
||||
|
||||
replyEvent := &event.E{
|
||||
ID: replyID,
|
||||
Pubkey: replyPubkey,
|
||||
CreatedAt: int64(1234567891 + i),
|
||||
Kind: 1,
|
||||
Content: []byte("Reply"),
|
||||
Sig: replySig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("e", hex.Enc(parentID)),
|
||||
),
|
||||
}
|
||||
_, err := db.SaveEvent(ctx, replyEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save reply %d: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Count inbound edges to parent
|
||||
parentSerial, err := db.GetSerialById(parentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get parent serial: %v", err)
|
||||
}
|
||||
|
||||
inboundCount := 0
|
||||
prefix := []byte(indexes.GraphEventEventPrefix)
|
||||
|
||||
err = db.View(func(txn *badger.Txn) error {
|
||||
it := txn.NewIterator(badger.DefaultIteratorOptions)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
|
||||
item := it.Item()
|
||||
key := item.KeyCopy(nil)
|
||||
|
||||
tgtSer, kind, direction, srcSer := indexes.GraphEventEventVars()
|
||||
keyReader := bytes.NewReader(key)
|
||||
if err := indexes.GraphEventEventDec(tgtSer, kind, direction, srcSer).UnmarshalRead(keyReader); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if tgtSer.Get() == parentSerial.Get() {
|
||||
inboundCount++
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("View failed: %v", err)
|
||||
}
|
||||
|
||||
if inboundCount != numReplies {
|
||||
t.Errorf("Expected %d inbound edges, got %d", numReplies, inboundCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestETagGraphDifferentKinds(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create a parent event (kind 1 - note)
|
||||
parentPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
parentID := make([]byte, 32)
|
||||
parentID[0] = 0x10
|
||||
parentSig := make([]byte, 64)
|
||||
parentSig[0] = 0x10
|
||||
|
||||
parentEvent := &event.E{
|
||||
ID: parentID,
|
||||
Pubkey: parentPubkey,
|
||||
CreatedAt: 1234567890,
|
||||
Kind: 1,
|
||||
Content: []byte("A note"),
|
||||
Sig: parentSig,
|
||||
Tags: &tag.S{},
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, parentEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save parent: %v", err)
|
||||
}
|
||||
|
||||
// Create a reaction (kind 7)
|
||||
reactionPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000002")
|
||||
reactionID := make([]byte, 32)
|
||||
reactionID[0] = 0x20
|
||||
reactionSig := make([]byte, 64)
|
||||
reactionSig[0] = 0x20
|
||||
|
||||
reactionEvent := &event.E{
|
||||
ID: reactionID,
|
||||
Pubkey: reactionPubkey,
|
||||
CreatedAt: 1234567891,
|
||||
Kind: 7,
|
||||
Content: []byte("+"),
|
||||
Sig: reactionSig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("e", hex.Enc(parentID)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, reactionEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save reaction: %v", err)
|
||||
}
|
||||
|
||||
// Create a repost (kind 6)
|
||||
repostPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000003")
|
||||
repostID := make([]byte, 32)
|
||||
repostID[0] = 0x30
|
||||
repostSig := make([]byte, 64)
|
||||
repostSig[0] = 0x30
|
||||
|
||||
repostEvent := &event.E{
|
||||
ID: repostID,
|
||||
Pubkey: repostPubkey,
|
||||
CreatedAt: 1234567892,
|
||||
Kind: 6,
|
||||
Content: []byte(""),
|
||||
Sig: repostSig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("e", hex.Enc(parentID)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, repostEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save repost: %v", err)
|
||||
}
|
||||
|
||||
// Query inbound edges by kind
|
||||
parentSerial, err := db.GetSerialById(parentID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get parent serial: %v", err)
|
||||
}
|
||||
|
||||
kindCounts := make(map[uint16]int)
|
||||
prefix := []byte(indexes.GraphEventEventPrefix)
|
||||
|
||||
err = db.View(func(txn *badger.Txn) error {
|
||||
it := txn.NewIterator(badger.DefaultIteratorOptions)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
|
||||
item := it.Item()
|
||||
key := item.KeyCopy(nil)
|
||||
|
||||
tgtSer, kind, direction, srcSer := indexes.GraphEventEventVars()
|
||||
keyReader := bytes.NewReader(key)
|
||||
if err := indexes.GraphEventEventDec(tgtSer, kind, direction, srcSer).UnmarshalRead(keyReader); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if tgtSer.Get() == parentSerial.Get() {
|
||||
kindCounts[kind.Get()]++
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("View failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify we have edges for each kind
|
||||
if kindCounts[7] != 1 {
|
||||
t.Errorf("Expected 1 kind-7 (reaction) edge, got %d", kindCounts[7])
|
||||
}
|
||||
if kindCounts[6] != 1 {
|
||||
t.Errorf("Expected 1 kind-6 (repost) edge, got %d", kindCounts[6])
|
||||
}
|
||||
}
|
||||
|
||||
func TestETagGraphUnknownTarget(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create an event with e-tag pointing to non-existent event
|
||||
unknownID := make([]byte, 32)
|
||||
unknownID[0] = 0xFF
|
||||
unknownID[31] = 0xFF
|
||||
|
||||
replyPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
replyID := make([]byte, 32)
|
||||
replyID[0] = 0x10
|
||||
replySig := make([]byte, 64)
|
||||
replySig[0] = 0x10
|
||||
|
||||
replyEvent := &event.E{
|
||||
ID: replyID,
|
||||
Pubkey: replyPubkey,
|
||||
CreatedAt: 1234567890,
|
||||
Kind: 1,
|
||||
Content: []byte("Reply to unknown"),
|
||||
Sig: replySig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("e", hex.Enc(unknownID)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, replyEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save reply: %v", err)
|
||||
}
|
||||
|
||||
// Verify event was saved
|
||||
replySerial, err := db.GetSerialById(replyID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get reply serial: %v", err)
|
||||
}
|
||||
if replySerial == nil {
|
||||
t.Fatal("Reply serial should exist")
|
||||
}
|
||||
|
||||
// Verify no forward edge was created (since target doesn't exist)
|
||||
edgeCount := 0
|
||||
prefix := []byte(indexes.EventEventGraphPrefix)
|
||||
|
||||
err = db.View(func(txn *badger.Txn) error {
|
||||
it := txn.NewIterator(badger.DefaultIteratorOptions)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
|
||||
item := it.Item()
|
||||
key := item.KeyCopy(nil)
|
||||
|
||||
srcSer, _, _, _ := indexes.EventEventGraphVars()
|
||||
keyReader := bytes.NewReader(key)
|
||||
if err := indexes.EventEventGraphDec(srcSer, new(types.Uint40), new(types.Uint16), new(types.Letter)).UnmarshalRead(keyReader); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if srcSer.Get() == replySerial.Get() {
|
||||
edgeCount++
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("View failed: %v", err)
|
||||
}
|
||||
|
||||
if edgeCount != 0 {
|
||||
t.Errorf("Expected no edges for unknown target, got %d", edgeCount)
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
@@ -14,52 +16,141 @@ import (
|
||||
)
|
||||
|
||||
// Export the complete database of stored events to an io.Writer in line structured minified
|
||||
// JSON.
|
||||
// JSON. Supports both legacy and compact event formats.
|
||||
func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
|
||||
var err error
|
||||
evB := make([]byte, 0, units.Mb)
|
||||
evBuf := bytes.NewBuffer(evB)
|
||||
|
||||
// Create resolver for compact event decoding
|
||||
resolver := NewDatabaseSerialResolver(d, d.serialCache)
|
||||
|
||||
// Helper function to unmarshal event data (handles both legacy and compact formats)
|
||||
unmarshalEventData := func(val []byte, ser *types.Uint40) (*event.E, error) {
|
||||
// Check if this is compact format (starts with version byte 1)
|
||||
if len(val) > 0 && val[0] == CompactFormatVersion {
|
||||
// Get event ID from SerialEventId table
|
||||
eventId, idErr := d.GetEventIdBySerial(ser)
|
||||
if idErr != nil {
|
||||
// Can't decode without event ID - skip
|
||||
return nil, idErr
|
||||
}
|
||||
return UnmarshalCompactEvent(val, eventId, resolver)
|
||||
}
|
||||
|
||||
// Legacy binary format
|
||||
ev := event.New()
|
||||
evBuf.Reset()
|
||||
evBuf.Write(val)
|
||||
if err := ev.UnmarshalBinary(evBuf); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
if len(pubkeys) == 0 {
|
||||
// Export all events - prefer cmp table, fall back to evt
|
||||
if err = d.View(
|
||||
func(txn *badger.Txn) (err error) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err = indexes.EventEnc(nil).MarshalWrite(buf); chk.E(err) {
|
||||
// First try cmp (compact format) table
|
||||
cmpBuf := new(bytes.Buffer)
|
||||
if err = indexes.CompactEventEnc(nil).MarshalWrite(cmpBuf); chk.E(err) {
|
||||
return
|
||||
}
|
||||
it := txn.NewIterator(badger.IteratorOptions{Prefix: buf.Bytes()})
|
||||
|
||||
it := txn.NewIterator(badger.IteratorOptions{Prefix: cmpBuf.Bytes()})
|
||||
defer it.Close()
|
||||
|
||||
seenSerials := make(map[uint64]bool)
|
||||
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
item := it.Item()
|
||||
if err = item.Value(
|
||||
func(val []byte) (err error) {
|
||||
evBuf.Write(val)
|
||||
return
|
||||
},
|
||||
); chk.E(err) {
|
||||
key := item.Key()
|
||||
|
||||
// Extract serial from key
|
||||
ser := new(types.Uint40)
|
||||
if err = ser.UnmarshalRead(bytes.NewReader(key[3:8])); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
ev := event.New()
|
||||
if err = ev.UnmarshalBinary(evBuf); chk.E(err) {
|
||||
|
||||
seenSerials[ser.Get()] = true
|
||||
|
||||
var val []byte
|
||||
if val, err = item.ValueCopy(nil); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
ev, unmarshalErr := unmarshalEventData(val, ser)
|
||||
if unmarshalErr != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Serialize the event to JSON and write it to the output
|
||||
defer func(ev *event.E) {
|
||||
ev.Free()
|
||||
evBuf.Reset()
|
||||
}(ev)
|
||||
if _, err = w.Write(ev.Serialize()); chk.E(err) {
|
||||
ev.Free()
|
||||
return
|
||||
}
|
||||
if _, err = w.Write([]byte{'\n'}); chk.E(err) {
|
||||
ev.Free()
|
||||
return
|
||||
}
|
||||
ev.Free()
|
||||
}
|
||||
it.Close()
|
||||
|
||||
// Then fall back to evt (legacy) table for any events not in cmp
|
||||
evtBuf := new(bytes.Buffer)
|
||||
if err = indexes.EventEnc(nil).MarshalWrite(evtBuf); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
it2 := txn.NewIterator(badger.IteratorOptions{Prefix: evtBuf.Bytes()})
|
||||
defer it2.Close()
|
||||
|
||||
for it2.Rewind(); it2.Valid(); it2.Next() {
|
||||
item := it2.Item()
|
||||
key := item.Key()
|
||||
|
||||
// Extract serial from key
|
||||
ser := new(types.Uint40)
|
||||
if err = ser.UnmarshalRead(bytes.NewReader(key[3:8])); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip if already exported from cmp table
|
||||
if seenSerials[ser.Get()] {
|
||||
continue
|
||||
}
|
||||
|
||||
var val []byte
|
||||
if val, err = item.ValueCopy(nil); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
ev, unmarshalErr := unmarshalEventData(val, ser)
|
||||
if unmarshalErr != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Serialize the event to JSON and write it to the output
|
||||
if _, err = w.Write(ev.Serialize()); chk.E(err) {
|
||||
ev.Free()
|
||||
return
|
||||
}
|
||||
if _, err = w.Write([]byte{'\n'}); chk.E(err) {
|
||||
ev.Free()
|
||||
return
|
||||
}
|
||||
ev.Free()
|
||||
}
|
||||
|
||||
return
|
||||
},
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// Export events for specific pubkeys
|
||||
for _, pubkey := range pubkeys {
|
||||
if err = d.View(
|
||||
func(txn *badger.Txn) (err error) {
|
||||
@@ -77,29 +168,34 @@ func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
|
||||
defer it.Close()
|
||||
for it.Rewind(); it.Valid(); it.Next() {
|
||||
item := it.Item()
|
||||
if err = item.Value(
|
||||
func(val []byte) (err error) {
|
||||
evBuf.Write(val)
|
||||
return
|
||||
},
|
||||
); chk.E(err) {
|
||||
key := item.Key()
|
||||
|
||||
// Extract serial from pubkey index key
|
||||
// Key format: pc-|pubkey_hash|created_at|serial
|
||||
if len(key) < 3+8+8+5 {
|
||||
continue
|
||||
}
|
||||
ev := event.New()
|
||||
if err = ev.UnmarshalBinary(evBuf); chk.E(err) {
|
||||
ser := new(types.Uint40)
|
||||
if err = ser.UnmarshalRead(bytes.NewReader(key[len(key)-5:])); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Fetch the event using FetchEventBySerial which handles all formats
|
||||
ev, fetchErr := d.FetchEventBySerial(ser)
|
||||
if fetchErr != nil || ev == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Serialize the event to JSON and write it to the output
|
||||
defer func(ev *event.E) {
|
||||
ev.Free()
|
||||
evBuf.Reset()
|
||||
}(ev)
|
||||
if _, err = w.Write(ev.Serialize()); chk.E(err) {
|
||||
ev.Free()
|
||||
continue
|
||||
}
|
||||
if _, err = w.Write([]byte{'\n'}); chk.E(err) {
|
||||
ev.Free()
|
||||
continue
|
||||
}
|
||||
ev.Free()
|
||||
}
|
||||
return
|
||||
},
|
||||
|
||||
@@ -1,53 +1,103 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DatabaseConfig holds all database configuration options that can be passed
|
||||
// to any database backend. Each backend uses the relevant fields for its type.
|
||||
// This centralizes configuration instead of having each backend read env vars directly.
|
||||
type DatabaseConfig struct {
|
||||
// Common settings for all backends
|
||||
DataDir string
|
||||
LogLevel string
|
||||
|
||||
// Badger-specific settings
|
||||
BlockCacheMB int // ORLY_DB_BLOCK_CACHE_MB
|
||||
IndexCacheMB int // ORLY_DB_INDEX_CACHE_MB
|
||||
QueryCacheSizeMB int // ORLY_QUERY_CACHE_SIZE_MB
|
||||
QueryCacheMaxAge time.Duration // ORLY_QUERY_CACHE_MAX_AGE
|
||||
|
||||
// Serial cache settings for compact event storage
|
||||
SerialCachePubkeys int // ORLY_SERIAL_CACHE_PUBKEYS - max pubkeys to cache (default: 100000)
|
||||
SerialCacheEventIds int // ORLY_SERIAL_CACHE_EVENT_IDS - max event IDs to cache (default: 500000)
|
||||
|
||||
// Compression settings
|
||||
ZSTDLevel int // ORLY_DB_ZSTD_LEVEL - ZSTD compression level (0=none, 1=fast, 3=default, 9=best)
|
||||
|
||||
// Neo4j-specific settings
|
||||
Neo4jURI string // ORLY_NEO4J_URI
|
||||
Neo4jUser string // ORLY_NEO4J_USER
|
||||
Neo4jPassword string // ORLY_NEO4J_PASSWORD
|
||||
}
|
||||
|
||||
// NewDatabase creates a database instance based on the specified type.
|
||||
// Supported types: "badger", "dgraph", "neo4j"
|
||||
// Supported types: "badger", "neo4j"
|
||||
func NewDatabase(
|
||||
ctx context.Context,
|
||||
cancel context.CancelFunc,
|
||||
dbType string,
|
||||
dataDir string,
|
||||
logLevel string,
|
||||
) (Database, error) {
|
||||
// Create a default config for backward compatibility with existing callers
|
||||
cfg := &DatabaseConfig{
|
||||
DataDir: dataDir,
|
||||
LogLevel: logLevel,
|
||||
}
|
||||
return NewDatabaseWithConfig(ctx, cancel, dbType, cfg)
|
||||
}
|
||||
|
||||
// NewDatabaseWithConfig creates a database instance with full configuration.
|
||||
// This is the preferred method when you have access to the app config.
|
||||
func NewDatabaseWithConfig(
|
||||
ctx context.Context,
|
||||
cancel context.CancelFunc,
|
||||
dbType string,
|
||||
cfg *DatabaseConfig,
|
||||
) (Database, error) {
|
||||
switch strings.ToLower(dbType) {
|
||||
case "badger", "":
|
||||
// Use the existing badger implementation
|
||||
return New(ctx, cancel, dataDir, logLevel)
|
||||
case "dgraph":
|
||||
// Use the new dgraph implementation
|
||||
// Import dynamically to avoid import cycles
|
||||
return newDgraphDatabase(ctx, cancel, dataDir, logLevel)
|
||||
return NewWithConfig(ctx, cancel, cfg)
|
||||
case "neo4j":
|
||||
// Use the new neo4j implementation
|
||||
// Import dynamically to avoid import cycles
|
||||
return newNeo4jDatabase(ctx, cancel, dataDir, logLevel)
|
||||
// Use the neo4j implementation
|
||||
if newNeo4jDatabase == nil {
|
||||
return nil, fmt.Errorf("neo4j database backend not available (import _ \"next.orly.dev/pkg/neo4j\")")
|
||||
}
|
||||
return newNeo4jDatabase(ctx, cancel, cfg)
|
||||
case "wasmdb", "indexeddb", "wasm":
|
||||
// Use the wasmdb implementation (IndexedDB backend for WebAssembly)
|
||||
if newWasmDBDatabase == nil {
|
||||
return nil, fmt.Errorf("wasmdb database backend not available (import _ \"next.orly.dev/pkg/wasmdb\")")
|
||||
}
|
||||
return newWasmDBDatabase(ctx, cancel, cfg)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported database type: %s (supported: badger, dgraph, neo4j)", dbType)
|
||||
return nil, fmt.Errorf("unsupported database type: %s (supported: badger, neo4j, wasmdb)", dbType)
|
||||
}
|
||||
}
|
||||
|
||||
// newDgraphDatabase creates a dgraph database instance
|
||||
// This is defined here to avoid import cycles
|
||||
var newDgraphDatabase func(context.Context, context.CancelFunc, string, string) (Database, error)
|
||||
|
||||
// RegisterDgraphFactory registers the dgraph database factory
|
||||
// This is called from the dgraph package's init() function
|
||||
func RegisterDgraphFactory(factory func(context.Context, context.CancelFunc, string, string) (Database, error)) {
|
||||
newDgraphDatabase = factory
|
||||
}
|
||||
|
||||
// newNeo4jDatabase creates a neo4j database instance
|
||||
// This is defined here to avoid import cycles
|
||||
var newNeo4jDatabase func(context.Context, context.CancelFunc, string, string) (Database, error)
|
||||
var newNeo4jDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)
|
||||
|
||||
// RegisterNeo4jFactory registers the neo4j database factory
|
||||
// This is called from the neo4j package's init() function
|
||||
func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, string, string) (Database, error)) {
|
||||
func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
|
||||
newNeo4jDatabase = factory
|
||||
}
|
||||
|
||||
// newWasmDBDatabase creates a wasmdb database instance (IndexedDB backend for WebAssembly)
|
||||
// This is defined here to avoid import cycles
|
||||
var newWasmDBDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)
|
||||
|
||||
// RegisterWasmDBFactory registers the wasmdb database factory
|
||||
// This is called from the wasmdb package's init() function
|
||||
func RegisterWasmDBFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
|
||||
newWasmDBDatabase = factory
|
||||
}
|
||||
|
||||
99
pkg/database/factory_wasm.go
Normal file
99
pkg/database/factory_wasm.go
Normal file
@@ -0,0 +1,99 @@
|
||||
//go:build js && wasm
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DatabaseConfig holds all database configuration options that can be passed
|
||||
// to any database backend. Each backend uses the relevant fields for its type.
|
||||
// This centralizes configuration instead of having each backend read env vars directly.
|
||||
type DatabaseConfig struct {
|
||||
// Common settings for all backends
|
||||
DataDir string
|
||||
LogLevel string
|
||||
|
||||
// Badger-specific settings (not available in WASM)
|
||||
BlockCacheMB int // ORLY_DB_BLOCK_CACHE_MB
|
||||
IndexCacheMB int // ORLY_DB_INDEX_CACHE_MB
|
||||
QueryCacheSizeMB int // ORLY_QUERY_CACHE_SIZE_MB
|
||||
QueryCacheMaxAge time.Duration // ORLY_QUERY_CACHE_MAX_AGE
|
||||
|
||||
// Serial cache settings for compact event storage (Badger-specific)
|
||||
SerialCachePubkeys int // ORLY_SERIAL_CACHE_PUBKEYS - max pubkeys to cache (default: 100000)
|
||||
SerialCacheEventIds int // ORLY_SERIAL_CACHE_EVENT_IDS - max event IDs to cache (default: 500000)
|
||||
|
||||
// Neo4j-specific settings
|
||||
Neo4jURI string // ORLY_NEO4J_URI
|
||||
Neo4jUser string // ORLY_NEO4J_USER
|
||||
Neo4jPassword string // ORLY_NEO4J_PASSWORD
|
||||
}
|
||||
|
||||
// NewDatabase creates a database instance based on the specified type.
|
||||
// Supported types in WASM: "wasmdb", "neo4j"
|
||||
// Note: "badger" is not available in WASM builds due to filesystem dependencies
|
||||
func NewDatabase(
|
||||
ctx context.Context,
|
||||
cancel context.CancelFunc,
|
||||
dbType string,
|
||||
dataDir string,
|
||||
logLevel string,
|
||||
) (Database, error) {
|
||||
// Create a default config for backward compatibility with existing callers
|
||||
cfg := &DatabaseConfig{
|
||||
DataDir: dataDir,
|
||||
LogLevel: logLevel,
|
||||
}
|
||||
return NewDatabaseWithConfig(ctx, cancel, dbType, cfg)
|
||||
}
|
||||
|
||||
// NewDatabaseWithConfig creates a database instance with full configuration.
|
||||
// This is the preferred method when you have access to the app config.
|
||||
func NewDatabaseWithConfig(
|
||||
ctx context.Context,
|
||||
cancel context.CancelFunc,
|
||||
dbType string,
|
||||
cfg *DatabaseConfig,
|
||||
) (Database, error) {
|
||||
switch strings.ToLower(dbType) {
|
||||
case "wasmdb", "indexeddb", "wasm", "badger", "":
|
||||
// In WASM builds, default to wasmdb (IndexedDB backend)
|
||||
// "badger" is mapped to wasmdb since Badger is not available
|
||||
if newWasmDBDatabase == nil {
|
||||
return nil, fmt.Errorf("wasmdb database backend not available (import _ \"next.orly.dev/pkg/wasmdb\")")
|
||||
}
|
||||
return newWasmDBDatabase(ctx, cancel, cfg)
|
||||
case "neo4j":
|
||||
// Use the neo4j implementation (HTTP-based, works in WASM)
|
||||
if newNeo4jDatabase == nil {
|
||||
return nil, fmt.Errorf("neo4j database backend not available (import _ \"next.orly.dev/pkg/neo4j\")")
|
||||
}
|
||||
return newNeo4jDatabase(ctx, cancel, cfg)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported database type: %s (supported in WASM: wasmdb, neo4j)", dbType)
|
||||
}
|
||||
}
|
||||
|
||||
// newNeo4jDatabase creates a neo4j database instance
|
||||
// This is defined here to avoid import cycles
|
||||
var newNeo4jDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)
|
||||
|
||||
// RegisterNeo4jFactory registers the neo4j database factory
|
||||
// This is called from the neo4j package's init() function
|
||||
func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
|
||||
newNeo4jDatabase = factory
|
||||
}
|
||||
|
||||
// newWasmDBDatabase creates a wasmdb database instance (IndexedDB backend for WebAssembly)
|
||||
// This is defined here to avoid import cycles
|
||||
var newWasmDBDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)
|
||||
|
||||
// RegisterWasmDBFactory registers the wasmdb database factory
|
||||
// This is called from the wasmdb package's init() function
|
||||
func RegisterWasmDBFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
|
||||
newWasmDBDatabase = factory
|
||||
}
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
@@ -11,9 +13,24 @@ import (
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
)
|
||||
|
||||
// FetchEventBySerial fetches a single event by its serial.
|
||||
// This function tries multiple storage formats in order:
|
||||
// 1. cmp (compact format with serial references) - newest, most space-efficient
|
||||
// 2. sev (small event inline) - legacy Reiser4 optimization
|
||||
// 3. evt (traditional separate storage) - legacy fallback
|
||||
func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
|
||||
// Create resolver for compact event decoding
|
||||
resolver := NewDatabaseSerialResolver(d, d.serialCache)
|
||||
|
||||
if err = d.View(
|
||||
func(txn *badger.Txn) (err error) {
|
||||
// Try cmp (compact format) first - most efficient
|
||||
ev, err = d.fetchCompactEvent(txn, ser, resolver)
|
||||
if err == nil && ev != nil {
|
||||
return nil
|
||||
}
|
||||
err = nil // Reset error, try legacy formats
|
||||
|
||||
// Helper function to extract inline event data from key
|
||||
extractInlineData := func(key []byte, prefixLen int) (*event.E, error) {
|
||||
if len(key) > prefixLen+2 {
|
||||
@@ -23,6 +40,16 @@ func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
|
||||
|
||||
if len(key) >= dataStart+size {
|
||||
eventData := key[dataStart : dataStart+size]
|
||||
|
||||
// Check if this is compact format
|
||||
if len(eventData) > 0 && eventData[0] == CompactFormatVersion {
|
||||
eventId, idErr := d.GetEventIdBySerial(ser)
|
||||
if idErr == nil {
|
||||
return UnmarshalCompactEvent(eventData, eventId, resolver)
|
||||
}
|
||||
}
|
||||
|
||||
// Legacy binary format
|
||||
ev := new(event.E)
|
||||
if err := ev.UnmarshalBinary(bytes.NewBuffer(eventData)); err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
@@ -36,7 +63,7 @@ func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Try sev (small event inline) prefix first - Reiser4 optimization
|
||||
// Try sev (small event inline) prefix - Reiser4 optimization
|
||||
smallBuf := new(bytes.Buffer)
|
||||
if err = indexes.SmallEventEnc(ser).MarshalWrite(smallBuf); chk.E(err) {
|
||||
return
|
||||
@@ -75,6 +102,16 @@ func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
|
||||
if v, err = item.ValueCopy(nil); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
// Check if this is compact format
|
||||
if len(v) > 0 && v[0] == CompactFormatVersion {
|
||||
eventId, idErr := d.GetEventIdBySerial(ser)
|
||||
if idErr == nil {
|
||||
ev, err = UnmarshalCompactEvent(v, eventId, resolver)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we have valid data before attempting to unmarshal
|
||||
if len(v) < 32+32+1+2+1+1+64 { // ID + Pubkey + min varint fields + Sig
|
||||
err = fmt.Errorf(
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
@@ -5,6 +7,7 @@ import (
|
||||
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/database/indexes"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
@@ -12,6 +15,11 @@ import (
|
||||
|
||||
// FetchEventsBySerials fetches multiple events by their serials in a single database transaction.
|
||||
// Returns a map of serial uint64 value to event, only including successfully fetched events.
|
||||
//
|
||||
// This function tries multiple storage formats in order:
|
||||
// 1. cmp (compact format with serial references) - newest, most space-efficient
|
||||
// 2. sev (small event inline) - legacy Reiser4 optimization
|
||||
// 3. evt (traditional separate storage) - legacy fallback
|
||||
func (d *D) FetchEventsBySerials(serials []*types.Uint40) (events map[uint64]*event.E, err error) {
|
||||
// Pre-allocate map with estimated capacity to reduce reallocations
|
||||
events = make(map[uint64]*event.E, len(serials))
|
||||
@@ -20,89 +28,38 @@ func (d *D) FetchEventsBySerials(serials []*types.Uint40) (events map[uint64]*ev
|
||||
return events, nil
|
||||
}
|
||||
|
||||
// Create resolver for compact event decoding
|
||||
resolver := NewDatabaseSerialResolver(d, d.serialCache)
|
||||
|
||||
if err = d.View(
|
||||
func(txn *badger.Txn) (err error) {
|
||||
for _, ser := range serials {
|
||||
var ev *event.E
|
||||
serialVal := ser.Get()
|
||||
|
||||
// Try sev (small event inline) prefix first - Reiser4 optimization
|
||||
smallBuf := new(bytes.Buffer)
|
||||
if err = indexes.SmallEventEnc(ser).MarshalWrite(smallBuf); chk.E(err) {
|
||||
// Skip this serial on error but continue with others
|
||||
err = nil
|
||||
// Try cmp (compact format) first - most efficient
|
||||
ev, err = d.fetchCompactEvent(txn, ser, resolver)
|
||||
if err == nil && ev != nil {
|
||||
events[serialVal] = ev
|
||||
continue
|
||||
}
|
||||
err = nil // Reset error, try legacy formats
|
||||
|
||||
// Iterate with prefix to find the small event key
|
||||
opts := badger.DefaultIteratorOptions
|
||||
opts.Prefix = smallBuf.Bytes()
|
||||
opts.PrefetchValues = true
|
||||
opts.PrefetchSize = 1
|
||||
it := txn.NewIterator(opts)
|
||||
|
||||
it.Rewind()
|
||||
if it.Valid() {
|
||||
// Found in sev table - extract inline data
|
||||
key := it.Item().Key()
|
||||
// Key format: sev|serial|size_uint16|event_data
|
||||
if len(key) > 8+2 { // prefix(3) + serial(5) + size(2) = 10 bytes minimum
|
||||
sizeIdx := 8 // After sev(3) + serial(5)
|
||||
// Read uint16 big-endian size
|
||||
size := int(key[sizeIdx])<<8 | int(key[sizeIdx+1])
|
||||
dataStart := sizeIdx + 2
|
||||
|
||||
if len(key) >= dataStart+size {
|
||||
eventData := key[dataStart : dataStart+size]
|
||||
ev = new(event.E)
|
||||
if err = ev.UnmarshalBinary(bytes.NewBuffer(eventData)); err == nil {
|
||||
events[ser.Get()] = ev
|
||||
}
|
||||
// Clean up and continue
|
||||
it.Close()
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Try sev (small event inline) prefix - legacy Reiser4 optimization
|
||||
ev, err = d.fetchSmallEvent(txn, ser)
|
||||
if err == nil && ev != nil {
|
||||
events[serialVal] = ev
|
||||
continue
|
||||
}
|
||||
it.Close()
|
||||
err = nil // Reset error, try evt
|
||||
|
||||
// Not found in sev table, try evt (traditional) prefix
|
||||
buf := new(bytes.Buffer)
|
||||
if err = indexes.EventEnc(ser).MarshalWrite(buf); chk.E(err) {
|
||||
// Skip this serial on error but continue with others
|
||||
err = nil
|
||||
ev, err = d.fetchLegacyEvent(txn, ser)
|
||||
if err == nil && ev != nil {
|
||||
events[serialVal] = ev
|
||||
continue
|
||||
}
|
||||
|
||||
var item *badger.Item
|
||||
if item, err = txn.Get(buf.Bytes()); err != nil {
|
||||
// Skip this serial if not found but continue with others
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
|
||||
var v []byte
|
||||
if v, err = item.ValueCopy(nil); chk.E(err) {
|
||||
// Skip this serial on error but continue with others
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if we have valid data before attempting to unmarshal
|
||||
if len(v) < 32+32+1+2+1+1+64 { // ID + Pubkey + min varint fields + Sig
|
||||
// Skip this serial - incomplete data
|
||||
continue
|
||||
}
|
||||
|
||||
ev = new(event.E)
|
||||
if err = ev.UnmarshalBinary(bytes.NewBuffer(v)); err != nil {
|
||||
// Skip this serial on unmarshal error but continue with others
|
||||
err = nil
|
||||
continue
|
||||
}
|
||||
|
||||
// Successfully unmarshaled event, add to results
|
||||
events[ser.Get()] = ev
|
||||
err = nil // Reset error, event not found
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@@ -111,4 +68,150 @@ func (d *D) FetchEventsBySerials(serials []*types.Uint40) (events map[uint64]*ev
|
||||
}
|
||||
|
||||
return events, nil
|
||||
}
|
||||
}
|
||||
|
||||
// fetchCompactEvent tries to fetch an event from the compact format (cmp prefix).
|
||||
func (d *D) fetchCompactEvent(txn *badger.Txn, ser *types.Uint40, resolver SerialResolver) (ev *event.E, err error) {
|
||||
// Build cmp key
|
||||
keyBuf := new(bytes.Buffer)
|
||||
if err = indexes.CompactEventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
item, err := txn.Get(keyBuf.Bytes())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var compactData []byte
|
||||
if compactData, err = item.ValueCopy(nil); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Need to get the event ID from SerialEventId table
|
||||
eventId, err := d.GetEventIdBySerial(ser)
|
||||
if err != nil {
|
||||
log.D.F("fetchCompactEvent: failed to get event ID for serial %d: %v", ser.Get(), err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Unmarshal compact event
|
||||
ev, err = UnmarshalCompactEvent(compactData, eventId, resolver)
|
||||
if err != nil {
|
||||
log.D.F("fetchCompactEvent: failed to unmarshal compact event for serial %d: %v", ser.Get(), err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
// fetchSmallEvent tries to fetch an event from the small event inline format (sev prefix).
|
||||
func (d *D) fetchSmallEvent(txn *badger.Txn, ser *types.Uint40) (ev *event.E, err error) {
|
||||
smallBuf := new(bytes.Buffer)
|
||||
if err = indexes.SmallEventEnc(ser).MarshalWrite(smallBuf); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Iterate with prefix to find the small event key
|
||||
opts := badger.DefaultIteratorOptions
|
||||
opts.Prefix = smallBuf.Bytes()
|
||||
opts.PrefetchValues = true
|
||||
opts.PrefetchSize = 1
|
||||
it := txn.NewIterator(opts)
|
||||
defer it.Close()
|
||||
|
||||
it.Rewind()
|
||||
if !it.Valid() {
|
||||
return nil, nil // Not found
|
||||
}
|
||||
|
||||
// Found in sev table - extract inline data
|
||||
key := it.Item().Key()
|
||||
// Key format: sev|serial|size_uint16|event_data
|
||||
if len(key) <= 8+2 { // prefix(3) + serial(5) + size(2) = 10 bytes minimum
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
sizeIdx := 8 // After sev(3) + serial(5)
|
||||
// Read uint16 big-endian size
|
||||
size := int(key[sizeIdx])<<8 | int(key[sizeIdx+1])
|
||||
dataStart := sizeIdx + 2
|
||||
|
||||
if len(key) < dataStart+size {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
eventData := key[dataStart : dataStart+size]
|
||||
|
||||
// Check if this is compact format (starts with version byte 1)
|
||||
if len(eventData) > 0 && eventData[0] == CompactFormatVersion {
|
||||
// This is compact format stored in sev - need to decode with resolver
|
||||
resolver := NewDatabaseSerialResolver(d, d.serialCache)
|
||||
eventId, idErr := d.GetEventIdBySerial(ser)
|
||||
if idErr != nil {
|
||||
// Fall back to legacy unmarshal
|
||||
ev = new(event.E)
|
||||
if err = ev.UnmarshalBinary(bytes.NewBuffer(eventData)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ev, nil
|
||||
}
|
||||
return UnmarshalCompactEvent(eventData, eventId, resolver)
|
||||
}
|
||||
|
||||
// Legacy binary format
|
||||
ev = new(event.E)
|
||||
if err = ev.UnmarshalBinary(bytes.NewBuffer(eventData)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
// fetchLegacyEvent tries to fetch an event from the legacy format (evt prefix).
|
||||
func (d *D) fetchLegacyEvent(txn *badger.Txn, ser *types.Uint40) (ev *event.E, err error) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err = indexes.EventEnc(ser).MarshalWrite(buf); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
item, err := txn.Get(buf.Bytes())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var v []byte
|
||||
if v, err = item.ValueCopy(nil); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check if we have valid data before attempting to unmarshal
|
||||
if len(v) < 32+32+1+2+1+1+64 { // ID + Pubkey + min varint fields + Sig
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Check if this is compact format (starts with version byte 1)
|
||||
if len(v) > 0 && v[0] == CompactFormatVersion {
|
||||
// This is compact format stored in evt - need to decode with resolver
|
||||
resolver := NewDatabaseSerialResolver(d, d.serialCache)
|
||||
eventId, idErr := d.GetEventIdBySerial(ser)
|
||||
if idErr != nil {
|
||||
// Fall back to legacy unmarshal
|
||||
ev = new(event.E)
|
||||
if err = ev.UnmarshalBinary(bytes.NewBuffer(v)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ev, nil
|
||||
}
|
||||
return UnmarshalCompactEvent(v, eventId, resolver)
|
||||
}
|
||||
|
||||
// Legacy binary format
|
||||
ev = new(event.E)
|
||||
if err = ev.UnmarshalBinary(bytes.NewBuffer(v)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ev, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
|
||||
42
pkg/database/graph-adapter.go
Normal file
42
pkg/database/graph-adapter.go
Normal file
@@ -0,0 +1,42 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"next.orly.dev/pkg/protocol/graph"
|
||||
)
|
||||
|
||||
// GraphAdapter wraps a database instance and implements graph.GraphDatabase interface.
|
||||
// This allows the graph executor to call database traversal methods without
|
||||
// the database package importing the graph package.
|
||||
type GraphAdapter struct {
|
||||
db *D
|
||||
}
|
||||
|
||||
// NewGraphAdapter creates a new GraphAdapter wrapping the given database.
|
||||
func NewGraphAdapter(db *D) *GraphAdapter {
|
||||
return &GraphAdapter{db: db}
|
||||
}
|
||||
|
||||
// TraverseFollows implements graph.GraphDatabase.
|
||||
func (a *GraphAdapter) TraverseFollows(seedPubkey []byte, maxDepth int) (graph.GraphResultI, error) {
|
||||
return a.db.TraverseFollows(seedPubkey, maxDepth)
|
||||
}
|
||||
|
||||
// TraverseFollowers implements graph.GraphDatabase.
|
||||
func (a *GraphAdapter) TraverseFollowers(seedPubkey []byte, maxDepth int) (graph.GraphResultI, error) {
|
||||
return a.db.TraverseFollowers(seedPubkey, maxDepth)
|
||||
}
|
||||
|
||||
// FindMentions implements graph.GraphDatabase.
|
||||
func (a *GraphAdapter) FindMentions(pubkey []byte, kinds []uint16) (graph.GraphResultI, error) {
|
||||
return a.db.FindMentions(pubkey, kinds)
|
||||
}
|
||||
|
||||
// TraverseThread implements graph.GraphDatabase.
|
||||
func (a *GraphAdapter) TraverseThread(seedEventID []byte, maxDepth int, direction string) (graph.GraphResultI, error) {
|
||||
return a.db.TraverseThread(seedEventID, maxDepth, direction)
|
||||
}
|
||||
|
||||
// Verify GraphAdapter implements graph.GraphDatabase
|
||||
var _ graph.GraphDatabase = (*GraphAdapter)(nil)
|
||||
199
pkg/database/graph-follows.go
Normal file
199
pkg/database/graph-follows.go
Normal file
@@ -0,0 +1,199 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
)
|
||||
|
||||
// TraverseFollows performs BFS traversal of the follow graph starting from a seed pubkey.
|
||||
// Returns pubkeys grouped by first-discovered depth (no duplicates across depths).
|
||||
//
|
||||
// The traversal works by:
|
||||
// 1. Starting with the seed pubkey at depth 0 (not included in results)
|
||||
// 2. For each pubkey at the current depth, find their kind-3 contact list
|
||||
// 3. Extract p-tags from the contact list to get follows
|
||||
// 4. Add new (unseen) follows to the next depth
|
||||
// 5. Continue until maxDepth is reached or no new pubkeys are found
|
||||
//
|
||||
// Early termination occurs if two consecutive depths yield no new pubkeys.
|
||||
func (d *D) TraverseFollows(seedPubkey []byte, maxDepth int) (*GraphResult, error) {
|
||||
result := NewGraphResult()
|
||||
|
||||
if len(seedPubkey) != 32 {
|
||||
return result, ErrPubkeyNotFound
|
||||
}
|
||||
|
||||
// Get seed pubkey serial
|
||||
seedSerial, err := d.GetPubkeySerial(seedPubkey)
|
||||
if err != nil {
|
||||
log.D.F("TraverseFollows: seed pubkey not in database: %s", hex.Enc(seedPubkey))
|
||||
return result, nil // Not an error - just no results
|
||||
}
|
||||
|
||||
// Track visited pubkeys by serial to avoid cycles
|
||||
visited := make(map[uint64]bool)
|
||||
visited[seedSerial.Get()] = true // Mark seed as visited but don't add to results
|
||||
|
||||
// Current frontier (pubkeys to process at this depth)
|
||||
currentFrontier := []*types.Uint40{seedSerial}
|
||||
|
||||
// Track consecutive empty depths for early termination
|
||||
consecutiveEmptyDepths := 0
|
||||
|
||||
for currentDepth := 1; currentDepth <= maxDepth; currentDepth++ {
|
||||
var nextFrontier []*types.Uint40
|
||||
newPubkeysAtDepth := 0
|
||||
|
||||
for _, pubkeySerial := range currentFrontier {
|
||||
// Get follows for this pubkey
|
||||
follows, err := d.GetFollowsFromPubkeySerial(pubkeySerial)
|
||||
if err != nil {
|
||||
log.D.F("TraverseFollows: error getting follows for serial %d: %v", pubkeySerial.Get(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, followSerial := range follows {
|
||||
// Skip if already visited
|
||||
if visited[followSerial.Get()] {
|
||||
continue
|
||||
}
|
||||
visited[followSerial.Get()] = true
|
||||
|
||||
// Get pubkey hex for result
|
||||
pubkeyHex, err := d.GetPubkeyHexFromSerial(followSerial)
|
||||
if err != nil {
|
||||
log.D.F("TraverseFollows: error getting pubkey hex for serial %d: %v", followSerial.Get(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Add to results at this depth
|
||||
result.AddPubkeyAtDepth(pubkeyHex, currentDepth)
|
||||
newPubkeysAtDepth++
|
||||
|
||||
// Add to next frontier for further traversal
|
||||
nextFrontier = append(nextFrontier, followSerial)
|
||||
}
|
||||
}
|
||||
|
||||
log.T.F("TraverseFollows: depth %d found %d new pubkeys", currentDepth, newPubkeysAtDepth)
|
||||
|
||||
// Check for early termination
|
||||
if newPubkeysAtDepth == 0 {
|
||||
consecutiveEmptyDepths++
|
||||
if consecutiveEmptyDepths >= 2 {
|
||||
log.T.F("TraverseFollows: early termination at depth %d (2 consecutive empty depths)", currentDepth)
|
||||
break
|
||||
}
|
||||
} else {
|
||||
consecutiveEmptyDepths = 0
|
||||
}
|
||||
|
||||
// Move to next depth
|
||||
currentFrontier = nextFrontier
|
||||
}
|
||||
|
||||
log.D.F("TraverseFollows: completed with %d total pubkeys across %d depths",
|
||||
result.TotalPubkeys, len(result.PubkeysByDepth))
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// TraverseFollowers performs BFS traversal to find who follows the seed pubkey.
|
||||
// This is the reverse of TraverseFollows - it finds users whose kind-3 lists
|
||||
// contain the target pubkey(s).
|
||||
//
|
||||
// At each depth:
|
||||
// - Depth 1: Users who directly follow the seed
|
||||
// - Depth 2: Users who follow anyone at depth 1 (followers of followers)
|
||||
// - etc.
|
||||
func (d *D) TraverseFollowers(seedPubkey []byte, maxDepth int) (*GraphResult, error) {
|
||||
result := NewGraphResult()
|
||||
|
||||
if len(seedPubkey) != 32 {
|
||||
return result, ErrPubkeyNotFound
|
||||
}
|
||||
|
||||
// Get seed pubkey serial
|
||||
seedSerial, err := d.GetPubkeySerial(seedPubkey)
|
||||
if err != nil {
|
||||
log.D.F("TraverseFollowers: seed pubkey not in database: %s", hex.Enc(seedPubkey))
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Track visited pubkeys
|
||||
visited := make(map[uint64]bool)
|
||||
visited[seedSerial.Get()] = true
|
||||
|
||||
// Current frontier
|
||||
currentFrontier := []*types.Uint40{seedSerial}
|
||||
|
||||
consecutiveEmptyDepths := 0
|
||||
|
||||
for currentDepth := 1; currentDepth <= maxDepth; currentDepth++ {
|
||||
var nextFrontier []*types.Uint40
|
||||
newPubkeysAtDepth := 0
|
||||
|
||||
for _, targetSerial := range currentFrontier {
|
||||
// Get followers of this pubkey
|
||||
followers, err := d.GetFollowersOfPubkeySerial(targetSerial)
|
||||
if err != nil {
|
||||
log.D.F("TraverseFollowers: error getting followers for serial %d: %v", targetSerial.Get(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, followerSerial := range followers {
|
||||
if visited[followerSerial.Get()] {
|
||||
continue
|
||||
}
|
||||
visited[followerSerial.Get()] = true
|
||||
|
||||
pubkeyHex, err := d.GetPubkeyHexFromSerial(followerSerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
result.AddPubkeyAtDepth(pubkeyHex, currentDepth)
|
||||
newPubkeysAtDepth++
|
||||
nextFrontier = append(nextFrontier, followerSerial)
|
||||
}
|
||||
}
|
||||
|
||||
log.T.F("TraverseFollowers: depth %d found %d new pubkeys", currentDepth, newPubkeysAtDepth)
|
||||
|
||||
if newPubkeysAtDepth == 0 {
|
||||
consecutiveEmptyDepths++
|
||||
if consecutiveEmptyDepths >= 2 {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
consecutiveEmptyDepths = 0
|
||||
}
|
||||
|
||||
currentFrontier = nextFrontier
|
||||
}
|
||||
|
||||
log.D.F("TraverseFollowers: completed with %d total pubkeys", result.TotalPubkeys)
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// TraverseFollowsFromHex is a convenience wrapper that accepts hex-encoded pubkey.
|
||||
func (d *D) TraverseFollowsFromHex(seedPubkeyHex string, maxDepth int) (*GraphResult, error) {
|
||||
seedPubkey, err := hex.Dec(seedPubkeyHex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.TraverseFollows(seedPubkey, maxDepth)
|
||||
}
|
||||
|
||||
// TraverseFollowersFromHex is a convenience wrapper that accepts hex-encoded pubkey.
|
||||
func (d *D) TraverseFollowersFromHex(seedPubkeyHex string, maxDepth int) (*GraphResult, error) {
|
||||
seedPubkey, err := hex.Dec(seedPubkeyHex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.TraverseFollowers(seedPubkey, maxDepth)
|
||||
}
|
||||
318
pkg/database/graph-follows_test.go
Normal file
318
pkg/database/graph-follows_test.go
Normal file
@@ -0,0 +1,318 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
)
|
||||
|
||||
func TestTraverseFollows(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create a simple follow graph:
|
||||
// Alice -> Bob, Carol
|
||||
// Bob -> David, Eve
|
||||
// Carol -> Eve, Frank
|
||||
//
|
||||
// Expected depth 1 from Alice: Bob, Carol
|
||||
// Expected depth 2 from Alice: David, Eve, Frank (Eve deduplicated)
|
||||
|
||||
alice, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
bob, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000002")
|
||||
carol, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000003")
|
||||
david, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000004")
|
||||
eve, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000005")
|
||||
frank, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000006")
|
||||
|
||||
// Create Alice's follow list (kind 3)
|
||||
aliceContactID := make([]byte, 32)
|
||||
aliceContactID[0] = 0x10
|
||||
aliceContactSig := make([]byte, 64)
|
||||
aliceContactSig[0] = 0x10
|
||||
aliceContact := &event.E{
|
||||
ID: aliceContactID,
|
||||
Pubkey: alice,
|
||||
CreatedAt: 1234567890,
|
||||
Kind: 3,
|
||||
Content: []byte(""),
|
||||
Sig: aliceContactSig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("p", hex.Enc(bob)),
|
||||
tag.NewFromAny("p", hex.Enc(carol)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, aliceContact)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save Alice's contact list: %v", err)
|
||||
}
|
||||
|
||||
// Create Bob's follow list
|
||||
bobContactID := make([]byte, 32)
|
||||
bobContactID[0] = 0x20
|
||||
bobContactSig := make([]byte, 64)
|
||||
bobContactSig[0] = 0x20
|
||||
bobContact := &event.E{
|
||||
ID: bobContactID,
|
||||
Pubkey: bob,
|
||||
CreatedAt: 1234567891,
|
||||
Kind: 3,
|
||||
Content: []byte(""),
|
||||
Sig: bobContactSig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("p", hex.Enc(david)),
|
||||
tag.NewFromAny("p", hex.Enc(eve)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, bobContact)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save Bob's contact list: %v", err)
|
||||
}
|
||||
|
||||
// Create Carol's follow list
|
||||
carolContactID := make([]byte, 32)
|
||||
carolContactID[0] = 0x30
|
||||
carolContactSig := make([]byte, 64)
|
||||
carolContactSig[0] = 0x30
|
||||
carolContact := &event.E{
|
||||
ID: carolContactID,
|
||||
Pubkey: carol,
|
||||
CreatedAt: 1234567892,
|
||||
Kind: 3,
|
||||
Content: []byte(""),
|
||||
Sig: carolContactSig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("p", hex.Enc(eve)),
|
||||
tag.NewFromAny("p", hex.Enc(frank)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, carolContact)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save Carol's contact list: %v", err)
|
||||
}
|
||||
|
||||
// Traverse follows from Alice with depth 2
|
||||
result, err := db.TraverseFollows(alice, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("TraverseFollows failed: %v", err)
|
||||
}
|
||||
|
||||
// Check depth 1: should have Bob and Carol
|
||||
depth1 := result.GetPubkeysAtDepth(1)
|
||||
if len(depth1) != 2 {
|
||||
t.Errorf("Expected 2 pubkeys at depth 1, got %d", len(depth1))
|
||||
}
|
||||
|
||||
depth1Set := make(map[string]bool)
|
||||
for _, pk := range depth1 {
|
||||
depth1Set[pk] = true
|
||||
}
|
||||
if !depth1Set[hex.Enc(bob)] {
|
||||
t.Error("Bob should be at depth 1")
|
||||
}
|
||||
if !depth1Set[hex.Enc(carol)] {
|
||||
t.Error("Carol should be at depth 1")
|
||||
}
|
||||
|
||||
// Check depth 2: should have David, Eve, Frank (Eve deduplicated)
|
||||
depth2 := result.GetPubkeysAtDepth(2)
|
||||
if len(depth2) != 3 {
|
||||
t.Errorf("Expected 3 pubkeys at depth 2, got %d: %v", len(depth2), depth2)
|
||||
}
|
||||
|
||||
depth2Set := make(map[string]bool)
|
||||
for _, pk := range depth2 {
|
||||
depth2Set[pk] = true
|
||||
}
|
||||
if !depth2Set[hex.Enc(david)] {
|
||||
t.Error("David should be at depth 2")
|
||||
}
|
||||
if !depth2Set[hex.Enc(eve)] {
|
||||
t.Error("Eve should be at depth 2")
|
||||
}
|
||||
if !depth2Set[hex.Enc(frank)] {
|
||||
t.Error("Frank should be at depth 2")
|
||||
}
|
||||
|
||||
// Verify total count
|
||||
if result.TotalPubkeys != 5 {
|
||||
t.Errorf("Expected 5 total pubkeys, got %d", result.TotalPubkeys)
|
||||
}
|
||||
|
||||
// Verify ToDepthArrays output
|
||||
arrays := result.ToDepthArrays()
|
||||
if len(arrays) != 2 {
|
||||
t.Errorf("Expected 2 depth arrays, got %d", len(arrays))
|
||||
}
|
||||
if len(arrays[0]) != 2 {
|
||||
t.Errorf("Expected 2 pubkeys in depth 1 array, got %d", len(arrays[0]))
|
||||
}
|
||||
if len(arrays[1]) != 3 {
|
||||
t.Errorf("Expected 3 pubkeys in depth 2 array, got %d", len(arrays[1]))
|
||||
}
|
||||
}
|
||||
|
||||
func TestTraverseFollowsDepth1(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
alice, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
bob, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000002")
|
||||
carol, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000003")
|
||||
|
||||
// Create Alice's follow list
|
||||
aliceContactID := make([]byte, 32)
|
||||
aliceContactID[0] = 0x10
|
||||
aliceContactSig := make([]byte, 64)
|
||||
aliceContactSig[0] = 0x10
|
||||
aliceContact := &event.E{
|
||||
ID: aliceContactID,
|
||||
Pubkey: alice,
|
||||
CreatedAt: 1234567890,
|
||||
Kind: 3,
|
||||
Content: []byte(""),
|
||||
Sig: aliceContactSig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("p", hex.Enc(bob)),
|
||||
tag.NewFromAny("p", hex.Enc(carol)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, aliceContact)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save contact list: %v", err)
|
||||
}
|
||||
|
||||
// Traverse with depth 1 only
|
||||
result, err := db.TraverseFollows(alice, 1)
|
||||
if err != nil {
|
||||
t.Fatalf("TraverseFollows failed: %v", err)
|
||||
}
|
||||
|
||||
if result.TotalPubkeys != 2 {
|
||||
t.Errorf("Expected 2 pubkeys, got %d", result.TotalPubkeys)
|
||||
}
|
||||
|
||||
arrays := result.ToDepthArrays()
|
||||
if len(arrays) != 1 {
|
||||
t.Errorf("Expected 1 depth array for depth 1 query, got %d", len(arrays))
|
||||
}
|
||||
}
|
||||
|
||||
func TestTraverseFollowersBasic(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create scenario: Bob and Carol follow Alice
|
||||
alice, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
bob, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000002")
|
||||
carol, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000003")
|
||||
|
||||
// Bob's contact list includes Alice
|
||||
bobContactID := make([]byte, 32)
|
||||
bobContactID[0] = 0x10
|
||||
bobContactSig := make([]byte, 64)
|
||||
bobContactSig[0] = 0x10
|
||||
bobContact := &event.E{
|
||||
ID: bobContactID,
|
||||
Pubkey: bob,
|
||||
CreatedAt: 1234567890,
|
||||
Kind: 3,
|
||||
Content: []byte(""),
|
||||
Sig: bobContactSig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("p", hex.Enc(alice)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, bobContact)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save Bob's contact list: %v", err)
|
||||
}
|
||||
|
||||
// Carol's contact list includes Alice
|
||||
carolContactID := make([]byte, 32)
|
||||
carolContactID[0] = 0x20
|
||||
carolContactSig := make([]byte, 64)
|
||||
carolContactSig[0] = 0x20
|
||||
carolContact := &event.E{
|
||||
ID: carolContactID,
|
||||
Pubkey: carol,
|
||||
CreatedAt: 1234567891,
|
||||
Kind: 3,
|
||||
Content: []byte(""),
|
||||
Sig: carolContactSig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("p", hex.Enc(alice)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, carolContact)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save Carol's contact list: %v", err)
|
||||
}
|
||||
|
||||
// Find Alice's followers
|
||||
result, err := db.TraverseFollowers(alice, 1)
|
||||
if err != nil {
|
||||
t.Fatalf("TraverseFollowers failed: %v", err)
|
||||
}
|
||||
|
||||
if result.TotalPubkeys != 2 {
|
||||
t.Errorf("Expected 2 followers, got %d", result.TotalPubkeys)
|
||||
}
|
||||
|
||||
followers := result.GetPubkeysAtDepth(1)
|
||||
followerSet := make(map[string]bool)
|
||||
for _, pk := range followers {
|
||||
followerSet[pk] = true
|
||||
}
|
||||
if !followerSet[hex.Enc(bob)] {
|
||||
t.Error("Bob should be a follower")
|
||||
}
|
||||
if !followerSet[hex.Enc(carol)] {
|
||||
t.Error("Carol should be a follower")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTraverseFollowsNonExistent(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Try to traverse from a pubkey that doesn't exist
|
||||
nonExistent, _ := hex.Dec("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
||||
result, err := db.TraverseFollows(nonExistent, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("TraverseFollows should not error for non-existent pubkey: %v", err)
|
||||
}
|
||||
|
||||
if result.TotalPubkeys != 0 {
|
||||
t.Errorf("Expected 0 pubkeys for non-existent seed, got %d", result.TotalPubkeys)
|
||||
}
|
||||
}
|
||||
91
pkg/database/graph-mentions.go
Normal file
91
pkg/database/graph-mentions.go
Normal file
@@ -0,0 +1,91 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
)
|
||||
|
||||
// FindMentions finds events that mention a pubkey via p-tags.
|
||||
// This returns events grouped by depth, where depth represents how the events relate:
|
||||
// - Depth 1: Events that directly mention the seed pubkey
|
||||
// - Depth 2+: Not typically used for mentions (reserved for future expansion)
|
||||
//
|
||||
// The kinds parameter filters which event kinds to include (e.g., [1] for notes only,
|
||||
// [1,7] for notes and reactions, etc.)
|
||||
func (d *D) FindMentions(pubkey []byte, kinds []uint16) (*GraphResult, error) {
|
||||
result := NewGraphResult()
|
||||
|
||||
if len(pubkey) != 32 {
|
||||
return result, ErrPubkeyNotFound
|
||||
}
|
||||
|
||||
// Get pubkey serial
|
||||
pubkeySerial, err := d.GetPubkeySerial(pubkey)
|
||||
if err != nil {
|
||||
log.D.F("FindMentions: pubkey not in database: %s", hex.Enc(pubkey))
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Find all events that reference this pubkey
|
||||
eventSerials, err := d.GetEventsReferencingPubkey(pubkeySerial, kinds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Add each event at depth 1
|
||||
for _, eventSerial := range eventSerials {
|
||||
eventIDHex, err := d.GetEventIDFromSerial(eventSerial)
|
||||
if err != nil {
|
||||
log.D.F("FindMentions: error getting event ID for serial %d: %v", eventSerial.Get(), err)
|
||||
continue
|
||||
}
|
||||
result.AddEventAtDepth(eventIDHex, 1)
|
||||
}
|
||||
|
||||
log.D.F("FindMentions: found %d events mentioning pubkey %s", result.TotalEvents, hex.Enc(pubkey))
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// FindMentionsFromHex is a convenience wrapper that accepts hex-encoded pubkey.
|
||||
func (d *D) FindMentionsFromHex(pubkeyHex string, kinds []uint16) (*GraphResult, error) {
|
||||
pubkey, err := hex.Dec(pubkeyHex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.FindMentions(pubkey, kinds)
|
||||
}
|
||||
|
||||
// FindMentionsByPubkeys returns events that mention any of the given pubkeys.
|
||||
// Useful for finding mentions across a set of followed accounts.
|
||||
func (d *D) FindMentionsByPubkeys(pubkeySerials []*types.Uint40, kinds []uint16) (*GraphResult, error) {
|
||||
result := NewGraphResult()
|
||||
|
||||
seen := make(map[uint64]bool)
|
||||
|
||||
for _, pubkeySerial := range pubkeySerials {
|
||||
eventSerials, err := d.GetEventsReferencingPubkey(pubkeySerial, kinds)
|
||||
if err != nil {
|
||||
log.D.F("FindMentionsByPubkeys: error for serial %d: %v", pubkeySerial.Get(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, eventSerial := range eventSerials {
|
||||
if seen[eventSerial.Get()] {
|
||||
continue
|
||||
}
|
||||
seen[eventSerial.Get()] = true
|
||||
|
||||
eventIDHex, err := d.GetEventIDFromSerial(eventSerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
result.AddEventAtDepth(eventIDHex, 1)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
206
pkg/database/graph-refs.go
Normal file
206
pkg/database/graph-refs.go
Normal file
@@ -0,0 +1,206 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
)
|
||||
|
||||
// AddInboundRefsToResult collects inbound references (events that reference discovered items)
|
||||
// for events at a specific depth in the result.
|
||||
//
|
||||
// For example, if you have a follows graph result and want to find all kind-7 reactions
|
||||
// to posts by users at depth 1, this collects those reactions and adds them to result.InboundRefs.
|
||||
//
|
||||
// Parameters:
|
||||
// - result: The graph result to augment with ref data
|
||||
// - depth: The depth at which to collect refs (0 = all depths)
|
||||
// - kinds: Event kinds to collect (e.g., [7] for reactions, [6] for reposts)
|
||||
func (d *D) AddInboundRefsToResult(result *GraphResult, depth int, kinds []uint16) error {
|
||||
// Determine which events to find refs for
|
||||
var targetEventIDs []string
|
||||
|
||||
if depth == 0 {
|
||||
// Collect for all depths
|
||||
targetEventIDs = result.GetAllEvents()
|
||||
} else {
|
||||
targetEventIDs = result.GetEventsAtDepth(depth)
|
||||
}
|
||||
|
||||
// Also collect refs for events authored by pubkeys in the result
|
||||
// This is common for "find reactions to posts by my follows" queries
|
||||
pubkeys := result.GetAllPubkeys()
|
||||
for _, pubkeyHex := range pubkeys {
|
||||
pubkeySerial, err := d.PubkeyHexToSerial(pubkeyHex)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get events authored by this pubkey
|
||||
// For efficiency, limit to relevant event kinds that might have reactions
|
||||
authoredEvents, err := d.GetEventsByAuthor(pubkeySerial, []uint16{1, 30023}) // notes and articles
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, eventSerial := range authoredEvents {
|
||||
eventIDHex, err := d.GetEventIDFromSerial(eventSerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// Add to target list if not already tracking
|
||||
if !result.HasEvent(eventIDHex) {
|
||||
targetEventIDs = append(targetEventIDs, eventIDHex)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For each target event, find referencing events
|
||||
for _, eventIDHex := range targetEventIDs {
|
||||
eventSerial, err := d.EventIDHexToSerial(eventIDHex)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
refSerials, err := d.GetReferencingEvents(eventSerial, kinds)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, refSerial := range refSerials {
|
||||
refEventIDHex, err := d.GetEventIDFromSerial(refSerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the kind of the referencing event
|
||||
// For now, use the first kind in the filter (assumes single kind queries)
|
||||
// TODO: Look up actual event kind from index if needed
|
||||
if len(kinds) > 0 {
|
||||
result.AddInboundRef(kinds[0], eventIDHex, refEventIDHex)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.D.F("AddInboundRefsToResult: collected refs for %d target events", len(targetEventIDs))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddOutboundRefsToResult collects outbound references (events referenced by discovered items).
|
||||
//
|
||||
// For example, find all events that posts by users at depth 1 reference (quoted posts, replied-to posts).
|
||||
func (d *D) AddOutboundRefsToResult(result *GraphResult, depth int, kinds []uint16) error {
|
||||
// Determine source events
|
||||
var sourceEventIDs []string
|
||||
|
||||
if depth == 0 {
|
||||
sourceEventIDs = result.GetAllEvents()
|
||||
} else {
|
||||
sourceEventIDs = result.GetEventsAtDepth(depth)
|
||||
}
|
||||
|
||||
// Also include events authored by pubkeys in result
|
||||
pubkeys := result.GetAllPubkeys()
|
||||
for _, pubkeyHex := range pubkeys {
|
||||
pubkeySerial, err := d.PubkeyHexToSerial(pubkeyHex)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
authoredEvents, err := d.GetEventsByAuthor(pubkeySerial, kinds)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, eventSerial := range authoredEvents {
|
||||
eventIDHex, err := d.GetEventIDFromSerial(eventSerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if !result.HasEvent(eventIDHex) {
|
||||
sourceEventIDs = append(sourceEventIDs, eventIDHex)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For each source event, find referenced events
|
||||
for _, eventIDHex := range sourceEventIDs {
|
||||
eventSerial, err := d.EventIDHexToSerial(eventIDHex)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
refSerials, err := d.GetETagsFromEventSerial(eventSerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, refSerial := range refSerials {
|
||||
refEventIDHex, err := d.GetEventIDFromSerial(refSerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Use first kind for categorization
|
||||
if len(kinds) > 0 {
|
||||
result.AddOutboundRef(kinds[0], eventIDHex, refEventIDHex)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.D.F("AddOutboundRefsToResult: collected refs from %d source events", len(sourceEventIDs))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CollectRefsForPubkeys collects inbound references to events by specific pubkeys.
|
||||
// This is useful for "find all reactions to posts by these users" queries.
|
||||
//
|
||||
// Parameters:
|
||||
// - pubkeySerials: The pubkeys whose events should be checked for refs
|
||||
// - refKinds: Event kinds to collect (e.g., [7] for reactions)
|
||||
// - eventKinds: Event kinds to check for refs (e.g., [1] for notes)
|
||||
func (d *D) CollectRefsForPubkeys(
|
||||
pubkeySerials []*types.Uint40,
|
||||
refKinds []uint16,
|
||||
eventKinds []uint16,
|
||||
) (*GraphResult, error) {
|
||||
result := NewGraphResult()
|
||||
|
||||
for _, pubkeySerial := range pubkeySerials {
|
||||
// Get events by this author
|
||||
authoredEvents, err := d.GetEventsByAuthor(pubkeySerial, eventKinds)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, eventSerial := range authoredEvents {
|
||||
eventIDHex, err := d.GetEventIDFromSerial(eventSerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Find refs to this event
|
||||
refSerials, err := d.GetReferencingEvents(eventSerial, refKinds)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, refSerial := range refSerials {
|
||||
refEventIDHex, err := d.GetEventIDFromSerial(refSerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Add to result
|
||||
if len(refKinds) > 0 {
|
||||
result.AddInboundRef(refKinds[0], eventIDHex, refEventIDHex)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
327
pkg/database/graph-result.go
Normal file
327
pkg/database/graph-result.go
Normal file
@@ -0,0 +1,327 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// GraphResult contains depth-organized traversal results for graph queries.
|
||||
// It tracks pubkeys and events discovered at each depth level, ensuring
|
||||
// each entity appears only at the depth where it was first discovered.
|
||||
type GraphResult struct {
|
||||
// PubkeysByDepth maps depth -> pubkeys first discovered at that depth.
|
||||
// Each pubkey appears ONLY in the array for the depth where it was first seen.
|
||||
// Depth 1 = direct connections, Depth 2 = connections of connections, etc.
|
||||
PubkeysByDepth map[int][]string
|
||||
|
||||
// EventsByDepth maps depth -> event IDs discovered at that depth.
|
||||
// Used for thread traversal queries.
|
||||
EventsByDepth map[int][]string
|
||||
|
||||
// FirstSeenPubkey tracks which depth each pubkey was first discovered.
|
||||
// Key is pubkey hex, value is the depth (1-indexed).
|
||||
FirstSeenPubkey map[string]int
|
||||
|
||||
// FirstSeenEvent tracks which depth each event was first discovered.
|
||||
// Key is event ID hex, value is the depth (1-indexed).
|
||||
FirstSeenEvent map[string]int
|
||||
|
||||
// TotalPubkeys is the count of unique pubkeys discovered across all depths.
|
||||
TotalPubkeys int
|
||||
|
||||
// TotalEvents is the count of unique events discovered across all depths.
|
||||
TotalEvents int
|
||||
|
||||
// InboundRefs tracks inbound references (events that reference discovered items).
|
||||
// Structure: kind -> target_id -> []referencing_event_ids
|
||||
InboundRefs map[uint16]map[string][]string
|
||||
|
||||
// OutboundRefs tracks outbound references (events referenced by discovered items).
|
||||
// Structure: kind -> source_id -> []referenced_event_ids
|
||||
OutboundRefs map[uint16]map[string][]string
|
||||
}
|
||||
|
||||
// NewGraphResult creates a new initialized GraphResult.
|
||||
func NewGraphResult() *GraphResult {
|
||||
return &GraphResult{
|
||||
PubkeysByDepth: make(map[int][]string),
|
||||
EventsByDepth: make(map[int][]string),
|
||||
FirstSeenPubkey: make(map[string]int),
|
||||
FirstSeenEvent: make(map[string]int),
|
||||
InboundRefs: make(map[uint16]map[string][]string),
|
||||
OutboundRefs: make(map[uint16]map[string][]string),
|
||||
}
|
||||
}
|
||||
|
||||
// AddPubkeyAtDepth adds a pubkey to the result at the specified depth if not already seen.
|
||||
// Returns true if the pubkey was added (first time seen), false if already exists.
|
||||
func (r *GraphResult) AddPubkeyAtDepth(pubkeyHex string, depth int) bool {
|
||||
if _, exists := r.FirstSeenPubkey[pubkeyHex]; exists {
|
||||
return false
|
||||
}
|
||||
|
||||
r.FirstSeenPubkey[pubkeyHex] = depth
|
||||
r.PubkeysByDepth[depth] = append(r.PubkeysByDepth[depth], pubkeyHex)
|
||||
r.TotalPubkeys++
|
||||
return true
|
||||
}
|
||||
|
||||
// AddEventAtDepth adds an event ID to the result at the specified depth if not already seen.
|
||||
// Returns true if the event was added (first time seen), false if already exists.
|
||||
func (r *GraphResult) AddEventAtDepth(eventIDHex string, depth int) bool {
|
||||
if _, exists := r.FirstSeenEvent[eventIDHex]; exists {
|
||||
return false
|
||||
}
|
||||
|
||||
r.FirstSeenEvent[eventIDHex] = depth
|
||||
r.EventsByDepth[depth] = append(r.EventsByDepth[depth], eventIDHex)
|
||||
r.TotalEvents++
|
||||
return true
|
||||
}
|
||||
|
||||
// HasPubkey returns true if the pubkey has been discovered at any depth.
|
||||
func (r *GraphResult) HasPubkey(pubkeyHex string) bool {
|
||||
_, exists := r.FirstSeenPubkey[pubkeyHex]
|
||||
return exists
|
||||
}
|
||||
|
||||
// HasEvent returns true if the event has been discovered at any depth.
|
||||
func (r *GraphResult) HasEvent(eventIDHex string) bool {
|
||||
_, exists := r.FirstSeenEvent[eventIDHex]
|
||||
return exists
|
||||
}
|
||||
|
||||
// GetPubkeyDepth returns the depth at which a pubkey was first discovered.
|
||||
// Returns 0 if the pubkey was not found.
|
||||
func (r *GraphResult) GetPubkeyDepth(pubkeyHex string) int {
|
||||
return r.FirstSeenPubkey[pubkeyHex]
|
||||
}
|
||||
|
||||
// GetEventDepth returns the depth at which an event was first discovered.
|
||||
// Returns 0 if the event was not found.
|
||||
func (r *GraphResult) GetEventDepth(eventIDHex string) int {
|
||||
return r.FirstSeenEvent[eventIDHex]
|
||||
}
|
||||
|
||||
// GetDepthsSorted returns all depths that have pubkeys, sorted ascending.
|
||||
func (r *GraphResult) GetDepthsSorted() []int {
|
||||
depths := make([]int, 0, len(r.PubkeysByDepth))
|
||||
for d := range r.PubkeysByDepth {
|
||||
depths = append(depths, d)
|
||||
}
|
||||
sort.Ints(depths)
|
||||
return depths
|
||||
}
|
||||
|
||||
// GetEventDepthsSorted returns all depths that have events, sorted ascending.
|
||||
func (r *GraphResult) GetEventDepthsSorted() []int {
|
||||
depths := make([]int, 0, len(r.EventsByDepth))
|
||||
for d := range r.EventsByDepth {
|
||||
depths = append(depths, d)
|
||||
}
|
||||
sort.Ints(depths)
|
||||
return depths
|
||||
}
|
||||
|
||||
// ToDepthArrays converts the result to the response format: array of arrays.
|
||||
// Index 0 = depth 1 pubkeys, Index 1 = depth 2 pubkeys, etc.
|
||||
// Empty arrays are included for depths with no pubkeys to maintain index alignment.
|
||||
func (r *GraphResult) ToDepthArrays() [][]string {
|
||||
if len(r.PubkeysByDepth) == 0 {
|
||||
return [][]string{}
|
||||
}
|
||||
|
||||
// Find the maximum depth
|
||||
maxDepth := 0
|
||||
for d := range r.PubkeysByDepth {
|
||||
if d > maxDepth {
|
||||
maxDepth = d
|
||||
}
|
||||
}
|
||||
|
||||
// Create result array with entries for each depth
|
||||
result := make([][]string, maxDepth)
|
||||
for i := 0; i < maxDepth; i++ {
|
||||
depth := i + 1 // depths are 1-indexed
|
||||
if pubkeys, exists := r.PubkeysByDepth[depth]; exists {
|
||||
result[i] = pubkeys
|
||||
} else {
|
||||
result[i] = []string{} // Empty array for depths with no pubkeys
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ToEventDepthArrays converts event results to the response format: array of arrays.
|
||||
// Index 0 = depth 1 events, Index 1 = depth 2 events, etc.
|
||||
func (r *GraphResult) ToEventDepthArrays() [][]string {
|
||||
if len(r.EventsByDepth) == 0 {
|
||||
return [][]string{}
|
||||
}
|
||||
|
||||
maxDepth := 0
|
||||
for d := range r.EventsByDepth {
|
||||
if d > maxDepth {
|
||||
maxDepth = d
|
||||
}
|
||||
}
|
||||
|
||||
result := make([][]string, maxDepth)
|
||||
for i := 0; i < maxDepth; i++ {
|
||||
depth := i + 1
|
||||
if events, exists := r.EventsByDepth[depth]; exists {
|
||||
result[i] = events
|
||||
} else {
|
||||
result[i] = []string{}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// AddInboundRef records an inbound reference from a referencing event to a target.
|
||||
func (r *GraphResult) AddInboundRef(kind uint16, targetIDHex string, referencingEventIDHex string) {
|
||||
if r.InboundRefs[kind] == nil {
|
||||
r.InboundRefs[kind] = make(map[string][]string)
|
||||
}
|
||||
r.InboundRefs[kind][targetIDHex] = append(r.InboundRefs[kind][targetIDHex], referencingEventIDHex)
|
||||
}
|
||||
|
||||
// AddOutboundRef records an outbound reference from a source event to a referenced event.
|
||||
func (r *GraphResult) AddOutboundRef(kind uint16, sourceIDHex string, referencedEventIDHex string) {
|
||||
if r.OutboundRefs[kind] == nil {
|
||||
r.OutboundRefs[kind] = make(map[string][]string)
|
||||
}
|
||||
r.OutboundRefs[kind][sourceIDHex] = append(r.OutboundRefs[kind][sourceIDHex], referencedEventIDHex)
|
||||
}
|
||||
|
||||
// RefAggregation represents aggregated reference data for a single target/source.
|
||||
type RefAggregation struct {
|
||||
// TargetEventID is the event ID being referenced (for inbound) or referencing (for outbound)
|
||||
TargetEventID string
|
||||
|
||||
// TargetAuthor is the author pubkey of the target event (if known)
|
||||
TargetAuthor string
|
||||
|
||||
// TargetDepth is the depth at which this target was discovered in the graph
|
||||
TargetDepth int
|
||||
|
||||
// RefKind is the kind of the referencing events
|
||||
RefKind uint16
|
||||
|
||||
// RefCount is the number of references to/from this target
|
||||
RefCount int
|
||||
|
||||
// RefEventIDs is the list of event IDs that reference this target
|
||||
RefEventIDs []string
|
||||
}
|
||||
|
||||
// GetInboundRefsSorted returns inbound refs for a kind, sorted by count descending.
|
||||
func (r *GraphResult) GetInboundRefsSorted(kind uint16) []RefAggregation {
|
||||
kindRefs := r.InboundRefs[kind]
|
||||
if kindRefs == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
aggs := make([]RefAggregation, 0, len(kindRefs))
|
||||
for targetID, refs := range kindRefs {
|
||||
agg := RefAggregation{
|
||||
TargetEventID: targetID,
|
||||
TargetDepth: r.GetEventDepth(targetID),
|
||||
RefKind: kind,
|
||||
RefCount: len(refs),
|
||||
RefEventIDs: refs,
|
||||
}
|
||||
aggs = append(aggs, agg)
|
||||
}
|
||||
|
||||
// Sort by count descending
|
||||
sort.Slice(aggs, func(i, j int) bool {
|
||||
return aggs[i].RefCount > aggs[j].RefCount
|
||||
})
|
||||
|
||||
return aggs
|
||||
}
|
||||
|
||||
// GetOutboundRefsSorted returns outbound refs for a kind, sorted by count descending.
|
||||
func (r *GraphResult) GetOutboundRefsSorted(kind uint16) []RefAggregation {
|
||||
kindRefs := r.OutboundRefs[kind]
|
||||
if kindRefs == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
aggs := make([]RefAggregation, 0, len(kindRefs))
|
||||
for sourceID, refs := range kindRefs {
|
||||
agg := RefAggregation{
|
||||
TargetEventID: sourceID,
|
||||
TargetDepth: r.GetEventDepth(sourceID),
|
||||
RefKind: kind,
|
||||
RefCount: len(refs),
|
||||
RefEventIDs: refs,
|
||||
}
|
||||
aggs = append(aggs, agg)
|
||||
}
|
||||
|
||||
sort.Slice(aggs, func(i, j int) bool {
|
||||
return aggs[i].RefCount > aggs[j].RefCount
|
||||
})
|
||||
|
||||
return aggs
|
||||
}
|
||||
|
||||
// GetAllPubkeys returns all pubkeys discovered across all depths.
|
||||
func (r *GraphResult) GetAllPubkeys() []string {
|
||||
all := make([]string, 0, r.TotalPubkeys)
|
||||
for _, pubkeys := range r.PubkeysByDepth {
|
||||
all = append(all, pubkeys...)
|
||||
}
|
||||
return all
|
||||
}
|
||||
|
||||
// GetAllEvents returns all event IDs discovered across all depths.
|
||||
func (r *GraphResult) GetAllEvents() []string {
|
||||
all := make([]string, 0, r.TotalEvents)
|
||||
for _, events := range r.EventsByDepth {
|
||||
all = append(all, events...)
|
||||
}
|
||||
return all
|
||||
}
|
||||
|
||||
// GetPubkeysAtDepth returns pubkeys at a specific depth, or empty slice if none.
|
||||
func (r *GraphResult) GetPubkeysAtDepth(depth int) []string {
|
||||
if pubkeys, exists := r.PubkeysByDepth[depth]; exists {
|
||||
return pubkeys
|
||||
}
|
||||
return []string{}
|
||||
}
|
||||
|
||||
// GetEventsAtDepth returns events at a specific depth, or empty slice if none.
|
||||
func (r *GraphResult) GetEventsAtDepth(depth int) []string {
|
||||
if events, exists := r.EventsByDepth[depth]; exists {
|
||||
return events
|
||||
}
|
||||
return []string{}
|
||||
}
|
||||
|
||||
// Interface methods for external package access (e.g., pkg/protocol/graph)
|
||||
// These allow the graph executor to extract data without direct struct access.
|
||||
|
||||
// GetPubkeysByDepth returns the PubkeysByDepth map for external access.
|
||||
func (r *GraphResult) GetPubkeysByDepth() map[int][]string {
|
||||
return r.PubkeysByDepth
|
||||
}
|
||||
|
||||
// GetEventsByDepth returns the EventsByDepth map for external access.
|
||||
func (r *GraphResult) GetEventsByDepth() map[int][]string {
|
||||
return r.EventsByDepth
|
||||
}
|
||||
|
||||
// GetTotalPubkeys returns the total pubkey count for external access.
|
||||
func (r *GraphResult) GetTotalPubkeys() int {
|
||||
return r.TotalPubkeys
|
||||
}
|
||||
|
||||
// GetTotalEvents returns the total event count for external access.
|
||||
func (r *GraphResult) GetTotalEvents() int {
|
||||
return r.TotalEvents
|
||||
}
|
||||
191
pkg/database/graph-thread.go
Normal file
191
pkg/database/graph-thread.go
Normal file
@@ -0,0 +1,191 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
)
|
||||
|
||||
// TraverseThread performs BFS traversal of thread structure via e-tags.
|
||||
// Starting from a seed event, it finds all replies/references at each depth.
|
||||
//
|
||||
// The traversal works bidirectionally:
|
||||
// - Forward: Events that the seed references (parents, quoted posts)
|
||||
// - Backward: Events that reference the seed (replies, reactions, reposts)
|
||||
//
|
||||
// Parameters:
|
||||
// - seedEventID: The event ID to start traversal from
|
||||
// - maxDepth: Maximum depth to traverse
|
||||
// - direction: "both" (default), "inbound" (replies to seed), "outbound" (seed's references)
|
||||
func (d *D) TraverseThread(seedEventID []byte, maxDepth int, direction string) (*GraphResult, error) {
|
||||
result := NewGraphResult()
|
||||
|
||||
if len(seedEventID) != 32 {
|
||||
return result, ErrEventNotFound
|
||||
}
|
||||
|
||||
// Get seed event serial
|
||||
seedSerial, err := d.GetSerialById(seedEventID)
|
||||
if err != nil {
|
||||
log.D.F("TraverseThread: seed event not in database: %s", hex.Enc(seedEventID))
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Normalize direction
|
||||
if direction == "" {
|
||||
direction = "both"
|
||||
}
|
||||
|
||||
// Track visited events
|
||||
visited := make(map[uint64]bool)
|
||||
visited[seedSerial.Get()] = true
|
||||
|
||||
// Current frontier
|
||||
currentFrontier := []*types.Uint40{seedSerial}
|
||||
|
||||
consecutiveEmptyDepths := 0
|
||||
|
||||
for currentDepth := 1; currentDepth <= maxDepth; currentDepth++ {
|
||||
var nextFrontier []*types.Uint40
|
||||
newEventsAtDepth := 0
|
||||
|
||||
for _, eventSerial := range currentFrontier {
|
||||
// Get inbound references (events that reference this event)
|
||||
if direction == "both" || direction == "inbound" {
|
||||
inboundSerials, err := d.GetReferencingEvents(eventSerial, nil)
|
||||
if err != nil {
|
||||
log.D.F("TraverseThread: error getting inbound refs for serial %d: %v", eventSerial.Get(), err)
|
||||
} else {
|
||||
for _, refSerial := range inboundSerials {
|
||||
if visited[refSerial.Get()] {
|
||||
continue
|
||||
}
|
||||
visited[refSerial.Get()] = true
|
||||
|
||||
eventIDHex, err := d.GetEventIDFromSerial(refSerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
result.AddEventAtDepth(eventIDHex, currentDepth)
|
||||
newEventsAtDepth++
|
||||
nextFrontier = append(nextFrontier, refSerial)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get outbound references (events this event references)
|
||||
if direction == "both" || direction == "outbound" {
|
||||
outboundSerials, err := d.GetETagsFromEventSerial(eventSerial)
|
||||
if err != nil {
|
||||
log.D.F("TraverseThread: error getting outbound refs for serial %d: %v", eventSerial.Get(), err)
|
||||
} else {
|
||||
for _, refSerial := range outboundSerials {
|
||||
if visited[refSerial.Get()] {
|
||||
continue
|
||||
}
|
||||
visited[refSerial.Get()] = true
|
||||
|
||||
eventIDHex, err := d.GetEventIDFromSerial(refSerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
result.AddEventAtDepth(eventIDHex, currentDepth)
|
||||
newEventsAtDepth++
|
||||
nextFrontier = append(nextFrontier, refSerial)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.T.F("TraverseThread: depth %d found %d new events", currentDepth, newEventsAtDepth)
|
||||
|
||||
if newEventsAtDepth == 0 {
|
||||
consecutiveEmptyDepths++
|
||||
if consecutiveEmptyDepths >= 2 {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
consecutiveEmptyDepths = 0
|
||||
}
|
||||
|
||||
currentFrontier = nextFrontier
|
||||
}
|
||||
|
||||
log.D.F("TraverseThread: completed with %d total events", result.TotalEvents)
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// TraverseThreadFromHex is a convenience wrapper that accepts hex-encoded event ID.
|
||||
func (d *D) TraverseThreadFromHex(seedEventIDHex string, maxDepth int, direction string) (*GraphResult, error) {
|
||||
seedEventID, err := hex.Dec(seedEventIDHex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.TraverseThread(seedEventID, maxDepth, direction)
|
||||
}
|
||||
|
||||
// GetThreadReplies finds all direct replies to an event.
|
||||
// This is a convenience method that returns events at depth 1 with inbound direction.
|
||||
func (d *D) GetThreadReplies(eventID []byte, kinds []uint16) (*GraphResult, error) {
|
||||
result := NewGraphResult()
|
||||
|
||||
if len(eventID) != 32 {
|
||||
return result, ErrEventNotFound
|
||||
}
|
||||
|
||||
eventSerial, err := d.GetSerialById(eventID)
|
||||
if err != nil {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Get events that reference this event
|
||||
replySerials, err := d.GetReferencingEvents(eventSerial, kinds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, replySerial := range replySerials {
|
||||
eventIDHex, err := d.GetEventIDFromSerial(replySerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
result.AddEventAtDepth(eventIDHex, 1)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetThreadParents finds events that a given event references (its parents/quotes).
|
||||
func (d *D) GetThreadParents(eventID []byte) (*GraphResult, error) {
|
||||
result := NewGraphResult()
|
||||
|
||||
if len(eventID) != 32 {
|
||||
return result, ErrEventNotFound
|
||||
}
|
||||
|
||||
eventSerial, err := d.GetSerialById(eventID)
|
||||
if err != nil {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Get events that this event references
|
||||
parentSerials, err := d.GetETagsFromEventSerial(eventSerial)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, parentSerial := range parentSerials {
|
||||
eventIDHex, err := d.GetEventIDFromSerial(parentSerial)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
result.AddEventAtDepth(eventIDHex, 1)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
560
pkg/database/graph-traversal.go
Normal file
560
pkg/database/graph-traversal.go
Normal file
@@ -0,0 +1,560 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/database/indexes"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
)
|
||||
|
||||
// Graph traversal errors
|
||||
var (
|
||||
ErrPubkeyNotFound = errors.New("pubkey not found in database")
|
||||
ErrEventNotFound = errors.New("event not found in database")
|
||||
)
|
||||
|
||||
// GetPTagsFromEventSerial extracts p-tag pubkey serials from an event by its serial.
|
||||
// This is a pure index-based operation - no event decoding required.
|
||||
// It scans the epg (event-pubkey-graph) index for p-tag edges.
|
||||
func (d *D) GetPTagsFromEventSerial(eventSerial *types.Uint40) ([]*types.Uint40, error) {
|
||||
var pubkeySerials []*types.Uint40
|
||||
|
||||
// Build prefix: epg|event_serial
|
||||
prefix := new(bytes.Buffer)
|
||||
prefix.Write([]byte(indexes.EventPubkeyGraphPrefix))
|
||||
if err := eventSerial.MarshalWrite(prefix); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
searchPrefix := prefix.Bytes()
|
||||
|
||||
err := d.View(func(txn *badger.Txn) error {
|
||||
opts := badger.DefaultIteratorOptions
|
||||
opts.PrefetchValues = false
|
||||
opts.Prefix = searchPrefix
|
||||
|
||||
it := txn.NewIterator(opts)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(searchPrefix); it.ValidForPrefix(searchPrefix); it.Next() {
|
||||
key := it.Item().KeyCopy(nil)
|
||||
|
||||
// Decode key: epg(3)|event_serial(5)|pubkey_serial(5)|kind(2)|direction(1)
|
||||
if len(key) != 16 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract direction to filter for p-tags only
|
||||
direction := key[15]
|
||||
if direction != types.EdgeDirectionPTagOut {
|
||||
continue // Skip author edges, only want p-tag edges
|
||||
}
|
||||
|
||||
// Extract pubkey serial (bytes 8-12)
|
||||
pubkeySerial := new(types.Uint40)
|
||||
serialReader := bytes.NewReader(key[8:13])
|
||||
if err := pubkeySerial.UnmarshalRead(serialReader); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
pubkeySerials = append(pubkeySerials, pubkeySerial)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return pubkeySerials, err
|
||||
}
|
||||
|
||||
// GetETagsFromEventSerial extracts e-tag event serials from an event by its serial.
|
||||
// This is a pure index-based operation - no event decoding required.
|
||||
// It scans the eeg (event-event-graph) index for outbound e-tag edges.
|
||||
func (d *D) GetETagsFromEventSerial(eventSerial *types.Uint40) ([]*types.Uint40, error) {
|
||||
var targetSerials []*types.Uint40
|
||||
|
||||
// Build prefix: eeg|source_event_serial
|
||||
prefix := new(bytes.Buffer)
|
||||
prefix.Write([]byte(indexes.EventEventGraphPrefix))
|
||||
if err := eventSerial.MarshalWrite(prefix); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
searchPrefix := prefix.Bytes()
|
||||
|
||||
err := d.View(func(txn *badger.Txn) error {
|
||||
opts := badger.DefaultIteratorOptions
|
||||
opts.PrefetchValues = false
|
||||
opts.Prefix = searchPrefix
|
||||
|
||||
it := txn.NewIterator(opts)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(searchPrefix); it.ValidForPrefix(searchPrefix); it.Next() {
|
||||
key := it.Item().KeyCopy(nil)
|
||||
|
||||
// Decode key: eeg(3)|source_serial(5)|target_serial(5)|kind(2)|direction(1)
|
||||
if len(key) != 16 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract target serial (bytes 8-12)
|
||||
targetSerial := new(types.Uint40)
|
||||
serialReader := bytes.NewReader(key[8:13])
|
||||
if err := targetSerial.UnmarshalRead(serialReader); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
targetSerials = append(targetSerials, targetSerial)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return targetSerials, err
|
||||
}
|
||||
|
||||
// GetReferencingEvents finds all events that reference a target event via e-tags.
|
||||
// Optionally filters by event kinds. Uses the gee (reverse e-tag graph) index.
|
||||
func (d *D) GetReferencingEvents(targetSerial *types.Uint40, kinds []uint16) ([]*types.Uint40, error) {
|
||||
var sourceSerials []*types.Uint40
|
||||
|
||||
if len(kinds) == 0 {
|
||||
// No kind filter - scan all kinds
|
||||
prefix := new(bytes.Buffer)
|
||||
prefix.Write([]byte(indexes.GraphEventEventPrefix))
|
||||
if err := targetSerial.MarshalWrite(prefix); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
searchPrefix := prefix.Bytes()
|
||||
|
||||
err := d.View(func(txn *badger.Txn) error {
|
||||
opts := badger.DefaultIteratorOptions
|
||||
opts.PrefetchValues = false
|
||||
opts.Prefix = searchPrefix
|
||||
|
||||
it := txn.NewIterator(opts)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(searchPrefix); it.ValidForPrefix(searchPrefix); it.Next() {
|
||||
key := it.Item().KeyCopy(nil)
|
||||
|
||||
// Decode key: gee(3)|target_serial(5)|kind(2)|direction(1)|source_serial(5)
|
||||
if len(key) != 16 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract source serial (bytes 11-15)
|
||||
sourceSerial := new(types.Uint40)
|
||||
serialReader := bytes.NewReader(key[11:16])
|
||||
if err := sourceSerial.UnmarshalRead(serialReader); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
sourceSerials = append(sourceSerials, sourceSerial)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return sourceSerials, err
|
||||
}
|
||||
|
||||
// With kind filter - scan each kind's prefix
|
||||
for _, k := range kinds {
|
||||
kind := new(types.Uint16)
|
||||
kind.Set(k)
|
||||
|
||||
direction := new(types.Letter)
|
||||
direction.Set(types.EdgeDirectionETagIn)
|
||||
|
||||
prefix := new(bytes.Buffer)
|
||||
if err := indexes.GraphEventEventEnc(targetSerial, kind, direction, nil).MarshalWrite(prefix); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
searchPrefix := prefix.Bytes()
|
||||
|
||||
err := d.View(func(txn *badger.Txn) error {
|
||||
opts := badger.DefaultIteratorOptions
|
||||
opts.PrefetchValues = false
|
||||
opts.Prefix = searchPrefix
|
||||
|
||||
it := txn.NewIterator(opts)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(searchPrefix); it.ValidForPrefix(searchPrefix); it.Next() {
|
||||
key := it.Item().KeyCopy(nil)
|
||||
|
||||
// Extract source serial (last 5 bytes)
|
||||
if len(key) < 5 {
|
||||
continue
|
||||
}
|
||||
sourceSerial := new(types.Uint40)
|
||||
serialReader := bytes.NewReader(key[len(key)-5:])
|
||||
if err := sourceSerial.UnmarshalRead(serialReader); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
sourceSerials = append(sourceSerials, sourceSerial)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return sourceSerials, nil
|
||||
}
|
||||
|
||||
// FindEventByAuthorAndKind finds the most recent event of a specific kind by an author.
|
||||
// This is used to find kind-3 contact lists for follow graph traversal.
|
||||
// Returns nil, nil if no matching event is found.
|
||||
func (d *D) FindEventByAuthorAndKind(authorSerial *types.Uint40, kind uint16) (*types.Uint40, error) {
|
||||
var eventSerial *types.Uint40
|
||||
|
||||
// First, get the full pubkey from the serial
|
||||
pubkey, err := d.GetPubkeyBySerial(authorSerial)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Build prefix for kind-pubkey index: kpc|kind|pubkey_hash
|
||||
pubHash := new(types.PubHash)
|
||||
if err := pubHash.FromPubkey(pubkey); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kindType := new(types.Uint16)
|
||||
kindType.Set(kind)
|
||||
|
||||
prefix := new(bytes.Buffer)
|
||||
prefix.Write([]byte(indexes.KindPubkeyPrefix))
|
||||
if err := kindType.MarshalWrite(prefix); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
if err := pubHash.MarshalWrite(prefix); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
searchPrefix := prefix.Bytes()
|
||||
|
||||
err = d.View(func(txn *badger.Txn) error {
|
||||
opts := badger.DefaultIteratorOptions
|
||||
opts.PrefetchValues = false
|
||||
opts.Prefix = searchPrefix
|
||||
opts.Reverse = true // Most recent first (highest created_at)
|
||||
|
||||
it := txn.NewIterator(opts)
|
||||
defer it.Close()
|
||||
|
||||
// Seek to end of prefix range for reverse iteration
|
||||
seekKey := make([]byte, len(searchPrefix)+8+5) // prefix + max timestamp + max serial
|
||||
copy(seekKey, searchPrefix)
|
||||
for i := len(searchPrefix); i < len(seekKey); i++ {
|
||||
seekKey[i] = 0xFF
|
||||
}
|
||||
|
||||
it.Seek(seekKey)
|
||||
if !it.ValidForPrefix(searchPrefix) {
|
||||
// Try going to the first valid key if seek went past
|
||||
it.Rewind()
|
||||
it.Seek(searchPrefix)
|
||||
}
|
||||
|
||||
if it.ValidForPrefix(searchPrefix) {
|
||||
key := it.Item().KeyCopy(nil)
|
||||
|
||||
// Decode key: kpc(3)|kind(2)|pubkey_hash(8)|created_at(8)|serial(5)
|
||||
// Total: 26 bytes
|
||||
if len(key) < 26 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Extract serial (last 5 bytes)
|
||||
eventSerial = new(types.Uint40)
|
||||
serialReader := bytes.NewReader(key[len(key)-5:])
|
||||
if err := eventSerial.UnmarshalRead(serialReader); chk.E(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return eventSerial, err
|
||||
}
|
||||
|
||||
// GetPubkeyHexFromSerial converts a pubkey serial to its hex string representation.
|
||||
func (d *D) GetPubkeyHexFromSerial(serial *types.Uint40) (string, error) {
|
||||
pubkey, err := d.GetPubkeyBySerial(serial)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return hex.Enc(pubkey), nil
|
||||
}
|
||||
|
||||
// GetEventIDFromSerial converts an event serial to its hex ID string.
|
||||
func (d *D) GetEventIDFromSerial(serial *types.Uint40) (string, error) {
|
||||
eventID, err := d.GetEventIdBySerial(serial)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return hex.Enc(eventID), nil
|
||||
}
|
||||
|
||||
// GetEventsReferencingPubkey finds all events that reference a pubkey via p-tags.
|
||||
// Uses the peg (pubkey-event-graph) index with direction filter for inbound p-tags.
|
||||
// Optionally filters by event kinds.
|
||||
func (d *D) GetEventsReferencingPubkey(pubkeySerial *types.Uint40, kinds []uint16) ([]*types.Uint40, error) {
|
||||
var eventSerials []*types.Uint40
|
||||
|
||||
if len(kinds) == 0 {
|
||||
// No kind filter - we need to scan common kinds since direction comes after kind in the key
|
||||
// Use same approach as QueryPTagGraph
|
||||
commonKinds := []uint16{1, 6, 7, 9735, 10002, 3, 4, 5, 30023}
|
||||
kinds = commonKinds
|
||||
}
|
||||
|
||||
for _, k := range kinds {
|
||||
kind := new(types.Uint16)
|
||||
kind.Set(k)
|
||||
|
||||
direction := new(types.Letter)
|
||||
direction.Set(types.EdgeDirectionPTagIn) // Inbound p-tags
|
||||
|
||||
prefix := new(bytes.Buffer)
|
||||
if err := indexes.PubkeyEventGraphEnc(pubkeySerial, kind, direction, nil).MarshalWrite(prefix); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
searchPrefix := prefix.Bytes()
|
||||
|
||||
err := d.View(func(txn *badger.Txn) error {
|
||||
opts := badger.DefaultIteratorOptions
|
||||
opts.PrefetchValues = false
|
||||
opts.Prefix = searchPrefix
|
||||
|
||||
it := txn.NewIterator(opts)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(searchPrefix); it.ValidForPrefix(searchPrefix); it.Next() {
|
||||
key := it.Item().KeyCopy(nil)
|
||||
|
||||
// Key format: peg(3)|pubkey_serial(5)|kind(2)|direction(1)|event_serial(5) = 16 bytes
|
||||
if len(key) != 16 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract event serial (last 5 bytes)
|
||||
eventSerial := new(types.Uint40)
|
||||
serialReader := bytes.NewReader(key[11:16])
|
||||
if err := eventSerial.UnmarshalRead(serialReader); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
eventSerials = append(eventSerials, eventSerial)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return eventSerials, nil
|
||||
}
|
||||
|
||||
// GetEventsByAuthor finds all events authored by a pubkey.
|
||||
// Uses the peg (pubkey-event-graph) index with direction filter for author edges.
|
||||
// Optionally filters by event kinds.
|
||||
func (d *D) GetEventsByAuthor(authorSerial *types.Uint40, kinds []uint16) ([]*types.Uint40, error) {
|
||||
var eventSerials []*types.Uint40
|
||||
|
||||
if len(kinds) == 0 {
|
||||
// No kind filter - scan for author direction across common kinds
|
||||
// This is less efficient but necessary without kind filter
|
||||
commonKinds := []uint16{0, 1, 3, 6, 7, 30023, 10002}
|
||||
kinds = commonKinds
|
||||
}
|
||||
|
||||
for _, k := range kinds {
|
||||
kind := new(types.Uint16)
|
||||
kind.Set(k)
|
||||
|
||||
direction := new(types.Letter)
|
||||
direction.Set(types.EdgeDirectionAuthor) // Author edges
|
||||
|
||||
prefix := new(bytes.Buffer)
|
||||
if err := indexes.PubkeyEventGraphEnc(authorSerial, kind, direction, nil).MarshalWrite(prefix); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
searchPrefix := prefix.Bytes()
|
||||
|
||||
err := d.View(func(txn *badger.Txn) error {
|
||||
opts := badger.DefaultIteratorOptions
|
||||
opts.PrefetchValues = false
|
||||
opts.Prefix = searchPrefix
|
||||
|
||||
it := txn.NewIterator(opts)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(searchPrefix); it.ValidForPrefix(searchPrefix); it.Next() {
|
||||
key := it.Item().KeyCopy(nil)
|
||||
|
||||
// Key format: peg(3)|pubkey_serial(5)|kind(2)|direction(1)|event_serial(5) = 16 bytes
|
||||
if len(key) != 16 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract event serial (last 5 bytes)
|
||||
eventSerial := new(types.Uint40)
|
||||
serialReader := bytes.NewReader(key[11:16])
|
||||
if err := eventSerial.UnmarshalRead(serialReader); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
eventSerials = append(eventSerials, eventSerial)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return eventSerials, nil
|
||||
}
|
||||
|
||||
// GetFollowsFromPubkeySerial returns the pubkey serials that a user follows.
|
||||
// This extracts p-tags from the user's kind-3 contact list event.
|
||||
// Returns an empty slice if no kind-3 event is found.
|
||||
func (d *D) GetFollowsFromPubkeySerial(pubkeySerial *types.Uint40) ([]*types.Uint40, error) {
|
||||
// Find the kind-3 event for this pubkey
|
||||
contactEventSerial, err := d.FindEventByAuthorAndKind(pubkeySerial, 3)
|
||||
if err != nil {
|
||||
log.D.F("GetFollowsFromPubkeySerial: error finding kind-3 for serial %d: %v", pubkeySerial.Get(), err)
|
||||
return nil, nil // No kind-3 event found is not an error
|
||||
}
|
||||
if contactEventSerial == nil {
|
||||
log.T.F("GetFollowsFromPubkeySerial: no kind-3 event found for serial %d", pubkeySerial.Get())
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Extract p-tags from the contact list event
|
||||
follows, err := d.GetPTagsFromEventSerial(contactEventSerial)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.T.F("GetFollowsFromPubkeySerial: found %d follows for serial %d", len(follows), pubkeySerial.Get())
|
||||
return follows, nil
|
||||
}
|
||||
|
||||
// GetFollowersOfPubkeySerial returns the pubkey serials of users who follow a given pubkey.
|
||||
// This finds all kind-3 events that have a p-tag referencing the target pubkey.
|
||||
func (d *D) GetFollowersOfPubkeySerial(targetSerial *types.Uint40) ([]*types.Uint40, error) {
|
||||
// Find all kind-3 events that reference this pubkey via p-tag
|
||||
kind3Events, err := d.GetEventsReferencingPubkey(targetSerial, []uint16{3})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Extract the author serials from these events
|
||||
var followerSerials []*types.Uint40
|
||||
seen := make(map[uint64]bool)
|
||||
|
||||
for _, eventSerial := range kind3Events {
|
||||
// Get the author of this kind-3 event
|
||||
// We need to look up the event to get its author
|
||||
// Use the epg index to find the author edge
|
||||
authorSerial, err := d.GetEventAuthorSerial(eventSerial)
|
||||
if err != nil {
|
||||
log.D.F("GetFollowersOfPubkeySerial: couldn't get author for event %d: %v", eventSerial.Get(), err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Deduplicate (a user might have multiple kind-3 events)
|
||||
if seen[authorSerial.Get()] {
|
||||
continue
|
||||
}
|
||||
seen[authorSerial.Get()] = true
|
||||
followerSerials = append(followerSerials, authorSerial)
|
||||
}
|
||||
|
||||
log.T.F("GetFollowersOfPubkeySerial: found %d followers for serial %d", len(followerSerials), targetSerial.Get())
|
||||
return followerSerials, nil
|
||||
}
|
||||
|
||||
// GetEventAuthorSerial finds the author pubkey serial for an event.
|
||||
// Uses the epg (event-pubkey-graph) index with author direction.
|
||||
func (d *D) GetEventAuthorSerial(eventSerial *types.Uint40) (*types.Uint40, error) {
|
||||
var authorSerial *types.Uint40
|
||||
|
||||
// Build prefix: epg|event_serial
|
||||
prefix := new(bytes.Buffer)
|
||||
prefix.Write([]byte(indexes.EventPubkeyGraphPrefix))
|
||||
if err := eventSerial.MarshalWrite(prefix); chk.E(err) {
|
||||
return nil, err
|
||||
}
|
||||
searchPrefix := prefix.Bytes()
|
||||
|
||||
err := d.View(func(txn *badger.Txn) error {
|
||||
opts := badger.DefaultIteratorOptions
|
||||
opts.PrefetchValues = false
|
||||
opts.Prefix = searchPrefix
|
||||
|
||||
it := txn.NewIterator(opts)
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(searchPrefix); it.ValidForPrefix(searchPrefix); it.Next() {
|
||||
key := it.Item().KeyCopy(nil)
|
||||
|
||||
// Decode key: epg(3)|event_serial(5)|pubkey_serial(5)|kind(2)|direction(1)
|
||||
if len(key) != 16 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check direction - we want author (0)
|
||||
direction := key[15]
|
||||
if direction != types.EdgeDirectionAuthor {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract pubkey serial (bytes 8-12)
|
||||
authorSerial = new(types.Uint40)
|
||||
serialReader := bytes.NewReader(key[8:13])
|
||||
if err := authorSerial.UnmarshalRead(serialReader); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
|
||||
return nil // Found the author
|
||||
}
|
||||
return ErrEventNotFound
|
||||
})
|
||||
|
||||
return authorSerial, err
|
||||
}
|
||||
|
||||
// PubkeyHexToSerial converts a pubkey hex string to its serial, if it exists.
|
||||
// Returns an error if the pubkey is not in the database.
|
||||
func (d *D) PubkeyHexToSerial(pubkeyHex string) (*types.Uint40, error) {
|
||||
pubkeyBytes, err := hex.Dec(pubkeyHex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(pubkeyBytes) != 32 {
|
||||
return nil, errors.New("invalid pubkey length")
|
||||
}
|
||||
return d.GetPubkeySerial(pubkeyBytes)
|
||||
}
|
||||
|
||||
// EventIDHexToSerial converts an event ID hex string to its serial, if it exists.
|
||||
// Returns an error if the event is not in the database.
|
||||
func (d *D) EventIDHexToSerial(eventIDHex string) (*types.Uint40, error) {
|
||||
eventIDBytes, err := hex.Dec(eventIDHex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(eventIDBytes) != 32 {
|
||||
return nil, errors.New("invalid event ID length")
|
||||
}
|
||||
return d.GetSerialById(eventIDBytes)
|
||||
}
|
||||
547
pkg/database/graph-traversal_test.go
Normal file
547
pkg/database/graph-traversal_test.go
Normal file
@@ -0,0 +1,547 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
)
|
||||
|
||||
func TestGetPTagsFromEventSerial(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create an author pubkey
|
||||
authorPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
|
||||
// Create p-tag target pubkeys
|
||||
target1, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000002")
|
||||
target2, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000003")
|
||||
|
||||
// Create event with p-tags
|
||||
eventID := make([]byte, 32)
|
||||
eventID[0] = 0x10
|
||||
eventSig := make([]byte, 64)
|
||||
eventSig[0] = 0x10
|
||||
|
||||
ev := &event.E{
|
||||
ID: eventID,
|
||||
Pubkey: authorPubkey,
|
||||
CreatedAt: 1234567890,
|
||||
Kind: 1,
|
||||
Content: []byte("Test event with p-tags"),
|
||||
Sig: eventSig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("p", hex.Enc(target1)),
|
||||
tag.NewFromAny("p", hex.Enc(target2)),
|
||||
),
|
||||
}
|
||||
|
||||
_, err = db.SaveEvent(ctx, ev)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Get the event serial
|
||||
eventSerial, err := db.GetSerialById(eventID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get event serial: %v", err)
|
||||
}
|
||||
|
||||
// Get p-tags from event serial
|
||||
ptagSerials, err := db.GetPTagsFromEventSerial(eventSerial)
|
||||
if err != nil {
|
||||
t.Fatalf("GetPTagsFromEventSerial failed: %v", err)
|
||||
}
|
||||
|
||||
// Should have 2 p-tags
|
||||
if len(ptagSerials) != 2 {
|
||||
t.Errorf("Expected 2 p-tag serials, got %d", len(ptagSerials))
|
||||
}
|
||||
|
||||
// Verify the pubkeys
|
||||
for _, serial := range ptagSerials {
|
||||
pubkey, err := db.GetPubkeyBySerial(serial)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to get pubkey for serial: %v", err)
|
||||
continue
|
||||
}
|
||||
pubkeyHex := hex.Enc(pubkey)
|
||||
if pubkeyHex != hex.Enc(target1) && pubkeyHex != hex.Enc(target2) {
|
||||
t.Errorf("Unexpected pubkey: %s", pubkeyHex)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetETagsFromEventSerial(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create a parent event
|
||||
parentPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
parentID := make([]byte, 32)
|
||||
parentID[0] = 0x10
|
||||
parentSig := make([]byte, 64)
|
||||
parentSig[0] = 0x10
|
||||
|
||||
parentEvent := &event.E{
|
||||
ID: parentID,
|
||||
Pubkey: parentPubkey,
|
||||
CreatedAt: 1234567890,
|
||||
Kind: 1,
|
||||
Content: []byte("Parent post"),
|
||||
Sig: parentSig,
|
||||
Tags: &tag.S{},
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, parentEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save parent event: %v", err)
|
||||
}
|
||||
|
||||
// Create a reply event with e-tag
|
||||
replyPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000002")
|
||||
replyID := make([]byte, 32)
|
||||
replyID[0] = 0x20
|
||||
replySig := make([]byte, 64)
|
||||
replySig[0] = 0x20
|
||||
|
||||
replyEvent := &event.E{
|
||||
ID: replyID,
|
||||
Pubkey: replyPubkey,
|
||||
CreatedAt: 1234567891,
|
||||
Kind: 1,
|
||||
Content: []byte("Reply"),
|
||||
Sig: replySig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("e", hex.Enc(parentID)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, replyEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save reply event: %v", err)
|
||||
}
|
||||
|
||||
// Get e-tags from reply
|
||||
replySerial, _ := db.GetSerialById(replyID)
|
||||
etagSerials, err := db.GetETagsFromEventSerial(replySerial)
|
||||
if err != nil {
|
||||
t.Fatalf("GetETagsFromEventSerial failed: %v", err)
|
||||
}
|
||||
|
||||
if len(etagSerials) != 1 {
|
||||
t.Errorf("Expected 1 e-tag serial, got %d", len(etagSerials))
|
||||
}
|
||||
|
||||
// Verify the target event
|
||||
if len(etagSerials) > 0 {
|
||||
targetEventID, err := db.GetEventIdBySerial(etagSerials[0])
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get event ID from serial: %v", err)
|
||||
}
|
||||
if hex.Enc(targetEventID) != hex.Enc(parentID) {
|
||||
t.Errorf("Expected parent ID, got %s", hex.Enc(targetEventID))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetReferencingEvents(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create a parent event
|
||||
parentPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
parentID := make([]byte, 32)
|
||||
parentID[0] = 0x10
|
||||
parentSig := make([]byte, 64)
|
||||
parentSig[0] = 0x10
|
||||
|
||||
parentEvent := &event.E{
|
||||
ID: parentID,
|
||||
Pubkey: parentPubkey,
|
||||
CreatedAt: 1234567890,
|
||||
Kind: 1,
|
||||
Content: []byte("Parent post"),
|
||||
Sig: parentSig,
|
||||
Tags: &tag.S{},
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, parentEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save parent event: %v", err)
|
||||
}
|
||||
|
||||
// Create multiple replies and reactions
|
||||
for i := 0; i < 3; i++ {
|
||||
replyPubkey := make([]byte, 32)
|
||||
replyPubkey[0] = byte(0x20 + i)
|
||||
replyID := make([]byte, 32)
|
||||
replyID[0] = byte(0x30 + i)
|
||||
replySig := make([]byte, 64)
|
||||
replySig[0] = byte(0x30 + i)
|
||||
|
||||
var evKind uint16 = 1 // Reply
|
||||
if i == 2 {
|
||||
evKind = 7 // Reaction
|
||||
}
|
||||
|
||||
replyEvent := &event.E{
|
||||
ID: replyID,
|
||||
Pubkey: replyPubkey,
|
||||
CreatedAt: int64(1234567891 + i),
|
||||
Kind: evKind,
|
||||
Content: []byte("Response"),
|
||||
Sig: replySig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("e", hex.Enc(parentID)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, replyEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save reply %d: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get parent serial
|
||||
parentSerial, _ := db.GetSerialById(parentID)
|
||||
|
||||
// Test without kind filter
|
||||
refs, err := db.GetReferencingEvents(parentSerial, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("GetReferencingEvents failed: %v", err)
|
||||
}
|
||||
if len(refs) != 3 {
|
||||
t.Errorf("Expected 3 referencing events, got %d", len(refs))
|
||||
}
|
||||
|
||||
// Test with kind filter (only replies)
|
||||
refs, err = db.GetReferencingEvents(parentSerial, []uint16{1})
|
||||
if err != nil {
|
||||
t.Fatalf("GetReferencingEvents with kind filter failed: %v", err)
|
||||
}
|
||||
if len(refs) != 2 {
|
||||
t.Errorf("Expected 2 kind-1 referencing events, got %d", len(refs))
|
||||
}
|
||||
|
||||
// Test with kind filter (only reactions)
|
||||
refs, err = db.GetReferencingEvents(parentSerial, []uint16{7})
|
||||
if err != nil {
|
||||
t.Fatalf("GetReferencingEvents with kind 7 filter failed: %v", err)
|
||||
}
|
||||
if len(refs) != 1 {
|
||||
t.Errorf("Expected 1 kind-7 referencing event, got %d", len(refs))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetFollowsFromPubkeySerial(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create author and their follows
|
||||
authorPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
follow1, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000002")
|
||||
follow2, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000003")
|
||||
follow3, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000004")
|
||||
|
||||
// Create kind-3 contact list
|
||||
eventID := make([]byte, 32)
|
||||
eventID[0] = 0x10
|
||||
eventSig := make([]byte, 64)
|
||||
eventSig[0] = 0x10
|
||||
|
||||
contactList := &event.E{
|
||||
ID: eventID,
|
||||
Pubkey: authorPubkey,
|
||||
CreatedAt: 1234567890,
|
||||
Kind: 3,
|
||||
Content: []byte(""),
|
||||
Sig: eventSig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("p", hex.Enc(follow1)),
|
||||
tag.NewFromAny("p", hex.Enc(follow2)),
|
||||
tag.NewFromAny("p", hex.Enc(follow3)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, contactList)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save contact list: %v", err)
|
||||
}
|
||||
|
||||
// Get author serial
|
||||
authorSerial, err := db.GetPubkeySerial(authorPubkey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get author serial: %v", err)
|
||||
}
|
||||
|
||||
// Get follows
|
||||
follows, err := db.GetFollowsFromPubkeySerial(authorSerial)
|
||||
if err != nil {
|
||||
t.Fatalf("GetFollowsFromPubkeySerial failed: %v", err)
|
||||
}
|
||||
|
||||
if len(follows) != 3 {
|
||||
t.Errorf("Expected 3 follows, got %d", len(follows))
|
||||
}
|
||||
|
||||
// Verify the follows are correct
|
||||
expectedFollows := map[string]bool{
|
||||
hex.Enc(follow1): false,
|
||||
hex.Enc(follow2): false,
|
||||
hex.Enc(follow3): false,
|
||||
}
|
||||
for _, serial := range follows {
|
||||
pubkey, err := db.GetPubkeyBySerial(serial)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to get pubkey from serial: %v", err)
|
||||
continue
|
||||
}
|
||||
pkHex := hex.Enc(pubkey)
|
||||
if _, exists := expectedFollows[pkHex]; exists {
|
||||
expectedFollows[pkHex] = true
|
||||
} else {
|
||||
t.Errorf("Unexpected follow: %s", pkHex)
|
||||
}
|
||||
}
|
||||
for pk, found := range expectedFollows {
|
||||
if !found {
|
||||
t.Errorf("Expected follow not found: %s", pk)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGraphResult(t *testing.T) {
|
||||
result := NewGraphResult()
|
||||
|
||||
// Add pubkeys at different depths
|
||||
result.AddPubkeyAtDepth("pubkey1", 1)
|
||||
result.AddPubkeyAtDepth("pubkey2", 1)
|
||||
result.AddPubkeyAtDepth("pubkey3", 2)
|
||||
result.AddPubkeyAtDepth("pubkey4", 2)
|
||||
result.AddPubkeyAtDepth("pubkey5", 3)
|
||||
|
||||
// Try to add duplicate
|
||||
added := result.AddPubkeyAtDepth("pubkey1", 2)
|
||||
if added {
|
||||
t.Error("Should not add duplicate pubkey")
|
||||
}
|
||||
|
||||
// Verify counts
|
||||
if result.TotalPubkeys != 5 {
|
||||
t.Errorf("Expected 5 total pubkeys, got %d", result.TotalPubkeys)
|
||||
}
|
||||
|
||||
// Verify depth tracking
|
||||
if result.GetPubkeyDepth("pubkey1") != 1 {
|
||||
t.Errorf("pubkey1 should be at depth 1")
|
||||
}
|
||||
if result.GetPubkeyDepth("pubkey3") != 2 {
|
||||
t.Errorf("pubkey3 should be at depth 2")
|
||||
}
|
||||
|
||||
// Verify HasPubkey
|
||||
if !result.HasPubkey("pubkey1") {
|
||||
t.Error("Should have pubkey1")
|
||||
}
|
||||
if result.HasPubkey("nonexistent") {
|
||||
t.Error("Should not have nonexistent pubkey")
|
||||
}
|
||||
|
||||
// Verify ToDepthArrays
|
||||
arrays := result.ToDepthArrays()
|
||||
if len(arrays) != 3 {
|
||||
t.Errorf("Expected 3 depth arrays, got %d", len(arrays))
|
||||
}
|
||||
if len(arrays[0]) != 2 {
|
||||
t.Errorf("Expected 2 pubkeys at depth 1, got %d", len(arrays[0]))
|
||||
}
|
||||
if len(arrays[1]) != 2 {
|
||||
t.Errorf("Expected 2 pubkeys at depth 2, got %d", len(arrays[1]))
|
||||
}
|
||||
if len(arrays[2]) != 1 {
|
||||
t.Errorf("Expected 1 pubkey at depth 3, got %d", len(arrays[2]))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGraphResultRefs(t *testing.T) {
|
||||
result := NewGraphResult()
|
||||
|
||||
// Add some pubkeys
|
||||
result.AddPubkeyAtDepth("pubkey1", 1)
|
||||
result.AddEventAtDepth("event1", 1)
|
||||
|
||||
// Add inbound refs (kind 7 reactions)
|
||||
result.AddInboundRef(7, "event1", "reaction1")
|
||||
result.AddInboundRef(7, "event1", "reaction2")
|
||||
result.AddInboundRef(7, "event1", "reaction3")
|
||||
|
||||
// Get sorted refs
|
||||
refs := result.GetInboundRefsSorted(7)
|
||||
if len(refs) != 1 {
|
||||
t.Fatalf("Expected 1 aggregation, got %d", len(refs))
|
||||
}
|
||||
if refs[0].RefCount != 3 {
|
||||
t.Errorf("Expected 3 refs, got %d", refs[0].RefCount)
|
||||
}
|
||||
if refs[0].TargetEventID != "event1" {
|
||||
t.Errorf("Expected event1, got %s", refs[0].TargetEventID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetFollowersOfPubkeySerial(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create target pubkey (the one being followed)
|
||||
targetPubkey, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
|
||||
// Create followers
|
||||
follower1, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000002")
|
||||
follower2, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000003")
|
||||
|
||||
// Create kind-3 contact lists for followers
|
||||
for i, followerPubkey := range [][]byte{follower1, follower2} {
|
||||
eventID := make([]byte, 32)
|
||||
eventID[0] = byte(0x10 + i)
|
||||
eventSig := make([]byte, 64)
|
||||
eventSig[0] = byte(0x10 + i)
|
||||
|
||||
contactList := &event.E{
|
||||
ID: eventID,
|
||||
Pubkey: followerPubkey,
|
||||
CreatedAt: int64(1234567890 + i),
|
||||
Kind: 3,
|
||||
Content: []byte(""),
|
||||
Sig: eventSig,
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("p", hex.Enc(targetPubkey)),
|
||||
),
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, contactList)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save contact list %d: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get target serial
|
||||
targetSerial, err := db.GetPubkeySerial(targetPubkey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get target serial: %v", err)
|
||||
}
|
||||
|
||||
// Get followers
|
||||
followers, err := db.GetFollowersOfPubkeySerial(targetSerial)
|
||||
if err != nil {
|
||||
t.Fatalf("GetFollowersOfPubkeySerial failed: %v", err)
|
||||
}
|
||||
|
||||
if len(followers) != 2 {
|
||||
t.Errorf("Expected 2 followers, got %d", len(followers))
|
||||
}
|
||||
|
||||
// Verify the followers
|
||||
expectedFollowers := map[string]bool{
|
||||
hex.Enc(follower1): false,
|
||||
hex.Enc(follower2): false,
|
||||
}
|
||||
for _, serial := range followers {
|
||||
pubkey, err := db.GetPubkeyBySerial(serial)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to get pubkey from serial: %v", err)
|
||||
continue
|
||||
}
|
||||
pkHex := hex.Enc(pubkey)
|
||||
if _, exists := expectedFollowers[pkHex]; exists {
|
||||
expectedFollowers[pkHex] = true
|
||||
} else {
|
||||
t.Errorf("Unexpected follower: %s", pkHex)
|
||||
}
|
||||
}
|
||||
for pk, found := range expectedFollowers {
|
||||
if !found {
|
||||
t.Errorf("Expected follower not found: %s", pk)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPubkeyHexToSerial(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
db, err := New(ctx, cancel, t.TempDir(), "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create a pubkey by saving an event
|
||||
pubkeyBytes, _ := hex.Dec("0000000000000000000000000000000000000000000000000000000000000001")
|
||||
eventID := make([]byte, 32)
|
||||
eventID[0] = 0x10
|
||||
eventSig := make([]byte, 64)
|
||||
eventSig[0] = 0x10
|
||||
|
||||
ev := &event.E{
|
||||
ID: eventID,
|
||||
Pubkey: pubkeyBytes,
|
||||
CreatedAt: 1234567890,
|
||||
Kind: 1,
|
||||
Content: []byte("Test"),
|
||||
Sig: eventSig,
|
||||
Tags: &tag.S{},
|
||||
}
|
||||
_, err = db.SaveEvent(ctx, ev)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Convert hex to serial
|
||||
pubkeyHex := hex.Enc(pubkeyBytes)
|
||||
serial, err := db.PubkeyHexToSerial(pubkeyHex)
|
||||
if err != nil {
|
||||
t.Fatalf("PubkeyHexToSerial failed: %v", err)
|
||||
}
|
||||
if serial == nil {
|
||||
t.Fatal("Expected non-nil serial")
|
||||
}
|
||||
|
||||
// Convert back and verify
|
||||
backToHex, err := db.GetPubkeyHexFromSerial(serial)
|
||||
if err != nil {
|
||||
t.Fatalf("GetPubkeyHexFromSerial failed: %v", err)
|
||||
}
|
||||
if backToHex != pubkeyHex {
|
||||
t.Errorf("Round-trip failed: %s != %s", backToHex, pubkeyHex)
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package database
|
||||
|
||||
import (
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user