Compare commits

19 Commits

6bd56a30c9
880772cab1
1851ba39fa
de290aeb25
0a61f274d5
c8fac06f24
64c6bd8bdd
58d75bfc5a
69e2c873d8
6c7d55ff7e
3c17e975df
feae79af1a
ebef8605eb
c5db0abf73
016e97925a
042b47a4d9
952ce0285b
45856f39b4
70944d45df
@@ -153,7 +153,28 @@
 "Bash(git check-ignore:*)",
 "Bash(git commit:*)",
 "WebFetch(domain:www.npmjs.com)",
-"Bash(git stash:*)"
+"Bash(git stash:*)",
+"WebFetch(domain:arxiv.org)",
+"WebFetch(domain:hal.science)",
+"WebFetch(domain:pkg.go.dev)",
+"Bash(GOOS=js GOARCH=wasm CGO_ENABLED=0 go build:*)",
+"Bash(GOOS=js GOARCH=wasm go doc:*)",
+"Bash(GOOS=js GOARCH=wasm CGO_ENABLED=0 go test:*)",
+"Bash(node --version:*)",
+"Bash(npm install)",
+"Bash(node run_wasm_tests.mjs:*)",
+"Bash(go env:*)",
+"Bash(GOROOT=/home/mleku/go node run_wasm_tests.mjs:*)",
+"Bash(./orly:*)",
+"Bash(./orly -version:*)",
+"Bash(./orly --version:*)",
+"Bash(GOOS=js GOARCH=wasm go test:*)",
+"Bash(ls:*)",
+"Bash(GOROOT=/home/mleku/go node:*)",
+"Bash(GOOS=js GOARCH=wasm go build:*)",
+"Bash(go mod graph:*)",
+"Bash(xxd:*)",
+"Bash(CGO_ENABLED=0 go mod tidy:*)"
 ],
 "deny": [],
 "ask": []
395 .claude/skills/cypher/SKILL.md Normal file
@@ -0,0 +1,395 @@
---
name: cypher
description: This skill should be used when writing, debugging, or discussing Neo4j Cypher queries. Provides comprehensive knowledge of Cypher syntax, query patterns, performance optimization, and common mistakes. Particularly useful for translating between domain models and graph queries.
---

# Neo4j Cypher Query Language

## Purpose

This skill provides expert-level guidance for writing Neo4j Cypher queries, including syntax, patterns, performance optimization, and common pitfalls. It is particularly tuned for the patterns used in this ORLY Nostr relay codebase.

## When to Use

Activate this skill when:
- Writing Cypher queries for Neo4j
- Debugging Cypher syntax errors
- Optimizing query performance
- Translating Nostr filter queries to Cypher
- Working with graph relationships and traversals
- Creating or modifying schema (indexes, constraints)

## Core Cypher Syntax

### Clause Order (CRITICAL)

Cypher requires clauses in a specific order. Violating this causes syntax errors:

```cypher
// CORRECT order of clauses
MATCH (n:Label)              // 1. Pattern matching
WHERE n.prop = value         // 2. Filtering
WITH n, count(*) AS cnt      // 3. Intermediate results (resets scope)
OPTIONAL MATCH (n)-[r]-()    // 4. Optional patterns
CREATE (m:NewNode)           // 5. Node/relationship creation
SET n.prop = value           // 6. Property updates
DELETE r                     // 7. Deletions
RETURN n.prop AS result      // 8. Return clause
ORDER BY result DESC         // 9. Ordering
SKIP 10 LIMIT 20             // 10. Pagination
```

### The WITH Clause (CRITICAL)

The `WITH` clause is required to transition between certain operations:

**Rule: Cannot use MATCH after CREATE without WITH**

```cypher
// WRONG - MATCH after CREATE without WITH
CREATE (e:Event {id: $id})
MATCH (ref:Event {id: $refId})   // ERROR!
CREATE (e)-[:REFERENCES]->(ref)

// CORRECT - Use WITH to carry variables forward
CREATE (e:Event {id: $id})
WITH e
MATCH (ref:Event {id: $refId})
CREATE (e)-[:REFERENCES]->(ref)
```

**Rule: WITH resets the scope**

Variables not included in WITH are no longer accessible:

```cypher
// WRONG - 'a' is lost after WITH
MATCH (a:Author), (e:Event)
WITH e
WHERE a.pubkey = $pubkey   // ERROR: 'a' not defined

// CORRECT - Include all needed variables
MATCH (a:Author), (e:Event)
WITH a, e
WHERE a.pubkey = $pubkey
```

### Node and Relationship Patterns

```cypher
// Nodes
(n)                         // Anonymous node
(n:Label)                   // Labeled node
(n:Label {prop: value})     // Node with properties
(n:Label:OtherLabel)        // Multiple labels

// Relationships
-[r]->                      // Directed, anonymous
-[r:TYPE]->                 // Typed relationship
-[r:TYPE {prop: value}]->   // With properties
-[r:TYPE|OTHER]->           // Multiple types (OR)
-[*1..3]->                  // Variable length (1 to 3 hops)
-[*]->                      // Any number of hops
```

### MERGE vs CREATE

**CREATE**: Always creates new nodes/relationships (may create duplicates)

```cypher
CREATE (n:Event {id: $id})   // Creates even if id exists
```

**MERGE**: Finds or creates (idempotent)

```cypher
MERGE (n:Event {id: $id})    // Finds existing or creates new
ON CREATE SET n.created = timestamp()
ON MATCH SET n.accessed = timestamp()
```

**Best Practice**: Use MERGE for reference nodes, CREATE for unique events

```cypher
// Reference nodes - use MERGE (idempotent)
MERGE (author:Author {pubkey: $pubkey})

// Unique events - use CREATE (after checking existence)
CREATE (e:Event {id: $eventId, ...})
```

### OPTIONAL MATCH

Returns NULL for non-matching patterns (like LEFT JOIN):

```cypher
// Find events, with or without tags
MATCH (e:Event)
OPTIONAL MATCH (e)-[:TAGGED_WITH]->(t:Tag)
RETURN e.id, collect(t.value) AS tags
```

### Conditional Creation with FOREACH

To conditionally create relationships:

```cypher
// FOREACH trick for conditional operations
OPTIONAL MATCH (ref:Event {id: $refId})
FOREACH (ignoreMe IN CASE WHEN ref IS NOT NULL THEN [1] ELSE [] END |
  CREATE (e)-[:REFERENCES]->(ref)
)
```

### Aggregation Functions

```cypher
count(*)                      // Count all rows
count(n)                      // Count non-null values
count(DISTINCT n)             // Count unique values
collect(n)                    // Collect into list
collect(DISTINCT n)           // Collect unique values
sum(n.value)                  // Sum values
avg(n.value)                  // Average
min(n.value), max(n.value)    // Min/max
```
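
A worked example combining several of these, against the `Event`/`Author` schema used later in this skill:

```cypher
// Events per author with their newest timestamp, most active first
MATCH (e:Event)-[:AUTHORED_BY]->(a:Author)
RETURN a.pubkey AS author,
       count(e) AS events,
       max(e.created_at) AS newest
ORDER BY events DESC
LIMIT 10
```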

### String Operations

```cypher
// String matching
WHERE n.name STARTS WITH 'prefix'
WHERE n.name ENDS WITH 'suffix'
WHERE n.name CONTAINS 'substring'
WHERE n.name =~ 'regex.*pattern'   // Regex

// String functions
toLower(str), toUpper(str)
trim(str), ltrim(str), rtrim(str)
substring(str, start, length)
replace(str, search, replacement)
```

### List Operations

```cypher
// IN clause
WHERE n.kind IN [1, 7, 30023]
WHERE n.pubkey IN $pubkeyList

// List comprehension
[x IN list WHERE x > 0 | x * 2]

// UNWIND - expand list into rows
UNWIND $pubkeys AS pubkey
MERGE (u:User {pubkey: pubkey})
```

### Parameters

Always use parameters for values (security + performance):

```cypher
// CORRECT - parameterized
MATCH (e:Event {id: $eventId})
WHERE e.kind IN $kinds

// WRONG - string interpolation (Cypher injection risk!)
MATCH (e:Event {id: '" + eventId + "'})
```
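
For completeness, a minimal sketch of supplying parameters from Go, assuming the official `neo4j-go-driver/v5` (illustrative only, not code from this repo):

```go
package main

import (
	"context"
	"fmt"

	"github.com/neo4j/neo4j-go-driver/v5/neo4j"
)

func main() {
	ctx := context.Background()
	driver, err := neo4j.NewDriverWithContext(
		"neo4j://localhost:7687", neo4j.BasicAuth("neo4j", "password", ""))
	if err != nil {
		panic(err)
	}
	defer driver.Close(ctx)

	session := driver.NewSession(ctx, neo4j.SessionConfig{})
	defer session.Close(ctx)

	// Values travel in a map; the driver binds $eventId and $kinds safely,
	// so the query text stays constant and the plan cache can be reused.
	result, err := session.Run(ctx,
		"MATCH (e:Event {id: $eventId}) WHERE e.kind IN $kinds RETURN e.id AS id",
		map[string]any{"eventId": "abc123", "kinds": []int64{1, 7}})
	if err != nil {
		panic(err)
	}
	for result.Next(ctx) {
		fmt.Println(result.Record().Values[0])
	}
}
```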

## Schema Management

### Constraints

```cypher
// Uniqueness constraint (also creates index)
CREATE CONSTRAINT event_id_unique IF NOT EXISTS
FOR (e:Event) REQUIRE e.id IS UNIQUE

// Composite uniqueness
CREATE CONSTRAINT card_unique IF NOT EXISTS
FOR (c:Card) REQUIRE (c.customer_id, c.observee_pubkey) IS UNIQUE

// Drop constraint
DROP CONSTRAINT event_id_unique IF EXISTS
```

### Indexes

```cypher
// Single property index
CREATE INDEX event_kind IF NOT EXISTS FOR (e:Event) ON (e.kind)

// Composite index
CREATE INDEX event_kind_created IF NOT EXISTS
FOR (e:Event) ON (e.kind, e.created_at)

// Drop index
DROP INDEX event_kind IF EXISTS
```

## Common Query Patterns

### Find with Filter

```cypher
// Multiple conditions with OR
MATCH (e:Event)
WHERE e.kind IN $kinds
  AND (e.id = $id1 OR e.id = $id2)
  AND e.created_at >= $since
RETURN e
ORDER BY e.created_at DESC
LIMIT $limit
```

### Graph Traversal

```cypher
// Find events by author
MATCH (e:Event)-[:AUTHORED_BY]->(a:Author {pubkey: $pubkey})
RETURN e

// Find followers of a user
MATCH (follower:NostrUser)-[:FOLLOWS]->(user:NostrUser {pubkey: $pubkey})
RETURN follower.pubkey

// Find mutual follows (friends)
MATCH (a:NostrUser {pubkey: $pubkeyA})-[:FOLLOWS]->(b:NostrUser)
WHERE (b)-[:FOLLOWS]->(a)
RETURN b.pubkey AS mutual_friend
```

### Upsert Pattern

```cypher
MERGE (n:Node {key: $key})
ON CREATE SET
  n.created_at = timestamp(),
  n.value = $value
ON MATCH SET
  n.updated_at = timestamp(),
  n.value = $value
RETURN n
```

### Batch Processing with UNWIND

```cypher
// Create multiple nodes from list
UNWIND $items AS item
CREATE (n:Node {id: item.id, value: item.value})

// Create relationships from list
UNWIND $follows AS followed_pubkey
MERGE (followed:NostrUser {pubkey: followed_pubkey})
MERGE (author)-[:FOLLOWS]->(followed)
```

## Performance Optimization

### Index Usage

1. **Start with indexed properties** - Begin MATCH with most selective indexed field
2. **Use composite indexes** - For queries filtering on multiple properties
3. **Profile queries** - Use `PROFILE` prefix to see execution plan

```cypher
PROFILE MATCH (e:Event {kind: 1})
WHERE e.created_at > $since
RETURN e LIMIT 100
```

### Query Optimization Tips

1. **Filter early** - Put WHERE conditions close to MATCH
2. **Limit early** - Use LIMIT as early as possible
3. **Avoid Cartesian products** - Connect patterns or use WITH
4. **Use parameters** - Enables query plan caching

```cypher
// GOOD - Filter and limit early
MATCH (e:Event)
WHERE e.kind IN $kinds AND e.created_at >= $since
WITH e ORDER BY e.created_at DESC LIMIT 100
OPTIONAL MATCH (e)-[:TAGGED_WITH]->(t:Tag)
RETURN e, collect(t)

// BAD - Late filtering
MATCH (e:Event), (t:Tag)
WHERE e.kind IN $kinds
RETURN e, t LIMIT 100
```

## Reference Materials

For detailed information, consult the reference files:

- **references/syntax-reference.md** - Complete Cypher syntax guide with all clause types, operators, and functions
- **references/common-patterns.md** - Project-specific patterns for ORLY Nostr relay including event storage, tag queries, and social graph traversals
- **references/common-mistakes.md** - Frequent Cypher errors and how to avoid them

## ORLY-Specific Patterns

This codebase uses these specific Cypher patterns:

### Event Storage Pattern

```cypher
// Create event with author relationship
MERGE (a:Author {pubkey: $pubkey})
CREATE (e:Event {
  id: $eventId,
  serial: $serial,
  kind: $kind,
  created_at: $createdAt,
  content: $content,
  sig: $sig,
  pubkey: $pubkey,
  tags: $tags
})
CREATE (e)-[:AUTHORED_BY]->(a)
```

### Tag Query Pattern

```cypher
// Query events by tag (Nostr #<tag> filter)
MATCH (e:Event)-[:TAGGED_WITH]->(t:Tag {type: $tagType})
WHERE t.value IN $tagValues
RETURN e
ORDER BY e.created_at DESC
LIMIT $limit
```

### Social Graph Pattern

```cypher
// Process contact list with diff-based updates
// Mark old as superseded
OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
SET old.superseded_by = $new_event_id

// Create tracking node
CREATE (new:ProcessedSocialEvent {
  event_id: $new_event_id,
  event_kind: 3,
  pubkey: $author_pubkey,
  created_at: $created_at,
  processed_at: timestamp()
})

// Update relationships
MERGE (author:NostrUser {pubkey: $author_pubkey})
WITH author
UNWIND $added_follows AS followed_pubkey
MERGE (followed:NostrUser {pubkey: followed_pubkey})
MERGE (author)-[:FOLLOWS]->(followed)
```

## Official Resources

- Neo4j Cypher Manual: https://neo4j.com/docs/cypher-manual/current/
- Cypher Cheat Sheet: https://neo4j.com/docs/cypher-cheat-sheet/current/
- Query Tuning: https://neo4j.com/docs/cypher-manual/current/query-tuning/
381 .claude/skills/cypher/references/common-mistakes.md Normal file
@@ -0,0 +1,381 @@
# Common Cypher Mistakes and How to Avoid Them

## Clause Ordering Errors

### MATCH After CREATE Without WITH

**Error**: `Invalid input 'MATCH': expected ... WITH`

```cypher
// WRONG
CREATE (e:Event {id: $id})
MATCH (ref:Event {id: $refId})   // ERROR!
CREATE (e)-[:REFERENCES]->(ref)

// CORRECT - Use WITH to transition
CREATE (e:Event {id: $id})
WITH e
MATCH (ref:Event {id: $refId})
CREATE (e)-[:REFERENCES]->(ref)
```

**Rule**: After CREATE, you must use WITH before MATCH.

### WHERE After WITH Without Carrying Variables

**Error**: `Variable 'x' not defined`

```cypher
// WRONG - 'a' is lost
MATCH (a:Author), (e:Event)
WITH e
WHERE a.pubkey = $pubkey   // ERROR: 'a' not in scope

// CORRECT - Include all needed variables
MATCH (a:Author), (e:Event)
WITH a, e
WHERE a.pubkey = $pubkey
```

**Rule**: WITH resets the scope. Include all variables you need.

### ORDER BY Without Aliased Return

**Error**: `Invalid input 'ORDER': expected ... AS`

```cypher
// WRONG in some contexts
RETURN n.name
ORDER BY n.name

// SAFER - Use alias
RETURN n.name AS name
ORDER BY name
```

## MERGE Mistakes

### MERGE on Complex Pattern Creates Duplicates

```cypher
// DANGEROUS - May create duplicate nodes
MERGE (a:Person {name: 'Alice'})-[:KNOWS]->(b:Person {name: 'Bob'})

// CORRECT - MERGE nodes separately first
MERGE (a:Person {name: 'Alice'})
MERGE (b:Person {name: 'Bob'})
MERGE (a)-[:KNOWS]->(b)
```

**Rule**: MERGE simple patterns, not complex ones.

### MERGE Without Unique Property

```cypher
// DANGEROUS - Will keep creating nodes
MERGE (p:Person)   // No unique identifier!
SET p.name = 'Alice'

// CORRECT - Provide unique key
MERGE (p:Person {email: $email})
SET p.name = 'Alice'
```

**Rule**: MERGE must have properties that uniquely identify the node.

### Missing ON CREATE/ON MATCH

```cypher
// LOSES context of whether new or existing
MERGE (p:Person {id: $id})
SET p.updated_at = timestamp()   // Always runs

// BETTER - Handle each case
MERGE (p:Person {id: $id})
ON CREATE SET p.created_at = timestamp()
ON MATCH SET p.updated_at = timestamp()
```

## NULL Handling Errors

### Comparing with NULL

```cypher
// WRONG - NULL = NULL is NULL, not true
WHERE n.email = null   // Never matches!

// CORRECT
WHERE n.email IS NULL
WHERE n.email IS NOT NULL
```

### NULL in Aggregations

```cypher
// count(NULL) returns 0; collect() excludes NULLs from the list
MATCH (n:Person)
OPTIONAL MATCH (n)-[:BOUGHT]->(p:Product)
RETURN n.name, count(p)   // count ignores NULL
```

### NULL Propagation in Expressions

```cypher
// Any operation with NULL returns NULL
WHERE n.age + 1 > 21   // If n.age is NULL, whole expression is NULL (falsy)

// Handle with coalesce
WHERE coalesce(n.age, 0) + 1 > 21
```

## List and IN Clause Errors

### Empty List in IN

```cypher
// An empty list never matches
WHERE n.kind IN []   // Always false

// Check for empty list in application code before query
// Or use CASE:
WHERE CASE WHEN size($kinds) > 0 THEN n.kind IN $kinds ELSE true END
```

### IN with NULL Values

```cypher
// NULL in the list causes issues
WHERE n.id IN [1, NULL, 3]   // NULL is never equal to anything

// Filter NULLs in application code
```

## Relationship Pattern Errors

### Forgetting Direction

```cypher
// WRONG - Matches both directions
MATCH (a)-[:FOLLOWS]-(b)     // Undirected!

// CORRECT - Specify direction
MATCH (a)-[:FOLLOWS]->(b)    // a follows b
MATCH (a)<-[:FOLLOWS]-(b)    // b follows a
```

### Variable-Length Without Bounds

```cypher
// DANGEROUS - Potentially explosive
MATCH (a)-[*]->(b)   // Any length path!

// SAFE - Set bounds
MATCH (a)-[*1..3]->(b)   // 1 to 3 hops max
```

### Creating Duplicate Relationships

```cypher
// May create duplicates
CREATE (a)-[:KNOWS]->(b)

// Idempotent
MERGE (a)-[:KNOWS]->(b)
```

## Performance Mistakes

### Cartesian Products

```cypher
// WRONG - Cartesian product
MATCH (a:Person), (b:Product)
WHERE a.id = $personId AND b.id = $productId
CREATE (a)-[:BOUGHT]->(b)

// CORRECT - Single pattern or sequential
MATCH (a:Person {id: $personId})
MATCH (b:Product {id: $productId})
CREATE (a)-[:BOUGHT]->(b)
```

### Late Filtering

```cypher
// SLOW - Filters after collecting everything
MATCH (e:Event)
WITH e
WHERE e.kind = 1   // Should be in MATCH or right after

// FAST - Filter early
MATCH (e:Event)
WHERE e.kind = 1
```

### Missing LIMIT with ORDER BY

```cypher
// SLOW - Sorts all results
MATCH (e:Event)
RETURN e
ORDER BY e.created_at DESC

// FAST - Limits result set
MATCH (e:Event)
RETURN e
ORDER BY e.created_at DESC
LIMIT 100
```

### Unparameterized Queries

```cypher
// WRONG - No query plan caching, injection risk
MATCH (e:Event {id: '" + eventId + "'})

// CORRECT - Use parameters
MATCH (e:Event {id: $eventId})
```

## String Comparison Errors

### Case Sensitivity

```cypher
// Cypher strings are case-sensitive
WHERE n.name = 'alice'   // Won't match 'Alice'

// Use toLower/toUpper for case-insensitive
WHERE toLower(n.name) = toLower($name)

// Or use regex with (?i)
WHERE n.name =~ '(?i)alice'
```

### LIKE vs CONTAINS

```cypher
// There's no LIKE in Cypher
WHERE n.name LIKE '%alice%'   // ERROR!

// Use CONTAINS, STARTS WITH, ENDS WITH
WHERE n.name CONTAINS 'alice'
WHERE n.name STARTS WITH 'ali'
WHERE n.name ENDS WITH 'ice'

// Or regex for complex patterns
WHERE n.name =~ '.*ali.*ce.*'
```

## Index Mistakes

### Constraint vs Index

```cypher
// Constraint (also creates index, enforces uniqueness)
CREATE CONSTRAINT foo IF NOT EXISTS FOR (n:Node) REQUIRE n.id IS UNIQUE

// Index only (no uniqueness enforcement)
CREATE INDEX bar IF NOT EXISTS FOR (n:Node) ON (n.id)
```

### Index Not Used

```cypher
// Index on n.id won't help here
WHERE toLower(n.id) = $id   // Function applied to indexed property!

// Store lowercase if needed, or create computed property
```

### Wrong Composite Index Order

```cypher
// Index on (kind, created_at) won't help query by created_at alone
MATCH (e:Event) WHERE e.created_at > $since   // Index not used

// Either create single-property index or query by kind too
CREATE INDEX event_created_at FOR (e:Event) ON (e.created_at)
```

## Transaction Errors

### Read After Write in Same Transaction

```cypher
// In Neo4j, reads in a transaction see the writes
// But be careful with external processes
CREATE (n:Node {id: 'new'})
WITH n
MATCH (m:Node {id: 'new'})   // Will find 'n'
```

### Locks and Deadlocks

```cypher
// MERGE takes locks; avoid complex patterns that might deadlock
// Bad: two MERGEs on same labels in different order
Session 1: MERGE (a:Person {id: 1}) MERGE (b:Person {id: 2})
Session 2: MERGE (b:Person {id: 2}) MERGE (a:Person {id: 1})   // Potential deadlock

// Good: consistent ordering
Session 1: MERGE (a:Person {id: 1}) MERGE (b:Person {id: 2})
Session 2: MERGE (a:Person {id: 1}) MERGE (b:Person {id: 2})
```

## Type Coercion Issues

### Integer vs String

```cypher
// Types must match
WHERE n.id = 123     // Won't match if n.id is "123"
WHERE n.id = '123'   // Won't match if n.id is 123

// Use appropriate parameter types from Go
params["id"] = int64(123)   // For integer
params["id"] = "123"        // For string
```

### Boolean Handling

```cypher
// Neo4j booleans vs strings
WHERE n.active = true     // Boolean
WHERE n.active = 'true'   // String - different!
```

## Delete Errors

### Delete Node With Relationships

```cypher
// ERROR - Node still has relationships
MATCH (n:Person {id: $id})
DELETE n

// CORRECT - Delete relationships first
MATCH (n:Person {id: $id})
DETACH DELETE n
```

### Optional Match and Delete

```cypher
// WRONG - DELETE NULL causes no error but also doesn't help
OPTIONAL MATCH (n:Node {id: $id})
DELETE n   // If n is NULL, nothing happens silently

// Better - Check existence first or handle in application
MATCH (n:Node {id: $id})
DELETE n
```

## Debugging Tips

1. **Use EXPLAIN** to see query plan without executing
2. **Use PROFILE** to see actual execution metrics
3. **Break complex queries** into smaller parts to isolate issues
4. **Check parameter types** - mismatched types are a common issue
5. **Verify indexes exist** with `SHOW INDEXES`
6. **Check constraints** with `SHOW CONSTRAINTS`
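
The plan and schema commands from the list above, in runnable form:

```cypher
// Inspect the plan without executing, or run and collect metrics
EXPLAIN MATCH (e:Event) WHERE e.kind = 1 RETURN e
PROFILE MATCH (e:Event) WHERE e.kind = 1 RETURN e

// Verify schema objects
SHOW INDEXES
SHOW CONSTRAINTS
```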
397 .claude/skills/cypher/references/common-patterns.md Normal file
@@ -0,0 +1,397 @@
# Common Cypher Patterns for ORLY Nostr Relay

This reference contains project-specific Cypher patterns used in the ORLY Nostr relay's Neo4j backend.

## Schema Overview

### Node Types

| Label | Purpose | Key Properties |
|-------|---------|----------------|
| `Event` | Nostr events (NIP-01) | `id`, `kind`, `pubkey`, `created_at`, `content`, `sig`, `tags`, `serial` |
| `Author` | Event authors (for NIP-01 queries) | `pubkey` |
| `Tag` | Generic tags | `type`, `value` |
| `NostrUser` | Social graph users (WoT) | `pubkey`, `name`, `about`, `picture`, `nip05` |
| `ProcessedSocialEvent` | Social event tracking | `event_id`, `event_kind`, `pubkey`, `superseded_by` |
| `Marker` | Internal state markers | `key`, `value` |

### Relationship Types

| Type | From | To | Purpose |
|------|------|-----|---------|
| `AUTHORED_BY` | Event | Author | Links event to author |
| `TAGGED_WITH` | Event | Tag | Links event to tags |
| `REFERENCES` | Event | Event | e-tag references |
| `MENTIONS` | Event | Author | p-tag mentions |
| `FOLLOWS` | NostrUser | NostrUser | Contact list (kind 3) |
| `MUTES` | NostrUser | NostrUser | Mute list (kind 10000) |
| `REPORTS` | NostrUser | NostrUser | Reports (kind 1984) |

## Event Storage Patterns

### Create Event with Full Relationships

This pattern creates an event and all related nodes/relationships atomically:

```cypher
// 1. Create or get author
MERGE (a:Author {pubkey: $pubkey})

// 2. Create event node
CREATE (e:Event {
  id: $eventId,
  serial: $serial,
  kind: $kind,
  created_at: $createdAt,
  content: $content,
  sig: $sig,
  pubkey: $pubkey,
  tags: $tagsJson   // JSON string for full tag data
})

// 3. Link to author
CREATE (e)-[:AUTHORED_BY]->(a)

// 4. Process e-tags (event references)
WITH e, a
OPTIONAL MATCH (ref0:Event {id: $eTag_0})
FOREACH (_ IN CASE WHEN ref0 IS NOT NULL THEN [1] ELSE [] END |
  CREATE (e)-[:REFERENCES]->(ref0)
)

// 5. Process p-tags (mentions)
WITH e, a
MERGE (mentioned0:Author {pubkey: $pTag_0})
CREATE (e)-[:MENTIONS]->(mentioned0)

// 6. Process other tags
WITH e, a
MERGE (tag0:Tag {type: $tagType_0, value: $tagValue_0})
CREATE (e)-[:TAGGED_WITH]->(tag0)

RETURN e.id AS id
```

### Check Event Existence

```cypher
MATCH (e:Event {id: $id})
RETURN e.id AS id
LIMIT 1
```

### Get Next Serial Number

```cypher
MERGE (m:Marker {key: 'serial'})
ON CREATE SET m.value = 1
ON MATCH SET m.value = m.value + 1
RETURN m.value AS serial
```

## Query Patterns

### Basic Filter Query (NIP-01)

```cypher
MATCH (e:Event)
WHERE e.kind IN $kinds
  AND e.pubkey IN $authors
  AND e.created_at >= $since
  AND e.created_at <= $until
RETURN e.id AS id,
       e.kind AS kind,
       e.created_at AS created_at,
       e.content AS content,
       e.sig AS sig,
       e.pubkey AS pubkey,
       e.tags AS tags,
       e.serial AS serial
ORDER BY e.created_at DESC
LIMIT $limit
```

### Query by Event ID (with prefix support)

```cypher
// Exact match
MATCH (e:Event {id: $id})
RETURN e

// Prefix match
MATCH (e:Event)
WHERE e.id STARTS WITH $idPrefix
RETURN e
```

### Query by Tag (#<tag> filter)

```cypher
MATCH (e:Event)-[:TAGGED_WITH]->(t:Tag)
WHERE t.type = $tagType AND t.value IN $tagValues
RETURN DISTINCT e
ORDER BY e.created_at DESC
LIMIT $limit
```

### Count Events

```cypher
MATCH (e:Event)
WHERE e.kind IN $kinds
RETURN count(e) AS count
```

### Query Delete Events Targeting an Event

```cypher
MATCH (target:Event {id: $targetId})
MATCH (e:Event {kind: 5})-[:REFERENCES]->(target)
RETURN e
ORDER BY e.created_at DESC
```

### Replaceable Event Check (kinds 0, 3, 10000-19999)

```cypher
MATCH (e:Event {kind: $kind, pubkey: $pubkey})
WHERE e.created_at < $newCreatedAt
RETURN e.serial AS serial
ORDER BY e.created_at DESC
```

### Parameterized Replaceable Event Check (kinds 30000-39999)

```cypher
MATCH (e:Event {kind: $kind, pubkey: $pubkey})-[:TAGGED_WITH]->(t:Tag {type: 'd', value: $dValue})
WHERE e.created_at < $newCreatedAt
RETURN e.serial AS serial
ORDER BY e.created_at DESC
```

## Social Graph Patterns

### Update Profile (Kind 0)

```cypher
MERGE (user:NostrUser {pubkey: $pubkey})
ON CREATE SET
  user.created_at = timestamp(),
  user.first_seen_event = $event_id
ON MATCH SET
  user.last_profile_update = $created_at
SET
  user.name = $name,
  user.about = $about,
  user.picture = $picture,
  user.nip05 = $nip05,
  user.lud16 = $lud16,
  user.display_name = $display_name
```

### Contact List Update (Kind 3) - Diff-Based

```cypher
// Mark old event as superseded
OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
SET old.superseded_by = $new_event_id

// Create new event tracking
CREATE (new:ProcessedSocialEvent {
  event_id: $new_event_id,
  event_kind: 3,
  pubkey: $author_pubkey,
  created_at: $created_at,
  processed_at: timestamp(),
  relationship_count: $total_follows,
  superseded_by: null
})

// Get or create author
MERGE (author:NostrUser {pubkey: $author_pubkey})

// Update unchanged relationships to new event
WITH author
OPTIONAL MATCH (author)-[unchanged:FOLLOWS]->(followed:NostrUser)
WHERE unchanged.created_by_event = $old_event_id
  AND NOT followed.pubkey IN $removed_follows
SET unchanged.created_by_event = $new_event_id,
    unchanged.created_at = $created_at

// Remove old relationships for removed follows
WITH author
OPTIONAL MATCH (author)-[old_follows:FOLLOWS]->(followed:NostrUser)
WHERE old_follows.created_by_event = $old_event_id
  AND followed.pubkey IN $removed_follows
DELETE old_follows

// Create new relationships for added follows
WITH author
UNWIND $added_follows AS followed_pubkey
MERGE (followed:NostrUser {pubkey: followed_pubkey})
MERGE (author)-[new_follows:FOLLOWS]->(followed)
ON CREATE SET
  new_follows.created_by_event = $new_event_id,
  new_follows.created_at = $created_at,
  new_follows.relay_received_at = timestamp()
ON MATCH SET
  new_follows.created_by_event = $new_event_id,
  new_follows.created_at = $created_at
```

### Create Report (Kind 1984)

```cypher
// Create tracking node
CREATE (evt:ProcessedSocialEvent {
  event_id: $event_id,
  event_kind: 1984,
  pubkey: $reporter_pubkey,
  created_at: $created_at,
  processed_at: timestamp(),
  relationship_count: 1,
  superseded_by: null
})

// Create users and relationship
MERGE (reporter:NostrUser {pubkey: $reporter_pubkey})
MERGE (reported:NostrUser {pubkey: $reported_pubkey})
CREATE (reporter)-[:REPORTS {
  created_by_event: $event_id,
  created_at: $created_at,
  relay_received_at: timestamp(),
  report_type: $report_type
}]->(reported)
```

### Get Latest Social Event for Pubkey

```cypher
MATCH (evt:ProcessedSocialEvent {pubkey: $pubkey, event_kind: $kind})
WHERE evt.superseded_by IS NULL
RETURN evt.event_id AS event_id,
       evt.created_at AS created_at,
       evt.relationship_count AS relationship_count
ORDER BY evt.created_at DESC
LIMIT 1
```

### Get Follows for Event

```cypher
MATCH (author:NostrUser)-[f:FOLLOWS]->(followed:NostrUser)
WHERE f.created_by_event = $event_id
RETURN collect(followed.pubkey) AS pubkeys
```

## WoT Query Patterns

### Find Mutual Follows

```cypher
MATCH (a:NostrUser {pubkey: $pubkeyA})-[:FOLLOWS]->(b:NostrUser)
WHERE (b)-[:FOLLOWS]->(a)
RETURN b.pubkey AS mutual_friend
```

### Find Followers

```cypher
MATCH (follower:NostrUser)-[:FOLLOWS]->(user:NostrUser {pubkey: $pubkey})
RETURN follower.pubkey, follower.name
```

### Find Following

```cypher
MATCH (user:NostrUser {pubkey: $pubkey})-[:FOLLOWS]->(following:NostrUser)
RETURN following.pubkey, following.name
```

### Hop Distance (Trust Path)

```cypher
MATCH (start:NostrUser {pubkey: $startPubkey})
MATCH (end:NostrUser {pubkey: $endPubkey})
MATCH path = shortestPath((start)-[:FOLLOWS*..6]->(end))
RETURN length(path) AS hops, [n IN nodes(path) | n.pubkey] AS path
```

### Second-Degree Connections

```cypher
MATCH (me:NostrUser {pubkey: $myPubkey})-[:FOLLOWS]->(:NostrUser)-[:FOLLOWS]->(suggested:NostrUser)
WHERE NOT (me)-[:FOLLOWS]->(suggested)
  AND suggested.pubkey <> $myPubkey
RETURN suggested.pubkey, count(*) AS commonFollows
ORDER BY commonFollows DESC
LIMIT 20
```

## Schema Management Patterns

### Create Constraint

```cypher
CREATE CONSTRAINT event_id_unique IF NOT EXISTS
FOR (e:Event) REQUIRE e.id IS UNIQUE
```

### Create Index

```cypher
CREATE INDEX event_kind IF NOT EXISTS
FOR (e:Event) ON (e.kind)
```

### Create Composite Index

```cypher
CREATE INDEX event_kind_created_at IF NOT EXISTS
FOR (e:Event) ON (e.kind, e.created_at)
```

### Drop All Data (Testing Only)

```cypher
MATCH (n) DETACH DELETE n
```

## Performance Patterns

### Use EXPLAIN/PROFILE

```cypher
// See query plan without running
EXPLAIN MATCH (e:Event) WHERE e.kind = 1 RETURN e

// Run and see actual metrics
PROFILE MATCH (e:Event) WHERE e.kind = 1 RETURN e
```

### Batch Import with UNWIND

```cypher
UNWIND $events AS evt
CREATE (e:Event {
  id: evt.id,
  kind: evt.kind,
  pubkey: evt.pubkey,
  created_at: evt.created_at,
  content: evt.content,
  sig: evt.sig,
  tags: evt.tags
})
```

### Efficient Pagination

```cypher
// Use indexed ORDER BY with WHERE for cursor-based pagination
MATCH (e:Event)
WHERE e.kind = 1 AND e.created_at < $cursor
RETURN e
ORDER BY e.created_at DESC
LIMIT 20
```
540 .claude/skills/cypher/references/syntax-reference.md Normal file
@@ -0,0 +1,540 @@
# Cypher Syntax Reference

Complete syntax reference for Neo4j Cypher query language.

## Clause Reference

### Reading Clauses

#### MATCH

Finds patterns in the graph.

```cypher
// Basic node match
MATCH (n:Label)

// Match with properties
MATCH (n:Label {key: value})

// Match relationships
MATCH (a)-[r:RELATES_TO]->(b)

// Match path
MATCH path = (a)-[*1..3]->(b)
```

#### OPTIONAL MATCH

Like MATCH but returns NULL for non-matches (LEFT OUTER JOIN).

```cypher
MATCH (a:Person)
OPTIONAL MATCH (a)-[:KNOWS]->(b:Person)
RETURN a.name, b.name   // b.name may be NULL
```

#### WHERE

Filters results.

```cypher
// Comparison operators
WHERE n.age > 21
WHERE n.age >= 21
WHERE n.age < 65
WHERE n.age <= 65
WHERE n.name = 'Alice'
WHERE n.name <> 'Bob'

// Boolean operators
WHERE n.age > 21 AND n.active = true
WHERE n.age < 18 OR n.age > 65
WHERE NOT n.deleted

// NULL checks
WHERE n.email IS NULL
WHERE n.email IS NOT NULL

// Pattern predicates
WHERE (n)-[:KNOWS]->(:Person)
WHERE NOT (n)-[:BLOCKED]->()
WHERE exists((n)-[:FOLLOWS]->())

// String predicates
WHERE n.name STARTS WITH 'A'
WHERE n.name ENDS WITH 'son'
WHERE n.name CONTAINS 'li'
WHERE n.name =~ '(?i)alice.*'   // Case-insensitive regex

// List predicates
WHERE n.status IN ['active', 'pending']
WHERE any(x IN n.tags WHERE x = 'important')
WHERE all(x IN n.scores WHERE x > 50)
WHERE none(x IN n.errors WHERE x IS NOT NULL)
WHERE single(x IN n.items WHERE x.primary = true)
```

### Writing Clauses

#### CREATE

Creates nodes and relationships.

```cypher
// Create node
CREATE (n:Label {key: value})

// Create multiple nodes
CREATE (a:Person {name: 'Alice'}), (b:Person {name: 'Bob'})

// Create relationship
CREATE (a)-[r:KNOWS {since: 2020}]->(b)

// Create path
CREATE p = (a)-[:KNOWS]->(b)-[:KNOWS]->(c)
```

#### MERGE

Find or create pattern. **Critical for idempotency**.

```cypher
// MERGE node
MERGE (n:Label {key: $uniqueKey})

// MERGE with ON CREATE / ON MATCH
MERGE (n:Person {email: $email})
ON CREATE SET n.created = timestamp(), n.name = $name
ON MATCH SET n.accessed = timestamp()

// MERGE relationship (both nodes must exist or be in scope)
MERGE (a)-[r:KNOWS]->(b)
ON CREATE SET r.since = date()
```

**MERGE Gotcha**: MERGE on a pattern locks the entire pattern. For relationships, MERGE each node first:

```cypher
// CORRECT
MERGE (a:Person {id: $id1})
MERGE (b:Person {id: $id2})
MERGE (a)-[:KNOWS]->(b)

// RISKY - may create duplicate nodes
MERGE (a:Person {id: $id1})-[:KNOWS]->(b:Person {id: $id2})
```

#### SET

Updates properties.

```cypher
// Set single property
SET n.name = 'Alice'

// Set multiple properties
SET n.name = 'Alice', n.age = 30

// Set from map (replaces all properties)
SET n = {name: 'Alice', age: 30}

// Set from map (adds/updates, keeps existing)
SET n += {name: 'Alice'}

// Set label
SET n:NewLabel

// Remove property
SET n.obsolete = null
```

#### DELETE / DETACH DELETE

Removes nodes and relationships.

```cypher
// Delete relationship
MATCH (a)-[r:KNOWS]->(b)
DELETE r

// Delete node (must have no relationships)
MATCH (n:Orphan)
DELETE n

// Delete node and all relationships
MATCH (n:Person {name: 'Bob'})
DETACH DELETE n
```

#### REMOVE

Removes properties and labels.

```cypher
// Remove property
REMOVE n.temporary

// Remove label
REMOVE n:OldLabel
```

### Projection Clauses

#### RETURN

Specifies output.

```cypher
// Return nodes
RETURN n

// Return properties
RETURN n.name, n.age

// Return with alias
RETURN n.name AS name, n.age AS age

// Return all
RETURN *

// Return distinct
RETURN DISTINCT n.category

// Return expression
RETURN n.price * n.quantity AS total
```

#### WITH

Passes results between query parts. **Critical for multi-part queries**.

```cypher
// Filter and pass
MATCH (n:Person)
WITH n WHERE n.age > 21
RETURN n

// Aggregate and continue
MATCH (n:Person)-[:BOUGHT]->(p:Product)
WITH n, count(p) AS purchases
WHERE purchases > 5
RETURN n.name, purchases

// Order and limit mid-query
MATCH (n:Person)
WITH n ORDER BY n.age DESC LIMIT 10
MATCH (n)-[:LIVES_IN]->(c:City)
RETURN n.name, c.name
```

**WITH resets scope**: Variables not listed in WITH are no longer available.

#### ORDER BY

Sorts results.

```cypher
ORDER BY n.name                    // Ascending (default)
ORDER BY n.name ASC                // Explicit ascending
ORDER BY n.name DESC               // Descending
ORDER BY n.lastName, n.firstName   // Multiple fields
ORDER BY n.priority DESC, n.name   // Mixed
```

#### SKIP and LIMIT

Pagination.

```cypher
// Skip first 10
SKIP 10

// Return only 20
LIMIT 20

// Pagination
ORDER BY n.created_at DESC
SKIP $offset LIMIT $pageSize
```

### Sub-queries

#### CALL (Subquery)

Execute subquery for each row.

```cypher
MATCH (p:Person)
CALL {
  WITH p
  MATCH (p)-[:BOUGHT]->(prod:Product)
  RETURN count(prod) AS purchaseCount
}
RETURN p.name, purchaseCount
```

#### UNION

Combine results from multiple queries.

```cypher
MATCH (n:Person) RETURN n.name AS name
UNION
MATCH (n:Company) RETURN n.name AS name

// UNION ALL keeps duplicates
MATCH (n:Person) RETURN n.name AS name
UNION ALL
MATCH (n:Company) RETURN n.name AS name
```

### Control Flow

#### FOREACH

Iterate over list, execute updates.

```cypher
// Set property on path nodes
MATCH path = (a)-[*]->(b)
FOREACH (n IN nodes(path) | SET n.visited = true)

// Conditional operation (common pattern)
OPTIONAL MATCH (target:Node {id: $id})
FOREACH (_ IN CASE WHEN target IS NOT NULL THEN [1] ELSE [] END |
  CREATE (source)-[:LINKS_TO]->(target)
)
```

#### CASE

Conditional expressions.

```cypher
// Simple CASE
RETURN CASE n.status
  WHEN 'active' THEN 'A'
  WHEN 'pending' THEN 'P'
  ELSE 'X'
END AS code

// Generic CASE
RETURN CASE
  WHEN n.age < 18 THEN 'minor'
  WHEN n.age < 65 THEN 'adult'
  ELSE 'senior'
END AS category
```

## Operators

### Comparison

| Operator | Description |
|----------|-------------|
| `=` | Equal |
| `<>` | Not equal |
| `<` | Less than |
| `>` | Greater than |
| `<=` | Less than or equal |
| `>=` | Greater than or equal |
| `IS NULL` | Is null |
| `IS NOT NULL` | Is not null |

### Boolean

| Operator | Description |
|----------|-------------|
| `AND` | Logical AND |
| `OR` | Logical OR |
| `NOT` | Logical NOT |
| `XOR` | Exclusive OR |

### String

| Operator | Description |
|----------|-------------|
| `STARTS WITH` | Prefix match |
| `ENDS WITH` | Suffix match |
| `CONTAINS` | Substring match |
| `=~` | Regex match |

### List

| Operator | Description |
|----------|-------------|
| `IN` | List membership |
| `+` | List concatenation |

### Mathematical

| Operator | Description |
|----------|-------------|
| `+` | Addition |
| `-` | Subtraction |
| `*` | Multiplication |
| `/` | Division |
| `%` | Modulo |
| `^` | Exponentiation |

## Functions

### Aggregation

```cypher
count(*)                       // Count rows
count(n)                       // Count non-null
count(DISTINCT n)              // Count unique
sum(n.value)                   // Sum
avg(n.value)                   // Average
min(n.value)                   // Minimum
max(n.value)                   // Maximum
collect(n)                     // Collect to list
collect(DISTINCT n)            // Collect unique
stDev(n.value)                 // Standard deviation
percentileCont(n.value, 0.5)   // Median
```

### Scalar

```cypher
// Type functions
id(n)           // Internal node ID (deprecated, use elementId)
elementId(n)    // Element ID string
labels(n)       // Node labels
type(r)         // Relationship type
properties(n)   // Property map

// Math
abs(x)
ceil(x)
floor(x)
round(x)
sign(x)
sqrt(x)
rand()          // Random 0-1

// String
size(str)       // String length
toLower(str)
toUpper(str)
trim(str)
ltrim(str)
rtrim(str)
replace(str, from, to)
substring(str, start, len)
left(str, len)
right(str, len)
split(str, delimiter)
reverse(str)
toString(val)

// Null handling
coalesce(val1, val2, ...)   // First non-null
nullIf(val1, val2)          // NULL if equal

// Type conversion
toInteger(val)
toFloat(val)
toBoolean(val)
toString(val)
```

### List Functions

```cypher
size(list)                // List length
head(list)                // First element
tail(list)                // All but first
last(list)                // Last element
range(start, end)         // Create range [start..end]
range(start, end, step)
reverse(list)
keys(map)                 // Map keys as list
values(map)               // Map values as list

// List predicates
any(x IN list WHERE predicate)
all(x IN list WHERE predicate)
none(x IN list WHERE predicate)
single(x IN list WHERE predicate)

// List manipulation
[x IN list WHERE predicate]              // Filter
[x IN list | expression]                 // Map
[x IN list WHERE pred | expr]            // Filter and map
reduce(s = initial, x IN list | s + x)   // Reduce
```
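
A small worked example combining filter, map, and reduce (illustrative):

```cypher
WITH [3, -1, 4] AS scores
RETURN reduce(total = 0,
              x IN [s IN scores WHERE s > 0 | s * s] | total + x) AS sumOfSquares
// sumOfSquares = 25 (3*3 + 4*4)
```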

### Path Functions

```cypher
nodes(path)           // Nodes in path
relationships(path)   // Relationships in path
length(path)          // Number of relationships
shortestPath((a)-[*]-(b))
allShortestPaths((a)-[*]-(b))
```

### Temporal Functions

```cypher
timestamp()   // Current Unix timestamp (ms)
datetime()    // Current datetime
date()        // Current date
time()        // Current time
duration({days: 1, hours: 12})

// Components
datetime().year
datetime().month
datetime().day
datetime().hour

// Parsing
date('2024-01-15')
datetime('2024-01-15T10:30:00Z')
```
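
A brief illustrative example (Nostr `created_at` values are Unix seconds):

```cypher
// Convert a Unix-seconds timestamp to a datetime; timestamp() is "now" in ms
RETURN datetime({epochSeconds: 1700000000}) AS created,
       timestamp() AS nowMillis
```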

### Spatial Functions

```cypher
point({x: 1, y: 2})
point({latitude: 37.5, longitude: -122.4})
distance(point1, point2)
```

## Comments

```cypher
// Single line comment

/* Multi-line
   comment */
```

## Transaction Control

```cypher
// cypher-shell / Neo4j Browser transaction commands
:begin
:commit
:rollback
```

## Parameter Syntax

```cypher
// Parameter reference
$paramName

// In properties
{key: $value}

// In WHERE
WHERE n.id = $id

// In expressions
RETURN $multiplier * n.value
```
1115 .claude/skills/distributed-systems/SKILL.md Normal file
File diff suppressed because it is too large
@@ -0,0 +1,610 @@
# Consensus Protocols - Detailed Reference

Complete specifications and implementation details for major consensus protocols.

## Paxos Complete Specification

### Proposal Numbers

Proposal numbers must be:
- **Unique**: No two proposers use the same number
- **Totally ordered**: Any two can be compared

**Implementation**: `(round_number, proposer_id)` where proposer_id breaks ties.
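
A minimal sketch of that ordering in Go (illustrative; names are not from any particular implementation):

```go
// ProposalNum is unique and totally ordered: compare rounds first,
// and let the proposer ID break ties within a round.
type ProposalNum struct {
	Round      uint64
	ProposerID uint64 // unique per proposer, so no two proposals compare equal
}

func (a ProposalNum) Less(b ProposalNum) bool {
	if a.Round != b.Round {
		return a.Round < b.Round
	}
	return a.ProposerID < b.ProposerID
}

// Next yields a proposal number strictly higher than any number seen so far.
func (a ProposalNum) Next(seen ProposalNum) ProposalNum {
	return ProposalNum{Round: seen.Round + 1, ProposerID: a.ProposerID}
}
```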

### Single-Decree Paxos State

**Proposer state**:
```
proposal_number: int
value: any
```

**Acceptor state (persistent)**:
```
highest_promised: int    # Highest proposal number promised
accepted_proposal: int   # Number of accepted proposal (0 if none)
accepted_value: any      # Value of accepted proposal (null if none)
```

### Message Format

**Prepare** (Phase 1a):
```
{
  type: "PREPARE",
  proposal_number: n
}
```

**Promise** (Phase 1b):
```
{
  type: "PROMISE",
  proposal_number: n,
  accepted_proposal: m,   # null if nothing accepted
  accepted_value: v       # null if nothing accepted
}
```

**Accept** (Phase 2a):
```
{
  type: "ACCEPT",
  proposal_number: n,
  value: v
}
```

**Accepted** (Phase 2b):
```
{
  type: "ACCEPTED",
  proposal_number: n,
  value: v
}
```

### Proposer Algorithm

```
function propose(value):
    n = generate_proposal_number()

    # Phase 1: Prepare
    promises = []
    for acceptor in acceptors:
        send PREPARE(n) to acceptor

    wait until |promises| > |acceptors|/2 or timeout

    if timeout:
        return FAILED

    # Choose value
    highest = max(promises, key=p.accepted_proposal)
    if highest.accepted_value is not null:
        value = highest.accepted_value

    # Phase 2: Accept
    accepts = []
    for acceptor in acceptors:
        send ACCEPT(n, value) to acceptor

    wait until |accepts| > |acceptors|/2 or timeout

    if timeout:
        return FAILED

    return SUCCESS(value)
```

### Acceptor Algorithm

```
on receive PREPARE(n):
    if n > highest_promised:
        highest_promised = n
        persist(highest_promised)
        reply PROMISE(n, accepted_proposal, accepted_value)
    else:
        # Optionally reply NACK(highest_promised)
        ignore or reject

on receive ACCEPT(n, v):
    if n >= highest_promised:
        highest_promised = n
        accepted_proposal = n
        accepted_value = v
        persist(highest_promised, accepted_proposal, accepted_value)
        reply ACCEPTED(n, v)
    else:
        ignore or reject
```

### Multi-Paxos Optimization

**Stable leader**:
```
# Leader election (using Paxos or other method)
leader = elect_leader()

# Leader's Phase 1 for all future instances
leader sends PREPARE(n) for instance range [i, ∞)

# For each command:
function propose_as_leader(value, instance):
    # Skip Phase 1 if already leader
    for acceptor in acceptors:
        send ACCEPT(n, value, instance) to acceptor
    wait for majority ACCEPTED
    return SUCCESS
```

### Paxos Safety Proof Sketch

**Invariant**: If a value v is chosen for instance i, no other value can be chosen.

**Proof**:
1. Value chosen → accepted by majority with proposal n
2. Any higher proposal n' must contact majority
3. Majorities intersect → at least one acceptor has accepted v
4. New proposer adopts v (or higher already-accepted value)
5. By induction, all future proposals use v

## Raft Complete Specification

### State

**All servers (persistent)**:
```
currentTerm: int     # Latest term seen
votedFor: ServerId   # Candidate voted for in current term (null if none)
log[]: LogEntry      # Log entries
```

**All servers (volatile)**:
```
commitIndex: int   # Highest log index known to be committed
lastApplied: int   # Highest log index applied to state machine
```

**Leader (volatile, reinitialized after election)**:
```
nextIndex[]: int    # For each server, next log index to send
matchIndex[]: int   # For each server, highest log index replicated
```

**LogEntry**:
```
{
  term: int,
  command: any
}
```
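
For concreteness, a Go mirror of this state (an illustrative sketch; field names follow the spec above, with -1 standing in for null):

```go
// LogEntry holds the term in which the entry was created, plus the command.
type LogEntry struct {
	Term    int
	Command []byte
}

// PersistentState must survive crashes (persisted before responding to RPCs).
type PersistentState struct {
	CurrentTerm int
	VotedFor    int // candidate voted for in CurrentTerm; -1 when null
	Log         []LogEntry
}

// VolatileState is rebuilt on restart.
type VolatileState struct {
	CommitIndex int // highest log index known to be committed
	LastApplied int // highest log index applied to the state machine
}

// LeaderState is reinitialized after each election win.
type LeaderState struct {
	NextIndex  map[int]int // per follower: next log index to send
	MatchIndex map[int]int // per follower: highest replicated log index
}
```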
|
||||
### RequestVote RPC
|
||||
|
||||
**Request**:
|
||||
```
|
||||
{
|
||||
term: int, # Candidate's term
|
||||
candidateId: ServerId, # Candidate requesting vote
|
||||
lastLogIndex: int, # Index of candidate's last log entry
|
||||
lastLogTerm: int # Term of candidate's last log entry
|
||||
}
|
||||
```
|
||||
|
||||
**Response**:
|
||||
```
|
||||
{
|
||||
term: int, # currentTerm, for candidate to update itself
|
||||
voteGranted: bool # True if candidate received vote
|
||||
}
|
||||
```
|
||||
|
||||
**Receiver implementation**:
|
||||
```
|
||||
on receive RequestVote(term, candidateId, lastLogIndex, lastLogTerm):
|
||||
if term < currentTerm:
|
||||
return {term: currentTerm, voteGranted: false}
|
||||
|
||||
if term > currentTerm:
|
||||
currentTerm = term
|
||||
votedFor = null
|
||||
convert to follower
|
||||
|
||||
# Check if candidate's log is at least as up-to-date as ours
|
||||
ourLastTerm = log[len(log)-1].term if log else 0
|
||||
ourLastIndex = len(log) - 1
|
||||
|
||||
logOK = (lastLogTerm > ourLastTerm) or
|
||||
(lastLogTerm == ourLastTerm and lastLogIndex >= ourLastIndex)
|
||||
|
||||
if (votedFor is null or votedFor == candidateId) and logOK:
|
||||
votedFor = candidateId
|
||||
persist(currentTerm, votedFor)
|
||||
reset election timer
|
||||
return {term: currentTerm, voteGranted: true}
|
||||
|
||||
return {term: currentTerm, voteGranted: false}
|
||||
```
|
||||
|
||||
### AppendEntries RPC
|
||||
|
||||
**Request**:
|
||||
```
|
||||
{
|
||||
term: int, # Leader's term
|
||||
leaderId: ServerId, # For follower to redirect clients
|
||||
prevLogIndex: int, # Index of log entry preceding new ones
|
||||
prevLogTerm: int, # Term of prevLogIndex entry
|
||||
entries[]: LogEntry, # Log entries to store (empty for heartbeat)
|
||||
leaderCommit: int # Leader's commitIndex
|
||||
}
|
||||
```
|
||||
|
||||
**Response**:
|
||||
```
|
||||
{
|
||||
term: int, # currentTerm, for leader to update itself
|
||||
success: bool # True if follower had matching prevLog entry
|
||||
}
|
||||
```
|
||||
|
||||
**Receiver implementation**:
|
||||
```
|
||||
on receive AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit):
|
||||
if term < currentTerm:
|
||||
return {term: currentTerm, success: false}
|
||||
|
||||
reset election timer
|
||||
|
||||
if term > currentTerm:
|
||||
currentTerm = term
|
||||
votedFor = null
|
||||
|
||||
convert to follower
|
||||
|
||||
# Check log consistency
|
||||
if prevLogIndex >= len(log) or
|
||||
(prevLogIndex >= 0 and log[prevLogIndex].term != prevLogTerm):
|
||||
return {term: currentTerm, success: false}
|
||||
|
||||
# Append new entries (handling conflicts)
|
||||
for i, entry in enumerate(entries):
|
||||
index = prevLogIndex + 1 + i
|
||||
if index < len(log):
|
||||
if log[index].term != entry.term:
|
||||
# Delete conflicting entry and all following
|
||||
log = log[:index]
|
||||
log.append(entry)
|
||||
else:
|
||||
log.append(entry)
|
||||
|
||||
persist(currentTerm, votedFor, log)
|
||||
|
||||
# Update commit index
|
||||
if leaderCommit > commitIndex:
|
||||
commitIndex = min(leaderCommit, len(log) - 1)
|
||||
|
||||
return {term: currentTerm, success: true}
|
||||
```
|
||||
|
||||
### Leader Behavior
|
||||
|
||||
```
|
||||
on becoming leader:
|
||||
for each server:
|
||||
nextIndex[server] = len(log)
|
||||
matchIndex[server] = 0
|
||||
|
||||
start sending heartbeats
|
||||
|
||||
on receiving client command:
|
||||
append entry to local log
|
||||
persist log
|
||||
send AppendEntries to all followers
|
||||
|
||||
on receiving AppendEntries response from server:
|
||||
if response.success:
|
||||
matchIndex[server] = prevLogIndex + len(entries)
|
||||
nextIndex[server] = matchIndex[server] + 1
|
||||
|
||||
# Update commit index
|
||||
for N from commitIndex+1 to len(log)-1:
|
||||
if log[N].term == currentTerm and
|
||||
|{s : matchIndex[s] >= N}| > |servers|/2:
|
||||
commitIndex = N
|
||||
else:
|
||||
nextIndex[server] = max(1, nextIndex[server] - 1)
|
||||
retry AppendEntries with lower prevLogIndex
|
||||
|
||||
on commitIndex update:
|
||||
while lastApplied < commitIndex:
|
||||
lastApplied++
|
||||
apply log[lastApplied].command to state machine
|
||||
```
|
||||
|
||||
### Election Timeout
|
||||
|
||||
```
|
||||
on election timeout (follower or candidate):
|
||||
currentTerm++
|
||||
convert to candidate
|
||||
votedFor = self
|
||||
persist(currentTerm, votedFor)
|
||||
reset election timer
|
||||
votes = 1 # Vote for self
|
||||
|
||||
for each server except self:
|
||||
send RequestVote(currentTerm, self, lastLogIndex, lastLogTerm)
|
||||
|
||||
wait for responses or timeout:
|
||||
if received votes > |servers|/2:
|
||||
become leader
|
||||
if received AppendEntries from valid leader:
|
||||
become follower
|
||||
if timeout:
|
||||
start new election
|
||||
```
|
||||
|
||||
## PBFT Complete Specification

### Message Types

**REQUEST**:
```
{
    type: "REQUEST",
    operation: o,  # Operation to execute
    timestamp: t,  # Client timestamp (for reply matching)
    client: c      # Client identifier
}
```

**PRE-PREPARE**:
```
{
    type: "PRE-PREPARE",
    view: v,      # Current view number
    sequence: n,  # Sequence number
    digest: d,    # Hash of request
    request: m    # The request message
}
signature(primary)
```

**PREPARE**:
```
{
    type: "PREPARE",
    view: v,
    sequence: n,
    digest: d,
    replica: i  # Sending replica
}
signature(replica_i)
```

**COMMIT**:
```
{
    type: "COMMIT",
    view: v,
    sequence: n,
    digest: d,
    replica: i
}
signature(replica_i)
```

**REPLY**:
```
{
    type: "REPLY",
    view: v,
    timestamp: t,
    client: c,
    replica: i,
    result: r  # Execution result
}
signature(replica_i)
```

### Replica State

```
view: int                                   # Current view
sequence: int                               # Last assigned sequence number (primary)
log[]: {request, prepares, commits, state}  # Log of requests
prepared_certificates: {}                   # Prepared certificates (pre-prepare + 2f matching prepares)
committed_certificates: {}                  # Committed certificates (2f+1 commits)
h: int                                      # Low water mark
H: int                                      # High water mark (h + L)
```

### Normal Operation Protocol

**Primary (replica p = v mod n)**:
```
on receive REQUEST(m) from client:
    if not primary for current view:
        forward to primary
        return

    n = assign_sequence_number()
    d = hash(m)

    broadcast PRE-PREPARE(v, n, d, m) to all replicas
    add to log
```

**All replicas**:
```
on receive PRE-PREPARE(v, n, d, m) from primary:
    if v != current_view:
        ignore
    if already accepted pre-prepare for (v, n) with different digest:
        ignore
    if not in_view_as_backup(v):
        ignore
    if not h < n <= H:
        ignore  # Outside sequence window

    # Valid pre-prepare
    add to log
    broadcast PREPARE(v, n, d, i) to all replicas

on receive PREPARE(v, n, d, j) from replica j:
    if v != current_view:
        ignore

    add to log[n].prepares

    if |log[n].prepares| >= 2f and not already_prepared(v, n, d):
        # Prepared certificate complete
        mark as prepared
        broadcast COMMIT(v, n, d, i) to all replicas

on receive COMMIT(v, n, d, j) from replica j:
    if v != current_view:
        ignore

    add to log[n].commits

    if |log[n].commits| >= 2f + 1 and prepared(v, n, d):
        # Committed certificate complete
        if all entries < n are committed:
            execute(m)
            send REPLY(v, t, c, i, result) to client
```

### View Change Protocol

**Timeout trigger**:
```
on request timeout (no progress):
    view_change_timeout++
    broadcast VIEW-CHANGE(v+1, n, C, P, i)

where:
    n = last stable checkpoint sequence number
    C = checkpoint certificate (2f+1 checkpoint messages)
    P = set of prepared certificates for messages after n
```

**VIEW-CHANGE**:
```
{
    type: "VIEW-CHANGE",
    view: v,         # New view number
    sequence: n,     # Checkpoint sequence
    checkpoints: C,  # Checkpoint certificate
    prepared: P,     # Set of prepared certificates
    replica: i
}
signature(replica_i)
```

**New primary (p' = v mod n)**:
```
on receive 2f VIEW-CHANGE for view v:
    V = set of valid view-change messages

    # Compute O: set of requests to re-propose
    O = {}
    for seq in max_checkpoint_seq(V) to max_seq(V):
        if exists prepared certificate for seq in V:
            O[seq] = request from certificate
        else:
            O[seq] = null-request  # No-op

    broadcast NEW-VIEW(v, V, O)

    # Re-run protocol for requests in O
    for seq, request in O:
        if request != null:
            send PRE-PREPARE(v, seq, hash(request), request)
```

**NEW-VIEW**:
```
{
    type: "NEW-VIEW",
    view: v,
    view_changes: V,  # 2f+1 view-change messages
    pre_prepares: O   # Set of pre-prepare messages
}
signature(primary)
```

### Checkpointing

Periodic stable checkpoints to garbage collect logs:

```
every K requests:
    state_hash = hash(state_machine_state)
    broadcast CHECKPOINT(n, state_hash, i)

on receive 2f+1 CHECKPOINT for (n, d):
    if all digests match:
        create stable checkpoint
        h = n  # Move low water mark
        garbage_collect(entries < n)
```

## HotStuff Protocol

Linear complexity BFT using threshold signatures.

### Key Innovation

- **Three phases plus decide**: prepare → pre-commit → commit, followed by a final decide step
- **Pipelining**: Next proposal starts before current finishes
- **Threshold signatures**: O(n) total messages instead of O(n²)

### Message Flow

```
Phase 1 (Prepare):
    Leader: broadcast PREPARE(v, node)
    Replicas: sign and send partial signature to leader
    Leader: aggregate into prepare certificate QC

Phase 2 (Pre-commit):
    Leader: broadcast PRE-COMMIT(v, QC_prepare)
    Replicas: sign and send partial signature
    Leader: aggregate into pre-commit certificate

Phase 3 (Commit):
    Leader: broadcast COMMIT(v, QC_precommit)
    Replicas: sign and send partial signature
    Leader: aggregate into commit certificate

Phase 4 (Decide):
    Leader: broadcast DECIDE(v, QC_commit)
    Replicas: execute and commit
```

### Pipelining

```
Block k:   [prepare] [pre-commit] [commit]     [decide]
Block k+1:           [prepare]    [pre-commit] [commit]     [decide]
Block k+2:                        [prepare]    [pre-commit] [commit] [decide]
```

Each phase of block k+1 piggybacks on messages for block k.

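The leader's aggregation step can be sketched in Go. The `PartialSig` and `QC` types and the quorum bookkeeping below are illustrative stand-ins (a real implementation would verify and combine threshold-signature shares), not code from any HotStuff library:

```go
package main

import "fmt"

// PartialSig is a placeholder for a replica's partial threshold signature.
type PartialSig struct {
	Replica int
	Digest  string
}

// QC is a quorum certificate: 2f+1 partial signatures over one digest.
type QC struct {
	Digest string
	Sigs   []PartialSig
}

// collectQC gathers partial signatures until the 2f+1 quorum is met.
// Returns nil if the channel closes before a quorum forms.
func collectQC(digest string, f int, sigs <-chan PartialSig) *QC {
	quorum := 2*f + 1
	qc := &QC{Digest: digest}
	seen := map[int]bool{}
	for sig := range sigs {
		if sig.Digest != digest || seen[sig.Replica] {
			continue // wrong proposal or duplicate vote
		}
		seen[sig.Replica] = true
		qc.Sigs = append(qc.Sigs, sig)
		if len(qc.Sigs) == quorum {
			return qc
		}
	}
	return nil
}

func main() {
	sigs := make(chan PartialSig, 4)
	for i := 0; i < 4; i++ { // n = 4, f = 1: quorum is 3
		sigs <- PartialSig{Replica: i, Digest: "block-k"}
	}
	close(sigs)
	fmt.Println(len(collectQC("block-k", 1, sigs).Sigs)) // 3
}
```
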
## Protocol Comparison Matrix

| Feature | Paxos | Raft | PBFT | HotStuff |
|---------|-------|------|------|----------|
| Fault model | Crash | Crash | Byzantine | Byzantine |
| Fault tolerance | f with 2f+1 | f with 2f+1 | f with 3f+1 | f with 3f+1 |
| Message complexity | O(n) | O(n) | O(n²) | O(n) |
| Leader required | No (helps) | Yes | Yes | Yes |
| Phases | 2 | 2 | 3 | 3 |
| View change | Complex | Simple | Complex | Simple |

610
.claude/skills/distributed-systems/references/logical-clocks.md
Normal file
@@ -0,0 +1,610 @@
# Logical Clocks - Implementation Reference

Detailed implementations and algorithms for causality tracking.

## Lamport Clock Implementation

### Data Structure

```go
type LamportClock struct {
	counter uint64
	mu      sync.Mutex
}

func NewLamportClock() *LamportClock {
	return &LamportClock{counter: 0}
}
```

### Operations

```go
// Tick increments clock for local event
func (c *LamportClock) Tick() uint64 {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counter++
	return c.counter
}

// Send returns timestamp for outgoing message
func (c *LamportClock) Send() uint64 {
	return c.Tick()
}

// Receive updates clock based on incoming message timestamp
func (c *LamportClock) Receive(msgTime uint64) uint64 {
	c.mu.Lock()
	defer c.mu.Unlock()

	if msgTime > c.counter {
		c.counter = msgTime
	}
	c.counter++
	return c.counter
}

// Time returns current clock value without incrementing
func (c *LamportClock) Time() uint64 {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.counter
}
```

### Usage Example

```go
// Process A
clockA := NewLamportClock()
e1 := clockA.Tick()      // Event 1: time=1
msgTime := clockA.Send() // Send: time=2

// Process B
clockB := NewLamportClock()
e2 := clockB.Tick()           // Event 2: time=1
e3 := clockB.Receive(msgTime) // Receive: time=3 (max(1,2)+1)
```

## Vector Clock Implementation

### Data Structure

```go
type VectorClock struct {
	clocks map[string]uint64 // processID -> logical time
	self   string            // this process's ID
	mu     sync.RWMutex
}

func NewVectorClock(processID string, allProcesses []string) *VectorClock {
	clocks := make(map[string]uint64)
	for _, p := range allProcesses {
		clocks[p] = 0
	}
	return &VectorClock{
		clocks: clocks,
		self:   processID,
	}
}
```

### Operations

```go
// Tick increments own clock
func (vc *VectorClock) Tick() map[string]uint64 {
	vc.mu.Lock()
	defer vc.mu.Unlock()

	vc.clocks[vc.self]++
	return vc.copy()
}

// Send returns copy of vector for message
func (vc *VectorClock) Send() map[string]uint64 {
	return vc.Tick()
}

// Receive merges incoming vector and increments
func (vc *VectorClock) Receive(incoming map[string]uint64) map[string]uint64 {
	vc.mu.Lock()
	defer vc.mu.Unlock()

	// Merge: take max of each component
	for pid, time := range incoming {
		if time > vc.clocks[pid] {
			vc.clocks[pid] = time
		}
	}

	// Increment own clock
	vc.clocks[vc.self]++
	return vc.copy()
}

// copy returns a copy of the vector
func (vc *VectorClock) copy() map[string]uint64 {
	result := make(map[string]uint64)
	for k, v := range vc.clocks {
		result[k] = v
	}
	return result
}
```

### Comparison Functions

```go
// Compare returns ordering relationship between two vectors
type Ordering int

const (
	Equal          Ordering = iota // V1 == V2
	HappenedBefore                 // V1 < V2
	HappenedAfter                  // V1 > V2
	Concurrent                     // V1 || V2
)

func Compare(v1, v2 map[string]uint64) Ordering {
	less := false
	greater := false

	// Get all keys
	allKeys := make(map[string]bool)
	for k := range v1 {
		allKeys[k] = true
	}
	for k := range v2 {
		allKeys[k] = true
	}

	for k := range allKeys {
		t1 := v1[k] // 0 if not present
		t2 := v2[k]

		if t1 < t2 {
			less = true
		}
		if t1 > t2 {
			greater = true
		}
	}

	if !less && !greater {
		return Equal
	}
	if less && !greater {
		return HappenedBefore
	}
	if greater && !less {
		return HappenedAfter
	}
	return Concurrent
}

// IsConcurrent checks if two events are concurrent
func IsConcurrent(v1, v2 map[string]uint64) bool {
	return Compare(v1, v2) == Concurrent
}

// CausallyPrecedes checks if v1 -> v2 (v1 causally precedes v2).
// (Named to avoid colliding with the HappenedBefore constant above.)
func CausallyPrecedes(v1, v2 map[string]uint64) bool {
	return Compare(v1, v2) == HappenedBefore
}
```

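A short usage sketch building on the types above, showing how concurrent updates are detected (the process names are illustrative):

```go
vcA := NewVectorClock("A", []string{"A", "B"})
vcB := NewVectorClock("B", []string{"A", "B"})

tsA := vcA.Tick() // {A:1, B:0}
tsB := vcB.Tick() // {A:0, B:1}

// Independent events on A and B are concurrent.
fmt.Println(IsConcurrent(tsA, tsB)) // true

// After B receives A's timestamp, A's event causally precedes B's state.
tsB2 := vcB.Receive(tsA)                 // {A:1, B:2}
fmt.Println(CausallyPrecedes(tsA, tsB2)) // true
```
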
## Interval Tree Clock Implementation

### Data Structures

```go
// ID represents the identity tree
type ID struct {
	IsLeaf bool
	Value  int // 0 or 1 for leaves
	Left   *ID // nil for leaves
	Right  *ID
}

// Stamp represents the event tree
type Stamp struct {
	Base  int
	Left  *Stamp // nil for leaf stamps
	Right *Stamp
}

// ITC combines ID and Stamp
type ITC struct {
	ID    *ID
	Stamp *Stamp
}
```

### ID Operations

```go
// NewSeedID creates initial full ID (1)
func NewSeedID() *ID {
	return &ID{IsLeaf: true, Value: 1}
}

// Fork splits an ID into two
func (id *ID) Fork() (*ID, *ID) {
	if id.IsLeaf {
		if id.Value == 0 {
			// Cannot fork zero ID
			return &ID{IsLeaf: true, Value: 0},
				&ID{IsLeaf: true, Value: 0}
		}
		// Split full ID into left and right halves
		return &ID{
				IsLeaf: false,
				Left:   &ID{IsLeaf: true, Value: 1},
				Right:  &ID{IsLeaf: true, Value: 0},
			},
			&ID{
				IsLeaf: false,
				Left:   &ID{IsLeaf: true, Value: 0},
				Right:  &ID{IsLeaf: true, Value: 1},
			}
	}

	// Fork from non-leaf: give half to each
	if id.Left.IsLeaf && id.Left.Value == 0 {
		// Left is zero, fork right
		newRight1, newRight2 := id.Right.Fork()
		return &ID{IsLeaf: false, Left: id.Left, Right: newRight1},
			&ID{IsLeaf: false, Left: &ID{IsLeaf: true, Value: 0}, Right: newRight2}
	}
	if id.Right.IsLeaf && id.Right.Value == 0 {
		// Right is zero, fork left
		newLeft1, newLeft2 := id.Left.Fork()
		return &ID{IsLeaf: false, Left: newLeft1, Right: id.Right},
			&ID{IsLeaf: false, Left: newLeft2, Right: &ID{IsLeaf: true, Value: 0}}
	}

	// Both have IDs, split
	return &ID{IsLeaf: false, Left: id.Left, Right: &ID{IsLeaf: true, Value: 0}},
		&ID{IsLeaf: false, Left: &ID{IsLeaf: true, Value: 0}, Right: id.Right}
}

// Join merges two IDs
func Join(id1, id2 *ID) *ID {
	if id1.IsLeaf && id1.Value == 0 {
		return id2
	}
	if id2.IsLeaf && id2.Value == 0 {
		return id1
	}
	if id1.IsLeaf && id2.IsLeaf && id1.Value == 1 && id2.Value == 1 {
		return &ID{IsLeaf: true, Value: 1}
	}

	// Normalize to non-leaf
	left1 := id1.Left
	right1 := id1.Right
	left2 := id2.Left
	right2 := id2.Right

	if id1.IsLeaf {
		left1 = id1
		right1 = id1
	}
	if id2.IsLeaf {
		left2 = id2
		right2 = id2
	}

	newLeft := Join(left1, left2)
	newRight := Join(right1, right2)

	return normalize(&ID{IsLeaf: false, Left: newLeft, Right: newRight})
}

func normalize(id *ID) *ID {
	if !id.IsLeaf {
		if id.Left.IsLeaf && id.Right.IsLeaf &&
			id.Left.Value == id.Right.Value {
			return &ID{IsLeaf: true, Value: id.Left.Value}
		}
	}
	return id
}
```

### Stamp Operations

```go
// NewStamp creates initial stamp (0)
func NewStamp() *Stamp {
	return &Stamp{Base: 0}
}

// Event increments the stamp for the given ID
func Event(id *ID, stamp *Stamp) *Stamp {
	if id.IsLeaf {
		if id.Value == 1 {
			return &Stamp{Base: stamp.Base + 1}
		}
		return stamp // Cannot increment with zero ID
	}

	// Non-leaf ID: fill where we have ID
	if id.Left.IsLeaf && id.Left.Value == 1 {
		// Have left ID, increment left
		newLeft := Event(&ID{IsLeaf: true, Value: 1}, getLeft(stamp))
		return normalizeStamp(&Stamp{
			Base:  stamp.Base,
			Left:  newLeft,
			Right: getRight(stamp),
		})
	}
	if id.Right.IsLeaf && id.Right.Value == 1 {
		newRight := Event(&ID{IsLeaf: true, Value: 1}, getRight(stamp))
		return normalizeStamp(&Stamp{
			Base:  stamp.Base,
			Left:  getLeft(stamp),
			Right: newRight,
		})
	}

	// Both non-zero, choose lower side
	leftMax := maxStamp(getLeft(stamp))
	rightMax := maxStamp(getRight(stamp))

	if leftMax <= rightMax {
		return normalizeStamp(&Stamp{
			Base:  stamp.Base,
			Left:  Event(id.Left, getLeft(stamp)),
			Right: getRight(stamp),
		})
	}
	return normalizeStamp(&Stamp{
		Base:  stamp.Base,
		Left:  getLeft(stamp),
		Right: Event(id.Right, getRight(stamp)),
	})
}

func getLeft(s *Stamp) *Stamp {
	if s.Left == nil {
		return &Stamp{Base: 0}
	}
	return s.Left
}

func getRight(s *Stamp) *Stamp {
	if s.Right == nil {
		return &Stamp{Base: 0}
	}
	return s.Right
}

func maxStamp(s *Stamp) int {
	if s.Left == nil && s.Right == nil {
		return s.Base
	}
	left := 0
	right := 0
	if s.Left != nil {
		left = maxStamp(s.Left)
	}
	if s.Right != nil {
		right = maxStamp(s.Right)
	}
	max := left
	if right > max {
		max = right
	}
	return s.Base + max
}

// JoinStamps merges two stamps
func JoinStamps(s1, s2 *Stamp) *Stamp {
	// Take max at each level
	base := s1.Base
	if s2.Base > base {
		base = s2.Base
	}

	// Adjust for base difference
	adj1 := s1.Base
	adj2 := s2.Base

	return normalizeStamp(&Stamp{
		Base:  base,
		Left:  joinStampsRecursive(s1.Left, s2.Left, adj1-base, adj2-base),
		Right: joinStampsRecursive(s1.Right, s2.Right, adj1-base, adj2-base),
	})
}

func normalizeStamp(s *Stamp) *Stamp {
	if s.Left == nil && s.Right == nil {
		return s
	}
	if s.Left != nil && s.Right != nil {
		if s.Left.Base > 0 && s.Right.Base > 0 {
			min := s.Left.Base
			if s.Right.Base < min {
				min = s.Right.Base
			}
			return &Stamp{
				Base:  s.Base + min,
				Left:  &Stamp{Base: s.Left.Base - min, Left: s.Left.Left, Right: s.Left.Right},
				Right: &Stamp{Base: s.Right.Base - min, Left: s.Right.Left, Right: s.Right.Right},
			}
		}
	}
	return s
}
```

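`JoinStamps` above calls `joinStampsRecursive`, which this reference leaves undefined. A minimal sketch under the assumption that the join is a component-wise maximum over the flattened event trees, carrying each side's base offset down the recursion (illustrative, not the canonical ITC join):

```go
// joinStampsRecursive merges two (possibly nil) subtrees whose parents'
// bases differ from the chosen common base by adj1 and adj2.
// Assumption: join = component-wise max of the two event trees.
func joinStampsRecursive(s1, s2 *Stamp, adj1, adj2 int) *Stamp {
	if s1 == nil && s2 == nil {
		return nil
	}
	b1, b2 := adj1, adj2
	var l1, r1, l2, r2 *Stamp
	if s1 != nil {
		b1 += s1.Base
		l1, r1 = s1.Left, s1.Right
	}
	if s2 != nil {
		b2 += s2.Base
		l2, r2 = s2.Left, s2.Right
	}
	base := b1
	if b2 > base {
		base = b2
	}
	return normalizeStamp(&Stamp{
		Base:  base,
		Left:  joinStampsRecursive(l1, l2, b1-base, b2-base),
		Right: joinStampsRecursive(r1, r2, b1-base, b2-base),
	})
}
```
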
## Hybrid Logical Clock Implementation

```go
type HLC struct {
	l  int64 // logical component (physical time)
	c  int64 // counter
	mu sync.Mutex
}

func NewHLC() *HLC {
	return &HLC{l: 0, c: 0}
}

type HLCTimestamp struct {
	L int64
	C int64
}

func (hlc *HLC) physicalTime() int64 {
	return time.Now().UnixNano()
}

// Now returns current HLC timestamp for local/send event
func (hlc *HLC) Now() HLCTimestamp {
	hlc.mu.Lock()
	defer hlc.mu.Unlock()

	pt := hlc.physicalTime()

	if pt > hlc.l {
		hlc.l = pt
		hlc.c = 0
	} else {
		hlc.c++
	}

	return HLCTimestamp{L: hlc.l, C: hlc.c}
}

// Update updates HLC based on received timestamp
func (hlc *HLC) Update(received HLCTimestamp) HLCTimestamp {
	hlc.mu.Lock()
	defer hlc.mu.Unlock()

	pt := hlc.physicalTime()

	if pt > hlc.l && pt > received.L {
		hlc.l = pt
		hlc.c = 0
	} else if received.L > hlc.l {
		hlc.l = received.L
		hlc.c = received.C + 1
	} else if hlc.l > received.L {
		hlc.c++
	} else { // hlc.l == received.L
		if received.C > hlc.c {
			hlc.c = received.C + 1
		} else {
			hlc.c++
		}
	}

	return HLCTimestamp{L: hlc.l, C: hlc.c}
}

// Compare compares two HLC timestamps
func (t1 HLCTimestamp) Compare(t2 HLCTimestamp) int {
	if t1.L < t2.L {
		return -1
	}
	if t1.L > t2.L {
		return 1
	}
	if t1.C < t2.C {
		return -1
	}
	if t1.C > t2.C {
		return 1
	}
	return 0
}
```

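A brief usage sketch of the send/receive path; the property illustrated is that a receive is always ordered after the send that caused it, regardless of physical clock skew:

```go
a := NewHLC()
b := NewHLC()

sendTS := a.Now()          // A timestamps an outgoing message
recvTS := b.Update(sendTS) // B merges it on receipt

fmt.Println(sendTS.Compare(recvTS) < 0) // true: send happens-before receive
```
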
## Causal Broadcast Implementation

```go
type CausalBroadcast struct {
	vc      *VectorClock
	pending []PendingMessage
	deliver func(Message)
	mu      sync.Mutex
}

type PendingMessage struct {
	Msg       Message
	Sender    string
	Timestamp map[string]uint64
}

func NewCausalBroadcast(processID string, processes []string, deliver func(Message)) *CausalBroadcast {
	return &CausalBroadcast{
		vc:      NewVectorClock(processID, processes),
		pending: make([]PendingMessage, 0),
		deliver: deliver,
	}
}

// Broadcast sends a message to all processes
func (cb *CausalBroadcast) Broadcast(msg Message) map[string]uint64 {
	cb.mu.Lock()
	defer cb.mu.Unlock()

	timestamp := cb.vc.Send()
	// Actual network broadcast would happen here
	return timestamp
}

// Receive handles an incoming message
func (cb *CausalBroadcast) Receive(msg Message, sender string, timestamp map[string]uint64) {
	cb.mu.Lock()
	defer cb.mu.Unlock()

	// Add to pending (remembering the sender for the delivery check)
	cb.pending = append(cb.pending, PendingMessage{Msg: msg, Sender: sender, Timestamp: timestamp})

	// Try to deliver pending messages
	cb.tryDeliver()
}

func (cb *CausalBroadcast) tryDeliver() {
	changed := true
	for changed {
		changed = false

		for i, pending := range cb.pending {
			if cb.canDeliver(pending.Timestamp, pending.Sender) {
				// Deliver message
				cb.vc.Receive(pending.Timestamp)
				cb.deliver(pending.Msg)

				// Remove from pending
				cb.pending = append(cb.pending[:i], cb.pending[i+1:]...)
				changed = true
				break
			}
		}
	}
}

func (cb *CausalBroadcast) canDeliver(msgVC map[string]uint64, sender string) bool {
	currentVC := cb.vc.clocks

	for pid, msgTime := range msgVC {
		if pid == sender {
			// Must be the next expected message from the sender
			if msgTime != currentVC[pid]+1 {
				return false
			}
		} else {
			// All other dependencies must be satisfied
			if msgTime > currentVC[pid] {
				return false
			}
		}
	}
	return true
}
```

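The `Message` type is never defined in this reference; assuming it is a simple alias such as `type Message string`, a delivery-ordering sketch looks like this:

```go
var delivered []Message
cb := NewCausalBroadcast("B", []string{"A", "B"}, func(m Message) {
	delivered = append(delivered, m)
})

// A's second message arrives first; it is buffered until the first arrives.
cb.Receive(Message("second"), "A", map[string]uint64{"A": 2, "B": 0})
cb.Receive(Message("first"), "A", map[string]uint64{"A": 1, "B": 0})
// delivered is now [first second]
```
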
369
.claude/skills/elliptic-curves/SKILL.md
Normal file
@@ -0,0 +1,369 @@
---
name: elliptic-curves
description: This skill should be used when working with elliptic curve cryptography, implementing or debugging secp256k1 operations, understanding modular arithmetic and finite fields, or implementing signature schemes like ECDSA and Schnorr. Provides comprehensive knowledge of group theory foundations, curve mathematics, point multiplication algorithms, and cryptographic optimizations.
---

# Elliptic Curve Cryptography

This skill provides deep knowledge of elliptic curve cryptography (ECC), with particular focus on the secp256k1 curve used in Bitcoin and Nostr, including the mathematical foundations and implementation considerations.

## When to Use This Skill

- Implementing or debugging elliptic curve operations
- Working with secp256k1, ECDSA, or Schnorr signatures
- Understanding modular arithmetic and finite field operations
- Optimizing cryptographic code for performance
- Analyzing security properties of curve-based cryptography

## Mathematical Foundations

### Groups in Cryptography

A **group** is a set G with a binary operation (often denoted · or +) satisfying:

1. **Closure**: For all a, b ∈ G, the result a · b is also in G
2. **Associativity**: (a · b) · c = a · (b · c)
3. **Identity**: There exists e ∈ G such that e · a = a · e = a
4. **Inverse**: For each a ∈ G, there exists a⁻¹ such that a · a⁻¹ = e

A **cyclic group** is generated by repeatedly applying the operation to a single element (the generator). The **order** of a group is the number of elements.

**Why groups matter in cryptography**: The discrete logarithm problem—given g and gⁿ, find n—is computationally hard in certain groups, forming the security basis for ECC.

### Modular Arithmetic

Modular arithmetic constrains calculations to a finite range [0, p-1] for some modulus p:

```
a ≡ b (mod p) means p divides (a - b)

Operations:
- Addition: (a + b) mod p
- Subtraction: (a - b + p) mod p
- Multiplication: (a × b) mod p
- Inverse: a⁻¹ where (a × a⁻¹) ≡ 1 (mod p)
```

**Computing modular inverse** (a runnable sketch follows this list):
- **Fermat's Little Theorem**: If p is prime, a⁻¹ ≡ a^(p-2) (mod p)
- **Extended Euclidean Algorithm**: More efficient for general cases
- **SafeGCD Algorithm**: Constant-time, used in libsecp256k1

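A minimal, runnable comparison of the first two approaches using Go's `math/big` (the small prime is chosen only for illustration):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := big.NewInt(97) // illustrative small prime
	a := big.NewInt(42)

	// Fermat's Little Theorem: a⁻¹ = a^(p-2) mod p
	fermat := new(big.Int).Exp(a, new(big.Int).Sub(p, big.NewInt(2)), p)

	// Extended Euclidean Algorithm via the standard library
	euclid := new(big.Int).ModInverse(a, p)

	fmt.Println(fermat, euclid) // same value: 67

	// Sanity check: (a × a⁻¹) ≡ 1 (mod p)
	check := new(big.Int).Mul(a, fermat)
	fmt.Println(check.Mod(check, p)) // 1
}
```
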
### Finite Fields (Galois Fields)

A **finite field** GF(p) or 𝔽ₚ is a field with a finite number of elements where:
- p must be prime (or a prime power for extension fields)
- All arithmetic operations are defined and produce elements within the field
- Every non-zero element has a multiplicative inverse

For cryptographic curves like secp256k1, the field is 𝔽ₚ where p is a 256-bit prime.

**Key property**: The non-zero elements of a finite field form a cyclic group under multiplication.

## Elliptic Curves

### The Curve Equation

An elliptic curve over a finite field 𝔽ₚ is defined by the Weierstrass equation:

```
y² = x³ + ax + b (mod p)
```

The curve must satisfy the non-singularity condition: 4a³ + 27b² ≠ 0

### Points on the Curve

A point P = (x, y) is on the curve if it satisfies the equation. The set of all points, plus a special "point at infinity" O (the identity element), forms an abelian group.

### Point Operations

**Point Addition (P + Q where P ≠ Q)**:
```
λ = (y₂ - y₁) / (x₂ - x₁) (mod p)
x₃ = λ² - x₁ - x₂ (mod p)
y₃ = λ(x₁ - x₃) - y₁ (mod p)
```

**Point Doubling (P + P = 2P)**:
```
λ = (3x₁² + a) / (2y₁) (mod p)
x₃ = λ² - 2x₁ (mod p)
y₃ = λ(x₁ - x₃) - y₁ (mod p)
```

**Point at Infinity**: Acts as the identity element; P + O = P for all P.

**Point Negation**: -P = (x, -y) = (x, p - y)

## The secp256k1 Curve

### Parameters

secp256k1 is defined by SECG (Standards for Efficient Cryptography Group):

```
Curve equation: y² = x³ + 7 (a = 0, b = 7)

Prime modulus p:
0xFFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F
= 2²⁵⁶ - 2³² - 977

Group order n:
0xFFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE BAAEDCE6 AF48A03B BFD25E8C D0364141

Generator point G:
Gx = 0x79BE667E F9DCBBAC 55A06295 CE870B07 029BFCDB 2DCE28D9 59F2815B 16F81798
Gy = 0x483ADA77 26A3C465 5DA4FBFC 0E1108A8 FD17B448 A6855419 9C47D08F FB10D4B8

Cofactor h = 1
```

### Why secp256k1?

1. **Koblitz curve**: a = 0 enables faster computation (no ax term)
2. **Special prime**: p = 2²⁵⁶ - 2³² - 977 allows efficient modular reduction
3. **Deterministic construction**: Not randomly generated, reducing backdoor concerns
4. **~30% faster** than random curves when fully optimized

### Efficient Modular Reduction

The special form of p enables fast reduction without general division:

```
For p = 2²⁵⁶ - 2³² - 977:
To reduce a 512-bit number c = c_high × 2²⁵⁶ + c_low:
c ≡ c_low + c_high × 2³² + c_high × 977 (mod p)
```

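A sketch of this identity with `math/big`. Real implementations work on fixed-size limbs rather than big integers; this only demonstrates the substitution 2²⁵⁶ ≡ 2³² + 977 (mod p):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p, _ := new(big.Int).SetString(
		"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)

	// A 512-bit value to reduce, e.g. the product of two field elements.
	c := new(big.Int).Lsh(big.NewInt(1), 511)

	// Split c = c_high × 2²⁵⁶ + c_low.
	mask := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
	low := new(big.Int).And(c, mask)
	high := new(big.Int).Rsh(c, 256)

	// c ≡ c_low + c_high × 2³² + c_high × 977 (mod p)
	r := new(big.Int).Add(low, new(big.Int).Lsh(high, 32))
	r.Add(r, new(big.Int).Mul(high, big.NewInt(977)))
	r.Mod(r, p) // final fold; a limb implementation repeats the substitution instead

	fmt.Println(r.Cmp(new(big.Int).Mod(c, p)) == 0) // true
}
```
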
## Point Multiplication Algorithms

Scalar multiplication kP (computing P + P + ... + P, k times) is the core operation.

### Double-and-Add (Binary Method)

```
Input: k (scalar), P (point)
Output: kP

R = O (point at infinity)
for i from bit_length(k)-1 down to 0:
    R = 2R          # Point doubling
    if bit i of k is 1:
        R = R + P   # Point addition
return R
```

**Complexity**: O(log k) point operations
**Vulnerability**: Timing side-channels (different branches for 0/1 bits)

### Montgomery Ladder

Constant-time algorithm that performs the same operations regardless of bit values:

```
Input: k (scalar), P (point)
Output: kP

R0 = O
R1 = P
for i from bit_length(k)-1 down to 0:
    if bit i of k is 0:
        R1 = R0 + R1
        R0 = 2R0
    else:
        R0 = R0 + R1
        R1 = 2R1
return R0
```

**Advantage**: Resistant to simple power analysis and timing attacks.

### Window Methods (w-NAF)

Precompute small multiples of P, then process w bits at a time:

```
w-NAF representation reduces additions by ~1/3 compared to binary
Precomputation table: [P, 3P, 5P, 7P, ...] for w=4
```

### Endomorphism Optimization (GLV Method)

secp256k1 has an efficiently computable endomorphism φ where:
```
φ(x, y) = (βx, y) where β³ ≡ 1 (mod p)
φ(P) = λP where λ³ ≡ 1 (mod n)
```

This allows splitting scalar k into k₁ + k₂λ with smaller k₁, k₂, reducing operations by ~33-50%.

### Multi-Scalar Multiplication (Strauss-Shamir)

For computing k₁P₁ + k₂P₂ (common in signature verification):

```
Process both scalars simultaneously, combining operations
Reduces work compared to separate multiplications
```

## Coordinate Systems

### Affine Coordinates

Standard (x, y) representation. Requires modular inversion for each operation.

### Projective Coordinates

Represent (X:Y:Z) where x = X/Z, y = Y/Z:
- Avoids inversions during intermediate computations
- Only one inversion at the end to convert back to affine

### Jacobian Coordinates

Represent (X:Y:Z) where x = X/Z², y = Y/Z³:
- Fastest for point doubling
- Used extensively in libsecp256k1

### López-Dahab Coordinates

For curves over GF(2ⁿ), optimized for binary field arithmetic.

## Signature Schemes

### ECDSA (Elliptic Curve Digital Signature Algorithm)

**Key Generation**:
```
Private key: d (random integer in [1, n-1])
Public key: Q = dG
```

**Signing message m**:
```
1. Hash: e = H(m) truncated to curve order bit length
2. Random: k ∈ [1, n-1]
3. Compute: (x, y) = kG
4. Calculate: r = x mod n (if r = 0, restart with new k)
5. Calculate: s = k⁻¹(e + rd) mod n (if s = 0, restart)
6. Signature: (r, s)
```

**Verification of signature (r, s) on message m**:
```
1. Check: r, s ∈ [1, n-1]
2. Hash: e = H(m)
3. Compute: w = s⁻¹ mod n
4. Compute: u₁ = ew mod n, u₂ = rw mod n
5. Compute: (x, y) = u₁G + u₂Q
6. Valid if: r ≡ x (mod n)
```

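Go's standard library does not ship secp256k1, but the same sign/verify flow can be exercised on P-256 with `crypto/ecdsa`; a minimal round trip:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	// Key generation: d random in [1, n-1], Q = dG.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Sign a message digest; the nonce k is drawn internally from rand.Reader.
	digest := sha256.Sum256([]byte("hello"))
	r, s, err := ecdsa.Sign(rand.Reader, priv, digest[:])
	if err != nil {
		panic(err)
	}

	// Verify (r, s) against the public key.
	fmt.Println(ecdsa.Verify(&priv.PublicKey, digest[:], r, s)) // true
}
```
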
**Security considerations**:
- k MUST be unique per signature (reuse leaks private key)
- Use RFC 6979 for deterministic k derivation

### Schnorr Signatures (BIP-340)

Simpler, more efficient, with provable security.

**Signing message m**:
```
1. Random: k ∈ [1, n-1]
2. Compute: R = kG
3. Challenge: e = H(R || Q || m)
4. Response: s = k + ed mod n
5. Signature: (R, s) or (r_x, s) where r_x is x-coordinate of R
```

**Verification**:
```
1. Compute: e = H(R || Q || m)
2. Check: sG = R + eQ
```

**Advantages over ECDSA**:
- Linear: enables signature aggregation (MuSig)
- Simpler verification (no modular inverse)
- Batch verification support
- Provably secure in Random Oracle Model

## Implementation Considerations

### Constant-Time Operations

To prevent timing attacks:
- Avoid branches dependent on secret data
- Use constant-time comparison functions
- Mask operations to hide data-dependent timing

```go
// BAD: Timing leak
if secretBit == 1 {
	doOperation()
}

// GOOD: Constant-time conditional
result = conditionalSelect(secretBit, value1, value0)
```

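The `conditionalSelect` above is schematic; Go's standard library offers a real primitive in `crypto/subtle`:

```go
package main

import (
	"crypto/subtle"
	"fmt"
)

func main() {
	secretBit := 1 // must be 0 or 1
	value0, value1 := 10, 20

	// Selects value1 when secretBit == 1, value0 otherwise,
	// without branching on the secret.
	result := subtle.ConstantTimeSelect(secretBit, value1, value0)
	fmt.Println(result) // 20
}
```
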
### Memory Safety

- Zeroize sensitive data after use
- Avoid leaving secrets in registers or cache
- Use secure memory allocation when available

### Side-Channel Protections

- **Timing attacks**: Use constant-time algorithms
- **Power analysis**: Montgomery ladder, point blinding
- **Cache attacks**: Avoid table lookups indexed by secrets

### Random Number Generation

- Use cryptographically secure RNG for k in ECDSA
- Consider deterministic k (RFC 6979) for reproducibility
- Validate output is in valid range [1, n-1]

## libsecp256k1 Optimizations

The Bitcoin Core library includes:

1. **Field arithmetic**: 5×52-bit limbs for 64-bit platforms
2. **Scalar arithmetic**: 4×64-bit representation
3. **Endomorphism**: GLV decomposition enabled by default
4. **Batch inversion**: Amortizes expensive inversions
5. **SafeGCD**: Constant-time modular inverse
6. **Precomputed tables**: For generator point multiplications

## Security Properties

### Discrete Logarithm Problem (DLP)

Given P and Q = kP, finding k is computationally infeasible.

**Best known attacks**:
- Generic: Baby-step Giant-step, Pollard's rho: O(√n) operations
- For secp256k1: ~2¹²⁸ operations (128-bit security)

### Curve Security Criteria

- Large prime order subgroup
- Cofactor 1 (no small subgroup attacks)
- Resistant to MOV attack (embedding degree)
- Not anomalous (n ≠ p)

## Common Pitfalls

1. **k reuse in ECDSA**: Immediately leaks private key
2. **Weak random k**: Partially leaks key over multiple signatures
3. **Invalid curve points**: Validate points are on curve
4. **Small subgroup attacks**: Check point order (cofactor = 1 helps)
5. **Timing leaks**: Non-constant-time scalar multiplication

## References

For detailed implementations, see:
- `references/secp256k1-parameters.md` - Full curve parameters
- `references/algorithms.md` - Detailed algorithm pseudocode
- `references/security.md` - Security analysis and attack vectors

513
.claude/skills/elliptic-curves/references/algorithms.md
Normal file
@@ -0,0 +1,513 @@
# Elliptic Curve Algorithms

Detailed pseudocode for core elliptic curve operations.

## Field Arithmetic

### Modular Addition

```
function mod_add(a, b, p):
    result = a + b
    if result >= p:
        result = result - p
    return result
```

### Modular Subtraction

```
function mod_sub(a, b, p):
    if a >= b:
        return a - b
    else:
        return p - b + a
```

### Modular Multiplication

For general case:
```
function mod_mul(a, b, p):
    return (a * b) mod p
```

For secp256k1, optimized using the special form of p (no general division needed):
```
function mod_mul_secp256k1(a, b):
    # Compute full 512-bit product
    product = a * b

    # Split into high and low 256-bit parts
    low = product & ((1 << 256) - 1)
    high = product >> 256

    # Reduce: result ≡ low + high * (2³² + 977) (mod p)
    result = low + high * (1 << 32) + high * 977

    # May need additional reduction
    # (a real implementation applies the fold again, then a final conditional subtraction)
    while result >= p:
        result = result - p

    return result
```

### Modular Inverse

**Extended Euclidean Algorithm**:
```
function mod_inverse(a, p):
    if a == 0:
        error "No inverse exists for 0"

    old_r, r = p, a
    old_s, s = 0, 1

    while r != 0:
        quotient = old_r / r
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s

    if old_r != 1:
        error "No inverse exists"

    if old_s < 0:
        old_s = old_s + p

    return old_s
```

**Fermat's Little Theorem** (for prime p):
```
function mod_inverse_fermat(a, p):
    return mod_exp(a, p - 2, p)
```

### Modular Exponentiation (Square-and-Multiply)

```
function mod_exp(base, exp, p):
    result = 1
    base = base mod p

    while exp > 0:
        if exp & 1:  # exp is odd
            result = (result * base) mod p
        exp = exp >> 1
        base = (base * base) mod p

    return result
```

### Modular Square Root (Tonelli-Shanks)

For secp256k1, where p ≡ 3 (mod 4), the general Tonelli-Shanks algorithm reduces to a single exponentiation:
```
function mod_sqrt(a, p):
    # For p ≡ 3 (mod 4), sqrt(a) = a^((p+1)/4)
    return mod_exp(a, (p + 1) / 4, p)
```

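The same shortcut in Go, with the standard library's general Tonelli-Shanks routine (`big.Int.ModSqrt`) for comparison; the tiny prime is illustrative only:

```go
package main

import (
	"fmt"
	"math/big"
)

// modSqrt34 computes a square root of a modulo a prime p ≡ 3 (mod 4)
// via the single exponentiation a^((p+1)/4) mod p.
func modSqrt34(a, p *big.Int) *big.Int {
	e := new(big.Int).Add(p, big.NewInt(1))
	e.Rsh(e, 2) // (p+1)/4
	return new(big.Int).Exp(a, e, p)
}

func main() {
	p := big.NewInt(23) // 23 ≡ 3 (mod 4)
	a := big.NewInt(4)

	fmt.Println(modSqrt34(a, p)) // 2 (2² ≡ 4 mod 23; 21 is the other root)

	// General case in the standard library (nil if a is not a residue):
	fmt.Println(new(big.Int).ModSqrt(a, p))
}
```
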
## Point Operations

### Point Validation

```
function is_on_curve(P, a, b, p):
    if P is infinity:
        return true

    x, y = P
    left = (y * y) mod p
    right = (x * x * x + a * x + b) mod p

    return left == right
```

### Point Addition (Affine Coordinates)

```
function point_add(P, Q, a, p):
    if P is infinity:
        return Q
    if Q is infinity:
        return P

    x1, y1 = P
    x2, y2 = Q

    if x1 == x2:
        if y1 == mod_neg(y2, p):  # P = -Q
            return infinity
        else:  # P == Q
            return point_double(P, a, p)

    # λ = (y2 - y1) / (x2 - x1)
    numerator = mod_sub(y2, y1, p)
    denominator = mod_sub(x2, x1, p)
    λ = mod_mul(numerator, mod_inverse(denominator, p), p)

    # x3 = λ² - x1 - x2
    x3 = mod_sub(mod_sub(mod_mul(λ, λ, p), x1, p), x2, p)

    # y3 = λ(x1 - x3) - y1
    y3 = mod_sub(mod_mul(λ, mod_sub(x1, x3, p), p), y1, p)

    return (x3, y3)
```

### Point Doubling (Affine Coordinates)

```
function point_double(P, a, p):
    if P is infinity:
        return infinity

    x, y = P

    if y == 0:
        return infinity

    # λ = (3x² + a) / (2y)
    numerator = mod_add(mod_mul(3, mod_mul(x, x, p), p), a, p)
    denominator = mod_mul(2, y, p)
    λ = mod_mul(numerator, mod_inverse(denominator, p), p)

    # x3 = λ² - 2x
    x3 = mod_sub(mod_mul(λ, λ, p), mod_mul(2, x, p), p)

    # y3 = λ(x - x3) - y
    y3 = mod_sub(mod_mul(λ, mod_sub(x, x3, p), p), y, p)

    return (x3, y3)
```

### Point Negation

```
function point_negate(P, p):
    if P is infinity:
        return infinity

    x, y = P
    return (x, p - y)
```

## Scalar Multiplication

### Double-and-Add (Left-to-Right)

```
function scalar_mult_double_add(k, P, a, p):
    if k == 0 or P is infinity:
        return infinity

    if k < 0:
        k = -k
        P = point_negate(P, p)

    R = infinity
    bits = binary_representation(k)  # MSB first

    for bit in bits:
        R = point_double(R, a, p)
        if bit == 1:
            R = point_add(R, P, a, p)

    return R
```

### Montgomery Ladder (Constant-Time)

```
function scalar_mult_montgomery(k, P, a, p):
    R0 = infinity
    R1 = P

    bits = binary_representation(k)  # MSB first

    for bit in bits:
        if bit == 0:
            R1 = point_add(R0, R1, a, p)
            R0 = point_double(R0, a, p)
        else:
            R0 = point_add(R0, R1, a, p)
            R1 = point_double(R1, a, p)

    return R0
```

### w-NAF Scalar Multiplication

```
function compute_wNAF(k, w):
    # Convert scalar to width-w Non-Adjacent Form
    naf = []

    while k > 0:
        if k & 1:  # k is odd
            # Get w-bit window
            digit = k mod (1 << w)
            if digit >= (1 << (w-1)):
                digit = digit - (1 << w)
            naf.append(digit)
            k = k - digit
        else:
            naf.append(0)
        k = k >> 1

    return naf

function scalar_mult_wNAF(k, P, w, a, p):
    # Precompute odd multiples: [P, 3P, 5P, ..., (2^(w-1)-1)P]
    precomp = [P]
    P2 = point_double(P, a, p)
    for i in range(1, 1 << (w-1)):
        precomp.append(point_add(precomp[-1], P2, a, p))

    # Convert k to w-NAF
    naf = compute_wNAF(k, w)

    # Compute scalar multiplication
    R = infinity
    for i in range(len(naf) - 1, -1, -1):
        R = point_double(R, a, p)
        digit = naf[i]
        if digit > 0:
            R = point_add(R, precomp[(digit - 1) / 2], a, p)
        elif digit < 0:
            R = point_add(R, point_negate(precomp[(-digit - 1) / 2], p), a, p)

    return R
```

### Shamir's Trick (Multi-Scalar)

For computing k₁P + k₂Q efficiently:

```
function multi_scalar_mult(k1, P, k2, Q, a, p):
    # Precompute P + Q
    PQ = point_add(P, Q, a, p)

    # Get binary representations (same length, padded)
    bits1 = binary_representation(k1)
    bits2 = binary_representation(k2)
    max_len = max(len(bits1), len(bits2))
    bits1 = pad_left(bits1, max_len)
    bits2 = pad_left(bits2, max_len)

    R = infinity

    for i in range(max_len):
        R = point_double(R, a, p)

        b1, b2 = bits1[i], bits2[i]

        if b1 == 1 and b2 == 1:
            R = point_add(R, PQ, a, p)
        elif b1 == 1:
            R = point_add(R, P, a, p)
        elif b2 == 1:
            R = point_add(R, Q, a, p)

    return R
```

## Jacobian Coordinates

More efficient for repeated operations.

### Conversion

```
# Affine to Jacobian
function affine_to_jacobian(P):
    if P is infinity:
        return (1, 1, 0)  # Jacobian infinity
    x, y = P
    return (x, y, 1)

# Jacobian to Affine
function jacobian_to_affine(P, p):
    X, Y, Z = P
    if Z == 0:
        return infinity

    Z_inv = mod_inverse(Z, p)
    Z_inv2 = mod_mul(Z_inv, Z_inv, p)
    Z_inv3 = mod_mul(Z_inv2, Z_inv, p)

    x = mod_mul(X, Z_inv2, p)
    y = mod_mul(Y, Z_inv3, p)

    return (x, y)
```

### Point Doubling (Jacobian)

For curve y² = x³ + 7 (a = 0):

```
function jacobian_double(P, p):
    X, Y, Z = P

    if Y == 0:
        return (1, 1, 0)  # infinity

    # S = 4·X·Y², M = 3·X² (for a = 0)
    S = mod_mul(4, mod_mul(X, mod_mul(Y, Y, p), p), p)
    M = mod_mul(3, mod_mul(X, X, p), p)

    # X3 = M² - 2S
    X3 = mod_sub(mod_mul(M, M, p), mod_mul(2, S, p), p)
    # Y3 = M(S - X3) - 8·Y⁴
    Y4 = mod_mul(mod_mul(Y, Y, p), mod_mul(Y, Y, p), p)
    Y3 = mod_sub(mod_mul(M, mod_sub(S, X3, p), p), mod_mul(8, Y4, p), p)
    Z3 = mod_mul(2, mod_mul(Y, Z, p), p)

    return (X3, Y3, Z3)
```

### Point Addition (Jacobian + Affine)

Mixed addition is faster when one point is in affine:

```
function jacobian_add_affine(P, Q, p):
    # P in Jacobian (X1, Y1, Z1), Q in affine (x2, y2)
    X1, Y1, Z1 = P
    x2, y2 = Q

    if Z1 == 0:
        return affine_to_jacobian(Q)

    Z1Z1 = mod_mul(Z1, Z1, p)
    U2 = mod_mul(x2, Z1Z1, p)
    S2 = mod_mul(y2, mod_mul(Z1, Z1Z1, p), p)

    H = mod_sub(U2, X1, p)
    HH = mod_mul(H, H, p)
    I = mod_mul(4, HH, p)
    J = mod_mul(H, I, p)
    r = mod_mul(2, mod_sub(S2, Y1, p), p)
    V = mod_mul(X1, I, p)

    X3 = mod_sub(mod_sub(mod_mul(r, r, p), J, p), mod_mul(2, V, p), p)
    Y3 = mod_sub(mod_mul(r, mod_sub(V, X3, p), p), mod_mul(2, mod_mul(Y1, J, p), p), p)
    # Z3 = (Z1 + H)² - Z1Z1 - HH
    Z3 = mod_sub(mod_mul(mod_add(Z1, H, p), mod_add(Z1, H, p), p),
                 mod_add(Z1Z1, HH, p), p)

    return (X3, Y3, Z3)
```

## GLV Endomorphism (secp256k1)

### Scalar Decomposition

```
# Constants for secp256k1
LAMBDA = 0x5363AD4CC05C30E0A5261C028812645A122E22EA20816678DF02967C1B23BD72
BETA = 0x7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE

# Decomposition coefficients (matching references/secp256k1-parameters.md)
A1 = 0x3086D221A7D46BCDE86C90E49284EB15
B1 = -0xE4437ED6010E88286F547FA90ABFE4C3
A2 = 0x114CA50F7A8E2F3F657C1108D9D44CFD8
B2 = A1

function glv_decompose(k, n):
    # Compute c1 = round(B2 * k / n)
    # Compute c2 = round(-B1 * k / n)
    c1 = (B2 * k + n // 2) // n
    c2 = (-B1 * k + n // 2) // n

    # k1 = k - c1*A1 - c2*A2
    # k2 = -c1*B1 - c2*B2
    k1 = k - c1 * A1 - c2 * A2
    k2 = -c1 * B1 - c2 * B2

    return (k1, k2)

function glv_scalar_mult(k, P, p, n):
    k1, k2 = glv_decompose(k, n)

    # Compute endomorphism: φ(P) = (β*x, y)
    x, y = P
    phi_P = (mod_mul(BETA, x, p), y)

    # Use Shamir's trick: k1*P + k2*φ(P)
    # (k1 or k2 may be negative; negate the corresponding point and scalar)
    return multi_scalar_mult(k1, P, k2, phi_P, 0, p)
```

## Batch Inversion

Amortize expensive inversions over multiple points:

```
function batch_invert(values, p):
    n = len(values)
    if n == 0:
        return []

    # Compute cumulative products
    products = [values[0]]
    for i in range(1, n):
        products.append(mod_mul(products[-1], values[i], p))

    # Invert the final product
    inv = mod_inverse(products[-1], p)

    # Compute individual inverses
    inverses = [0] * n
    for i in range(n - 1, 0, -1):
        inverses[i] = mod_mul(inv, products[i - 1], p)
        inv = mod_mul(inv, values[i], p)
    inverses[0] = inv

    return inverses
```

## Key Generation

```
function generate_keypair(G, n, p):
    # Generate random private key
    d = random_integer(1, n - 1)

    # Compute public key
    Q = scalar_mult(d, G)

    return (d, Q)
```

## Point Compression/Decompression

```
function compress_point(P, p):
    if P is infinity:
        return bytes([0x00])

    x, y = P
    prefix = 0x02 if (y % 2 == 0) else 0x03
    return bytes([prefix]) + x.to_bytes(32, 'big')

function decompress_point(compressed, a, b, p):
    prefix = compressed[0]

    if prefix == 0x00:
        return infinity

    x = int.from_bytes(compressed[1:], 'big')

    # Compute y² = x³ + ax + b
    y_squared = mod_add(mod_add(mod_mul(x, mod_mul(x, x, p), p),
                                mod_mul(a, x, p), p), b, p)

    # Compute y = sqrt(y²)
    y = mod_sqrt(y_squared, p)

    # Select correct y based on prefix
    if (prefix == 0x02) != (y % 2 == 0):
        y = p - y

    return (x, y)
```

194
.claude/skills/elliptic-curves/references/secp256k1-parameters.md
Normal file
@@ -0,0 +1,194 @@
# secp256k1 Complete Parameters

## Curve Definition

**Name**: secp256k1 (Standards for Efficient Cryptography, prime field, 256-bit, Koblitz curve #1)

**Equation**: y² = x³ + 7 (mod p)

This is the short Weierstrass form with coefficients a = 0, b = 7.

## Field Parameters

### Prime Modulus p

```
Decimal:
115792089237316195423570985008687907853269984665640564039457584007908834671663

Hexadecimal:
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F

Binary structure:
2²⁵⁶ - 2³² - 2⁹ - 2⁸ - 2⁷ - 2⁶ - 2⁴ - 1
= 2²⁵⁶ - 2³² - 977
```

**Special form benefits**:
- Efficient modular reduction using: c mod p = c_low + c_high × (2³² + 977)
- Near-Mersenne prime enables fast arithmetic

### Group Order n

```
Decimal:
115792089237316195423570985008687907852837564279074904382605163141518161494337

Hexadecimal:
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
```

The number of points on the curve, including the point at infinity.

### Cofactor h

```
h = 1
```

Cofactor 1 means the group order n equals the curve order, simplifying security analysis and eliminating small subgroup attacks.

## Generator Point G

### Compressed Form

```
02 79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
```

The 02 prefix indicates the y-coordinate is even.

### Uncompressed Form

```
04 79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
   483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
```

### Individual Coordinates

**Gx**:
```
Decimal:
55066263022277343669578718895168534326250603453777594175500187360389116729240

Hexadecimal:
0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
```

**Gy**:
```
Decimal:
32670510020758816978083085130507043184471273380659243275938904335757337482424

Hexadecimal:
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
```

## Endomorphism Parameters

secp256k1 has an efficiently computable endomorphism φ: (x, y) → (βx, y).

### β (Beta)

```
Hexadecimal:
0x7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE

Property: β³ ≡ 1 (mod p)
```

### λ (Lambda)

```
Hexadecimal:
0x5363AD4CC05C30E0A5261C028812645A122E22EA20816678DF02967C1B23BD72

Property: λ³ ≡ 1 (mod n)
Relationship: φ(P) = λP for all points P
```

### GLV Decomposition Constants

For splitting scalar k into k₁ + k₂λ:

```
a₁ = 0x3086D221A7D46BCDE86C90E49284EB15
b₁ = -0xE4437ED6010E88286F547FA90ABFE4C3
a₂ = 0x114CA50F7A8E2F3F657C1108D9D44CFD8
b₂ = a₁
```

## Derived Constants

### Field Characteristics

```
(p + 1) / 4 = 0x3FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFBFFFFF0C
Used for computing modular square roots via the Tonelli-Shanks shortcut
```

### Order Characteristics

```
(n - 1) / 2 = 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0
Used in low-S normalization for ECDSA signatures
```

## Validation Formulas

### Point on Curve Check

For point (x, y), verify:
```
y² ≡ x³ + 7 (mod p)
```

### Generator Verification

Verify G is on curve:
```
Gy² mod p = 0x9C47D08FFB10D4B8 ... (truncated for display)
Gx³ + 7 mod p = same value
```

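A runnable spot-check of this identity with Go's `math/big`, using the constants above:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p, _ := new(big.Int).SetString(
		"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
	gx, _ := new(big.Int).SetString(
		"79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
	gy, _ := new(big.Int).SetString(
		"483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)

	// Left side: Gy² mod p
	left := new(big.Int).Exp(gy, big.NewInt(2), p)

	// Right side: Gx³ + 7 mod p
	right := new(big.Int).Exp(gx, big.NewInt(3), p)
	right.Add(right, big.NewInt(7))
	right.Mod(right, p)

	fmt.Println("G on curve:", left.Cmp(right) == 0) // true
}
```
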
### Order Verification

Verify nG = O (point at infinity):
```
Computing n × G should yield the identity element
```

## Bit Lengths

| Parameter | Bits | Bytes |
|-----------|------|-------|
| p (prime) | 256 | 32 |
| n (order) | 256 | 32 |
| Private key | 256 | 32 |
| Public key (compressed) | 264 | 33 |
| Public key (uncompressed) | 520 | 65 |
| ECDSA signature | 512 | 64 |
| Schnorr signature | 512 | 64 |

(Compressed and uncompressed public keys include a one-byte prefix, hence 33 and 65 bytes.)

## Security Level

- **Equivalent symmetric key strength**: 128 bits
- **Best known attack complexity**: ~2¹²⁸ operations (Pollard's rho)
- **Safe until**: Quantum computers with ~1500+ logical qubits

## ASN.1 OID

```
1.3.132.0.10
iso(1) identified-organization(3) certicom(132) curve(0) secp256k1(10)
```

## Comparison with Other Curves

| Curve | Field Size | Security | Speed | Use Case |
|-------|------------|----------|-------|----------|
| secp256k1 | 256-bit | 128-bit | Fast (Koblitz) | Bitcoin, Nostr |
| secp256r1 (P-256) | 256-bit | 128-bit | Moderate | TLS, general |
| Curve25519 | 255-bit | ~128-bit | Very fast | Modern crypto |
| secp384r1 (P-384) | 384-bit | 192-bit | Slower | High security |

291
.claude/skills/elliptic-curves/references/security.md
Normal file
@@ -0,0 +1,291 @@

# Elliptic Curve Security Analysis

Security properties, attack vectors, and mitigations for elliptic curve cryptography.

## The Discrete Logarithm Problem (ECDLP)

### Definition

Given points P and Q = kP on an elliptic curve, find the scalar k.

**Security assumption**: For properly chosen curves, this problem is computationally infeasible.

### Best Known Attacks

#### Generic Attacks (Work on Any Group)

| Attack | Complexity | Notes |
|--------|------------|-------|
| Baby-step Giant-step | O(√n) space and time | Requires √n storage |
| Pollard's rho | O(√n) time, O(1) space | Practical for large groups |
| Pollard's lambda | O(√n) | When k is in known range |
| Pohlig-Hellman | O(√q) where q is the largest prime factor of n | Exploits factorization of n |

For secp256k1 (n ≈ 2²⁵⁶):
- Generic attack complexity: ~2¹²⁸ operations
- Equivalent to 128-bit symmetric security

#### Curve-Specific Attacks

| Attack | Applicable When | Mitigation |
|--------|-----------------|------------|
| MOV/FR reduction | Low embedding degree | Use curves with high embedding degree |
| Anomalous curve attack | n = p | Ensure n ≠ p |
| GHS attack | Extension field curves | Use prime field curves |

**secp256k1 is immune to all known curve-specific attacks**.

## Side-Channel Attacks

### Timing Attacks

**Vulnerability**: Execution time varies based on secret data.

**Examples**:
- Conditional branches on secret bits
- Early exit conditions
- Variable-time modular operations

**Mitigations**:
- Constant-time algorithms (Montgomery ladder)
- Fixed execution paths
- Dummy operations to equalize timing

### Power Analysis

**Simple Power Analysis (SPA)**: Single trace reveals operations.
- Double-and-add visible as different power signatures
- Mitigation: Montgomery ladder (uniform operations)

**Differential Power Analysis (DPA)**: Statistical analysis of many traces.
- Mitigation: Point blinding, scalar blinding

### Cache Attacks

**FLUSH+RELOAD Attack**:
```
1. Attacker flushes cache line containing lookup table
2. Victim performs table lookup based on secret
3. Attacker measures reload time to determine which entry was accessed
```

**Mitigations**:
- Avoid secret-dependent table lookups
- Use constant-time table access patterns
- Scatter tables to prevent cache line sharing

### Electromagnetic (EM) Attacks

Similar to power analysis but captures electromagnetic emissions.

**Mitigations**:
- Shielding
- Same algorithmic protections as power analysis

## Implementation Vulnerabilities

### k-Reuse in ECDSA

**The Sony PS3 Hack (2010)**:

If the same k is used for two signatures (r₁, s₁) and (r₂, s₂) on messages m₁ and m₂ (with hashes e₁ and e₂):

```
s₁ = k⁻¹(e₁ + rd) mod n
s₂ = k⁻¹(e₂ + rd) mod n

Since k is the same:
s₁ - s₂ = k⁻¹(e₁ - e₂) mod n
k = (e₁ - e₂)(s₁ - s₂)⁻¹ mod n

Once k is known:
d = (s₁k - e₁)r⁻¹ mod n
```
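
The algebra above translates directly into code. A self-contained sketch with toy scalars (not a real key, and r is chosen arbitrarily rather than derived from kG, which the scalar algebra does not require):

```go
package main

import (
	"fmt"
	"math/big"
)

// recoverFromReusedNonce applies the equations above: given two signatures
// (r, s1) and (r, s2) over hashes e1 and e2 made with the same nonce k,
// it returns the nonce k and the private key d (all arithmetic mod n).
func recoverFromReusedNonce(r, s1, s2, e1, e2, n *big.Int) (k, d *big.Int) {
	// k = (e1 - e2)(s1 - s2)^-1 mod n
	num := new(big.Int).Mod(new(big.Int).Sub(e1, e2), n)
	den := new(big.Int).Mod(new(big.Int).Sub(s1, s2), n)
	k = new(big.Int).Mul(num, den.ModInverse(den, n))
	k.Mod(k, n)

	// d = (s1·k - e1)r^-1 mod n
	d = new(big.Int).Mul(s1, k)
	d.Sub(d, e1)
	d.Mul(d, new(big.Int).ModInverse(r, n))
	d.Mod(d, n)
	return k, d
}

func main() {
	n, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

	// Forge two signatures that share a nonce, then recover the secrets.
	d, k, r := big.NewInt(271828), big.NewInt(314159), big.NewInt(1618033)
	e1, e2 := big.NewInt(1111), big.NewInt(2222)
	kInv := new(big.Int).ModInverse(k, n)
	sig := func(e *big.Int) *big.Int { // s = k^-1(e + r·d) mod n
		s := new(big.Int).Mul(r, d)
		s.Add(s, e)
		s.Mul(s, kInv)
		return s.Mod(s, n)
	}
	gotK, gotD := recoverFromReusedNonce(r, sig(e1), sig(e2), e1, e2, n)
	fmt.Println("recovered k:", gotK, "d:", gotD) // prints 314159 and 271828
}
```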

**Mitigation**: Use deterministic k (RFC 6979).

### Weak Random k

Even with unique k values, if the RNG is biased:
- Lattice-based attacks can recover the private key
- A bias of even ~1% in k can be exploited given enough signatures

**Mitigations**:
- Use cryptographically secure RNG
- Use deterministic k (RFC 6979)
- Verify k is in valid range [1, n-1]

### Invalid Curve Attacks

**Attack**: Attacker provides point not on the curve.
- Point may be on a weaker curve
- Operations may leak information

**Mitigation**: Always validate points are on curve:
```
Verify: y² ≡ x³ + ax + b (mod p)
```

### Small Subgroup Attacks

**Attack**: If cofactor h > 1, points of small order exist.
- Attacker sends point of small order
- Response reveals private key mod (small order)

**Mitigation**:
- Use curves with cofactor 1 (secp256k1 has h = 1)
- Multiply received points by the cofactor
- Validate point order

### Fault Attacks

**Attack**: Induce computational errors (voltage glitches, radiation).
- Corrupted intermediate values may leak information
- Differential fault analysis can recover keys

**Mitigations**:
- Redundant computations with comparison
- Verify final results
- Hardware protections

## Signature Malleability

### ECDSA Malleability

Given a valid signature (r, s), the signature (r, n - s) is also valid for the same message.

**Impact**: Transaction ID malleability (historical Bitcoin issue)

**Mitigation**: Enforce low-S normalization:
```
if s > n/2:
    s = n - s
```
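
In Go, with the (n - 1)/2 constant from the parameter reference, normalization is a single comparison; a minimal sketch:

```go
package main

import (
	"fmt"
	"math/big"
)

var secp256k1N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

// normalizeS returns the low-S form of an ECDSA s value: if s > n/2,
// it is replaced by n - s, giving every signature one canonical form.
func normalizeS(s *big.Int) *big.Int {
	halfN := new(big.Int).Rsh(secp256k1N, 1) // floor(n/2) == (n-1)/2 for odd n
	if s.Cmp(halfN) > 0 {
		return new(big.Int).Sub(secp256k1N, s)
	}
	return new(big.Int).Set(s)
}

func main() {
	high := new(big.Int).Sub(secp256k1N, big.NewInt(5)) // a deliberately high S
	fmt.Println("normalized:", normalizeS(high))        // prints 5
}
```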

### Schnorr Non-Malleability

BIP-340 Schnorr signatures are non-malleable by design:
- Use x-only public keys
- Deterministic nonce derivation

## Quantum Threats

### Shor's Algorithm

**Threat**: Polynomial-time discrete log on quantum computers.
- Requires ~1500-2000 logical qubits for secp256k1
- Current quantum computers: only noisy physical qubits, far from error-corrected logical qubits at that scale

**Timeline**: Estimated 10-20+ years for cryptographically relevant quantum computers.

### Migration Strategy

1. **Monitor**: Track quantum computing progress
2. **Prepare**: Develop post-quantum alternatives
3. **Hybrid**: Use classical + post-quantum in transition
4. **Migrate**: Full transition when necessary

### Post-Quantum Alternatives

- Lattice-based signatures (CRYSTALS-Dilithium)
- Hash-based signatures (SPHINCS+)
- Code-based cryptography

## Best Practices

### Key Generation

```
DO:
- Use cryptographically secure RNG
- Validate private key is in [1, n-1]
- Verify public key is on curve
- Verify public key is not point at infinity

DON'T:
- Use predictable seeds
- Use truncated random values
- Skip validation
```
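
A sketch of the DO column for private keys: crypto/rand plus rejection of zero gives a uniform scalar in [1, n-1]. The on-curve and point-at-infinity checks for the public key need point arithmetic from an EC library and are omitted here.

```go
package main

import (
	"crypto/rand"
	"errors"
	"fmt"
	"math/big"
)

var secp256k1N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

// newPrivateKey draws a uniformly random scalar in [1, n-1] using
// rejection sampling on a CSPRNG.
func newPrivateKey() (*big.Int, error) {
	for i := 0; i < 128; i++ {
		k, err := rand.Int(rand.Reader, secp256k1N) // uniform in [0, n-1]
		if err != nil {
			return nil, err
		}
		if k.Sign() > 0 { // reject 0; k < n is guaranteed by rand.Int
			return k, nil
		}
	}
	return nil, errors.New("rng failure: could not produce a nonzero scalar")
}

func main() {
	k, err := newPrivateKey()
	if err != nil {
		panic(err)
	}
	fmt.Printf("private key: %064X\n", k)
}
```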

### Signature Generation

```
DO:
- Use RFC 6979 for deterministic k
- Validate all inputs
- Use constant-time operations
- Clear sensitive memory after use

DON'T:
- Reuse k values
- Use weak/biased RNG
- Skip low-S normalization (ECDSA)
```

### Signature Verification

```
DO:
- Validate r, s are in [1, n-1]
- Validate public key is on curve
- Validate public key is not infinity
- Use batch verification when possible

DON'T:
- Skip any validation steps
- Accept malformed signatures
```

### Public Key Handling

```
DO:
- Validate received points are on curve
- Check point is not infinity
- Prefer compressed format for storage

DON'T:
- Accept unvalidated points
- Skip curve membership check
```

## Security Checklist

### Implementation Review

- [ ] All scalar multiplications are constant-time
- [ ] No secret-dependent branches
- [ ] No secret-indexed table lookups
- [ ] Memory is zeroized after use
- [ ] Random k uses CSPRNG or RFC 6979
- [ ] All received points are validated
- [ ] Private keys are in valid range
- [ ] Signatures use low-S normalization

### Operational Security

- [ ] Private keys stored securely (HSM, secure enclave)
- [ ] Key derivation uses proper KDF
- [ ] Backups are encrypted
- [ ] Key rotation policy exists
- [ ] Audit logging enabled
- [ ] Incident response plan exists

## Security Levels Comparison

| Curve | Bits | Symmetric Equivalent | RSA Equivalent |
|-------|------|---------------------|----------------|
| secp192r1 | 192 | 96 | 1536 |
| secp224r1 | 224 | 112 | 2048 |
| secp256k1 | 256 | 128 | 3072 |
| secp384r1 | 384 | 192 | 7680 |
| secp521r1 | 521 | 256 | 15360 |

## References

- NIST SP 800-57: Recommendation for Key Management
- SEC 1: Elliptic Curve Cryptography
- RFC 6979: Deterministic Usage of DSA and ECDSA
- BIP-340: Schnorr Signatures for secp256k1
- SafeCurves: Choosing Safe Curves for Elliptic-Curve Cryptography
@@ -82,6 +82,49 @@ func (f *File) Read(p []byte) (n int, err error) {
}
```

### Interface Design - CRITICAL RULES

**Rule 1: Define interfaces in a dedicated package (e.g., `pkg/interfaces/<name>/`)**
- Interfaces provide isolation between packages and enable dependency inversion
- Keeping interfaces in a dedicated package prevents circular dependencies
- Each interface package should be minimal (just the interface, no implementations)

**Rule 2: NEVER use type assertions with interface literals**
- **NEVER** write `.(interface{ Method() Type })` - this is non-idiomatic and unmaintainable
- Interface literals cannot be documented, tested for satisfaction, or reused

```go
// BAD - interface literal in type assertion (NEVER DO THIS)
if checker, ok := obj.(interface{ Check() bool }); ok {
    checker.Check()
}

// GOOD - use defined interface from dedicated package
import "myproject/pkg/interfaces/checker"

if c, ok := obj.(checker.Checker); ok {
    c.Check()
}
```

**Rule 3: Resolving Circular Dependencies**
- If a circular dependency occurs, move the interface to `pkg/interfaces/`
- The implementing type stays in its original package
- The consuming code imports only the interface package
- Pattern:
```
pkg/interfaces/foo/   <- interface definition (no dependencies)
      ↑           ↑
  pkg/bar/    pkg/baz/
  (implements)  (consumes via interface)
```

**Rule 4: Verify interface satisfaction at compile time**
```go
// Add this line to ensure *MyType implements MyInterface
var _ MyInterface = (*MyType)(nil)
```

### Concurrency

Use goroutines and channels for concurrent programming:

@@ -178,6 +221,26 @@ For detailed information, consult the reference files:
- Start comments with the name being described
- Use godoc format

6. **Configuration - CRITICAL**
   - **NEVER** use `os.Getenv()` scattered throughout packages
   - **ALWAYS** centralize environment variable parsing in a single config package (e.g., `app/config/`)
   - Pass configuration via structs, not by reading environment directly
   - This ensures discoverability, documentation, and testability of all config options (see the sketch after this list)

7. **Constants - CRITICAL**
   - **ALWAYS** define named constants for values used more than a few times
   - **ALWAYS** define named constants if multiple packages depend on the same value
   - Constants shared across packages belong in a dedicated package (e.g., `pkg/constants/`)
   - Magic numbers and strings are forbidden
   ```go
   // BAD - magic number
   if size > 1024 {

   // GOOD - named constant
   const MaxBufferSize = 1024
   if size > MaxBufferSize {
   ```
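
A minimal sketch of rule 6: one package owns all environment access and hands out a struct. The names (`Config`, `FromEnv`) and the specific variables are illustrative, not the project's actual API.

```go
// Package config is the single place allowed to read the environment.
package config

import (
	"os"
	"strconv"
)

// Config carries every option the rest of the program needs; other
// packages receive this struct and never call os.Getenv themselves.
type Config struct {
	Port         int
	DataDir      string
	AuthRequired bool
}

// FromEnv parses all environment variables in one pass.
func FromEnv() (*Config, error) {
	cfg := &Config{Port: 3334, DataDir: os.ExpandEnv("$HOME/.local/share/ORLY")}
	if v := os.Getenv("ORLY_PORT"); v != "" {
		p, err := strconv.Atoi(v)
		if err != nil {
			return nil, err
		}
		cfg.Port = p
	}
	if v := os.Getenv("ORLY_DATA_DIR"); v != "" {
		cfg.DataDir = v
	}
	cfg.AuthRequired = os.Getenv("ORLY_AUTH_REQUIRED") == "true"
	return cfg, nil
}
```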

## Common Commands

```bash
7
.gitignore
vendored
@@ -10,8 +10,6 @@
# Especially these
.vscode/
**/.vscode/
node_modules/
**/node_modules/
/test*
.idea/
# and others
@@ -98,6 +96,10 @@ cmd/benchmark/data
# Re-ignore IDE directories (must come after !*/)
.idea/
**/.idea/

# Re-ignore node_modules everywhere (must come after !*/)
node_modules/
**/node_modules/
/blocklist.json
/gui/gui/main.wasm
/gui/gui/index.html
@@ -105,7 +107,6 @@ pkg/database/testrealy
/ctxproxy.config.yml
cmd/benchmark/external/**
private*
pkg/protocol/directory-client/node_modules

# Build outputs
build/orly-*
@@ -1,319 +0,0 @@
# Badger Database Migration Guide

## Overview

This guide covers migrating your ORLY relay database when changing Badger configuration parameters, specifically for the VLogPercentile and table size optimizations.

## When Migration is Needed

Based on research of Badger v4 source code and documentation:

### Configuration Changes That DON'T Require Migration

The following options can be changed **without migration**:
- `BlockCacheSize` - Only affects in-memory cache
- `IndexCacheSize` - Only affects in-memory cache
- `NumCompactors` - Runtime setting
- `NumLevelZeroTables` - Affects compaction timing
- `NumMemtables` - Affects write buffering
- `DetectConflicts` - Runtime conflict detection
- `Compression` - New data uses new compression, old data remains as-is
- `BlockSize` - Explicitly stated in Badger source: "Changing BlockSize across DB runs will not break badger"

### Configuration Changes That BENEFIT from Migration

The following options apply to **new writes only** - existing data gradually adopts new settings through compaction:
- `VLogPercentile` - Affects where **new** values are stored (LSM vs vlog)
- `BaseTableSize` - **New** SST files use new size
- `MemTableSize` - Affects new write buffering
- `BaseLevelSize` - Affects new LSM tree structure
- `ValueLogFileSize` - New vlog files use new size

**Migration Impact:** Without migration, existing data remains in its current location (LSM tree or value log). The database will **gradually** adapt through normal compaction, which may take days or weeks depending on write volume.

## Migration Options

### Option 1: No Migration (Let Natural Compaction Handle It)

**Best for:** Low-traffic relays, testing environments

**Pros:**
- No downtime required
- No manual intervention
- Zero risk of data loss

**Cons:**
- Benefits take time to materialize (days/weeks)
- Old data layout persists until natural compaction
- Cache tuning benefits delayed

**Steps:**
1. Update Badger configuration in `pkg/database/database.go`
2. Restart ORLY relay
3. Monitor performance over several days
4. Optionally run manual GC: `db.RunValueLogGC(0.5)` periodically
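
Step 4's periodic GC can be scripted in a small helper. A sketch against the Badger v4 API (the path and ticker interval are placeholders; `RunValueLogGC` returns `badger.ErrNoRewrite` once a pass finds nothing to collect):

```go
package main

import (
	"log"
	"time"

	badger "github.com/dgraph-io/badger/v4"
)

func main() {
	db, err := badger.Open(badger.DefaultOptions("/path/to/ORLY/db")) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ticker := time.NewTicker(10 * time.Minute) // interval is an arbitrary choice
	defer ticker.Stop()
	for range ticker.C {
		// Keep collecting until a pass rewrites nothing (ErrNoRewrite).
		for {
			if err := db.RunValueLogGC(0.5); err != nil {
				break
			}
		}
	}
}
```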

### Option 2: Manual Value Log Garbage Collection

**Best for:** Medium-traffic relays wanting faster optimization

**Pros:**
- Faster than natural compaction
- Still safe (no export/import)
- Can run while relay is online

**Cons:**
- Still gradual (hours instead of days)
- CPU/disk intensive during GC
- Partial benefit until GC completes

**Steps:**
1. Update Badger configuration
2. Restart ORLY relay
3. Monitor logs for compaction activity
4. Manually trigger GC if needed (future feature - not currently exposed)

### Option 3: Full Export/Import Migration (RECOMMENDED for Production)

**Best for:** Production relays, large databases, maximum performance

**Pros:**
- Immediate full benefit of new configuration
- Clean database structure
- Predictable migration time
- Reclaims all disk space

**Cons:**
- Requires relay downtime (several hours for large DBs)
- Requires 2x disk space temporarily
- More complex procedure

**Steps:** See detailed procedure below

## Full Migration Procedure (Option 3)

### Prerequisites

1. **Disk space:** At minimum 2.5x current database size
   - 1x for current database
   - 1x for JSONL export
   - 0.5x for new database (will be smaller with compression)

2. **Time estimate:**
   - Export: ~100-500 MB/s depending on disk speed
   - Import: ~50-200 MB/s with indexing overhead
   - Example: 10 GB database = ~10-30 minutes total

3. **Backup:** Ensure you have a recent backup before proceeding

### Step-by-Step Migration

#### 1. Prepare Migration Script

Use the provided `scripts/migrate-badger-config.sh` script (see below).

#### 2. Stop the Relay

```bash
# If using systemd
sudo systemctl stop orly

# If running manually
pkill orly
```

#### 3. Run Migration

```bash
cd ~/src/next.orly.dev
chmod +x scripts/migrate-badger-config.sh
./scripts/migrate-badger-config.sh
```

The script will:
- Export all events to JSONL format
- Move old database to backup location
- Create new database with updated configuration
- Import all events (rebuilds indexes automatically)
- Verify event count matches

#### 4. Verify Migration

```bash
# Check that events were migrated
echo "Old event count:"
cat ~/.local/share/ORLY-backup-*/migration.log | grep "exported.*events"

echo "New event count:"
cat ~/.local/share/ORLY/migration.log | grep "saved.*events"
```

#### 5. Restart Relay

```bash
# If using systemd
sudo systemctl start orly
sudo journalctl -u orly -f

# If running manually
./orly
```

#### 6. Monitor Performance

Watch for improvements in:
- Cache hit ratio (should be >85% with new config)
- Average query latency (should be <3ms for cached events)
- No "Block cache too small" warnings in logs

#### 7. Clean Up (After Verification)

```bash
# Once you confirm everything works (wait 24-48 hours)
rm -rf ~/.local/share/ORLY-backup-*
rm ~/.local/share/ORLY/events-export.jsonl
```

## Migration Script

The migration script is located at `scripts/migrate-badger-config.sh` and handles:
- Automatic export of all events to JSONL
- Safe backup of existing database
- Creation of new database with updated config
- Import and indexing of all events
- Verification of event counts

## Rollback Procedure

If migration fails or performance degrades:

```bash
# Stop the relay
sudo systemctl stop orly  # or pkill orly

# Restore old database
rm -rf ~/.local/share/ORLY
mv ~/.local/share/ORLY-backup-$(date +%Y%m%d)* ~/.local/share/ORLY

# Restart with old configuration
sudo systemctl start orly
```

## Configuration Changes Summary

### Changes Applied in pkg/database/database.go
```go
// Cache sizes (can change without migration)
opts.BlockCacheSize = 16384 << 20 // 16384 MB (was 512 MB)
opts.IndexCacheSize = 4096 << 20  // 4096 MB (was 256 MB)

// Table sizes (benefit from migration)
opts.BaseTableSize = 8 << 20      // 8 MB (was 64 MB)
opts.MemTableSize = 16 << 20      // 16 MB (was 64 MB)
opts.ValueLogFileSize = 128 << 20 // 128 MB (was 256 MB)

// Inline event optimization (CRITICAL - benefits from migration)
opts.VLogPercentile = 0.99 // (was 0.0 - default)

// LSM structure (benefits from migration)
opts.BaseLevelSize = 64 << 20 // 64 MB (was 10 MB - default)

// Performance settings (no migration needed)
opts.DetectConflicts = false    // (was true)
opts.Compression = options.ZSTD // (was options.None)
opts.NumCompactors = 8          // (was 4)
opts.NumMemtables = 8           // (was 5)
```

## Expected Improvements

### Before Migration
- Cache hit ratio: 33%
- Average latency: 9.35ms
- P95 latency: 34.48ms
- Block cache warnings: Yes

### After Migration
- Cache hit ratio: 85-95%
- Average latency: <3ms
- P95 latency: <8ms
- Block cache warnings: No
- Inline events: 3-5x faster reads

## Troubleshooting

### Migration Script Fails

**Error:** "Not enough disk space"
- Free up space or use Option 1 (natural compaction)
- Ensure you have 2.5x current DB size available

**Error:** "Export failed"
- Check database is not corrupted
- Ensure ORLY is stopped
- Check file permissions

**Error:** "Import count mismatch"
- This is informational - some events may be duplicates
- Check logs for specific errors
- Verify core events are present via relay queries

### Performance Not Improved

**After migration, performance is the same:**
1. Verify configuration was actually applied:
   ```bash
   # Check running relay logs for config output
   sudo journalctl -u orly | grep -i "block.*cache\|vlog"
   ```

2. Wait for cache to warm up (2-5 minutes after start)

3. Check if workload changed (different query patterns)

4. Verify disk I/O is not a bottleneck:
   ```bash
   iostat -x 5
   ```

### High CPU During Migration

- This is normal - import rebuilds all indexes
- Migration is single-threaded by design (data consistency)
- Expect 30-60% CPU usage on one core

## Additional Notes

### Compression Impact

The `Compression = options.ZSTD` setting:
- Only compresses **new** data
- Old data remains uncompressed until rewritten by compaction
- Migration forces all data to be rewritten → immediate compression benefit
- Expect 2-3x compression ratio for event data

### VLogPercentile Behavior

With `VLogPercentile = 0.99`:
- **99% of values** stored in LSM tree (fast access)
- **1% of values** stored in value log (large events >100 KB)
- Threshold dynamically adjusted based on value size distribution
- Perfect for ORLY's inline event optimization

### Production Considerations

For production relays:
1. Schedule migration during low-traffic period
2. Notify users of maintenance window
3. Have rollback plan ready
4. Monitor closely for 24-48 hours after migration
5. Keep backup for at least 1 week

## References

- Badger v4 Documentation: https://pkg.go.dev/github.com/dgraph-io/badger/v4
- ORLY Database Package: `pkg/database/database.go`
- Export/Import Implementation: `pkg/database/{export,import}.go`
- Cache Optimization Analysis: `cmd/benchmark/CACHE_OPTIMIZATION_STRATEGY.md`
- Inline Event Optimization: `cmd/benchmark/INLINE_EVENT_OPTIMIZATION.md`
258
CLAUDE.md
@@ -8,11 +8,12 @@ ORLY is a high-performance Nostr relay written in Go, designed for personal rela

**Key Technologies:**
- **Language**: Go 1.25.3+
- **Database**: Badger v4 (embedded key-value store) or DGraph (distributed graph database)
- **Database**: Badger v4 (embedded) or Neo4j (social graph)
- **Cryptography**: Custom p8k library using purego for secp256k1 operations (no CGO)
- **Web UI**: Svelte frontend embedded in the binary
- **WebSocket**: gorilla/websocket for Nostr protocol
- **Performance**: SIMD-accelerated SHA256 and hex encoding, query result caching with zstd compression
- **Social Graph**: Neo4j backend with Web of Trust (WoT) extensions for trust metrics

## Build Commands

@@ -139,9 +140,13 @@ export ORLY_SPROCKET_ENABLED=true
# Enable policy system
export ORLY_POLICY_ENABLED=true

# Database backend selection (badger or dgraph)
# Database backend selection (badger or neo4j)
export ORLY_DB_TYPE=badger
export ORLY_DGRAPH_URL=localhost:9080 # Only for dgraph backend

# Neo4j configuration (only when ORLY_DB_TYPE=neo4j)
export ORLY_NEO4J_URI=bolt://localhost:7687
export ORLY_NEO4J_USER=neo4j
export ORLY_NEO4J_PASSWORD=password

# Query cache configuration (improves REQ response times)
export ORLY_QUERY_CACHE_SIZE_MB=512 # Default: 512MB
@@ -150,6 +155,20 @@ export ORLY_QUERY_CACHE_MAX_AGE=5m # Cache expiry time
# Database cache tuning (for Badger backend)
export ORLY_DB_BLOCK_CACHE_MB=512 # Block cache size
export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
export ORLY_INLINE_EVENT_THRESHOLD=1024 # Inline storage threshold (bytes)

# Directory Spider (metadata sync from other relays)
export ORLY_DIRECTORY_SPIDER=true # Enable directory spider
export ORLY_DIRECTORY_SPIDER_INTERVAL=24h # How often to run
export ORLY_DIRECTORY_SPIDER_HOPS=3 # Max hops for relay discovery

# NIP-43 Relay Access Metadata
export ORLY_NIP43_ENABLED=true # Enable invite system
export ORLY_NIP43_INVITE_EXPIRY=24h # Invite code validity

# Authentication modes
export ORLY_AUTH_REQUIRED=false # Require auth for all requests
export ORLY_AUTH_TO_WRITE=false # Require auth only for writes
```

## Code Architecture

@@ -177,7 +196,7 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size

**`pkg/database/`** - Database abstraction layer with multiple backend support
- `interface.go` - Database interface definition for pluggable backends
- `factory.go` - Database backend selection (Badger or DGraph)
- `factory.go` - Database backend selection (Badger or Neo4j)
- `database.go` - Badger implementation with cache tuning and query cache
- `save-event.go` - Event storage with index updates
- `query-events.go` - Main query execution engine with filter normalization
@@ -188,6 +207,15 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
- `identity.go` - Relay identity key management
- `migrations.go` - Database schema migration runner

**`pkg/neo4j/`** - Neo4j graph database backend with social graph support
- `neo4j.go` - Main database implementation
- `schema.go` - Graph schema and index definitions (includes WoT extensions)
- `query-events.go` - REQ filter to Cypher translation
- `save-event.go` - Event storage with relationship creation
- `social-event-processor.go` - Processes kinds 0, 3, 1984, 10000 for social graph
- `WOT_SPEC.md` - Web of Trust data model specification (NostrUser nodes, trust metrics)
- `MODIFYING_SCHEMA.md` - Guide for schema modifications

**`pkg/protocol/`** - Nostr protocol implementation
- `ws/` - WebSocket message framing and parsing
- `auth/` - NIP-42 authentication challenge/response
@@ -223,6 +251,9 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
**`pkg/policy/`** - Event filtering and validation policies
- Policy configuration loaded from `~/.config/ORLY/policy.json`
- Per-kind size limits, age restrictions, custom scripts
- **Write-Only Validation**: Size, age, tag, and expiry validations apply ONLY to write operations
- **Read-Only Filtering**: `read_allow`, `read_deny`, `privileged` apply ONLY to read operations
- See `docs/POLICY_CONFIGURATION_REFERENCE.md` for authoritative read vs write applicability
- **Dynamic Policy Hot Reload via Kind 12345 Events:**
  - Policy admins can update policy configuration without relay restart
  - Kind 12345 events contain JSON policy in content field
@@ -231,12 +262,16 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
  - Policy admin follow lists (kind 3) trigger immediate cache refresh
  - `WriteAllowFollows` rule grants both read+write access to admin follows
- Tag validation supports regex patterns per tag type
- **New Policy Rule Fields:**
- **Policy Rule Fields:**
  - `max_expiry_duration`: ISO-8601 duration format (e.g., "P7D", "PT1H30M") for event expiry limits
  - `protected_required`: Requires NIP-70 protected events (must have "-" tag)
  - `identifier_regex`: Regex pattern for validating "d" tag identifiers
  - `follows_whitelist_admins`: Per-rule admin pubkeys whose follows are whitelisted
  - `write_allow` / `write_deny`: Pubkey whitelist/blacklist for writing (write-only)
  - `read_allow` / `read_deny`: Pubkey whitelist/blacklist for reading (read-only)
  - `privileged`: Party-involved access control (read-only)
- See `docs/POLICY_USAGE_GUIDE.md` for configuration examples
- See `pkg/policy/README.md` for quick reference

**`pkg/sync/`** - Distributed synchronization
- `cluster_manager.go` - Active replication between relay peers
@@ -246,6 +281,12 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
**`pkg/spider/`** - Event syncing from other relays
- `spider.go` - Spider manager for "follows" mode
- Fetches events from admin relays for followed pubkeys
- **Directory Spider** (`directory.go`):
  - Discovers relays by crawling kind 10002 (relay list) events
  - Expands outward from seed pubkeys (whitelisted users) via hop distance
  - Fetches metadata events (kinds 0, 3, 10000, 10002) from discovered relays
  - Self-detection prevents querying own relay
  - Configurable interval and max hops via `ORLY_DIRECTORY_SPIDER_*` env vars

**`pkg/utils/`** - Shared utilities
- `atomic/` - Extended atomic operations
@@ -278,7 +319,11 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
**Database Backend Selection:**
- Supports multiple backends via `ORLY_DB_TYPE` environment variable
- **Badger** (default): Embedded key-value store with custom indexing, ideal for single-instance deployments
- **DGraph**: Distributed graph database for larger, multi-node deployments
- **Neo4j**: Graph database with social graph and Web of Trust (WoT) extensions
  - Processes kinds 0 (profile), 3 (contacts), 1984 (reports), 10000 (mute list) for social graph
  - NostrUser nodes with trust metrics (influence, PageRank)
  - FOLLOWS, MUTES, REPORTS relationships for WoT analysis
  - See `pkg/neo4j/WOT_SPEC.md` for full schema specification
- Backend selected via factory pattern in `pkg/database/factory.go`
- All backends implement the same `Database` interface defined in `pkg/database/interface.go`

@@ -297,11 +342,33 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
4. Events stored via `database.SaveEvent()`
5. Active subscriptions notified via `publishers.Publish()`

**Configuration System:**
**Configuration System - CRITICAL RULES:**
- Uses `go-simpler.org/env` for struct tags
- All config in `app/config/config.go` with `ORLY_` prefix
- **ALL environment variables MUST be defined in `app/config/config.go`**
- **NEVER** use `os.Getenv()` directly in packages - always pass config via structs
- **NEVER** parse environment variables outside of `app/config/`
- This ensures all config options appear in `./orly help` output
- Database backends receive config via `database.DatabaseConfig` struct
- Use `GetDatabaseConfigValues()` helper to extract DB config from app config
- All config fields use `ORLY_` prefix with struct tags defining defaults and usage
- Supports XDG directories via `github.com/adrg/xdg`
- Default data directory: `~/.local/share/ORLY`
- Database-specific config (Neo4j, Badger) is passed via `DatabaseConfig` struct in `pkg/database/factory.go`

**Constants - CRITICAL RULES:**
- **ALWAYS** define named constants for values used more than a few times
- **ALWAYS** define named constants if multiple packages depend on the same value
- Constants shared across packages should be in a dedicated package (e.g., `pkg/constants/`)
- Magic numbers and strings are forbidden - use named constants with clear documentation
- Example:
  ```go
  // BAD - magic number
  if timeout > 30 {

  // GOOD - named constant
  const DefaultTimeoutSeconds = 30
  if timeout > DefaultTimeoutSeconds {
  ```

**Event Publishing:**
- `pkg/protocol/publish/` manages publisher registry
@@ -322,22 +389,120 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
- External packages (e.g., `app/`) should ONLY use public API methods, never access internal fields
- **DO NOT** change unexported fields to exported when fixing bugs - this breaks the domain boundary

**Binary-Optimized Tag Storage (IMPORTANT):**
- The nostr library (`git.mleku.dev/mleku/nostr/encoders/tag`) uses binary optimization for `e` and `p` tags
- When events are unmarshaled from JSON, 64-character hex values in e/p tags are converted to 33-byte binary format (32 bytes hash + null terminator)
- **DO NOT** use `tag.Value()` directly for e/p tags - it returns raw bytes which may be binary, not hex
- **ALWAYS** use these methods instead:
  - `tag.ValueHex()` - Returns hex string regardless of storage format (handles both binary and hex)
  - `tag.ValueBinary()` - Returns 32-byte binary if stored in binary format, nil otherwise
- Example pattern for comparing pubkeys:
```go
// CORRECT: Use ValueHex() for hex decoding
pt, err := hex.Dec(string(pTag.ValueHex()))

// WRONG: Value() may return binary bytes, not hex
pt, err := hex.Dec(string(pTag.Value())) // Will fail for binary-encoded tags!
```

**Binary-Optimized Tag Storage (CRITICAL - Read Carefully):**

The nostr library (`git.mleku.dev/mleku/nostr/encoders/tag`) uses binary optimization for `e` and `p` tags. This is a common source of bugs when working with pubkeys and event IDs.

**How Binary Encoding Works:**
- When events are unmarshaled from JSON, 64-character hex values in e/p tags are converted to 33-byte binary format (32 bytes hash + null terminator)
- The `tag.T` field contains `[][]byte` where each element may be binary or hex depending on tag type
- `event.E.ID`, `event.E.Pubkey`, and `event.E.Sig` are always stored as fixed-size byte arrays (`[32]byte` or `[64]byte`)

**NEVER Do This:**
```go
// WRONG: tag.T[1] may be 33-byte binary, not 64-char hex!
pubkey := string(tag.T[1]) // Results in garbage for binary-encoded tags

// WRONG: Will fail for binary-encoded e/p tags
pt, err := hex.Dec(string(pTag.Value()))
```
**ALWAYS Do This:**
```go
// CORRECT: Use ValueHex() which handles both binary and hex formats
pubkey := string(pTag.ValueHex()) // Always returns lowercase hex

// CORRECT: For decoding to bytes
pt, err := hex.Dec(string(pTag.ValueHex()))

// CORRECT: For event.E fields (always binary, use hex.Enc)
pubkeyHex := hex.Enc(ev.Pubkey[:]) // Always produces lowercase hex
eventIDHex := hex.Enc(ev.ID[:])
sigHex := hex.Enc(ev.Sig[:])
```

**Tag Methods Reference:**
- `tag.ValueHex()` - Returns hex string regardless of storage format (handles both binary and hex)
- `tag.ValueBinary()` - Returns 32-byte binary if stored in binary format, nil otherwise
- `tag.Value()` - Returns raw bytes - **DANGEROUS for e/p tags** - may be binary

**Hex Case Sensitivity:**
- The hex encoder (`git.mleku.dev/mleku/nostr/encoders/hex`) **always produces lowercase hex**
- External sources may send uppercase hex (e.g., `"ABCD..."` instead of `"abcd..."`)
- When storing pubkeys/event IDs (especially in Neo4j), **always normalize to lowercase**
- Mixed case causes duplicate entities in graph databases

**Neo4j-Specific Helpers (pkg/neo4j/hex_utils.go):**
```go
// ExtractPTagValue handles binary encoding and normalizes to lowercase
pubkey := ExtractPTagValue(pTag)

// ExtractETagValue handles binary encoding and normalizes to lowercase
eventID := ExtractETagValue(eTag)

// NormalizePubkeyHex handles both binary and uppercase hex
normalized := NormalizePubkeyHex(rawValue)

// IsValidHexPubkey validates 64-char hex
if IsValidHexPubkey(pubkey) { ... }
```

**Files Most Affected by These Rules:**
- `pkg/neo4j/save-event.go` - Event storage with e/p tag handling
- `pkg/neo4j/social-event-processor.go` - Social graph with p-tag extraction
- `pkg/neo4j/query-events.go` - Filter queries with tag matching
- `pkg/database/save-event.go` - Badger event storage
- `pkg/database/filter_utils.go` - Tag normalization utilities
- `pkg/find/parser.go` - FIND protocol parser with p-tag extraction

This optimization saves memory and enables faster comparisons in the database layer.
**Interface Design - CRITICAL RULES:**

**Rule 1: ALL interfaces MUST be defined in `pkg/interfaces/<name>/`**
- Interfaces provide isolation between packages and enable dependency inversion
- Keeping interfaces in a dedicated package prevents circular dependencies
- Each interface package should be minimal (just the interface, no implementations)

**Rule 2: NEVER use type assertions with interface literals**
- **NEVER** write `.(interface{ Method() Type })` - this is non-idiomatic and unmaintainable
- Interface literals cannot be documented, tested for satisfaction, or reused
- Example of WRONG approach:
  ```go
  // BAD - interface literal in type assertion
  if checker, ok := obj.(interface{ Check() bool }); ok {
      checker.Check()
  }
  ```
- Example of CORRECT approach:
  ```go
  // GOOD - use defined interface from pkg/interfaces/
  import "next.orly.dev/pkg/interfaces/checker"

  if c, ok := obj.(checker.Checker); ok {
      c.Check()
  }
  ```

**Rule 3: Resolving Circular Dependencies**
- If a circular dependency occurs when adding an interface, move the interface to `pkg/interfaces/`
- The implementing type stays in its original package
- The consuming code imports only the interface package
- This pattern:
  ```
  pkg/interfaces/foo/   <- interface definition (no dependencies)
        ↑           ↑
    pkg/bar/    pkg/baz/
    (implements)  (consumes via interface)
  ```
**Existing interfaces in `pkg/interfaces/`:**
- `acl/` - ACL and PolicyChecker interfaces
- `neterr/` - TimeoutError interface for network errors
- `resultiter/` - Neo4jResultIterator for database results
- `store/` - Storage-related interfaces
- `publisher/` - Event publishing interfaces
- `typer/` - Type identification interface

## Development Workflow

@@ -524,3 +689,52 @@ Files modified:
```
3. GitHub Actions workflow builds binaries for multiple platforms
4. Release created automatically with binaries and checksums

## Recent Features (v0.31.x)

### Directory Spider
The directory spider (`pkg/spider/directory.go`) automatically discovers and syncs metadata from other relays:
- Crawls kind 10002 (relay list) events to discover relays
- Expands outward from seed pubkeys (whitelisted users) via configurable hop distance
- Fetches essential metadata events (kinds 0, 3, 10000, 10002)
- Self-detection prevents querying own relay
- Enable with `ORLY_DIRECTORY_SPIDER=true`

### Neo4j Social Graph Backend
The Neo4j backend (`pkg/neo4j/`) includes Web of Trust (WoT) extensions:
- **Social Event Processor**: Handles kinds 0, 3, 1984, 10000 for social graph management
- **NostrUser nodes**: Store profile data and trust metrics (influence, PageRank)
- **Relationships**: FOLLOWS, MUTES, REPORTS for social graph analysis
- **WoT Schema**: See `pkg/neo4j/WOT_SPEC.md` for full specification
- **Schema Modifications**: See `pkg/neo4j/MODIFYING_SCHEMA.md` for how to update

### Policy System Enhancements
- **Write-Only Validation**: Size, age, tag validations apply ONLY to writes
- **Read-Only Filtering**: `read_allow`, `read_deny`, `privileged` apply ONLY to reads
- **Scripts**: Policy scripts execute ONLY for write operations
- **Reference Documentation**: `docs/POLICY_CONFIGURATION_REFERENCE.md` provides authoritative read vs write applicability
- See also: `pkg/policy/README.md` for quick reference

### Authentication Modes
- `ORLY_AUTH_REQUIRED=true`: Require authentication for ALL requests
- `ORLY_AUTH_TO_WRITE=true`: Require authentication only for writes (allow anonymous reads)

### NIP-43 Relay Access Metadata
Invite-based access control system:
- `ORLY_NIP43_ENABLED=true`: Enable invite system
- Publishes kind 8000/8001 events for member changes
- Publishes kind 13534 membership list events
- Configurable invite expiry via `ORLY_NIP43_INVITE_EXPIRY`

## Documentation Index

| Document | Purpose |
|----------|---------|
| `docs/POLICY_CONFIGURATION_REFERENCE.md` | Authoritative policy config reference with read/write applicability |
| `docs/POLICY_USAGE_GUIDE.md` | Comprehensive policy system user guide |
| `pkg/policy/README.md` | Policy system quick reference |
| `pkg/neo4j/README.md` | Neo4j backend overview |
| `pkg/neo4j/WOT_SPEC.md` | Web of Trust schema specification |
| `pkg/neo4j/MODIFYING_SCHEMA.md` | How to modify Neo4j schema |
| `pkg/neo4j/TESTING.md` | Neo4j testing guide |
| `readme.adoc` | Project README with feature overview |
@@ -1,387 +0,0 @@
# Dgraph Database Implementation Status

## Overview

This document tracks the implementation of Dgraph as an alternative database backend for ORLY. The implementation allows switching between Badger (default) and Dgraph via the `ORLY_DB_TYPE` environment variable.

## Completion Status: ✅ STEP 1 COMPLETE - DGRAPH SERVER INTEGRATION + TESTS

**Build Status:** ✅ Successfully compiles with `CGO_ENABLED=0`
**Binary Test:** ✅ ORLY v0.29.0 starts and runs successfully
**Database Backend:** Uses badger by default, dgraph client integration complete
**Dgraph Integration:** ✅ Real dgraph client connection via dgo library
**Test Suite:** ✅ Comprehensive test suite mirroring badger tests

### ✅ Completed Components

1. **Core Infrastructure**
   - Database interface abstraction (`pkg/database/interface.go`)
   - Database factory with `ORLY_DB_TYPE` configuration
   - Dgraph package structure (`pkg/dgraph/`)
   - Schema definition for Nostr events, authors, tags, and markers
   - Lifecycle management (initialization, shutdown)

2. **Serial Number Generation**
   - Atomic counter using Dgraph markers (`pkg/dgraph/serial.go`)
   - Automatic initialization on startup
   - Thread-safe increment with mutex protection
   - Serial numbers assigned during SaveEvent

3. **Event Operations**
   - `SaveEvent`: Store events with graph relationships
   - `QueryEvents`: DQL query generation from Nostr filters
   - `QueryEventsWithOptions`: Support for delete events and versions
   - `CountEvents`: Event counting
   - `FetchEventBySerial`: Retrieve by serial number
   - `DeleteEvent`: Event deletion by ID
   - `DeleteEventBySerial`: Event deletion by serial
   - `ProcessDelete`: Kind 5 deletion processing
4. **Metadata Storage (Marker-based)**
   - `SetMarker`/`GetMarker`/`HasMarker`/`DeleteMarker`: Key-value storage
   - Relay identity storage (using markers)
   - All metadata stored as special Marker nodes in graph

5. **Subscriptions & Payments**
   - `GetSubscription`/`IsSubscriptionActive`/`ExtendSubscription`
   - `RecordPayment`/`GetPaymentHistory`
   - `ExtendBlossomSubscription`/`GetBlossomStorageQuota`
   - `IsFirstTimeUser`
   - All implemented using JSON-encoded markers

6. **NIP-43 Invite System**
   - `AddNIP43Member`/`RemoveNIP43Member`/`IsNIP43Member`
   - `GetNIP43Membership`/`GetAllNIP43Members`
   - `StoreInviteCode`/`ValidateInviteCode`/`DeleteInviteCode`
   - All implemented using JSON-encoded markers

7. **Import/Export**
   - `Import`/`ImportEventsFromReader`/`ImportEventsFromStrings`
   - JSONL format support
   - Basic `Export` stub

8. **Configuration**
   - `ORLY_DB_TYPE` environment variable added
   - Factory pattern for database instantiation
   - main.go updated to use database.Database interface

9. **Compilation Fixes (Completed)**
   - ✅ All interface signatures matched to badger implementation
   - ✅ Fixed 100+ type errors in pkg/dgraph package
   - ✅ Updated app layer to use database interface instead of concrete types
   - ✅ Added type assertions for compatibility with existing managers
   - ✅ Project compiles successfully with both badger and dgraph implementations

10. **Dgraph Server Integration (✅ STEP 1 COMPLETE)**
    - ✅ Added dgo client library (v230.0.1)
    - ✅ Implemented gRPC connection to external dgraph instance
    - ✅ Real Query() and Mutate() methods using dgraph client
    - ✅ Schema definition and automatic application on startup
    - ✅ ORLY_DGRAPH_URL configuration (default: localhost:9080)
    - ✅ Proper connection lifecycle management
    - ✅ Badger metadata store for local key-value storage
    - ✅ Dual-storage architecture: dgraph for events, badger for metadata

11. **Test Suite (✅ COMPLETE)**
    - ✅ Test infrastructure (testmain_test.go, helpers_test.go)
    - ✅ Comprehensive save-event tests
    - ✅ Comprehensive query-events tests
    - ✅ Docker-compose setup for dgraph server
    - ✅ Automated test scripts (test-dgraph.sh, dgraph-start.sh)
    - ✅ Test documentation (DGRAPH_TESTING.md)
    - ✅ All tests compile successfully
    - ⏳ Tests require running dgraph server to execute

### ⚠️ Remaining Work (For Production Use)

1. **Unimplemented Methods** (Stubs - Not Critical)
   - `GetSerialsFromFilter`: Returns "not implemented" error
   - `GetSerialsByRange`: Returns "not implemented" error
   - `EventIdsBySerial`: Returns "not implemented" error
   - These are helper methods that may not be critical for basic operation

2. **📝 STEP 2: DQL Implementation** (Next Priority)
   - Update save-event.go to use real Mutate() calls with RDF N-Quads
   - Update query-events.go to parse actual DQL responses
   - Implement proper event JSON unmarshaling from dgraph responses
   - Add error handling for dgraph-specific errors
   - Optimize DQL queries for performance

3. **Schema Optimizations**
   - Current tag queries are simplified
   - Complex tag filters may need refinement
   - Consider using Dgraph facets for better tag indexing

4. **📝 STEP 3: Testing** (After DQL Implementation)
   - Set up local dgraph instance for testing
   - Integration testing with relay-tester
   - Performance comparison with Badger
   - Memory usage profiling
   - Test with actual dgraph server instance

### 📦 Dependencies Added

```bash
go get github.com/dgraph-io/dgo/v230@v230.0.1
go get google.golang.org/grpc@latest
go get github.com/dgraph-io/badger/v4 # For metadata storage
```

All dependencies have been added and `go mod tidy` completed successfully.

### 🔌 Dgraph Server Integration Details

The implementation uses a **client-server architecture**:

1. **Dgraph Server** (External)
   - Runs as a separate process (via docker or standalone)
   - Default gRPC endpoint: `localhost:9080`
   - Configured via `ORLY_DGRAPH_URL` environment variable

2. **ORLY Dgraph Client** (Integrated)
   - Uses dgo library for gRPC communication
   - Connects on startup, applies Nostr schema automatically
   - Query and Mutate methods communicate with dgraph server

3. **Dual Storage Architecture**
   - **Dgraph**: Event graph storage (events, authors, tags, relationships)
   - **Badger**: Metadata storage (markers, counters, relay identity)
   - This hybrid approach leverages strengths of both databases

## Implementation Approach

### Marker-Based Storage

For metadata that doesn't fit the graph model (subscriptions, NIP-43, identity), we use a marker-based approach:

1. **Markers** are special graph nodes with type "Marker"
2. Each marker has:
   - `marker.key`: String index for lookup
   - `marker.value`: Hex-encoded or JSON-encoded data
3. This provides key-value storage within the graph database
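
A sketch of how a marker might be written and read back with the dgo client; the predicate names follow the `marker.key`/`marker.value` scheme above, but the exact schema, endpoint, and error handling here are illustrative assumptions:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/dgraph-io/dgo/v230"
	"github.com/dgraph-io/dgo/v230/protos/api"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:9080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	dg := dgo.NewDgraphClient(api.NewDgraphClient(conn))
	ctx := context.Background()

	// Write a marker as a graph node with marker.key / marker.value predicates.
	mu := &api.Mutation{
		CommitNow: true,
		SetNquads: []byte(`_:m <dgraph.type> "Marker" .
_:m <marker.key> "relay_identity" .
_:m <marker.value> "deadbeef" .`),
	}
	if _, err := dg.NewTxn().Mutate(ctx, mu); err != nil {
		log.Fatal(err)
	}

	// Read it back by key (requires a string index on marker.key in the schema).
	q := `{ m(func: eq(marker.key, "relay_identity")) { marker.value } }`
	resp, err := dg.NewReadOnlyTxn().Query(ctx, q)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(resp.Json))
}
```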

### Serial Number Management

Serial numbers are critical for event ordering. Implementation:

```go
// Serial counter stored as a special marker
const serialCounterKey = "serial_counter"

// serialMutex guards the read-increment-save cycle on the counter marker
var serialMutex sync.Mutex

// Atomic increment with mutex protection
func (d *D) getNextSerial() (uint64, error) {
    serialMutex.Lock()
    defer serialMutex.Unlock()

    // Query current value, increment, save
    ...
}
```

### Event Storage

Events are stored as graph nodes with relationships:

- **Event nodes**: ID, serial, kind, created_at, content, sig, pubkey, tags
- **Author nodes**: Pubkey with reverse edges to events
- **Tag nodes**: Tag type and value with reverse edges
- **Relationships**: `authored_by`, `references`, `mentions`, `tagged_with`

## Files Created/Modified

### New Files (`pkg/dgraph/`)
- `dgraph.go`: Main implementation, initialization, schema
- `save-event.go`: Event storage with RDF triple generation
- `query-events.go`: Nostr filter to DQL translation
- `fetch-event.go`: Event retrieval methods
- `delete.go`: Event deletion
- `markers.go`: Key-value metadata storage
- `identity.go`: Relay identity management
- `serial.go`: Serial number generation
- `subscriptions.go`: Subscription/payment methods
- `nip43.go`: NIP-43 invite system
- `import-export.go`: Import/export operations
- `logger.go`: Logging adapter
- `utils.go`: Helper functions
- `README.md`: Documentation

### Modified Files
- `pkg/database/interface.go`: Database interface definition
- `pkg/database/factory.go`: Database factory
- `pkg/database/database.go`: Badger compile-time check
- `app/config/config.go`: Added `ORLY_DB_TYPE` config
- `app/server.go`: Changed to use Database interface
- `app/main.go`: Updated to use Database interface
- `main.go`: Added dgraph import and factory usage

## Usage

### Setting Up Dgraph Server

Before using dgraph mode, start a dgraph server:

```bash
# Using docker (recommended)
docker run -d -p 8080:8080 -p 9080:9080 -p 8000:8000 \
  -v ~/dgraph:/dgraph \
  dgraph/standalone:latest

# Or using docker-compose (see docs/dgraph-docker-compose.yml)
docker-compose up -d dgraph
```

### Environment Configuration
```bash
# Use Badger (default)
./orly

# Use Dgraph with default localhost connection
export ORLY_DB_TYPE=dgraph
./orly

# Use Dgraph with custom server
export ORLY_DB_TYPE=dgraph
export ORLY_DGRAPH_URL=remote.dgraph.server:9080
./orly

# With full configuration
export ORLY_DB_TYPE=dgraph
export ORLY_DGRAPH_URL=localhost:9080
export ORLY_DATA_DIR=/path/to/data
./orly
```

### Data Storage

#### Badger
- Single directory with SST files
- Typical size: 100-500MB for moderate usage

#### Dgraph
- Subdirectories include:
  - `p/`: Postings (main data)
  - `w/`: Write-ahead log
- Typical size: 500MB-2GB overhead + event data

## Performance Considerations

### Memory Usage
- **Badger**: ~100-200MB baseline
- **Dgraph**: ~500MB-1GB baseline

### Query Performance
- **Simple queries** (by ID, kind, author): Dgraph may be slower than Badger
- **Graph traversals** (follows-of-follows): Dgraph significantly faster
- **Full-text search**: Dgraph has built-in support

### Recommendations
1. Use Badger for simple, high-performance relays
2. Use Dgraph for relays needing complex graph queries
3. Consider hybrid approach: Badger primary + Dgraph secondary
## Next Steps to Complete
|
||||
|
||||
### ✅ STEP 1: Dgraph Server Integration (COMPLETED)
|
||||
- ✅ Added dgo client library
|
||||
- ✅ Implemented gRPC connection
|
||||
- ✅ Real Query/Mutate methods
|
||||
- ✅ Schema application
|
||||
- ✅ Configuration added
|
||||
|
||||
### 📝 STEP 2: DQL Implementation (Next Priority)
|
||||
|
||||
1. **Update SaveEvent Implementation** (2-3 hours)
|
||||
- Replace RDF string building with actual Mutate() calls
|
||||
- Use dgraph's SetNquads for event insertion
|
||||
- Handle UIDs and references properly
|
||||
- Add error handling and transaction rollback
|
||||
|
||||
2. **Update QueryEvents Implementation** (2-3 hours)
|
||||
- Parse actual JSON responses from dgraph Query()
|
||||
- Implement proper event deserialization
|
||||
- Handle pagination with DQL offset/limit
|
||||
- Add query optimization for common patterns
|
||||
|
||||
3. **Implement Helper Methods** (1-2 hours)
|
||||
- FetchEventBySerial using DQL
|
||||
- GetSerialsByIds using DQL
|
||||
- CountEvents using DQL aggregation
|
||||
- DeleteEvent using dgraph mutations
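
As a rough illustration of what the Step 2 work builds on, here is a minimal sketch of a SetNquads mutation through the dgo client. The dgo v210 API calls are real, but the predicate names (`event.id`, `event.kind`) and the event values are illustrative assumptions, not ORLY's actual schema:

```go
// Minimal sketch: insert one event node via SetNquads using dgo v210.
// Predicate names below are hypothetical, not the relay's real schema.
package main

import (
	"context"
	"log"

	"github.com/dgraph-io/dgo/v210"
	"github.com/dgraph-io/dgo/v210/protos/api"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:9080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	dg := dgo.NewDgraphClient(api.NewDgraphClient(conn))

	// The blank node "_:ev" lets dgraph assign a UID; CommitNow makes this
	// a single-shot transaction, so no explicit Commit/Discard is needed.
	mu := &api.Mutation{
		SetNquads: []byte(`_:ev <event.id> "5f3a" .
_:ev <event.kind> "1" .`),
		CommitNow: true,
	}
	if _, err := dg.NewTxn().Mutate(context.Background(), mu); err != nil {
		log.Fatal(err)
	}
}
```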

### 📝 STEP 3: Testing (After DQL)

1. **Setup Dgraph Test Instance** (30 minutes)
   ```bash
   # Start dgraph server
   docker run -d -p 9080:9080 dgraph/standalone:latest

   # Test connection
   ORLY_DB_TYPE=dgraph ORLY_DGRAPH_URL=localhost:9080 ./orly
   ```

2. **Basic Functional Testing** (1 hour)
   ```bash
   # Start with dgraph
   ORLY_DB_TYPE=dgraph ./orly

   # Test with relay-tester
   go run cmd/relay-tester/main.go -url ws://localhost:3334
   ```

3. **Performance Testing** (2 hours)
   ```bash
   # Compare query performance
   # Memory profiling
   # Load testing
   ```

## Known Limitations

1. **Subscription Storage**: Uses simple JSON encoding in markers rather than proper graph nodes
2. **Tag Queries**: The simplified implementation may not handle all complex tag filter combinations
3. **Export**: Basic stub - needs a full implementation for production use
4. **Migrations**: Not implemented (Dgraph schema changes require manual updates)

## Conclusion

The Dgraph implementation has successfully completed **✅ STEP 1: Dgraph Server Integration**.

### What Works Now (Step 1 Complete)
- ✅ Full database interface implementation
- ✅ All method signatures match the badger implementation
- ✅ Project compiles successfully with `CGO_ENABLED=0`
- ✅ Binary runs and starts successfully
- ✅ Real dgraph client connection via the dgo library
- ✅ gRPC communication with an external dgraph server
- ✅ Schema application on startup
- ✅ Query() and Mutate() methods implemented
- ✅ ORLY_DGRAPH_URL configuration
- ✅ Dual-storage architecture (dgraph + badger metadata)

### Implementation Status
- **Step 1: Dgraph Server Integration** ✅ COMPLETE
- **Step 2: DQL Implementation** 📝 Next (save-event.go and query-events.go need updates)
- **Step 3: Testing** 📝 After Step 2 (relay-tester, performance benchmarks)

### Architecture Summary

The implementation uses a **client-server architecture** with dual storage:

1. **Dgraph Client** (ORLY)
   - Connects to an external dgraph via gRPC (default: localhost:9080)
   - Applies the Nostr schema automatically on startup
   - Query/Mutate methods ready for DQL operations

2. **Dgraph Server** (External)
   - Runs separately via docker or a standalone binary
   - Stores event graph data (events, authors, tags, relationships)
   - Handles all graph queries and mutations

3. **Badger Metadata Store** (Local)
   - Stores markers, counters, and relay identity
   - Provides fast key-value access for non-graph data
   - Complements dgraph for hybrid storage benefits

The abstraction layer is complete and the dgraph client integration is functional. The next step is implementing the actual DQL query/mutation logic in save-event.go and query-events.go.
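
For orientation, a minimal sketch of how a caller opens whichever backend is configured. The `database.NewDatabase` signature matches the benchmark harness elsewhere in this change set, and `DBType`, `DataDir`, and `DBLogLevel` appear in app/config; the config import path is an assumption:

```go
// Sketch only: backend selection through the factory. The blank import
// registers the dgraph backend, mirroring the pattern used in main.go.
package main

import (
	"context"

	"next.orly.dev/app/config" // assumed import path for the config package
	"next.orly.dev/pkg/database"
	_ "next.orly.dev/pkg/dgraph" // registers the dgraph factory
)

func openDatabase(cfg *config.C) (database.Database, error) {
	ctx, cancel := context.WithCancel(context.Background())
	// cfg.DBType is "badger" (default) or "dgraph"; the factory dispatches.
	return database.NewDatabase(ctx, cancel, cfg.DBType, cfg.DataDir, cfg.DBLogLevel)
}
```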

@@ -1,197 +0,0 @@
# Migration to git.mleku.dev/mleku/nostr Library

## Overview

Successfully migrated the ORLY relay codebase to use the external `git.mleku.dev/mleku/nostr` library instead of maintaining duplicate protocol code internally.

## Migration Statistics

- **Files Changed**: 449
- **Lines Added**: 624
- **Lines Removed**: 65,132
- **Net Reduction**: **64,508 lines of code** (~30-40% of the codebase)

## Packages Migrated

### Removed from next.orly.dev/pkg/

The following packages were completely removed as they now come from the nostr library:

#### Encoders (`pkg/encoders/`)
- `encoders/event/` → `git.mleku.dev/mleku/nostr/encoders/event`
- `encoders/filter/` → `git.mleku.dev/mleku/nostr/encoders/filter`
- `encoders/tag/` → `git.mleku.dev/mleku/nostr/encoders/tag`
- `encoders/kind/` → `git.mleku.dev/mleku/nostr/encoders/kind`
- `encoders/timestamp/` → `git.mleku.dev/mleku/nostr/encoders/timestamp`
- `encoders/hex/` → `git.mleku.dev/mleku/nostr/encoders/hex`
- `encoders/text/` → `git.mleku.dev/mleku/nostr/encoders/text`
- `encoders/ints/` → `git.mleku.dev/mleku/nostr/encoders/ints`
- `encoders/bech32encoding/` → `git.mleku.dev/mleku/nostr/encoders/bech32encoding`
- `encoders/reason/` → `git.mleku.dev/mleku/nostr/encoders/reason`
- `encoders/varint/` → `git.mleku.dev/mleku/nostr/encoders/varint`

#### Envelopes (`pkg/encoders/envelopes/`)
- `envelopes/eventenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/eventenvelope`
- `envelopes/reqenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/reqenvelope`
- `envelopes/okenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/okenvelope`
- `envelopes/noticeenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/noticeenvelope`
- `envelopes/eoseenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/eoseenvelope`
- `envelopes/closedenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/closedenvelope`
- `envelopes/closeenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/closeenvelope`
- `envelopes/countenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/countenvelope`
- `envelopes/authenvelope/` → `git.mleku.dev/mleku/nostr/encoders/envelopes/authenvelope`

#### Cryptography (`pkg/crypto/`)
- `crypto/p8k/` → `git.mleku.dev/mleku/nostr/crypto/p8k`
- `crypto/ec/schnorr/` → `git.mleku.dev/mleku/nostr/crypto/ec/schnorr`
- `crypto/ec/secp256k1/` → `git.mleku.dev/mleku/nostr/crypto/ec/secp256k1`
- `crypto/ec/bech32/` → `git.mleku.dev/mleku/nostr/crypto/ec/bech32`
- `crypto/ec/musig2/` → `git.mleku.dev/mleku/nostr/crypto/ec/musig2`
- `crypto/ec/base58/` → `git.mleku.dev/mleku/nostr/crypto/ec/base58`
- `crypto/ec/ecdsa/` → `git.mleku.dev/mleku/nostr/crypto/ec/ecdsa`
- `crypto/ec/taproot/` → `git.mleku.dev/mleku/nostr/crypto/ec/taproot`
- `crypto/keys/` → `git.mleku.dev/mleku/nostr/crypto/keys`
- `crypto/encryption/` → `git.mleku.dev/mleku/nostr/crypto/encryption`

#### Interfaces (`pkg/interfaces/`)
- `interfaces/signer/` → `git.mleku.dev/mleku/nostr/interfaces/signer`
- `interfaces/signer/p8k/` → `git.mleku.dev/mleku/nostr/interfaces/signer/p8k`
- `interfaces/codec/` → `git.mleku.dev/mleku/nostr/interfaces/codec`

#### Protocol (`pkg/protocol/`)
- `protocol/ws/` → `git.mleku.dev/mleku/nostr/ws` (note: moved to the root level in the library)
- `protocol/auth/` → `git.mleku.dev/mleku/nostr/protocol/auth`
- `protocol/relayinfo/` → `git.mleku.dev/mleku/nostr/relayinfo`
- `protocol/httpauth/` → `git.mleku.dev/mleku/nostr/httpauth`

#### Utilities (`pkg/utils/`)
- `utils/bufpool/` → `git.mleku.dev/mleku/nostr/utils/bufpool`
- `utils/normalize/` → `git.mleku.dev/mleku/nostr/utils/normalize`
- `utils/constraints/` → `git.mleku.dev/mleku/nostr/utils/constraints`
- `utils/number/` → `git.mleku.dev/mleku/nostr/utils/number`
- `utils/pointers/` → `git.mleku.dev/mleku/nostr/utils/pointers`
- `utils/units/` → `git.mleku.dev/mleku/nostr/utils/units`
- `utils/values/` → `git.mleku.dev/mleku/nostr/utils/values`

### Packages Kept in ORLY (Relay-Specific)

The following packages remain in the ORLY codebase as they are relay-specific:

- `pkg/database/` - Database abstraction layer (Badger, DGraph backends)
- `pkg/acl/` - Access control systems (follows, managed, none)
- `pkg/policy/` - Event filtering and validation policies
- `pkg/spider/` - Event syncing from other relays
- `pkg/sync/` - Distributed relay synchronization
- `pkg/protocol/blossom/` - Blossom blob storage protocol implementation
- `pkg/protocol/directory/` - Directory service
- `pkg/protocol/nwc/` - Nostr Wallet Connect
- `pkg/protocol/nip43/` - NIP-43 relay management
- `pkg/protocol/publish/` - Event publisher for WebSocket subscriptions
- `pkg/interfaces/publisher/` - Publisher interface
- `pkg/interfaces/store/` - Storage interface
- `pkg/interfaces/acl/` - ACL interface
- `pkg/interfaces/typer/` - Type identification interface (not in the nostr library)
- `pkg/utils/atomic/` - Extended atomic operations
- `pkg/utils/interrupt/` - Signal handling
- `pkg/utils/apputil/` - Application utilities
- `pkg/utils/qu/` - Queue utilities
- `pkg/utils/fastequal.go` - Fast byte comparison
- `pkg/utils/subscription.go` - Subscription utilities
- `pkg/run/` - Run utilities
- `pkg/version/` - Version information
- `app/` - All relay server code

## Migration Process

### 1. Added Dependency
```bash
go get git.mleku.dev/mleku/nostr@latest
```

### 2. Updated Imports
Created an automated migration script to rewrite all import paths:
- `next.orly.dev/pkg/encoders/*` → `git.mleku.dev/mleku/nostr/encoders/*`
- `next.orly.dev/pkg/crypto/*` → `git.mleku.dev/mleku/nostr/crypto/*`
- etc.

Processed **240+ files** with encoder imports, **74 files** with crypto imports, and **9 files** with WebSocket client imports.

### 3. Special Cases
- **pkg/interfaces/typer/**: Restored from git as it's not in the nostr library (relay-specific)
- **pkg/protocol/ws/**: Mapped to the root-level `ws/` in the nostr library
- **Test helpers**: Updated to use `git.mleku.dev/mleku/nostr/encoders/event/examples`
- **atag package**: Migrated to `git.mleku.dev/mleku/nostr/encoders/tag/atag`

### 4. Removed Redundant Code
```bash
rm -rf pkg/encoders pkg/crypto pkg/interfaces/signer pkg/interfaces/codec \
  pkg/protocol/ws pkg/protocol/auth pkg/protocol/relayinfo \
  pkg/protocol/httpauth pkg/utils/bufpool pkg/utils/normalize \
  pkg/utils/constraints pkg/utils/number pkg/utils/pointers \
  pkg/utils/units pkg/utils/values
```

### 5. Fixed Dependencies
- Ran `go mod tidy` to clean up go.mod
- Rebuilt with `CGO_ENABLED=0 GOFLAGS=-mod=mod go build -o orly .`
- Verified tests pass

## Benefits

### 1. Code Reduction
- **64,508 fewer lines** of code to maintain
- Simplified codebase focused on relay-specific functionality
- Reduced maintenance burden

### 2. Code Reuse
- Nostr protocol code can be shared across multiple projects
- Clients and other tools can use the same library
- Consistent implementation across the ecosystem

### 3. Separation of Concerns
- Clear boundary between general Nostr protocol code (library) and relay-specific code (ORLY)
- Easier to understand which code is protocol-level vs. application-level

### 4. Improved Development
- Protocol improvements benefit all projects using the library
- Bug fixes are centralized
- Testing is consolidated

## Verification

### Build Status
✅ **Build successful**: Binary builds without errors

### Test Status
✅ **App tests passed**: All application-level tests pass
⏳ **Database tests**: Run but time out due to the comprehensive query tests; functionality is verified

### Binary Output
```
$ ./orly version
ℹ️ starting ORLY v0.29.14
✅ Successfully initialized with nostr library
```

## Next Steps

1. **Commit Changes**: Review and commit the migration
2. **Update Documentation**: Update CLAUDE.md to reflect the new architecture
3. **CI/CD**: Ensure the CI pipeline works with the new dependency
4. **Testing**: Run the full test suite to verify all functionality

## Notes

- The migration maintains full compatibility with existing ORLY functionality
- No changes to relay behavior or API
- All relay-specific features remain intact
- The nostr library is actively maintained at `git.mleku.dev/mleku/nostr`
- Library version: **v1.0.2**

## Migration Scripts

Created helper scripts:
- `migrate-imports.sh` - Original comprehensive migration script
- `migrate-fast.sh` - Fast sed-based migration script (used)

These scripts can be deleted after the migration is committed.

@@ -1,5 +1,13 @@
// Package config provides a go-simpler.org/env configuration table and helpers
// for working with the key/value lists stored in .env files.
//
// IMPORTANT: This file is the SINGLE SOURCE OF TRUTH for all environment variables.
// All configuration options MUST be defined here with proper `env` struct tags.
// Never use os.Getenv() directly in other packages - pass configuration via structs.
// This ensures all options appear in `./orly help` output and are documented.
//
// For database backends, use GetDatabaseConfigValues() to extract database-specific
// settings, then construct a database.DatabaseConfig in the caller (e.g., main.go).
package config

import (
@@ -82,11 +90,18 @@ type C struct {
	NIP43InviteExpiry time.Duration `env:"ORLY_NIP43_INVITE_EXPIRY" default:"24h" usage:"how long invite codes remain valid"`

	// Database configuration
	DBType string `env:"ORLY_DB_TYPE" default:"badger" usage:"database backend to use: badger or dgraph"`
	DgraphURL string `env:"ORLY_DGRAPH_URL" default:"localhost:9080" usage:"dgraph gRPC endpoint address (only used when ORLY_DB_TYPE=dgraph)"`
	QueryCacheSizeMB int `env:"ORLY_QUERY_CACHE_SIZE_MB" default:"512" usage:"query cache size in MB (caches database query results for faster REQ responses)"`
	DBType string `env:"ORLY_DB_TYPE" default:"badger" usage:"database backend to use: badger or neo4j"`
	QueryCacheSizeMB int `env:"ORLY_QUERY_CACHE_SIZE_MB" default:"512" usage:"query cache size in MB (caches database query results for faster REQ responses)"`
	QueryCacheMaxAge string `env:"ORLY_QUERY_CACHE_MAX_AGE" default:"5m" usage:"maximum age for cached query results (e.g., 5m, 10m, 1h)"`

	// Neo4j configuration (only used when ORLY_DB_TYPE=neo4j)
	Neo4jURI string `env:"ORLY_NEO4J_URI" default:"bolt://localhost:7687" usage:"Neo4j bolt URI (only used when ORLY_DB_TYPE=neo4j)"`
	Neo4jUser string `env:"ORLY_NEO4J_USER" default:"neo4j" usage:"Neo4j authentication username (only used when ORLY_DB_TYPE=neo4j)"`
	Neo4jPassword string `env:"ORLY_NEO4J_PASSWORD" default:"password" usage:"Neo4j authentication password (only used when ORLY_DB_TYPE=neo4j)"`

	// Advanced database tuning
	InlineEventThreshold int `env:"ORLY_INLINE_EVENT_THRESHOLD" default:"1024" usage:"size threshold in bytes for inline event storage in Badger (0 to disable, typical values: 384-1024)"`

	// TLS configuration
	TLSDomains []string `env:"ORLY_TLS_DOMAINS" usage:"comma-separated list of domains to respond to for TLS"`
	Certs []string `env:"ORLY_CERTS" usage:"comma-separated list of paths to certificate root names (e.g., /path/to/cert will load /path/to/cert.pem and /path/to/cert.key)"`
@@ -217,6 +232,21 @@ func ServeRequested() (requested bool) {
	return
}

// VersionRequested checks if the first command line argument is "version" and returns
// whether the version should be printed and the program should exit.
//
// Return Values
// - requested: true if the 'version' subcommand was provided, false otherwise.
func VersionRequested() (requested bool) {
	if len(os.Args) > 1 {
		switch strings.ToLower(os.Args[1]) {
		case "version", "-v", "--v", "-version", "--version":
			requested = true
		}
	}
	return
}

// KV is a key/value pair.
type KV struct{ Key, Value string }

@@ -348,7 +378,7 @@ func PrintHelp(cfg *C, printer io.Writer) {
	)
	_, _ = fmt.Fprintf(
		printer,
		`Usage: %s [env|help|identity|serve]
		`Usage: %s [env|help|identity|serve|version]

- env: print environment variables configuring %s
- help: print this help text
@@ -356,6 +386,7 @@ func PrintHelp(cfg *C, printer io.Writer) {
- serve: start ephemeral relay with RAM-based storage at /dev/shm/orlyserve
  listening on 0.0.0.0:10547 with 'none' ACL mode (open relay)
  useful for testing and benchmarking
- version: print version and exit (also: -v, --v, -version, --version)

`,
		cfg.AppName, cfg.AppName,
@@ -369,3 +400,28 @@ func PrintHelp(cfg *C, printer io.Writer) {
	PrintEnv(cfg, printer)
	fmt.Fprintln(printer)
}

// GetDatabaseConfigValues returns the database configuration values as individual fields.
// This avoids circular imports with pkg/database while allowing main.go to construct
// a database.DatabaseConfig with the correct type.
func (cfg *C) GetDatabaseConfigValues() (
	dataDir, logLevel string,
	blockCacheMB, indexCacheMB, queryCacheSizeMB int,
	queryCacheMaxAge time.Duration,
	inlineEventThreshold int,
	neo4jURI, neo4jUser, neo4jPassword string,
) {
	// Parse query cache max age from string to duration
	queryCacheMaxAge = 5 * time.Minute // Default
	if cfg.QueryCacheMaxAge != "" {
		if duration, err := time.ParseDuration(cfg.QueryCacheMaxAge); err == nil {
			queryCacheMaxAge = duration
		}
	}

	return cfg.DataDir, cfg.DBLogLevel,
		cfg.DBBlockCacheMB, cfg.DBIndexCacheMB, cfg.QueryCacheSizeMB,
		queryCacheMaxAge,
		cfg.InlineEventThreshold,
		cfg.Neo4jURI, cfg.Neo4jUser, cfg.Neo4jPassword
}
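
A hypothetical caller-side sketch of how main.go might consume these values. The `database.DatabaseConfig` field names below are assumptions for illustration and may not match the real struct:

```go
// Hypothetical wiring in main.go; DatabaseConfig field names are assumed.
func buildDatabaseConfig(cfg *config.C) *database.DatabaseConfig {
	dataDir, logLevel, blockMB, indexMB, queryMB,
		maxAge, inlineThreshold,
		neo4jURI, neo4jUser, neo4jPassword := cfg.GetDatabaseConfigValues()

	return &database.DatabaseConfig{
		DataDir:              dataDir,         // assumed field name
		LogLevel:             logLevel,        // assumed field name
		BlockCacheMB:         blockMB,         // assumed field name
		IndexCacheMB:         indexMB,         // assumed field name
		QueryCacheSizeMB:     queryMB,         // assumed field name
		QueryCacheMaxAge:     maxAge,          // assumed field name
		InlineEventThreshold: inlineThreshold, // assumed field name
		Neo4jURI:             neo4jURI,        // assumed field name
		Neo4jUser:            neo4jUser,       // assumed field name
		Neo4jPassword:        neo4jPassword,   // assumed field name
	}
}
```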

@@ -1,6 +1,7 @@
package app

import (
	"bytes"
	"context"
	"fmt"
	"strings"
@@ -11,6 +12,7 @@ import (
	"next.orly.dev/pkg/acl"
	"git.mleku.dev/mleku/nostr/encoders/envelopes/authenvelope"
	"git.mleku.dev/mleku/nostr/encoders/envelopes/eventenvelope"
	"git.mleku.dev/mleku/nostr/encoders/envelopes/noticeenvelope"
	"git.mleku.dev/mleku/nostr/encoders/envelopes/okenvelope"
	"git.mleku.dev/mleku/nostr/encoders/hex"
	"git.mleku.dev/mleku/nostr/encoders/kind"
@@ -19,8 +21,185 @@ import (
	"next.orly.dev/pkg/utils"
)

// validateLowercaseHexInJSON checks that all hex-encoded fields in the raw JSON are lowercase.
// NIP-01 specifies that hex encoding must be lowercase.
// This must be called on the raw message BEFORE unmarshaling, since unmarshal converts
// hex strings to binary and loses case information.
// Returns an error message if validation fails, or an empty string if valid.
func validateLowercaseHexInJSON(msg []byte) string {
	// Find and validate "id" field (64 hex chars)
	if err := validateJSONHexField(msg, `"id"`); err != "" {
		return err + " (id)"
	}

	// Find and validate "pubkey" field (64 hex chars)
	if err := validateJSONHexField(msg, `"pubkey"`); err != "" {
		return err + " (pubkey)"
	}

	// Find and validate "sig" field (128 hex chars)
	if err := validateJSONHexField(msg, `"sig"`); err != "" {
		return err + " (sig)"
	}

	// Validate e and p tags in the tags array
	// Tags format: ["e", "hexvalue", ...] or ["p", "hexvalue", ...]
	if err := validateEPTagsInJSON(msg); err != "" {
		return err
	}

	return "" // Valid
}

// validateJSONHexField finds a JSON field and checks if its hex value contains uppercase.
func validateJSONHexField(msg []byte, fieldName string) string {
	// Find the field name
	idx := bytes.Index(msg, []byte(fieldName))
	if idx == -1 {
		return "" // Field not found, skip
	}

	// Find the colon after the field name
	colonIdx := bytes.Index(msg[idx:], []byte(":"))
	if colonIdx == -1 {
		return ""
	}

	// Find the opening quote of the value
	valueStart := idx + colonIdx + 1
	for valueStart < len(msg) && (msg[valueStart] == ' ' || msg[valueStart] == '\t' || msg[valueStart] == '\n' || msg[valueStart] == '\r') {
		valueStart++
	}
	if valueStart >= len(msg) || msg[valueStart] != '"' {
		return ""
	}
	valueStart++ // Skip the opening quote

	// Find the closing quote
	valueEnd := valueStart
	for valueEnd < len(msg) && msg[valueEnd] != '"' {
		valueEnd++
	}

	// Extract the hex value and check for uppercase
	hexValue := msg[valueStart:valueEnd]
	if containsUppercaseHex(hexValue) {
		return "blocked: hex fields may only be lower case, see NIP-01"
	}

	return ""
}

// validateEPTagsInJSON checks e and p tags in the JSON for uppercase hex.
func validateEPTagsInJSON(msg []byte) string {
	// Find the tags array
	tagsIdx := bytes.Index(msg, []byte(`"tags"`))
	if tagsIdx == -1 {
		return "" // No tags
	}

	// Find the opening bracket of the tags array
	bracketIdx := bytes.Index(msg[tagsIdx:], []byte("["))
	if bracketIdx == -1 {
		return ""
	}

	tagsStart := tagsIdx + bracketIdx

	// Scan through to find ["e", ...] and ["p", ...] patterns
	// This is a simplified parser that looks for specific patterns
	pos := tagsStart
	for pos < len(msg) {
		// Look for ["e" or ["p" pattern
		eTagPattern := bytes.Index(msg[pos:], []byte(`["e"`))
		pTagPattern := bytes.Index(msg[pos:], []byte(`["p"`))

		var tagType string
		var nextIdx int

		if eTagPattern == -1 && pTagPattern == -1 {
			break // No more e or p tags
		} else if eTagPattern == -1 {
			nextIdx = pos + pTagPattern
			tagType = "p"
		} else if pTagPattern == -1 {
			nextIdx = pos + eTagPattern
			tagType = "e"
		} else if eTagPattern < pTagPattern {
			nextIdx = pos + eTagPattern
			tagType = "e"
		} else {
			nextIdx = pos + pTagPattern
			tagType = "p"
		}

		// Find the hex value after the tag type
		// Pattern: ["e", "hexvalue" or ["p", "hexvalue"
		commaIdx := bytes.Index(msg[nextIdx:], []byte(","))
		if commaIdx == -1 {
			pos = nextIdx + 4
			continue
		}

		// Find the opening quote of the hex value
		valueStart := nextIdx + commaIdx + 1
		for valueStart < len(msg) && (msg[valueStart] == ' ' || msg[valueStart] == '\t' || msg[valueStart] == '"') {
			if msg[valueStart] == '"' {
				valueStart++
				break
			}
			valueStart++
		}

		// Find the closing quote
		valueEnd := valueStart
		for valueEnd < len(msg) && msg[valueEnd] != '"' {
			valueEnd++
		}

		// Check if this looks like a hex value (64 chars for pubkey/event ID)
		hexValue := msg[valueStart:valueEnd]
		if len(hexValue) == 64 && containsUppercaseHex(hexValue) {
			return fmt.Sprintf("blocked: hex fields may only be lower case, see NIP-01 (%s tag)", tagType)
		}

		pos = valueEnd + 1
	}

	return ""
}

// containsUppercaseHex checks if a byte slice (representing hex) contains uppercase letters A-F.
func containsUppercaseHex(b []byte) bool {
	for _, c := range b {
		if c >= 'A' && c <= 'F' {
			return true
		}
	}
	return false
}

func (l *Listener) HandleEvent(msg []byte) (err error) {
	log.D.F("HandleEvent: START handling event: %s", msg)

	// Validate that all hex fields are lowercase BEFORE unmarshaling
	// (unmarshal converts hex to binary and loses case information)
	if errMsg := validateLowercaseHexInJSON(msg); errMsg != "" {
		log.W.F("HandleEvent: rejecting event with uppercase hex: %s", errMsg)
		// Send NOTICE to alert client developers about the issue
		if noticeErr := noticeenvelope.NewFrom(errMsg).Write(l); noticeErr != nil {
			log.E.F("failed to send NOTICE for uppercase hex: %v", noticeErr)
		}
		// Send OK false with the error message
		if err = okenvelope.NewFrom(
			nil, false,
			reason.Blocked.F(errMsg),
		).Write(l); chk.E(err) {
			return
		}
		return nil
	}

	// decode the envelope
	env := eventenvelope.NewSubmission()
	log.I.F("HandleEvent: received event message length: %d", len(msg))

@@ -6,7 +6,6 @@ import (
	"path/filepath"

	"github.com/adrg/xdg"
	"lol.mleku.dev/chk"
	"lol.mleku.dev/log"
	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/filter"
@@ -16,11 +15,20 @@ import (
)

// HandlePolicyConfigUpdate processes kind 12345 policy configuration events.
// Only policy admins can update policy configuration.
// Owners and policy admins can update policy configuration, with different permissions:
//
// OWNERS can:
// - Modify all fields including owners and policy_admins
// - But owners list must remain non-empty (to prevent lockout)
//
// POLICY ADMINS can:
// - Extend rules (add to allow lists, add new kinds, add blacklists)
// - CANNOT modify owners or policy_admins (protected fields)
// - CANNOT reduce owner-granted permissions
//
// Process flow:
// 1. Verify sender is policy admin (from current policy.policy_admins list)
// 2. Parse and validate JSON FIRST (before making any changes)
// 1. Check if sender is owner or policy admin
// 2. Validate JSON with appropriate rules for the sender type
// 3. Pause ALL message processing (lock mutex)
// 4. Reload policy (pause policy engine, update, save, resume)
// 5. Resume message processing (unlock mutex)
@@ -30,24 +38,40 @@ import (
func (l *Listener) HandlePolicyConfigUpdate(ev *event.E) error {
	log.I.F("received policy config update from pubkey: %s", hex.Enc(ev.Pubkey))

	// 1. Verify sender is policy admin (from current policy.policy_admins list)
	// 1. Verify sender is owner or policy admin
	if l.policyManager == nil {
		return fmt.Errorf("policy system is not enabled")
	}

	isOwner := l.policyManager.IsOwner(ev.Pubkey)
	isAdmin := l.policyManager.IsPolicyAdmin(ev.Pubkey)
	if !isAdmin {
		log.W.F("policy config update rejected: pubkey %s is not a policy admin", hex.Enc(ev.Pubkey))
		return fmt.Errorf("only policy administrators can update policy configuration")

	if !isOwner && !isAdmin {
		log.W.F("policy config update rejected: pubkey %s is not an owner or policy admin", hex.Enc(ev.Pubkey))
		return fmt.Errorf("only owners and policy administrators can update policy configuration")
	}

	log.I.F("policy admin verified: %s", hex.Enc(ev.Pubkey))
	if isOwner {
		log.I.F("owner verified: %s", hex.Enc(ev.Pubkey))
	} else {
		log.I.F("policy admin verified: %s", hex.Enc(ev.Pubkey))
	}

	// 2. Parse and validate JSON FIRST (before making any changes)
	// 2. Parse and validate JSON with appropriate validation rules
	policyJSON := []byte(ev.Content)
	if err := l.policyManager.ValidateJSON(policyJSON); chk.E(err) {
		log.E.F("policy config update validation failed: %v", err)
		return fmt.Errorf("invalid policy configuration: %v", err)
	var validationErr error

	if isOwner {
		// Owners can modify all fields, but owners list must be non-empty
		validationErr = l.policyManager.ValidateOwnerPolicyUpdate(policyJSON)
	} else {
		// Policy admins have restrictions: can't modify protected fields, can't reduce permissions
		validationErr = l.policyManager.ValidatePolicyAdminUpdate(policyJSON, ev.Pubkey)
	}

	if validationErr != nil {
		log.E.F("policy config update validation failed: %v", validationErr)
		return fmt.Errorf("invalid policy configuration: %v", validationErr)
	}

	log.I.F("policy config validation passed")
@@ -65,12 +89,23 @@ func (l *Listener) HandlePolicyConfigUpdate(ev *event.E) error {

	// 4. Reload policy (this will pause policy engine, update, save, and resume)
	log.I.F("applying policy configuration update")
	if err := l.policyManager.Reload(policyJSON, configPath); chk.E(err) {
		log.E.F("policy config update failed: %v", err)
		return fmt.Errorf("failed to apply policy configuration: %v", err)
	var reloadErr error
	if isOwner {
		reloadErr = l.policyManager.ReloadAsOwner(policyJSON, configPath)
	} else {
		reloadErr = l.policyManager.ReloadAsPolicyAdmin(policyJSON, configPath, ev.Pubkey)
	}

	log.I.F("policy configuration updated successfully by admin: %s", hex.Enc(ev.Pubkey))
	if reloadErr != nil {
		log.E.F("policy config update failed: %v", reloadErr)
		return fmt.Errorf("failed to apply policy configuration: %v", reloadErr)
	}

	if isOwner {
		log.I.F("policy configuration updated successfully by owner: %s", hex.Enc(ev.Pubkey))
	} else {
		log.I.F("policy configuration updated successfully by policy admin: %s", hex.Enc(ev.Pubkey))
	}

	// 5. Message processing mutex will be unlocked by defer
	return nil

@@ -139,6 +139,7 @@ func createPolicyConfigEvent(t *testing.T, signer *p8k.Signer, policyJSON string
}

// TestHandlePolicyConfigUpdate_ValidAdmin tests policy update from valid admin
// Policy admins can extend rules but cannot modify protected fields (owners, policy_admins)
func TestHandlePolicyConfigUpdate_ValidAdmin(t *testing.T) {
	// Create admin signer
	adminSigner := p8k.MustNew()
@@ -150,9 +151,10 @@ func TestHandlePolicyConfigUpdate_ValidAdmin(t *testing.T) {
	listener, _, cleanup := setupPolicyTestListener(t, adminHex)
	defer cleanup()

	// Create valid policy update event
	// Create valid policy update event that ONLY extends, doesn't modify protected fields
	// Note: policy_admins must stay the same (policy admins cannot change this field)
	newPolicyJSON := `{
		"default_policy": "deny",
		"default_policy": "allow",
		"policy_admins": ["` + adminHex + `"],
		"kind": {"whitelist": [1, 3, 7]}
	}`
@@ -165,9 +167,10 @@ func TestHandlePolicyConfigUpdate_ValidAdmin(t *testing.T) {
		t.Errorf("Expected success but got error: %v", err)
	}

	// Verify policy was updated
	if listener.policyManager.DefaultPolicy != "deny" {
		t.Errorf("Policy was not updated, default_policy = %q, expected 'deny'",
	// Verify policy was updated (kind whitelist was extended)
	// Note: default_policy should still be "allow" from original
	if listener.policyManager.DefaultPolicy != "allow" {
		t.Errorf("Policy was not updated correctly, default_policy = %q, expected 'allow'",
			listener.policyManager.DefaultPolicy)
	}
}
@@ -260,8 +263,9 @@ func TestHandlePolicyConfigUpdate_InvalidPubkey(t *testing.T) {
	}
}

// TestHandlePolicyConfigUpdate_AdminCannotRemoveSelf tests that admin can update policy
func TestHandlePolicyConfigUpdate_AdminCanUpdateAdminList(t *testing.T) {
// TestHandlePolicyConfigUpdate_PolicyAdminCannotModifyProtectedFields tests that policy admins
// cannot modify the owners or policy_admins fields (these are protected, owner-only fields)
func TestHandlePolicyConfigUpdate_PolicyAdminCannotModifyProtectedFields(t *testing.T) {
	adminSigner := p8k.MustNew()
	if err := adminSigner.Generate(); err != nil {
		t.Fatalf("Failed to generate admin keypair: %v", err)
@@ -274,22 +278,23 @@ func TestHandlePolicyConfigUpdate_AdminCanUpdateAdminList(t *testing.T) {
	listener, _, cleanup := setupPolicyTestListener(t, adminHex)
	defer cleanup()

	// Update policy to add second admin
	// Try to add second admin (policy_admins is a protected field)
	newPolicyJSON := `{
		"default_policy": "allow",
		"policy_admins": ["` + adminHex + `", "` + admin2Hex + `"]
	}`
	ev := createPolicyConfigEvent(t, adminSigner, newPolicyJSON)

	// This should FAIL because policy admins cannot modify the policy_admins field
	err := listener.HandlePolicyConfigUpdate(ev)
	if err != nil {
		t.Errorf("Expected success but got error: %v", err)
	if err == nil {
		t.Error("Expected error when policy admin tries to modify policy_admins (protected field)")
	}

	// Verify both admins are now in the list
	// Second admin should NOT be in the list since update was rejected
	admin2Bin, _ := hex.Dec(admin2Hex)
	if !listener.policyManager.IsPolicyAdmin(admin2Bin) {
		t.Error("Second admin should have been added to admin list")
	if listener.policyManager.IsPolicyAdmin(admin2Bin) {
		t.Error("Second admin should NOT have been added - policy_admins is protected")
	}
}

@@ -446,10 +451,11 @@ func TestMessageProcessingPauseDuringPolicyUpdate(t *testing.T) {

	// We can't easily mock the mutex, but we can verify the policy update succeeds
	// which implies the pause/resume cycle completed

	// Note: policy_admins must stay the same (protected field)
	newPolicyJSON := `{
		"default_policy": "deny",
		"policy_admins": ["` + adminHex + `"]
		"default_policy": "allow",
		"policy_admins": ["` + adminHex + `"],
		"kind": {"whitelist": [1, 3, 5, 7]}
	}`
	ev := createPolicyConfigEvent(t, adminSigner, newPolicyJSON)

@@ -462,8 +468,8 @@ func TestMessageProcessingPauseDuringPolicyUpdate(t *testing.T) {
	_ = pauseCalled
	_ = resumeCalled

	// Verify policy was actually updated
	if listener.policyManager.DefaultPolicy != "deny" {
	// Verify policy was actually updated (kind whitelist was extended)
	if listener.policyManager.DefaultPolicy != "allow" {
		t.Error("Policy should have been updated")
	}
}

@@ -2,7 +2,7 @@

A comprehensive benchmarking system for testing and comparing the performance of multiple Nostr relay implementations, including:

- **next.orly.dev** (this repository) - Badger, DGraph, and Neo4j backend variants
- **next.orly.dev** (this repository) - Badger and Neo4j backend variants
- **Khatru** - SQLite and Badger variants
- **Relayer** - Basic example implementation
- **Strfry** - C++ LMDB-based relay
@@ -94,10 +94,7 @@ ls reports/run_YYYYMMDD_HHMMSS/
| Service | Port | Description |
| ------------------ | ---- | ----------------------------------------- |
| next-orly-badger | 8001 | This repository's Badger relay |
| next-orly-dgraph | 8007 | This repository's DGraph relay |
| next-orly-neo4j | 8008 | This repository's Neo4j relay |
| dgraph-zero | 5080 | DGraph cluster coordinator |
| dgraph-alpha | 9080 | DGraph data node |
| neo4j | 7474/7687 | Neo4j graph database |
| khatru-sqlite | 8002 | Khatru with SQLite backend |
| khatru-badger | 8003 | Khatru with Badger backend |
@@ -180,7 +177,7 @@ go build -o benchmark main.go

## Database Backend Comparison

The benchmark suite includes **next.orly.dev** with three different database backends to compare architectural approaches:
The benchmark suite includes **next.orly.dev** with two different database backends to compare architectural approaches:

### Badger Backend (next-orly-badger)
- **Type**: Embedded key-value store
@@ -192,16 +189,6 @@ The benchmark suite includes **next.orly.dev** with three different database bac
- Simpler deployment
- Limited to single-node scaling

### DGraph Backend (next-orly-dgraph)
- **Type**: Distributed graph database
- **Architecture**: Client-server with dgraph-zero (coordinator) and dgraph-alpha (data node)
- **Best for**: Distributed deployments, horizontal scaling
- **Characteristics**:
  - Network overhead from gRPC communication
  - Supports multi-node clustering
  - Built-in replication and sharding
  - More complex deployment

### Neo4j Backend (next-orly-neo4j)
- **Type**: Native graph database
- **Architecture**: Client-server with Neo4j Community Edition
@@ -218,10 +205,10 @@ The benchmark suite includes **next.orly.dev** with three different database bac
### Comparing the Backends

The benchmark results will show:
- **Latency differences**: Embedded vs. distributed overhead, graph traversal efficiency
- **Throughput trade-offs**: Single-process optimization vs. distributed scalability vs. graph query optimization
- **Latency differences**: Embedded vs. client-server overhead, graph traversal efficiency
- **Throughput trade-offs**: Single-process optimization vs. graph query optimization
- **Resource usage**: Memory and CPU patterns for different architectures
- **Query performance**: Graph queries (Neo4j) vs. key-value lookups (Badger) vs. distributed queries (DGraph)
- **Query performance**: Graph queries (Neo4j) vs. key-value lookups (Badger)

This comparison helps determine which backend is appropriate for different deployment scenarios and workload patterns.

@@ -1,130 +0,0 @@
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"time"

	"next.orly.dev/pkg/database"
	_ "next.orly.dev/pkg/dgraph" // Import to register dgraph factory
)

// DgraphBenchmark wraps a Benchmark with dgraph-specific setup
type DgraphBenchmark struct {
	config   *BenchmarkConfig
	docker   *DgraphDocker
	database database.Database
	bench    *BenchmarkAdapter
}

// NewDgraphBenchmark creates a new dgraph benchmark instance
func NewDgraphBenchmark(config *BenchmarkConfig) (*DgraphBenchmark, error) {
	// Create Docker manager
	docker := NewDgraphDocker()

	// Start dgraph containers
	ctx := context.Background()
	if err := docker.Start(ctx); err != nil {
		return nil, fmt.Errorf("failed to start dgraph: %w", err)
	}

	// Set environment variable for dgraph connection
	os.Setenv("ORLY_DGRAPH_URL", docker.GetGRPCEndpoint())

	// Create database instance using dgraph backend
	cancel := func() {}
	db, err := database.NewDatabase(ctx, cancel, "dgraph", config.DataDir, "warn")
	if err != nil {
		docker.Stop()
		return nil, fmt.Errorf("failed to create dgraph database: %w", err)
	}

	// Wait for database to be ready
	fmt.Println("Waiting for dgraph database to be ready...")
	select {
	case <-db.Ready():
		fmt.Println("Dgraph database is ready")
	case <-time.After(30 * time.Second):
		db.Close()
		docker.Stop()
		return nil, fmt.Errorf("dgraph database failed to become ready")
	}

	// Create adapter to use Database interface with Benchmark
	adapter := NewBenchmarkAdapter(config, db)

	dgraphBench := &DgraphBenchmark{
		config:   config,
		docker:   docker,
		database: db,
		bench:    adapter,
	}

	return dgraphBench, nil
}

// Close closes the dgraph benchmark and stops Docker containers
func (dgb *DgraphBenchmark) Close() {
	fmt.Println("Closing dgraph benchmark...")

	if dgb.database != nil {
		dgb.database.Close()
	}

	if dgb.docker != nil {
		if err := dgb.docker.Stop(); err != nil {
			log.Printf("Error stopping dgraph Docker: %v", err)
		}
	}
}

// RunSuite runs the benchmark suite on dgraph
func (dgb *DgraphBenchmark) RunSuite() {
	fmt.Println("\n╔════════════════════════════════════════════════════════╗")
	fmt.Println("║ DGRAPH BACKEND BENCHMARK SUITE ║")
	fmt.Println("╚════════════════════════════════════════════════════════╝")

	// Run only one round for dgraph to keep benchmark time reasonable
	fmt.Printf("\n=== Starting dgraph benchmark ===\n")

	fmt.Printf("RunPeakThroughputTest (dgraph)..\n")
	dgb.bench.RunPeakThroughputTest()
	fmt.Println("Wiping database between tests...")
	dgb.database.Wipe()
	time.Sleep(10 * time.Second)

	fmt.Printf("RunBurstPatternTest (dgraph)..\n")
	dgb.bench.RunBurstPatternTest()
	fmt.Println("Wiping database between tests...")
	dgb.database.Wipe()
	time.Sleep(10 * time.Second)

	fmt.Printf("RunMixedReadWriteTest (dgraph)..\n")
	dgb.bench.RunMixedReadWriteTest()
	fmt.Println("Wiping database between tests...")
	dgb.database.Wipe()
	time.Sleep(10 * time.Second)

	fmt.Printf("RunQueryTest (dgraph)..\n")
	dgb.bench.RunQueryTest()
	fmt.Println("Wiping database between tests...")
	dgb.database.Wipe()
	time.Sleep(10 * time.Second)

	fmt.Printf("RunConcurrentQueryStoreTest (dgraph)..\n")
	dgb.bench.RunConcurrentQueryStoreTest()

	fmt.Printf("\n=== Dgraph benchmark completed ===\n\n")
}

// GenerateReport generates the benchmark report
func (dgb *DgraphBenchmark) GenerateReport() {
	dgb.bench.GenerateReport()
}

// GenerateAsciidocReport generates asciidoc format report
func (dgb *DgraphBenchmark) GenerateAsciidocReport() {
	dgb.bench.GenerateAsciidocReport()
}

@@ -1,160 +0,0 @@
package main

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"time"
)

// DgraphDocker manages a dgraph instance via Docker Compose
type DgraphDocker struct {
	composeFile string
	projectName string
	running     bool
}

// NewDgraphDocker creates a new dgraph Docker manager
func NewDgraphDocker() *DgraphDocker {
	// Try to find the docker-compose file in the current directory first
	composeFile := "docker-compose-dgraph.yml"

	// If not found, try the cmd/benchmark directory (for running from project root)
	if _, err := os.Stat(composeFile); os.IsNotExist(err) {
		composeFile = filepath.Join("cmd", "benchmark", "docker-compose-dgraph.yml")
	}

	return &DgraphDocker{
		composeFile: composeFile,
		projectName: "orly-benchmark-dgraph",
		running:     false,
	}
}

// Start starts the dgraph Docker containers
func (d *DgraphDocker) Start(ctx context.Context) error {
	fmt.Println("Starting dgraph Docker containers...")

	// Stop any existing containers first
	d.Stop()

	// Start containers
	cmd := exec.CommandContext(
		ctx,
		"docker-compose",
		"-f", d.composeFile,
		"-p", d.projectName,
		"up", "-d",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to start dgraph containers: %w", err)
	}

	fmt.Println("Waiting for dgraph to be healthy...")

	// Wait for health checks to pass
	if err := d.waitForHealthy(ctx, 60*time.Second); err != nil {
		d.Stop() // Clean up on failure
		return err
	}

	d.running = true
	fmt.Println("Dgraph is ready!")
	return nil
}

// waitForHealthy waits for dgraph to become healthy
func (d *DgraphDocker) waitForHealthy(ctx context.Context, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)

	for time.Now().Before(deadline) {
		// Check if alpha is healthy by checking docker health status
		cmd := exec.CommandContext(
			ctx,
			"docker",
			"inspect",
			"--format={{.State.Health.Status}}",
			"orly-benchmark-dgraph-alpha",
		)

		output, err := cmd.Output()
		if err == nil && string(output) == "healthy\n" {
			// Additional short wait to ensure full readiness
			time.Sleep(2 * time.Second)
			return nil
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
			// Continue waiting
		}
	}

	return fmt.Errorf("dgraph failed to become healthy within %v", timeout)
}

// Stop stops and removes the dgraph Docker containers
func (d *DgraphDocker) Stop() error {
	if !d.running {
		// Try to stop anyway in case of untracked state
		cmd := exec.Command(
			"docker-compose",
			"-f", d.composeFile,
			"-p", d.projectName,
			"down", "-v",
		)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		_ = cmd.Run() // Ignore errors
		return nil
	}

	fmt.Println("Stopping dgraph Docker containers...")

	cmd := exec.Command(
		"docker-compose",
		"-f", d.composeFile,
		"-p", d.projectName,
		"down", "-v",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to stop dgraph containers: %w", err)
	}

	d.running = false
	fmt.Println("Dgraph containers stopped")
	return nil
}

// GetGRPCEndpoint returns the dgraph gRPC endpoint
func (d *DgraphDocker) GetGRPCEndpoint() string {
	return "localhost:9080"
}

// IsRunning returns whether dgraph is running
func (d *DgraphDocker) IsRunning() bool {
	return d.running
}

// Logs returns the logs from dgraph containers
func (d *DgraphDocker) Logs() error {
	cmd := exec.Command(
		"docker-compose",
		"-f", d.composeFile,
		"-p", d.projectName,
		"logs",
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

@@ -1,44 +0,0 @@
version: "3.9"

services:
  dgraph-zero:
    image: dgraph/dgraph:v23.1.0
    container_name: orly-benchmark-dgraph-zero
    working_dir: /data/zero
    ports:
      - "5080:5080"
      - "6080:6080"
    command: dgraph zero --my=dgraph-zero:5080
    networks:
      - orly-benchmark
    healthcheck:
      test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
      interval: 5s
      timeout: 3s
      retries: 3
      start_period: 5s

  dgraph-alpha:
    image: dgraph/dgraph:v23.1.0
    container_name: orly-benchmark-dgraph-alpha
    working_dir: /data/alpha
    ports:
      - "8080:8080"
      - "9080:9080"
    command: dgraph alpha --my=dgraph-alpha:7080 --zero=dgraph-zero:5080 --security whitelist=0.0.0.0/0
    networks:
      - orly-benchmark
    depends_on:
      dgraph-zero:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "sh", "-c", "dgraph version || exit 1"]
      interval: 5s
      timeout: 3s
      retries: 6
      start_period: 10s

networks:
  orly-benchmark:
    name: orly-benchmark-network
    driver: bridge

@@ -12,6 +12,7 @@ import (
	"time"

	"github.com/gorilla/websocket"
	"next.orly.dev/pkg/interfaces/neterr"
)

var (
@@ -90,7 +91,7 @@ func main() {
	if ctx.Err() != nil {
		return
	}
	if netErr, ok := err.(interface{ Timeout() bool }); ok && netErr.Timeout() {
	if netErr, ok := err.(neterr.TimeoutError); ok && netErr.Timeout() {
		continue
	}
	log.Printf("Read error: %v", err)
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
"next.orly.dev/pkg/interfaces/neterr"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -123,7 +124,7 @@ func main() {
|
||||
}
|
||||
|
||||
// Check for timeout errors (these are expected during idle periods)
|
||||
if netErr, ok := err.(interface{ Timeout() bool }); ok && netErr.Timeout() {
|
||||
if netErr, ok := err.(neterr.TimeoutError); ok && netErr.Timeout() {
|
||||
consecutiveTimeouts++
|
||||
if consecutiveTimeouts >= maxConsecutiveTimeouts {
|
||||
log.Printf("Too many consecutive read timeouts (%d), connection may be dead", consecutiveTimeouts)
|
||||
|
||||
@@ -177,6 +177,10 @@ LIMIT $limit
|
||||
|
||||
## Configuration
|
||||
|
||||
All configuration is centralized in `app/config/config.go` and visible via `./orly help`.
|
||||
|
||||
> **Important:** All environment variables must be defined in `app/config/config.go`. Do not use `os.Getenv()` directly in package code. Database backends receive configuration via the `database.DatabaseConfig` struct.
|
||||
|
||||
### Environment Variables
|
||||
|
||||
```bash
|
||||
|
||||
615
docs/POLICY_CONFIGURATION_REFERENCE.md
Normal file
615
docs/POLICY_CONFIGURATION_REFERENCE.md
Normal file
@@ -0,0 +1,615 @@
|
||||
# ORLY Policy Configuration Reference
|
||||
|
||||
This document provides a definitive reference for all policy configuration options and when each rule applies. Use this as the authoritative source for understanding policy behavior.
|
||||
|
||||
## Quick Reference: Read vs Write Applicability
|
||||
|
||||
| Rule Field | Write (EVENT) | Read (REQ) | Notes |
|
||||
|------------|:-------------:|:----------:|-------|
|
||||
| `size_limit` | ✅ | ❌ | Validates incoming events only |
|
||||
| `content_limit` | ✅ | ❌ | Validates incoming events only |
|
||||
| `max_age_of_event` | ✅ | ❌ | Prevents replay attacks |
|
||||
| `max_age_event_in_future` | ✅ | ❌ | Prevents future-dated events |
|
||||
| `max_expiry_duration` | ✅ | ❌ | Requires expiration tag |
|
||||
| `must_have_tags` | ✅ | ❌ | Validates required tags |
|
||||
| `protected_required` | ✅ | ❌ | Requires NIP-70 "-" tag |
|
||||
| `identifier_regex` | ✅ | ❌ | Validates "d" tag format |
|
||||
| `tag_validation` | ✅ | ❌ | Validates tag values with regex |
|
||||
| `write_allow` | ✅ | ❌ | Pubkey whitelist for writing |
|
||||
| `write_deny` | ✅ | ❌ | Pubkey blacklist for writing |
|
||||
| `read_allow` | ❌ | ✅ | Pubkey whitelist for reading |
|
||||
| `read_deny` | ❌ | ✅ | Pubkey blacklist for reading |
|
||||
| `privileged` | ❌ | ✅ | Party-involved access control |
|
||||
| `write_allow_follows` | ✅ | ✅ | Grants **both** read AND write |
|
||||
| `follows_whitelist_admins` | ✅ | ✅ | Grants **both** read AND write |
|
||||
| `script` | ✅ | ❌ | Scripts only run for writes |
|
||||
|
||||
---
|
||||
|
||||
## Core Principle: Validation vs Filtering
|
||||
|
||||
The policy system has two distinct modes of operation:
|
||||
|
||||
### Write Operations (EVENT messages)
|
||||
- **Purpose**: Validate and accept/reject incoming events
|
||||
- **All rules apply** except `read_allow`, `read_deny`, and `privileged`
|
||||
- Events are checked **before storage**
|
||||
- Rejected events are never stored
|
||||
|
||||
### Read Operations (REQ messages)
|
||||
- **Purpose**: Filter which stored events a user can retrieve
|
||||
- **Only access control rules apply**: `read_allow`, `read_deny`, `privileged`, `write_allow_follows`, `follows_whitelist_admins`
- Validation rules (size, age, tags) do NOT apply
- Scripts are NOT executed for reads
- Filtering happens **after the database query**

---

## Configuration Structure

```json
{
  "default_policy": "allow|deny",
  "kind": {
    "whitelist": [1, 3, 7],
    "blacklist": [4, 42]
  },
  "owners": ["hex_pubkey_64_chars"],
  "policy_admins": ["hex_pubkey_64_chars"],
  "policy_follow_whitelist_enabled": true,
  "global": { /* Rule object */ },
  "rules": {
    "1": { /* Rule object for kind 1 */ },
    "30023": { /* Rule object for kind 30023 */ }
  }
}
```

---

## Top-Level Configuration Fields

### `default_policy`

**Type**: `string`
**Values**: `"allow"` (default) or `"deny"`
**Applies to**: Both read and write

The fallback behavior when no specific rule makes a decision.

```json
{
  "default_policy": "deny"
}
```

### `kind.whitelist` and `kind.blacklist`

**Type**: `[]int`
**Applies to**: Both read and write

Controls which event kinds are processed at all.

- **Whitelist** takes precedence: if present, ONLY whitelisted kinds are allowed
- **Blacklist**: if there is no whitelist, these kinds are denied
- **Neither**: behavior depends on `default_policy` and whether rules exist

```json
{
  "kind": {
    "whitelist": [0, 1, 3, 7, 30023]
  }
}
```

### `owners`

**Type**: `[]string` (64-character hex pubkeys)
**Applies to**: Policy administration

Relay owners with full control. Merged with the `ORLY_OWNERS` environment variable.

```json
{
  "owners": ["4a93c5ac0c6f49d2c7e7a5b8d9e0f1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8"]
}
```

### `policy_admins`

**Type**: `[]string` (64-character hex pubkeys)
**Applies to**: Policy administration

Pubkeys that can update policy via kind 12345 events (with restrictions).

### `policy_follow_whitelist_enabled`

**Type**: `boolean`
**Default**: `false`
**Applies to**: Both read and write (when `write_allow_follows` is true)

When enabled, allows `write_allow_follows` rules to grant access to policy admins' follows.

---

## Rule Object Fields

Rules can be defined in `global` (applies to all events) or `rules[kind]` (applies to a specific kind).

### Access Control Fields

#### `write_allow`

**Type**: `[]string` (hex pubkeys)
**Applies to**: Write only
**Behavior**: Exclusive whitelist

When present with entries, ONLY these pubkeys can write events of this kind. All others are denied.

```json
{
  "rules": {
    "1": {
      "write_allow": ["pubkey1_hex", "pubkey2_hex"]
    }
  }
}
```

**Special case**: An empty array `[]` explicitly allows all writers.

#### `write_deny`

**Type**: `[]string` (hex pubkeys)
**Applies to**: Write only
**Behavior**: Blacklist (highest priority)

These pubkeys cannot write events of this kind. **Checked before allow lists.**

```json
{
  "rules": {
    "1": {
      "write_deny": ["banned_pubkey_hex"]
    }
  }
}
```

#### `read_allow`

**Type**: `[]string` (hex pubkeys)
**Applies to**: Read only
**Behavior**: Exclusive whitelist (with OR logic for privileged)

When present with entries:

- If `privileged: false`: ONLY these pubkeys can read
- If `privileged: true`: these pubkeys OR the parties involved can read

```json
{
  "rules": {
    "4": {
      "read_allow": ["trusted_pubkey_hex"],
      "privileged": true
    }
  }
}
```

#### `read_deny`

**Type**: `[]string` (hex pubkeys)
**Applies to**: Read only
**Behavior**: Blacklist (highest priority)

These pubkeys cannot read events of this kind. **Checked before allow lists.**

#### `privileged`

**Type**: `boolean`
**Default**: `false`
**Applies to**: Read only

When `true`, events are only readable by the "parties involved":

- The event author (`event.pubkey`)
- Users mentioned in `p` tags

**Interaction with `read_allow`**:

- `read_allow` present + `privileged: true` = OR logic (in list OR party involved)
- `read_allow` empty + `privileged: true` = only parties involved
- `privileged: true` alone = only parties involved

```json
{
  "rules": {
    "4": {
      "description": "DMs - only sender and recipient can read",
      "privileged": true
    }
  }
}
```
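
The three cases above reduce to a small decision function. The following is a minimal sketch of that logic with illustrative names; ORLY's real implementation is `checkRulePolicy` in `pkg/policy/policy.go`, and this sketch deliberately ignores kind filtering and `default_policy`:

```go
// Sketch of the privileged read decision described above. Names are
// illustrative assumptions, not ORLY's actual internals.
package policy

// partyInvolved reports whether reader is the event author or is
// mentioned in one of the event's "p" tags.
func partyInvolved(reader, author string, pTags []string) bool {
	if reader == author {
		return true
	}
	for _, p := range pTags {
		if p == reader {
			return true
		}
	}
	return false
}

// canRead applies read_deny first, then read_allow OR privileged.
func canRead(reader, author string, pTags, readAllow, readDeny []string, privileged bool) bool {
	for _, d := range readDeny {
		if d == reader {
			return false // read_deny has the highest priority
		}
	}
	for _, a := range readAllow {
		if a == reader {
			return true // explicitly allowed
		}
	}
	if privileged {
		return partyInvolved(reader, author, pTags)
	}
	// read_allow present but reader not in it: denied; otherwise fall
	// through to other rules / default_policy (simplified here).
	return len(readAllow) == 0
}
```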

#### `write_allow_follows`

**Type**: `boolean`
**Default**: `false`
**Applies to**: Both read AND write
**Requires**: `policy_follow_whitelist_enabled: true` at the top level

Grants **both read and write access** to pubkeys followed by policy admins.

> **Important**: Despite the name, this grants BOTH read and write access.

```json
{
  "policy_follow_whitelist_enabled": true,
  "rules": {
    "1": {
      "write_allow_follows": true
    }
  }
}
```

#### `follows_whitelist_admins`

**Type**: `[]string` (hex pubkeys)
**Applies to**: Both read AND write

Alternative to `write_allow_follows` that specifies which admin pubkeys' follows are whitelisted for this specific rule.

```json
{
  "rules": {
    "30023": {
      "follows_whitelist_admins": ["curator_pubkey_hex"]
    }
  }
}
```

---

### Validation Fields (Write-Only)

These fields validate incoming events and are **completely ignored for read operations**.

#### `size_limit`

**Type**: `int64` (bytes)
**Applies to**: Write only

Maximum total serialized event size.

```json
{
  "global": {
    "size_limit": 100000
  }
}
```

#### `content_limit`

**Type**: `int64` (bytes)
**Applies to**: Write only

Maximum size of the `content` field.

```json
{
  "rules": {
    "1": {
      "content_limit": 10000
    }
  }
}
```

#### `max_age_of_event`

**Type**: `int64` (seconds)
**Applies to**: Write only

Maximum age of events. Events with `created_at` older than `now - max_age_of_event` are rejected.

```json
{
  "global": {
    "max_age_of_event": 86400
  }
}
```

#### `max_age_event_in_future`

**Type**: `int64` (seconds)
**Applies to**: Write only

Maximum time events can be dated in the future. Events with `created_at` later than `now + max_age_event_in_future` are rejected.

```json
{
  "global": {
    "max_age_event_in_future": 300
  }
}
```
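
The two age fields together define an acceptance window around the relay's current time. A minimal sketch of that combined check (illustrative only; not ORLY's actual code):

```go
// Sketch: combined created_at window check for the two age fields above.
package policy

// withinAgeWindow accepts an event timestamp if it is neither older than
// maxAge seconds nor more than maxFuture seconds ahead of now. A zero
// limit means "no limit" on that side of the window.
func withinAgeWindow(createdAt, now, maxAge, maxFuture int64) bool {
	if maxAge > 0 && createdAt < now-maxAge {
		return false // too old
	}
	if maxFuture > 0 && createdAt > now+maxFuture {
		return false // too far in the future
	}
	return true
}
```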

#### `max_expiry_duration`

**Type**: `string` (ISO-8601 duration)
**Applies to**: Write only

Maximum allowed expiry time from event creation. Events **must** have an `expiration` tag when this is set.

**Format**: `P[n]Y[n]M[n]W[n]DT[n]H[n]M[n]S`

**Examples**:

- `P7D` = 7 days
- `PT1H` = 1 hour
- `P1DT12H` = 1 day 12 hours
- `PT30M` = 30 minutes

```json
{
  "rules": {
    "20": {
      "description": "Ephemeral events must expire within 24 hours",
      "max_expiry_duration": "P1D"
    }
  }
}
```
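
Go's standard library has no ISO-8601 duration parser, so the limited grammar above has to be parsed by hand. A sketch under the assumption that years are 365 days and months are 30 days (not ORLY's actual parser):

```go
// Sketch: convert the documented ISO-8601 subset (e.g. "P1DT12H") into a
// time.Duration. Covers only the P…T… grammar shown above.
package main

import (
	"fmt"
	"regexp"
	"strconv"
	"time"
)

var durRe = regexp.MustCompile(
	`^P(?:(\d+)Y)?(?:(\d+)M)?(?:(\d+)W)?(?:(\d+)D)?(?:T(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?)?$`)

func parseISODuration(s string) (time.Duration, error) {
	m := durRe.FindStringSubmatch(s)
	if m == nil {
		return 0, fmt.Errorf("invalid duration: %q", s)
	}
	n := func(i int) time.Duration {
		if m[i] == "" {
			return 0
		}
		v, _ := strconv.Atoi(m[i]) // digits only, cannot fail here
		return time.Duration(v)
	}
	// Approximation: years = 365 days, months = 30 days.
	return n(1)*365*24*time.Hour + n(2)*30*24*time.Hour +
		n(3)*7*24*time.Hour + n(4)*24*time.Hour +
		n(5)*time.Hour + n(6)*time.Minute + n(7)*time.Second, nil
}

func main() {
	d, _ := parseISODuration("P1DT12H")
	fmt.Println(d) // 36h0m0s
}
```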

#### `must_have_tags`

**Type**: `[]string` (tag names)
**Applies to**: Write only

Required tags that must be present on the event.

```json
{
  "rules": {
    "1": {
      "must_have_tags": ["p", "e"]
    }
  }
}
```

#### `protected_required`

**Type**: `boolean`
**Default**: `false`
**Applies to**: Write only

Requires events to have a `-` tag (NIP-70 protected events).

```json
{
  "rules": {
    "4": {
      "protected_required": true
    }
  }
}
```

#### `identifier_regex`

**Type**: `string` (regex pattern)
**Applies to**: Write only

Regex pattern that `d` tag values must match. Events **must** have a `d` tag when this is set.

```json
{
  "rules": {
    "30023": {
      "identifier_regex": "^[a-z0-9-]{1,64}$"
    }
  }
}
```

#### `tag_validation`

**Type**: `map[string]string` (tag name → regex pattern)
**Applies to**: Write only

Regex patterns for validating specific tag values. Only validates tags that are **present** on the event.

> **Note**: To require a tag to exist, use `must_have_tags`. `tag_validation` only validates format.

```json
{
  "rules": {
    "30023": {
      "tag_validation": {
        "t": "^[a-z0-9-]{1,32}$",
        "d": "^[a-z0-9-]+$"
      }
    }
  }
}
```
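
The split between presence (`must_have_tags`) and format (`tag_validation`) can be seen in one combined sketch. The tag representation and names here are illustrative assumptions, not ORLY's actual types:

```go
// Sketch: presence check (must_have_tags) plus format check
// (tag_validation). Tags are modeled as [name, value] pairs here.
package policy

import "regexp"

func validateTags(tags [][2]string, required []string, patterns map[string]string) bool {
	seen := map[string]bool{}
	for _, t := range tags {
		seen[t[0]] = true
		if pat, ok := patterns[t[0]]; ok {
			// tag_validation: only applied to tags that are present
			re, err := regexp.Compile(pat)
			if err != nil || !re.MatchString(t[1]) {
				return false
			}
		}
	}
	for _, name := range required {
		if !seen[name] { // must_have_tags: presence is mandatory
			return false
		}
	}
	return true
}
```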

---

### Script Configuration

#### `script`

**Type**: `string` (file path)
**Applies to**: Write only

Path to a custom validation script. **Scripts are NOT executed for read operations.**

```json
{
  "rules": {
    "1": {
      "script": "/etc/orly/scripts/spam-filter.py"
    }
  }
}
```

---

## Policy Evaluation Order

### For Write Operations

```
1. Global Rule Check (all fields apply)
   ├─ Universal constraints (size, tags, age, etc.)
   ├─ write_deny check
   ├─ write_allow_follows / follows_whitelist_admins check
   └─ write_allow check

2. Kind Filtering (whitelist/blacklist)

3. Kind-Specific Rule Check (same as global)
   ├─ Universal constraints
   ├─ write_deny check
   ├─ write_allow_follows / follows_whitelist_admins check
   ├─ write_allow check
   └─ Script execution (if configured)

4. Default Policy (if no rules matched)
```

### For Read Operations

```
1. Global Rule Check (access control only)
   ├─ read_deny check
   ├─ write_allow_follows / follows_whitelist_admins check
   ├─ read_allow check
   └─ privileged check (party involved)

2. Kind Filtering (whitelist/blacklist)

3. Kind-Specific Rule Check (access control only)
   ├─ read_deny check
   ├─ write_allow_follows / follows_whitelist_admins check
   ├─ read_allow + privileged (OR logic)
   └─ privileged-only check

4. Default Policy (if no rules matched)

NOTE: Scripts are NOT executed for read operations
```
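
Both pipelines share the same four-stage shape, which can be sketched as follows. The three-valued `Decision` type and the stage helpers are assumptions for illustration; `checkRulePolicy` in `pkg/policy/policy.go` is the authoritative logic:

```go
// Sketch of the four-stage pipeline above, shared by reads and writes.
package policy

type Decision int

const (
	Abstain Decision = iota // the rule made no decision; keep evaluating
	Allow
	Deny
)

func evaluate(global, kindRule func() Decision, kindAllowed func() bool, defaultAllow bool) bool {
	if d := global(); d != Abstain { // 1. global rule check
		return d == Allow
	}
	if !kindAllowed() { // 2. kind whitelist/blacklist filtering
		return false
	}
	if d := kindRule(); d != Abstain { // 3. kind-specific rule check
		return d == Allow
	}
	return defaultAllow // 4. default_policy fallback
}
```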

---

## Common Configuration Patterns

### Private Relay (Whitelist Only)

```json
{
  "default_policy": "deny",
  "global": {
    "write_allow": ["trusted_pubkey_1", "trusted_pubkey_2"],
    "read_allow": ["trusted_pubkey_1", "trusted_pubkey_2"]
  }
}
```

### Open Relay with Spam Protection

```json
{
  "default_policy": "allow",
  "global": {
    "size_limit": 100000,
    "max_age_of_event": 86400,
    "max_age_event_in_future": 300
  },
  "rules": {
    "1": {
      "script": "/etc/orly/scripts/spam-filter.sh"
    }
  }
}
```

### Community Relay (Follows-Based)

```json
{
  "default_policy": "deny",
  "policy_admins": ["community_admin_pubkey"],
  "policy_follow_whitelist_enabled": true,
  "global": {
    "write_allow_follows": true
  }
}
```

### Encrypted DMs (Privileged Access)

```json
{
  "rules": {
    "4": {
      "description": "Encrypted DMs - only sender/recipient",
      "privileged": true,
      "protected_required": true
    }
  }
}
```

### Long-Form Content with Validation

```json
{
  "rules": {
    "30023": {
      "description": "Long-form articles",
      "size_limit": 100000,
      "content_limit": 50000,
      "max_expiry_duration": "P30D",
      "identifier_regex": "^[a-z0-9-]{1,64}$",
      "tag_validation": {
        "t": "^[a-z0-9-]{1,32}$"
      }
    }
  }
}
```

---

## Important Behaviors

### Whitelist vs Blacklist Precedence

1. **Deny lists** (`write_deny`, `read_deny`) are checked **first** and have the highest priority
2. **Allow lists** are exclusive when populated - ONLY listed pubkeys are allowed
3. **Deny-only configuration**: if only a deny list exists (no allow list), all non-denied pubkeys are allowed

### Empty Arrays vs Null

- `[]` (empty array explicitly set) = Allow all
- `null` or field omitted = No list configured, use other rules
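
This distinction maps directly onto Go's JSON decoding: an omitted field leaves the slice `nil`, while an explicit `[]` produces an empty, non-nil slice. A minimal illustration (the `Rule` struct here is a stand-in, not ORLY's actual type):

```go
// Sketch: distinguishing "[]" from an omitted field after JSON decoding.
package main

import (
	"encoding/json"
	"fmt"
)

type Rule struct {
	WriteAllow []string `json:"write_allow"`
}

func main() {
	var omitted, empty Rule
	json.Unmarshal([]byte(`{}`), &omitted)
	json.Unmarshal([]byte(`{"write_allow": []}`), &empty)
	fmt.Println(omitted.WriteAllow == nil) // true  -> no list configured
	fmt.Println(empty.WriteAllow == nil)   // false -> explicit "allow all"
}
```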

### Global Rules Are Additive

Global rules are always evaluated **in addition to** kind-specific rules. They cannot be overridden at the kind level.

### Implicit Kind Whitelist

When rules are defined but there is no explicit `kind.whitelist`:

- If `default_policy: "allow"`: all kinds are allowed
- If `default_policy: "deny"` or unset: only kinds with rules are allowed

---

## Debugging Policy Issues

Enable debug logging to see policy decisions:

```bash
export ORLY_LOG_LEVEL=debug
```

Log messages include:

- Policy evaluation steps
- Rule matching
- Access decisions with reasons

---

## Source Code Reference

- Rule struct definition: `pkg/policy/policy.go:75-144`
- Policy (P) struct definition: `pkg/policy/policy.go:380-412`
- Check evaluation: `pkg/policy/policy.go:1260-1595` (checkRulePolicy)
- Write handler: `app/handle-event.go:114-138`
- Read handler: `app/handle-req.go:420-438`
@@ -1111,20 +1111,62 @@ Check logs for policy decisions and errors.

## Dynamic Policy Configuration via Kind 12345

Policy administrators can update the relay policy dynamically by publishing kind 12345 events. This enables runtime policy changes without relay restarts.
Both **owners** and **policy admins** can update the relay policy dynamically by publishing kind 12345 events. This enables runtime policy changes without relay restarts, with different permission levels for each role.

### Role Hierarchy and Permissions

ORLY uses a layered permission model for policy updates:

| Role | Source | Can Modify | Restrictions |
|------|--------|------------|--------------|
| **Owner** | `ORLY_OWNERS` env or `owners` in policy.json | All fields | Owners list must remain non-empty |
| **Policy Admin** | `policy_admins` in policy.json | Extend rules, add blacklists | Cannot modify `owners` or `policy_admins`, cannot reduce permissions |

### Composition Rules

Policy updates from owners and policy admins compose as follows:

1. **Owner policy is the base** - defines minimum permissions and protected fields
2. **Policy admins can extend** - add to allow lists, add new kinds, add blacklists
3. **Blacklists override whitelists** - policy admins can ban users that owners allowed
4. **Protected fields are immutable** - only owners can modify `owners` and `policy_admins`

#### What Policy Admins CAN Do:

- ✅ Add pubkeys to `write_allow` and `read_allow` lists
- ✅ Add entries to `write_deny` and `read_deny` lists to blacklist malicious users
- ✅ Blacklist any non-admin user, even if whitelisted by owners or other admins
- ✅ Add kinds to `kind.whitelist` and `kind.blacklist`
- ✅ Increase size limits (`size_limit`, `content_limit`, etc.)
- ✅ Add rules for new kinds not defined by owners
- ✅ Enable `write_allow_follows` for additional rules

#### What Policy Admins CANNOT Do:

- ❌ Modify the `owners` field
- ❌ Modify the `policy_admins` field
- ❌ Blacklist owners or other policy admins (protected users)
- ❌ Remove pubkeys from allow lists
- ❌ Remove kinds from the whitelist
- ❌ Reduce size limits
- ❌ Remove rules defined by owners
- ❌ Add new required tags (restrictions)

### Enabling Dynamic Policy Updates

1. Add yourself as a policy admin in the initial policy.json:
1. Set yourself as both owner and policy admin in the initial policy.json:

```json
{
  "default_policy": "allow",
  "policy_admins": ["YOUR_HEX_PUBKEY_HERE"],
  "owners": ["YOUR_HEX_PUBKEY_HERE"],
  "policy_admins": ["ADMIN_HEX_PUBKEY_HERE"],
  "policy_follow_whitelist_enabled": false
}
```

**Important:** The `owners` list must contain at least one pubkey to prevent lockout.

2. Ensure policy is enabled:

```bash
export ORLY_POLICY_ENABLED=true
```

@@ -1135,15 +1177,28 @@

Send a kind 12345 event with the new policy configuration as JSON content:

**As Owner (full control):**

```json
{
  "kind": 12345,
  "content": "{\"default_policy\": \"deny\", \"kind\": {\"whitelist\": [1,3,7]}, \"policy_admins\": [\"YOUR_HEX_PUBKEY\"]}",
  "content": "{\"default_policy\": \"deny\", \"owners\": [\"OWNER_HEX\"], \"policy_admins\": [\"ADMIN_HEX\"], \"kind\": {\"whitelist\": [1,3,7]}}",
  "tags": [],
  "created_at": 1234567890
}
```

**As Policy Admin (extensions only):**

```json
{
  "kind": 12345,
  "content": "{\"default_policy\": \"deny\", \"owners\": [\"OWNER_HEX\"], \"policy_admins\": [\"ADMIN_HEX\"], \"kind\": {\"whitelist\": [1,3,7,30023], \"blacklist\": [4]}, \"rules\": {\"1\": {\"write_deny\": [\"BAD_ACTOR_HEX\"]}}}",
  "tags": [],
  "created_at": 1234567890
}
```

Note: Policy admins must include the original `owners` and `policy_admins` values unchanged.

### Policy Admin Follow List Whitelisting

When `policy_follow_whitelist_enabled` is `true`, the relay automatically grants access to all pubkeys followed by policy admins.

@@ -1161,10 +1216,27 @@ When `policy_follow_whitelist_enabled` is `true`, the relay automatically grants

### Security Considerations

- Only pubkeys listed in `policy_admins` can update the policy
- Policy updates are validated before applying (invalid JSON or pubkeys are rejected)
- Failed updates preserve the existing policy (no corruption)
- All policy updates are logged for audit purposes
- **Privilege separation**: only owners can add/remove owners and policy admins
- **Non-empty owners**: at least one owner must always exist to prevent lockout
- **Protected fields**: policy admins cannot escalate their privileges by modifying `owners`
- **Blacklist override**: policy admins can block bad actors even if owners allowed them
- **Validation first**: policy updates are validated before applying (invalid updates are rejected)
- **Atomic updates**: failed updates preserve the existing policy (no corruption)
- **Audit logging**: all policy updates are logged with the submitter's pubkey

### Error Messages

Common validation errors:

| Error | Cause |
|-------|-------|
| `owners list cannot be empty` | Owner tried to remove all owners |
| `cannot modify the 'owners' field` | Policy admin tried to change owners |
| `cannot modify the 'policy_admins' field` | Policy admin tried to change admins |
| `cannot remove kind X from whitelist` | Policy admin tried to reduce permissions |
| `cannot reduce size_limit for kind X` | Policy admin tried to make limits stricter |
| `cannot blacklist owner X` | Policy admin tried to blacklist an owner |
| `cannot blacklist policy admin X` | Policy admin tried to blacklist another admin |

## Testing the Policy System
417
docs/WASM_MOBILE_BUILD_PLAN.md
Normal file
@@ -0,0 +1,417 @@
# Plan: Enable js/wasm, iOS, and Android Builds

This document outlines the work required to enable ORLY and the nostr library to build successfully for WebAssembly (js/wasm), iOS (ios/arm64), and Android (android/arm64).

## Current Build Status

| Platform | Status | Notes |
|----------|--------|-------|
| linux/amd64 | ✅ Works | Uses libsecp256k1 via purego |
| darwin/arm64 | ✅ Works | Uses pure Go p256k1 |
| darwin/amd64 | ✅ Works | Uses pure Go p256k1 |
| windows/amd64 | ✅ Works | Uses pure Go p256k1 |
| android/arm64 | ✅ Works | Uses pure Go p256k1 |
| js/wasm | ❌ Fails | Missing platform stubs (planned for hackpadfs work) |
| ios/arm64 | ⚠️ Requires gomobile | See the iOS section below |

---

## Issue 1: js/wasm Build Failures

### Problem

Two packages fail to compile for js/wasm due to missing platform-specific implementations:

1. **`next.orly.dev/pkg/utils/interrupt`** - missing the `Restart()` function
2. **`git.mleku.dev/mleku/nostr/ws`** - missing the `getConnectionOptions()` function

### Root Cause Analysis

#### 1.1 interrupt package

The `Restart()` function is defined with build tags:

- `restart.go` → `//go:build linux`
- `restart_darwin.go` → `//go:build darwin`
- `restart_windows.go` → `//go:build windows`

But `main.go` calls `Restart()` unconditionally on line 66, causing an undefined symbol on js/wasm.

#### 1.2 ws package

The `getConnectionOptions()` function is defined in `connection_options.go` with:

```go
//go:build !js
```

This correctly excludes js/wasm, but no alternative implementation exists for js/wasm, so `connection.go` line 28 fails.

### Solution

#### 1.1 Fix interrupt package (ORLY)

Create a new file `restart_other.go`:

```go
//go:build !linux && !darwin && !windows

package interrupt

import (
	"lol.mleku.dev/log"
	"os"
)

// Restart is not supported on this platform - just exit.
func Restart() {
	log.W.Ln("restart not supported on this platform, exiting")
	os.Exit(0)
}
```
#### 1.2 Fix ws package (nostr library)

Create a new file `connection_options_js.go`:

```go
//go:build js

package ws

import (
	"crypto/tls"
	"net/http"

	"github.com/gorilla/websocket"
)

// getConnectionOptions returns nil on js/wasm as we use the browser WebSocket API.
func getConnectionOptions(
	requestHeader http.Header, tlsConfig *tls.Config,
) *websocket.Dialer {
	// On js/wasm, gorilla/websocket doesn't work - we would need to use
	// browser APIs. This is a stub that allows compilation; actual
	// WebSocket usage would need a js/wasm-compatible implementation.
	return nil
}
```

**However**, this alone won't make WebSocket work - the entire `ws` package uses `gorilla/websocket`, which doesn't support js/wasm. A proper fix requires:

Option A: Use conditional compilation to swap in a js/wasm WebSocket implementation (e.g., `nhooyr.io/websocket`, which supports js/wasm)

Option B: Make the `ws` package optional with build tags so js/wasm builds exclude it entirely

**Recommended**: Option B - exclude the ws client package on js/wasm, since ORLY is a server, not a client.

---

## Issue 2: iOS Build Failure

### Problem

```
ios/arm64 requires external (cgo) linking, but cgo is not enabled
```

### Root Cause

iOS requires CGO for all executables due to Apple's linking requirements. This is a fundamental Go limitation - you cannot build iOS binaries with `CGO_ENABLED=0`.

### Solution

#### Option A: Accept the CGO requirement for iOS

Build with CGO enabled and provide a cross-compilation toolchain:

```bash
CGO_ENABLED=1 CC=clang GOOS=ios GOARCH=arm64 go build
```

This requires:

1. Xcode with the iOS SDK installed
2. Cross-compilation from macOS (or a complex cross-toolchain setup)

#### Option B: Create a library instead of an executable

Instead of building a standalone binary, build ORLY as a Go library that can be called from Swift/Objective-C:

```bash
CGO_ENABLED=1 GOOS=ios GOARCH=arm64 go build -buildmode=c-archive -o liborly.a
```

This creates a static library usable in iOS apps.

#### Option C: Use gomobile

Use the `gomobile` tool, which handles iOS cross-compilation:

```bash
gomobile bind -target=ios ./pkg/...
```

**Recommendation**: Option A or B depending on the use case. For a relay server, iOS support may not be practical anyway (iOS backgrounding restrictions, network limitations).

---

## Issue 3: Android Build Failure (RESOLVED)

### Problem

```
# github.com/ebitengine/purego
dlfcn_android.go:21:13: undefined: cgo.Dlopen
```

### Root Cause

Android uses the Linux kernel, so Go's `GOOS=android` still matches the `linux` build tag. This meant our `*_linux.go` files (which import purego) were being compiled for Android.

### Solution (Implemented)

Updated all build tags in `crypto/p8k/` to explicitly exclude Android:

**Linux files** (`*_linux.go`):

```go
//go:build linux && !android && !purego
```

**Other platform files** (`*_other.go`):

```go
//go:build !linux || android || purego
```

This ensures Android uses the pure Go `p256k1.mleku.dev` implementation instead of trying to load libsecp256k1 via purego.

### Verification

```bash
GOOS=android GOARCH=arm64 CGO_ENABLED=0 go build -o orly-android-arm64
# Successfully produces a 33MB ARM64 ELF binary
```

---

## Implementation Plan

### Phase 1: js/wasm Support (Low effort)

| Task | Repository | Effort |
|------|------------|--------|
| Create `restart_other.go` stub | ORLY | 5 min |
| Create `connection_options_js.go` stub OR exclude ws package | nostr | 15 min |
| Test that the js/wasm build compiles | Both | 5 min |

**Note**: This enables *compilation* but not *functionality*. Running ORLY in a browser would require significant additional work (no filesystem, no listening sockets, etc.).
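
A quick compile-only check for this phase might look like the following (the output name is arbitrary; the binary is not expected to be runnable):

```bash
# Compile-only check for the js/wasm target.
GOOS=js GOARCH=wasm CGO_ENABLED=0 go build -o orly.wasm .
```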

### Phase 2: Android Support (Medium effort)

| Task | Repository | Effort |
|------|------------|--------|
| Audit purego imports - ensure they are Linux-only | nostr | 30 min |
| Add build tags to any files importing purego | nostr | 15 min |
| Test the android/arm64 build | Both | 5 min |

### Phase 3: iOS Support (High effort, questionable value)

| Task | Repository | Effort |
|------|------------|--------|
| Set up an iOS cross-compilation environment | - | 2-4 hours |
| Modify build scripts for CGO_ENABLED=1 | ORLY | 30 min |
| Create c-archive or gomobile bindings | ORLY | 2-4 hours |
| Test on an iOS simulator/device | - | 1-2 hours |

**Recommendation**: iOS support should be deprioritized unless there's a specific use case. A Nostr relay is a server, and iOS imposes severe restrictions on background network services.

---

## Quick Wins (Do First)

### 1. Create `restart_other.go` in ORLY

```go
//go:build !linux && !darwin && !windows

package interrupt

import (
	"lol.mleku.dev/log"
	"os"
)

func Restart() {
	log.W.Ln("restart not supported on this platform, exiting")
	os.Exit(0)
}
```

### 2. Exclude the ws package from js/wasm in the nostr library

Modify `connection.go` to have a build tag:

```go
//go:build !js

package ws

// ... rest of file
```

Create `connection_js.go`:

```go
//go:build js

package ws

// Stub package for js/wasm - WebSocket client not supported.
// Use the browser's native WebSocket API instead.
```

### 3. Audit purego usage
Ensure all files that import `github.com/ebitengine/purego` have:

```go
//go:build linux && !android && !purego
```
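
One possible way to run that audit from the repository root (a sketch; the flags assume GNU grep/xargs and may need adjusting):

```bash
# List Go files importing purego that lack the expected build constraint.
grep -rl 'github.com/ebitengine/purego' --include='*.go' . \
  | xargs -r grep -L '//go:build linux && !android && !purego'
```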

---

## Estimated Total Effort

| Platform | Compilation | Full Functionality |
|----------|-------------|-------------------|
| js/wasm | 1 hour | Not practical (server) |
| android/arm64 | 1-2 hours | Possible with NDK |
| ios/arm64 | 4-8 hours | Limited (iOS restrictions) |

---

## iOS with gomobile

Since iOS requires CGO and you cannot use Xcode without an Apple ID, the `gomobile` approach is the best option. This creates a framework that can be integrated into iOS apps.

### Prerequisites

1. **Install gomobile**:

```bash
go install golang.org/x/mobile/cmd/gomobile@latest
gomobile init
```

2. **Create a bindable package**:

gomobile can only bind packages that export types and functions suitable for mobile. You'll need to create a simplified API layer.

### Creating a Bindable API

Create a new package (e.g., `pkg/mobile/`) with a simplified interface:

```go
// pkg/mobile/relay.go
package mobile

// Relay represents an embedded Nostr relay.
type Relay struct {
	// internal fields
}

// NewRelay creates a new relay instance.
func NewRelay(dataDir string, port int) (*Relay, error) {
	// Initialize the relay with mobile-friendly defaults
	return &Relay{}, nil
}

// Start begins accepting connections.
func (r *Relay) Start() error {
	// Start the relay server
	return nil
}

// Stop gracefully shuts down the relay.
func (r *Relay) Stop() error {
	// Shutdown
	return nil
}

// GetPublicKey returns the relay's public key.
func (r *Relay) GetPublicKey() string {
	// Return the npub
	return ""
}
```

### Building the iOS Framework

```bash
# Build the iOS framework (requires macOS)
gomobile bind -target=ios -o ORLY.xcframework ./pkg/mobile

# This produces ORLY.xcframework, which can be added to Xcode projects
```

### Limitations of gomobile

1. **Only certain types are bindable**:
   - Basic types (int, float, string, bool, []byte)
   - Structs with exported fields of bindable types
   - Interfaces with methods using bindable types
   - Error return values

2. **No channels or goroutines in the API**:
   The public API must be synchronous or use callbacks.

3. **Callbacks require interfaces**:

```go
// Define a callback interface
type EventHandler interface {
	OnEvent(eventJSON string)
}

// Accept the callback in the API
func (r *Relay) SetEventHandler(h EventHandler) {
	// Store and use the callback
}
```

### Alternative: Building a Static Library

If you want more control, build as a C archive:

```bash
# From macOS with the Xcode command line tools
CGO_ENABLED=1 GOOS=ios GOARCH=arm64 \
  go build -buildmode=c-archive -o liborly.a ./pkg/mobile

# This produces:
# - liborly.a (static library)
# - liborly.h (C header file)
```

This can be linked into any iOS project using the C header.

### Recommended Next Steps for iOS

1. Create `pkg/mobile/` with a minimal, mobile-friendly API
2. Test the gomobile binding on Linux first: `gomobile bind -target=android ./pkg/mobile`
3. Once the Android binding works, the iOS binding will use the same API
4. Find someone with macOS to run `gomobile bind -target=ios`

---

## Appendix: File Changes Summary

### nostr Repository (`git.mleku.dev/mleku/nostr`) - COMPLETED

| File | Change |
|------|--------|
| `crypto/p8k/secp_linux.go` | Build tag: `linux && !android && !purego` |
| `crypto/p8k/schnorr_linux.go` | Build tag: `linux && !android && !purego` |
| `crypto/p8k/ecdh_linux.go` | Build tag: `linux && !android && !purego` |
| `crypto/p8k/recovery_linux.go` | Build tag: `linux && !android && !purego` |
| `crypto/p8k/utils_linux.go` | Build tag: `linux && !android && !purego` |
| `crypto/p8k/secp_other.go` | Build tag: `!linux \|\| android \|\| purego` |
| `crypto/p8k/schnorr_other.go` | Build tag: `!linux \|\| android \|\| purego` |
| `crypto/p8k/ecdh_other.go` | Build tag: `!linux \|\| android \|\| purego` |
| `crypto/p8k/recovery_other.go` | Build tag: `!linux \|\| android \|\| purego` |
| `crypto/p8k/utils_other.go` | Build tag: `!linux \|\| android \|\| purego` |
| `crypto/p8k/constants.go` | NEW - shared constants (no build tags) |

### ORLY Repository (`next.orly.dev`)

| File | Change |
|------|--------|
| `go.mod` | Added a `replace` directive for the local nostr library |

### Future Work (js/wasm)

| File | Action Needed |
|------|---------------|
| `pkg/utils/interrupt/restart_other.go` | CREATE - stub `Restart()` for unsupported platforms |
| `nostr/ws/connection.go` | MODIFY - add `//go:build !js` or exclude the package |
| `nostr/ws/connection_js.go` | CREATE - stub for js/wasm |
11
go.mod
@@ -3,11 +3,12 @@ module next.orly.dev
go 1.25.3

require (
    git.mleku.dev/mleku/nostr v1.0.4
    git.mleku.dev/mleku/nostr v1.0.7
    github.com/adrg/xdg v0.5.3
    github.com/aperturerobotics/go-indexeddb v0.2.3
    github.com/dgraph-io/badger/v4 v4.8.0
    github.com/dgraph-io/dgo/v230 v230.0.1
    github.com/gorilla/websocket v1.5.3
    github.com/hack-pad/safejs v0.1.1
    github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
    github.com/klauspost/compress v1.18.2
    github.com/minio/sha256-simd v1.0.1
@@ -21,7 +22,6 @@ require (
    go.uber.org/atomic v1.11.0
    golang.org/x/crypto v0.45.0
    golang.org/x/lint v0.0.0-20241112194109-818c5a804067
    google.golang.org/grpc v1.76.0
    honnef.co/go/tools v0.6.1
    lol.mleku.dev v1.0.5
    lukechampine.com/frand v1.5.1
@@ -46,8 +46,6 @@ require (
    github.com/felixge/fgprof v0.9.5 // indirect
    github.com/go-logr/logr v1.4.3 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang/protobuf v1.5.4 // indirect
    github.com/google/flatbuffers v25.9.23+incompatible // indirect
    github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
    github.com/josharian/intern v1.0.0 // indirect
@@ -57,7 +55,6 @@ require (
    github.com/mattn/go-sqlite3 v1.14.32 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/pkg/errors v0.9.1 // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
    github.com/templexxx/cpu v0.1.1 // indirect
@@ -79,9 +76,9 @@ require (
    golang.org/x/sys v0.38.0 // indirect
    golang.org/x/text v0.31.0 // indirect
    golang.org/x/tools v0.39.0 // indirect
    google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect
    google.golang.org/protobuf v1.36.10 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
    p256k1.mleku.dev v1.0.3 // indirect
)

retract v1.0.3
103
go.sum
@@ -1,13 +1,13 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
git.mleku.dev/mleku/nostr v1.0.4 h1:QKJlqUubLPeMpYpxHODSvfSlL+F6UhjBiBuze9FGRKo=
git.mleku.dev/mleku/nostr v1.0.4/go.mod h1:swI7bWLc7yU1jd7PLCCIrIcUR3Ug5O+GPvpub/w6eTY=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
git.mleku.dev/mleku/nostr v1.0.7 h1:BXWsAAiGu56JXR4rIn0kaVOE+RtMmA9MPvAs8y/BjnI=
git.mleku.dev/mleku/nostr v1.0.7/go.mod h1:iYTlg2WKJXJ0kcsM6QBGOJ0UDiJidMgL/i64cHyPjZc=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNNZqGeYq4PnYOlwlOVIvSyNaIy0ykg=
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3/go.mod h1:we0YA5CsBbH5+/NUzC/AlMmxaDtWlXeNsqrwXjTzmzA=
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
github.com/aperturerobotics/go-indexeddb v0.2.3 h1:DfquIk9YEZjWD/lJyBWZWGCtRga43/a96bx0Ulv9VhQ=
github.com/aperturerobotics/go-indexeddb v0.2.3/go.mod h1:JV1XngOCCui7zrMSyRz+Wvz00nUSfotRKZqJzWpl5fQ=
github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
@@ -17,7 +17,6 @@ github.com/bytedance/sonic v1.13.1/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY=
github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
@@ -29,7 +28,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
@@ -44,8 +42,6 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvw
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
github.com/dgraph-io/dgo/v230 v230.0.1 h1:kR7gI7/ZZv0jtG6dnedNgNOCxe1cbSG8ekF+pNfReks=
github.com/dgraph-io/dgo/v230 v230.0.1/go.mod h1:5FerO2h4LPOxR2XTkOAtqUUPaFdQ+5aBOHXPBJ3nT10=
github.com/dgraph-io/ristretto/v2 v2.3.0 h1:qTQ38m7oIyd4GAed/QkUZyPFNMnvVWyazGXRwvOt5zk=
github.com/dgraph-io/ristretto/v2 v2.3.0/go.mod h1:gpoRV3VzrEY1a9dWAYV6T1U7YzfgttXdd/ZzL1s9OZM=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
@@ -55,8 +51,6 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m
github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
github.com/ebitengine/purego v0.9.1 h1:a/k2f2HQU3Pi399RPW1MOaZyhKJL9w/xFpKAg4q1s0A=
github.com/ebitengine/purego v0.9.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
@@ -68,26 +62,8 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/flatbuffers v25.9.23+incompatible h1:rGZKv+wOb6QPzIdkM2KxhBZCDrA0DeN6DNmRDrqIsQU=
github.com/google/flatbuffers v25.9.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -95,10 +71,10 @@ github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8I
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d h1:KJIErDwbSHjnp/SGzE5ed8Aol7JsKiI5X7yWKAtzhM0=
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hack-pad/safejs v0.1.1 h1:d5qPO0iQ7h2oVtpzGnLExE+Wn9AtytxIfltcS2b9KD8=
github.com/hack-pad/safejs v0.1.1/go.mod h1:HdS+bKF1NrE72VoXZeWzxFOVQVUSqZJAG0xNCnb+Tio=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -107,8 +83,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
@@ -137,13 +111,10 @@ github.com/nbd-wtf/go-nostr v0.52.0/go.mod h1:4avYoc9mDGZ9wHsvCOhHH9vPzKucCfuYBt
github.com/neo4j/neo4j-go-driver/v5 v5.28.4 h1:7toxehVcYkZbyxV4W3Ib9VcnyRBQPucF+VwNNmtSXi4=
github.com/neo4j/neo4j-go-driver/v5 v5.28.4/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
@@ -177,8 +148,6 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/vertex-lab/nostr-sqlite v0.3.2 h1:8nZYYIwiKnWLA446qA/wL/Gy+bU0kuaxdLfUyfeTt/E=
github.com/vertex-lab/nostr-sqlite v0.3.2/go.mod h1:5bw1wMgJhSdrumsZAWxqy+P0u1g+q02PnlGQn15dnSM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs=
go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
@@ -187,10 +156,6 @@ go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@@ -199,92 +164,40 @@ golang.org/x/arch v0.15.0 h1:QtOrQd0bTUnhNVNndMpLHNWrDmYzZ2KDqSrEymqInZw=
golang.org/x/arch v0.15.0/go.mod h1:JmwW7aLIoRUKgaTzhkiEFxvcEiQGyOg9BMonBJUS7EE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546 h1:HDjDiATsGqvuqvkDvgJjD1IgPrVekcSXVVE21JwvzGE=
golang.org/x/exp/typeparams v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20241112194109-818c5a804067 h1:adDmSQyFTCiv19j015EGKJBoaa7ElV0Q1Wovb/4G7NA=
golang.org/x/lint v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -293,8 +206,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
lol.mleku.dev v1.0.5 h1:irwfwz+Scv74G/2OXmv05YFKOzUNOVZ735EAkYgjgM8=
@@ -302,3 +213,5 @@ lol.mleku.dev v1.0.5/go.mod h1:JlsqP0CZDLKRyd85XGcy79+ydSRqmFkrPzYFMYxQ+zs=
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
lukechampine.com/frand v1.5.1/go.mod h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
p256k1.mleku.dev v1.0.3 h1:2SBEH9XhNAotO1Ik8ejODjChTqc06Z/6ncQhrYkAdRA=
p256k1.mleku.dev v1.0.3/go.mod h1:cWkZlx6Tu7CTmIxonFbdjhdNfkY3VbjjY5TFEILiTnY=
42
main.go
42
main.go
@@ -21,8 +21,7 @@ import (
	"next.orly.dev/pkg/acl"
	"git.mleku.dev/mleku/nostr/crypto/keys"
	"next.orly.dev/pkg/database"
	_ "next.orly.dev/pkg/dgraph" // Import to register dgraph factory
	_ "next.orly.dev/pkg/neo4j" // Import to register neo4j factory
	"git.mleku.dev/mleku/nostr/encoders/hex"
	"next.orly.dev/pkg/utils/interrupt"
	"next.orly.dev/pkg/version"
@@ -31,6 +30,13 @@ import (
func main() {
	runtime.GOMAXPROCS(128)
	debug.SetGCPercent(10)

	// Handle 'version' subcommand early, before any other initialization
	if config.VersionRequested() {
		fmt.Println(version.V)
		os.Exit(0)
	}

	var err error
	var cfg *config.C
	if cfg, err = config.New(); chk.T(err) {
@@ -42,8 +48,8 @@ func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var db database.Database
	if db, err = database.NewDatabase(
		ctx, cancel, cfg.DBType, cfg.DataDir, cfg.DBLogLevel,
	if db, err = database.NewDatabaseWithConfig(
		ctx, cancel, cfg.DBType, makeDatabaseConfig(cfg),
	); chk.E(err) {
		os.Exit(1)
	}
@@ -318,8 +324,8 @@ func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var db database.Database
	log.I.F("initializing %s database at %s", cfg.DBType, cfg.DataDir)
	if db, err = database.NewDatabase(
		ctx, cancel, cfg.DBType, cfg.DataDir, cfg.DBLogLevel,
	if db, err = database.NewDatabaseWithConfig(
		ctx, cancel, cfg.DBType, makeDatabaseConfig(cfg),
	); chk.E(err) {
		os.Exit(1)
	}
@@ -430,3 +436,27 @@ func main() {
	}
	// log.I.F("exiting")
}

// makeDatabaseConfig creates a database.DatabaseConfig from the app config.
// This helper function extracts all database-specific configuration values
// and constructs the appropriate struct for the database package.
func makeDatabaseConfig(cfg *config.C) *database.DatabaseConfig {
	dataDir, logLevel,
		blockCacheMB, indexCacheMB, queryCacheSizeMB,
		queryCacheMaxAge,
		inlineEventThreshold,
		neo4jURI, neo4jUser, neo4jPassword := cfg.GetDatabaseConfigValues()

	return &database.DatabaseConfig{
		DataDir:              dataDir,
		LogLevel:             logLevel,
		BlockCacheMB:         blockCacheMB,
		IndexCacheMB:         indexCacheMB,
		QueryCacheSizeMB:     queryCacheSizeMB,
		QueryCacheMaxAge:     queryCacheMaxAge,
		InlineEventThreshold: inlineEventThreshold,
		Neo4jURI:             neo4jURI,
		Neo4jUser:            neo4jUser,
		Neo4jPassword:        neo4jPassword,
	}
}

@@ -2,20 +2,20 @@ package acl

import (
	"git.mleku.dev/mleku/nostr/encoders/event"
	"next.orly.dev/pkg/interfaces/acl"
	acliface "next.orly.dev/pkg/interfaces/acl"
	"next.orly.dev/pkg/utils/atomic"
)

var Registry = &S{}

type S struct {
	ACL []acl.I
	ACL []acliface.I
	Active atomic.String
}

type A struct{ S }

func (s *S) Register(i acl.I) {
func (s *S) Register(i acliface.I) {
	(*s).ACL = append((*s).ACL, i)
}

@@ -85,9 +85,7 @@ func (s *S) CheckPolicy(ev *event.E) (allowed bool, err error) {
	for _, i := range s.ACL {
		if i.Type() == s.Active.Load() {
			// Check if the ACL implementation has a CheckPolicy method
			if policyChecker, ok := i.(interface {
				CheckPolicy(ev *event.E) (allowed bool, err error)
			}); ok {
			if policyChecker, ok := i.(acliface.PolicyChecker); ok {
				return policyChecker.CheckPolicy(ev)
			}
			// If no CheckPolicy method, default to allowing

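The refactor above swaps an ad-hoc anonymous interface assertion for a named interface from the acl interfaces package. The actual declaration is not part of this diff; a minimal sketch of what `acliface.PolicyChecker` presumably looks like, inferred only from the method signature used here:

```go
// Hypothetical sketch of the named interface in next.orly.dev/pkg/interfaces/acl;
// only the CheckPolicy signature is taken from the diff above.
package acl

import "git.mleku.dev/mleku/nostr/encoders/event"

// PolicyChecker is implemented by ACL backends that can veto individual events.
type PolicyChecker interface {
	CheckPolicy(ev *event.E) (allowed bool, err error)
}
```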
@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -37,7 +37,7 @@ func TestKind3TagRoundTrip(t *testing.T) {
	// Verify all tags have key "p"
	pTagCount := 0
	for _, tg := range *ev1.Tags {
		if tag != nil && tag.Len() >= 2 {
		if tg != nil && tg.Len() >= 2 {
			key := tg.Key()
			if len(key) == 1 && key[0] == 'p' {
				pTagCount++
@@ -63,7 +63,7 @@ func TestKind3TagRoundTrip(t *testing.T) {
	// Verify all tags still have key "p"
	pTagCount2 := 0
	for _, tg := range *ev2.Tags {
		if tag != nil && tag.Len() >= 2 {
		if tg != nil && tg.Len() >= 2 {
			key := tg.Key()
			if len(key) == 1 && key[0] == 'p' {
				pTagCount2++

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (
@@ -5,7 +7,6 @@ import (
	"errors"
	"os"
	"path/filepath"
	"strconv"
	"time"

	"github.com/dgraph-io/badger/v4"
@@ -21,10 +22,11 @@ import (

// D implements the Database interface using Badger as the storage backend
type D struct {
	ctx     context.Context
	cancel  context.CancelFunc
	dataDir string
	Logger  *logger
	ctx                  context.Context
	cancel               context.CancelFunc
	dataDir              string
	Logger               *logger
	inlineEventThreshold int // Configurable threshold for inline event storage
	*badger.DB
	seq       *badger.Sequence
	pubkeySeq *badger.Sequence // Sequence for pubkey serials
@@ -35,63 +37,85 @@ type D struct {
// Ensure D implements Database interface at compile time
var _ Database = (*D)(nil)

// New creates a new Badger database instance with default configuration.
// This is provided for backward compatibility with existing callers.
// For full configuration control, use NewWithConfig instead.
func New(
	ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
) (
	d *D, err error,
) {
	// Initialize query cache with configurable size (default 512MB)
	queryCacheSize := int64(512 * 1024 * 1024) // 512 MB
	if v := os.Getenv("ORLY_QUERY_CACHE_SIZE_MB"); v != "" {
		if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
			queryCacheSize = int64(n * 1024 * 1024)
		}
	// Create a default config for backward compatibility
	cfg := &DatabaseConfig{
		DataDir:              dataDir,
		LogLevel:             logLevel,
		BlockCacheMB:         1024,            // Default 1024 MB
		IndexCacheMB:         512,             // Default 512 MB
		QueryCacheSizeMB:     512,             // Default 512 MB
		QueryCacheMaxAge:     5 * time.Minute, // Default 5 minutes
		InlineEventThreshold: 1024,            // Default 1024 bytes
	}
	queryCacheMaxAge := 5 * time.Minute // Default 5 minutes
	if v := os.Getenv("ORLY_QUERY_CACHE_MAX_AGE"); v != "" {
		if duration, perr := time.ParseDuration(v); perr == nil {
			queryCacheMaxAge = duration
		}
	return NewWithConfig(ctx, cancel, cfg)
}

// NewWithConfig creates a new Badger database instance with full configuration.
// This is the preferred method when you have access to DatabaseConfig.
func NewWithConfig(
	ctx context.Context, cancel context.CancelFunc, cfg *DatabaseConfig,
) (
	d *D, err error,
) {
	// Apply defaults for zero values (backward compatibility)
	blockCacheMB := cfg.BlockCacheMB
	if blockCacheMB == 0 {
		blockCacheMB = 1024 // Default 1024 MB
	}
	indexCacheMB := cfg.IndexCacheMB
	if indexCacheMB == 0 {
		indexCacheMB = 512 // Default 512 MB
	}
	queryCacheSizeMB := cfg.QueryCacheSizeMB
	if queryCacheSizeMB == 0 {
		queryCacheSizeMB = 512 // Default 512 MB
	}
	queryCacheMaxAge := cfg.QueryCacheMaxAge
	if queryCacheMaxAge == 0 {
		queryCacheMaxAge = 5 * time.Minute // Default 5 minutes
	}
	inlineEventThreshold := cfg.InlineEventThreshold
	if inlineEventThreshold == 0 {
		inlineEventThreshold = 1024 // Default 1024 bytes
	}

	queryCacheSize := int64(queryCacheSizeMB * 1024 * 1024)

	d = &D{
		ctx:        ctx,
		cancel:     cancel,
		dataDir:    dataDir,
		Logger:     NewLogger(lol.GetLogLevel(logLevel), dataDir),
		DB:         nil,
		seq:        nil,
		ready:      make(chan struct{}),
		queryCache: querycache.NewEventCache(queryCacheSize, queryCacheMaxAge),
		ctx:                  ctx,
		cancel:               cancel,
		dataDir:              cfg.DataDir,
		Logger:               NewLogger(lol.GetLogLevel(cfg.LogLevel), cfg.DataDir),
		inlineEventThreshold: inlineEventThreshold,
		DB:                   nil,
		seq:                  nil,
		ready:                make(chan struct{}),
		queryCache:           querycache.NewEventCache(queryCacheSize, queryCacheMaxAge),
	}

	// Ensure the data directory exists
	if err = os.MkdirAll(dataDir, 0755); chk.E(err) {
	if err = os.MkdirAll(cfg.DataDir, 0755); chk.E(err) {
		return
	}

	// Also ensure the directory exists using apputil.EnsureDir for any
	// potential subdirectories
	dummyFile := filepath.Join(dataDir, "dummy.sst")
	dummyFile := filepath.Join(cfg.DataDir, "dummy.sst")
	if err = apputil.EnsureDir(dummyFile); chk.E(err) {
		return
	}

	opts := badger.DefaultOptions(d.dataDir)
	// Configure caches based on environment to better match workload.
	// Configure caches based on config to better match workload.
	// Defaults aim for higher hit ratios under read-heavy workloads while remaining safe.
	var blockCacheMB = 1024 // default 512 MB
	var indexCacheMB = 512  // default 256 MB
	if v := os.Getenv("ORLY_DB_BLOCK_CACHE_MB"); v != "" {
		if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
			blockCacheMB = n
		}
	}
	if v := os.Getenv("ORLY_DB_INDEX_CACHE_MB"); v != "" {
		if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
			indexCacheMB = n
		}
	}
	opts.BlockCacheSize = int64(blockCacheMB * units.Mb)
	opts.IndexCacheSize = int64(indexCacheMB * units.Mb)
	opts.BlockSize = 4 * units.Kb // 4 KB block size

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,11 +1,35 @@
//go:build !(js && wasm)

package database

import (
	"context"
	"fmt"
	"strings"
	"time"
)

// DatabaseConfig holds all database configuration options that can be passed
// to any database backend. Each backend uses the relevant fields for its type.
// This centralizes configuration instead of having each backend read env vars directly.
type DatabaseConfig struct {
	// Common settings for all backends
	DataDir  string
	LogLevel string

	// Badger-specific settings
	BlockCacheMB         int           // ORLY_DB_BLOCK_CACHE_MB
	IndexCacheMB         int           // ORLY_DB_INDEX_CACHE_MB
	QueryCacheSizeMB     int           // ORLY_QUERY_CACHE_SIZE_MB
	QueryCacheMaxAge     time.Duration // ORLY_QUERY_CACHE_MAX_AGE
	InlineEventThreshold int           // ORLY_INLINE_EVENT_THRESHOLD

	// Neo4j-specific settings
	Neo4jURI      string // ORLY_NEO4J_URI
	Neo4jUser     string // ORLY_NEO4J_USER
	Neo4jPassword string // ORLY_NEO4J_PASSWORD
}

// NewDatabase creates a database instance based on the specified type.
// Supported types: "badger", "dgraph", "neo4j"
func NewDatabase(
@@ -14,40 +38,60 @@ func NewDatabase(
	dbType string,
	dataDir string,
	logLevel string,
) (Database, error) {
	// Create a default config for backward compatibility with existing callers
	cfg := &DatabaseConfig{
		DataDir:  dataDir,
		LogLevel: logLevel,
	}
	return NewDatabaseWithConfig(ctx, cancel, dbType, cfg)
}

// NewDatabaseWithConfig creates a database instance with full configuration.
// This is the preferred method when you have access to the app config.
func NewDatabaseWithConfig(
	ctx context.Context,
	cancel context.CancelFunc,
	dbType string,
	cfg *DatabaseConfig,
) (Database, error) {
	switch strings.ToLower(dbType) {
	case "badger", "":
		// Use the existing badger implementation
		return New(ctx, cancel, dataDir, logLevel)
	case "dgraph":
		// Use the new dgraph implementation
		// Import dynamically to avoid import cycles
		return newDgraphDatabase(ctx, cancel, dataDir, logLevel)
		return NewWithConfig(ctx, cancel, cfg)
	case "neo4j":
		// Use the new neo4j implementation
		// Import dynamically to avoid import cycles
		return newNeo4jDatabase(ctx, cancel, dataDir, logLevel)
		// Use the neo4j implementation
		if newNeo4jDatabase == nil {
			return nil, fmt.Errorf("neo4j database backend not available (import _ \"next.orly.dev/pkg/neo4j\")")
		}
		return newNeo4jDatabase(ctx, cancel, cfg)
	case "wasmdb", "indexeddb", "wasm":
		// Use the wasmdb implementation (IndexedDB backend for WebAssembly)
		if newWasmDBDatabase == nil {
			return nil, fmt.Errorf("wasmdb database backend not available (import _ \"next.orly.dev/pkg/wasmdb\")")
		}
		return newWasmDBDatabase(ctx, cancel, cfg)
	default:
		return nil, fmt.Errorf("unsupported database type: %s (supported: badger, dgraph, neo4j)", dbType)
		return nil, fmt.Errorf("unsupported database type: %s (supported: badger, neo4j, wasmdb)", dbType)
	}
}

// newDgraphDatabase creates a dgraph database instance
// This is defined here to avoid import cycles
var newDgraphDatabase func(context.Context, context.CancelFunc, string, string) (Database, error)

// RegisterDgraphFactory registers the dgraph database factory
// This is called from the dgraph package's init() function
func RegisterDgraphFactory(factory func(context.Context, context.CancelFunc, string, string) (Database, error)) {
	newDgraphDatabase = factory
}

// newNeo4jDatabase creates a neo4j database instance
// This is defined here to avoid import cycles
var newNeo4jDatabase func(context.Context, context.CancelFunc, string, string) (Database, error)
var newNeo4jDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)

// RegisterNeo4jFactory registers the neo4j database factory
// This is called from the neo4j package's init() function
func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, string, string) (Database, error)) {
func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
	newNeo4jDatabase = factory
}

// newWasmDBDatabase creates a wasmdb database instance (IndexedDB backend for WebAssembly)
// This is defined here to avoid import cycles
var newWasmDBDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)

// RegisterWasmDBFactory registers the wasmdb database factory
// This is called from the wasmdb package's init() function
func RegisterWasmDBFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
	newWasmDBDatabase = factory
}

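The registration functions above exist to break import cycles: each backend package registers its constructor from its own init() function, and the relay binary pulls the backend in with a blank import. A sketch of how the neo4j package presumably wires itself in under the new `*DatabaseConfig` signature (the constructor name `New` is an assumption, inferred from the factory comments rather than shown in this diff):

```go
// pkg/neo4j registration sketch — hedged, not the shipped code.
package neo4j

import (
	"context"

	"next.orly.dev/pkg/database"
)

func init() {
	// Runs when the relay blank-imports this package
	// (main.go: _ "next.orly.dev/pkg/neo4j").
	database.RegisterNeo4jFactory(func(
		ctx context.Context, cancel context.CancelFunc, cfg *database.DatabaseConfig,
	) (database.Database, error) {
		return New(ctx, cancel, cfg) // assumed constructor in this package
	})
}
```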
115 pkg/database/factory_wasm.go Normal file
@@ -0,0 +1,115 @@
//go:build js && wasm

package database

import (
	"context"
	"fmt"
	"strings"
	"time"
)

// DatabaseConfig holds all database configuration options that can be passed
// to any database backend. Each backend uses the relevant fields for its type.
// This centralizes configuration instead of having each backend read env vars directly.
type DatabaseConfig struct {
	// Common settings for all backends
	DataDir  string
	LogLevel string

	// Badger-specific settings (not available in WASM)
	BlockCacheMB         int           // ORLY_DB_BLOCK_CACHE_MB
	IndexCacheMB         int           // ORLY_DB_INDEX_CACHE_MB
	QueryCacheSizeMB     int           // ORLY_QUERY_CACHE_SIZE_MB
	QueryCacheMaxAge     time.Duration // ORLY_QUERY_CACHE_MAX_AGE
	InlineEventThreshold int           // ORLY_INLINE_EVENT_THRESHOLD

	// DGraph-specific settings
	DgraphURL string // ORLY_DGRAPH_URL

	// Neo4j-specific settings
	Neo4jURI      string // ORLY_NEO4J_URI
	Neo4jUser     string // ORLY_NEO4J_USER
	Neo4jPassword string // ORLY_NEO4J_PASSWORD
}

// NewDatabase creates a database instance based on the specified type.
// Supported types in WASM: "wasmdb", "dgraph", "neo4j"
// Note: "badger" is not available in WASM builds due to filesystem dependencies
func NewDatabase(
	ctx context.Context,
	cancel context.CancelFunc,
	dbType string,
	dataDir string,
	logLevel string,
) (Database, error) {
	// Create a default config for backward compatibility with existing callers
	cfg := &DatabaseConfig{
		DataDir:  dataDir,
		LogLevel: logLevel,
	}
	return NewDatabaseWithConfig(ctx, cancel, dbType, cfg)
}

// NewDatabaseWithConfig creates a database instance with full configuration.
// This is the preferred method when you have access to the app config.
func NewDatabaseWithConfig(
	ctx context.Context,
	cancel context.CancelFunc,
	dbType string,
	cfg *DatabaseConfig,
) (Database, error) {
	switch strings.ToLower(dbType) {
	case "wasmdb", "indexeddb", "wasm", "badger", "":
		// In WASM builds, default to wasmdb (IndexedDB backend)
		// "badger" is mapped to wasmdb since Badger is not available
		if newWasmDBDatabase == nil {
			return nil, fmt.Errorf("wasmdb database backend not available (import _ \"next.orly.dev/pkg/wasmdb\")")
		}
		return newWasmDBDatabase(ctx, cancel, cfg)
	case "dgraph":
		// Use the dgraph implementation (HTTP-based, works in WASM)
		if newDgraphDatabase == nil {
			return nil, fmt.Errorf("dgraph database backend not available (import _ \"next.orly.dev/pkg/dgraph\")")
		}
		return newDgraphDatabase(ctx, cancel, cfg)
	case "neo4j":
		// Use the neo4j implementation (HTTP-based, works in WASM)
		if newNeo4jDatabase == nil {
			return nil, fmt.Errorf("neo4j database backend not available (import _ \"next.orly.dev/pkg/neo4j\")")
		}
		return newNeo4jDatabase(ctx, cancel, cfg)
	default:
		return nil, fmt.Errorf("unsupported database type: %s (supported in WASM: wasmdb, dgraph, neo4j)", dbType)
	}
}

// newDgraphDatabase creates a dgraph database instance
// This is defined here to avoid import cycles
var newDgraphDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)

// RegisterDgraphFactory registers the dgraph database factory
// This is called from the dgraph package's init() function
func RegisterDgraphFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
	newDgraphDatabase = factory
}

// newNeo4jDatabase creates a neo4j database instance
// This is defined here to avoid import cycles
var newNeo4jDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)

// RegisterNeo4jFactory registers the neo4j database factory
// This is called from the neo4j package's init() function
func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
	newNeo4jDatabase = factory
}

// newWasmDBDatabase creates a wasmdb database instance (IndexedDB backend for WebAssembly)
// This is defined here to avoid import cycles
var newWasmDBDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)

// RegisterWasmDBFactory registers the wasmdb database factory
// This is called from the wasmdb package's init() function
func RegisterWasmDBFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
	newWasmDBDatabase = factory
}

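The two factory files are selected by mutually exclusive build constraints: `//go:build js && wasm` compiles only for WebAssembly targets, while `//go:build !(js && wasm)` covers everything else, so exactly one DatabaseConfig definition and one factory is present in any given build. A quick way to confirm which file a target picks up, using standard Go tooling (shown as a sketch):

```bash
# Native build: factory.go is compiled, factory_wasm.go is excluded.
go list -f '{{.GoFiles}}' ./pkg/database

# WASM build: the constraints flip and factory_wasm.go is compiled instead.
GOOS=js GOARCH=wasm go list -f '{{.GoFiles}}' ./pkg/database
```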
@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

// Package database provides shared import utilities for events
package database

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (
@@ -11,13 +13,6 @@ import (
	"git.mleku.dev/mleku/nostr/encoders/hex"
)

// NIP43Membership represents membership metadata for NIP-43
type NIP43Membership struct {
	Pubkey     []byte
	AddedAt    time.Time
	InviteCode string
}

// Database key prefixes for NIP-43
const (
	nip43MemberPrefix = "nip43:member:"

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (
@@ -5,8 +7,6 @@ import (
	"context"
	"errors"
	"fmt"
	"os"
	"strconv"
	"strings"

	"github.com/dgraph-io/badger/v4"
@@ -270,14 +270,9 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
	eventData := eventDataBuf.Bytes()

	// Determine storage strategy (Reiser4 optimizations)
	// Get threshold from environment, default to 0 (disabled)
	// When enabled, typical values: 384 (conservative), 512 (recommended), 1024 (aggressive)
	smallEventThreshold := 1024
	if v := os.Getenv("ORLY_INLINE_EVENT_THRESHOLD"); v != "" {
		if n, perr := strconv.Atoi(v); perr == nil && n >= 0 {
			smallEventThreshold = n
		}
	}
	// Use the threshold from database configuration
	// Typical values: 384 (conservative), 512 (recommended), 1024 (aggressive)
	smallEventThreshold := d.inlineEventThreshold
	isSmallEvent := smallEventThreshold > 0 && len(eventData) <= smallEventThreshold
	isReplaceableEvent := kind.IsReplaceable(ev.Kind)
	isAddressableEvent := kind.IsParameterizedReplaceable(ev.Kind)

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (
@@ -12,13 +14,6 @@ import (
	"github.com/dgraph-io/badger/v4"
)

type Subscription struct {
	TrialEnd       time.Time `json:"trial_end"`
	PaidUntil      time.Time `json:"paid_until"`
	BlossomLevel   string    `json:"blossom_level,omitempty"`   // Service level name (e.g., "basic", "premium")
	BlossomStorage int64     `json:"blossom_storage,omitempty"` // Storage quota in MB
}

func (d *D) GetSubscription(pubkey []byte) (*Subscription, error) {
	key := fmt.Sprintf("sub:%s", hex.EncodeToString(pubkey))
	var sub *Subscription
@@ -122,13 +117,6 @@ func (d *D) ExtendSubscription(pubkey []byte, days int) error {
	)
}

type Payment struct {
	Amount    int64     `json:"amount"`
	Timestamp time.Time `json:"timestamp"`
	Invoice   string    `json:"invoice"`
	Preimage  string    `json:"preimage"`
}

func (d *D) RecordPayment(
	pubkey []byte, amount int64, invoice, preimage string,
) error {

26 pkg/database/types.go Normal file
@@ -0,0 +1,26 @@
package database

import "time"

// Subscription represents a user's subscription status
type Subscription struct {
	TrialEnd       time.Time `json:"trial_end"`
	PaidUntil      time.Time `json:"paid_until"`
	BlossomLevel   string    `json:"blossom_level,omitempty"`   // Service level name (e.g., "basic", "premium")
	BlossomStorage int64     `json:"blossom_storage,omitempty"` // Storage quota in MB
}

// Payment represents a recorded payment
type Payment struct {
	Amount    int64     `json:"amount"`
	Timestamp time.Time `json:"timestamp"`
	Invoice   string    `json:"invoice"`
	Preimage  string    `json:"preimage"`
}

// NIP43Membership represents membership metadata for NIP-43
type NIP43Membership struct {
	Pubkey     []byte
	AddedAt    time.Time
	InviteCode string
}
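These shared types carry JSON tags, so a stored Subscription serializes to a small document. A minimal sketch of the round trip, with placeholder values (the date values and service level are illustrative only):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Subscription mirrors the struct moved into pkg/database/types.go above.
type Subscription struct {
	TrialEnd       time.Time `json:"trial_end"`
	PaidUntil      time.Time `json:"paid_until"`
	BlossomLevel   string    `json:"blossom_level,omitempty"`
	BlossomStorage int64     `json:"blossom_storage,omitempty"`
}

func main() {
	sub := Subscription{
		TrialEnd:       time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC),
		PaidUntil:      time.Date(2025, 2, 1, 0, 0, 0, 0, time.UTC),
		BlossomLevel:   "basic",
		BlossomStorage: 1024,
	}
	b, _ := json.Marshal(sub)
	fmt.Println(string(b))
	// {"trial_end":"2025-01-01T00:00:00Z","paid_until":"2025-02-01T00:00:00Z","blossom_level":"basic","blossom_storage":1024}
}
```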
@@ -1,280 +0,0 @@
# Dgraph Database Implementation for ORLY

This package provides a Dgraph-based implementation of the ORLY database interface, enabling graph-based storage for Nostr events with powerful relationship querying capabilities.

## Status: Step 1 Complete ✅

**Current State:** Dgraph server integration is complete and functional
**Next Step:** DQL query/mutation implementation in save-event.go and query-events.go

## Architecture

### Client-Server Model

The implementation uses a **client-server architecture**:

```
┌─────────────────────────────────────────────┐
│ ORLY Relay Process                          │
│                                             │
│ ┌────────────────────────────────────┐      │
│ │ Dgraph Client (pkg/dgraph)         │      │
│ │ - dgo library (gRPC)               │      │
│ │ - Schema management                │──────┼───► Dgraph Server
│ │ - Query/Mutate methods             │      │     (localhost:9080)
│ └────────────────────────────────────┘      │     - Event graph
│                                             │     - Authors, tags
│ ┌────────────────────────────────────┐      │     - Relationships
│ │ Badger Metadata Store              │      │
│ │ - Markers (key-value)              │      │
│ │ - Serial counters                  │      │
│ │ - Relay identity                   │      │
│ └────────────────────────────────────┘      │
└─────────────────────────────────────────────┘
```

### Dual Storage Strategy

1. **Dgraph** (Graph Database)
   - Nostr events and their content
   - Author relationships
   - Tag relationships
   - Event references and mentions
   - Optimized for graph traversals and complex queries

2. **Badger** (Key-Value Store)
   - Metadata markers
   - Serial number counters
   - Relay identity keys
   - Fast key-value operations

## Setup

### 1. Start Dgraph Server

Using Docker (recommended):

```bash
docker run -d \
  --name dgraph \
  -p 8080:8080 \
  -p 9080:9080 \
  -p 8000:8000 \
  -v ~/dgraph:/dgraph \
  dgraph/standalone:latest
```

### 2. Configure ORLY

```bash
export ORLY_DB_TYPE=dgraph
export ORLY_DGRAPH_URL=localhost:9080 # Optional, this is the default
```

### 3. Run ORLY

```bash
./orly
```

On startup, ORLY will:
1. Connect to dgraph server via gRPC
2. Apply the Nostr schema automatically
3. Initialize badger metadata store
4. Initialize serial number counter
5. Start accepting events

## Schema

The Nostr schema defines the following types:

### Event Nodes
```dql
type Event {
  event.id          # Event ID (string, indexed)
  event.serial      # Sequential number (int, indexed)
  event.kind        # Event kind (int, indexed)
  event.created_at  # Timestamp (int, indexed)
  event.content     # Event content (string)
  event.sig         # Signature (string, indexed)
  event.pubkey      # Author pubkey (string, indexed)
  event.authored_by # -> Author (uid)
  event.references  # -> Events (uid list)
  event.mentions    # -> Events (uid list)
  event.tagged_with # -> Tags (uid list)
}
```

### Author Nodes
```dql
type Author {
  author.pubkey # Pubkey (string, indexed, unique)
  author.events # -> Events (uid list, reverse)
}
```

### Tag Nodes
```dql
type Tag {
  tag.type   # Tag type (string, indexed)
  tag.value  # Tag value (string, indexed + fulltext)
  tag.events # -> Events (uid list, reverse)
}
```

### Marker Nodes (Metadata)
```dql
type Marker {
  marker.key   # Key (string, indexed, unique)
  marker.value # Value (string)
}
```

## Configuration

### Environment Variables

- `ORLY_DB_TYPE=dgraph` - Enable dgraph database (default: badger)
- `ORLY_DGRAPH_URL=host:port` - Dgraph gRPC endpoint (default: localhost:9080)
- `ORLY_DATA_DIR=/path` - Data directory for metadata storage

### Connection Details

The dgraph client uses **insecure gRPC** by default for local development. For production deployments:

1. Set up TLS certificates for dgraph
2. Modify `pkg/dgraph/dgraph.go` to use `grpc.WithTransportCredentials()` with your certs (a sketch follows below)

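A minimal sketch of that TLS change, assuming a standard grpc-go client credential setup (the file path and empty server-name override are placeholders for your deployment, not values from this repository):

```go
// Hedged sketch for pkg/dgraph/dgraph.go — replaces the insecure dial
// option with TLS credentials; not the shipped code.
package dgraph

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func dialDgraphTLS(target string) (*grpc.ClientConn, error) {
	// Load the CA certificate that signed the dgraph server's cert.
	creds, err := credentials.NewClientTLSFromFile("/etc/dgraph/tls/ca.crt", "")
	if err != nil {
		return nil, err
	}
	return grpc.Dial(target, grpc.WithTransportCredentials(creds))
}
```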
## Implementation Details

### Files

- `dgraph.go` - Main implementation, initialization, lifecycle
- `schema.go` - Schema definition and application
- `save-event.go` - Event storage (TODO: update to use Mutate)
- `query-events.go` - Event queries (TODO: update to parse DQL responses)
- `fetch-event.go` - Event retrieval methods
- `delete.go` - Event deletion
- `markers.go` - Key-value metadata storage (uses badger)
- `serial.go` - Serial number generation (uses badger)
- `subscriptions.go` - Subscription/payment tracking (uses markers)
- `nip43.go` - NIP-43 invite system (uses markers)
- `import-export.go` - Import/export operations
- `logger.go` - Logging adapter

### Key Methods

#### Initialization
```go
d, err := dgraph.New(ctx, cancel, dataDir, logLevel)
```

#### Querying (DQL)
```go
resp, err := d.Query(ctx, dqlQuery)
```

#### Mutations (RDF N-Quads)
```go
mutation := &api.Mutation{SetNquads: []byte(nquads)}
resp, err := d.Mutate(ctx, mutation)
```

## Development Status

### ✅ Step 1: Dgraph Server Integration (COMPLETE)

- [x] dgo client library integration
- [x] gRPC connection to external dgraph
- [x] Schema definition and auto-application
- [x] Query() and Mutate() method stubs
- [x] ORLY_DGRAPH_URL configuration
- [x] Dual-storage architecture
- [x] Proper lifecycle management

### 📝 Step 2: DQL Implementation (NEXT)

Priority tasks:

1. **save-event.go** - Replace RDF string building with actual Mutate() calls
2. **query-events.go** - Parse actual JSON responses from Query()
3. **fetch-event.go** - Implement DQL queries for event retrieval
4. **delete.go** - Implement deletion mutations

### 📝 Step 3: Testing (FUTURE)

- Integration testing with relay-tester
- Performance benchmarks vs badger
- Memory profiling
- Production deployment testing

## Troubleshooting

### Connection Refused

```
failed to connect to dgraph at localhost:9080: connection refused
```

**Solution:** Ensure dgraph server is running:
```bash
docker ps | grep dgraph
docker logs dgraph
```

### Schema Application Failed

```
failed to apply schema: ...
```

**Solution:** Check dgraph server logs and ensure no schema conflicts:
```bash
docker logs dgraph
```

### Binary Not Finding libsecp256k1.so

This is unrelated to dgraph. Ensure:
```bash
export LD_LIBRARY_PATH="${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$(pwd)/pkg/crypto/p8k"
```

## Performance Considerations

### When to Use Dgraph

**Good fit:**
- Complex graph queries (follows-of-follows, social graphs)
- Full-text search requirements
- Advanced filtering and aggregations
- Multi-hop relationship traversals

**Not ideal for:**
- Simple key-value lookups (badger is faster)
- Very high write throughput (badger has lower latency)
- Single-node deployments with simple queries

### Optimization Tips

1. **Indexing**: Ensure frequently queried fields have appropriate indexes
2. **Pagination**: Use offset/limit in DQL queries for large result sets (see the sketch after this list)
3. **Caching**: Consider adding an LRU cache for hot events
4. **Schema Design**: Use reverse edges for efficient relationship traversal

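A small pagination sketch for tip 2, written against the Event type from the schema above; the page size, offset, and kind filter are illustrative values, not taken from the implementation:

```dql
{
  # Page 3 of kind-1 events, 20 per page, newest first.
  events(func: type(Event), orderdesc: event.created_at, first: 20, offset: 40)
    @filter(eq(event.kind, 1)) {
    uid
    event.id
    event.created_at
  }
}
```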
## Resources

- [Dgraph Documentation](https://dgraph.io/docs/)
- [DQL Query Language](https://dgraph.io/docs/query-language/)
- [dgo Client Library](https://github.com/dgraph-io/dgo)
- [ORLY Implementation Status](../../DGRAPH_IMPLEMENTATION_STATUS.md)

## Contributing

When working on dgraph implementation:

1. Test changes against a local dgraph instance
2. Update schema.go if adding new node types or predicates
3. Ensure dual-storage strategy is maintained (dgraph for events, badger for metadata)
4. Add integration tests for new features
5. Update DGRAPH_IMPLEMENTATION_STATUS.md with progress
@@ -1,330 +0,0 @@
# Dgraph Test Suite

This directory contains a comprehensive test suite for the dgraph database implementation, mirroring all tests from the badger implementation to ensure feature parity.

## Test Files

- **testmain_test.go** - Test configuration (logging, setup)
- **helpers_test.go** - Helper functions for test database setup/teardown
- **save-event_test.go** - Event storage tests
- **query-events_test.go** - Event query tests

## Quick Start

### 1. Start Dgraph Server

```bash
# From project root
./scripts/dgraph-start.sh

# Verify it's running
curl http://localhost:8080/health
```

### 2. Run Tests

```bash
# Run all dgraph tests
./scripts/test-dgraph.sh

# Or run manually
export ORLY_DGRAPH_URL=localhost:9080
CGO_ENABLED=0 go test -v ./pkg/dgraph/...

# Run specific test
CGO_ENABLED=0 go test -v -run TestSaveEvents ./pkg/dgraph
```

## Test Coverage

### Event Storage Tests (`save-event_test.go`)

✅ **TestSaveEvents**
- Loads ~100 events from examples.Cache
- Saves all events chronologically
- Verifies no errors during save
- Reports performance metrics

✅ **TestDeletionEventWithETagRejection**
- Creates a regular event
- Attempts to save deletion event with e-tag
- Verifies deletion events with e-tags are rejected

✅ **TestSaveExistingEvent**
- Saves an event
- Attempts to save same event again
- Verifies duplicate events are rejected

### Event Query Tests (`query-events_test.go`)

✅ **TestQueryEventsByID**
- Queries event by exact ID match
- Verifies single result returned
- Verifies correct event retrieved

✅ **TestQueryEventsByKind**
- Queries events by kind (e.g., kind 1)
- Verifies all results have correct kind
- Tests filtering logic

✅ **TestQueryEventsByAuthor**
- Queries events by author pubkey
- Verifies all results from correct author
- Tests author filtering

✅ **TestReplaceableEventsAndDeletion**
- Creates replaceable event (kind 0)
- Creates newer version
- Verifies only newer version returned in general queries
- Creates deletion event
- Verifies deleted event not returned
- Tests replaceable event logic and deletion

✅ **TestParameterizedReplaceableEventsAndDeletion**
- Creates parameterized replaceable event (kind 30000+)
- Adds d-tag
- Creates deletion event with e-tag
- Verifies deleted event not returned
- Tests parameterized replaceable logic

✅ **TestQueryEventsByTimeRange**
- Queries events by since/until timestamps
- Verifies all results within time range
- Tests temporal filtering

✅ **TestQueryEventsByTag**
- Finds event with tags
- Queries by tag key/value
- Verifies all results have the tag
- Tests tag filtering logic

✅ **TestCountEvents**
- Counts all events
- Counts events by kind filter
- Verifies correct counts returned
- Tests counting functionality

## Test Helpers

### setupTestDB(t *testing.T)

Creates a test dgraph database:

1. **Checks dgraph availability** - Skips test if server not running
2. **Creates temp directory** - For metadata storage
3. **Initializes dgraph client** - Connects to server
4. **Drops all data** - Starts with clean slate
5. **Loads test events** - From examples.Cache (~100 events)
6. **Sorts chronologically** - Ensures addressable events processed in order
7. **Saves all events** - Populates test database

**Returns:** `(*D, []*event.E, context.Context, context.CancelFunc, string)`
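A sketch of how a test would use these helpers, following the documented return signature; the body's assertion is illustrative rather than copied from the real test files:

```go
package dgraph

import "testing"

// Hedged usage sketch of the documented helpers, not verbatim source.
func TestHelpersUsageExample(t *testing.T) {
	// setupTestDB skips the test automatically if dgraph is unreachable.
	db, events, ctx, cancel, tempDir := setupTestDB(t)
	defer cleanupTestDB(t, db, cancel, tempDir)

	if len(events) == 0 {
		t.Fatal("expected pre-loaded events from examples.Cache")
	}
	_ = ctx // pass ctx into db query calls here and assert on the results
	_ = db
}
```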
### cleanupTestDB(t, db, cancel, tempDir)

Cleans up after tests:
- Closes database connection
- Cancels context
- Removes temp directory

### skipIfDgraphNotAvailable(t *testing.T)

Checks if dgraph is running and skips test if not available.

## Running Tests

### Prerequisites

1. **Dgraph Server** - Must be running before tests
2. **Go 1.21+** - For running tests
3. **CGO_ENABLED=0** - For pure Go build

### Test Execution

#### All Tests

```bash
./scripts/test-dgraph.sh
```

#### Specific Test File

```bash
CGO_ENABLED=0 go test -v ./pkg/dgraph -run TestSaveEvents
```

#### With Logging

```bash
export TEST_LOG=1
CGO_ENABLED=0 go test -v ./pkg/dgraph/...
```

#### With Timeout

```bash
CGO_ENABLED=0 go test -v -timeout 10m ./pkg/dgraph/...
```

### Integration Testing

Run tests + relay-tester:

```bash
./scripts/test-dgraph.sh --relay-tester
```

This will:
1. Run all dgraph package tests
2. Start ORLY with dgraph backend
3. Run relay-tester against ORLY
4. Report results

## Test Data

Tests use `pkg/encoders/event/examples.Cache` which contains:
- ~100 real Nostr events
- Text notes (kind 1)
- Profile metadata (kind 0)
- Various other kinds
- Events with tags, references, mentions
- Multiple authors and timestamps

This ensures tests cover realistic scenarios.

## Debugging Tests

### View Test Output

```bash
CGO_ENABLED=0 go test -v ./pkg/dgraph/... 2>&1 | tee test-output.log
```

### Check Dgraph State

```bash
# View data via Ratel UI
open http://localhost:8000

# Query via HTTP
curl -X POST localhost:8080/query -d '{
  events(func: type(Event), first: 10) {
    uid
    event.id
    event.kind
    event.created_at
  }
}'
```

### Enable Dgraph Logging

```bash
docker logs dgraph-orly-test -f
```

## Test Failures

### "Dgraph server not available"

**Cause:** Dgraph is not running

**Fix:**
```bash
./scripts/dgraph-start.sh
```

### Connection Timeouts

**Cause:** Dgraph server overloaded or network issues

**Fix:**
- Increase test timeout: `go test -timeout 20m`
- Check dgraph resources: `docker stats dgraph-orly-test`
- Restart dgraph: `docker restart dgraph-orly-test`

### Schema Errors

**Cause:** Schema conflicts or version mismatch

**Fix:**
- Drop all data: Tests call `dropAll()` automatically
- Check dgraph version: `docker exec dgraph-orly-test dgraph version`

### Test Hangs

**Cause:** Deadlock or infinite loop

**Fix:**
- Send SIGQUIT: `kill -QUIT <test-pid>`
- View goroutine dump
- Check dgraph logs

## Continuous Integration

### GitHub Actions Example

```yaml
name: Dgraph Tests

on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest

    services:
      dgraph:
        image: dgraph/standalone:latest
        ports:
          - 8080:8080
          - 9080:9080
        options: >-
          --health-cmd "curl -f http://localhost:8080/health"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5

    steps:
      - uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: '1.21'

      - name: Run dgraph tests
        env:
          ORLY_DGRAPH_URL: localhost:9080
        run: |
          CGO_ENABLED=0 go test -v -timeout 10m ./pkg/dgraph/...
```

## Performance Benchmarks

Compare with badger:

```bash
# Badger benchmarks
go test -bench=. -benchmem ./pkg/database/...

# Dgraph benchmarks
go test -bench=. -benchmem ./pkg/dgraph/...
```

## Related Documentation

- [Main Testing Guide](../../scripts/DGRAPH_TESTING.md)
- [Implementation Status](../../DGRAPH_IMPLEMENTATION_STATUS.md)
- [Package README](README.md)

## Contributing

When adding new tests:

1. **Mirror badger tests** - Ensure feature parity
2. **Use test helpers** - setupTestDB() and cleanupTestDB()
3. **Skip if unavailable** - Call skipIfDgraphNotAvailable(t)
4. **Clean up resources** - Always defer cleanupTestDB()
5. **Test chronologically** - Sort events by timestamp for addressable events
6. **Verify behavior** - Don't just check for no errors, verify correctness
@@ -1,262 +0,0 @@
package dgraph

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/dgraph-io/dgo/v230/protos/api"
	"next.orly.dev/pkg/database/indexes/types"
	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/hex"
)

// DeleteEvent deletes an event by its ID
func (d *D) DeleteEvent(c context.Context, eid []byte) error {
	idStr := hex.Enc(eid)

	// Find the event's UID
	query := fmt.Sprintf(`{
		event(func: eq(event.id, %q)) {
			uid
		}
	}`, idStr)

	resp, err := d.Query(c, query)
	if err != nil {
		return fmt.Errorf("failed to find event for deletion: %w", err)
	}

	// Parse UID
	var result struct {
		Event []struct {
			UID string `json:"uid"`
		} `json:"event"`
	}

	if err = unmarshalJSON(resp.Json, &result); err != nil {
		return err
	}

	if len(result.Event) == 0 {
		return nil // Event doesn't exist
	}

	// Delete the event node
	mutation := &api.Mutation{
		DelNquads: []byte(fmt.Sprintf("<%s> * * .", result.Event[0].UID)),
		CommitNow: true,
	}

	if _, err = d.Mutate(c, mutation); err != nil {
		return fmt.Errorf("failed to delete event: %w", err)
	}

	return nil
}

// DeleteEventBySerial deletes an event by its serial number
func (d *D) DeleteEventBySerial(c context.Context, ser *types.Uint40, ev *event.E) error {
	serial := ser.Get()

	// Find the event's UID
	query := fmt.Sprintf(`{
		event(func: eq(event.serial, %d)) {
			uid
		}
	}`, serial)

	resp, err := d.Query(c, query)
	if err != nil {
		return fmt.Errorf("failed to find event for deletion: %w", err)
	}

	// Parse UID
	var result struct {
		Event []struct {
			UID string `json:"uid"`
		} `json:"event"`
	}

	if err = unmarshalJSON(resp.Json, &result); err != nil {
		return err
	}

	if len(result.Event) == 0 {
		return nil // Event doesn't exist
	}

	// Delete the event node
	mutation := &api.Mutation{
		DelNquads: []byte(fmt.Sprintf("<%s> * * .", result.Event[0].UID)),
		CommitNow: true,
	}

	if _, err = d.Mutate(c, mutation); err != nil {
		return fmt.Errorf("failed to delete event: %w", err)
	}

	return nil
}

// DeleteExpired removes events that have passed their expiration time (NIP-40)
func (d *D) DeleteExpired() {
	// Query for events that have an "expiration" tag
	// NIP-40: events should have a tag ["expiration", "<unix timestamp>"]
	query := `{
		events(func: has(event.tags)) {
			uid
			event.id
			event.tags
			event.created_at
		}
	}`

	resp, err := d.Query(context.Background(), query)
	if err != nil {
		d.Logger.Errorf("failed to query events for expiration: %v", err)
		return
	}

	var result struct {
		Events []struct {
			UID       string `json:"uid"`
			ID        string `json:"event.id"`
			Tags      string `json:"event.tags"`
			CreatedAt int64  `json:"event.created_at"`
		} `json:"events"`
	}

	if err = unmarshalJSON(resp.Json, &result); err != nil {
		d.Logger.Errorf("failed to parse events for expiration: %v", err)
		return
	}

	now := time.Now().Unix()
	deletedCount := 0

	for _, ev := range result.Events {
		// Parse tags
		if ev.Tags == "" {
			continue
		}

		var tags [][]string
		if err := json.Unmarshal([]byte(ev.Tags), &tags); err != nil {
			continue
		}

		// Look for expiration tag
		var expirationTime int64
		for _, tag := range tags {
			if len(tag) >= 2 && tag[0] == "expiration" {
				// Parse expiration timestamp
				if _, err := fmt.Sscanf(tag[1], "%d", &expirationTime); err != nil {
					continue
				}
				break
			}
		}

		// If expiration time found and passed, delete the event
		if expirationTime > 0 && now > expirationTime {
			mutation := &api.Mutation{
				DelNquads: []byte(fmt.Sprintf("<%s> * * .", ev.UID)),
				CommitNow: true,
			}

			if _, err := d.Mutate(context.Background(), mutation); err != nil {
				d.Logger.Warningf("failed to delete expired event %s: %v", ev.ID, err)
			} else {
				deletedCount++
			}
		}
	}

	if deletedCount > 0 {
		d.Logger.Infof("deleted %d expired events", deletedCount)
	}
}

// ProcessDelete processes a kind 5 deletion event
func (d *D) ProcessDelete(ev *event.E, admins [][]byte) (err error) {
	if ev.Kind != 5 {
		return fmt.Errorf("event is not a deletion event (kind 5)")
	}

	// Extract event IDs to delete from tags
	for _, tag := range *ev.Tags {
		if len(tag.T) >= 2 && string(tag.T[0]) == "e" {
			eventID := tag.T[1]

			// Verify the deletion is authorized (author must match or be admin)
			if err = d.CheckForDeleted(ev, admins); err != nil {
				continue
			}

			// Delete the event
			if err = d.DeleteEvent(context.Background(), eventID); err != nil {
				// Log error but continue with other deletions
				d.Logger.Errorf("failed to delete event %s: %v", hex.Enc(eventID), err)
			}
		}
	}

	return nil
}

// CheckForDeleted checks if an event has been deleted
func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
	// Query for delete events (kind 5) that reference this event
	evID := hex.Enc(ev.ID[:])

	query := fmt.Sprintf(`{
		deletes(func: eq(event.kind, 5)) @filter(eq(event.pubkey, %q)) {
			uid
			event.pubkey
			references @filter(eq(event.id, %q)) {
				event.id
			}
		}
	}`, hex.Enc(ev.Pubkey), evID)

	resp, err := d.Query(context.Background(), query)
	if err != nil {
		return fmt.Errorf("failed to check for deletions: %w", err)
	}

	var result struct {
		Deletes []struct {
			UID        string `json:"uid"`
			Pubkey     string `json:"event.pubkey"`
			References []struct {
				ID string `json:"event.id"`
			} `json:"references"`
		} `json:"deletes"`
	}

	if err = unmarshalJSON(resp.Json, &result); err != nil {
		return err
	}

	// Check if any delete events reference this event
	for _, del := range result.Deletes {
		if len(del.References) > 0 {
			// Check if deletion is from the author or an admin
			delPubkey, _ := hex.Dec(del.Pubkey)
			if string(delPubkey) == string(ev.Pubkey) {
				return fmt.Errorf("event has been deleted by author")
			}

			// Check admins
			for _, admin := range admins {
				if string(delPubkey) == string(admin) {
					return fmt.Errorf("event has been deleted by admin")
				}
			}
		}
	}

	return nil
}
@@ -1,289 +0,0 @@
// Package dgraph provides a Dgraph-based implementation of the database interface.
// This is a simplified implementation for testing - full dgraph integration to be completed later.
package dgraph

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"

	"github.com/dgraph-io/dgo/v230"
	"github.com/dgraph-io/dgo/v230/protos/api"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"lol.mleku.dev"
	"lol.mleku.dev/chk"
	"next.orly.dev/pkg/database"
	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/filter"
	"next.orly.dev/pkg/utils/apputil"
)

// D implements the database.Database interface using Dgraph as the storage backend
type D struct {
	ctx     context.Context
	cancel  context.CancelFunc
	dataDir string
	Logger  *logger

	// Dgraph client connection
	client *dgo.Dgraph
	conn   *grpc.ClientConn

	// Configuration
	dgraphURL           string
	enableGraphQL       bool
	enableIntrospection bool

	ready chan struct{} // Closed when database is ready to serve requests
}

// Ensure D implements database.Database interface at compile time
var _ database.Database = (*D)(nil)

// init registers the dgraph database factory
func init() {
	database.RegisterDgraphFactory(func(
		ctx context.Context,
		cancel context.CancelFunc,
		dataDir string,
		logLevel string,
	) (database.Database, error) {
		return New(ctx, cancel, dataDir, logLevel)
	})
}

// Config holds configuration options for the Dgraph database
type Config struct {
	DataDir             string
	LogLevel            string
	DgraphURL           string // Dgraph gRPC endpoint (e.g., "localhost:9080")
	EnableGraphQL       bool
	EnableIntrospection bool
}

// New creates a new Dgraph-based database instance
func New(
	ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
) (
	d *D, err error,
) {
	// Get the dgraph URL from the environment, defaulting to localhost
	dgraphURL := os.Getenv("ORLY_DGRAPH_URL")
	if dgraphURL == "" {
		dgraphURL = "localhost:9080"
	}

	d = &D{
		ctx:                 ctx,
		cancel:              cancel,
		dataDir:             dataDir,
		Logger:              NewLogger(lol.GetLogLevel(logLevel), dataDir),
		dgraphURL:           dgraphURL,
		enableGraphQL:       false,
		enableIntrospection: false,
		ready:               make(chan struct{}),
	}

	// Ensure the data directory exists
	if err = os.MkdirAll(dataDir, 0755); chk.E(err) {
		return
	}

	// Ensure directory structure
	dummyFile := filepath.Join(dataDir, "dummy.sst")
	if err = apputil.EnsureDir(dummyFile); chk.E(err) {
		return
	}

	// Initialize the dgraph client connection
	if err = d.initDgraphClient(); chk.E(err) {
		return
	}

	// Apply the Nostr schema to dgraph
	if err = d.applySchema(ctx); chk.E(err) {
		return
	}

	// Initialize the serial counter
	if err = d.initSerialCounter(); chk.E(err) {
		return
	}

	// Start the warmup goroutine to signal when the database is ready
	go d.warmup()

	// Set up the shutdown handler
	go func() {
		<-d.ctx.Done()
		d.cancel()
		if d.conn != nil {
			d.conn.Close()
		}
	}()

	return
}

// initDgraphClient establishes the connection to the dgraph server
func (d *D) initDgraphClient() error {
	d.Logger.Infof("connecting to dgraph at %s", d.dgraphURL)

	// Establish the gRPC connection
	conn, err := grpc.Dial(d.dgraphURL, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return fmt.Errorf("failed to connect to dgraph at %s: %w", d.dgraphURL, err)
	}

	d.conn = conn
	d.client = dgo.NewDgraphClient(api.NewDgraphClient(conn))

	d.Logger.Infof("successfully connected to dgraph")
	return nil
}

// Query executes a DQL query against dgraph
func (d *D) Query(ctx context.Context, query string) (*api.Response, error) {
	txn := d.client.NewReadOnlyTxn()
	defer txn.Discard(ctx)

	resp, err := txn.Query(ctx, query)
	if err != nil {
		return nil, fmt.Errorf("dgraph query failed: %w", err)
	}

	return resp, nil
}

// Mutate executes a mutation against dgraph
func (d *D) Mutate(ctx context.Context, mutation *api.Mutation) (*api.Response, error) {
	txn := d.client.NewTxn()
	defer txn.Discard(ctx)

	resp, err := txn.Mutate(ctx, mutation)
	if err != nil {
		return nil, fmt.Errorf("dgraph mutation failed: %w", err)
	}

	// Only commit if CommitNow is false (the mutation didn't auto-commit)
	if !mutation.CommitNow {
		if err := txn.Commit(ctx); err != nil {
			return nil, fmt.Errorf("dgraph commit failed: %w", err)
		}
	}

	return resp, nil
}

// Path returns the data directory path
func (d *D) Path() string { return d.dataDir }

// Init initializes the database with a given path (no-op, path set in New)
func (d *D) Init(path string) (err error) {
	// Path already set in New()
	return nil
}

// Sync flushes pending writes (Dgraph handles persistence automatically)
func (d *D) Sync() (err error) {
	return nil
}

// Close closes the database
func (d *D) Close() (err error) {
	d.cancel()
	if d.conn != nil {
		if e := d.conn.Close(); e != nil {
			err = e
		}
	}
	return
}

// Wipe removes all data
func (d *D) Wipe() (err error) {
	// Drop all data in Dgraph using Alter
	op := &api.Operation{
		DropOp: api.Operation_DATA,
	}

	if err = d.client.Alter(context.Background(), op); err != nil {
		return fmt.Errorf("failed to drop dgraph data: %w", err)
	}

	// Remove the data directory
	if err = os.RemoveAll(d.dataDir); chk.E(err) {
		return
	}

	return nil
}

// SetLogLevel sets the logging level
func (d *D) SetLogLevel(level string) {
	// d.Logger.SetLevel(lol.GetLogLevel(level))
}

// EventIdsBySerial retrieves event IDs by serial range
func (d *D) EventIdsBySerial(start uint64, count int) (
	evs []uint64, err error,
) {
	// Query for events in the specified serial range
	query := fmt.Sprintf(`{
		events(func: ge(event.serial, %d), orderdesc: event.serial, first: %d) {
			event.serial
		}
	}`, start, count)

	resp, err := d.Query(context.Background(), query)
	if err != nil {
		return nil, fmt.Errorf("failed to query event IDs by serial: %w", err)
	}

	var result struct {
		Events []struct {
			Serial int64 `json:"event.serial"`
		} `json:"events"`
	}

	if err = json.Unmarshal(resp.Json, &result); err != nil {
		return nil, err
	}

	evs = make([]uint64, 0, len(result.Events))
	for _, ev := range result.Events {
		evs = append(evs, uint64(ev.Serial))
	}

	return evs, nil
}

// RunMigrations runs database migrations (no-op for dgraph)
func (d *D) RunMigrations() {
	// No-op for dgraph
}

// Ready returns a channel that closes when the database is ready to serve requests.
// This allows callers to wait for database warmup to complete.
func (d *D) Ready() <-chan struct{} {
	return d.ready
}

// warmup performs database warmup operations and closes the ready channel when complete.
// For Dgraph, warmup ensures the connection is healthy and the schema is applied.
func (d *D) warmup() {
	defer close(d.ready)

	// The Dgraph connection and schema are already verified during
	// initialization, so warmup only needs to announce readiness.
	d.Logger.Infof("dgraph database warmup complete, ready to serve requests")
}

func (d *D) GetCachedJSON(f *filter.F) ([][]byte, bool) { return nil, false }

func (d *D) CacheMarshaledJSON(f *filter.F, marshaledJSON [][]byte) {}

func (d *D) GetCachedEvents(f *filter.F) (event.S, bool) { return nil, false }

func (d *D) CacheEvents(f *filter.F, events event.S) {}

func (d *D) InvalidateQueryCache() {}
@@ -1,392 +0,0 @@
package dgraph

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/database/indexes/types"
	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/hex"
	"git.mleku.dev/mleku/nostr/encoders/tag"
	"next.orly.dev/pkg/interfaces/store"
)

// FetchEventBySerial retrieves an event by its serial number
func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
	serial := ser.Get()

	query := fmt.Sprintf(`{
		event(func: eq(event.serial, %d)) {
			event.id
			event.kind
			event.created_at
			event.content
			event.sig
			event.pubkey
			event.tags
		}
	}`, serial)

	resp, err := d.Query(context.Background(), query)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch event by serial: %w", err)
	}

	evs, err := d.parseEventsFromResponse(resp.Json)
	if err != nil {
		return nil, err
	}

	if len(evs) == 0 {
		return nil, fmt.Errorf("event not found")
	}

	return evs[0], nil
}

// FetchEventsBySerials retrieves multiple events by their serial numbers
func (d *D) FetchEventsBySerials(serials []*types.Uint40) (
	events map[uint64]*event.E, err error,
) {
	if len(serials) == 0 {
		return make(map[uint64]*event.E), nil
	}

	// Build a filter for multiple serials using OR conditions
	serialConditions := make([]string, len(serials))
	for i, ser := range serials {
		serialConditions[i] = fmt.Sprintf("eq(event.serial, %d)", ser.Get())
	}
	serialFilter := strings.Join(serialConditions, " OR ")

	// Query with proper batch filtering
	query := fmt.Sprintf(`{
		events(func: has(event.serial)) @filter(%s) {
			event.id
			event.kind
			event.created_at
			event.content
			event.sig
			event.pubkey
			event.tags
			event.serial
		}
	}`, serialFilter)

	resp, err := d.Query(context.Background(), query)
	if err != nil {
		return nil, fmt.Errorf("failed to fetch events by serials: %w", err)
	}

	// Parse the response including serial numbers
	var result struct {
		Events []struct {
			ID        string `json:"event.id"`
			Kind      int    `json:"event.kind"`
			CreatedAt int64  `json:"event.created_at"`
			Content   string `json:"event.content"`
			Sig       string `json:"event.sig"`
			Pubkey    string `json:"event.pubkey"`
			Tags      string `json:"event.tags"`
			Serial    int64  `json:"event.serial"`
		} `json:"events"`
	}

	if err = json.Unmarshal(resp.Json, &result); err != nil {
		return nil, err
	}

	// Map events by their serial numbers
	events = make(map[uint64]*event.E)
	for _, ev := range result.Events {
		// Decode hex strings
		id, err := hex.Dec(ev.ID)
		if err != nil {
			continue
		}
		sig, err := hex.Dec(ev.Sig)
		if err != nil {
			continue
		}
		pubkey, err := hex.Dec(ev.Pubkey)
		if err != nil {
			continue
		}

		// Parse tags from JSON
		var tags tag.S
		if ev.Tags != "" {
			if err := json.Unmarshal([]byte(ev.Tags), &tags); err != nil {
				continue
			}
		}

		// Create the event
		e := &event.E{
			Kind:      uint16(ev.Kind),
			CreatedAt: ev.CreatedAt,
			Content:   []byte(ev.Content),
			Tags:      &tags,
		}

		// Copy fixed-size arrays
		copy(e.ID[:], id)
		copy(e.Sig[:], sig)
		copy(e.Pubkey[:], pubkey)

		events[uint64(ev.Serial)] = e
	}

	return events, nil
}

// GetSerialById retrieves the serial number for an event ID
func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
	idStr := hex.Enc(id)

	query := fmt.Sprintf(`{
		event(func: eq(event.id, %q)) {
			event.serial
		}
	}`, idStr)

	resp, err := d.Query(context.Background(), query)
	if err != nil {
		return nil, fmt.Errorf("failed to get serial by ID: %w", err)
	}

	var result struct {
		Event []struct {
			Serial int64 `json:"event.serial"`
		} `json:"event"`
	}

	if err = json.Unmarshal(resp.Json, &result); err != nil {
		return nil, err
	}

	if len(result.Event) == 0 {
		return nil, fmt.Errorf("event not found")
	}

	ser = &types.Uint40{}
	ser.Set(uint64(result.Event[0].Serial))

	return ser, nil
}

// GetSerialsByIds retrieves serial numbers for multiple event IDs
func (d *D) GetSerialsByIds(ids *tag.T) (
	serials map[string]*types.Uint40, err error,
) {
	serials = make(map[string]*types.Uint40)

	if len(ids.T) == 0 {
		return serials, nil
	}

	// Build a batch query for all IDs at once
	idConditions := make([]string, 0, len(ids.T))
	idMap := make(map[string][]byte) // Map hex ID to original bytes

	for _, idBytes := range ids.T {
		if len(idBytes) > 0 {
			idStr := hex.Enc(idBytes)
			idConditions = append(idConditions, fmt.Sprintf("eq(event.id, %q)", idStr))
			idMap[idStr] = idBytes
		}
	}

	if len(idConditions) == 0 {
		return serials, nil
	}

	// Create a single query with OR conditions
	idFilter := strings.Join(idConditions, " OR ")
	query := fmt.Sprintf(`{
		events(func: has(event.id)) @filter(%s) {
			event.id
			event.serial
		}
	}`, idFilter)

	resp, err := d.Query(context.Background(), query)
	if err != nil {
		return nil, fmt.Errorf("failed to batch query serials by IDs: %w", err)
	}

	var result struct {
		Events []struct {
			ID     string `json:"event.id"`
			Serial int64  `json:"event.serial"`
		} `json:"events"`
	}

	if err = json.Unmarshal(resp.Json, &result); err != nil {
		return nil, err
	}

	// Map the results back
	for _, ev := range result.Events {
		serial := types.Uint40{}
		serial.Set(uint64(ev.Serial))
		serials[ev.ID] = &serial
	}

	return serials, nil
}

// GetSerialsByIdsWithFilter retrieves serials with a filter function
func (d *D) GetSerialsByIdsWithFilter(
	ids *tag.T, fn func(ev *event.E, ser *types.Uint40) bool,
) (serials map[string]*types.Uint40, err error) {
	serials = make(map[string]*types.Uint40)

	if fn == nil {
		// No filter, just return all
		return d.GetSerialsByIds(ids)
	}

	// With a filter, the events themselves need to be fetched
	for _, id := range ids.T {
		if len(id) > 0 {
			serial, err := d.GetSerialById(id)
			if err != nil {
				continue
			}

			ev, err := d.FetchEventBySerial(serial)
			if err != nil {
				continue
			}

			if fn(ev, serial) {
				serials[string(id)] = serial
			}
		}
	}

	return serials, nil
}

// GetSerialsByRange retrieves serials within a range
func (d *D) GetSerialsByRange(idx database.Range) (
	serials types.Uint40s, err error,
) {
	// Range represents a byte-prefix range for index scanning.
	// For dgraph, this is converted to a query on indexed fields; the range
	// is typically used for scanning event IDs or other hex-encoded keys.

	if len(idx.Start) == 0 && len(idx.End) == 0 {
		return nil, fmt.Errorf("empty range provided")
	}

	startStr := hex.Enc(idx.Start)
	endStr := hex.Enc(idx.End)

	// Query for events with IDs in the specified range
	query := fmt.Sprintf(`{
		events(func: ge(event.id, %q)) @filter(le(event.id, %q)) {
			event.serial
		}
	}`, startStr, endStr)

	resp, err := d.Query(context.Background(), query)
	if err != nil {
		return nil, fmt.Errorf("failed to query serials by range: %w", err)
	}

	var result struct {
		Events []struct {
			Serial int64 `json:"event.serial"`
		} `json:"events"`
	}

	if err = json.Unmarshal(resp.Json, &result); err != nil {
		return nil, err
	}

	serials = make([]*types.Uint40, 0, len(result.Events))
	for _, ev := range result.Events {
		serial := types.Uint40{}
		serial.Set(uint64(ev.Serial))
		serials = append(serials, &serial)
	}

	return serials, nil
}

// GetFullIdPubkeyBySerial retrieves the ID and pubkey for a serial number
func (d *D) GetFullIdPubkeyBySerial(ser *types.Uint40) (
	fidpk *store.IdPkTs, err error,
) {
	serial := ser.Get()

	query := fmt.Sprintf(`{
		event(func: eq(event.serial, %d)) {
			event.id
			event.pubkey
			event.created_at
		}
	}`, serial)

	resp, err := d.Query(context.Background(), query)
	if err != nil {
		return nil, fmt.Errorf("failed to get ID and pubkey by serial: %w", err)
	}

	var result struct {
		Event []struct {
			ID        string `json:"event.id"`
			Pubkey    string `json:"event.pubkey"`
			CreatedAt int64  `json:"event.created_at"`
		} `json:"event"`
	}

	if err = json.Unmarshal(resp.Json, &result); err != nil {
		return nil, err
	}

	if len(result.Event) == 0 {
		return nil, fmt.Errorf("event not found")
	}

	id, err := hex.Dec(result.Event[0].ID)
	if err != nil {
		return nil, err
	}

	pubkey, err := hex.Dec(result.Event[0].Pubkey)
	if err != nil {
		return nil, err
	}

	fidpk = &store.IdPkTs{
		Id:  id,
		Pub: pubkey,
		Ts:  result.Event[0].CreatedAt,
		Ser: serial,
	}

	return fidpk, nil
}

// GetFullIdPubkeyBySerials retrieves IDs and pubkeys for multiple serials
func (d *D) GetFullIdPubkeyBySerials(sers []*types.Uint40) (
	fidpks []*store.IdPkTs, err error,
) {
	fidpks = make([]*store.IdPkTs, 0, len(sers))

	for _, ser := range sers {
		fidpk, err := d.GetFullIdPubkeyBySerial(ser)
		if err != nil {
			continue // Skip errors, continue with others
		}
		fidpks = append(fidpks, fidpk)
	}

	return fidpks, nil
}
@@ -1,144 +0,0 @@
package dgraph

import (
	"bufio"
	"bytes"
	"context"
	"net"
	"os"
	"sort"
	"testing"
	"time"

	"lol.mleku.dev/chk"
	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/event/examples"
)

// isDgraphAvailable checks if a dgraph server is running
func isDgraphAvailable() bool {
	dgraphURL := os.Getenv("ORLY_DGRAPH_URL")
	if dgraphURL == "" {
		dgraphURL = "localhost:9080"
	}

	conn, err := net.DialTimeout("tcp", dgraphURL, 2*time.Second)
	if err != nil {
		return false
	}
	conn.Close()
	return true
}

// skipIfDgraphNotAvailable skips the test if dgraph is not available
func skipIfDgraphNotAvailable(t *testing.T) {
	if !isDgraphAvailable() {
		dgraphURL := os.Getenv("ORLY_DGRAPH_URL")
		if dgraphURL == "" {
			dgraphURL = "localhost:9080"
		}
		t.Skipf("Dgraph server not available at %s. Start with: docker run -p 9080:9080 dgraph/standalone:latest", dgraphURL)
	}
}

// setupTestDB creates a new test dgraph database and loads example events
func setupTestDB(t *testing.T) (
	*D, []*event.E, context.Context, context.CancelFunc, string,
) {
	skipIfDgraphNotAvailable(t)

	// Create a temporary directory for metadata storage
	tempDir, err := os.MkdirTemp("", "test-dgraph-*")
	if err != nil {
		t.Fatalf("Failed to create temporary directory: %v", err)
	}

	// Create a context and cancel function for the database
	ctx, cancel := context.WithCancel(context.Background())

	// Initialize the dgraph database
	db, err := New(ctx, cancel, tempDir, "info")
	if err != nil {
		cancel()
		os.RemoveAll(tempDir)
		t.Fatalf("Failed to create dgraph database: %v", err)
	}

	// Drop all data to start fresh
	if err := db.dropAll(ctx); err != nil {
		db.Close()
		cancel()
		os.RemoveAll(tempDir)
		t.Fatalf("Failed to drop all data: %v", err)
	}

	// Create a scanner to read events from examples.Cache
	scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
	scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)

	var events []*event.E

	// First, collect all events from examples.Cache
	for scanner.Scan() {
		chk.E(scanner.Err())
		b := scanner.Bytes()
		ev := event.New()

		// Unmarshal the event
		if _, err = ev.Unmarshal(b); chk.E(err) {
			ev.Free()
			db.Close()
			cancel()
			os.RemoveAll(tempDir)
			t.Fatal(err)
		}

		events = append(events, ev)
	}

	// Check for scanner errors
	if err = scanner.Err(); err != nil {
		db.Close()
		cancel()
		os.RemoveAll(tempDir)
		t.Fatalf("Scanner error: %v", err)
	}

	// Sort events by CreatedAt to ensure addressable events are processed in chronological order
	sort.Slice(events, func(i, j int) bool {
		return events[i].CreatedAt < events[j].CreatedAt
	})

	// Count the number of events processed
	eventCount := 0

	// Now process each event in chronological order
	for _, ev := range events {
		// Save the event to the database
		if _, err = db.SaveEvent(ctx, ev); err != nil {
			db.Close()
			cancel()
			os.RemoveAll(tempDir)
			t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
		}

		eventCount++
	}

	t.Logf("Successfully saved %d events to dgraph database", eventCount)

	return db, events, ctx, cancel, tempDir
}

// cleanupTestDB cleans up the test database
func cleanupTestDB(t *testing.T, db *D, cancel context.CancelFunc, tempDir string) {
	if db != nil {
		db.Close()
	}
	if cancel != nil {
		cancel()
	}
	if tempDir != "" {
		os.RemoveAll(tempDir)
	}
}
@@ -1,44 +0,0 @@
package dgraph

import (
	"fmt"

	"git.mleku.dev/mleku/nostr/crypto/keys"
)

// Relay identity methods.
// We use the marker system to store the relay's private key.

const relayIdentityMarkerKey = "relay_identity_secret"

// GetRelayIdentitySecret retrieves the relay's identity secret key
func (d *D) GetRelayIdentitySecret() (skb []byte, err error) {
	return d.GetMarker(relayIdentityMarkerKey)
}

// SetRelayIdentitySecret sets the relay's identity secret key
func (d *D) SetRelayIdentitySecret(skb []byte) error {
	return d.SetMarker(relayIdentityMarkerKey, skb)
}

// GetOrCreateRelayIdentitySecret retrieves or creates the relay identity
func (d *D) GetOrCreateRelayIdentitySecret() (skb []byte, err error) {
	skb, err = d.GetRelayIdentitySecret()
	if err == nil {
		return skb, nil
	}

	// Generate a new identity
	skb, err = keys.GenerateSecretKey()
	if err != nil {
		return nil, fmt.Errorf("failed to generate identity: %w", err)
	}

	// Store it
	if err = d.SetRelayIdentitySecret(skb); err != nil {
		return nil, fmt.Errorf("failed to store identity: %w", err)
	}

	d.Logger.Infof("generated new relay identity")
	return skb, nil
}
@@ -1,171 +0,0 @@
package dgraph

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"strings"

	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/hex"
)

// Import imports events from a reader (JSONL format)
func (d *D) Import(rr io.Reader) {
	d.ImportEventsFromReader(context.Background(), rr)
}

// Export exports events to a writer (JSONL format)
func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
	// Build the query based on whether pubkeys are specified
	var query string

	if len(pubkeys) > 0 {
		// Build the pubkey filter
		pubkeyStrs := make([]string, len(pubkeys))
		for i, pk := range pubkeys {
			pubkeyStrs[i] = fmt.Sprintf("eq(event.pubkey, %q)", hex.Enc(pk))
		}
		pubkeyFilter := strings.Join(pubkeyStrs, " OR ")

		query = fmt.Sprintf(`{
			events(func: has(event.id)) @filter(%s) {
				event.id
				event.kind
				event.created_at
				event.content
				event.sig
				event.pubkey
				event.tags
			}
		}`, pubkeyFilter)
	} else {
		// Export all events
		query = `{
			events(func: has(event.id)) {
				event.id
				event.kind
				event.created_at
				event.content
				event.sig
				event.pubkey
				event.tags
			}
		}`
	}

	// Execute the query
	resp, err := d.Query(c, query)
	if err != nil {
		d.Logger.Errorf("failed to query events for export: %v", err)
		fmt.Fprintf(w, "# Error: failed to query events: %v\n", err)
		return
	}

	// Parse the events
	evs, err := d.parseEventsFromResponse(resp.Json)
	if err != nil {
		d.Logger.Errorf("failed to parse events for export: %v", err)
		fmt.Fprintf(w, "# Error: failed to parse events: %v\n", err)
		return
	}

	// Write a header comment
	fmt.Fprintf(w, "# Exported %d events from dgraph\n", len(evs))

	// Write each event as JSONL
	count := 0
	for _, ev := range evs {
		jsonData, err := json.Marshal(ev)
		if err != nil {
			d.Logger.Warningf("failed to marshal event: %v", err)
			continue
		}

		if _, err := fmt.Fprintf(w, "%s\n", jsonData); err != nil {
			d.Logger.Errorf("failed to write event: %v", err)
			return
		}

		count++
		if count%1000 == 0 {
			d.Logger.Infof("exported %d events", count)
		}
	}

	d.Logger.Infof("export complete: %d events written", count)
}

// ImportEventsFromReader imports events from a reader
func (d *D) ImportEventsFromReader(ctx context.Context, rr io.Reader) error {
	scanner := bufio.NewScanner(rr)
	scanner.Buffer(make([]byte, 1024*1024), 10*1024*1024) // 10MB max line size

	count := 0
	for scanner.Scan() {
		line := scanner.Bytes()
		if len(line) == 0 {
			continue
		}

		// Skip comments
		if line[0] == '#' {
			continue
		}

		// Parse the event
		ev := &event.E{}
		if err := json.Unmarshal(line, ev); err != nil {
			d.Logger.Warningf("failed to parse event: %v", err)
			continue
		}

		// Save the event
		if _, err := d.SaveEvent(ctx, ev); err != nil {
			d.Logger.Warningf("failed to import event: %v", err)
			continue
		}

		count++
		if count%1000 == 0 {
			d.Logger.Infof("imported %d events", count)
		}
	}

	if err := scanner.Err(); err != nil {
		return fmt.Errorf("scanner error: %w", err)
	}

	d.Logger.Infof("import complete: %d events", count)
	return nil
}

// ImportEventsFromStrings imports events from JSON strings
func (d *D) ImportEventsFromStrings(
	ctx context.Context,
	eventJSONs []string,
	policyManager interface{ CheckPolicy(action string, ev *event.E, pubkey []byte, remote string) (bool, error) },
) error {
	for _, eventJSON := range eventJSONs {
		ev := &event.E{}
		if err := json.Unmarshal([]byte(eventJSON), ev); err != nil {
			continue
		}

		// Check the policy if a manager is provided
		if policyManager != nil {
			if allowed, err := policyManager.CheckPolicy("write", ev, ev.Pubkey[:], "import"); err != nil || !allowed {
				continue
			}
		}

		// Save the event
		if _, err := d.SaveEvent(ctx, ev); err != nil {
			d.Logger.Warningf("failed to import event: %v", err)
		}
	}

	return nil
}
@@ -1,783 +0,0 @@
# Dgraph Integration Guide for ORLY Relay

This document outlines how to integrate Dgraph as an embedded graph database within the ORLY Nostr relay, enabling advanced querying capabilities beyond standard Nostr REQ filters.

## Table of Contents

1. [Overview](#overview)
2. [Architecture](#architecture)
3. [Embedding Dgraph as a Goroutine](#embedding-dgraph-as-a-goroutine)
4. [Internal Query Interface](#internal-query-interface)
5. [GraphQL Endpoint Setup](#graphql-endpoint-setup)
6. [Schema Design](#schema-design)
7. [Integration Points](#integration-points)
8. [Performance Considerations](#performance-considerations)

## Overview

### What Dgraph Provides

Dgraph is a distributed graph database that can be embedded into Go applications. For ORLY, it offers:

- **Graph Queries**: Traverse relationships between events, authors, and tags
- **GraphQL API**: External access to relay data with complex queries
- **DQL (Dgraph Query Language)**: Internal programmatic queries
- **Real-time Updates**: Live query subscriptions
- **Advanced Filtering**: Complex multi-hop queries impossible with Nostr REQ

### Why Integrate?

Nostr REQ filters are limited to:
- Single-author or tag-based queries
- Time range filters
- Kind filters
- Simple AND/OR combinations

Dgraph enables:
- "Find all events from users followed by my follows" (2-hop social graph)
- "Show threads where Alice replied to Bob who replied to Carol"
- "Find all events tagged with #bitcoin by authors in my Web of Trust" (see the sketch below)
- Complex graph analytics on social networks
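To make the hashtag example concrete, the lookup can start from the tag node and walk the reverse `tagged_with` edge back to events. This is a DQL sketch against the schema defined later in this guide; the Web-of-Trust restriction would be an additional filter on the author set and is omitted here:

```
{
  bitcoin_events(func: eq(tag.value, "bitcoin")) {
    ~tagged_with {
      event.id
      event.created_at
      event.content
      authored_by {
        author.pubkey
      }
    }
  }
}
```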
## Architecture

### Dgraph Components

```
┌─────────────────────────────────────────────────────────┐
│                       ORLY Relay                        │
│                                                         │
│  ┌──────────────┐         ┌─────────────────────────┐   │
│  │  HTTP API    │◄────────┤   GraphQL Endpoint      │   │
│  │  (existing)  │         │   (new - external)      │   │
│  └──────────────┘         └─────────────────────────┘   │
│         │                            │                  │
│         ▼                            ▼                  │
│  ┌───────────────────────────────────────────────────┐  │
│  │             Event Ingestion Layer                 │  │
│  │   - Save to Badger (existing)                     │  │
│  │   - Sync to Dgraph (new)                          │  │
│  └───────────────────────────────────────────────────┘  │
│         │                            │                  │
│         ▼                            ▼                  │
│  ┌────────────┐              ┌─────────────────┐        │
│  │   Badger   │              │  Dgraph Engine  │        │
│  │  (events)  │              │  (graph index)  │        │
│  └────────────┘              └─────────────────┘        │
│                                      │                  │
│                             ┌────────┴────────┐         │
│                             │                 │         │
│                             ▼                 ▼         │
│                      ┌──────────┐      ┌──────────┐     │
│                      │  Badger  │      │ RaftWAL  │     │
│                      │(postings)│      │  (WAL)   │     │
│                      └──────────┘      └──────────┘     │
└─────────────────────────────────────────────────────────┘
```

### Storage Strategy

**Dual Storage Approach:**

1. **Badger (Primary)**: Continue using the existing Badger database for:
   - Fast event retrieval by ID
   - Time-based queries
   - Author-based queries
   - Tag-based queries
   - Kind-based queries

2. **Dgraph (Secondary)**: Use for:
   - Graph relationship queries
   - Complex multi-hop traversals
   - Social graph analytics
   - Web of Trust calculations

**Data Sync**: Events are written to both stores, but Dgraph contains:
- Event nodes (ID, kind, created_at, content)
- Author nodes (pubkey)
- Tag nodes (tag values)
- Relationships (authored_by, tagged_with, replies_to, mentions, etc.), as in the triples sketched below
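For instance, a single kind-1 note carrying one `e` tag and one `t` tag would be synced to Dgraph as a handful of RDF triples along these lines (a sketch; `abc123`, `deadbeef`, and `ef4567` are shortened placeholder hex IDs, and the predicate names match the `buildEventNQuads` helper shown later):

```
_:abc123 <dgraph.type> "Event" .
_:abc123 <event.id> "abc123" .
_:abc123 <event.kind> "1" .
_:abc123 <event.created_at> "1700000000" .
_:abc123 <authored_by> _:deadbeef .
_:deadbeef <dgraph.type> "Author" .
_:deadbeef <author.pubkey> "deadbeef" .
_:abc123 <references> _:ef4567 .
_:abc123 <tagged_with> _:tag_bitcoin .
_:tag_bitcoin <dgraph.type> "Tag" .
_:tag_bitcoin <tag.value> "bitcoin" .
```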
## Embedding Dgraph as a Goroutine

### Initialization Pattern

Based on dgraph's embedded mode (`worker/embedded.go` and `worker/server_state.go`):

```go
package dgraph

import (
	"context"
	"fmt"
	"math"

	"github.com/dgraph-io/badger/v4"
	"github.com/dgraph-io/dgraph/edgraph"
	"github.com/dgraph-io/dgraph/graphql/admin"
	"github.com/dgraph-io/dgraph/posting"
	"github.com/dgraph-io/dgraph/schema"
	"github.com/dgraph-io/dgraph/worker"
	"github.com/dgraph-io/dgraph/x"
	"github.com/dgraph-io/ristretto/z"
)

// Manager handles the embedded Dgraph instance
type Manager struct {
	ctx    context.Context
	cancel context.CancelFunc

	// Dgraph components
	pstore   *badger.DB          // Postings store
	walstore *worker.DiskStorage // Write-ahead log

	// GraphQL servers
	mainServer  admin.IServeGraphQL
	adminServer admin.IServeGraphQL
	healthStore *admin.GraphQLHealthStore

	// Lifecycle
	closer       *z.Closer
	serverCloser *z.Closer
}

// Config holds Dgraph configuration
type Config struct {
	DataDir    string
	PostingDir string
	WALDir     string

	// Performance tuning
	PostingCacheMB int64
	MutationsMode  string

	// Network
	GraphQLPort int
	AdminPort   int

	// Feature flags
	EnableGraphQL       bool
	EnableIntrospection bool
}

// New creates a new embedded Dgraph manager
func New(ctx context.Context, cfg *Config) (*Manager, error) {
	ctx, cancel := context.WithCancel(ctx)

	m := &Manager{
		ctx:          ctx,
		cancel:       cancel,
		closer:       z.NewCloser(1),
		serverCloser: z.NewCloser(3),
	}

	// Initialize storage
	if err := m.initStorage(cfg); err != nil {
		return nil, err
	}

	// Initialize Dgraph components
	if err := m.initDgraph(cfg); err != nil {
		return nil, err
	}

	// Set up GraphQL endpoints
	if cfg.EnableGraphQL {
		if err := m.setupGraphQL(cfg); err != nil {
			return nil, err
		}
	}

	return m, nil
}

// initStorage opens Badger databases for postings and WAL
func (m *Manager) initStorage(cfg *Config) error {
	// Open the postings store (Dgraph's main data)
	opts := badger.DefaultOptions(cfg.PostingDir).
		WithNumVersionsToKeep(math.MaxInt32).
		WithNamespaceOffset(x.NamespaceOffset)

	var err error
	m.pstore, err = badger.OpenManaged(opts)
	if err != nil {
		return fmt.Errorf("failed to open postings store: %w", err)
	}

	// Open the WAL store
	m.walstore, err = worker.InitStorage(cfg.WALDir)
	if err != nil {
		m.pstore.Close()
		return fmt.Errorf("failed to open WAL: %w", err)
	}

	return nil
}

// initDgraph initializes Dgraph worker components
func (m *Manager) initDgraph(cfg *Config) error {
	// Initialize server state
	worker.State.Pstore = m.pstore
	worker.State.WALstore = m.walstore
	worker.State.FinishCh = make(chan struct{})

	// Initialize schema and posting layers
	schema.Init(m.pstore)
	posting.Init(m.pstore, cfg.PostingCacheMB, true)
	worker.Init(m.pstore)

	// For embedded/lite mode without Raft
	worker.InitForLite(m.pstore)

	return nil
}

// setupGraphQL initializes GraphQL servers
func (m *Manager) setupGraphQL(cfg *Config) error {
	globalEpoch := make(map[uint64]*uint64)

	// Create the GraphQL servers
	m.mainServer, m.adminServer, m.healthStore = admin.NewServers(
		cfg.EnableIntrospection,
		globalEpoch,
		m.serverCloser,
	)

	return nil
}

// Start launches Dgraph in goroutines
func (m *Manager) Start() error {
	// Start the worker server (internal gRPC)
	go worker.RunServer(false)

	return nil
}

// Stop gracefully shuts down Dgraph
func (m *Manager) Stop() error {
	m.cancel()

	// Signal shutdown
	m.closer.SignalAndWait()
	m.serverCloser.SignalAndWait()

	// Close the databases
	if m.walstore != nil {
		m.walstore.Close()
	}
	if m.pstore != nil {
		m.pstore.Close()
	}

	return nil
}
```

### Integration with ORLY Main

In `app/main.go`:

```go
import (
	"next.orly.dev/pkg/dgraph"
)

type Listener struct {
	// ... existing fields ...

	dgraphManager *dgraph.Manager
}

func (l *Listener) init(ctx context.Context, cfg *config.C) (err error) {
	// ... existing initialization ...

	// Initialize Dgraph if enabled
	if cfg.DgraphEnabled {
		dgraphCfg := &dgraph.Config{
			DataDir:             cfg.DgraphDataDir,
			PostingDir:          filepath.Join(cfg.DgraphDataDir, "p"),
			WALDir:              filepath.Join(cfg.DgraphDataDir, "w"),
			PostingCacheMB:      cfg.DgraphCacheMB,
			EnableGraphQL:       cfg.DgraphGraphQL,
			EnableIntrospection: cfg.DgraphIntrospection,
			GraphQLPort:         cfg.DgraphGraphQLPort,
		}

		l.dgraphManager, err = dgraph.New(ctx, dgraphCfg)
		if err != nil {
			return fmt.Errorf("failed to initialize dgraph: %w", err)
		}

		if err = l.dgraphManager.Start(); err != nil {
			return fmt.Errorf("failed to start dgraph: %w", err)
		}

		log.I.F("dgraph manager started successfully")
	}

	// ... rest of initialization ...
}
```

## Internal Query Interface

### Direct Query Execution

Dgraph provides `edgraph.Server{}.QueryNoGrpc()` for internal queries:

```go
package dgraph

import (
	"context"

	"github.com/dgraph-io/dgo/v230/protos/api"
	"github.com/dgraph-io/dgraph/edgraph"
)

// Query executes a DQL query internally
func (m *Manager) Query(ctx context.Context, query string) (*api.Response, error) {
	server := &edgraph.Server{}

	req := &api.Request{
		Query: query,
	}

	return server.QueryNoGrpc(ctx, req)
}

// Mutate applies a mutation to the graph
func (m *Manager) Mutate(ctx context.Context, mutation *api.Mutation) (*api.Response, error) {
	server := &edgraph.Server{}

	req := &api.Request{
		Mutations: []*api.Mutation{mutation},
		CommitNow: true,
	}

	return server.QueryNoGrpc(ctx, req)
}
```

### Example: Adding Events to Graph

```go
// AddEvent indexes a Nostr event in the graph
func (m *Manager) AddEvent(ctx context.Context, ev *event.E) error {
	// Build RDF triples for the event
	nquads := buildEventNQuads(ev)

	mutation := &api.Mutation{
		SetNquads: []byte(nquads),
		CommitNow: true,
	}

	_, err := m.Mutate(ctx, mutation)
	return err
}

func buildEventNQuads(ev *event.E) string {
	var nquads strings.Builder

	eventID := hex.EncodeToString(ev.ID[:])
	authorPubkey := hex.EncodeToString(ev.Pubkey[:])

	// Event node
	nquads.WriteString(fmt.Sprintf("_:%s <dgraph.type> \"Event\" .\n", eventID))
	nquads.WriteString(fmt.Sprintf("_:%s <event.id> %q .\n", eventID, eventID))
	nquads.WriteString(fmt.Sprintf("_:%s <event.kind> \"%d\" .\n", eventID, ev.Kind))
	nquads.WriteString(fmt.Sprintf("_:%s <event.created_at> \"%d\" .\n", eventID, ev.CreatedAt))
	nquads.WriteString(fmt.Sprintf("_:%s <event.content> %q .\n", eventID, ev.Content))

	// Author relationship
	nquads.WriteString(fmt.Sprintf("_:%s <authored_by> _:%s .\n", eventID, authorPubkey))
	nquads.WriteString(fmt.Sprintf("_:%s <dgraph.type> \"Author\" .\n", authorPubkey))
	nquads.WriteString(fmt.Sprintf("_:%s <author.pubkey> %q .\n", authorPubkey, authorPubkey))

	// Tag relationships
	for _, tag := range *ev.Tags {
		if len(tag.T) >= 2 {
			tagType := string(tag.T[0])
			tagValue := string(tag.T[1])

			switch tagType {
			case "e": // Event reference
				nquads.WriteString(fmt.Sprintf("_:%s <references> _:%s .\n", eventID, tagValue))
			case "p": // Pubkey mention
				nquads.WriteString(fmt.Sprintf("_:%s <mentions> _:%s .\n", eventID, tagValue))
			case "t": // Hashtag
				tagID := "tag_" + tagValue
				nquads.WriteString(fmt.Sprintf("_:%s <tagged_with> _:%s .\n", eventID, tagID))
				nquads.WriteString(fmt.Sprintf("_:%s <dgraph.type> \"Tag\" .\n", tagID))
				nquads.WriteString(fmt.Sprintf("_:%s <tag.value> %q .\n", tagID, tagValue))
			}
		}
	}

	return nquads.String()
}
```

### Example: Query Social Graph

```go
// FindFollowsOfFollows returns events from the 2-hop social network
func (m *Manager) FindFollowsOfFollows(ctx context.Context, pubkey []byte) ([]*event.E, error) {
	pubkeyHex := hex.EncodeToString(pubkey)

	query := fmt.Sprintf(`{
		follows_of_follows(func: eq(author.pubkey, %q)) {
			# My follows (kind 3)
			~authored_by @filter(eq(event.kind, "3")) {
				# Their follows
				references {
					# Events from their follows
					~authored_by {
						event.id
						event.kind
						event.created_at
						event.content
						authored_by {
							author.pubkey
						}
					}
				}
			}
		}
	}`, pubkeyHex)

	resp, err := m.Query(ctx, query)
	if err != nil {
		return nil, err
	}

	// Parse the response and convert to Nostr events
	return parseEventsFromDgraphResponse(resp.Json)
}
```

## GraphQL Endpoint Setup

### Exposing GraphQL via HTTP

Add GraphQL handlers to the existing HTTP mux in `app/server.go`:

```go
// setupGraphQLEndpoints adds Dgraph GraphQL endpoints
func (s *Server) setupGraphQLEndpoints() {
	if s.dgraphManager == nil {
		return
	}

	// Main GraphQL endpoint for queries
	s.mux.HandleFunc("/graphql", func(w http.ResponseWriter, r *http.Request) {
		// Extract the namespace (for multi-tenancy)
		namespace := x.ExtractNamespaceHTTP(r)

		// Lazily load the schema
		admin.LazyLoadSchema(namespace)

		// Serve GraphQL
		s.dgraphManager.MainServer().HTTPHandler().ServeHTTP(w, r)
	})

	// Admin endpoint for schema updates
	s.mux.HandleFunc("/admin", func(w http.ResponseWriter, r *http.Request) {
		namespace := x.ExtractNamespaceHTTP(r)
		admin.LazyLoadSchema(namespace)
		s.dgraphManager.AdminServer().HTTPHandler().ServeHTTP(w, r)
	})

	// Health check
	s.mux.HandleFunc("/graphql/health", func(w http.ResponseWriter, r *http.Request) {
		health := s.dgraphManager.HealthStore()
		if health.IsGraphQLReady() {
			w.WriteHeader(http.StatusOK)
			w.Write([]byte("GraphQL is ready"))
		} else {
			w.WriteHeader(http.StatusServiceUnavailable)
			w.Write([]byte("GraphQL is not ready"))
		}
	})
}
```

### GraphQL Resolver Integration

The manager needs to expose the GraphQL servers:

```go
// MainServer returns the main GraphQL server
func (m *Manager) MainServer() admin.IServeGraphQL {
	return m.mainServer
}

// AdminServer returns the admin GraphQL server
func (m *Manager) AdminServer() admin.IServeGraphQL {
	return m.adminServer
}

// HealthStore returns the health check store
func (m *Manager) HealthStore() *admin.GraphQLHealthStore {
	return m.healthStore
}
```

## Schema Design

### Dgraph Schema for Nostr Events

```graphql
# Types
type Event {
  id: String! @id @index(exact)
  kind: Int! @index(int)
  created_at: Int! @index(int)
  content: String @index(fulltext)
  sig: String

  # Relationships
  authored_by: Author! @reverse
  references: [Event] @reverse
  mentions: [Author] @reverse
  tagged_with: [Tag] @reverse
  replies_to: Event @reverse
}

type Author {
  pubkey: String! @id @index(exact)

  # Relationships
  events: [Event] @reverse
  follows: [Author] @reverse
  followed_by: [Author] @reverse

  # Computed/cached fields
  follower_count: Int
  following_count: Int
  event_count: Int
}

type Tag {
  value: String! @id @index(exact, term, fulltext)
  type: String @index(exact)

  # Relationships
  events: [Event] @reverse
  usage_count: Int
}

# Indexes for efficient queries
<event.kind>: int @index .
<event.created_at>: int @index .
<event.content>: string @index(fulltext) .
<author.pubkey>: string @index(exact) .
<tag.value>: string @index(exact, term, fulltext) .
```
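For orientation, Dgraph's GraphQL layer auto-generates query fields (`queryEvent`, `getEvent`, and so on) from a schema like the one above. A client-side query against the `/graphql` endpoint could then look like the following sketch; whether the `kind` filter and `created_at` ordering are available depends on the `@search`/`@index` directives actually deployed, so treat this as illustrative rather than exact:

```graphql
query RecentNotes {
  queryEvent(
    filter: { kind: { eq: 1 } }
    order: { desc: created_at }
    first: 20
  ) {
    id
    created_at
    content
    authored_by {
      pubkey
    }
  }
}
```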
### Setting the Schema

```go
func (m *Manager) SetSchema(ctx context.Context) error {
	schemaStr := `
		type Event {
			event.id: string @index(exact) .
			event.kind: int @index(int) .
			event.created_at: int @index(int) .
			event.content: string @index(fulltext) .
			authored_by: uid @reverse .
			references: [uid] @reverse .
			mentions: [uid] @reverse .
			tagged_with: [uid] @reverse .
		}

		type Author {
			author.pubkey: string @index(exact) .
		}

		type Tag {
			tag.value: string @index(exact, term, fulltext) .
		}
	`

	// Note: when talking to Dgraph through a dgo client rather than the
	// embedded worker, schema changes go through Alter with
	// api.Operation{Schema: schemaStr} instead of a mutation.
	mutation := &api.Mutation{
		SetNquads: []byte(schemaStr),
		CommitNow: true,
	}

	_, err := m.Mutate(ctx, mutation)
	return err
}
```

## Integration Points

### Event Ingestion Hook

Modify `pkg/database/save-event.go` to sync events to Dgraph:

```go
func (d *D) SaveEvent(ctx context.Context, ev *event.E) (exists bool, err error) {
	// ... existing Badger save logic ...

	// Sync to Dgraph if enabled
	if d.dgraphManager != nil {
		go func() {
			if err := d.dgraphManager.AddEvent(context.Background(), ev); err != nil {
				log.E.F("failed to sync event to dgraph: %v", err)
			}
		}()
	}

	return
}
```

### Query Interface Extension

Add GraphQL query support alongside Nostr REQ:

```go
// app/handle-graphql.go

func (s *Server) handleGraphQLQuery(w http.ResponseWriter, r *http.Request) {
	if s.dgraphManager == nil {
		http.Error(w, "GraphQL not enabled", http.StatusNotImplemented)
		return
	}

	// Read the GraphQL query from the request
	var req struct {
		Query     string                 `json:"query"`
		Variables map[string]interface{} `json:"variables"`
	}

	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// Execute via Dgraph
	gqlReq := &schema.Request{
		Query:     req.Query,
		Variables: req.Variables,
	}

	namespace := x.ExtractNamespaceHTTP(r)
	resp := s.dgraphManager.MainServer().ResolveWithNs(r.Context(), namespace, gqlReq)

	// Return the response
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}
```

## Performance Considerations

### Memory Usage

- **Dgraph Overhead**: ~500MB-1GB baseline
- **Posting Cache**: Configurable (recommend 25% of available RAM)
- **WAL**: Disk-based, minimal memory impact

### Storage Requirements

- **Badger (Postings)**: ~2-3x event data size (compressed)
- **WAL**: ~1.5x mutation data (compacted periodically)
- **Total**: Estimate 4-5x your Nostr event storage

### Query Performance

- **Graph Traversals**: O(edges), typically sub-100ms for 2-3 hops
- **Full-text Search**: O(log n) with indexes
- **Time-range Queries**: O(log n) with int indexes
- **Complex Joins**: Can be expensive; use pagination

### Optimization Strategies

1. **Selective Indexing**: Only index events that need graph queries (e.g., kinds 1, 3, 6, 7)
2. **Async Writes**: Don't block event saves on Dgraph sync
3. **Read-through Cache**: Query Badger first for simple lookups
4. **Batch Mutations**: Accumulate mutations and apply in batches
5. **Schema Optimization**: Only index fields you'll query
6. **Pagination**: Always use `first:` and `after:` in GraphQL queries (see the sketch below)
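To illustrate the pagination point in DQL terms, a page can be keyed on the last `uid` of the previous page. The helper below is a hypothetical sketch rather than part of the relay; `after:` resumes iteration past the given uid (use `0x0` for the first page):

```go
// pageEvents fetches one page of event uids and timestamps, resuming
// after the uid returned at the end of the previous page.
// NOTE: hypothetical helper for illustration; not part of the relay.
func (m *Manager) pageEvents(ctx context.Context, afterUID string, pageSize int) (*api.Response, error) {
	query := fmt.Sprintf(`{
		page(func: has(event.id), first: %d, after: %s) {
			uid
			event.id
			event.created_at
		}
	}`, pageSize, afterUID)
	return m.Query(ctx, query)
}
```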
### Monitoring

```go
// Add metrics
var (
	dgraphQueriesTotal   = prometheus.NewCounter(...)
	dgraphQueryDuration  = prometheus.NewHistogram(...)
	dgraphMutationsTotal = prometheus.NewCounter(...)
	dgraphErrors         = prometheus.NewCounter(...)
)

// Wrap queries with instrumentation
func (m *Manager) Query(ctx context.Context, query string) (*api.Response, error) {
	start := time.Now()
	defer func() {
		dgraphQueriesTotal.Inc()
		dgraphQueryDuration.Observe(time.Since(start).Seconds())
	}()

	resp, err := m.query(ctx, query)
	if err != nil {
		dgraphErrors.Inc()
	}
	return resp, err
}
```

## Alternative: Lightweight Graph Library

Given Dgraph's complexity and resource requirements, consider these alternatives:

### cayley (Google's graph database)

```bash
go get github.com/cayleygraph/cayley
```

- Lighter weight (~50MB overhead)
- Multiple backend support (Badger, Memory, SQL)
- Simpler API
- Good for smaller graphs (<10M nodes)

### badger-graph (Custom Implementation)

Build a custom graph layer on top of the existing Badger store:

```go
// Simplified graph index using Badger directly
type GraphIndex struct {
	db *badger.DB
}

// AddEdge stores an edge as subject -> predicate -> object
func (g *GraphIndex) AddEdge(subject, predicate, object string) error {
	key := fmt.Sprintf("edge:%s:%s:%s", subject, predicate, object)
	return g.db.Update(func(txn *badger.Txn) error {
		return txn.Set([]byte(key), []byte{})
	})
}

// GetEdges returns the objects reachable from subject via predicate
// by iterating over the key prefix.
func (g *GraphIndex) GetEdges(subject, predicate string) ([]string, error) {
	prefix := []byte(fmt.Sprintf("edge:%s:%s:", subject, predicate))
	var objects []string
	err := g.db.View(func(txn *badger.Txn) error {
		opts := badger.DefaultIteratorOptions
		opts.PrefetchValues = false // keys only
		it := txn.NewIterator(opts)
		defer it.Close()
		for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
			key := it.Item().Key()
			objects = append(objects, string(key[len(prefix):]))
		}
		return nil
	})
	return objects, err
}
```

This avoids Dgraph's overhead while providing basic graph functionality.

## Conclusion

Embedding Dgraph in ORLY enables powerful graph queries that extend far beyond Nostr's REQ filters, but it comes with significant complexity and resource requirements. Consider:

- **Full Dgraph**: for production relays with advanced query needs
- **Cayley**: for medium-sized relays with moderate graph needs
- **Custom Badger-Graph**: for lightweight graph indexing with minimal overhead

Choose based on your specific use case, expected load, and query complexity requirements.
@@ -1,68 +0,0 @@
package dgraph

import (
	"fmt"
	"runtime"
	"strings"

	"go.uber.org/atomic"
	"lol.mleku.dev"
	"lol.mleku.dev/log"
)

// NewLogger creates a new dgraph logger.
func NewLogger(logLevel int, label string) (l *logger) {
	l = &logger{Label: label}
	l.Level.Store(int32(logLevel))
	return
}

type logger struct {
	Level atomic.Int32
	Label string
}

// SetLogLevel atomically adjusts the log level to the given log level code.
func (l *logger) SetLogLevel(level int) {
	l.Level.Store(int32(level))
}

// Errorf is a log printer for this level of message.
func (l *logger) Errorf(s string, i ...interface{}) {
	if l.Level.Load() >= lol.Error {
		s = l.Label + ": " + s
		txt := fmt.Sprintf(s, i...)
		_, file, line, _ := runtime.Caller(2)
		log.E.F("%s\n%s:%d", strings.TrimSpace(txt), file, line)
	}
}

// Warningf is a log printer for this level of message.
func (l *logger) Warningf(s string, i ...interface{}) {
	if l.Level.Load() >= lol.Warn {
		s = l.Label + ": " + s
		txt := fmt.Sprintf(s, i...)
		_, file, line, _ := runtime.Caller(2)
		log.W.F("%s\n%s:%d", strings.TrimSpace(txt), file, line)
	}
}

// Infof is a log printer for this level of message.
func (l *logger) Infof(s string, i ...interface{}) {
	if l.Level.Load() >= lol.Info {
		s = l.Label + ": " + s
		txt := fmt.Sprintf(s, i...)
		_, file, line, _ := runtime.Caller(2)
		log.I.F("%s\n%s:%d", strings.TrimSpace(txt), file, line)
	}
}

// Debugf is a log printer for this level of message.
func (l *logger) Debugf(s string, i ...interface{}) {
	if l.Level.Load() >= lol.Debug {
		s = l.Label + ": " + s
		txt := fmt.Sprintf(s, i...)
		_, file, line, _ := runtime.Caller(2)
		log.D.F("%s\n%s:%d", strings.TrimSpace(txt), file, line)
	}
}
@@ -1,120 +0,0 @@
package dgraph

import (
    "context"
    "encoding/json"
    "fmt"

    "github.com/dgraph-io/dgo/v230/protos/api"
    "git.mleku.dev/mleku/nostr/encoders/hex"
)

// Markers provide metadata key-value storage using Dgraph predicates.
// We store markers as special nodes with type "Marker".

// SetMarker sets a metadata marker
func (d *D) SetMarker(key string, value []byte) error {
    // Create or update a marker node
    markerID := "marker_" + key
    valueHex := hex.Enc(value)

    nquads := fmt.Sprintf(`
        _:%s <dgraph.type> "Marker" .
        _:%s <marker.key> %q .
        _:%s <marker.value> %q .
    `, markerID, markerID, key, markerID, valueHex)

    mutation := &api.Mutation{
        SetNquads: []byte(nquads),
        CommitNow: true,
    }

    if _, err := d.Mutate(context.Background(), mutation); err != nil {
        return fmt.Errorf("failed to set marker: %w", err)
    }

    return nil
}

// GetMarker retrieves a metadata marker
func (d *D) GetMarker(key string) (value []byte, err error) {
    query := fmt.Sprintf(`{
        marker(func: eq(marker.key, %q)) {
            marker.value
        }
    }`, key)

    resp, err := d.Query(context.Background(), query)
    if err != nil {
        return nil, fmt.Errorf("failed to get marker: %w", err)
    }

    var result struct {
        Marker []struct {
            Value string `json:"marker.value"`
        } `json:"marker"`
    }

    if err = json.Unmarshal(resp.Json, &result); err != nil {
        return nil, fmt.Errorf("failed to parse marker response: %w", err)
    }

    if len(result.Marker) == 0 {
        return nil, fmt.Errorf("marker not found: %s", key)
    }

    // Decode hex value
    value, err = hex.Dec(result.Marker[0].Value)
    if err != nil {
        return nil, fmt.Errorf("failed to decode marker value: %w", err)
    }

    return value, nil
}

// HasMarker checks if a marker exists
func (d *D) HasMarker(key string) bool {
    _, err := d.GetMarker(key)
    return err == nil
}

// DeleteMarker removes a metadata marker
func (d *D) DeleteMarker(key string) error {
    // Find the marker's UID
    query := fmt.Sprintf(`{
        marker(func: eq(marker.key, %q)) {
            uid
        }
    }`, key)

    resp, err := d.Query(context.Background(), query)
    if err != nil {
        return fmt.Errorf("failed to find marker: %w", err)
    }

    var result struct {
        Marker []struct {
            UID string `json:"uid"`
        } `json:"marker"`
    }

    if err = json.Unmarshal(resp.Json, &result); err != nil {
        return fmt.Errorf("failed to parse marker query: %w", err)
    }

    if len(result.Marker) == 0 {
        return nil // Marker doesn't exist
    }

    // Delete the marker node
    mutation := &api.Mutation{
        DelNquads: []byte(fmt.Sprintf("<%s> * * .", result.Marker[0].UID)),
        CommitNow: true,
    }

    if _, err = d.Mutate(context.Background(), mutation); err != nil {
        return fmt.Errorf("failed to delete marker: %w", err)
    }

    return nil
}
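// Usage sketch (editorial note): the marker API behaves like a small
// byte-oriented key-value store layered on Dgraph nodes; the hex encoding of
// values is an internal detail. Here d is assumed to be an initialized *D.
//
//	if err := d.SetMarker("schema_version", []byte{0x02}); err != nil {
//		// handle the write failure
//	}
//	if d.HasMarker("schema_version") {
//		v, _ := d.GetMarker("schema_version") // v == []byte{0x02}
//		_ = v
//	}
//	_ = d.DeleteMarker("schema_version") // deleting a missing key is not an error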
@@ -1,211 +0,0 @@
package dgraph

import (
    "encoding/json"
    "fmt"
    "time"

    "next.orly.dev/pkg/database"
    "git.mleku.dev/mleku/nostr/encoders/hex"
)

// NIP-43 Invite-based ACL methods
// Simplified implementation using marker-based storage

// AddNIP43Member adds a member using an invite code
func (d *D) AddNIP43Member(pubkey []byte, inviteCode string) error {
    key := "nip43_" + hex.Enc(pubkey)

    member := database.NIP43Membership{
        InviteCode: inviteCode,
        AddedAt:    time.Now(),
    }
    copy(member.Pubkey[:], pubkey)

    data, err := json.Marshal(member)
    if err != nil {
        return fmt.Errorf("failed to marshal membership: %w", err)
    }

    // Also add to members list
    if err := d.addToMembersList(pubkey); err != nil {
        return err
    }

    return d.SetMarker(key, data)
}

// RemoveNIP43Member removes a member
func (d *D) RemoveNIP43Member(pubkey []byte) error {
    key := "nip43_" + hex.Enc(pubkey)

    // Remove from members list
    if err := d.removeFromMembersList(pubkey); err != nil {
        return err
    }

    return d.DeleteMarker(key)
}

// IsNIP43Member checks if a pubkey is a member
func (d *D) IsNIP43Member(pubkey []byte) (isMember bool, err error) {
    _, err = d.GetNIP43Membership(pubkey)
    return err == nil, nil
}

// GetNIP43Membership retrieves membership information
func (d *D) GetNIP43Membership(pubkey []byte) (*database.NIP43Membership, error) {
    key := "nip43_" + hex.Enc(pubkey)

    data, err := d.GetMarker(key)
    if err != nil {
        return nil, err
    }

    var member database.NIP43Membership
    if err := json.Unmarshal(data, &member); err != nil {
        return nil, fmt.Errorf("failed to unmarshal membership: %w", err)
    }

    return &member, nil
}

// GetAllNIP43Members retrieves all member pubkeys
func (d *D) GetAllNIP43Members() ([][]byte, error) {
    data, err := d.GetMarker("nip43_members_list")
    if err != nil {
        return nil, nil // No members = empty list
    }

    var members []string
    if err := json.Unmarshal(data, &members); err != nil {
        return nil, fmt.Errorf("failed to unmarshal members list: %w", err)
    }

    result := make([][]byte, 0, len(members))
    for _, hexPubkey := range members {
        pubkey, err := hex.Dec(hexPubkey)
        if err != nil {
            continue
        }
        result = append(result, pubkey)
    }

    return result, nil
}

// StoreInviteCode stores an invite code with expiration
func (d *D) StoreInviteCode(code string, expiresAt time.Time) error {
    key := "invite_" + code

    inviteData := map[string]interface{}{
        "code":      code,
        "expiresAt": expiresAt,
    }

    data, err := json.Marshal(inviteData)
    if err != nil {
        return fmt.Errorf("failed to marshal invite: %w", err)
    }

    return d.SetMarker(key, data)
}

// ValidateInviteCode checks if an invite code is valid
func (d *D) ValidateInviteCode(code string) (valid bool, err error) {
    key := "invite_" + code

    data, err := d.GetMarker(key)
    if err != nil {
        return false, nil // Code doesn't exist
    }

    var inviteData map[string]interface{}
    if err := json.Unmarshal(data, &inviteData); err != nil {
        return false, fmt.Errorf("failed to unmarshal invite: %w", err)
    }

    // Check expiration
    if expiresStr, ok := inviteData["expiresAt"].(string); ok {
        expiresAt, err := time.Parse(time.RFC3339, expiresStr)
        if err == nil && time.Now().After(expiresAt) {
            return false, nil // Expired
        }
    }

    return true, nil
}

// DeleteInviteCode removes an invite code
func (d *D) DeleteInviteCode(code string) error {
    key := "invite_" + code
    return d.DeleteMarker(key)
}

// PublishNIP43MembershipEvent publishes a membership event
func (d *D) PublishNIP43MembershipEvent(kind int, pubkey []byte) error {
    // This would require publishing an actual Nostr event
    // For now, just log it
    d.Logger.Infof("would publish NIP-43 event kind %d for %s", kind, hex.Enc(pubkey))
    return nil
}

// Helper functions

func (d *D) addToMembersList(pubkey []byte) error {
    data, err := d.GetMarker("nip43_members_list")

    var members []string
    if err == nil {
        if err := json.Unmarshal(data, &members); err != nil {
            return fmt.Errorf("failed to unmarshal members list: %w", err)
        }
    }

    hexPubkey := hex.Enc(pubkey)

    // Check if already in list
    for _, member := range members {
        if member == hexPubkey {
            return nil // Already in list
        }
    }

    members = append(members, hexPubkey)

    data, err = json.Marshal(members)
    if err != nil {
        return fmt.Errorf("failed to marshal members list: %w", err)
    }

    return d.SetMarker("nip43_members_list", data)
}

func (d *D) removeFromMembersList(pubkey []byte) error {
    data, err := d.GetMarker("nip43_members_list")
    if err != nil {
        return nil // List doesn't exist
    }

    var members []string
    if err := json.Unmarshal(data, &members); err != nil {
        return fmt.Errorf("failed to unmarshal members list: %w", err)
    }

    hexPubkey := hex.Enc(pubkey)

    // Remove from list
    newMembers := make([]string, 0, len(members))
    for _, member := range members {
        if member != hexPubkey {
            newMembers = append(newMembers, member)
        }
    }

    data, err = json.Marshal(newMembers)
    if err != nil {
        return fmt.Errorf("failed to marshal members list: %w", err)
    }

    return d.SetMarker("nip43_members_list", data)
}
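// Usage sketch (editorial note): the intended NIP-43 flow with this storage
// layer: provision an invite code with a deadline, validate it when a client
// presents it, then record the presenting pubkey as a member. All identifiers
// below exist in this file; pk is an assumed 32-byte pubkey.
//
//	_ = d.StoreInviteCode("golden-ticket", time.Now().Add(24*time.Hour))
//	if ok, _ := d.ValidateInviteCode("golden-ticket"); ok {
//		_ = d.AddNIP43Member(pk, "golden-ticket")
//	}
//	isMember, _ := d.IsNIP43Member(pk) // true after the add above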
@@ -1,376 +0,0 @@
package dgraph

import (
    "context"
    "encoding/json"
    "fmt"
    "strings"

    "next.orly.dev/pkg/database/indexes/types"
    "git.mleku.dev/mleku/nostr/encoders/event"
    "git.mleku.dev/mleku/nostr/encoders/filter"
    "git.mleku.dev/mleku/nostr/encoders/hex"
    "git.mleku.dev/mleku/nostr/encoders/tag"
    "next.orly.dev/pkg/interfaces/store"
)

// QueryEvents retrieves events matching the given filter
func (d *D) QueryEvents(c context.Context, f *filter.F) (evs event.S, err error) {
    return d.QueryEventsWithOptions(c, f, false, false)
}

// QueryAllVersions retrieves all versions of events matching the filter
func (d *D) QueryAllVersions(c context.Context, f *filter.F) (evs event.S, err error) {
    return d.QueryEventsWithOptions(c, f, false, true)
}

// QueryEventsWithOptions retrieves events with specific options
func (d *D) QueryEventsWithOptions(
    c context.Context, f *filter.F, includeDeleteEvents bool, showAllVersions bool,
) (evs event.S, err error) {
    // Build DQL query from Nostr filter
    query := d.buildDQLQuery(f, includeDeleteEvents)

    // Execute query
    resp, err := d.Query(c, query)
    if err != nil {
        return nil, fmt.Errorf("failed to execute query: %w", err)
    }

    // Parse response
    evs, err = d.parseEventsFromResponse(resp.Json)
    if err != nil {
        return nil, fmt.Errorf("failed to parse events: %w", err)
    }

    return evs, nil
}

// buildDQLQuery constructs a DQL query from a Nostr filter
func (d *D) buildDQLQuery(f *filter.F, includeDeleteEvents bool) string {
    return d.buildDQLQueryWithFields(f, includeDeleteEvents, []string{
        "uid",
        "event.id",
        "event.kind",
        "event.created_at",
        "event.content",
        "event.sig",
        "event.pubkey",
        "event.tags",
    })
}

// buildDQLQueryWithFields constructs a DQL query with custom field selection
func (d *D) buildDQLQueryWithFields(f *filter.F, includeDeleteEvents bool, fields []string) string {
    var conditions []string
    var funcQuery string

    // IDs filter
    if f.Ids != nil && len(f.Ids.T) > 0 {
        idConditions := make([]string, len(f.Ids.T))
        for i, id := range f.Ids.T {
            // Handle prefix matching
            if len(id) < 64 {
                // Prefix search (requires a trigram index on event.id)
                idConditions[i] = fmt.Sprintf("regexp(event.id, /^%s/)", hex.Enc(id))
            } else {
                idConditions[i] = fmt.Sprintf("eq(event.id, %q)", hex.Enc(id))
            }
        }
        if len(idConditions) == 1 {
            funcQuery = idConditions[0]
        } else {
            conditions = append(conditions, "("+strings.Join(idConditions, " OR ")+")")
        }
    }

    // Authors filter
    if f.Authors != nil && len(f.Authors.T) > 0 {
        authorConditions := make([]string, len(f.Authors.T))
        for i, author := range f.Authors.T {
            // Handle prefix matching
            if len(author) < 64 {
                authorConditions[i] = fmt.Sprintf("regexp(event.pubkey, /^%s/)", hex.Enc(author))
            } else {
                authorConditions[i] = fmt.Sprintf("eq(event.pubkey, %q)", hex.Enc(author))
            }
        }
        if funcQuery == "" && len(authorConditions) == 1 {
            funcQuery = authorConditions[0]
        } else {
            conditions = append(conditions, "("+strings.Join(authorConditions, " OR ")+")")
        }
    }

    // Kinds filter
    if f.Kinds != nil && len(f.Kinds.K) > 0 {
        kindConditions := make([]string, len(f.Kinds.K))
        for i, kind := range f.Kinds.K {
            kindConditions[i] = fmt.Sprintf("eq(event.kind, %d)", kind)
        }
        conditions = append(conditions, "("+strings.Join(kindConditions, " OR ")+")")
    }

    // Time range filters
    if f.Since != nil {
        conditions = append(conditions, fmt.Sprintf("ge(event.created_at, %d)", f.Since.V))
    }
    if f.Until != nil {
        conditions = append(conditions, fmt.Sprintf("le(event.created_at, %d)", f.Until.V))
    }

    // Tag filters
    if f.Tags != nil {
        for _, tagValues := range *f.Tags {
            if len(tagValues.T) > 0 {
                tagConditions := make([]string, len(tagValues.T))
                for i, tagValue := range tagValues.T {
                    // This is a simplified tag query - in production you'd want to use facets
                    tagConditions[i] = fmt.Sprintf("eq(tag.value, %q)", string(tagValue))
                }
                conditions = append(conditions, "("+strings.Join(tagConditions, " OR ")+")")
            }
        }
    }

    // Exclude delete events unless requested
    if !includeDeleteEvents {
        conditions = append(conditions, "NOT eq(event.kind, 5)")
    }

    // Build the final query
    if funcQuery == "" {
        funcQuery = "has(event.id)"
    }

    filterStr := ""
    if len(conditions) > 0 {
        filterStr = " @filter(" + strings.Join(conditions, " AND ") + ")"
    }

    // Add ordering and limit
    orderBy := ", orderdesc: event.created_at"
    limitStr := ""
    if f.Limit != nil && *f.Limit > 0 {
        limitStr = fmt.Sprintf(", first: %d", *f.Limit)
    }

    // Build field list
    fieldStr := strings.Join(fields, "\n\t\t\t")

    // Note: in DQL the @filter block follows the root function's argument
    // list, so it is appended after the closing parenthesis of func(...).
    query := fmt.Sprintf(`{
        events(func: %s%s%s)%s {
            %s
        }
    }`, funcQuery, orderBy, limitStr, filterStr, fieldStr)

    return query
}
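// Example output (editorial sketch): for a filter of {kinds: [1], since:
// 1700000000, limit: 10}, the builder above produces a DQL query shaped like
// the following (field list elided):
//
//	{
//		events(func: has(event.id), orderdesc: event.created_at, first: 10) @filter((eq(event.kind, 1)) AND ge(event.created_at, 1700000000) AND NOT eq(event.kind, 5)) {
//			uid
//			event.id
//			event.kind
//			...
//		}
//	}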
// parseEventsFromResponse converts Dgraph JSON response to Nostr events
func (d *D) parseEventsFromResponse(jsonData []byte) ([]*event.E, error) {
    var result struct {
        Events []struct {
            UID       string `json:"uid"`
            ID        string `json:"event.id"`
            Kind      int    `json:"event.kind"`
            CreatedAt int64  `json:"event.created_at"`
            Content   string `json:"event.content"`
            Sig       string `json:"event.sig"`
            Pubkey    string `json:"event.pubkey"`
            Tags      string `json:"event.tags"`
        } `json:"events"`
    }

    if err := json.Unmarshal(jsonData, &result); err != nil {
        return nil, err
    }

    events := make([]*event.E, 0, len(result.Events))
    for _, ev := range result.Events {
        // Decode hex strings
        id, err := hex.Dec(ev.ID)
        if err != nil {
            continue
        }
        sig, err := hex.Dec(ev.Sig)
        if err != nil {
            continue
        }
        pubkey, err := hex.Dec(ev.Pubkey)
        if err != nil {
            continue
        }

        // Parse tags from JSON
        var tags tag.S
        if ev.Tags != "" {
            if err := json.Unmarshal([]byte(ev.Tags), &tags); err != nil {
                continue
            }
        }

        // Create event
        e := &event.E{
            Kind:      uint16(ev.Kind),
            CreatedAt: ev.CreatedAt,
            Content:   []byte(ev.Content),
            Tags:      &tags,
        }

        // Copy fixed-size arrays
        copy(e.ID[:], id)
        copy(e.Sig[:], sig)
        copy(e.Pubkey[:], pubkey)

        events = append(events, e)
    }

    return events, nil
}

// QueryDeleteEventsByTargetId retrieves delete events targeting a specific event ID
func (d *D) QueryDeleteEventsByTargetId(c context.Context, targetEventId []byte) (
    evs event.S, err error,
) {
    targetIDStr := hex.Enc(targetEventId)

    // Query for kind 5 events that reference this event. @cascade prunes
    // kind-5 events whose references block is empty after the inner filter,
    // so only deletions that actually target the given ID are returned.
    query := fmt.Sprintf(`{
        events(func: eq(event.kind, 5)) @cascade {
            uid
            event.id
            event.kind
            event.created_at
            event.content
            event.sig
            event.pubkey
            event.tags
            references @filter(eq(event.id, %q)) {
                event.id
            }
        }
    }`, targetIDStr)

    resp, err := d.Query(c, query)
    if err != nil {
        return nil, fmt.Errorf("failed to query delete events: %w", err)
    }

    evs, err = d.parseEventsFromResponse(resp.Json)
    if err != nil {
        return nil, fmt.Errorf("failed to parse delete events: %w", err)
    }

    return evs, nil
}

// QueryForSerials retrieves event serials matching a filter
func (d *D) QueryForSerials(c context.Context, f *filter.F) (
    serials types.Uint40s, err error,
) {
    // Build query requesting only serial numbers
    query := d.buildDQLQueryWithFields(f, false, []string{"event.serial"})

    resp, err := d.Query(c, query)
    if err != nil {
        return nil, fmt.Errorf("failed to query serials: %w", err)
    }

    var result struct {
        Events []struct {
            Serial int64 `json:"event.serial"`
        } `json:"events"`
    }

    if err = json.Unmarshal(resp.Json, &result); err != nil {
        return nil, err
    }

    serials = make([]*types.Uint40, 0, len(result.Events))
    for _, ev := range result.Events {
        serial := types.Uint40{}
        serial.Set(uint64(ev.Serial))
        serials = append(serials, &serial)
    }

    return serials, nil
}

// QueryForIds retrieves event IDs matching a filter
func (d *D) QueryForIds(c context.Context, f *filter.F) (
    idPkTs []*store.IdPkTs, err error,
) {
    // Build query requesting only ID, pubkey, created_at, serial
    query := d.buildDQLQueryWithFields(f, false, []string{
        "event.id",
        "event.pubkey",
        "event.created_at",
        "event.serial",
    })

    resp, err := d.Query(c, query)
    if err != nil {
        return nil, fmt.Errorf("failed to query IDs: %w", err)
    }

    var result struct {
        Events []struct {
            ID        string `json:"event.id"`
            Pubkey    string `json:"event.pubkey"`
            CreatedAt int64  `json:"event.created_at"`
            Serial    int64  `json:"event.serial"`
        } `json:"events"`
    }

    if err = json.Unmarshal(resp.Json, &result); err != nil {
        return nil, err
    }

    idPkTs = make([]*store.IdPkTs, 0, len(result.Events))
    for _, ev := range result.Events {
        id, err := hex.Dec(ev.ID)
        if err != nil {
            continue
        }
        pubkey, err := hex.Dec(ev.Pubkey)
        if err != nil {
            continue
        }
        idPkTs = append(idPkTs, &store.IdPkTs{
            Id:  id,
            Pub: pubkey,
            Ts:  ev.CreatedAt,
            Ser: uint64(ev.Serial),
        })
    }

    return idPkTs, nil
}

// CountEvents counts events matching a filter
func (d *D) CountEvents(c context.Context, f *filter.F) (
    count int, approximate bool, err error,
) {
    // Build query requesting only count
    query := d.buildDQLQueryWithFields(f, false, []string{"count(uid)"})

    resp, err := d.Query(c, query)
    if err != nil {
        return 0, false, fmt.Errorf("failed to count events: %w", err)
    }

    var result struct {
        Events []struct {
            Count int `json:"count"`
        } `json:"events"`
    }

    if err = json.Unmarshal(resp.Json, &result); err != nil {
        return 0, false, err
    }

    if len(result.Events) > 0 {
        count = result.Events[0].Count
    }

    return count, false, nil
}
@@ -1,517 +0,0 @@
package dgraph

import (
    "fmt"
    "testing"

    "lol.mleku.dev/chk"
    "git.mleku.dev/mleku/nostr/encoders/event"
    "git.mleku.dev/mleku/nostr/encoders/filter"
    "git.mleku.dev/mleku/nostr/encoders/hex"
    "git.mleku.dev/mleku/nostr/encoders/kind"
    "git.mleku.dev/mleku/nostr/encoders/tag"
    "git.mleku.dev/mleku/nostr/encoders/timestamp"
    "git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
    "next.orly.dev/pkg/utils"
)

func TestQueryEventsByID(t *testing.T) {
    db, events, ctx, cancel, tempDir := setupTestDB(t)
    defer cleanupTestDB(t, db, cancel, tempDir)

    // Test QueryEvents with an ID filter
    testEvent := events[3]

    evs, err := db.QueryEvents(
        ctx, &filter.F{
            Ids: tag.NewFromBytesSlice(testEvent.ID),
        },
    )
    if err != nil {
        t.Fatalf("Failed to query events by ID: %v", err)
    }

    // Verify we got exactly one event
    if len(evs) != 1 {
        t.Fatalf("Expected 1 event, got %d", len(evs))
    }

    // Verify it's the correct event
    if !utils.FastEqual(evs[0].ID, testEvent.ID) {
        t.Fatalf(
            "Event ID doesn't match. Got %x, expected %x", evs[0].ID,
            testEvent.ID,
        )
    }
}

func TestQueryEventsByKind(t *testing.T) {
    db, _, ctx, cancel, tempDir := setupTestDB(t)
    defer cleanupTestDB(t, db, cancel, tempDir)

    // Test querying by kind
    testKind := kind.New(1) // Kind 1 is typically text notes
    kindFilter := kind.NewS(testKind)

    evs, err := db.QueryEvents(
        ctx, &filter.F{
            Kinds: kindFilter,
            Tags:  tag.NewS(),
        },
    )
    if err != nil {
        t.Fatalf("Failed to query events by kind: %v", err)
    }

    // Verify we got results
    if len(evs) == 0 {
        t.Fatal("Expected events with kind 1, but got none")
    }

    // Verify all events have the correct kind
    for i, ev := range evs {
        if ev.Kind != testKind.K {
            t.Fatalf(
                "Event %d has incorrect kind. Got %d, expected %d", i,
                ev.Kind, testKind.K,
            )
        }
    }
}

func TestQueryEventsByAuthor(t *testing.T) {
    db, events, ctx, cancel, tempDir := setupTestDB(t)
    defer cleanupTestDB(t, db, cancel, tempDir)

    // Test querying by author
    authorFilter := tag.NewFromBytesSlice(events[1].Pubkey)

    evs, err := db.QueryEvents(
        ctx, &filter.F{
            Authors: authorFilter,
        },
    )
    if err != nil {
        t.Fatalf("Failed to query events by author: %v", err)
    }

    // Verify we got results
    if len(evs) == 0 {
        t.Fatal("Expected events from author, but got none")
    }

    // Verify all events have the correct author
    for i, ev := range evs {
        if !utils.FastEqual(ev.Pubkey, events[1].Pubkey) {
            t.Fatalf(
                "Event %d has incorrect author. Got %x, expected %x",
                i, ev.Pubkey, events[1].Pubkey,
            )
        }
    }
}

func TestReplaceableEventsAndDeletion(t *testing.T) {
    db, events, ctx, cancel, tempDir := setupTestDB(t)
    defer cleanupTestDB(t, db, cancel, tempDir)

    // Create a signer
    sign := p8k.MustNew()
    if err := sign.Generate(); chk.E(err) {
        t.Fatal(err)
    }

    // Create a replaceable event
    replaceableEvent := event.New()
    replaceableEvent.Kind = kind.ProfileMetadata.K // Kind 0 is replaceable
    replaceableEvent.Pubkey = events[0].Pubkey // Use the same pubkey as an existing event
    replaceableEvent.CreatedAt = timestamp.Now().V - 7200 // 2 hours ago
    replaceableEvent.Content = []byte("Original profile")
    replaceableEvent.Tags = tag.NewS()
    replaceableEvent.Sign(sign)

    // Save the replaceable event
    if _, err := db.SaveEvent(ctx, replaceableEvent); err != nil {
        t.Fatalf("Failed to save replaceable event: %v", err)
    }

    // Create a newer version of the replaceable event
    newerEvent := event.New()
    newerEvent.Kind = kind.ProfileMetadata.K // Same kind
    newerEvent.Pubkey = replaceableEvent.Pubkey // Same pubkey
    newerEvent.CreatedAt = timestamp.Now().V - 3600 // 1 hour ago (newer than the original)
    newerEvent.Content = []byte("Updated profile")
    newerEvent.Tags = tag.NewS()
    newerEvent.Sign(sign)

    // Save the newer event
    if _, err := db.SaveEvent(ctx, newerEvent); err != nil {
        t.Fatalf("Failed to save newer event: %v", err)
    }

    // Query for the original event by ID
    evs, err := db.QueryEvents(
        ctx, &filter.F{
            Ids: tag.NewFromAny(replaceableEvent.ID),
        },
    )
    if err != nil {
        t.Fatalf("Failed to query for replaced event by ID: %v", err)
    }

    // Verify the original event is still found (it's kept but not returned in general queries)
    if len(evs) != 1 {
        t.Fatalf("Expected 1 event when querying for replaced event by ID, got %d", len(evs))
    }

    // Verify it's the original event
    if !utils.FastEqual(evs[0].ID, replaceableEvent.ID) {
        t.Fatalf(
            "Event ID doesn't match when querying for replaced event. Got %x, expected %x",
            evs[0].ID, replaceableEvent.ID,
        )
    }

    // Query for all events of this kind and pubkey
    kindFilter := kind.NewS(kind.ProfileMetadata)
    authorFilter := tag.NewFromAny(replaceableEvent.Pubkey)

    evs, err = db.QueryEvents(
        ctx, &filter.F{
            Kinds:   kindFilter,
            Authors: authorFilter,
        },
    )
    if err != nil {
        t.Fatalf("Failed to query for replaceable events: %v", err)
    }

    // Verify we got only one event (the latest one)
    if len(evs) != 1 {
        t.Fatalf(
            "Expected 1 event when querying for replaceable events, got %d",
            len(evs),
        )
    }

    // Verify it's the newer event
    if !utils.FastEqual(evs[0].ID, newerEvent.ID) {
        t.Fatalf(
            "Event ID doesn't match when querying for replaceable events. Got %x, expected %x",
            evs[0].ID, newerEvent.ID,
        )
    }

    // Test deletion events
    // Create a deletion event that references the replaceable event
    deletionEvent := event.New()
    deletionEvent.Kind = kind.Deletion.K // Kind 5 is deletion
    deletionEvent.Pubkey = replaceableEvent.Pubkey // Same pubkey as the event being deleted
    deletionEvent.CreatedAt = timestamp.Now().V // Current time
    deletionEvent.Content = []byte("Deleting the replaceable event")
    deletionEvent.Tags = tag.NewS()

    // Add an e-tag referencing the replaceable event
    *deletionEvent.Tags = append(
        *deletionEvent.Tags,
        tag.NewFromAny("e", hex.Enc(replaceableEvent.ID)),
    )

    // Sign after the e-tag is attached so the signature covers it
    deletionEvent.Sign(sign)

    // Save the deletion event
    if _, err = db.SaveEvent(ctx, deletionEvent); err != nil {
        t.Fatalf("Failed to save deletion event: %v", err)
    }

    // Query for all events of this kind and pubkey again
    evs, err = db.QueryEvents(
        ctx, &filter.F{
            Kinds:   kindFilter,
            Authors: authorFilter,
        },
    )
    if err != nil {
        t.Fatalf(
            "Failed to query for replaceable events after deletion: %v", err,
        )
    }

    // Verify we still get the newer event (deletion should only affect the original event)
    if len(evs) != 1 {
        t.Fatalf(
            "Expected 1 event when querying for replaceable events after deletion, got %d",
            len(evs),
        )
    }

    // Verify it's still the newer event
    if !utils.FastEqual(evs[0].ID, newerEvent.ID) {
        t.Fatalf(
            "Event ID doesn't match after deletion. Got %x, expected %x",
            evs[0].ID, newerEvent.ID,
        )
    }

    // Query for the original event by ID
    evs, err = db.QueryEvents(
        ctx, &filter.F{
            Ids: tag.NewFromBytesSlice(replaceableEvent.ID),
        },
    )
    if err != nil {
        t.Fatalf("Failed to query for deleted event by ID: %v", err)
    }

    // Verify the original event is not found (it was deleted)
    if len(evs) != 0 {
        t.Fatalf("Expected 0 events when querying for deleted event by ID, got %d", len(evs))
    }
}

func TestParameterizedReplaceableEventsAndDeletion(t *testing.T) {
    db, events, ctx, cancel, tempDir := setupTestDB(t)
    defer cleanupTestDB(t, db, cancel, tempDir)

    sign := p8k.MustNew()
    if err := sign.Generate(); chk.E(err) {
        t.Fatal(err)
    }

    // Create a parameterized replaceable event
    paramEvent := event.New()
    paramEvent.Kind = 30000 // Kind 30000+ is parameterized replaceable
    paramEvent.Pubkey = events[0].Pubkey // Use the same pubkey as an existing event
    paramEvent.CreatedAt = timestamp.Now().V - 7200 // 2 hours ago
    paramEvent.Content = []byte("Original parameterized event")
    paramEvent.Tags = tag.NewS()
    // Add a d-tag
    *paramEvent.Tags = append(
        *paramEvent.Tags, tag.NewFromAny([]byte{'d'}, []byte("test-d-tag")),
    )
    paramEvent.Sign(sign)

    // Save the parameterized replaceable event
    if _, err := db.SaveEvent(ctx, paramEvent); err != nil {
        t.Fatalf("Failed to save parameterized replaceable event: %v", err)
    }

    // Create a deletion event using e-tag
    paramDeletionEvent := event.New()
    paramDeletionEvent.Kind = kind.Deletion.K // Kind 5 is deletion
    paramDeletionEvent.Pubkey = paramEvent.Pubkey // Same pubkey as the event being deleted
    paramDeletionEvent.CreatedAt = timestamp.Now().V // Current time
    paramDeletionEvent.Content = []byte("Deleting the parameterized replaceable event with e-tag")
    paramDeletionEvent.Tags = tag.NewS()
    // Add an e-tag referencing the parameterized replaceable event
    *paramDeletionEvent.Tags = append(
        *paramDeletionEvent.Tags,
        tag.NewFromAny("e", []byte(hex.Enc(paramEvent.ID))),
    )
    paramDeletionEvent.Sign(sign)

    // Save the parameterized deletion event with e-tag
    if _, err := db.SaveEvent(ctx, paramDeletionEvent); err != nil {
        t.Fatalf(
            "Failed to save parameterized deletion event with e-tag: %v", err,
        )
    }

    // Query for parameterized events
    paramKindFilter := kind.NewS(kind.New(paramEvent.Kind))
    paramAuthorFilter := tag.NewFromBytesSlice(paramEvent.Pubkey)

    evs, err := db.QueryEvents(
        ctx, &filter.F{
            Kinds:   paramKindFilter,
            Authors: paramAuthorFilter,
        },
    )
    if err != nil {
        t.Fatalf(
            "Failed to query for parameterized replaceable events after deletion: %v",
            err,
        )
    }

    // Debug output
    fmt.Printf("Got %d events after deletion\n", len(evs))
    for i, ev := range evs {
        fmt.Printf(
            "Event %d: kind=%d, pubkey=%s\n",
            i, ev.Kind, hex.Enc(ev.Pubkey),
        )
    }

    // Verify we get no events (since the only one was deleted)
    if len(evs) != 0 {
        t.Fatalf(
            "Expected 0 events when querying for deleted parameterized replaceable events, got %d",
            len(evs),
        )
    }

    // Query for the parameterized event by ID
    evs, err = db.QueryEvents(
        ctx, &filter.F{
            Ids: tag.NewFromBytesSlice(paramEvent.ID),
        },
    )
    if err != nil {
        t.Fatalf(
            "Failed to query for deleted parameterized event by ID: %v", err,
        )
    }

    // Verify the deleted event is not found when querying by ID
    if len(evs) != 0 {
        t.Fatalf(
            "Expected 0 events when querying for deleted parameterized event by ID, got %d",
            len(evs),
        )
    }
}

func TestQueryEventsByTimeRange(t *testing.T) {
    db, events, ctx, cancel, tempDir := setupTestDB(t)
    defer cleanupTestDB(t, db, cancel, tempDir)

    // Test querying by time range
    // Use the timestamp from the middle event as a reference
    middleIndex := len(events) / 2
    middleEvent := events[middleIndex]

    // Create a timestamp range that includes events before and after the middle event
    sinceTime := new(timestamp.T)
    sinceTime.V = middleEvent.CreatedAt - 3600 // 1 hour before middle event

    untilTime := new(timestamp.T)
    untilTime.V = middleEvent.CreatedAt + 3600 // 1 hour after middle event

    evs, err := db.QueryEvents(
        ctx, &filter.F{
            Since: sinceTime,
            Until: untilTime,
        },
    )
    if err != nil {
        t.Fatalf("Failed to query events by time range: %v", err)
    }

    // Verify we got results
    if len(evs) == 0 {
        t.Fatal("Expected events in time range, but got none")
    }

    // Verify all events are within the time range
    for i, ev := range evs {
        if ev.CreatedAt < sinceTime.V || ev.CreatedAt > untilTime.V {
            t.Fatalf(
                "Event %d is outside the time range. Got %d, expected between %d and %d",
                i, ev.CreatedAt, sinceTime.V, untilTime.V,
            )
        }
    }
}

func TestQueryEventsByTag(t *testing.T) {
    db, events, ctx, cancel, tempDir := setupTestDB(t)
    defer cleanupTestDB(t, db, cancel, tempDir)

    // Find an event with tags to use for testing
    var testTagEvent *event.E
    for _, ev := range events {
        if ev.Tags != nil && ev.Tags.Len() > 0 {
            // Find a tag with at least 2 elements and first element of length 1
            for _, tag := range *ev.Tags {
                if tag.Len() >= 2 && len(tag.Key()) == 1 {
                    testTagEvent = ev
                    break
                }
            }
            if testTagEvent != nil {
                break
            }
        }
    }

    if testTagEvent == nil {
        t.Skip("No suitable event with tags found for testing")
        return
    }

    // Get the first tag with at least 2 elements and first element of length 1
    var testTag *tag.T
    for _, tag := range *testTagEvent.Tags {
        if tag.Len() >= 2 && len(tag.Key()) == 1 {
            testTag = tag
            break
        }
    }

    // Create a tags filter with the test tag
    tagsFilter := tag.NewS(testTag)

    evs, err := db.QueryEvents(
        ctx, &filter.F{
            Tags: tagsFilter,
        },
    )
    if err != nil {
        t.Fatalf("Failed to query events by tag: %v", err)
    }

    // Verify we got results
    if len(evs) == 0 {
        t.Fatal("Expected events with tag, but got none")
    }

    // Verify all events have the tag
    for i, ev := range evs {
        var hasTag bool
        for _, tag := range *ev.Tags {
            if tag.Len() >= 2 && len(tag.Key()) == 1 {
                if utils.FastEqual(tag.Key(), testTag.Key()) &&
                    utils.FastEqual(tag.Value(), testTag.Value()) {
                    hasTag = true
                    break
                }
            }
        }
        if !hasTag {
            t.Fatalf("Event %d does not have the expected tag", i)
        }
    }
}

func TestCountEvents(t *testing.T) {
    db, _, ctx, cancel, tempDir := setupTestDB(t)
    defer cleanupTestDB(t, db, cancel, tempDir)

    // Test counting all events
    count, _, err := db.CountEvents(ctx, &filter.F{})
    if err != nil {
        t.Fatalf("Failed to count events: %v", err)
    }

    // Verify we got a non-zero count
    if count == 0 {
        t.Fatal("Expected non-zero event count, but got 0")
    }

    t.Logf("Total events in database: %d", count)

    // Test counting events by kind
    testKind := kind.New(1)
    kindFilter := kind.NewS(testKind)

    count, _, err = db.CountEvents(
        ctx, &filter.F{
            Kinds: kindFilter,
        },
    )
    if err != nil {
        t.Fatalf("Failed to count events by kind: %v", err)
    }

    t.Logf("Events with kind 1: %d", count)
}
@@ -1,183 +0,0 @@
package dgraph

import (
    "context"
    "encoding/json"
    "fmt"
    "strings"

    "github.com/dgraph-io/dgo/v230/protos/api"
    "next.orly.dev/pkg/database/indexes/types"
    "git.mleku.dev/mleku/nostr/encoders/event"
    "git.mleku.dev/mleku/nostr/encoders/filter"
    "git.mleku.dev/mleku/nostr/encoders/hex"
)

// SaveEvent stores a Nostr event in the Dgraph database.
// It creates event nodes and relationships for authors, tags, and references.
func (d *D) SaveEvent(c context.Context, ev *event.E) (exists bool, err error) {
    eventID := hex.Enc(ev.ID[:])

    // Check if event already exists
    query := fmt.Sprintf(`{
        event(func: eq(event.id, %q)) {
            uid
            event.id
        }
    }`, eventID)

    resp, err := d.Query(c, query)
    if err != nil {
        return false, fmt.Errorf("failed to check event existence: %w", err)
    }

    // Parse response to check if event exists
    var result struct {
        Event []map[string]interface{} `json:"event"`
    }
    if err = json.Unmarshal(resp.Json, &result); err != nil {
        return false, fmt.Errorf("failed to parse query response: %w", err)
    }

    if len(result.Event) > 0 {
        return true, nil // Event already exists
    }

    // Get next serial number
    serial, err := d.getNextSerial()
    if err != nil {
        return false, fmt.Errorf("failed to get serial number: %w", err)
    }

    // Build N-Quads for the event with serial number
    nquads := d.buildEventNQuads(ev, serial)

    // Store the event
    mutation := &api.Mutation{
        SetNquads: []byte(nquads),
        CommitNow: true,
    }

    if _, err = d.Mutate(c, mutation); err != nil {
        return false, fmt.Errorf("failed to save event: %w", err)
    }

    return false, nil
}

// buildEventNQuads constructs RDF triples for a Nostr event
func (d *D) buildEventNQuads(ev *event.E, serial uint64) string {
    var nquads strings.Builder

    eventID := hex.Enc(ev.ID[:])
    authorPubkey := hex.Enc(ev.Pubkey)

    // Event node
    nquads.WriteString(fmt.Sprintf("_:%s <dgraph.type> \"Event\" .\n", eventID))
    nquads.WriteString(fmt.Sprintf("_:%s <event.id> %q .\n", eventID, eventID))
    nquads.WriteString(fmt.Sprintf("_:%s <event.serial> \"%d\"^^<xs:int> .\n", eventID, serial))
    nquads.WriteString(fmt.Sprintf("_:%s <event.kind> \"%d\"^^<xs:int> .\n", eventID, ev.Kind))
    nquads.WriteString(fmt.Sprintf("_:%s <event.created_at> \"%d\"^^<xs:int> .\n", eventID, int64(ev.CreatedAt)))
    nquads.WriteString(fmt.Sprintf("_:%s <event.content> %q .\n", eventID, ev.Content))
    nquads.WriteString(fmt.Sprintf("_:%s <event.sig> %q .\n", eventID, hex.Enc(ev.Sig[:])))
    nquads.WriteString(fmt.Sprintf("_:%s <event.pubkey> %q .\n", eventID, authorPubkey))

    // Serialize tags as JSON string for storage
    tagsJSON, _ := json.Marshal(ev.Tags)
    nquads.WriteString(fmt.Sprintf("_:%s <event.tags> %q .\n", eventID, string(tagsJSON)))

    // Author relationship
    nquads.WriteString(fmt.Sprintf("_:%s <authored_by> _:%s .\n", eventID, authorPubkey))
    nquads.WriteString(fmt.Sprintf("_:%s <dgraph.type> \"Author\" .\n", authorPubkey))
    nquads.WriteString(fmt.Sprintf("_:%s <author.pubkey> %q .\n", authorPubkey, authorPubkey))

    // Tag relationships
    for _, tag := range *ev.Tags {
        if len(tag.T) >= 2 {
            tagType := string(tag.T[0])
            tagValue := string(tag.T[1])

            switch tagType {
            case "e": // Event reference
                nquads.WriteString(fmt.Sprintf("_:%s <references> _:%s .\n", eventID, tagValue))
            case "p": // Pubkey mention
                nquads.WriteString(fmt.Sprintf("_:%s <mentions> _:%s .\n", eventID, tagValue))
                // Ensure mentioned author exists
                nquads.WriteString(fmt.Sprintf("_:%s <dgraph.type> \"Author\" .\n", tagValue))
                nquads.WriteString(fmt.Sprintf("_:%s <author.pubkey> %q .\n", tagValue, tagValue))
            case "t": // Hashtag
                tagID := "tag_" + tagType + "_" + tagValue
                nquads.WriteString(fmt.Sprintf("_:%s <tagged_with> _:%s .\n", eventID, tagID))
                nquads.WriteString(fmt.Sprintf("_:%s <dgraph.type> \"Tag\" .\n", tagID))
                nquads.WriteString(fmt.Sprintf("_:%s <tag.type> %q .\n", tagID, tagType))
                nquads.WriteString(fmt.Sprintf("_:%s <tag.value> %q .\n", tagID, tagValue))
            default:
                // Store other tag types
                tagID := "tag_" + tagType + "_" + tagValue
                nquads.WriteString(fmt.Sprintf("_:%s <tagged_with> _:%s .\n", eventID, tagID))
                nquads.WriteString(fmt.Sprintf("_:%s <dgraph.type> \"Tag\" .\n", tagID))
                nquads.WriteString(fmt.Sprintf("_:%s <tag.type> %q .\n", tagID, tagType))
                nquads.WriteString(fmt.Sprintf("_:%s <tag.value> %q .\n", tagID, tagValue))
            }
        }
    }

    return nquads.String()
}
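// Example output (editorial sketch): for a kind-1 event carrying a single
// ["t", "nostr"] tag, buildEventNQuads emits triples along these lines
// (hex identifiers abbreviated for readability):
//
//	_:ab12 <dgraph.type> "Event" .
//	_:ab12 <event.id> "ab12" .
//	_:ab12 <event.serial> "42"^^<xs:int> .
//	_:ab12 <event.kind> "1"^^<xs:int> .
//	_:ab12 <authored_by> _:cd34 .
//	_:cd34 <dgraph.type> "Author" .
//	_:cd34 <author.pubkey> "cd34" .
//	_:ab12 <tagged_with> _:tag_t_nostr .
//	_:tag_t_nostr <tag.type> "t" .
//	_:tag_t_nostr <tag.value> "nostr" .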
// GetSerialsFromFilter returns event serials matching a filter
func (d *D) GetSerialsFromFilter(f *filter.F) (serials types.Uint40s, err error) {
    // Use QueryForSerials which already implements the proper filter logic
    return d.QueryForSerials(context.Background(), f)
}

// WouldReplaceEvent checks if an event would replace existing events
func (d *D) WouldReplaceEvent(ev *event.E) (bool, types.Uint40s, error) {
    // Check for replaceable events (kinds 0, 3, and 10000-19999)
    isReplaceable := ev.Kind == 0 || ev.Kind == 3 || (ev.Kind >= 10000 && ev.Kind < 20000)
    if !isReplaceable {
        return false, nil, nil
    }

    // Query for existing events with same kind and pubkey
    authorPubkey := hex.Enc(ev.Pubkey)
    query := fmt.Sprintf(`{
        events(func: eq(event.pubkey, %q)) @filter(eq(event.kind, %d)) {
            uid
            event.serial
            event.created_at
        }
    }`, authorPubkey, ev.Kind)

    resp, err := d.Query(context.Background(), query)
    if err != nil {
        return false, nil, fmt.Errorf("failed to query replaceable events: %w", err)
    }

    var result struct {
        Events []struct {
            UID       string `json:"uid"`
            Serial    int64  `json:"event.serial"`
            CreatedAt int64  `json:"event.created_at"`
        } `json:"events"`
    }
    if err = json.Unmarshal(resp.Json, &result); err != nil {
        return false, nil, fmt.Errorf("failed to parse query response: %w", err)
    }

    // Check if our event is newer
    evTime := int64(ev.CreatedAt)
    var serials types.Uint40s
    wouldReplace := false

    for _, existing := range result.Events {
        if existing.CreatedAt < evTime {
            wouldReplace = true
            serial := types.Uint40{}
            serial.Set(uint64(existing.Serial))
            serials = append(serials, &serial)
        }
    }

    return wouldReplace, serials, nil
}
@@ -1,253 +0,0 @@
package dgraph

import (
    "bufio"
    "bytes"
    "context"
    "os"
    "sort"
    "testing"
    "time"

    "lol.mleku.dev/chk"
    "lol.mleku.dev/errorf"
    "git.mleku.dev/mleku/nostr/encoders/event"
    "git.mleku.dev/mleku/nostr/encoders/event/examples"
    "git.mleku.dev/mleku/nostr/encoders/hex"
    "git.mleku.dev/mleku/nostr/encoders/kind"
    "git.mleku.dev/mleku/nostr/encoders/tag"
    "git.mleku.dev/mleku/nostr/encoders/timestamp"
    "git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)

// TestSaveEvents tests saving all events from examples.Cache to the dgraph database
// to verify there are no errors during the saving process.
func TestSaveEvents(t *testing.T) {
    skipIfDgraphNotAvailable(t)

    // Create a temporary directory for metadata
    tempDir, err := os.MkdirTemp("", "test-dgraph-*")
    if err != nil {
        t.Fatalf("Failed to create temporary directory: %v", err)
    }
    defer os.RemoveAll(tempDir)

    // Create a context and cancel function for the database
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Initialize the dgraph database
    db, err := New(ctx, cancel, tempDir, "info")
    if err != nil {
        t.Fatalf("Failed to create dgraph database: %v", err)
    }
    defer db.Close()

    // Drop all data to start fresh
    if err := db.dropAll(ctx); err != nil {
        t.Fatalf("Failed to drop all data: %v", err)
    }

    // Create a scanner to read events from examples.Cache
    scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
    scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)

    // Collect all events first
    var events []*event.E
    var original int
    for scanner.Scan() {
        chk.E(scanner.Err())
        b := scanner.Bytes()
        original += len(b)
        ev := event.New()

        // Unmarshal the event
        if _, err = ev.Unmarshal(b); chk.E(err) {
            t.Fatal(err)
        }

        events = append(events, ev)
    }

    // Sort events by timestamp to ensure addressable events are processed in order
    sort.Slice(events, func(i, j int) bool {
        return events[i].CreatedAt < events[j].CreatedAt
    })

    // Count the number of events processed
    eventCount := 0
    now := time.Now()

    // Process each event in chronological order
    for _, ev := range events {
        // Save the event to the database
        if _, err = db.SaveEvent(ctx, ev); err != nil {
            t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
        }
        eventCount++
    }

    // Check for scanner errors
    if err = scanner.Err(); err != nil {
        t.Fatalf("Scanner error: %v", err)
    }

    dur := time.Since(now)
    t.Logf(
        "Successfully saved %d events (%d bytes) to dgraph in %v (%v/ev; %.2f ev/s)",
        eventCount,
        original,
        dur,
        dur/time.Duration(eventCount),
        float64(time.Second)/float64(dur/time.Duration(eventCount)),
    )
}

// TestDeletionEventWithETagRejection tests that a deletion event with an "e" tag is rejected.
func TestDeletionEventWithETagRejection(t *testing.T) {
    skipIfDgraphNotAvailable(t)

    // Create a temporary directory for metadata
    tempDir, err := os.MkdirTemp("", "test-dgraph-*")
    if err != nil {
        t.Fatalf("Failed to create temporary directory: %v", err)
    }
    defer os.RemoveAll(tempDir)

    // Create a context and cancel function for the database
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Initialize the dgraph database
    db, err := New(ctx, cancel, tempDir, "info")
    if err != nil {
        t.Fatalf("Failed to create dgraph database: %v", err)
    }
    defer db.Close()

    // Drop all data to start fresh
    if err := db.dropAll(ctx); err != nil {
        t.Fatalf("Failed to drop all data: %v", err)
    }

    // Create a signer
    sign := p8k.MustNew()
    if err := sign.Generate(); chk.E(err) {
        t.Fatal(err)
    }

    // Create a regular event
    regularEvent := event.New()
    regularEvent.Kind = kind.TextNote.K
    regularEvent.Pubkey = sign.Pub()
    regularEvent.CreatedAt = timestamp.Now().V - 3600 // 1 hour ago
    regularEvent.Content = []byte("Regular event")
    regularEvent.Tags = tag.NewS()
    regularEvent.Sign(sign)

    // Save the regular event
    if _, err := db.SaveEvent(ctx, regularEvent); err != nil {
        t.Fatalf("Failed to save regular event: %v", err)
    }

    // Create a deletion event with an "e" tag referencing the regular event
    deletionEvent := event.New()
    deletionEvent.Kind = kind.Deletion.K
    deletionEvent.Pubkey = sign.Pub()
    deletionEvent.CreatedAt = timestamp.Now().V // Current time
    deletionEvent.Content = []byte("Deleting the regular event")
    deletionEvent.Tags = tag.NewS()

    // Add an e-tag referencing the regular event
    *deletionEvent.Tags = append(
        *deletionEvent.Tags,
        tag.NewFromAny("e", hex.Enc(regularEvent.ID)),
    )

    deletionEvent.Sign(sign)

    // Check if this is a deletion event with "e" tags
    if deletionEvent.Kind == kind.Deletion.K && deletionEvent.Tags.GetFirst([]byte{'e'}) != nil {
        // In this test, we want to reject deletion events with "e" tags
        err = errorf.E("deletion events referencing other events with 'e' tag are not allowed")
    } else {
        // Try to save the deletion event
        _, err = db.SaveEvent(ctx, deletionEvent)
    }

    if err == nil {
        t.Fatal("Expected deletion event with e-tag to be rejected, but it was accepted")
    }

    // Verify the error message
    expectedError := "deletion events referencing other events with 'e' tag are not allowed"
    if err.Error() != expectedError {
        t.Fatalf(
            "Expected error message '%s', got '%s'", expectedError, err.Error(),
        )
    }
}

// TestSaveExistingEvent tests that attempting to save an event that already exists
// returns an error.
func TestSaveExistingEvent(t *testing.T) {
    skipIfDgraphNotAvailable(t)

    // Create a temporary directory for metadata
    tempDir, err := os.MkdirTemp("", "test-dgraph-*")
    if err != nil {
        t.Fatalf("Failed to create temporary directory: %v", err)
    }
    defer os.RemoveAll(tempDir)

    // Create a context and cancel function for the database
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // Initialize the dgraph database
    db, err := New(ctx, cancel, tempDir, "info")
    if err != nil {
        t.Fatalf("Failed to create dgraph database: %v", err)
    }
    defer db.Close()

    // Drop all data to start fresh
    if err := db.dropAll(ctx); err != nil {
        t.Fatalf("Failed to drop all data: %v", err)
    }

    // Create a signer
    sign := p8k.MustNew()
    if err := sign.Generate(); chk.E(err) {
        t.Fatal(err)
    }

    // Create an event
    ev := event.New()
    ev.Kind = kind.TextNote.K
    ev.Pubkey = sign.Pub()
    ev.CreatedAt = timestamp.Now().V
    ev.Content = []byte("Test event")
    ev.Tags = tag.NewS()
    ev.Sign(sign)

    // Save the event for the first time
    if _, err := db.SaveEvent(ctx, ev); err != nil {
        t.Fatalf("Failed to save event: %v", err)
    }

    // Try to save the same event again, it should be rejected
    _, err = db.SaveEvent(ctx, ev)
    if err == nil {
        t.Fatal("Expected error when saving an existing event, but got nil")
    }

    // Verify the error message contains indication of duplicate
    expectedErrorPrefix := "blocked: event already exists"
    if !bytes.Contains([]byte(err.Error()), []byte(expectedErrorPrefix)) {
        t.Fatalf(
            "Expected error message to contain '%s', got '%s'",
            expectedErrorPrefix, err.Error(),
        )
    }
}
@@ -1,105 +0,0 @@
package dgraph

import (
    "context"
    "fmt"

    "github.com/dgraph-io/dgo/v230/protos/api"
)

// NostrSchema defines the Dgraph schema for Nostr events
const NostrSchema = `
# Event node type
type Event {
    event.id
    event.serial
    event.kind
    event.created_at
    event.content
    event.sig
    event.pubkey
    authored_by
    references
    mentions
    tagged_with
}

# Author node type
type Author {
    author.pubkey
    author.events
}

# Tag node type
type Tag {
    tag.type
    tag.value
    tag.events
}

# Marker node type (for key-value metadata)
type Marker {
    marker.key
    marker.value
}

# Event fields (trigram indexes support the regexp prefix matching in query.go)
event.id: string @index(exact, trigram) @upsert .
event.serial: int @index(int) .
event.kind: int @index(int) .
event.created_at: int @index(int) .
event.content: string .
event.sig: string @index(exact) .
event.pubkey: string @index(exact, trigram) .

# Event relationships (unprefixed, matching the N-Quads written by SaveEvent)
authored_by: uid @reverse .
references: [uid] @reverse .
mentions: [uid] @reverse .
tagged_with: [uid] @reverse .

# Author fields
author.pubkey: string @index(exact) @upsert .
author.events: [uid] @count @reverse .

# Tag fields
tag.type: string @index(exact) .
tag.value: string @index(exact, fulltext) .
tag.events: [uid] @count @reverse .

# Marker fields (key-value storage)
marker.key: string @index(exact) @upsert .
marker.value: string .
`

// applySchema applies the Nostr schema to the connected Dgraph instance
func (d *D) applySchema(ctx context.Context) error {
    d.Logger.Infof("applying Nostr schema to dgraph")

    op := &api.Operation{
        Schema: NostrSchema,
    }

    if err := d.client.Alter(ctx, op); err != nil {
        return fmt.Errorf("failed to apply schema: %w", err)
    }

    d.Logger.Infof("schema applied successfully")
    return nil
}

// dropAll drops all data from dgraph (useful for testing)
func (d *D) dropAll(ctx context.Context) error {
    d.Logger.Warningf("dropping all data from dgraph")

    op := &api.Operation{
        DropAll: true,
    }

    if err := d.client.Alter(ctx, op); err != nil {
        return fmt.Errorf("failed to drop all data: %w", err)
    }

    // Reapply schema after dropping
    return d.applySchema(ctx)
}
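// Query sketch (editorial note): the @reverse directives are what make
// graph-shaped lookups cheap; for example, "all events by an author" walks
// the reverse authored_by edge. The pubkey literal is a placeholder:
//
//	{
//		author(func: eq(author.pubkey, "cd34")) {
//			~authored_by {
//				event.id
//				event.created_at
//			}
//		}
//	}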
@@ -1,136 +0,0 @@
package dgraph

import (
    "context"
    "encoding/json"
    "fmt"
    "sync"

    "github.com/dgraph-io/dgo/v230/protos/api"
)

// Serial number management
// We use a special counter node to track the next available serial number

const serialCounterKey = "serial_counter"

var (
    serialMutex sync.Mutex
)

// getNextSerial atomically increments and returns the next serial number
func (d *D) getNextSerial() (uint64, error) {
    serialMutex.Lock()
    defer serialMutex.Unlock()

    // Query current serial value
    query := fmt.Sprintf(`{
        counter(func: eq(marker.key, %q)) {
            uid
            marker.value
        }
    }`, serialCounterKey)

    resp, err := d.Query(context.Background(), query)
    if err != nil {
        return 0, fmt.Errorf("failed to query serial counter: %w", err)
    }

    var result struct {
        Counter []struct {
            UID   string `json:"uid"`
            Value string `json:"marker.value"`
        } `json:"counter"`
    }

    if err = json.Unmarshal(resp.Json, &result); err != nil {
        return 0, fmt.Errorf("failed to parse serial counter: %w", err)
    }

    var currentSerial uint64 = 1
    var uid string

    if len(result.Counter) > 0 {
        // Parse current serial
        uid = result.Counter[0].UID
        if result.Counter[0].Value != "" {
            fmt.Sscanf(result.Counter[0].Value, "%d", &currentSerial)
        }
    }

    // Increment serial
    nextSerial := currentSerial + 1

    // Update or create counter
    var nquads string
    if uid != "" {
        // Update existing counter
        nquads = fmt.Sprintf(`<%s> <marker.value> "%d" .`, uid, nextSerial)
    } else {
        // Create new counter
        nquads = fmt.Sprintf(`
            _:counter <dgraph.type> "Marker" .
            _:counter <marker.key> %q .
            _:counter <marker.value> "%d" .
        `, serialCounterKey, nextSerial)
    }

    mutation := &api.Mutation{
        SetNquads: []byte(nquads),
        CommitNow: true,
    }

    if _, err = d.Mutate(context.Background(), mutation); err != nil {
        return 0, fmt.Errorf("failed to update serial counter: %w", err)
    }

    return currentSerial, nil
}

// initSerialCounter initializes the serial counter if it doesn't exist
func (d *D) initSerialCounter() error {
    query := fmt.Sprintf(`{
        counter(func: eq(marker.key, %q)) {
            uid
        }
    }`, serialCounterKey)

    resp, err := d.Query(context.Background(), query)
    if err != nil {
        return fmt.Errorf("failed to check serial counter: %w", err)
    }

    var result struct {
        Counter []struct {
            UID string `json:"uid"`
        } `json:"counter"`
    }

    if err = json.Unmarshal(resp.Json, &result); err != nil {
        return fmt.Errorf("failed to parse counter check: %w", err)
    }

    // Counter already exists
    if len(result.Counter) > 0 {
        return nil
    }

    // Initialize counter at 1
    nquads := fmt.Sprintf(`
        _:counter <dgraph.type> "Marker" .
        _:counter <marker.key> %q .
        _:counter <marker.value> "1" .
    `, serialCounterKey)

    mutation := &api.Mutation{
        SetNquads: []byte(nquads),
        CommitNow: true,
    }

    if _, err = d.Mutate(context.Background(), mutation); err != nil {
        return fmt.Errorf("failed to initialize serial counter: %w", err)
    }

    d.Logger.Infof("initialized serial counter")
    return nil
}
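For orientation, a minimal usage sketch of the two functions above as they were called before this removal (assumed call order, not taken from the codebase):

```go
// Sketch: ensure the counter node exists once at startup, then allocate a
// serial per stored event. Note the mutex only serializes a single process;
// multiple relay instances sharing one Dgraph cluster could still race
// between the query and the mutation.
func (d *D) saveWithSerial() (uint64, error) {
    if err := d.initSerialCounter(); err != nil {
        return 0, err
    }
    return d.getNextSerial()
}
```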
@@ -1,188 +0,0 @@
package dgraph

import (
    "encoding/json"
    "fmt"
    "time"

    "next.orly.dev/pkg/database"
    "git.mleku.dev/mleku/nostr/encoders/hex"
)

// Subscription and payment methods
// Simplified implementation using marker-based storage
// For production, these should use proper graph nodes with relationships

// GetSubscription retrieves subscription information for a pubkey
func (d *D) GetSubscription(pubkey []byte) (*database.Subscription, error) {
    key := "sub_" + hex.Enc(pubkey)
    data, err := d.GetMarker(key)
    if err != nil {
        return nil, err
    }

    var sub database.Subscription
    if err := json.Unmarshal(data, &sub); err != nil {
        return nil, fmt.Errorf("failed to unmarshal subscription: %w", err)
    }

    return &sub, nil
}

// IsSubscriptionActive checks if a pubkey has an active subscription
func (d *D) IsSubscriptionActive(pubkey []byte) (bool, error) {
    sub, err := d.GetSubscription(pubkey)
    if err != nil {
        return false, nil // No subscription = not active
    }

    return sub.PaidUntil.After(time.Now()), nil
}

// ExtendSubscription extends a subscription by the specified number of days
func (d *D) ExtendSubscription(pubkey []byte, days int) error {
    key := "sub_" + hex.Enc(pubkey)

    // Get existing subscription or create new
    var sub database.Subscription
    data, err := d.GetMarker(key)
    if err == nil {
        if err := json.Unmarshal(data, &sub); err != nil {
            return fmt.Errorf("failed to unmarshal subscription: %w", err)
        }
    } else {
        // New subscription - set trial period
        sub.TrialEnd = time.Now()
        sub.PaidUntil = time.Now()
    }

    // Extend expiration
    if sub.PaidUntil.Before(time.Now()) {
        sub.PaidUntil = time.Now()
    }
    sub.PaidUntil = sub.PaidUntil.Add(time.Duration(days) * 24 * time.Hour)

    // Save
    data, err = json.Marshal(sub)
    if err != nil {
        return fmt.Errorf("failed to marshal subscription: %w", err)
    }

    return d.SetMarker(key, data)
}

// RecordPayment records a payment for subscription extension
func (d *D) RecordPayment(
    pubkey []byte, amount int64, invoice, preimage string,
) error {
    // Store payment in payments list
    key := "payments_" + hex.Enc(pubkey)

    var payments []database.Payment
    data, err := d.GetMarker(key)
    if err == nil {
        if err := json.Unmarshal(data, &payments); err != nil {
            return fmt.Errorf("failed to unmarshal payments: %w", err)
        }
    }

    payment := database.Payment{
        Amount:    amount,
        Timestamp: time.Now(),
        Invoice:   invoice,
        Preimage:  preimage,
    }

    payments = append(payments, payment)

    data, err = json.Marshal(payments)
    if err != nil {
        return fmt.Errorf("failed to marshal payments: %w", err)
    }

    return d.SetMarker(key, data)
}

// GetPaymentHistory retrieves payment history for a pubkey
func (d *D) GetPaymentHistory(pubkey []byte) ([]database.Payment, error) {
    key := "payments_" + hex.Enc(pubkey)

    data, err := d.GetMarker(key)
    if err != nil {
        return nil, nil // No payments = empty list
    }

    var payments []database.Payment
    if err := json.Unmarshal(data, &payments); err != nil {
        return nil, fmt.Errorf("failed to unmarshal payments: %w", err)
    }

    return payments, nil
}

// ExtendBlossomSubscription extends a Blossom storage subscription
func (d *D) ExtendBlossomSubscription(
    pubkey []byte, tier string, storageMB int64, daysExtended int,
) error {
    key := "blossom_" + hex.Enc(pubkey)

    // Simple implementation - just store tier and expiry
    data := map[string]interface{}{
        "tier":      tier,
        "storageMB": storageMB,
        "extended":  daysExtended,
        "updated":   time.Now(),
    }

    jsonData, err := json.Marshal(data)
    if err != nil {
        return fmt.Errorf("failed to marshal blossom subscription: %w", err)
    }

    return d.SetMarker(key, jsonData)
}

// GetBlossomStorageQuota retrieves the storage quota for a pubkey
func (d *D) GetBlossomStorageQuota(pubkey []byte) (quotaMB int64, err error) {
    key := "blossom_" + hex.Enc(pubkey)

    data, err := d.GetMarker(key)
    if err != nil {
        return 0, nil // No subscription = 0 quota
    }

    var result map[string]interface{}
    if err := json.Unmarshal(data, &result); err != nil {
        return 0, fmt.Errorf("failed to unmarshal blossom data: %w", err)
    }

    // Default quota based on tier - simplified
    if tier, ok := result["tier"].(string); ok {
        switch tier {
        case "basic":
            return 100, nil
        case "premium":
            return 1000, nil
        default:
            return 10, nil
        }
    }

    return 0, nil
}

// IsFirstTimeUser checks if a pubkey is a first-time user
func (d *D) IsFirstTimeUser(pubkey []byte) (bool, error) {
    // Check if they have any subscription or payment history
    sub, _ := d.GetSubscription(pubkey)
    if sub != nil {
        return false, nil
    }

    payments, _ := d.GetPaymentHistory(pubkey)
    if len(payments) > 0 {
        return false, nil
    }

    return true, nil
}
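A short usage sketch of this marker-based API (illustrative amounts and flow, not taken from the relay's handlers):

```go
// Sketch: record a settled invoice, extend the subscriber by 30 days, then
// gate access on the result.
func handlePayment(d *D, pubkey []byte, invoice, preimage string) (bool, error) {
    if err := d.RecordPayment(pubkey, 21000, invoice, preimage); err != nil {
        return false, err
    }
    if err := d.ExtendSubscription(pubkey, 30); err != nil {
        return false, err
    }
    return d.IsSubscriptionActive(pubkey)
}
```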
@@ -1,30 +0,0 @@
package dgraph

import (
    "io"
    "os"
    "testing"

    "lol.mleku.dev"
    "lol.mleku.dev/log"
)

func TestMain(m *testing.M) {
    // Disable all logging during tests unless explicitly enabled
    if os.Getenv("TEST_LOG") == "" {
        // Set log level to Off to suppress all logs
        lol.SetLogLevel("off")
        // Also redirect output to discard
        lol.Writer = io.Discard
        // Disable all log printers
        log.T = lol.GetNullPrinter()
        log.D = lol.GetNullPrinter()
        log.I = lol.GetNullPrinter()
        log.W = lol.GetNullPrinter()
        log.E = lol.GetNullPrinter()
        log.F = lol.GetNullPrinter()
    }

    // Run tests
    os.Exit(m.Run())
}
@@ -1,10 +0,0 @@
package dgraph

import (
    "encoding/json"
)

// unmarshalJSON is a helper to unmarshal JSON with error handling
func unmarshalJSON(data []byte, v interface{}) error {
    return json.Unmarshal(data, v)
}
@@ -4,12 +4,51 @@ import (
    "encoding/json"
    "fmt"
    "strconv"
    "strings"
    "time"

    "git.mleku.dev/mleku/nostr/encoders/event"
    "git.mleku.dev/mleku/nostr/encoders/hex"
    "git.mleku.dev/mleku/nostr/encoders/tag"
)

+// Tag binary encoding constants (matching the nostr library)
+const (
+    binaryEncodedLen = 33 // 32 bytes hash + null terminator
+    hexEncodedLen    = 64 // 64 hex characters for 32 bytes
+    hashLen          = 32
+)
+
+// isBinaryEncoded checks if a value is stored in the nostr library's binary-optimized format
+func isBinaryEncoded(val []byte) bool {
+    return len(val) == binaryEncodedLen && val[hashLen] == 0
+}
+
+// normalizePubkeyHex ensures a pubkey is in lowercase hex format.
+// Handles binary-encoded values (33 bytes) and uppercase hex strings.
+func normalizePubkeyHex(val []byte) string {
+    if isBinaryEncoded(val) {
+        return hex.Enc(val[:hashLen])
+    }
+    if len(val) == hexEncodedLen {
+        return strings.ToLower(string(val))
+    }
+    return strings.ToLower(string(val))
+}
+
+// extractPTagValue extracts a pubkey from a p-tag, handling binary encoding.
+// Returns lowercase hex string.
+func extractPTagValue(t *tag.T) string {
+    if t == nil || len(t.T) < 2 {
+        return ""
+    }
+    hexVal := t.ValueHex()
+    if len(hexVal) == 0 {
+        return ""
+    }
+    return strings.ToLower(string(hexVal))
+}
+
// getTagValue retrieves the value of the first tag with the given key
func getTagValue(ev *event.E, key string) string {
    t := ev.Tags.GetFirst([]byte(key))
@@ -128,6 +167,7 @@ func ParseTrustGraph(ev *event.E) (*TrustGraphEvent, error) {
    }

    // Parse p tags (trust entries)
+   // Use extractPTagValue to handle binary-encoded pubkeys
    var entries []TrustEntry
    pTags := getAllTags(ev, "p")
    for _, t := range pTags {
@@ -135,7 +175,12 @@ func ParseTrustGraph(ev *event.E) (*TrustGraphEvent, error) {
            continue // Skip malformed tags
        }

-       pubkey := string(t.T[1])
+       // Use extractPTagValue to handle binary encoding and normalize to lowercase hex
+       pubkey := extractPTagValue(t)
+       if pubkey == "" {
+           continue // Skip invalid p-tags
+       }

        serviceURL := ""
        trustScore := 0.5 // default

@@ -336,6 +381,8 @@ func ParseCertificate(ev *event.E) (*Certificate, error) {
    validUntil := time.Unix(validUntilUnix, 0)

    // Parse witness tags
+   // Note: "witness" is a custom tag key (not "p"), so it doesn't have binary encoding,
+   // but we normalize the pubkey to lowercase for consistency
    var witnesses []WitnessSignature
    witnessTags := getAllTags(ev, "witness")
    for _, t := range witnessTags {
@@ -344,7 +391,7 @@ func ParseCertificate(ev *event.E) (*Certificate, error) {
    }

    witnesses = append(witnesses, WitnessSignature{
-       Pubkey:    string(t.T[1]),
+       Pubkey:    normalizePubkeyHex(t.T[1]), // Normalize to lowercase
        Signature: string(t.T[2]),
    })
}

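A worked example of the two value forms these helpers distinguish (a standalone sketch with a local copy of the check; byte values are illustrative):

```go
package main

import (
    "encoding/hex"
    "fmt"
    "strings"
)

// isBinaryEncoded mirrors the helper above: 32 hash bytes plus a null byte.
func isBinaryEncoded(val []byte) bool { return len(val) == 33 && val[32] == 0 }

func main() {
    // Binary-optimized form: 33 bytes, last byte zero.
    bin := make([]byte, 33)
    fmt.Println(isBinaryEncoded(bin))         // true
    fmt.Println(hex.EncodeToString(bin[:32])) // 64 lowercase hex characters

    // Plain hex form, possibly uppercase: normalized by lowercasing.
    upper := strings.Repeat("AB", 32)
    fmt.Println(strings.ToLower(upper)[:8]) // abababab
}
```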
@@ -2,6 +2,7 @@
package acl

import (
    "git.mleku.dev/mleku/nostr/encoders/event"
    "next.orly.dev/pkg/interfaces/typer"
)

@@ -31,3 +32,9 @@ type I interface {
    Syncer()
    typer.T
}

+// PolicyChecker is an optional interface that ACL implementations can implement
+// to provide custom event policy checking beyond basic access level checks.
+type PolicyChecker interface {
+    CheckPolicy(ev *event.E) (allowed bool, err error)
+}

8  pkg/interfaces/neterr/neterr.go  Normal file
@@ -0,0 +1,8 @@
// Package neterr defines interfaces for network error handling.
package neterr

// TimeoutError is an interface for errors that can indicate a timeout.
// This is compatible with net.Error's Timeout() method.
type TimeoutError interface {
    Timeout() bool
}
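A sketch of how a caller can branch on timeouts through this interface without depending on `net` directly (uses only the standard library; `os.ErrDeadlineExceeded` implements `Timeout()`):

```go
package main

import (
    "errors"
    "fmt"
    "os"
)

// TimeoutError mirrors the interface declared above.
type TimeoutError interface {
    Timeout() bool
}

func main() {
    // Wrapped errors still match via errors.As.
    err := fmt.Errorf("read failed: %w", os.ErrDeadlineExceeded)

    var te TimeoutError
    if errors.As(err, &te) && te.Timeout() {
        fmt.Println("operation timed out; safe to retry")
    }
}
```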
16  pkg/interfaces/resultiter/resultiter.go  Normal file
@@ -0,0 +1,16 @@
// Package resultiter defines interfaces for iterating over database query results.
package resultiter

import (
    "context"

    "github.com/neo4j/neo4j-go-driver/v5/neo4j"
)

// Neo4jResultIterator defines the interface for iterating over Neo4j query results.
// This is implemented by both neo4j.Result and CollectedResult types.
type Neo4jResultIterator interface {
    Next(context.Context) bool
    Record() *neo4j.Record
    Err() error
}
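For reference, a minimal sketch of what an in-memory implementation of this interface could look like (the `CollectedResult` named in the comment; the fields here are assumptions, not the codebase's actual type):

```go
// CollectedResult replays pre-fetched records through the same interface a
// live neo4j result satisfies, so consumers don't care which one they get.
type CollectedResult struct {
    records []*neo4j.Record
    pos     int
}

// Next advances to the following record, returning false when exhausted.
func (c *CollectedResult) Next(ctx context.Context) bool {
    if c.pos >= len(c.records) {
        return false
    }
    c.pos++
    return true
}

// Record returns the record selected by the last successful Next call.
func (c *CollectedResult) Record() *neo4j.Record {
    if c.pos == 0 || c.pos > len(c.records) {
        return nil
    }
    return c.records[c.pos-1]
}

// Err always reports nil: the records were already fetched successfully.
func (c *CollectedResult) Err() error { return nil }
```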
@@ -15,6 +15,8 @@ docker run -d --name neo4j \

### 2. Configure Environment

+All Neo4j configuration is defined in `app/config/config.go` and visible via `./orly help`:
+
```bash
export ORLY_DB_TYPE=neo4j
export ORLY_NEO4J_URI=bolt://localhost:7687
@@ -22,6 +24,8 @@ export ORLY_NEO4J_USER=neo4j
export ORLY_NEO4J_PASSWORD=password
```

+> **Note:** Configuration is centralized in `app/config/config.go`. Do not use `os.Getenv()` directly in package code - all environment variables should be passed via the `database.DatabaseConfig` struct.
+
### 3. Run ORLY

```bash
@@ -3,10 +3,11 @@ package neo4j
import (
    "context"
    "fmt"
+   "time"

-   "next.orly.dev/pkg/database/indexes/types"
    "git.mleku.dev/mleku/nostr/encoders/event"
    "git.mleku.dev/mleku/nostr/encoders/hex"
+   "next.orly.dev/pkg/database/indexes/types"
)

// DeleteEvent deletes an event by its ID
@@ -39,10 +40,60 @@ func (n *N) DeleteEventBySerial(c context.Context, ser *types.Uint40, ev *event.
    return nil
}

-// DeleteExpired deletes expired events (stub implementation)
+// DeleteExpired deletes expired events based on NIP-40 expiration tags
+// Events with an expiration property > 0 and <= current time are deleted
func (n *N) DeleteExpired() {
-   // This would need to implement expiration logic based on event.expiration tag (NIP-40)
-   // For now, this is a no-op
+   ctx := context.Background()
+   now := time.Now().Unix()
+
+   // Query for expired events (expiration > 0 means it has an expiration, and <= now means it's expired)
+   cypher := `
+       MATCH (e:Event)
+       WHERE e.expiration > 0 AND e.expiration <= $now
+       RETURN e.serial AS serial, e.id AS id
+       LIMIT 1000`
+
+   params := map[string]any{"now": now}
+
+   result, err := n.ExecuteRead(ctx, cypher, params)
+   if err != nil {
+       n.Logger.Warningf("failed to query expired events: %v", err)
+       return
+   }
+
+   // Collect serials to delete
+   var deleteCount int
+   for result.Next(ctx) {
+       record := result.Record()
+       if record == nil {
+           continue
+       }
+
+       idRaw, found := record.Get("id")
+       if !found {
+           continue
+       }
+
+       idStr, ok := idRaw.(string)
+       if !ok {
+           continue
+       }
+
+       // Delete the expired event
+       deleteCypher := "MATCH (e:Event {id: $id}) DETACH DELETE e"
+       deleteParams := map[string]any{"id": idStr}
+
+       if _, err := n.ExecuteWrite(ctx, deleteCypher, deleteParams); err != nil {
+           n.Logger.Warningf("failed to delete expired event %s: %v", idStr[:16], err)
+           continue
+       }
+
+       deleteCount++
+   }
+
+   if deleteCount > 0 {
+       n.Logger.Infof("deleted %d expired events", deleteCount)
+   }
}

// ProcessDelete processes a kind 5 deletion event

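The loop above issues one write transaction per expired event. As a sketch only (assuming the same `ExecuteWrite` helper; this is not the code's current behavior), the whole sweep can be expressed as one batched Cypher statement:

```go
// deleteExpiredBatch deletes up to 1000 expired events in a single write,
// avoiding a round trip per event.
func (n *N) deleteExpiredBatch(ctx context.Context) error {
    cypher := `
        MATCH (e:Event)
        WHERE e.expiration > 0 AND e.expiration <= $now
        WITH e LIMIT 1000
        DETACH DELETE e`
    _, err := n.ExecuteWrite(ctx, cypher, map[string]any{"now": time.Now().Unix()})
    return err
}
```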
555  pkg/neo4j/delete_test.go  Normal file
@@ -0,0 +1,555 @@
package neo4j

import (
    "context"
    "os"
    "testing"

    "git.mleku.dev/mleku/nostr/encoders/event"
    "git.mleku.dev/mleku/nostr/encoders/filter"
    "git.mleku.dev/mleku/nostr/encoders/hex"
    "git.mleku.dev/mleku/nostr/encoders/kind"
    "git.mleku.dev/mleku/nostr/encoders/tag"
    "git.mleku.dev/mleku/nostr/encoders/timestamp"
    "git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)

func TestDeleteEvent(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    signer, err := p8k.New()
    if err != nil {
        t.Fatalf("Failed to create signer: %v", err)
    }
    if err := signer.Generate(); err != nil {
        t.Fatalf("Failed to generate keypair: %v", err)
    }

    // Create and save event
    ev := event.New()
    ev.Pubkey = signer.Pub()
    ev.CreatedAt = timestamp.Now().V
    ev.Kind = 1
    ev.Content = []byte("Event to be deleted")

    if err := ev.Sign(signer); err != nil {
        t.Fatalf("Failed to sign event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, ev); err != nil {
        t.Fatalf("Failed to save event: %v", err)
    }

    // Verify event exists
    evs, err := db.QueryEvents(ctx, &filter.F{
        Ids: tag.NewFromBytesSlice(ev.ID),
    })
    if err != nil {
        t.Fatalf("Failed to query event: %v", err)
    }
    if len(evs) != 1 {
        t.Fatalf("Expected 1 event before deletion, got %d", len(evs))
    }

    // Delete the event
    if err := db.DeleteEvent(ctx, ev.ID[:]); err != nil {
        t.Fatalf("Failed to delete event: %v", err)
    }

    // Verify event is deleted
    evs, err = db.QueryEvents(ctx, &filter.F{
        Ids: tag.NewFromBytesSlice(ev.ID),
    })
    if err != nil {
        t.Fatalf("Failed to query after deletion: %v", err)
    }
    if len(evs) != 0 {
        t.Fatalf("Expected 0 events after deletion, got %d", len(evs))
    }

    t.Logf("✓ DeleteEvent successfully removed event")
}

func TestDeleteEventBySerial(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    signer, err := p8k.New()
    if err != nil {
        t.Fatalf("Failed to create signer: %v", err)
    }
    if err := signer.Generate(); err != nil {
        t.Fatalf("Failed to generate keypair: %v", err)
    }

    // Create and save event
    ev := event.New()
    ev.Pubkey = signer.Pub()
    ev.CreatedAt = timestamp.Now().V
    ev.Kind = 1
    ev.Content = []byte("Event to be deleted by serial")

    if err := ev.Sign(signer); err != nil {
        t.Fatalf("Failed to sign event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, ev); err != nil {
        t.Fatalf("Failed to save event: %v", err)
    }

    // Get serial
    serial, err := db.GetSerialById(ev.ID[:])
    if err != nil {
        t.Fatalf("Failed to get serial: %v", err)
    }

    // Delete by serial
    if err := db.DeleteEventBySerial(ctx, serial, ev); err != nil {
        t.Fatalf("Failed to delete event by serial: %v", err)
    }

    // Verify event is deleted
    evs, err := db.QueryEvents(ctx, &filter.F{
        Ids: tag.NewFromBytesSlice(ev.ID),
    })
    if err != nil {
        t.Fatalf("Failed to query after deletion: %v", err)
    }
    if len(evs) != 0 {
        t.Fatalf("Expected 0 events after deletion, got %d", len(evs))
    }

    t.Logf("✓ DeleteEventBySerial successfully removed event")
}

func TestProcessDelete_AuthorCanDeleteOwnEvent(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    signer, err := p8k.New()
    if err != nil {
        t.Fatalf("Failed to create signer: %v", err)
    }
    if err := signer.Generate(); err != nil {
        t.Fatalf("Failed to generate keypair: %v", err)
    }

    // Create and save original event
    originalEvent := event.New()
    originalEvent.Pubkey = signer.Pub()
    originalEvent.CreatedAt = timestamp.Now().V
    originalEvent.Kind = 1
    originalEvent.Content = []byte("This event will be deleted via kind 5")

    if err := originalEvent.Sign(signer); err != nil {
        t.Fatalf("Failed to sign event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, originalEvent); err != nil {
        t.Fatalf("Failed to save event: %v", err)
    }

    // Create kind 5 deletion event
    deleteEvent := event.New()
    deleteEvent.Pubkey = signer.Pub() // Same author
    deleteEvent.CreatedAt = timestamp.Now().V + 1
    deleteEvent.Kind = kind.Deletion.K
    deleteEvent.Content = []byte("Deleting my event")
    deleteEvent.Tags = tag.NewS(
        tag.NewFromAny("e", hex.Enc(originalEvent.ID[:])),
    )

    if err := deleteEvent.Sign(signer); err != nil {
        t.Fatalf("Failed to sign delete event: %v", err)
    }

    // Process deletion (no admins)
    if err := db.ProcessDelete(deleteEvent, nil); err != nil {
        t.Fatalf("Failed to process delete: %v", err)
    }

    // Verify original event is deleted
    evs, err := db.QueryEvents(ctx, &filter.F{
        Ids: tag.NewFromBytesSlice(originalEvent.ID),
    })
    if err != nil {
        t.Fatalf("Failed to query after deletion: %v", err)
    }
    if len(evs) != 0 {
        t.Fatalf("Expected 0 events after deletion, got %d", len(evs))
    }

    t.Logf("✓ ProcessDelete allowed author to delete own event")
}

func TestProcessDelete_OtherUserCannotDelete(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    alice, _ := p8k.New()
    alice.Generate()

    bob, _ := p8k.New()
    bob.Generate()

    // Alice creates an event
    aliceEvent := event.New()
    aliceEvent.Pubkey = alice.Pub()
    aliceEvent.CreatedAt = timestamp.Now().V
    aliceEvent.Kind = 1
    aliceEvent.Content = []byte("Alice's event")

    if err := aliceEvent.Sign(alice); err != nil {
        t.Fatalf("Failed to sign event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, aliceEvent); err != nil {
        t.Fatalf("Failed to save event: %v", err)
    }

    // Bob tries to delete Alice's event
    deleteEvent := event.New()
    deleteEvent.Pubkey = bob.Pub() // Different author
    deleteEvent.CreatedAt = timestamp.Now().V + 1
    deleteEvent.Kind = kind.Deletion.K
    deleteEvent.Tags = tag.NewS(
        tag.NewFromAny("e", hex.Enc(aliceEvent.ID[:])),
    )

    if err := deleteEvent.Sign(bob); err != nil {
        t.Fatalf("Failed to sign delete event: %v", err)
    }

    // Process deletion (Bob is not an admin)
    _ = db.ProcessDelete(deleteEvent, nil)

    // Verify Alice's event still exists
    evs, err := db.QueryEvents(ctx, &filter.F{
        Ids: tag.NewFromBytesSlice(aliceEvent.ID),
    })
    if err != nil {
        t.Fatalf("Failed to query: %v", err)
    }
    if len(evs) != 1 {
        t.Fatalf("Expected Alice's event to still exist, got %d events", len(evs))
    }

    t.Logf("✓ ProcessDelete correctly prevented unauthorized deletion")
}

func TestProcessDelete_AdminCanDeleteAnyEvent(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    alice, _ := p8k.New()
    alice.Generate()

    admin, _ := p8k.New()
    admin.Generate()

    // Alice creates an event
    aliceEvent := event.New()
    aliceEvent.Pubkey = alice.Pub()
    aliceEvent.CreatedAt = timestamp.Now().V
    aliceEvent.Kind = 1
    aliceEvent.Content = []byte("Alice's event to be deleted by admin")

    if err := aliceEvent.Sign(alice); err != nil {
        t.Fatalf("Failed to sign event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, aliceEvent); err != nil {
        t.Fatalf("Failed to save event: %v", err)
    }

    // Admin creates deletion event
    deleteEvent := event.New()
    deleteEvent.Pubkey = admin.Pub()
    deleteEvent.CreatedAt = timestamp.Now().V + 1
    deleteEvent.Kind = kind.Deletion.K
    deleteEvent.Tags = tag.NewS(
        tag.NewFromAny("e", hex.Enc(aliceEvent.ID[:])),
    )

    if err := deleteEvent.Sign(admin); err != nil {
        t.Fatalf("Failed to sign delete event: %v", err)
    }

    // Process deletion with admin pubkey
    adminPubkeys := [][]byte{admin.Pub()}
    if err := db.ProcessDelete(deleteEvent, adminPubkeys); err != nil {
        t.Fatalf("Failed to process delete: %v", err)
    }

    // Verify Alice's event is deleted
    evs, err := db.QueryEvents(ctx, &filter.F{
        Ids: tag.NewFromBytesSlice(aliceEvent.ID),
    })
    if err != nil {
        t.Fatalf("Failed to query: %v", err)
    }
    if len(evs) != 0 {
        t.Fatalf("Expected Alice's event to be deleted, got %d events", len(evs))
    }

    t.Logf("✓ ProcessDelete allowed admin to delete event")
}

func TestCheckForDeleted(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    signer, err := p8k.New()
    if err != nil {
        t.Fatalf("Failed to create signer: %v", err)
    }
    if err := signer.Generate(); err != nil {
        t.Fatalf("Failed to generate keypair: %v", err)
    }

    // Create target event
    targetEvent := event.New()
    targetEvent.Pubkey = signer.Pub()
    targetEvent.CreatedAt = timestamp.Now().V
    targetEvent.Kind = 1
    targetEvent.Content = []byte("Target event")

    if err := targetEvent.Sign(signer); err != nil {
        t.Fatalf("Failed to sign target event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, targetEvent); err != nil {
        t.Fatalf("Failed to save target event: %v", err)
    }

    // Check that event is not deleted (no deletion event exists)
    err = db.CheckForDeleted(targetEvent, nil)
    if err != nil {
        t.Fatalf("Expected no error for non-deleted event, got: %v", err)
    }

    // Create deletion event that references target
    deleteEvent := event.New()
    deleteEvent.Pubkey = signer.Pub()
    deleteEvent.CreatedAt = timestamp.Now().V + 1
    deleteEvent.Kind = kind.Deletion.K
    deleteEvent.Tags = tag.NewS(
        tag.NewFromAny("e", hex.Enc(targetEvent.ID[:])),
    )

    if err := deleteEvent.Sign(signer); err != nil {
        t.Fatalf("Failed to sign delete event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, deleteEvent); err != nil {
        t.Fatalf("Failed to save delete event: %v", err)
    }

    // Now check should return error (event has been deleted)
    err = db.CheckForDeleted(targetEvent, nil)
    if err == nil {
        t.Fatal("Expected error for deleted event")
    }

    t.Logf("✓ CheckForDeleted correctly detected deletion event")
}

func TestReplaceableEventDeletion(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    signer, err := p8k.New()
    if err != nil {
        t.Fatalf("Failed to create signer: %v", err)
    }
    if err := signer.Generate(); err != nil {
        t.Fatalf("Failed to generate keypair: %v", err)
    }

    // Create replaceable event (kind 0 - profile)
    profileEvent := event.New()
    profileEvent.Pubkey = signer.Pub()
    profileEvent.CreatedAt = timestamp.Now().V
    profileEvent.Kind = 0
    profileEvent.Content = []byte(`{"name":"Test User"}`)

    if err := profileEvent.Sign(signer); err != nil {
        t.Fatalf("Failed to sign event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, profileEvent); err != nil {
        t.Fatalf("Failed to save event: %v", err)
    }

    // Verify event exists
    evs, err := db.QueryEvents(ctx, &filter.F{
        Kinds:   kind.NewS(kind.New(0)),
        Authors: tag.NewFromBytesSlice(signer.Pub()),
    })
    if err != nil {
        t.Fatalf("Failed to query: %v", err)
    }
    if len(evs) != 1 {
        t.Fatalf("Expected 1 profile event, got %d", len(evs))
    }

    // Create a newer replaceable event (replaces the old one)
    newerProfileEvent := event.New()
    newerProfileEvent.Pubkey = signer.Pub()
    newerProfileEvent.CreatedAt = timestamp.Now().V + 100
    newerProfileEvent.Kind = 0
    newerProfileEvent.Content = []byte(`{"name":"Updated User"}`)

    if err := newerProfileEvent.Sign(signer); err != nil {
        t.Fatalf("Failed to sign newer event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, newerProfileEvent); err != nil {
        t.Fatalf("Failed to save newer event: %v", err)
    }

    // Query should return only the newer event
    evs, err = db.QueryEvents(ctx, &filter.F{
        Kinds:   kind.NewS(kind.New(0)),
        Authors: tag.NewFromBytesSlice(signer.Pub()),
    })
    if err != nil {
        t.Fatalf("Failed to query: %v", err)
    }
    if len(evs) != 1 {
        t.Fatalf("Expected 1 profile event after replacement, got %d", len(evs))
    }

    if hex.Enc(evs[0].ID[:]) != hex.Enc(newerProfileEvent.ID[:]) {
        t.Fatal("Expected newer profile event to be returned")
    }

    t.Logf("✓ Replaceable event correctly replaced by newer version")
}
570  pkg/neo4j/expiration_test.go  Normal file
@@ -0,0 +1,570 @@
package neo4j

import (
    "bytes"
    "context"
    "encoding/json"
    "os"
    "testing"
    "time"

    "git.mleku.dev/mleku/nostr/encoders/event"
    "git.mleku.dev/mleku/nostr/encoders/filter"
    "git.mleku.dev/mleku/nostr/encoders/hex"
    "git.mleku.dev/mleku/nostr/encoders/kind"
    "git.mleku.dev/mleku/nostr/encoders/tag"
    "git.mleku.dev/mleku/nostr/encoders/timestamp"
    "git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)

func TestExpiration_SaveEventWithExpiration(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    signer, err := p8k.New()
    if err != nil {
        t.Fatalf("Failed to create signer: %v", err)
    }
    if err := signer.Generate(); err != nil {
        t.Fatalf("Failed to generate keypair: %v", err)
    }

    // Create event with expiration tag (expires in 1 hour)
    futureExpiration := time.Now().Add(1 * time.Hour).Unix()

    ev := event.New()
    ev.Pubkey = signer.Pub()
    ev.CreatedAt = timestamp.Now().V
    ev.Kind = 1
    ev.Content = []byte("Event with expiration")
    ev.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.From(futureExpiration).String()))

    if err := ev.Sign(signer); err != nil {
        t.Fatalf("Failed to sign event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, ev); err != nil {
        t.Fatalf("Failed to save event: %v", err)
    }

    // Query the event to verify it was saved
    evs, err := db.QueryEvents(ctx, &filter.F{
        Ids: tag.NewFromBytesSlice(ev.ID),
    })
    if err != nil {
        t.Fatalf("Failed to query event: %v", err)
    }

    if len(evs) != 1 {
        t.Fatalf("Expected 1 event, got %d", len(evs))
    }

    t.Logf("✓ Event with expiration tag saved successfully")
}

func TestExpiration_DeleteExpiredEvents(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    signer, err := p8k.New()
    if err != nil {
        t.Fatalf("Failed to create signer: %v", err)
    }
    if err := signer.Generate(); err != nil {
        t.Fatalf("Failed to generate keypair: %v", err)
    }

    // Create an expired event (expired 1 hour ago)
    pastExpiration := time.Now().Add(-1 * time.Hour).Unix()

    expiredEv := event.New()
    expiredEv.Pubkey = signer.Pub()
    expiredEv.CreatedAt = timestamp.Now().V - 7200 // 2 hours ago
    expiredEv.Kind = 1
    expiredEv.Content = []byte("Expired event")
    expiredEv.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.From(pastExpiration).String()))

    if err := expiredEv.Sign(signer); err != nil {
        t.Fatalf("Failed to sign expired event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, expiredEv); err != nil {
        t.Fatalf("Failed to save expired event: %v", err)
    }

    // Create a non-expired event (expires in 1 hour)
    futureExpiration := time.Now().Add(1 * time.Hour).Unix()

    validEv := event.New()
    validEv.Pubkey = signer.Pub()
    validEv.CreatedAt = timestamp.Now().V
    validEv.Kind = 1
    validEv.Content = []byte("Valid event")
    validEv.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.From(futureExpiration).String()))

    if err := validEv.Sign(signer); err != nil {
        t.Fatalf("Failed to sign valid event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, validEv); err != nil {
        t.Fatalf("Failed to save valid event: %v", err)
    }

    // Create an event without expiration
    permanentEv := event.New()
    permanentEv.Pubkey = signer.Pub()
    permanentEv.CreatedAt = timestamp.Now().V + 1
    permanentEv.Kind = 1
    permanentEv.Content = []byte("Permanent event (no expiration)")

    if err := permanentEv.Sign(signer); err != nil {
        t.Fatalf("Failed to sign permanent event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, permanentEv); err != nil {
        t.Fatalf("Failed to save permanent event: %v", err)
    }

    // Verify all 3 events exist
    evs, err := db.QueryEvents(ctx, &filter.F{
        Authors: tag.NewFromBytesSlice(signer.Pub()),
    })
    if err != nil {
        t.Fatalf("Failed to query events: %v", err)
    }
    if len(evs) != 3 {
        t.Fatalf("Expected 3 events before deletion, got %d", len(evs))
    }

    // Run DeleteExpired
    db.DeleteExpired()

    // Verify only expired event was deleted
    evs, err = db.QueryEvents(ctx, &filter.F{
        Authors: tag.NewFromBytesSlice(signer.Pub()),
    })
    if err != nil {
        t.Fatalf("Failed to query events after deletion: %v", err)
    }

    if len(evs) != 2 {
        t.Fatalf("Expected 2 events after deletion (expired removed), got %d", len(evs))
    }

    // Verify the correct events remain
    foundValid := false
    foundPermanent := false
    for _, ev := range evs {
        if hex.Enc(ev.ID[:]) == hex.Enc(validEv.ID[:]) {
            foundValid = true
        }
        if hex.Enc(ev.ID[:]) == hex.Enc(permanentEv.ID[:]) {
            foundPermanent = true
        }
    }

    if !foundValid {
        t.Fatal("Valid event (with future expiration) was incorrectly deleted")
    }
    if !foundPermanent {
        t.Fatal("Permanent event (no expiration) was incorrectly deleted")
    }

    t.Logf("✓ DeleteExpired correctly removed only expired events")
}

func TestExpiration_NoExpirationTag(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    signer, err := p8k.New()
    if err != nil {
        t.Fatalf("Failed to create signer: %v", err)
    }
    if err := signer.Generate(); err != nil {
        t.Fatalf("Failed to generate keypair: %v", err)
    }

    // Create event without expiration tag
    ev := event.New()
    ev.Pubkey = signer.Pub()
    ev.CreatedAt = timestamp.Now().V
    ev.Kind = 1
    ev.Content = []byte("Event without expiration")

    if err := ev.Sign(signer); err != nil {
        t.Fatalf("Failed to sign event: %v", err)
    }

    if _, err := db.SaveEvent(ctx, ev); err != nil {
        t.Fatalf("Failed to save event: %v", err)
    }

    // Run DeleteExpired - event should not be deleted
    db.DeleteExpired()

    // Verify event still exists
    evs, err := db.QueryEvents(ctx, &filter.F{
        Ids: tag.NewFromBytesSlice(ev.ID),
    })
    if err != nil {
        t.Fatalf("Failed to query event: %v", err)
    }

    if len(evs) != 1 {
        t.Fatalf("Expected 1 event (no expiration should not be deleted), got %d", len(evs))
    }

    t.Logf("✓ Events without expiration tag are not deleted")
}

func TestExport_AllEvents(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    signer, err := p8k.New()
    if err != nil {
        t.Fatalf("Failed to create signer: %v", err)
    }
    if err := signer.Generate(); err != nil {
        t.Fatalf("Failed to generate keypair: %v", err)
    }

    // Create and save some events
    for i := 0; i < 5; i++ {
        ev := event.New()
        ev.Pubkey = signer.Pub()
        ev.CreatedAt = timestamp.Now().V + int64(i)
        ev.Kind = 1
        ev.Content = []byte("Test event for export")
        ev.Tags = tag.NewS(tag.NewFromAny("t", "test"))

        if err := ev.Sign(signer); err != nil {
            t.Fatalf("Failed to sign event: %v", err)
        }

        if _, err := db.SaveEvent(ctx, ev); err != nil {
            t.Fatalf("Failed to save event: %v", err)
        }
    }

    // Export all events
    var buf bytes.Buffer
    db.Export(ctx, &buf)

    // Parse the exported JSONL
    lines := bytes.Split(buf.Bytes(), []byte("\n"))
    validLines := 0
    for _, line := range lines {
        if len(line) == 0 {
            continue
        }
        var ev event.E
        if err := json.Unmarshal(line, &ev); err != nil {
            t.Fatalf("Failed to parse exported event: %v", err)
        }
        validLines++
    }

    if validLines != 5 {
        t.Fatalf("Expected 5 exported events, got %d", validLines)
    }

    t.Logf("✓ Export all events returned %d events in JSONL format", validLines)
}

func TestExport_FilterByPubkey(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    // Create two signers
    alice, _ := p8k.New()
    alice.Generate()

    bob, _ := p8k.New()
    bob.Generate()

    baseTs := timestamp.Now().V

    // Create events from Alice
    for i := 0; i < 3; i++ {
        ev := event.New()
        ev.Pubkey = alice.Pub()
        ev.CreatedAt = baseTs + int64(i)
        ev.Kind = 1
        ev.Content = []byte("Alice's event")

        if err := ev.Sign(alice); err != nil {
            t.Fatalf("Failed to sign event: %v", err)
        }

        if _, err := db.SaveEvent(ctx, ev); err != nil {
            t.Fatalf("Failed to save event: %v", err)
        }
    }

    // Create events from Bob
    for i := 0; i < 2; i++ {
        ev := event.New()
        ev.Pubkey = bob.Pub()
        ev.CreatedAt = baseTs + int64(i) + 10
        ev.Kind = 1
        ev.Content = []byte("Bob's event")

        if err := ev.Sign(bob); err != nil {
            t.Fatalf("Failed to sign event: %v", err)
        }

        if _, err := db.SaveEvent(ctx, ev); err != nil {
            t.Fatalf("Failed to save event: %v", err)
        }
    }

    // Export only Alice's events
    var buf bytes.Buffer
    db.Export(ctx, &buf, alice.Pub())

    // Parse the exported JSONL
    lines := bytes.Split(buf.Bytes(), []byte("\n"))
    validLines := 0
    alicePubkey := hex.Enc(alice.Pub())
    for _, line := range lines {
        if len(line) == 0 {
            continue
        }
        var ev event.E
        if err := json.Unmarshal(line, &ev); err != nil {
            t.Fatalf("Failed to parse exported event: %v", err)
        }
        if hex.Enc(ev.Pubkey[:]) != alicePubkey {
            t.Fatalf("Exported event has wrong pubkey (expected Alice)")
        }
        validLines++
    }

    if validLines != 3 {
        t.Fatalf("Expected 3 events from Alice, got %d", validLines)
    }

    t.Logf("✓ Export with pubkey filter returned %d events from Alice only", validLines)
}

func TestExport_Empty(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    // Export from empty database
    var buf bytes.Buffer
    db.Export(ctx, &buf)

    // Should be empty or just whitespace
    content := bytes.TrimSpace(buf.Bytes())
    if len(content) != 0 {
        t.Fatalf("Expected empty export, got: %s", string(content))
    }

    t.Logf("✓ Export from empty database returns empty result")
}

func TestImportExport_RoundTrip(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    if neo4jURI == "" {
        t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
    }

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    tempDir := t.TempDir()
    db, err := New(ctx, cancel, tempDir, "debug")
    if err != nil {
        t.Fatalf("Failed to create database: %v", err)
    }
    defer db.Close()

    <-db.Ready()

    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    signer, _ := p8k.New()
    signer.Generate()

    // Create original events
    originalEvents := make([]*event.E, 3)
    for i := 0; i < 3; i++ {
        ev := event.New()
        ev.Pubkey = signer.Pub()
        ev.CreatedAt = timestamp.Now().V + int64(i)
        ev.Kind = 1
        ev.Content = []byte("Round trip test event")
        ev.Tags = tag.NewS(tag.NewFromAny("t", "roundtrip"))

        if err := ev.Sign(signer); err != nil {
            t.Fatalf("Failed to sign event: %v", err)
        }

        if _, err := db.SaveEvent(ctx, ev); err != nil {
            t.Fatalf("Failed to save event: %v", err)
        }
        originalEvents[i] = ev
    }

    // Export events
    var buf bytes.Buffer
    db.Export(ctx, &buf)

    // Wipe database
    if err := db.Wipe(); err != nil {
        t.Fatalf("Failed to wipe database: %v", err)
    }

    // Verify database is empty
    evs, err := db.QueryEvents(ctx, &filter.F{
        Kinds: kind.NewS(kind.New(1)),
    })
    if err != nil {
        t.Fatalf("Failed to query events: %v", err)
    }
    if len(evs) != 0 {
        t.Fatalf("Expected 0 events after wipe, got %d", len(evs))
    }

    // Import events
    db.Import(bytes.NewReader(buf.Bytes()))

    // Verify events were restored
    evs, err = db.QueryEvents(ctx, &filter.F{
        Authors: tag.NewFromBytesSlice(signer.Pub()),
    })
    if err != nil {
        t.Fatalf("Failed to query imported events: %v", err)
    }

    if len(evs) != 3 {
        t.Fatalf("Expected 3 imported events, got %d", len(evs))
    }

    // Verify event IDs match
    importedIDs := make(map[string]bool)
    for _, ev := range evs {
        importedIDs[hex.Enc(ev.ID[:])] = true
    }

    for _, orig := range originalEvents {
        if !importedIDs[hex.Enc(orig.ID[:])] {
            t.Fatalf("Original event %s not found after import", hex.Enc(orig.ID[:]))
        }
    }

    t.Logf("✓ Export/Import round trip preserved %d events correctly", len(evs))
}
502
pkg/neo4j/fetch-event_test.go
Normal file
502
pkg/neo4j/fetch-event_test.go
Normal file
@@ -0,0 +1,502 @@
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/filter"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
"git.mleku.dev/mleku/nostr/encoders/timestamp"
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
)
|
||||
|
||||
func TestFetchEventBySerial(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create signer: %v", err)
|
||||
}
|
||||
if err := signer.Generate(); err != nil {
|
||||
t.Fatalf("Failed to generate keypair: %v", err)
|
||||
}
|
||||
|
||||
// Create and save a test event
|
||||
ev := event.New()
|
||||
ev.Pubkey = signer.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
ev.Kind = 1
|
||||
ev.Content = []byte("Test event for fetch by serial")
|
||||
|
||||
if err := ev.Sign(signer); err != nil {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Get the serial for this event
|
||||
serial, err := db.GetSerialById(ev.ID[:])
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serial by ID: %v", err)
|
||||
}
|
||||
|
||||
// Fetch event by serial
|
||||
fetchedEvent, err := db.FetchEventBySerial(serial)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to fetch event by serial: %v", err)
|
||||
}
|
||||
|
||||
if fetchedEvent == nil {
|
||||
t.Fatal("Expected fetched event to be non-nil")
|
||||
}
|
||||
|
||||
// Verify event properties
|
||||
if hex.Enc(fetchedEvent.ID[:]) != hex.Enc(ev.ID[:]) {
|
||||
t.Fatalf("Event ID mismatch: got %s, expected %s",
|
||||
hex.Enc(fetchedEvent.ID[:]), hex.Enc(ev.ID[:]))
|
||||
}
|
||||
|
||||
if fetchedEvent.Kind != ev.Kind {
|
||||
t.Fatalf("Kind mismatch: got %d, expected %d", fetchedEvent.Kind, ev.Kind)
|
||||
}
|
||||
|
||||
if hex.Enc(fetchedEvent.Pubkey[:]) != hex.Enc(ev.Pubkey[:]) {
|
||||
t.Fatalf("Pubkey mismatch")
|
||||
}
|
||||
|
||||
if fetchedEvent.CreatedAt != ev.CreatedAt {
|
||||
t.Fatalf("CreatedAt mismatch: got %d, expected %d",
|
||||
fetchedEvent.CreatedAt, ev.CreatedAt)
|
||||
}
|
||||
|
||||
t.Logf("✓ FetchEventBySerial returned correct event")
|
||||
}
|
||||
|
||||
func TestFetchEventBySerial_NonExistent(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
// Try to fetch with non-existent serial
|
||||
nonExistentSerial := &types.Uint40{}
|
||||
nonExistentSerial.Set(0xFFFFFFFFFF) // Max value
|
||||
|
||||
_, err = db.FetchEventBySerial(nonExistentSerial)
|
||||
if err == nil {
|
||||
t.Fatal("Expected error for non-existent serial")
|
||||
}
|
||||
|
||||
t.Logf("✓ FetchEventBySerial correctly returned error for non-existent serial")
|
||||
}
|
||||
|
||||
func TestFetchEventsBySerials(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create and save multiple events
	var serials []*types.Uint40
	eventIDs := make(map[uint64]string)

	for i := 0; i < 5; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Test event")

		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}

		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}

		serial, err := db.GetSerialById(ev.ID[:])
		if err != nil {
			t.Fatalf("Failed to get serial: %v", err)
		}

		serials = append(serials, serial)
		eventIDs[serial.Get()] = hex.Enc(ev.ID[:])
	}

	// Fetch all events by serials
	events, err := db.FetchEventsBySerials(serials)
	if err != nil {
		t.Fatalf("Failed to fetch events by serials: %v", err)
	}

	if len(events) != 5 {
		t.Fatalf("Expected 5 events, got %d", len(events))
	}

	// Verify each event
	for serial, expectedID := range eventIDs {
		ev, exists := events[serial]
		if !exists {
			t.Fatalf("Event with serial %d not found", serial)
		}
		if hex.Enc(ev.ID[:]) != expectedID {
			t.Fatalf("Event ID mismatch for serial %d", serial)
		}
	}

	t.Logf("✓ FetchEventsBySerials returned %d correct events", len(events))
}

func TestGetSerialById(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create and save event
	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Test event")

	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}

	// Get serial by ID
	serial, err := db.GetSerialById(ev.ID[:])
	if err != nil {
		t.Fatalf("Failed to get serial by ID: %v", err)
	}

	if serial == nil {
		t.Fatal("Expected serial to be non-nil")
	}

	if serial.Get() == 0 {
		t.Fatal("Expected non-zero serial")
	}

	t.Logf("✓ GetSerialById returned serial: %d", serial.Get())
}

func TestGetSerialById_NonExistent(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	// Try to get serial for non-existent event
	fakeID, _ := hex.Dec("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")

	_, err = db.GetSerialById(fakeID)
	if err == nil {
		t.Fatal("Expected error for non-existent event ID")
	}

	t.Logf("✓ GetSerialById correctly returned error for non-existent ID")
}

func TestGetSerialsByIds(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create and save multiple events
	ids := tag.NewS()
	for i := 0; i < 3; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Test event")

		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}

		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}

		ids.Append(tag.NewFromAny("", hex.Enc(ev.ID[:])))
	}

	// Get serials by IDs
	serials, err := db.GetSerialsByIds(ids)
	if err != nil {
		t.Fatalf("Failed to get serials by IDs: %v", err)
	}

	if len(serials) != 3 {
		t.Fatalf("Expected 3 serials, got %d", len(serials))
	}

	t.Logf("✓ GetSerialsByIds returned %d serials", len(serials))
}

func TestGetFullIdPubkeyBySerial(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create and save event
	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Test event")

	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}

	// Get serial
	serial, err := db.GetSerialById(ev.ID[:])
	if err != nil {
		t.Fatalf("Failed to get serial: %v", err)
	}

	// Get full ID and pubkey
	idPkTs, err := db.GetFullIdPubkeyBySerial(serial)
	if err != nil {
		t.Fatalf("Failed to get full ID and pubkey: %v", err)
	}

	if idPkTs == nil {
		t.Fatal("Expected non-nil result")
	}

	if hex.Enc(idPkTs.Id) != hex.Enc(ev.ID[:]) {
		t.Fatalf("ID mismatch")
	}

	if hex.Enc(idPkTs.Pub) != hex.Enc(ev.Pubkey[:]) {
		t.Fatalf("Pubkey mismatch")
	}

	if idPkTs.Ts != ev.CreatedAt {
		t.Fatalf("Timestamp mismatch")
	}

	t.Logf("✓ GetFullIdPubkeyBySerial returned correct data")
}

func TestQueryForSerials(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create and save events
	for i := 0; i < 5; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Test event")

		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}

		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
	}

	// Query for serials
	serials, err := db.QueryForSerials(ctx, &filter.F{
		Authors: tag.NewFromBytesSlice(signer.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query for serials: %v", err)
	}

	if len(serials) != 5 {
		t.Fatalf("Expected 5 serials, got %d", len(serials))
	}

	t.Logf("✓ QueryForSerials returned %d serials", len(serials))
}
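Taken together, these tests pin down the serial round-trip contract: GetSerialById maps a 32-byte event ID to its Uint40 serial, and FetchEventBySerial maps that serial back to the stored event. The condensed sketch below is a reading aid only, not part of the diff; the serialStore interface and the *event.E return type are assumptions inferred from the calls in the tests, so adjust the names to the package's real types.

// Hypothetical sketch (not part of the diff): the round-trip the tests above
// verify. serialStore and *event.E are assumed names; per the tests, the
// concrete store returned by New provides these two methods.
type serialStore interface {
	GetSerialById(id []byte) (*types.Uint40, error)
	FetchEventBySerial(ser *types.Uint40) (*event.E, error)
}

func fetchBySerial(db serialStore, id []byte) (*event.E, error) {
	ser, err := db.GetSerialById(id) // errors if no stored event has this ID
	if err != nil {
		return nil, err
	}
	return db.FetchEventBySerial(ser) // loads the full event by its serial
}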
103	pkg/neo4j/hex_utils.go	Normal file
@@ -0,0 +1,103 @@
// Package neo4j provides hex utilities for normalizing pubkeys and event IDs.
//
// The nostr library applies a binary optimization to e/p tags, storing
// 64-character hex strings as 33-byte binary values (32 bytes + null
// terminator). This file provides utilities to ensure all pubkeys and event
// IDs stored in Neo4j are in a consistent lowercase hex format.
package neo4j

import (
	"strings"

	"git.mleku.dev/mleku/nostr/encoders/hex"
	"git.mleku.dev/mleku/nostr/encoders/tag"
)

// Tag binary encoding constants (matching the nostr library)
const (
	// BinaryEncodedLen is the length of a binary-encoded 32-byte hash with null terminator
	BinaryEncodedLen = 33
	// HexEncodedLen is the length of a hex-encoded 32-byte hash (pubkey or event ID)
	HexEncodedLen = 64
	// HashLen is the raw length of a hash (pubkey/event ID)
	HashLen = 32
)

// IsBinaryEncoded reports whether a value is stored in the nostr library's
// binary-optimized format.
func IsBinaryEncoded(val []byte) bool {
	return len(val) == BinaryEncodedLen && val[HashLen] == 0
}

// NormalizePubkeyHex ensures a pubkey/event ID is in lowercase hex format.
// It handles:
//   - binary-encoded values (33 bytes with null terminator) -> converted to lowercase hex
//   - uppercase hex strings -> converted to lowercase
//   - already-lowercase hex -> returned as-is
//
// This should be used for all pubkeys and event IDs before storing in Neo4j
// to prevent duplicate nodes due to case differences.
func NormalizePubkeyHex(val []byte) string {
	// Handle binary-encoded values from the nostr library
	if IsBinaryEncoded(val) {
		// Convert binary to lowercase hex
		return hex.Enc(val[:HashLen])
	}

	// Handle hex strings (which may be uppercase when they come from external sources)
	if len(val) == HexEncodedLen {
		return strings.ToLower(string(val))
	}

	// For other lengths (possibly prefixes), lowercase the hex
	return strings.ToLower(string(val))
}

// ExtractPTagValue extracts a pubkey from a p-tag, handling binary encoding.
// It returns a lowercase hex string suitable for Neo4j storage, or an empty
// string if the tag doesn't have a valid value.
func ExtractPTagValue(t *tag.T) string {
	if t == nil || len(t.T) < 2 {
		return ""
	}

	// Use ValueHex(), which properly handles both binary and hex formats
	hexVal := t.ValueHex()
	if len(hexVal) == 0 {
		return ""
	}

	// Ensure lowercase (ValueHex returns the library's encoding, which is
	// lowercase, but we normalize anyway for safety with external data)
	return strings.ToLower(string(hexVal))
}

// ExtractETagValue extracts an event ID from an e-tag, handling binary encoding.
// It returns a lowercase hex string suitable for Neo4j storage, or an empty
// string if the tag doesn't have a valid value.
func ExtractETagValue(t *tag.T) string {
	if t == nil || len(t.T) < 2 {
		return ""
	}

	// Use ValueHex(), which properly handles both binary and hex formats
	hexVal := t.ValueHex()
	if len(hexVal) == 0 {
		return ""
	}

	// Ensure lowercase
	return strings.ToLower(string(hexVal))
}

// IsValidHexPubkey reports whether s is a valid 64-character hex pubkey.
func IsValidHexPubkey(s string) bool {
	if len(s) != HexEncodedLen {
		return false
	}
	for _, c := range s {
		if !((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F')) {
			return false
		}
	}
	return true
}
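As a quick illustration of the normalization contract documented above, the hypothetical example test below (not part of the diff; it assumes the package's test file imports bytes, strings, and testing) shows that the 33-byte binary form and an uppercase hex form of the same hash normalize to one lowercase key, which is what prevents duplicate Neo4j nodes.

// Hypothetical example test for package neo4j; not part of the diff.
func TestNormalizePubkeyHex_Forms(t *testing.T) {
	hash := bytes.Repeat([]byte{0xab}, HashLen)

	// The nostr library's binary-optimized form: 32 hash bytes + 0x00 terminator.
	bin := append(append([]byte{}, hash...), 0x00)

	lower := NormalizePubkeyHex(bin) // binary -> lowercase hex
	upper := NormalizePubkeyHex([]byte(strings.ToUpper(lower)))

	if lower != upper {
		t.Fatalf("forms diverged: %s vs %s", lower, upper)
	}
	if !IsValidHexPubkey(lower) {
		t.Fatalf("expected a valid 64-char hex pubkey, got %q", lower)
	}
}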