Compare commits

8 Commits

| SHA1 |
|---|
| de290aeb25 |
| 0a61f274d5 |
| c8fac06f24 |
| 64c6bd8bdd |
| 58d75bfc5a |
| 69e2c873d8 |
| 6c7d55ff7e |
| 3c17e975df |
@@ -153,7 +153,25 @@
       "Bash(git check-ignore:*)",
       "Bash(git commit:*)",
       "WebFetch(domain:www.npmjs.com)",
-      "Bash(git stash:*)"
+      "Bash(git stash:*)",
+      "WebFetch(domain:arxiv.org)",
+      "WebFetch(domain:hal.science)",
+      "WebFetch(domain:pkg.go.dev)",
+      "Bash(GOOS=js GOARCH=wasm CGO_ENABLED=0 go build:*)",
+      "Bash(GOOS=js GOARCH=wasm go doc:*)",
+      "Bash(GOOS=js GOARCH=wasm CGO_ENABLED=0 go test:*)",
+      "Bash(node --version:*)",
+      "Bash(npm install)",
+      "Bash(node run_wasm_tests.mjs:*)",
+      "Bash(go env:*)",
+      "Bash(GOROOT=/home/mleku/go node run_wasm_tests.mjs:*)",
+      "Bash(./orly:*)",
+      "Bash(./orly -version:*)",
+      "Bash(./orly --version:*)",
+      "Bash(GOOS=js GOARCH=wasm go test:*)",
+      "Bash(ls:*)",
+      "Bash(GOROOT=/home/mleku/go node:*)",
+      "Bash(GOOS=js GOARCH=wasm go build:*)"
     ],
     "deny": [],
     "ask": []
.claude/skills/cypher/SKILL.md (new file, 395 lines)
@@ -0,0 +1,395 @@
---
name: cypher
description: This skill should be used when writing, debugging, or discussing Neo4j Cypher queries. Provides comprehensive knowledge of Cypher syntax, query patterns, performance optimization, and common mistakes. Particularly useful for translating between domain models and graph queries.
---

# Neo4j Cypher Query Language

## Purpose

This skill provides expert-level guidance for writing Neo4j Cypher queries, including syntax, patterns, performance optimization, and common pitfalls. It is particularly tuned for the patterns used in this ORLY Nostr relay codebase.

## When to Use

Activate this skill when:
- Writing Cypher queries for Neo4j
- Debugging Cypher syntax errors
- Optimizing query performance
- Translating Nostr filter queries to Cypher
- Working with graph relationships and traversals
- Creating or modifying schema (indexes, constraints)

## Core Cypher Syntax

### Clause Order (CRITICAL)

Cypher requires clauses in a specific order. Violating this causes syntax errors:

```cypher
// CORRECT order of clauses
MATCH (n:Label)              // 1. Pattern matching
WHERE n.prop = value         // 2. Filtering
WITH n, count(*) AS cnt      // 3. Intermediate results (resets scope)
OPTIONAL MATCH (n)-[r]-()    // 4. Optional patterns
CREATE (m:NewNode)           // 5. Node/relationship creation
SET n.prop = value           // 6. Property updates
DELETE r                     // 7. Deletions
RETURN n.prop AS result      // 8. Return clause
ORDER BY result DESC         // 9. Ordering
SKIP 10 LIMIT 20             // 10. Pagination
```

### The WITH Clause (CRITICAL)

The `WITH` clause is required to transition between certain operations:

**Rule: Cannot use MATCH after CREATE without WITH**

```cypher
// WRONG - MATCH after CREATE without WITH
CREATE (e:Event {id: $id})
MATCH (ref:Event {id: $refId})   // ERROR!
CREATE (e)-[:REFERENCES]->(ref)

// CORRECT - Use WITH to carry variables forward
CREATE (e:Event {id: $id})
WITH e
MATCH (ref:Event {id: $refId})
CREATE (e)-[:REFERENCES]->(ref)
```

**Rule: WITH resets the scope**

Variables not included in WITH are no longer accessible:

```cypher
// WRONG - 'a' is lost after WITH
MATCH (a:Author), (e:Event)
WITH e
WHERE a.pubkey = $pubkey   // ERROR: 'a' not defined

// CORRECT - Include all needed variables
MATCH (a:Author), (e:Event)
WITH a, e
WHERE a.pubkey = $pubkey
```

### Node and Relationship Patterns

```cypher
// Nodes
(n)                        // Anonymous node
(n:Label)                  // Labeled node
(n:Label {prop: value})    // Node with properties
(n:Label:OtherLabel)       // Multiple labels

// Relationships
-[r]->                     // Directed, anonymous
-[r:TYPE]->                // Typed relationship
-[r:TYPE {prop: value}]->  // With properties
-[r:TYPE|OTHER]->          // Multiple types (OR)
-[*1..3]->                 // Variable length (1 to 3 hops)
-[*]->                     // Any number of hops
```

### MERGE vs CREATE

**CREATE**: Always creates new nodes/relationships (may create duplicates)

```cypher
CREATE (n:Event {id: $id})   // Creates even if id exists
```

**MERGE**: Finds or creates (idempotent)

```cypher
MERGE (n:Event {id: $id})    // Finds existing or creates new
ON CREATE SET n.created = timestamp()
ON MATCH SET n.accessed = timestamp()
```

**Best Practice**: Use MERGE for reference nodes, CREATE for unique events

```cypher
// Reference nodes - use MERGE (idempotent)
MERGE (author:Author {pubkey: $pubkey})

// Unique events - use CREATE (after checking existence)
CREATE (e:Event {id: $eventId, ...})
```

### OPTIONAL MATCH

Returns NULL for non-matching patterns (like LEFT JOIN):

```cypher
// Find events, with or without tags
MATCH (e:Event)
OPTIONAL MATCH (e)-[:TAGGED_WITH]->(t:Tag)
RETURN e.id, collect(t.value) AS tags
```

### Conditional Creation with FOREACH

To conditionally create relationships:

```cypher
// FOREACH trick for conditional operations
OPTIONAL MATCH (ref:Event {id: $refId})
FOREACH (ignoreMe IN CASE WHEN ref IS NOT NULL THEN [1] ELSE [] END |
  CREATE (e)-[:REFERENCES]->(ref)
)
```

### Aggregation Functions

```cypher
count(*)                     // Count all rows
count(n)                     // Count non-null values
count(DISTINCT n)            // Count unique values
collect(n)                   // Collect into list
collect(DISTINCT n)          // Collect unique values
sum(n.value)                 // Sum values
avg(n.value)                 // Average
min(n.value), max(n.value)   // Min/max
```

### String Operations

```cypher
// String matching
WHERE n.name STARTS WITH 'prefix'
WHERE n.name ENDS WITH 'suffix'
WHERE n.name CONTAINS 'substring'
WHERE n.name =~ 'regex.*pattern'   // Regex

// String functions
toLower(str), toUpper(str)
trim(str), ltrim(str), rtrim(str)
substring(str, start, length)
replace(str, search, replacement)
```

### List Operations

```cypher
// IN clause
WHERE n.kind IN [1, 7, 30023]
WHERE n.pubkey IN $pubkeyList

// List comprehension
[x IN list WHERE x > 0 | x * 2]

// UNWIND - expand list into rows
UNWIND $pubkeys AS pubkey
MERGE (u:User {pubkey: pubkey})
```

### Parameters

Always use parameters for values (security + performance):

```cypher
// CORRECT - parameterized
MATCH (e:Event {id: $eventId})
WHERE e.kind IN $kinds

// WRONG - string interpolation (injection risk!)
MATCH (e:Event {id: '" + eventId + "'})
```
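
For reference, a minimal sketch of passing parameters from Go with the official `neo4j-go-driver` v5 API; the URI, credentials, and values here are placeholders, not this project's configuration:

```go
package main

import (
	"context"
	"fmt"

	"github.com/neo4j/neo4j-go-driver/v5/neo4j"
)

func main() {
	ctx := context.Background()

	// Placeholder URI and credentials - substitute your deployment's values.
	driver, err := neo4j.NewDriverWithContext("neo4j://localhost:7687",
		neo4j.BasicAuth("neo4j", "password", ""))
	if err != nil {
		panic(err)
	}
	defer driver.Close(ctx)

	session := driver.NewSession(ctx, neo4j.SessionConfig{})
	defer session.Close(ctx)

	// Values travel in the params map, never in the query string, so the
	// driver handles escaping and Neo4j can cache the query plan.
	result, err := session.Run(ctx,
		"MATCH (e:Event {id: $eventId}) WHERE e.kind IN $kinds RETURN e.kind AS kind",
		map[string]any{"eventId": "abc123", "kinds": []int64{1, 7}})
	if err != nil {
		panic(err)
	}
	for result.Next(ctx) {
		kind, _ := result.Record().Get("kind")
		fmt.Println(kind)
	}
}
```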

## Schema Management

### Constraints

```cypher
// Uniqueness constraint (also creates index)
CREATE CONSTRAINT event_id_unique IF NOT EXISTS
FOR (e:Event) REQUIRE e.id IS UNIQUE

// Composite uniqueness
CREATE CONSTRAINT card_unique IF NOT EXISTS
FOR (c:Card) REQUIRE (c.customer_id, c.observee_pubkey) IS UNIQUE

// Drop constraint
DROP CONSTRAINT event_id_unique IF EXISTS
```

### Indexes

```cypher
// Single property index
CREATE INDEX event_kind IF NOT EXISTS FOR (e:Event) ON (e.kind)

// Composite index
CREATE INDEX event_kind_created IF NOT EXISTS
FOR (e:Event) ON (e.kind, e.created_at)

// Drop index
DROP INDEX event_kind IF EXISTS
```

## Common Query Patterns

### Find with Filter

```cypher
// Multiple conditions with OR
MATCH (e:Event)
WHERE e.kind IN $kinds
  AND (e.id = $id1 OR e.id = $id2)
  AND e.created_at >= $since
RETURN e
ORDER BY e.created_at DESC
LIMIT $limit
```

### Graph Traversal

```cypher
// Find events by author
MATCH (e:Event)-[:AUTHORED_BY]->(a:Author {pubkey: $pubkey})
RETURN e

// Find followers of a user
MATCH (follower:NostrUser)-[:FOLLOWS]->(user:NostrUser {pubkey: $pubkey})
RETURN follower.pubkey

// Find mutual follows (friends)
MATCH (a:NostrUser {pubkey: $pubkeyA})-[:FOLLOWS]->(b:NostrUser)
WHERE (b)-[:FOLLOWS]->(a)
RETURN b.pubkey AS mutual_friend
```

### Upsert Pattern

```cypher
MERGE (n:Node {key: $key})
ON CREATE SET
  n.created_at = timestamp(),
  n.value = $value
ON MATCH SET
  n.updated_at = timestamp(),
  n.value = $value
RETURN n
```

### Batch Processing with UNWIND

```cypher
// Create multiple nodes from list
UNWIND $items AS item
CREATE (n:Node {id: item.id, value: item.value})

// Create relationships from list
UNWIND $follows AS followed_pubkey
MERGE (followed:NostrUser {pubkey: followed_pubkey})
MERGE (author)-[:FOLLOWS]->(followed)
```

## Performance Optimization

### Index Usage

1. **Start with indexed properties** - Begin MATCH with the most selective indexed field
2. **Use composite indexes** - For queries filtering on multiple properties
3. **Profile queries** - Use the `PROFILE` prefix to see the execution plan

```cypher
PROFILE MATCH (e:Event {kind: 1})
WHERE e.created_at > $since
RETURN e LIMIT 100
```

### Query Optimization Tips

1. **Filter early** - Put WHERE conditions close to MATCH
2. **Limit early** - Use LIMIT as early as possible
3. **Avoid Cartesian products** - Connect patterns or use WITH
4. **Use parameters** - Enables query plan caching

```cypher
// GOOD - Filter and limit early
MATCH (e:Event)
WHERE e.kind IN $kinds AND e.created_at >= $since
WITH e ORDER BY e.created_at DESC LIMIT 100
OPTIONAL MATCH (e)-[:TAGGED_WITH]->(t:Tag)
RETURN e, collect(t)

// BAD - Late filtering and a Cartesian product
MATCH (e:Event), (t:Tag)
WHERE e.kind IN $kinds
RETURN e, t LIMIT 100
```

## Reference Materials

For detailed information, consult the reference files:

- **references/syntax-reference.md** - Complete Cypher syntax guide with all clause types, operators, and functions
- **references/common-patterns.md** - Project-specific patterns for the ORLY Nostr relay, including event storage, tag queries, and social graph traversals
- **references/common-mistakes.md** - Frequent Cypher errors and how to avoid them

## ORLY-Specific Patterns

This codebase uses these specific Cypher patterns:

### Event Storage Pattern

```cypher
// Create event with author relationship
MERGE (a:Author {pubkey: $pubkey})
CREATE (e:Event {
  id: $eventId,
  serial: $serial,
  kind: $kind,
  created_at: $createdAt,
  content: $content,
  sig: $sig,
  pubkey: $pubkey,
  tags: $tags
})
CREATE (e)-[:AUTHORED_BY]->(a)
```

### Tag Query Pattern

```cypher
// Query events by tag (Nostr #<tag> filter)
MATCH (e:Event)-[:TAGGED_WITH]->(t:Tag {type: $tagType})
WHERE t.value IN $tagValues
RETURN e
ORDER BY e.created_at DESC
LIMIT $limit
```

### Social Graph Pattern

```cypher
// Process contact list with diff-based updates
// Mark old as superseded
OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
SET old.superseded_by = $new_event_id

// Create tracking node
CREATE (new:ProcessedSocialEvent {
  event_id: $new_event_id,
  event_kind: 3,
  pubkey: $author_pubkey,
  created_at: $created_at,
  processed_at: timestamp()
})

// Update relationships
MERGE (author:NostrUser {pubkey: $author_pubkey})
WITH author
UNWIND $added_follows AS followed_pubkey
MERGE (followed:NostrUser {pubkey: followed_pubkey})
MERGE (author)-[:FOLLOWS]->(followed)
```

## Official Resources

- Neo4j Cypher Manual: https://neo4j.com/docs/cypher-manual/current/
- Cypher Cheat Sheet: https://neo4j.com/docs/cypher-cheat-sheet/current/
- Query Tuning: https://neo4j.com/docs/cypher-manual/current/query-tuning/
.claude/skills/cypher/references/common-mistakes.md (new file, 381 lines)
@@ -0,0 +1,381 @@
# Common Cypher Mistakes and How to Avoid Them

## Clause Ordering Errors

### MATCH After CREATE Without WITH

**Error**: `Invalid input 'MATCH': expected ... WITH`

```cypher
// WRONG
CREATE (e:Event {id: $id})
MATCH (ref:Event {id: $refId})   // ERROR!
CREATE (e)-[:REFERENCES]->(ref)

// CORRECT - Use WITH to transition
CREATE (e:Event {id: $id})
WITH e
MATCH (ref:Event {id: $refId})
CREATE (e)-[:REFERENCES]->(ref)
```

**Rule**: After CREATE, you must use WITH before MATCH.

### WHERE After WITH Without Carrying Variables

**Error**: `Variable 'x' not defined`

```cypher
// WRONG - 'a' is lost
MATCH (a:Author), (e:Event)
WITH e
WHERE a.pubkey = $pubkey   // ERROR: 'a' not in scope

// CORRECT - Include all needed variables
MATCH (a:Author), (e:Event)
WITH a, e
WHERE a.pubkey = $pubkey
```

**Rule**: WITH resets the scope. Include all variables you need.

### ORDER BY Without Aliased Return

**Error**: `Invalid input 'ORDER': expected ... AS`

```cypher
// WRONG in some contexts
RETURN n.name
ORDER BY n.name

// SAFER - Use an alias
RETURN n.name AS name
ORDER BY name
```

## MERGE Mistakes

### MERGE on Complex Pattern Creates Duplicates

```cypher
// DANGEROUS - May create duplicate nodes
MERGE (a:Person {name: 'Alice'})-[:KNOWS]->(b:Person {name: 'Bob'})

// CORRECT - MERGE nodes separately first
MERGE (a:Person {name: 'Alice'})
MERGE (b:Person {name: 'Bob'})
MERGE (a)-[:KNOWS]->(b)
```

**Rule**: MERGE simple patterns, not complex ones.

### MERGE Without Unique Property

```cypher
// DANGEROUS - No unique identifier: matches every Person node
// (or creates a blank one if none exist), so SET hits them all
MERGE (p:Person)
SET p.name = 'Alice'

// CORRECT - Provide a unique key
MERGE (p:Person {email: $email})
SET p.name = 'Alice'
```

**Rule**: MERGE must have properties that uniquely identify the node.

### Missing ON CREATE/ON MATCH

```cypher
// LOSES context of whether new or existing
MERGE (p:Person {id: $id})
SET p.updated_at = timestamp()   // Always runs

// BETTER - Handle each case
MERGE (p:Person {id: $id})
ON CREATE SET p.created_at = timestamp()
ON MATCH SET p.updated_at = timestamp()
```

## NULL Handling Errors

### Comparing with NULL

```cypher
// WRONG - NULL = NULL is NULL, not true
WHERE n.email = null   // Never matches!

// CORRECT
WHERE n.email IS NULL
WHERE n.email IS NOT NULL
```

### NULL in Aggregations

```cypher
// count() and collect() both skip NULL values
MATCH (n:Person)
OPTIONAL MATCH (n)-[:BOUGHT]->(p:Product)
RETURN n.name, count(p)   // count ignores NULL
```

### NULL Propagation in Expressions

```cypher
// Any operation with NULL returns NULL
WHERE n.age + 1 > 21   // If n.age is NULL, whole expression is NULL (falsy)

// Handle with coalesce
WHERE coalesce(n.age, 0) + 1 > 21
```

## List and IN Clause Errors

### Empty List in IN

```cypher
// An empty list never matches
WHERE n.kind IN []   // Always false

// Check for an empty list in application code before the query,
// or use CASE:
WHERE CASE WHEN size($kinds) > 0 THEN n.kind IN $kinds ELSE true END
```
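
A sketch of the application-side guard in Go (function and parameter names are illustrative):

```go
// Sketch: add the kind predicate only when the list is non-empty,
// instead of sending `IN []` (which matches nothing) to Neo4j.
func appendKindFilter(query string, kinds []int64, params map[string]any) string {
	if len(kinds) == 0 {
		return query // omit the predicate entirely
	}
	params["kinds"] = kinds
	return query + " AND e.kind IN $kinds"
}
```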

### IN with NULL Values

```cypher
// NULL in the list causes issues
WHERE n.id IN [1, NULL, 3]   // NULL is never equal to anything

// Filter NULLs in application code
```

## Relationship Pattern Errors

### Forgetting Direction

```cypher
// WRONG - Matches both directions
MATCH (a)-[:FOLLOWS]-(b)     // Undirected!

// CORRECT - Specify direction
MATCH (a)-[:FOLLOWS]->(b)    // a follows b
MATCH (a)<-[:FOLLOWS]-(b)    // b follows a
```

### Variable-Length Without Bounds

```cypher
// DANGEROUS - Potentially explosive
MATCH (a)-[*]->(b)       // Any length path!

// SAFE - Set bounds
MATCH (a)-[*1..3]->(b)   // 1 to 3 hops max
```

### Creating Duplicate Relationships

```cypher
// May create duplicates
CREATE (a)-[:KNOWS]->(b)

// Idempotent
MERGE (a)-[:KNOWS]->(b)
```

## Performance Mistakes

### Cartesian Products

```cypher
// WRONG - Cartesian product
MATCH (a:Person), (b:Product)
WHERE a.id = $personId AND b.id = $productId
CREATE (a)-[:BOUGHT]->(b)

// CORRECT - Single pattern or sequential
MATCH (a:Person {id: $personId})
MATCH (b:Product {id: $productId})
CREATE (a)-[:BOUGHT]->(b)
```

### Late Filtering

```cypher
// SLOW - Filters after collecting everything
MATCH (e:Event)
WITH e
WHERE e.kind = 1   // Should be in MATCH or right after

// FAST - Filter early
MATCH (e:Event)
WHERE e.kind = 1
```

### Missing LIMIT with ORDER BY

```cypher
// SLOW - Sorts all results
MATCH (e:Event)
RETURN e
ORDER BY e.created_at DESC

// FAST - Limits the result set
MATCH (e:Event)
RETURN e
ORDER BY e.created_at DESC
LIMIT 100
```

### Unparameterized Queries

```cypher
// WRONG - No query plan caching, injection risk
MATCH (e:Event {id: '" + eventId + "'})

// CORRECT - Use parameters
MATCH (e:Event {id: $eventId})
```

## String Comparison Errors

### Case Sensitivity

```cypher
// Cypher strings are case-sensitive
WHERE n.name = 'alice'   // Won't match 'Alice'

// Use toLower/toUpper for case-insensitive comparison
WHERE toLower(n.name) = toLower($name)

// Or use regex with (?i)
WHERE n.name =~ '(?i)alice'
```

### LIKE vs CONTAINS

```cypher
// There's no LIKE in Cypher
WHERE n.name LIKE '%alice%'   // ERROR!

// Use CONTAINS, STARTS WITH, ENDS WITH
WHERE n.name CONTAINS 'alice'
WHERE n.name STARTS WITH 'ali'
WHERE n.name ENDS WITH 'ice'

// Or regex for complex patterns
WHERE n.name =~ '.*ali.*ce.*'
```

## Index Mistakes

### Constraint vs Index

```cypher
// Constraint (also creates an index, enforces uniqueness)
CREATE CONSTRAINT foo IF NOT EXISTS FOR (n:Node) REQUIRE n.id IS UNIQUE

// Index only (no uniqueness enforcement)
CREATE INDEX bar IF NOT EXISTS FOR (n:Node) ON (n.id)
```

### Index Not Used

```cypher
// Index on n.id won't help here
WHERE toLower(n.id) = $id   // Function applied to indexed property!

// Store lowercase if needed, or create a computed property
```

### Wrong Composite Index Order

```cypher
// Index on (kind, created_at) won't help a query by created_at alone
MATCH (e:Event) WHERE e.created_at > $since   // Index not used

// Either create a single-property index or query by kind too
CREATE INDEX event_created_at FOR (e:Event) ON (e.created_at)
```

## Transaction Errors

### Read After Write in Same Transaction

```cypher
// In Neo4j, reads in a transaction see the transaction's own writes,
// but be careful with external processes
CREATE (n:Node {id: 'new'})
WITH n
MATCH (m:Node {id: 'new'})   // Will find 'n'
```

### Locks and Deadlocks

```cypher
// MERGE takes locks; avoid complex patterns that might deadlock
// Bad: two sessions MERGE the same nodes in different order
Session 1: MERGE (a:Person {id: 1}) MERGE (b:Person {id: 2})
Session 2: MERGE (b:Person {id: 2}) MERGE (a:Person {id: 1})   // Potential deadlock

// Good: consistent ordering
Session 1: MERGE (a:Person {id: 1}) MERGE (b:Person {id: 2})
Session 2: MERGE (a:Person {id: 1}) MERGE (b:Person {id: 2})
```

## Type Coercion Issues

### Integer vs String

```cypher
// Types must match
WHERE n.id = 123     // Won't match if n.id is "123"
WHERE n.id = '123'   // Won't match if n.id is 123

// Use appropriate parameter types from Go
params["id"] = int64(123)   // For integer
params["id"] = "123"        // For string
```
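
As a sketch, the corresponding parameter map on the Go side (names illustrative; integer properties must be passed as Go integers, not strings):

```go
// Illustrative parameter map for the Go driver: integer properties such
// as kind and created_at must be passed as Go integers, or the equality
// predicates above will never match.
params := map[string]any{
	"kind":      int64(1),          // integer property
	"eventId":   "abc123",          // string property
	"createdAt": int64(1700000000), // Unix seconds as integer
}
```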

### Boolean Handling

```cypher
// Neo4j booleans vs strings
WHERE n.active = true     // Boolean
WHERE n.active = 'true'   // String - different!
```

## Delete Errors

### Delete Node With Relationships

```cypher
// ERROR - Node still has relationships
MATCH (n:Person {id: $id})
DELETE n

// CORRECT - Delete relationships along with the node
MATCH (n:Person {id: $id})
DETACH DELETE n
```

### Optional Match and Delete

```cypher
// WRONG - DELETE NULL causes no error but also doesn't help
OPTIONAL MATCH (n:Node {id: $id})
DELETE n   // If n is NULL, nothing happens silently

// Better - Check existence first or handle in application
MATCH (n:Node {id: $id})
DELETE n
```

## Debugging Tips

1. **Use EXPLAIN** to see the query plan without executing
2. **Use PROFILE** to see actual execution metrics
3. **Break complex queries** into smaller parts to isolate issues
4. **Check parameter types** - mismatched types are a common issue
5. **Verify indexes exist** with `SHOW INDEXES`
6. **Check constraints** with `SHOW CONSTRAINTS`
.claude/skills/cypher/references/common-patterns.md (new file, 397 lines)
@@ -0,0 +1,397 @@
# Common Cypher Patterns for ORLY Nostr Relay

This reference contains project-specific Cypher patterns used in the ORLY Nostr relay's Neo4j backend.

## Schema Overview

### Node Types

| Label | Purpose | Key Properties |
|-------|---------|----------------|
| `Event` | Nostr events (NIP-01) | `id`, `kind`, `pubkey`, `created_at`, `content`, `sig`, `tags`, `serial` |
| `Author` | Event authors (for NIP-01 queries) | `pubkey` |
| `Tag` | Generic tags | `type`, `value` |
| `NostrUser` | Social graph users (WoT) | `pubkey`, `name`, `about`, `picture`, `nip05` |
| `ProcessedSocialEvent` | Social event tracking | `event_id`, `event_kind`, `pubkey`, `superseded_by` |
| `Marker` | Internal state markers | `key`, `value` |

### Relationship Types

| Type | From | To | Purpose |
|------|------|-----|---------|
| `AUTHORED_BY` | Event | Author | Links event to author |
| `TAGGED_WITH` | Event | Tag | Links event to tags |
| `REFERENCES` | Event | Event | e-tag references |
| `MENTIONS` | Event | Author | p-tag mentions |
| `FOLLOWS` | NostrUser | NostrUser | Contact list (kind 3) |
| `MUTES` | NostrUser | NostrUser | Mute list (kind 10000) |
| `REPORTS` | NostrUser | NostrUser | Reports (kind 1984) |

## Event Storage Patterns

### Create Event with Full Relationships

This pattern creates an event and all related nodes/relationships atomically:

```cypher
// 1. Create or get author
MERGE (a:Author {pubkey: $pubkey})

// 2. Create event node
CREATE (e:Event {
  id: $eventId,
  serial: $serial,
  kind: $kind,
  created_at: $createdAt,
  content: $content,
  sig: $sig,
  pubkey: $pubkey,
  tags: $tagsJson   // JSON string for full tag data
})

// 3. Link to author
CREATE (e)-[:AUTHORED_BY]->(a)

// 4. Process e-tags (event references)
WITH e, a
OPTIONAL MATCH (ref0:Event {id: $eTag_0})
FOREACH (_ IN CASE WHEN ref0 IS NOT NULL THEN [1] ELSE [] END |
  CREATE (e)-[:REFERENCES]->(ref0)
)

// 5. Process p-tags (mentions)
WITH e, a
MERGE (mentioned0:Author {pubkey: $pTag_0})
CREATE (e)-[:MENTIONS]->(mentioned0)

// 6. Process other tags
WITH e, a
MERGE (tag0:Tag {type: $tagType_0, value: $tagValue_0})
CREATE (e)-[:TAGGED_WITH]->(tag0)

RETURN e.id AS id
```

### Check Event Existence

```cypher
MATCH (e:Event {id: $id})
RETURN e.id AS id
LIMIT 1
```

### Get Next Serial Number

```cypher
MERGE (m:Marker {key: 'serial'})
ON CREATE SET m.value = 1
ON MATCH SET m.value = m.value + 1
RETURN m.value AS serial
```
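
This counter is only safe if the MERGE and the read run in a single write transaction. A sketch with the v5 Go driver's managed-transaction helper (assumes `ctx` and an open `session`; not necessarily how this codebase wires it):

```go
// Sketch: run the counter query in one managed write transaction so
// concurrent writers serialize on the Marker node's lock.
// Assumes ctx and an open neo4j.SessionWithContext named session.
serial, err := neo4j.ExecuteWrite(ctx, session,
	func(tx neo4j.ManagedTransaction) (int64, error) {
		res, err := tx.Run(ctx, `
			MERGE (m:Marker {key: 'serial'})
			ON CREATE SET m.value = 1
			ON MATCH SET m.value = m.value + 1
			RETURN m.value AS serial`, nil)
		if err != nil {
			return 0, err
		}
		rec, err := res.Single(ctx)
		if err != nil {
			return 0, err
		}
		val, _ := rec.Get("serial")
		return val.(int64), nil
	})
```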

## Query Patterns

### Basic Filter Query (NIP-01)

```cypher
MATCH (e:Event)
WHERE e.kind IN $kinds
  AND e.pubkey IN $authors
  AND e.created_at >= $since
  AND e.created_at <= $until
RETURN e.id AS id,
       e.kind AS kind,
       e.created_at AS created_at,
       e.content AS content,
       e.sig AS sig,
       e.pubkey AS pubkey,
       e.tags AS tags,
       e.serial AS serial
ORDER BY e.created_at DESC
LIMIT $limit
```

### Query by Event ID (with prefix support)

```cypher
// Exact match
MATCH (e:Event {id: $id})
RETURN e

// Prefix match
MATCH (e:Event)
WHERE e.id STARTS WITH $idPrefix
RETURN e
```

### Query by Tag (#<tag> filter)

```cypher
MATCH (e:Event)
OPTIONAL MATCH (e)-[:TAGGED_WITH]->(t:Tag)
WHERE t.type = $tagType AND t.value IN $tagValues
RETURN DISTINCT e
ORDER BY e.created_at DESC
LIMIT $limit
```

### Count Events

```cypher
MATCH (e:Event)
WHERE e.kind IN $kinds
RETURN count(e) AS count
```

### Query Delete Events Targeting an Event

```cypher
MATCH (target:Event {id: $targetId})
MATCH (e:Event {kind: 5})-[:REFERENCES]->(target)
RETURN e
ORDER BY e.created_at DESC
```

### Replaceable Event Check (kinds 0, 3, 10000-19999)

```cypher
MATCH (e:Event {kind: $kind, pubkey: $pubkey})
WHERE e.created_at < $newCreatedAt
RETURN e.serial AS serial
ORDER BY e.created_at DESC
```

### Parameterized Replaceable Event Check (kinds 30000-39999)

```cypher
MATCH (e:Event {kind: $kind, pubkey: $pubkey})-[:TAGGED_WITH]->(t:Tag {type: 'd', value: $dValue})
WHERE e.created_at < $newCreatedAt
RETURN e.serial AS serial
ORDER BY e.created_at DESC
```

## Social Graph Patterns

### Update Profile (Kind 0)

```cypher
MERGE (user:NostrUser {pubkey: $pubkey})
ON CREATE SET
  user.created_at = timestamp(),
  user.first_seen_event = $event_id
ON MATCH SET
  user.last_profile_update = $created_at
SET
  user.name = $name,
  user.about = $about,
  user.picture = $picture,
  user.nip05 = $nip05,
  user.lud16 = $lud16,
  user.display_name = $display_name
```

### Contact List Update (Kind 3) - Diff-Based

```cypher
// Mark old event as superseded
OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
SET old.superseded_by = $new_event_id

// Create new event tracking
CREATE (new:ProcessedSocialEvent {
  event_id: $new_event_id,
  event_kind: 3,
  pubkey: $author_pubkey,
  created_at: $created_at,
  processed_at: timestamp(),
  relationship_count: $total_follows,
  superseded_by: null
})

// Get or create author
MERGE (author:NostrUser {pubkey: $author_pubkey})

// Update unchanged relationships to the new event
WITH author
OPTIONAL MATCH (author)-[unchanged:FOLLOWS]->(followed:NostrUser)
WHERE unchanged.created_by_event = $old_event_id
  AND NOT followed.pubkey IN $removed_follows
SET unchanged.created_by_event = $new_event_id,
    unchanged.created_at = $created_at

// Remove old relationships for removed follows
WITH author
OPTIONAL MATCH (author)-[old_follows:FOLLOWS]->(followed:NostrUser)
WHERE old_follows.created_by_event = $old_event_id
  AND followed.pubkey IN $removed_follows
DELETE old_follows

// Create new relationships for added follows
WITH author
UNWIND $added_follows AS followed_pubkey
MERGE (followed:NostrUser {pubkey: followed_pubkey})
MERGE (author)-[new_follows:FOLLOWS]->(followed)
ON CREATE SET
  new_follows.created_by_event = $new_event_id,
  new_follows.created_at = $created_at,
  new_follows.relay_received_at = timestamp()
ON MATCH SET
  new_follows.created_by_event = $new_event_id,
  new_follows.created_at = $created_at
```

### Create Report (Kind 1984)

```cypher
// Create tracking node
CREATE (evt:ProcessedSocialEvent {
  event_id: $event_id,
  event_kind: 1984,
  pubkey: $reporter_pubkey,
  created_at: $created_at,
  processed_at: timestamp(),
  relationship_count: 1,
  superseded_by: null
})

// Create users and relationship
MERGE (reporter:NostrUser {pubkey: $reporter_pubkey})
MERGE (reported:NostrUser {pubkey: $reported_pubkey})
CREATE (reporter)-[:REPORTS {
  created_by_event: $event_id,
  created_at: $created_at,
  relay_received_at: timestamp(),
  report_type: $report_type
}]->(reported)
```

### Get Latest Social Event for Pubkey

```cypher
MATCH (evt:ProcessedSocialEvent {pubkey: $pubkey, event_kind: $kind})
WHERE evt.superseded_by IS NULL
RETURN evt.event_id AS event_id,
       evt.created_at AS created_at,
       evt.relationship_count AS relationship_count
ORDER BY evt.created_at DESC
LIMIT 1
```

### Get Follows for Event

```cypher
MATCH (author:NostrUser)-[f:FOLLOWS]->(followed:NostrUser)
WHERE f.created_by_event = $event_id
RETURN collect(followed.pubkey) AS pubkeys
```

## WoT Query Patterns

### Find Mutual Follows

```cypher
MATCH (a:NostrUser {pubkey: $pubkeyA})-[:FOLLOWS]->(b:NostrUser)
WHERE (b)-[:FOLLOWS]->(a)
RETURN b.pubkey AS mutual_friend
```

### Find Followers

```cypher
MATCH (follower:NostrUser)-[:FOLLOWS]->(user:NostrUser {pubkey: $pubkey})
RETURN follower.pubkey, follower.name
```

### Find Following

```cypher
MATCH (user:NostrUser {pubkey: $pubkey})-[:FOLLOWS]->(following:NostrUser)
RETURN following.pubkey, following.name
```

### Hop Distance (Trust Path)

```cypher
MATCH (start:NostrUser {pubkey: $startPubkey})
MATCH (end:NostrUser {pubkey: $endPubkey})
MATCH path = shortestPath((start)-[:FOLLOWS*..6]->(end))
RETURN length(path) AS hops, [n IN nodes(path) | n.pubkey] AS path
```

### Second-Degree Connections

```cypher
MATCH (me:NostrUser {pubkey: $myPubkey})-[:FOLLOWS]->(:NostrUser)-[:FOLLOWS]->(suggested:NostrUser)
WHERE NOT (me)-[:FOLLOWS]->(suggested)
  AND suggested.pubkey <> $myPubkey
RETURN suggested.pubkey, count(*) AS commonFollows
ORDER BY commonFollows DESC
LIMIT 20
```

## Schema Management Patterns

### Create Constraint

```cypher
CREATE CONSTRAINT event_id_unique IF NOT EXISTS
FOR (e:Event) REQUIRE e.id IS UNIQUE
```

### Create Index

```cypher
CREATE INDEX event_kind IF NOT EXISTS
FOR (e:Event) ON (e.kind)
```

### Create Composite Index

```cypher
CREATE INDEX event_kind_created_at IF NOT EXISTS
FOR (e:Event) ON (e.kind, e.created_at)
```

### Drop All Data (Testing Only)

```cypher
MATCH (n) DETACH DELETE n
```

## Performance Patterns

### Use EXPLAIN/PROFILE

```cypher
// See the query plan without running
EXPLAIN MATCH (e:Event) WHERE e.kind = 1 RETURN e

// Run and see actual metrics
PROFILE MATCH (e:Event) WHERE e.kind = 1 RETURN e
```

### Batch Import with UNWIND

```cypher
UNWIND $events AS evt
CREATE (e:Event {
  id: evt.id,
  kind: evt.kind,
  pubkey: evt.pubkey,
  created_at: evt.created_at,
  content: evt.content,
  sig: evt.sig,
  tags: evt.tags
})
```

### Efficient Pagination

```cypher
// Use indexed ORDER BY with WHERE for cursor-based pagination
MATCH (e:Event)
WHERE e.kind = 1 AND e.created_at < $cursor
RETURN e
ORDER BY e.created_at DESC
LIMIT 20
```
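
A sketch of the client-side cursor loop in Go (assumes `ctx`, an open `session`, and the `time` package; a short page signals the end of results):

```go
// Sketch: cursor-based pagination. Each page's oldest created_at becomes
// the next $cursor; fewer than 20 rows means the results are exhausted.
cursor := time.Now().Unix()
for {
	res, err := session.Run(ctx, `
		MATCH (e:Event)
		WHERE e.kind = 1 AND e.created_at < $cursor
		RETURN e.created_at AS created_at
		ORDER BY e.created_at DESC
		LIMIT 20`,
		map[string]any{"cursor": cursor})
	if err != nil {
		break
	}
	count := 0
	for res.Next(ctx) {
		created, _ := res.Record().Get("created_at")
		cursor = created.(int64) // advance past this page
		count++
	}
	if count < 20 {
		break // short page: no more results
	}
}
```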

.claude/skills/cypher/references/syntax-reference.md (new file, 540 lines)
@@ -0,0 +1,540 @@
# Cypher Syntax Reference

Complete syntax reference for the Neo4j Cypher query language.

## Clause Reference

### Reading Clauses

#### MATCH

Finds patterns in the graph.

```cypher
// Basic node match
MATCH (n:Label)

// Match with properties
MATCH (n:Label {key: value})

// Match relationships
MATCH (a)-[r:RELATES_TO]->(b)

// Match path
MATCH path = (a)-[*1..3]->(b)
```

#### OPTIONAL MATCH

Like MATCH but returns NULL for non-matches (LEFT OUTER JOIN).

```cypher
MATCH (a:Person)
OPTIONAL MATCH (a)-[:KNOWS]->(b:Person)
RETURN a.name, b.name   // b.name may be NULL
```

#### WHERE

Filters results.

```cypher
// Comparison operators
WHERE n.age > 21
WHERE n.age >= 21
WHERE n.age < 65
WHERE n.age <= 65
WHERE n.name = 'Alice'
WHERE n.name <> 'Bob'

// Boolean operators
WHERE n.age > 21 AND n.active = true
WHERE n.age < 18 OR n.age > 65
WHERE NOT n.deleted

// NULL checks
WHERE n.email IS NULL
WHERE n.email IS NOT NULL

// Pattern predicates
WHERE (n)-[:KNOWS]->(:Person)
WHERE NOT (n)-[:BLOCKED]->()
WHERE exists((n)-[:FOLLOWS]->())

// String predicates
WHERE n.name STARTS WITH 'A'
WHERE n.name ENDS WITH 'son'
WHERE n.name CONTAINS 'li'
WHERE n.name =~ '(?i)alice.*'   // Case-insensitive regex

// List predicates
WHERE n.status IN ['active', 'pending']
WHERE any(x IN n.tags WHERE x = 'important')
WHERE all(x IN n.scores WHERE x > 50)
WHERE none(x IN n.errors WHERE x IS NOT NULL)
WHERE single(x IN n.items WHERE x.primary = true)
```

### Writing Clauses

#### CREATE

Creates nodes and relationships.

```cypher
// Create node
CREATE (n:Label {key: value})

// Create multiple nodes
CREATE (a:Person {name: 'Alice'}), (b:Person {name: 'Bob'})

// Create relationship
CREATE (a)-[r:KNOWS {since: 2020}]->(b)

// Create path
CREATE p = (a)-[:KNOWS]->(b)-[:KNOWS]->(c)
```

#### MERGE

Find or create pattern. **Critical for idempotency**.

```cypher
// MERGE node
MERGE (n:Label {key: $uniqueKey})

// MERGE with ON CREATE / ON MATCH
MERGE (n:Person {email: $email})
ON CREATE SET n.created = timestamp(), n.name = $name
ON MATCH SET n.accessed = timestamp()

// MERGE relationship (both nodes must exist or be in scope)
MERGE (a)-[r:KNOWS]->(b)
ON CREATE SET r.since = date()
```

**MERGE Gotcha**: MERGE on a pattern locks the entire pattern. For relationships, MERGE each node first:

```cypher
// CORRECT
MERGE (a:Person {id: $id1})
MERGE (b:Person {id: $id2})
MERGE (a)-[:KNOWS]->(b)

// RISKY - may create duplicate nodes
MERGE (a:Person {id: $id1})-[:KNOWS]->(b:Person {id: $id2})
```

#### SET

Updates properties.

```cypher
// Set single property
SET n.name = 'Alice'

// Set multiple properties
SET n.name = 'Alice', n.age = 30

// Set from map (replaces all properties)
SET n = {name: 'Alice', age: 30}

// Set from map (adds/updates, keeps existing)
SET n += {name: 'Alice'}

// Set label
SET n:NewLabel

// Remove property
SET n.obsolete = null
```

#### DELETE / DETACH DELETE

Removes nodes and relationships.

```cypher
// Delete relationship
MATCH (a)-[r:KNOWS]->(b)
DELETE r

// Delete node (must have no relationships)
MATCH (n:Orphan)
DELETE n

// Delete node and all relationships
MATCH (n:Person {name: 'Bob'})
DETACH DELETE n
```

#### REMOVE

Removes properties and labels.

```cypher
// Remove property
REMOVE n.temporary

// Remove label
REMOVE n:OldLabel
```

### Projection Clauses

#### RETURN

Specifies output.

```cypher
// Return nodes
RETURN n

// Return properties
RETURN n.name, n.age

// Return with alias
RETURN n.name AS name, n.age AS age

// Return all
RETURN *

// Return distinct
RETURN DISTINCT n.category

// Return expression
RETURN n.price * n.quantity AS total
```

#### WITH

Passes results between query parts. **Critical for multi-part queries**.

```cypher
// Filter and pass
MATCH (n:Person)
WITH n WHERE n.age > 21
RETURN n

// Aggregate and continue
MATCH (n:Person)-[:BOUGHT]->(p:Product)
WITH n, count(p) AS purchases
WHERE purchases > 5
RETURN n.name, purchases

// Order and limit mid-query
MATCH (n:Person)
WITH n ORDER BY n.age DESC LIMIT 10
MATCH (n)-[:LIVES_IN]->(c:City)
RETURN n.name, c.name
```

**WITH resets scope**: Variables not listed in WITH are no longer available.

#### ORDER BY

Sorts results.

```cypher
ORDER BY n.name                     // Ascending (default)
ORDER BY n.name ASC                 // Explicit ascending
ORDER BY n.name DESC                // Descending
ORDER BY n.lastName, n.firstName    // Multiple fields
ORDER BY n.priority DESC, n.name    // Mixed
```

#### SKIP and LIMIT

Pagination.

```cypher
// Skip first 10
SKIP 10

// Return only 20
LIMIT 20

// Pagination
ORDER BY n.created_at DESC
SKIP $offset LIMIT $pageSize
```

### Sub-queries

#### CALL (Subquery)

Execute a subquery for each row.

```cypher
MATCH (p:Person)
CALL {
  WITH p
  MATCH (p)-[:BOUGHT]->(prod:Product)
  RETURN count(prod) AS purchaseCount
}
RETURN p.name, purchaseCount
```

#### UNION

Combine results from multiple queries.

```cypher
MATCH (n:Person) RETURN n.name AS name
UNION
MATCH (n:Company) RETURN n.name AS name

// UNION ALL keeps duplicates
MATCH (n:Person) RETURN n.name AS name
UNION ALL
MATCH (n:Company) RETURN n.name AS name
```

### Control Flow

#### FOREACH

Iterate over a list, executing updates.

```cypher
// Set property on path nodes
MATCH path = (a)-[*]->(b)
FOREACH (n IN nodes(path) | SET n.visited = true)

// Conditional operation (common pattern)
OPTIONAL MATCH (target:Node {id: $id})
FOREACH (_ IN CASE WHEN target IS NOT NULL THEN [1] ELSE [] END |
  CREATE (source)-[:LINKS_TO]->(target)
)
```

#### CASE

Conditional expressions.

```cypher
// Simple CASE
RETURN CASE n.status
  WHEN 'active' THEN 'A'
  WHEN 'pending' THEN 'P'
  ELSE 'X'
END AS code

// Generic CASE
RETURN CASE
  WHEN n.age < 18 THEN 'minor'
  WHEN n.age < 65 THEN 'adult'
  ELSE 'senior'
END AS category
```

## Operators

### Comparison

| Operator | Description |
|----------|-------------|
| `=` | Equal |
| `<>` | Not equal |
| `<` | Less than |
| `>` | Greater than |
| `<=` | Less than or equal |
| `>=` | Greater than or equal |
| `IS NULL` | Is null |
| `IS NOT NULL` | Is not null |

### Boolean

| Operator | Description |
|----------|-------------|
| `AND` | Logical AND |
| `OR` | Logical OR |
| `NOT` | Logical NOT |
| `XOR` | Exclusive OR |

### String

| Operator | Description |
|----------|-------------|
| `STARTS WITH` | Prefix match |
| `ENDS WITH` | Suffix match |
| `CONTAINS` | Substring match |
| `=~` | Regex match |

### List

| Operator | Description |
|----------|-------------|
| `IN` | List membership |
| `+` | List concatenation |

### Mathematical

| Operator | Description |
|----------|-------------|
| `+` | Addition |
| `-` | Subtraction |
| `*` | Multiplication |
| `/` | Division |
| `%` | Modulo |
| `^` | Exponentiation |

## Functions

### Aggregation

```cypher
count(*)                       // Count rows
count(n)                       // Count non-null
count(DISTINCT n)              // Count unique
sum(n.value)                   // Sum
avg(n.value)                   // Average
min(n.value)                   // Minimum
max(n.value)                   // Maximum
collect(n)                     // Collect to list
collect(DISTINCT n)            // Collect unique
stDev(n.value)                 // Standard deviation
percentileCont(n.value, 0.5)   // Median
```

### Scalar

```cypher
// Type functions
id(n)            // Internal node ID (deprecated, use elementId)
elementId(n)     // Element ID string
labels(n)        // Node labels
type(r)          // Relationship type
properties(n)    // Property map

// Math
abs(x)
ceil(x)
floor(x)
round(x)
sign(x)
sqrt(x)
rand()           // Random 0-1

// String
size(str)        // String length
toLower(str)
toUpper(str)
trim(str)
ltrim(str)
rtrim(str)
replace(str, from, to)
substring(str, start, len)
left(str, len)
right(str, len)
split(str, delimiter)
reverse(str)
toString(val)

// Null handling
coalesce(val1, val2, ...)   // First non-null
nullIf(val1, val2)          // NULL if equal

// Type conversion
toInteger(val)
toFloat(val)
toBoolean(val)
toString(val)
```

### List Functions

```cypher
size(list)          // List length
head(list)          // First element
tail(list)          // All but first
last(list)          // Last element
range(start, end)   // Create range [start..end]
range(start, end, step)
reverse(list)
keys(map)           // Map keys as list
values(map)         // Map values as list

// List predicates
any(x IN list WHERE predicate)
all(x IN list WHERE predicate)
none(x IN list WHERE predicate)
single(x IN list WHERE predicate)

// List manipulation
[x IN list WHERE predicate]              // Filter
[x IN list | expression]                 // Map
[x IN list WHERE pred | expr]            // Filter and map
reduce(s = initial, x IN list | s + x)   // Reduce
```

### Path Functions

```cypher
nodes(path)           // Nodes in path
relationships(path)   // Relationships in path
length(path)          // Number of relationships
shortestPath((a)-[*]-(b))
allShortestPaths((a)-[*]-(b))
```

### Temporal Functions

```cypher
timestamp()   // Current Unix timestamp (ms)
datetime()    // Current datetime
date()        // Current date
time()        // Current time
duration({days: 1, hours: 12})

// Components
datetime().year
datetime().month
datetime().day
datetime().hour

// Parsing
date('2024-01-15')
datetime('2024-01-15T10:30:00Z')
```

### Spatial Functions

```cypher
point({x: 1, y: 2})
point({latitude: 37.5, longitude: -122.4})
distance(point1, point2)
```

## Comments

```cypher
// Single line comment

/* Multi-line
   comment */
```

## Transaction Control

```cypher
// Client-side commands in cypher-shell / Neo4j Browser
:begin
:commit
:rollback
```

## Parameter Syntax

```cypher
// Parameter reference
$paramName

// In properties
{key: $value}

// In WHERE
WHERE n.id = $id

// In expressions
RETURN $multiplier * n.value
```
.claude/skills/distributed-systems/SKILL.md (new file, 1115 lines)
File diff suppressed because it is too large.

@@ -0,0 +1,610 @@
# Consensus Protocols - Detailed Reference

Complete specifications and implementation details for major consensus protocols.

## Paxos Complete Specification

### Proposal Numbers

Proposal numbers must be:
- **Unique**: No two proposers use the same number
- **Totally ordered**: Any two can be compared

**Implementation**: `(round_number, proposer_id)`, where proposer_id breaks ties.
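
A minimal sketch of such a proposal number in Go, assuming integer rounds and proposer IDs (field names are illustrative):

```go
// Minimal sketch of a unique, totally ordered proposal number.
type ProposalNumber struct {
	Round      int // incremented on each retry
	ProposerID int // fixed per proposer; breaks ties within a round
}

// Less defines the total order: compare rounds first, then proposer IDs.
func (p ProposalNumber) Less(q ProposalNumber) bool {
	if p.Round != q.Round {
		return p.Round < q.Round
	}
	return p.ProposerID < q.ProposerID
}
```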
|
||||
|
||||
### Single-Decree Paxos State
|
||||
|
||||
**Proposer state**:
|
||||
```
|
||||
proposal_number: int
|
||||
value: any
|
||||
```
|
||||
|
||||
**Acceptor state (persistent)**:
|
||||
```
|
||||
highest_promised: int # Highest proposal number promised
|
||||
accepted_proposal: int # Number of accepted proposal (0 if none)
|
||||
accepted_value: any # Value of accepted proposal (null if none)
|
||||
```
|
||||
|
||||
### Message Format
|
||||
|
||||
**Prepare** (Phase 1a):
|
||||
```
|
||||
{
|
||||
type: "PREPARE",
|
||||
proposal_number: n
|
||||
}
|
||||
```
|
||||
|
||||
**Promise** (Phase 1b):
|
||||
```
|
||||
{
|
||||
type: "PROMISE",
|
||||
proposal_number: n,
|
||||
accepted_proposal: m, # null if nothing accepted
|
||||
accepted_value: v # null if nothing accepted
|
||||
}
|
||||
```
|
||||
|
||||
**Accept** (Phase 2a):
|
||||
```
|
||||
{
|
||||
type: "ACCEPT",
|
||||
proposal_number: n,
|
||||
value: v
|
||||
}
|
||||
```
|
||||
|
||||
**Accepted** (Phase 2b):
|
||||
```
|
||||
{
|
||||
type: "ACCEPTED",
|
||||
proposal_number: n,
|
||||
value: v
|
||||
}
|
||||
```
|
||||
|
||||
### Proposer Algorithm
|
||||
|
||||
```
|
||||
function propose(value):
|
||||
n = generate_proposal_number()
|
||||
|
||||
# Phase 1: Prepare
|
||||
promises = []
|
||||
for acceptor in acceptors:
|
||||
send PREPARE(n) to acceptor
|
||||
|
||||
wait until |promises| > |acceptors|/2 or timeout
|
||||
|
||||
if timeout:
|
||||
return FAILED
|
||||
|
||||
# Choose value
|
||||
highest = max(promises, key=p.accepted_proposal)
|
||||
if highest.accepted_value is not null:
|
||||
value = highest.accepted_value
|
||||
|
||||
# Phase 2: Accept
|
||||
accepts = []
|
||||
for acceptor in acceptors:
|
||||
send ACCEPT(n, value) to acceptor
|
||||
|
||||
wait until |accepts| > |acceptors|/2 or timeout
|
||||
|
||||
if timeout:
|
||||
return FAILED
|
||||
|
||||
return SUCCESS(value)
|
||||
```
|
||||
|
||||
### Acceptor Algorithm
|
||||
|
||||
```
|
||||
on receive PREPARE(n):
|
||||
if n > highest_promised:
|
||||
highest_promised = n
|
||||
persist(highest_promised)
|
||||
reply PROMISE(n, accepted_proposal, accepted_value)
|
||||
else:
|
||||
# Optionally reply NACK(highest_promised)
|
||||
ignore or reject
|
||||
|
||||
on receive ACCEPT(n, v):
|
||||
if n >= highest_promised:
|
||||
highest_promised = n
|
||||
accepted_proposal = n
|
||||
accepted_value = v
|
||||
persist(highest_promised, accepted_proposal, accepted_value)
|
||||
reply ACCEPTED(n, v)
|
||||
else:
|
||||
ignore or reject
|
||||
```
|
||||
|
||||
### Multi-Paxos Optimization
|
||||
|
||||
**Stable leader**:
|
||||
```
|
||||
# Leader election (using Paxos or other method)
|
||||
leader = elect_leader()
|
||||
|
||||
# Leader's Phase 1 for all future instances
|
||||
leader sends PREPARE(n) for instance range [i, ∞)
|
||||
|
||||
# For each command:
|
||||
function propose_as_leader(value, instance):
|
||||
# Skip Phase 1 if already leader
|
||||
for acceptor in acceptors:
|
||||
send ACCEPT(n, value, instance) to acceptor
|
||||
wait for majority ACCEPTED
|
||||
return SUCCESS
|
||||
```
|
||||
|
||||
### Paxos Safety Proof Sketch
|
||||
|
||||
**Invariant**: If a value v is chosen for instance i, no other value can be chosen.
|
||||
|
||||
**Proof**:
|
||||
1. Value chosen → accepted by majority with proposal n
|
||||
2. Any higher proposal n' must contact majority
|
||||
3. Majorities intersect → at least one acceptor has accepted v
|
||||
4. New proposer adopts v (or higher already-accepted value)
|
||||
5. By induction, all future proposals use v
|
||||
|
||||
## Raft Complete Specification
|
||||
|
||||
### State
|
||||
|
||||
**All servers (persistent)**:
|
||||
```
|
||||
currentTerm: int # Latest term seen
|
||||
votedFor: ServerId # Candidate voted for in current term (null if none)
|
||||
log[]: LogEntry # Log entries
|
||||
```
|
||||
|
||||
**All servers (volatile)**:
|
||||
```
|
||||
commitIndex: int # Highest log index known to be committed
|
||||
lastApplied: int # Highest log index applied to state machine
|
||||
```
|
||||
|
||||
**Leader (volatile, reinitialized after election)**:
|
||||
```
|
||||
nextIndex[]: int # For each server, next log index to send
|
||||
matchIndex[]: int # For each server, highest log index replicated
|
||||
```
|
||||
|
||||
**LogEntry**:
|
||||
```
|
||||
{
|
||||
term: int,
|
||||
command: any
|
||||
}
|
||||
```
|
||||
|
||||
### RequestVote RPC
|
||||
|
||||
**Request**:
|
||||
```
|
||||
{
|
||||
term: int, # Candidate's term
|
||||
candidateId: ServerId, # Candidate requesting vote
|
||||
lastLogIndex: int, # Index of candidate's last log entry
|
||||
lastLogTerm: int # Term of candidate's last log entry
|
||||
}
|
||||
```
|
||||
|
||||
**Response**:
|
||||
```
|
||||
{
|
||||
term: int, # currentTerm, for candidate to update itself
|
||||
voteGranted: bool # True if candidate received vote
|
||||
}
|
||||
```
|
||||
|
||||
**Receiver implementation**:
|
||||
```
|
||||
on receive RequestVote(term, candidateId, lastLogIndex, lastLogTerm):
|
||||
if term < currentTerm:
|
||||
return {term: currentTerm, voteGranted: false}
|
||||
|
||||
if term > currentTerm:
|
||||
currentTerm = term
|
||||
votedFor = null
|
||||
convert to follower
|
||||
|
||||
# Check if candidate's log is at least as up-to-date as ours
|
||||
ourLastTerm = log[len(log)-1].term if log else 0
|
||||
ourLastIndex = len(log) - 1
|
||||
|
||||
logOK = (lastLogTerm > ourLastTerm) or
|
||||
(lastLogTerm == ourLastTerm and lastLogIndex >= ourLastIndex)
|
||||
|
||||
if (votedFor is null or votedFor == candidateId) and logOK:
|
||||
votedFor = candidateId
|
||||
persist(currentTerm, votedFor)
|
||||
reset election timer
|
||||
return {term: currentTerm, voteGranted: true}
|
||||
|
||||
return {term: currentTerm, voteGranted: false}
|
||||
```
|
||||
|
||||
### AppendEntries RPC

**Request**:
```
{
  term: int,            # Leader's term
  leaderId: ServerId,   # For follower to redirect clients
  prevLogIndex: int,    # Index of log entry preceding new ones
  prevLogTerm: int,     # Term of prevLogIndex entry
  entries[]: LogEntry,  # Log entries to store (empty for heartbeat)
  leaderCommit: int     # Leader's commitIndex
}
```

**Response**:
```
{
  term: int,    # currentTerm, for leader to update itself
  success: bool # True if follower had matching prevLog entry
}
```

**Receiver implementation**:
```
on receive AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit):
    if term < currentTerm:
        return {term: currentTerm, success: false}

    reset election timer

    if term > currentTerm:
        currentTerm = term
        votedFor = null

    convert to follower

    # Check log consistency
    if prevLogIndex >= len(log) or
       (prevLogIndex >= 0 and log[prevLogIndex].term != prevLogTerm):
        return {term: currentTerm, success: false}

    # Append new entries (handling conflicts)
    for i, entry in enumerate(entries):
        index = prevLogIndex + 1 + i
        if index < len(log):
            if log[index].term != entry.term:
                # Delete conflicting entry and all following
                log = log[:index]
                log.append(entry)
        else:
            log.append(entry)

    persist(currentTerm, votedFor, log)

    # Update commit index
    if leaderCommit > commitIndex:
        commitIndex = min(leaderCommit, len(log) - 1)

    return {term: currentTerm, success: true}
```

### Leader Behavior

```
on becoming leader:
    for each server:
        nextIndex[server] = len(log)
        matchIndex[server] = 0

    start sending heartbeats

on receiving client command:
    append entry to local log
    persist log
    send AppendEntries to all followers

on receiving AppendEntries response from server:
    if response.success:
        matchIndex[server] = prevLogIndex + len(entries)
        nextIndex[server] = matchIndex[server] + 1

        # Update commit index
        for N from commitIndex+1 to len(log)-1:
            if log[N].term == currentTerm and
               |{s : matchIndex[s] >= N}| > |servers|/2:
                commitIndex = N
    else:
        nextIndex[server] = max(1, nextIndex[server] - 1)
        retry AppendEntries with lower prevLogIndex

on commitIndex update:
    while lastApplied < commitIndex:
        lastApplied++
        apply log[lastApplied].command to state machine
```
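The commit rule above (advance commitIndex only to entries from the current term that a majority has replicated) translates directly to code. A minimal Go sketch, reusing the LogEntry type from the earlier sketch; names are illustrative:

```go
// advanceCommitIndex returns the new commit index for a leader: the
// highest N such that log[N] is from currentTerm and a majority of
// servers (counting the leader itself) have matchIndex >= N.
func advanceCommitIndex(commitIndex, currentTerm, clusterSize int,
	log []LogEntry, matchIndex map[string]int) int {
	for n := commitIndex + 1; n < len(log); n++ {
		if log[n].Term != currentTerm {
			continue // only current-term entries commit by counting
		}
		count := 1 // the leader always holds the entry
		for _, m := range matchIndex {
			if m >= n {
				count++
			}
		}
		if count > clusterSize/2 {
			commitIndex = n
		}
	}
	return commitIndex
}
```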
### Election Timeout

```
on election timeout (follower or candidate):
    currentTerm++
    convert to candidate
    votedFor = self
    persist(currentTerm, votedFor)
    reset election timer (randomized, to avoid repeated split votes)
    votes = 1  # Vote for self

    for each server except self:
        send RequestVote(currentTerm, self, lastLogIndex, lastLogTerm)

    wait for responses or timeout:
        if received votes > |servers|/2:
            become leader
        if received AppendEntries from valid leader:
            become follower
        if timeout:
            start new election
```
## PBFT Complete Specification

### Message Types

**REQUEST**:
```
{
  type: "REQUEST",
  operation: o, # Operation to execute
  timestamp: t, # Client timestamp (for reply matching)
  client: c     # Client identifier
}
```

**PRE-PREPARE**:
```
{
  type: "PRE-PREPARE",
  view: v,     # Current view number
  sequence: n, # Sequence number
  digest: d,   # Hash of request
  request: m   # The request message
}
signature(primary)
```

**PREPARE**:
```
{
  type: "PREPARE",
  view: v,
  sequence: n,
  digest: d,
  replica: i # Sending replica
}
signature(replica_i)
```

**COMMIT**:
```
{
  type: "COMMIT",
  view: v,
  sequence: n,
  digest: d,
  replica: i
}
signature(replica_i)
```

**REPLY**:
```
{
  type: "REPLY",
  view: v,
  timestamp: t,
  client: c,
  replica: i,
  result: r # Execution result
}
signature(replica_i)
```

### Replica State

```
view: int     # Current view
sequence: int # Last assigned sequence number (primary)
log[]: {request, prepares, commits, state} # Log of requests
prepared_certificates: {}  # Prepared certificates (pre-prepare + 2f prepares)
committed_certificates: {} # Committed certificates (2f+1 commits)
h: int # Low water mark
H: int # High water mark (h + L)
```

### Normal Operation Protocol

**Primary (replica p = v mod n)**:
```
on receive REQUEST(m) from client:
    if not primary for current view:
        forward to primary
        return

    n = assign_sequence_number()
    d = hash(m)

    broadcast PRE-PREPARE(v, n, d, m) to all replicas
    add to log
```

**All replicas**:
```
on receive PRE-PREPARE(v, n, d, m) from primary:
    if v != current_view:
        ignore
    if already accepted pre-prepare for (v, n) with different digest:
        ignore
    if not in_view_as_backup(v):
        ignore
    if not h < n <= H:
        ignore # Outside sequence window

    # Valid pre-prepare
    add to log
    broadcast PREPARE(v, n, d, i) to all replicas

on receive PREPARE(v, n, d, j) from replica j:
    if v != current_view:
        ignore

    add to log[n].prepares

    if |log[n].prepares| >= 2f and not already_prepared(v, n, d):
        # Prepared certificate complete
        mark as prepared
        broadcast COMMIT(v, n, d, i) to all replicas

on receive COMMIT(v, n, d, j) from replica j:
    if v != current_view:
        ignore

    add to log[n].commits

    if |log[n].commits| >= 2f + 1 and prepared(v, n, d):
        # Committed certificate complete
        if all entries < n are committed:
            execute(m)
            send REPLY(v, t, c, i, result) to client
```
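These thresholds all derive from the n = 3f+1 fault bound: 2f matching prepares (plus the pre-prepare) and 2f+1 commits guarantee quorum intersection despite f Byzantine replicas. A small Go sketch of the arithmetic (illustrative helper, not from any PBFT library):

```go
// pbftQuorums computes the fault bound and message thresholds used
// above for a cluster of n replicas, assuming n = 3f+1.
func pbftQuorums(n int) (f, prepareThreshold, commitThreshold int) {
	f = (n - 1) / 3
	prepareThreshold = 2 * f  // matching PREPAREs (pre-prepare counted separately)
	commitThreshold = 2*f + 1 // matching COMMITs, own included
	return
}
```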
### View Change Protocol

**Timeout trigger**:
```
on request timeout (no progress):
    increase view-change timeout (back off on repeated view changes)
    broadcast VIEW-CHANGE(v+1, n, C, P, i)

where:
    n = last stable checkpoint sequence number
    C = checkpoint certificate (2f+1 checkpoint messages)
    P = set of prepared certificates for messages after n
```

**VIEW-CHANGE**:
```
{
  type: "VIEW-CHANGE",
  view: v,        # New view number
  sequence: n,    # Checkpoint sequence
  checkpoints: C, # Checkpoint certificate
  prepared: P,    # Set of prepared certificates
  replica: i
}
signature(replica_i)
```

**New primary (p' = v mod n)**:
```
on receive 2f VIEW-CHANGE messages for view v from other replicas
(2f+1 counting its own):
    V = set of valid view-change messages

    # Compute O: set of requests to re-propose
    O = {}
    for seq in max_checkpoint_seq(V) to max_seq(V):
        if exists prepared certificate for seq in V:
            O[seq] = request from certificate
        else:
            O[seq] = null-request # No-op

    broadcast NEW-VIEW(v, V, O)

    # Re-run protocol for requests in O
    for seq, request in O:
        if request != null:
            send PRE-PREPARE(v, seq, hash(request), request)
```

**NEW-VIEW**:
```
{
  type: "NEW-VIEW",
  view: v,
  view_changes: V, # 2f+1 view-change messages
  pre_prepares: O  # Set of pre-prepare messages
}
signature(primary)
```

### Checkpointing

Periodic stable checkpoints to garbage collect logs:

```
every K requests:
    state_hash = hash(state_machine_state)
    broadcast CHECKPOINT(n, state_hash, i)

on receive 2f+1 CHECKPOINT for (n, d):
    if all digests match:
        create stable checkpoint
        h = n # Move low water mark
        garbage_collect(entries < n)
```
## HotStuff Protocol

Linear-complexity BFT using threshold signatures.

### Key Innovation

- **Three-phase voting**: prepare → pre-commit → commit, followed by a decide step
- **Pipelining**: Next proposal starts before current finishes
- **Threshold signatures**: O(n) total messages instead of O(n²)

### Message Flow

```
Phase 1 (Prepare):
    Leader: broadcast PREPARE(v, node)
    Replicas: sign and send partial signature to leader
    Leader: aggregate into prepare certificate QC

Phase 2 (Pre-commit):
    Leader: broadcast PRE-COMMIT(v, QC_prepare)
    Replicas: sign and send partial signature
    Leader: aggregate into pre-commit certificate

Phase 3 (Commit):
    Leader: broadcast COMMIT(v, QC_precommit)
    Replicas: sign and send partial signature
    Leader: aggregate into commit certificate

Phase 4 (Decide):
    Leader: broadcast DECIDE(v, QC_commit)
    Replicas: execute and commit
```

### Pipelining

```
Block k:   [prepare] [pre-commit] [commit] [decide]
Block k+1:           [prepare]    [pre-commit] [commit] [decide]
Block k+2:                        [prepare]    [pre-commit] [commit] [decide]
```

Each phase of block k+1 piggybacks on messages for block k.

## Protocol Comparison Matrix

| Feature | Paxos | Raft | PBFT | HotStuff |
|---------|-------|------|------|----------|
| Fault model | Crash | Crash | Byzantine | Byzantine |
| Fault tolerance | f with 2f+1 | f with 2f+1 | f with 3f+1 | f with 3f+1 |
| Message complexity | O(n) | O(n) | O(n²) | O(n) |
| Leader required | No (helps) | Yes | Yes | Yes |
| Phases | 2 | 2 | 3 | 3 |
| View change | Complex | Simple | Complex | Simple |
610
.claude/skills/distributed-systems/references/logical-clocks.md
Normal file
@@ -0,0 +1,610 @@
# Logical Clocks - Implementation Reference

Detailed implementations and algorithms for causality tracking.

## Lamport Clock Implementation

### Data Structure

```go
type LamportClock struct {
	counter uint64
	mu      sync.Mutex
}

func NewLamportClock() *LamportClock {
	return &LamportClock{counter: 0}
}
```

### Operations

```go
// Tick increments clock for local event
func (c *LamportClock) Tick() uint64 {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counter++
	return c.counter
}

// Send returns timestamp for outgoing message
func (c *LamportClock) Send() uint64 {
	return c.Tick()
}

// Receive updates clock based on incoming message timestamp
func (c *LamportClock) Receive(msgTime uint64) uint64 {
	c.mu.Lock()
	defer c.mu.Unlock()

	if msgTime > c.counter {
		c.counter = msgTime
	}
	c.counter++
	return c.counter
}

// Time returns current clock value without incrementing
func (c *LamportClock) Time() uint64 {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.counter
}
```

### Usage Example

```go
// Process A
clockA := NewLamportClock()
e1 := clockA.Tick()      // Event 1: time=1
msgTime := clockA.Send() // Send: time=2

// Process B
clockB := NewLamportClock()
e2 := clockB.Tick()           // Event 2: time=1
e3 := clockB.Receive(msgTime) // Receive: time=3 (max(1,2)+1)
```
## Vector Clock Implementation

### Data Structure

```go
type VectorClock struct {
	clocks map[string]uint64 // processID -> logical time
	self   string            // this process's ID
	mu     sync.RWMutex
}

func NewVectorClock(processID string, allProcesses []string) *VectorClock {
	clocks := make(map[string]uint64)
	for _, p := range allProcesses {
		clocks[p] = 0
	}
	return &VectorClock{
		clocks: clocks,
		self:   processID,
	}
}
```

### Operations

```go
// Tick increments own clock
func (vc *VectorClock) Tick() map[string]uint64 {
	vc.mu.Lock()
	defer vc.mu.Unlock()

	vc.clocks[vc.self]++
	return vc.copy()
}

// Send returns copy of vector for message
func (vc *VectorClock) Send() map[string]uint64 {
	return vc.Tick()
}

// Receive merges incoming vector and increments
func (vc *VectorClock) Receive(incoming map[string]uint64) map[string]uint64 {
	vc.mu.Lock()
	defer vc.mu.Unlock()

	// Merge: take max of each component
	for pid, t := range incoming {
		if t > vc.clocks[pid] {
			vc.clocks[pid] = t
		}
	}

	// Increment own clock
	vc.clocks[vc.self]++
	return vc.copy()
}

// copy returns a copy of the vector
func (vc *VectorClock) copy() map[string]uint64 {
	result := make(map[string]uint64)
	for k, v := range vc.clocks {
		result[k] = v
	}
	return result
}
```

### Comparison Functions

```go
// Compare returns ordering relationship between two vectors
type Ordering int

const (
	Equal          Ordering = iota // V1 == V2
	HappenedBefore                 // V1 < V2
	HappenedAfter                  // V1 > V2
	Concurrent                     // V1 || V2
)

func Compare(v1, v2 map[string]uint64) Ordering {
	less := false
	greater := false

	// Get all keys
	allKeys := make(map[string]bool)
	for k := range v1 {
		allKeys[k] = true
	}
	for k := range v2 {
		allKeys[k] = true
	}

	for k := range allKeys {
		t1 := v1[k] // 0 if not present
		t2 := v2[k]

		if t1 < t2 {
			less = true
		}
		if t1 > t2 {
			greater = true
		}
	}

	if !less && !greater {
		return Equal
	}
	if less && !greater {
		return HappenedBefore
	}
	if greater && !less {
		return HappenedAfter
	}
	return Concurrent
}

// IsConcurrent checks if two events are concurrent
func IsConcurrent(v1, v2 map[string]uint64) bool {
	return Compare(v1, v2) == Concurrent
}

// CausallyPrecedes checks if v1 -> v2 (v1 causally precedes v2).
// Named to avoid colliding with the HappenedBefore constant above.
func CausallyPrecedes(v1, v2 map[string]uint64) bool {
	return Compare(v1, v2) == HappenedBefore
}
```
## Interval Tree Clock Implementation

### Data Structures

```go
// ID represents the identity tree
type ID struct {
	IsLeaf bool
	Value  int // 0 or 1 for leaves
	Left   *ID // nil for leaves
	Right  *ID
}

// Stamp represents the event tree
type Stamp struct {
	Base  int
	Left  *Stamp // nil for leaf stamps
	Right *Stamp
}

// ITC combines ID and Stamp
type ITC struct {
	ID    *ID
	Stamp *Stamp
}
```

### ID Operations

```go
// NewSeedID creates initial full ID (1)
func NewSeedID() *ID {
	return &ID{IsLeaf: true, Value: 1}
}

// Fork splits an ID into two
func (id *ID) Fork() (*ID, *ID) {
	if id.IsLeaf {
		if id.Value == 0 {
			// Cannot fork zero ID
			return &ID{IsLeaf: true, Value: 0},
				&ID{IsLeaf: true, Value: 0}
		}
		// Split full ID into left and right halves
		return &ID{
				IsLeaf: false,
				Left:   &ID{IsLeaf: true, Value: 1},
				Right:  &ID{IsLeaf: true, Value: 0},
			},
			&ID{
				IsLeaf: false,
				Left:   &ID{IsLeaf: true, Value: 0},
				Right:  &ID{IsLeaf: true, Value: 1},
			}
	}

	// Fork from non-leaf: give half to each
	if id.Left.IsLeaf && id.Left.Value == 0 {
		// Left is zero, fork right
		newRight1, newRight2 := id.Right.Fork()
		return &ID{IsLeaf: false, Left: id.Left, Right: newRight1},
			&ID{IsLeaf: false, Left: &ID{IsLeaf: true, Value: 0}, Right: newRight2}
	}
	if id.Right.IsLeaf && id.Right.Value == 0 {
		// Right is zero, fork left
		newLeft1, newLeft2 := id.Left.Fork()
		return &ID{IsLeaf: false, Left: newLeft1, Right: id.Right},
			&ID{IsLeaf: false, Left: newLeft2, Right: &ID{IsLeaf: true, Value: 0}}
	}

	// Both have IDs, split
	return &ID{IsLeaf: false, Left: id.Left, Right: &ID{IsLeaf: true, Value: 0}},
		&ID{IsLeaf: false, Left: &ID{IsLeaf: true, Value: 0}, Right: id.Right}
}

// Join merges two IDs
func Join(id1, id2 *ID) *ID {
	if id1.IsLeaf && id1.Value == 0 {
		return id2
	}
	if id2.IsLeaf && id2.Value == 0 {
		return id1
	}
	if id1.IsLeaf && id2.IsLeaf && id1.Value == 1 && id2.Value == 1 {
		return &ID{IsLeaf: true, Value: 1}
	}

	// Normalize to non-leaf
	left1 := id1.Left
	right1 := id1.Right
	left2 := id2.Left
	right2 := id2.Right

	if id1.IsLeaf {
		left1 = id1
		right1 = id1
	}
	if id2.IsLeaf {
		left2 = id2
		right2 = id2
	}

	newLeft := Join(left1, left2)
	newRight := Join(right1, right2)

	return normalize(&ID{IsLeaf: false, Left: newLeft, Right: newRight})
}

func normalize(id *ID) *ID {
	if !id.IsLeaf {
		if id.Left.IsLeaf && id.Right.IsLeaf &&
			id.Left.Value == id.Right.Value {
			return &ID{IsLeaf: true, Value: id.Left.Value}
		}
	}
	return id
}
```

### Stamp Operations

```go
// NewStamp creates initial stamp (0)
func NewStamp() *Stamp {
	return &Stamp{Base: 0}
}

// Event increments the stamp for the given ID
func Event(id *ID, stamp *Stamp) *Stamp {
	if id.IsLeaf {
		if id.Value == 1 {
			return &Stamp{Base: stamp.Base + 1}
		}
		return stamp // Cannot increment with zero ID
	}

	// Non-leaf ID: fill where we have ID
	if id.Left.IsLeaf && id.Left.Value == 1 {
		// Have left ID, increment left
		newLeft := Event(&ID{IsLeaf: true, Value: 1}, getLeft(stamp))
		return normalizeStamp(&Stamp{
			Base:  stamp.Base,
			Left:  newLeft,
			Right: getRight(stamp),
		})
	}
	if id.Right.IsLeaf && id.Right.Value == 1 {
		newRight := Event(&ID{IsLeaf: true, Value: 1}, getRight(stamp))
		return normalizeStamp(&Stamp{
			Base:  stamp.Base,
			Left:  getLeft(stamp),
			Right: newRight,
		})
	}

	// Both non-zero, choose lower side
	leftMax := maxStamp(getLeft(stamp))
	rightMax := maxStamp(getRight(stamp))

	if leftMax <= rightMax {
		return normalizeStamp(&Stamp{
			Base:  stamp.Base,
			Left:  Event(id.Left, getLeft(stamp)),
			Right: getRight(stamp),
		})
	}
	return normalizeStamp(&Stamp{
		Base:  stamp.Base,
		Left:  getLeft(stamp),
		Right: Event(id.Right, getRight(stamp)),
	})
}

func getLeft(s *Stamp) *Stamp {
	if s.Left == nil {
		return &Stamp{Base: 0}
	}
	return s.Left
}

func getRight(s *Stamp) *Stamp {
	if s.Right == nil {
		return &Stamp{Base: 0}
	}
	return s.Right
}

func maxStamp(s *Stamp) int {
	if s.Left == nil && s.Right == nil {
		return s.Base
	}
	left := 0
	right := 0
	if s.Left != nil {
		left = maxStamp(s.Left)
	}
	if s.Right != nil {
		right = maxStamp(s.Right)
	}
	max := left
	if right > max {
		max = right
	}
	return s.Base + max
}

// JoinStamps merges two stamps
func JoinStamps(s1, s2 *Stamp) *Stamp {
	// Take max at each level
	base := s1.Base
	if s2.Base > base {
		base = s2.Base
	}

	// Adjust for base difference
	adj1 := s1.Base
	adj2 := s2.Base

	return normalizeStamp(&Stamp{
		Base:  base,
		Left:  joinStampsRecursive(s1.Left, s2.Left, adj1-base, adj2-base),
		Right: joinStampsRecursive(s1.Right, s2.Right, adj1-base, adj2-base),
	})
}

func normalizeStamp(s *Stamp) *Stamp {
	if s.Left == nil && s.Right == nil {
		return s
	}
	if s.Left != nil && s.Right != nil {
		if s.Left.Base > 0 && s.Right.Base > 0 {
			min := s.Left.Base
			if s.Right.Base < min {
				min = s.Right.Base
			}
			return &Stamp{
				Base:  s.Base + min,
				Left:  &Stamp{Base: s.Left.Base - min, Left: s.Left.Left, Right: s.Left.Right},
				Right: &Stamp{Base: s.Right.Base - min, Left: s.Right.Left, Right: s.Right.Right},
			}
		}
	}
	return s
}
```
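`JoinStamps` above calls a `joinStampsRecursive` helper that this reference leaves undefined. One possible implementation, treating join as the pointwise maximum of the two event trees, with `o1`/`o2` carrying the base offset accumulated above each subtree (a sketch under that assumption, not canonical ITC code):

```go
// joinStampsRecursive merges two event subtrees as a pointwise max.
// o1 and o2 are the offsets of each subtree relative to the parent's
// chosen base (at every level, at least one of them is zero).
func joinStampsRecursive(s1, s2 *Stamp, o1, o2 int) *Stamp {
	// A missing subtree stands for the relative value 0
	if s1 == nil {
		s1 = &Stamp{}
	}
	if s2 == nil {
		s2 = &Stamp{}
	}
	b1 := o1 + s1.Base
	b2 := o2 + s2.Base
	base := b1
	if b2 > base {
		base = b2
	}

	// Both sides are leaves: the join is simply the larger value
	if s1.Left == nil && s1.Right == nil && s2.Left == nil && s2.Right == nil {
		if base == 0 {
			return nil // relative 0 is encoded as a missing subtree
		}
		return &Stamp{Base: base}
	}

	return normalizeStamp(&Stamp{
		Base:  base,
		Left:  joinStampsRecursive(s1.Left, s2.Left, b1-base, b2-base),
		Right: joinStampsRecursive(s1.Right, s2.Right, b1-base, b2-base),
	})
}
```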
## Hybrid Logical Clock Implementation

```go
type HLC struct {
	l  int64 // logical component (physical time)
	c  int64 // counter
	mu sync.Mutex
}

func NewHLC() *HLC {
	return &HLC{l: 0, c: 0}
}

type HLCTimestamp struct {
	L int64
	C int64
}

func (hlc *HLC) physicalTime() int64 {
	return time.Now().UnixNano()
}

// Now returns current HLC timestamp for local/send event
func (hlc *HLC) Now() HLCTimestamp {
	hlc.mu.Lock()
	defer hlc.mu.Unlock()

	pt := hlc.physicalTime()

	if pt > hlc.l {
		hlc.l = pt
		hlc.c = 0
	} else {
		hlc.c++
	}

	return HLCTimestamp{L: hlc.l, C: hlc.c}
}

// Update updates HLC based on received timestamp
func (hlc *HLC) Update(received HLCTimestamp) HLCTimestamp {
	hlc.mu.Lock()
	defer hlc.mu.Unlock()

	pt := hlc.physicalTime()

	if pt > hlc.l && pt > received.L {
		hlc.l = pt
		hlc.c = 0
	} else if received.L > hlc.l {
		hlc.l = received.L
		hlc.c = received.C + 1
	} else if hlc.l > received.L {
		hlc.c++
	} else { // hlc.l == received.L
		if received.C > hlc.c {
			hlc.c = received.C + 1
		} else {
			hlc.c++
		}
	}

	return HLCTimestamp{L: hlc.l, C: hlc.c}
}

// Compare compares two HLC timestamps
func (t1 HLCTimestamp) Compare(t2 HLCTimestamp) int {
	if t1.L < t2.L {
		return -1
	}
	if t1.L > t2.L {
		return 1
	}
	if t1.C < t2.C {
		return -1
	}
	if t1.C > t2.C {
		return 1
	}
	return 0
}
```
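A quick usage sketch (illustrative): even when the receiver's physical clock lags the sender's, the HLC timestamps still order the send before the receive.

```go
a := NewHLC()
b := NewHLC()

sendTS := a.Now()          // timestamp attached to the outgoing message
recvTS := b.Update(sendTS) // receiver folds it into its own clock

if sendTS.Compare(recvTS) < 0 {
	// always true: causality is preserved regardless of clock skew
}
```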
## Causal Broadcast Implementation

```go
// Message is the application payload; a byte slice stands in here.
type Message []byte

type CausalBroadcast struct {
	vc      *VectorClock
	pending []PendingMessage
	deliver func(Message)
	mu      sync.Mutex
}

type PendingMessage struct {
	Msg       Message
	Sender    string
	Timestamp map[string]uint64
}

func NewCausalBroadcast(processID string, processes []string, deliver func(Message)) *CausalBroadcast {
	return &CausalBroadcast{
		vc:      NewVectorClock(processID, processes),
		pending: make([]PendingMessage, 0),
		deliver: deliver,
	}
}

// Broadcast sends a message to all processes
func (cb *CausalBroadcast) Broadcast(msg Message) map[string]uint64 {
	cb.mu.Lock()
	defer cb.mu.Unlock()

	timestamp := cb.vc.Send()
	// Actual network broadcast would happen here
	return timestamp
}

// Receive handles an incoming message from the given sender
func (cb *CausalBroadcast) Receive(msg Message, sender string, timestamp map[string]uint64) {
	cb.mu.Lock()
	defer cb.mu.Unlock()

	// Add to pending
	cb.pending = append(cb.pending, PendingMessage{Msg: msg, Sender: sender, Timestamp: timestamp})

	// Try to deliver pending messages
	cb.tryDeliver()
}

func (cb *CausalBroadcast) tryDeliver() {
	changed := true
	for changed {
		changed = false

		for i, pending := range cb.pending {
			if cb.canDeliver(pending.Sender, pending.Timestamp) {
				// Deliver: merge the message's vector without incrementing
				// our own component (delivery is not a local send event)
				for pid, t := range pending.Timestamp {
					if t > cb.vc.clocks[pid] {
						cb.vc.clocks[pid] = t
					}
				}
				cb.deliver(pending.Msg)

				// Remove from pending
				cb.pending = append(cb.pending[:i], cb.pending[i+1:]...)
				changed = true
				break
			}
		}
	}
}

func (cb *CausalBroadcast) canDeliver(sender string, msgVC map[string]uint64) bool {
	currentVC := cb.vc.clocks

	for pid, msgTime := range msgVC {
		if pid == sender {
			// Must be the next message expected from its sender
			if msgTime != currentVC[pid]+1 {
				return false
			}
		} else {
			// All of the sender's other causal dependencies must be satisfied
			if msgTime > currentVC[pid] {
				return false
			}
		}
	}
	return true
}
```
369
.claude/skills/elliptic-curves/SKILL.md
Normal file
@@ -0,0 +1,369 @@
---
name: elliptic-curves
description: This skill should be used when working with elliptic curve cryptography, implementing or debugging secp256k1 operations, understanding modular arithmetic and finite fields, or implementing signature schemes like ECDSA and Schnorr. Provides comprehensive knowledge of group theory foundations, curve mathematics, point multiplication algorithms, and cryptographic optimizations.
---

# Elliptic Curve Cryptography

This skill provides deep knowledge of elliptic curve cryptography (ECC), with particular focus on the secp256k1 curve used in Bitcoin and Nostr, including the mathematical foundations and implementation considerations.

## When to Use This Skill

- Implementing or debugging elliptic curve operations
- Working with secp256k1, ECDSA, or Schnorr signatures
- Understanding modular arithmetic and finite field operations
- Optimizing cryptographic code for performance
- Analyzing security properties of curve-based cryptography

## Mathematical Foundations

### Groups in Cryptography

A **group** is a set G with a binary operation (often denoted · or +) satisfying:

1. **Closure**: For all a, b ∈ G, the result a · b is also in G
2. **Associativity**: (a · b) · c = a · (b · c)
3. **Identity**: There exists e ∈ G such that e · a = a · e = a
4. **Inverse**: For each a ∈ G, there exists a⁻¹ such that a · a⁻¹ = e

A **cyclic group** is generated by repeatedly applying the operation to a single element (the generator). The **order** of a group is the number of elements.

**Why groups matter in cryptography**: The discrete logarithm problem—given g and gⁿ, find n—is computationally hard in certain groups, forming the security basis for ECC.

### Modular Arithmetic

Modular arithmetic constrains calculations to a finite range [0, p-1] for some modulus p:

```
a ≡ b (mod p) means p divides (a - b)

Operations:
- Addition: (a + b) mod p
- Subtraction: (a - b + p) mod p
- Multiplication: (a × b) mod p
- Inverse: a⁻¹ where (a × a⁻¹) ≡ 1 (mod p)
```

**Computing modular inverse**:
- **Fermat's Little Theorem**: If p is prime, a⁻¹ ≡ a^(p-2) (mod p)
- **Extended Euclidean Algorithm**: More efficient for general cases
- **SafeGCD Algorithm**: Constant-time, used in libsecp256k1
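As a quick illustration of the first two approaches with Go's `math/big` (standard library; variable-time, so fine for exposition but not for secret data):

```go
import "math/big"

// Extended-Euclid-based inverse via the standard library; ModInverse
// returns nil when a and p are not coprime.
func modInverse(a, p *big.Int) *big.Int {
	return new(big.Int).ModInverse(a, p)
}

// Fermat variant for prime p: a^(p-2) mod p.
func modInverseFermat(a, p *big.Int) *big.Int {
	return new(big.Int).Exp(a, new(big.Int).Sub(p, big.NewInt(2)), p)
}
```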
### Finite Fields (Galois Fields)

A **finite field** GF(p) or 𝔽ₚ is a field with a finite number of elements where:
- p must be prime (or a prime power for extension fields)
- All arithmetic operations are defined and produce elements within the field
- Every non-zero element has a multiplicative inverse

For cryptographic curves like secp256k1, the field is 𝔽ₚ where p is a 256-bit prime.

**Key property**: The non-zero elements of a finite field form a cyclic group under multiplication.

## Elliptic Curves

### The Curve Equation

An elliptic curve over a finite field 𝔽ₚ is defined by the Weierstrass equation:

```
y² = x³ + ax + b (mod p)
```

The curve must satisfy the non-singularity condition: 4a³ + 27b² ≠ 0

### Points on the Curve

A point P = (x, y) is on the curve if it satisfies the equation. The set of all points, plus a special "point at infinity" O (the identity element), forms an abelian group.

### Point Operations

**Point Addition (P + Q where P ≠ Q)**:
```
λ = (y₂ - y₁) / (x₂ - x₁) (mod p)
x₃ = λ² - x₁ - x₂ (mod p)
y₃ = λ(x₁ - x₃) - y₁ (mod p)
```

**Point Doubling (P + P = 2P)**:
```
λ = (3x₁² + a) / (2y₁) (mod p)
x₃ = λ² - 2x₁ (mod p)
y₃ = λ(x₁ - x₃) - y₁ (mod p)
```

**Point at Infinity**: Acts as the identity element; P + O = P for all P.

**Point Negation**: -P = (x, -y) = (x, p - y)
## The secp256k1 Curve

### Parameters

secp256k1 is defined by SECG (Standards for Efficient Cryptography Group):

```
Curve equation: y² = x³ + 7 (a = 0, b = 7)

Prime modulus p:
0xFFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F
= 2²⁵⁶ - 2³² - 977

Group order n:
0xFFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE BAAEDCE6 AF48A03B BFD25E8C D0364141

Generator point G:
Gx = 0x79BE667E F9DCBBAC 55A06295 CE870B07 029BFCDB 2DCE28D9 59F2815B 16F81798
Gy = 0x483ADA77 26A3C465 5DA4FBFC 0E1108A8 FD17B448 A6855419 9C47D08F FB10D4B8

Cofactor h = 1
```

### Why secp256k1?

1. **Koblitz curve**: a = 0 enables faster computation (no ax term)
2. **Special prime**: p = 2²⁵⁶ - 2³² - 977 allows efficient modular reduction
3. **Deterministic construction**: Not randomly generated, reducing backdoor concerns
4. **~30% faster** than random curves when fully optimized

### Efficient Modular Reduction

The special form of p enables fast reduction without general division:

```
For p = 2²⁵⁶ - 2³² - 977:
To reduce a 512-bit number c = c_high × 2²⁵⁶ + c_low:
c ≡ c_low + c_high × 2³² + c_high × 977 (mod p)
```
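A tiny Go check of this identity with `math/big` (purely illustrative; real implementations work on fixed-size limbs, not arbitrary-precision integers):

```go
package main

import (
	"crypto/rand"
	"math/big"
)

func main() {
	p, _ := new(big.Int).SetString(
		"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
	two256 := new(big.Int).Lsh(big.NewInt(1), 256)

	// Random 512-bit value, split as c = c_high*2^256 + c_low
	c, _ := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 512))
	hi, lo := new(big.Int).DivMod(c, two256, new(big.Int))

	// c_low + c_high*2^32 + c_high*977 must be congruent to c mod p
	r := new(big.Int).Add(lo, new(big.Int).Lsh(hi, 32))
	r.Add(r, new(big.Int).Mul(hi, big.NewInt(977)))

	if new(big.Int).Mod(r, p).Cmp(new(big.Int).Mod(c, p)) != 0 {
		panic("reduction identity failed")
	}
}
```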
## Point Multiplication Algorithms

Scalar multiplication kP (computing P + P + ... + P, k times) is the core operation.

### Double-and-Add (Binary Method)

```
Input: k (scalar), P (point)
Output: kP

R = O (point at infinity)
for i from bit_length(k)-1 down to 0:
    R = 2R            # Point doubling
    if bit i of k is 1:
        R = R + P     # Point addition
return R
```

**Complexity**: O(log k) point operations
**Vulnerability**: Timing side-channels (different branches for 0/1 bits)

### Montgomery Ladder

Constant-time algorithm that performs the same operations regardless of bit values:

```
Input: k (scalar), P (point)
Output: kP

R0 = O
R1 = P
for i from bit_length(k)-1 down to 0:
    if bit i of k is 0:
        R1 = R0 + R1
        R0 = 2R0
    else:
        R0 = R0 + R1
        R1 = 2R1
return R0
```

**Advantage**: Resistant to simple power analysis and timing attacks.

### Window Methods (w-NAF)

Precompute small multiples of P, then process w bits at a time:

```
w-NAF representation reduces additions by ~1/3 compared to binary
Precomputation table: [P, 3P, 5P, 7P, ...] for w=4
```

### Endomorphism Optimization (GLV Method)

secp256k1 has an efficiently computable endomorphism φ where:
```
φ(x, y) = (βx, y) where β³ ≡ 1 (mod p)
φ(P) = λP where λ³ ≡ 1 (mod n)
```

This allows splitting scalar k into k₁ + k₂λ with smaller k₁, k₂, reducing operations by ~33-50%.
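The β and λ values are fixed curve constants (listed in `references/secp256k1-parameters.md`); a short Go check that β really is a cube root of unity mod p:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p, _ := new(big.Int).SetString(
		"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
	beta, _ := new(big.Int).SetString(
		"7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE", 16)

	cube := new(big.Int).Exp(beta, big.NewInt(3), p) // β³ mod p
	fmt.Println(cube.Cmp(big.NewInt(1)) == 0)        // prints true
}
```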
### Multi-Scalar Multiplication (Strauss-Shamir)

For computing k₁P₁ + k₂P₂ (common in signature verification):

```
Process both scalars simultaneously, combining operations
Reduces work compared to separate multiplications
```

## Coordinate Systems

### Affine Coordinates

Standard (x, y) representation. Requires modular inversion for each operation.

### Projective Coordinates

Represent (X:Y:Z) where x = X/Z, y = Y/Z:
- Avoids inversions during intermediate computations
- Only one inversion at the end to convert back to affine

### Jacobian Coordinates

Represent (X:Y:Z) where x = X/Z², y = Y/Z³:
- Fastest for point doubling
- Used extensively in libsecp256k1

### López-Dahab Coordinates

For curves over GF(2ⁿ), optimized for binary field arithmetic.

## Signature Schemes

### ECDSA (Elliptic Curve Digital Signature Algorithm)

**Key Generation**:
```
Private key: d (random integer in [1, n-1])
Public key: Q = dG
```

**Signing message m**:
```
1. Hash: e = H(m) truncated to curve order bit length
2. Random: k ∈ [1, n-1]
3. Compute: (x, y) = kG
4. Calculate: r = x mod n (if r = 0, restart with new k)
5. Calculate: s = k⁻¹(e + rd) mod n (if s = 0, restart)
6. Signature: (r, s)
```

**Verification of signature (r, s) on message m**:
```
1. Check: r, s ∈ [1, n-1]
2. Hash: e = H(m)
3. Compute: w = s⁻¹ mod n
4. Compute: u₁ = ew mod n, u₂ = rw mod n
5. Compute: (x, y) = u₁G + u₂Q
6. Valid if: r ≡ x (mod n)
```

**Security considerations**:
- k MUST be unique per signature (reuse leaks private key)
- Use RFC 6979 for deterministic k derivation

### Schnorr Signatures (BIP-340)

Simpler, more efficient, with provable security.

**Signing message m**:
```
1. Random: k ∈ [1, n-1]
2. Compute: R = kG
3. Challenge: e = H(R || Q || m)
4. Response: s = k + ed mod n
5. Signature: (R, s) or (r_x, s) where r_x is x-coordinate of R
```

**Verification**:
```
1. Compute: e = H(R || Q || m)
2. Check: sG = R + eQ
```

**Advantages over ECDSA**:
- Linear: enables signature aggregation (MuSig)
- Simpler verification (no modular inverse)
- Batch verification support
- Provably secure in Random Oracle Model

## Implementation Considerations

### Constant-Time Operations

To prevent timing attacks:
- Avoid branches dependent on secret data
- Use constant-time comparison functions
- Mask operations to hide data-dependent timing

```go
// BAD: Timing leak
if secretBit == 1 {
	doOperation()
}

// GOOD: Constant-time conditional
result = conditionalSelect(secretBit, value1, value0)
```
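The `conditionalSelect` above is left undefined; for integer selects, Go's standard library already provides a constant-time primitive. A minimal sketch:

```go
import "crypto/subtle"

// conditionalSelect returns value1 when secretBit == 1 and value0 when
// secretBit == 0, with no branch on the secret bit.
func conditionalSelect(secretBit, value1, value0 int) int {
	return subtle.ConstantTimeSelect(secretBit, value1, value0)
}
```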
### Memory Safety

- Zeroize sensitive data after use
- Avoid leaving secrets in registers or cache
- Use secure memory allocation when available

### Side-Channel Protections

- **Timing attacks**: Use constant-time algorithms
- **Power analysis**: Montgomery ladder, point blinding
- **Cache attacks**: Avoid table lookups indexed by secrets

### Random Number Generation

- Use cryptographically secure RNG for k in ECDSA
- Consider deterministic k (RFC 6979) for reproducibility
- Validate output is in valid range [1, n-1]

## libsecp256k1 Optimizations

The Bitcoin Core library includes:

1. **Field arithmetic**: 5×52-bit limbs for 64-bit platforms
2. **Scalar arithmetic**: 4×64-bit representation
3. **Endomorphism**: GLV decomposition enabled by default
4. **Batch inversion**: Amortizes expensive inversions
5. **SafeGCD**: Constant-time modular inverse
6. **Precomputed tables**: For generator point multiplications

## Security Properties

### Discrete Logarithm Problem (DLP)

Given P and Q = kP, finding k is computationally infeasible.

**Best known attacks**:
- Generic: Baby-step Giant-step, Pollard's rho: O(√n) operations
- For secp256k1: ~2¹²⁸ operations (128-bit security)

### Curve Security Criteria

- Large prime order subgroup
- Cofactor 1 (no small subgroup attacks)
- Resistant to MOV attack (embedding degree)
- Not anomalous (n ≠ p)

## Common Pitfalls

1. **k reuse in ECDSA**: Immediately leaks private key
2. **Weak random k**: Partially leaks key over multiple signatures
3. **Invalid curve points**: Validate points are on curve
4. **Small subgroup attacks**: Check point order (cofactor = 1 helps)
5. **Timing leaks**: Non-constant-time scalar multiplication

## References

For detailed implementations, see:
- `references/secp256k1-parameters.md` - Full curve parameters
- `references/algorithms.md` - Detailed algorithm pseudocode
- `references/security.md` - Security analysis and attack vectors
513
.claude/skills/elliptic-curves/references/algorithms.md
Normal file
@@ -0,0 +1,513 @@
# Elliptic Curve Algorithms

Detailed pseudocode for core elliptic curve operations.

## Field Arithmetic

### Modular Addition

```
function mod_add(a, b, p):
    result = a + b
    if result >= p:
        result = result - p
    return result
```

### Modular Subtraction

```
function mod_sub(a, b, p):
    if a >= b:
        return a - b
    else:
        return p - b + a
```

### Modular Multiplication

For general case:
```
function mod_mul(a, b, p):
    return (a * b) mod p
```

For secp256k1, reduction exploits the special prime form (a pseudo-Mersenne reduction, rather than a general Barrett reduction):
```
function mod_mul_secp256k1(a, b):
    # Compute full 512-bit product
    product = a * b

    # Split into high and low 256-bit parts
    low = product & ((1 << 256) - 1)
    high = product >> 256

    # Reduce: result ≡ low + high * (2³² + 977) (mod p)
    result = low + high * (1 << 32) + high * 977

    # May need additional folding/subtraction passes
    while result >= p:
        result = result - p

    return result
```
### Modular Inverse

**Extended Euclidean Algorithm**:
```
function mod_inverse(a, p):
    if a == 0:
        error "No inverse exists for 0"

    old_r, r = p, a
    old_s, s = 0, 1

    while r != 0:
        quotient = old_r / r
        old_r, r = r, old_r - quotient * r
        old_s, s = s, old_s - quotient * s

    if old_r != 1:
        error "No inverse exists"

    if old_s < 0:
        old_s = old_s + p

    return old_s
```

**Fermat's Little Theorem** (for prime p):
```
function mod_inverse_fermat(a, p):
    return mod_exp(a, p - 2, p)
```

### Modular Exponentiation (Square-and-Multiply)

```
function mod_exp(base, exp, p):
    result = 1
    base = base mod p

    while exp > 0:
        if exp & 1:  # exp is odd
            result = (result * base) mod p
        exp = exp >> 1
        base = (base * base) mod p

    return result
```

### Modular Square Root (Tonelli-Shanks)

For secp256k1, where p ≡ 3 (mod 4), the general Tonelli-Shanks procedure collapses to a single exponentiation:
```
function mod_sqrt(a, p):
    # For p ≡ 3 (mod 4), sqrt(a) = a^((p+1)/4)
    return mod_exp(a, (p + 1) / 4, p)
```
## Point Operations

### Point Validation

```
function is_on_curve(P, a, b, p):
    if P is infinity:
        return true

    x, y = P
    left = (y * y) mod p
    right = (x * x * x + a * x + b) mod p

    return left == right
```

### Point Addition (Affine Coordinates)

```
function point_add(P, Q, a, p):
    if P is infinity:
        return Q
    if Q is infinity:
        return P

    x1, y1 = P
    x2, y2 = Q

    if x1 == x2:
        if y1 == mod_sub(0, y2, p):  # y1 == -y2, so P = -Q
            return infinity
        else:  # P == Q
            return point_double(P, a, p)

    # λ = (y2 - y1) / (x2 - x1)
    numerator = mod_sub(y2, y1, p)
    denominator = mod_sub(x2, x1, p)
    λ = mod_mul(numerator, mod_inverse(denominator, p), p)

    # x3 = λ² - x1 - x2
    x3 = mod_sub(mod_sub(mod_mul(λ, λ, p), x1, p), x2, p)

    # y3 = λ(x1 - x3) - y1
    y3 = mod_sub(mod_mul(λ, mod_sub(x1, x3, p), p), y1, p)

    return (x3, y3)
```

### Point Doubling (Affine Coordinates)

```
function point_double(P, a, p):
    if P is infinity:
        return infinity

    x, y = P

    if y == 0:
        return infinity

    # λ = (3x² + a) / (2y)
    numerator = mod_add(mod_mul(3, mod_mul(x, x, p), p), a, p)
    denominator = mod_mul(2, y, p)
    λ = mod_mul(numerator, mod_inverse(denominator, p), p)

    # x3 = λ² - 2x
    x3 = mod_sub(mod_mul(λ, λ, p), mod_mul(2, x, p), p)

    # y3 = λ(x - x3) - y
    y3 = mod_sub(mod_mul(λ, mod_sub(x, x3, p), p), y, p)

    return (x3, y3)
```

### Point Negation

```
function point_negate(P, p):
    if P is infinity:
        return infinity

    x, y = P
    return (x, p - y)
```
## Scalar Multiplication

### Double-and-Add (Left-to-Right)

```
function scalar_mult_double_add(k, P, a, p):
    if k == 0 or P is infinity:
        return infinity

    if k < 0:
        k = -k
        P = point_negate(P, p)

    R = infinity
    bits = binary_representation(k)  # MSB first

    for bit in bits:
        R = point_double(R, a, p)
        if bit == 1:
            R = point_add(R, P, a, p)

    return R
```

### Montgomery Ladder (Constant-Time)

```
function scalar_mult_montgomery(k, P, a, p):
    R0 = infinity
    R1 = P

    bits = binary_representation(k)  # MSB first

    for bit in bits:
        if bit == 0:
            R1 = point_add(R0, R1, a, p)
            R0 = point_double(R0, a, p)
        else:
            R0 = point_add(R0, R1, a, p)
            R1 = point_double(R1, a, p)

    return R0
```

### w-NAF Scalar Multiplication

```
function compute_wNAF(k, w):
    # Convert scalar to width-w Non-Adjacent Form
    naf = []

    while k > 0:
        if k & 1:  # k is odd
            # Get w-bit window
            digit = k mod (1 << w)
            if digit >= (1 << (w-1)):
                digit = digit - (1 << w)
            naf.append(digit)
            k = k - digit
        else:
            naf.append(0)
        k = k >> 1

    return naf

function scalar_mult_wNAF(k, P, w, a, p):
    # Precompute odd multiples: [P, 3P, 5P, ..., (2^(w-1)-1)P]
    precomp = [P]
    P2 = point_double(P, a, p)
    for i in range(1, 1 << (w-1)):
        precomp.append(point_add(precomp[-1], P2, a, p))

    # Convert k to w-NAF
    naf = compute_wNAF(k, w)

    # Compute scalar multiplication
    R = infinity
    for i in range(len(naf) - 1, -1, -1):
        R = point_double(R, a, p)
        digit = naf[i]
        if digit > 0:
            R = point_add(R, precomp[(digit - 1) / 2], a, p)
        elif digit < 0:
            R = point_add(R, point_negate(precomp[(-digit - 1) / 2], p), a, p)

    return R
```
### Shamir's Trick (Multi-Scalar)

For computing k₁P + k₂Q efficiently:

```
function multi_scalar_mult(k1, P, k2, Q, a, p):
    # Precompute P + Q
    PQ = point_add(P, Q, a, p)

    # Get binary representations (same length, padded)
    bits1 = binary_representation(k1)
    bits2 = binary_representation(k2)
    max_len = max(len(bits1), len(bits2))
    bits1 = pad_left(bits1, max_len)
    bits2 = pad_left(bits2, max_len)

    R = infinity

    for i in range(max_len):
        R = point_double(R, a, p)

        b1, b2 = bits1[i], bits2[i]

        if b1 == 1 and b2 == 1:
            R = point_add(R, PQ, a, p)
        elif b1 == 1:
            R = point_add(R, P, a, p)
        elif b2 == 1:
            R = point_add(R, Q, a, p)

    return R
```
## Jacobian Coordinates

More efficient for repeated operations.

### Conversion

```
# Affine to Jacobian
function affine_to_jacobian(P):
    if P is infinity:
        return (1, 1, 0)  # Jacobian infinity
    x, y = P
    return (x, y, 1)

# Jacobian to Affine
function jacobian_to_affine(P, p):
    X, Y, Z = P
    if Z == 0:
        return infinity

    Z_inv = mod_inverse(Z, p)
    Z_inv2 = mod_mul(Z_inv, Z_inv, p)
    Z_inv3 = mod_mul(Z_inv2, Z_inv, p)

    x = mod_mul(X, Z_inv2, p)
    y = mod_mul(Y, Z_inv3, p)

    return (x, y)
```

### Point Doubling (Jacobian)

For curve y² = x³ + 7 (a = 0):

```
function jacobian_double(P, p):
    X, Y, Z = P

    if Y == 0:
        return (1, 1, 0)  # infinity

    # For a = 0: M = 3*X², S = 4*X*Y²
    YY = mod_mul(Y, Y, p)
    S = mod_mul(4, mod_mul(X, YY, p), p)
    M = mod_mul(3, mod_mul(X, X, p), p)

    # X3 = M² - 2S, Y3 = M(S - X3) - 8Y⁴, Z3 = 2YZ
    X3 = mod_sub(mod_mul(M, M, p), mod_mul(2, S, p), p)
    Y3 = mod_sub(mod_mul(M, mod_sub(S, X3, p), p),
                 mod_mul(8, mod_mul(YY, YY, p), p), p)
    Z3 = mod_mul(2, mod_mul(Y, Z, p), p)

    return (X3, Y3, Z3)
```

### Point Addition (Jacobian + Affine)

Mixed addition is faster when one point is in affine:

```
function jacobian_add_affine(P, Q, p):
    # P in Jacobian (X1, Y1, Z1), Q in affine (x2, y2)
    X1, Y1, Z1 = P
    x2, y2 = Q

    if Z1 == 0:
        return affine_to_jacobian(Q)

    Z1Z1 = mod_mul(Z1, Z1, p)
    U2 = mod_mul(x2, Z1Z1, p)
    S2 = mod_mul(y2, mod_mul(Z1, Z1Z1, p), p)

    H = mod_sub(U2, X1, p)  # if H == 0 and S2 == Y1, fall back to jacobian_double
    HH = mod_mul(H, H, p)
    I = mod_mul(4, HH, p)
    J = mod_mul(H, I, p)
    r = mod_mul(2, mod_sub(S2, Y1, p), p)
    V = mod_mul(X1, I, p)

    X3 = mod_sub(mod_sub(mod_mul(r, r, p), J, p), mod_mul(2, V, p), p)
    Y3 = mod_sub(mod_mul(r, mod_sub(V, X3, p), p), mod_mul(2, mod_mul(Y1, J, p), p), p)
    # Z3 = (Z1 + H)² - Z1Z1 - HH  (= 2*Z1*H)
    Z3 = mod_sub(mod_sub(mod_mul(mod_add(Z1, H, p), mod_add(Z1, H, p), p), Z1Z1, p), HH, p)

    return (X3, Y3, Z3)
```
## GLV Endomorphism (secp256k1)

### Scalar Decomposition

```
# Constants for secp256k1
LAMBDA = 0x5363AD4CC05C30E0A5261C028812645A122E22EA20816678DF02967C1B23BD72
BETA = 0x7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE

# Decomposition coefficients (see references/secp256k1-parameters.md)
A1 = 0x3086D221A7D46BCDE86C90E49284EB15
B1 = -0xE4437ED6010E88286F547FA90ABFE4C3
A2 = 0x114CA50F7A8E2F3F657C1108D9D44CFD8
B2 = A1

function glv_decompose(k, n):
    # Compute c1 = round(b2 * k / n)
    # Compute c2 = round(-b1 * k / n)
    c1 = (B2 * k + n // 2) // n
    c2 = (-B1 * k + n // 2) // n

    # k1 = k - c1*A1 - c2*A2
    # k2 = -c1*B1 - c2*B2
    k1 = k - c1 * A1 - c2 * A2
    k2 = -c1 * B1 - c2 * B2

    return (k1, k2)

function glv_scalar_mult(k, P, p, n):
    k1, k2 = glv_decompose(k, n)

    # Compute endomorphism: φ(P) = (β*x, y)
    x, y = P
    phi_P = (mod_mul(BETA, x, p), y)

    # Use Shamir's trick: k1*P + k2*φ(P)
    # (k1, k2 may be negative: negate the matching point and use |k|)
    return multi_scalar_mult(k1, P, k2, phi_P, 0, p)
```
## Batch Inversion

Amortize expensive inversions over multiple points:

```
function batch_invert(values, p):
    n = len(values)
    if n == 0:
        return []

    # Compute cumulative products
    products = [values[0]]
    for i in range(1, n):
        products.append(mod_mul(products[-1], values[i], p))

    # Invert the final product
    inv = mod_inverse(products[-1], p)

    # Compute individual inverses
    inverses = [0] * n
    for i in range(n - 1, 0, -1):
        inverses[i] = mod_mul(inv, products[i - 1], p)
        inv = mod_mul(inv, values[i], p)
    inverses[0] = inv

    return inverses
```

## Key Generation

```
function generate_keypair(G, n, p):
    # Generate random private key
    d = random_integer(1, n - 1)

    # Compute public key
    Q = scalar_mult(d, G)

    return (d, Q)
```

## Point Compression/Decompression

```
function compress_point(P, p):
    if P is infinity:
        return bytes([0x00])

    x, y = P
    prefix = 0x02 if (y % 2 == 0) else 0x03
    return bytes([prefix]) + x.to_bytes(32, 'big')

function decompress_point(compressed, a, b, p):
    prefix = compressed[0]

    if prefix == 0x00:
        return infinity

    x = int.from_bytes(compressed[1:], 'big')

    # Compute y² = x³ + ax + b
    y_squared = mod_add(mod_add(mod_mul(x, mod_mul(x, x, p), p),
                                mod_mul(a, x, p), p), b, p)

    # Compute y = sqrt(y²)
    y = mod_sqrt(y_squared, p)

    # Select correct y based on prefix
    if (prefix == 0x02) != (y % 2 == 0):
        y = p - y

    return (x, y)
```
194
.claude/skills/elliptic-curves/references/secp256k1-parameters.md
Normal file
@@ -0,0 +1,194 @@
# secp256k1 Complete Parameters

## Curve Definition

**Name**: secp256k1 (Standards for Efficient Cryptography, prime field, 256-bit, Koblitz curve #1)

**Equation**: y² = x³ + 7 (mod p)

This is the short Weierstrass form with coefficients a = 0, b = 7.

## Field Parameters

### Prime Modulus p

```
Decimal:
115792089237316195423570985008687907853269984665640564039457584007908834671663

Hexadecimal:
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F

Binary representation:
2²⁵⁶ - 2³² - 2⁹ - 2⁸ - 2⁷ - 2⁶ - 2⁴ - 1
= 2²⁵⁶ - 2³² - 977
```

**Special form benefits**:
- Efficient modular reduction using: c mod p = c_low + c_high × (2³² + 977)
- Near-Mersenne prime enables fast arithmetic

### Group Order n

```
Decimal:
115792089237316195423570985008687907852837564279074904382605163141518161494337

Hexadecimal:
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
```

The number of points on the curve, including the point at infinity.

### Cofactor h

```
h = 1
```

Cofactor 1 means the group order n equals the curve order, simplifying security analysis and eliminating small subgroup attacks.
## Generator Point G

### Compressed Form

```
02 79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
```

The 02 prefix indicates the y-coordinate is even.

### Uncompressed Form

```
04 79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
   483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
```

### Individual Coordinates

**Gx**:
```
Decimal:
55066263022277343669578718895168534326250603453777594175500187360389116729240

Hexadecimal:
0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
```

**Gy**:
```
Decimal:
32670510020758816978083085130507043184471273380659243275938904335757337482424

Hexadecimal:
0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
```
## Endomorphism Parameters

secp256k1 has an efficiently computable endomorphism φ: (x, y) → (βx, y).

### β (Beta)

```
Hexadecimal:
0x7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE

Property: β³ ≡ 1 (mod p)
```

### λ (Lambda)

```
Hexadecimal:
0x5363AD4CC05C30E0A5261C028812645A122E22EA20816678DF02967C1B23BD72

Property: λ³ ≡ 1 (mod n)
Relationship: φ(P) = λP for all points P
```

### GLV Decomposition Constants

For splitting scalar k into k₁ + k₂λ:

```
a₁ = 0x3086D221A7D46BCDE86C90E49284EB15
b₁ = -0xE4437ED6010E88286F547FA90ABFE4C3
a₂ = 0x114CA50F7A8E2F3F657C1108D9D44CFD8
b₂ = a₁
```
## Derived Constants

### Field Characteristics

```
(p + 1) / 4 = 0x3FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFBFFFFF0C
Used for computing modular square roots via the Tonelli-Shanks shortcut
```

### Order Characteristics

```
(n - 1) / 2 = 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0
Used in low-S normalization for ECDSA signatures
```

## Validation Formulas

### Point on Curve Check

For point (x, y), verify:
```
y² ≡ x³ + 7 (mod p)
```

### Generator Verification

Verify G is on curve:
```
Gy² mod p = 0x9C47D08FFB10D4B8 ... (truncated for display)
Gx³ + 7 mod p = same value
```

### Order Verification

Verify nG = O (point at infinity):
```
Computing n × G should yield the identity element
```
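The on-curve check is easy to run directly; a short Go verification for G using the constants above (illustrative only):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p, _ := new(big.Int).SetString(
		"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
	gx, _ := new(big.Int).SetString(
		"79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
	gy, _ := new(big.Int).SetString(
		"483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)

	lhs := new(big.Int).Exp(gy, big.NewInt(2), p) // Gy² mod p
	rhs := new(big.Int).Exp(gx, big.NewInt(3), p) // Gx³ + 7 mod p
	rhs.Add(rhs, big.NewInt(7)).Mod(rhs, p)

	fmt.Println(lhs.Cmp(rhs) == 0) // prints true
}
```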
## Bit Lengths

| Parameter | Bits | Bytes |
|-----------|------|-------|
| p (prime) | 256 | 32 |
| n (order) | 256 | 32 |
| Private key | 256 | 32 |
| Public key (compressed) | 264 | 33 |
| Public key (uncompressed) | 520 | 65 |
| ECDSA signature | 512 | 64 |
| Schnorr signature | 512 | 64 |

## Security Level

- **Equivalent symmetric key strength**: 128 bits
- **Best known attack complexity**: ~2¹²⁸ operations (Pollard's rho)
- **Safe until**: cryptographically relevant quantum computers arrive (estimates run to a few thousand error-corrected logical qubits)

## ASN.1 OID

```
1.3.132.0.10
iso(1) identified-organization(3) certicom(132) curve(0) secp256k1(10)
```

## Comparison with Other Curves

| Curve | Field Size | Security | Speed | Use Case |
|-------|------------|----------|-------|----------|
| secp256k1 | 256-bit | 128-bit | Fast (Koblitz) | Bitcoin, Nostr |
| secp256r1 (P-256) | 256-bit | 128-bit | Moderate | TLS, general |
| Curve25519 | 255-bit | ~128-bit | Very fast | Modern crypto |
| secp384r1 (P-384) | 384-bit | 192-bit | Slower | High security |
291 .claude/skills/elliptic-curves/references/security.md Normal file

@@ -0,0 +1,291 @@
# Elliptic Curve Security Analysis

Security properties, attack vectors, and mitigations for elliptic curve cryptography.

## The Discrete Logarithm Problem (ECDLP)

### Definition

Given points P and Q = kP on an elliptic curve, find the scalar k.

**Security assumption**: For properly chosen curves, this problem is computationally infeasible.

### Best Known Attacks

#### Generic Attacks (Work on Any Group)

| Attack | Complexity | Notes |
|--------|------------|-------|
| Baby-step Giant-step | O(√n) time and space | Requires √n storage |
| Pollard's rho | O(√n) time, O(1) space | Practical for large groups |
| Pollard's lambda | O(√n) | When k is in a known range |
| Pohlig-Hellman | O(√q), q the largest prime factor of n | Exploits the factorization of n |

For secp256k1 (n ≈ 2²⁵⁶):
- Generic attack complexity: ~2¹²⁸ operations
- Equivalent to 128-bit symmetric security

#### Curve-Specific Attacks

| Attack | Applicable When | Mitigation |
|--------|-----------------|------------|
| MOV/FR reduction | Low embedding degree | Use curves with high embedding degree |
| Anomalous curve attack | n = p | Ensure n ≠ p |
| GHS attack | Extension field curves | Use prime field curves |

**secp256k1 is immune to all known curve-specific attacks.**

## Side-Channel Attacks

### Timing Attacks

**Vulnerability**: Execution time varies based on secret data.

**Examples**:
- Conditional branches on secret bits
- Early exit conditions
- Variable-time modular operations

**Mitigations** (a minimal illustration follows this list):
- Constant-time algorithms (Montgomery ladder)
- Fixed execution paths
- Dummy operations to equalize timing
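
As a small illustration of the "no secret-dependent branches" rule, Go's `crypto/subtle` package compares byte slices without an early exit; a naive loop that returns on the first mismatch leaks the position of the difference through timing:

```go
package main

import (
    "crypto/subtle"
    "fmt"
)

// leakyEqual returns early on the first mismatch, so its running
// time depends on how many leading bytes match — a timing leak.
func leakyEqual(a, b []byte) bool {
    if len(a) != len(b) {
        return false
    }
    for i := range a {
        if a[i] != b[i] {
            return false // early exit: timing depends on secret data
        }
    }
    return true
}

func main() {
    mac := []byte("expected-mac-value")
    got := []byte("expected-mac-valuX")

    // Constant-time comparison: always scans the full length.
    ok := subtle.ConstantTimeCompare(mac, got) == 1
    fmt.Println(ok, leakyEqual(mac, got)) // false false
}
```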

### Power Analysis

**Simple Power Analysis (SPA)**: A single trace reveals operations.
- Double-and-add is visible as distinct power signatures
- Mitigation: Montgomery ladder (uniform operations)

**Differential Power Analysis (DPA)**: Statistical analysis of many traces.
- Mitigation: point blinding, scalar blinding

### Cache Attacks

**FLUSH+RELOAD Attack**:
```
1. Attacker flushes the cache line containing a lookup table
2. Victim performs a table lookup based on a secret
3. Attacker measures reload time to determine which entry was accessed
```

**Mitigations**:
- Avoid secret-dependent table lookups
- Use constant-time table access patterns
- Scatter tables to prevent cache line sharing

### Electromagnetic (EM) Attacks

Similar to power analysis, but captures electromagnetic emissions.

**Mitigations**:
- Shielding
- The same algorithmic protections as for power analysis

## Implementation Vulnerabilities

### k-Reuse in ECDSA

**The Sony PS3 Hack (2010)**:

If the same k is used for two signatures (r, s₁) and (r, s₂) on messages with hashes e₁ and e₂ (note that reusing k forces the same r):

```
s₁ = k⁻¹(e₁ + rd) mod n
s₂ = k⁻¹(e₂ + rd) mod n

Since k is the same:
s₁ - s₂ = k⁻¹(e₁ - e₂) mod n
k = (e₁ - e₂)(s₁ - s₂)⁻¹ mod n

Once k is known:
d = (s₁k - e₁)r⁻¹ mod n
```
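
The recovery algebra can be checked numerically without any curve arithmetic, since for a fixed r the equations are purely modular. A minimal Go sketch with `math/big`, using arbitrary toy values for d, k, r, e₁, e₂ (illustration only — in real ECDSA, r would be the x-coordinate of kG):

```go
package main

import (
    "fmt"
    "math/big"
)

func main() {
    n, _ := new(big.Int).SetString(
        "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

    // Toy secrets and hashes (arbitrary values, for illustration only).
    d := big.NewInt(123456789)  // private key
    k := big.NewInt(987654321)  // reused nonce
    r := big.NewInt(1111111111) // would be x-coord of kG in real ECDSA
    e1 := big.NewInt(42)
    e2 := big.NewInt(1337)

    kInv := new(big.Int).ModInverse(k, n)
    sig := func(e *big.Int) *big.Int { // s = k⁻¹(e + r·d) mod n
        s := new(big.Int).Mul(r, d)
        s.Add(s, e)
        s.Mul(s, kInv)
        return s.Mod(s, n)
    }
    s1, s2 := sig(e1), sig(e2)

    // k = (e1 - e2)(s1 - s2)⁻¹ mod n
    num := new(big.Int).Sub(e1, e2)
    den := new(big.Int).Sub(s1, s2)
    kRec := num.Mul(num, new(big.Int).ModInverse(den.Mod(den, n), n))
    kRec.Mod(kRec, n)

    // d = (s1·k - e1)·r⁻¹ mod n
    dRec := new(big.Int).Mul(s1, kRec)
    dRec.Sub(dRec, e1)
    dRec.Mul(dRec, new(big.Int).ModInverse(r, n))
    dRec.Mod(dRec, n)

    fmt.Println("recovered k:", kRec.Cmp(k) == 0) // true
    fmt.Println("recovered d:", dRec.Cmp(d) == 0) // true
}
```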

**Mitigation**: Use deterministic k (RFC 6979).

### Weak Random k

Even with unique k values, a biased RNG is dangerous:
- Lattice-based attacks can recover the private key
- Even a ~1% bias in k can be exploitable given enough signatures

**Mitigations**:
- Use a cryptographically secure RNG
- Use deterministic k (RFC 6979)
- Verify k is in the valid range [1, n-1]

### Invalid Curve Attacks

**Attack**: Attacker provides a point not on the curve.
- The point may lie on a weaker curve
- Operations on it may leak information

**Mitigation**: Always validate that points are on the curve:
```
Verify: y² ≡ x³ + ax + b (mod p)
```
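
For secp256k1 (a = 0, b = 7) the check reduces to y² ≡ x³ + 7 (mod p). A minimal validator sketch in Go:

```go
package main

import "math/big"

var secpP, _ = new(big.Int).SetString(
    "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)

// onCurve reports whether (x, y) satisfies y² ≡ x³ + 7 (mod p) and
// both coordinates are in [0, p). Reject any received point that
// fails this before doing scalar multiplication with it.
func onCurve(x, y *big.Int) bool {
    if x.Sign() < 0 || y.Sign() < 0 || x.Cmp(secpP) >= 0 || y.Cmp(secpP) >= 0 {
        return false
    }
    left := new(big.Int).Exp(y, big.NewInt(2), secpP)
    right := new(big.Int).Exp(x, big.NewInt(3), secpP)
    right.Add(right, big.NewInt(7))
    right.Mod(right, secpP)
    return left.Cmp(right) == 0
}
```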

### Small Subgroup Attacks

**Attack**: If the cofactor h > 1, points of small order exist.
- The attacker sends a point of small order
- The response reveals the private key mod (small order)

**Mitigation**:
- Use curves with cofactor 1 (secp256k1 has h = 1)
- Multiply received points by the cofactor
- Validate point order

### Fault Attacks

**Attack**: Induce computational errors (voltage glitches, radiation).
- Corrupted intermediate values may leak information
- Differential fault analysis can recover keys

**Mitigations**:
- Redundant computations with comparison
- Verify final results
- Hardware protections

## Signature Malleability

### ECDSA Malleability

Given a valid signature (r, s), the signature (r, n - s) is also valid for the same message.

**Impact**: Transaction ID malleability (a historical Bitcoin issue)

**Mitigation**: Enforce low-S normalization:
```
if s > n/2:
    s = n - s
```
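
In Go this is a one-liner with `math/big`; `nHalf` is the (n - 1) / 2 constant from the companion constants reference (s > n/2 for an integer s is equivalent to s > (n - 1)/2 because n is odd):

```go
package main

import "math/big"

var (
    secpN, _ = new(big.Int).SetString(
        "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
    nHalf = new(big.Int).Rsh(new(big.Int).Sub(secpN, big.NewInt(1)), 1)
)

// normalizeS maps s to its low-S form so (r, s) has a canonical encoding.
func normalizeS(s *big.Int) *big.Int {
    if s.Cmp(nHalf) > 0 {
        return new(big.Int).Sub(secpN, s)
    }
    return s
}
```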

### Schnorr Non-Malleability

BIP-340 Schnorr signatures are non-malleable by design:
- They use x-only public keys
- Nonce derivation is deterministic

## Quantum Threats

### Shor's Algorithm

**Threat**: Polynomial-time discrete log on quantum computers.
- Breaking secp256k1 would require on the order of 1500-2000 logical qubits
- Current quantum computers offer only small numbers of noisy, non-fault-tolerant qubits

**Timeline**: Cryptographically relevant quantum computers are estimated to be 10-20+ years away.

### Migration Strategy

1. **Monitor**: Track quantum computing progress
2. **Prepare**: Develop post-quantum alternatives
3. **Hybrid**: Use classical + post-quantum during the transition
4. **Migrate**: Transition fully when necessary

### Post-Quantum Alternatives

- Lattice-based signatures (CRYSTALS-Dilithium)
- Hash-based signatures (SPHINCS+)
- Code-based cryptography

## Best Practices

### Key Generation

```
DO:
- Use cryptographically secure RNG
- Validate private key is in [1, n-1]
- Verify public key is on curve
- Verify public key is not point at infinity

DON'T:
- Use predictable seeds
- Use truncated random values
- Skip validation
```
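
A minimal key-generation sketch following these rules, using `crypto/rand` to sample uniformly from [1, n-1]:

```go
package main

import (
    "crypto/rand"
    "fmt"
    "math/big"
)

var secpN, _ = new(big.Int).SetString(
    "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

// newPrivateKey samples a scalar uniformly from [1, n-1] using the OS
// CSPRNG. rand.Int returns a uniform value in [0, n-1), so adding one
// shifts the range to [1, n-1] without introducing bias.
func newPrivateKey() (*big.Int, error) {
    k, err := rand.Int(rand.Reader, new(big.Int).Sub(secpN, big.NewInt(1)))
    if err != nil {
        return nil, err
    }
    return k.Add(k, big.NewInt(1)), nil
}

func main() {
    d, err := newPrivateKey()
    if err != nil {
        panic(err)
    }
    fmt.Printf("d has %d bits, in range: %v\n",
        d.BitLen(), d.Sign() > 0 && d.Cmp(secpN) < 0)
}
```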

### Signature Generation

```
DO:
- Use RFC 6979 for deterministic k
- Validate all inputs
- Use constant-time operations
- Clear sensitive memory after use

DON'T:
- Reuse k values
- Use weak/biased RNG
- Skip low-S normalization (ECDSA)
```

### Signature Verification

```
DO:
- Validate r, s are in [1, n-1]
- Validate public key is on curve
- Validate public key is not infinity
- Use batch verification when possible

DON'T:
- Skip any validation steps
- Accept malformed signatures
```
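
The scalar range checks are the cheapest part and easy to get right up front; a sketch:

```go
package main

import "math/big"

var secpN, _ = new(big.Int).SetString(
    "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)

// validScalars rejects signatures whose components fall outside
// [1, n-1] before any expensive curve arithmetic runs.
func validScalars(r, s *big.Int) bool {
    return r.Sign() > 0 && r.Cmp(secpN) < 0 &&
        s.Sign() > 0 && s.Cmp(secpN) < 0
}
```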

### Public Key Handling

```
DO:
- Validate received points are on curve
- Check point is not infinity
- Prefer compressed format for storage

DON'T:
- Accept unvalidated points
- Skip curve membership check
```
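
Compressed encoding stores the 32-byte x coordinate behind a parity prefix; a minimal sketch (x is assumed to be a valid coordinate, i.e. below 2²⁵⁶):

```go
package main

import "math/big"

// compress encodes an affine point as 33 bytes: a prefix of 0x02 for
// even y or 0x03 for odd y, followed by x left-padded to 32 bytes.
// The full y can be recovered later via the (p+1)/4 square root.
func compress(x, y *big.Int) []byte {
    out := make([]byte, 33)
    out[0] = 0x02 | byte(y.Bit(0)) // parity of y selects the prefix
    x.FillBytes(out[1:])           // big-endian, zero-padded to 32 bytes
    return out
}
```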

## Security Checklist

### Implementation Review

- [ ] All scalar multiplications are constant-time
- [ ] No secret-dependent branches
- [ ] No secret-indexed table lookups
- [ ] Memory is zeroized after use
- [ ] Random k uses CSPRNG or RFC 6979
- [ ] All received points are validated
- [ ] Private keys are in valid range
- [ ] Signatures use low-S normalization

### Operational Security

- [ ] Private keys stored securely (HSM, secure enclave)
- [ ] Key derivation uses proper KDF
- [ ] Backups are encrypted
- [ ] Key rotation policy exists
- [ ] Audit logging enabled
- [ ] Incident response plan exists

## Security Levels Comparison

| Curve | Bits | Symmetric Equivalent | RSA Equivalent |
|-------|------|----------------------|----------------|
| secp192r1 | 192 | 96 | 1536 |
| secp224r1 | 224 | 112 | 2048 |
| secp256k1 | 256 | 128 | 3072 |
| secp384r1 | 384 | 192 | 7680 |
| secp521r1 | 521 | 256 | 15360 |

## References

- NIST SP 800-57: Recommendation for Key Management
- SEC 1: Elliptic Curve Cryptography
- RFC 6979: Deterministic Usage of DSA and ECDSA
- BIP-340: Schnorr Signatures for secp256k1
- SafeCurves: Choosing Safe Curves for Elliptic-Curve Cryptography

@@ -82,6 +82,49 @@ func (f *File) Read(p []byte) (n int, err error) {
}
```

### Interface Design - CRITICAL RULES

**Rule 1: Define interfaces in a dedicated package (e.g., `pkg/interfaces/<name>/`)**
- Interfaces provide isolation between packages and enable dependency inversion
- Keeping interfaces in a dedicated package prevents circular dependencies
- Each interface package should be minimal (just the interface, no implementations)

**Rule 2: NEVER use type assertions with interface literals**
- **NEVER** write `.(interface{ Method() Type })` - this is non-idiomatic and unmaintainable
- Interface literals cannot be documented, tested for satisfaction, or reused

```go
// BAD - interface literal in type assertion (NEVER DO THIS)
if checker, ok := obj.(interface{ Check() bool }); ok {
    checker.Check()
}

// GOOD - use defined interface from dedicated package
import "myproject/pkg/interfaces/checker"

if c, ok := obj.(checker.Checker); ok {
    c.Check()
}
```

**Rule 3: Resolving Circular Dependencies**
- If a circular dependency occurs, move the interface to `pkg/interfaces/`
- The implementing type stays in its original package
- The consuming code imports only the interface package
- Pattern:
```
pkg/interfaces/foo/   <- interface definition (no dependencies)
     ↑           ↑
  pkg/bar/    pkg/baz/
(implements)  (consumes via interface)
```

**Rule 4: Verify interface satisfaction at compile time**
```go
// Add this line to ensure *MyType implements MyInterface
var _ MyInterface = (*MyType)(nil)
```

### Concurrency

Use goroutines and channels for concurrent programming:

@@ -178,6 +221,26 @@ For detailed information, consult the reference files:
   - Start comments with the name being described
   - Use godoc format

6. **Configuration - CRITICAL**
   - **NEVER** use `os.Getenv()` scattered throughout packages
   - **ALWAYS** centralize environment variable parsing in a single config package (e.g., `app/config/`)
   - Pass configuration via structs, not by reading environment directly
   - This ensures discoverability, documentation, and testability of all config options

7. **Constants - CRITICAL**
   - **ALWAYS** define named constants for values used more than a few times
   - **ALWAYS** define named constants if multiple packages depend on the same value
   - Constants shared across packages belong in a dedicated package (e.g., `pkg/constants/`)
   - Magic numbers and strings are forbidden
   ```go
   // BAD - magic number
   if size > 1024 {

   // GOOD - named constant
   const MaxBufferSize = 1024
   if size > MaxBufferSize {
   ```

## Common Commands

```bash

7 .gitignore vendored
@@ -10,8 +10,6 @@
# Especially these
.vscode/
**/.vscode/
node_modules/
**/node_modules/
/test*
.idea/
# and others
@@ -98,6 +96,10 @@ cmd/benchmark/data
# Re-ignore IDE directories (must come after !*/)
.idea/
**/.idea/

# Re-ignore node_modules everywhere (must come after !*/)
node_modules/
**/node_modules/
/blocklist.json
/gui/gui/main.wasm
/gui/gui/index.html
@@ -105,7 +107,6 @@ pkg/database/testrealy
/ctxproxy.config.yml
cmd/benchmark/external/**
private*
pkg/protocol/directory-client/node_modules

# Build outputs
build/orly-*

69 CLAUDE.md
@@ -346,10 +346,11 @@ export ORLY_AUTH_TO_WRITE=false # Require auth only for writes
4. Events stored via `database.SaveEvent()`
5. Active subscriptions notified via `publishers.Publish()`

**Configuration System:**
**Configuration System - CRITICAL RULES:**
- Uses `go-simpler.org/env` for struct tags
- **IMPORTANT: ALL environment variables MUST be defined in `app/config/config.go`**
- Never use `os.Getenv()` directly in packages - always pass config via structs
- **ALL environment variables MUST be defined in `app/config/config.go`**
- **NEVER** use `os.Getenv()` directly in packages - always pass config via structs
- **NEVER** parse environment variables outside of `app/config/`
- This ensures all config options appear in `./orly help` output
- Database backends receive config via `database.DatabaseConfig` struct
- Use `GetDatabaseConfigValues()` helper to extract DB config from app config
@@ -358,6 +359,21 @@ export ORLY_AUTH_TO_WRITE=false # Require auth only for writes
- Default data directory: `~/.local/share/ORLY`
- Database-specific config (Neo4j, DGraph, Badger) is passed via `DatabaseConfig` struct in `pkg/database/factory.go`

**Constants - CRITICAL RULES:**
- **ALWAYS** define named constants for values used more than a few times
- **ALWAYS** define named constants if multiple packages depend on the same value
- Constants shared across packages should be in a dedicated package (e.g., `pkg/constants/`)
- Magic numbers and strings are forbidden - use named constants with clear documentation
- Example:
  ```go
  // BAD - magic number
  if timeout > 30 {

  // GOOD - named constant
  const DefaultTimeoutSeconds = 30
  if timeout > DefaultTimeoutSeconds {
  ```

**Event Publishing:**
- `pkg/protocol/publish/` manages publisher registry
- Each WebSocket connection registers its subscriptions
@@ -394,6 +410,53 @@ export ORLY_AUTH_TO_WRITE=false # Require auth only for writes
  ```
- This optimization saves memory and enables faster comparisons in the database layer

**Interface Design - CRITICAL RULES:**

**Rule 1: ALL interfaces MUST be defined in `pkg/interfaces/<name>/`**
- Interfaces provide isolation between packages and enable dependency inversion
- Keeping interfaces in a dedicated package prevents circular dependencies
- Each interface package should be minimal (just the interface, no implementations)

**Rule 2: NEVER use type assertions with interface literals**
- **NEVER** write `.(interface{ Method() Type })` - this is non-idiomatic and unmaintainable
- Interface literals cannot be documented, tested for satisfaction, or reused
- Example of WRONG approach:
  ```go
  // BAD - interface literal in type assertion
  if checker, ok := obj.(interface{ Check() bool }); ok {
      checker.Check()
  }
  ```
- Example of CORRECT approach:
  ```go
  // GOOD - use defined interface from pkg/interfaces/
  import "next.orly.dev/pkg/interfaces/checker"

  if c, ok := obj.(checker.Checker); ok {
      c.Check()
  }
  ```

**Rule 3: Resolving Circular Dependencies**
- If a circular dependency occurs when adding an interface, move the interface to `pkg/interfaces/`
- The implementing type stays in its original package
- The consuming code imports only the interface package
- This pattern:
  ```
  pkg/interfaces/foo/   <- interface definition (no dependencies)
       ↑           ↑
    pkg/bar/    pkg/baz/
  (implements)  (consumes via interface)
  ```

**Existing interfaces in `pkg/interfaces/`:**
- `acl/` - ACL and PolicyChecker interfaces
- `neterr/` - TimeoutError interface for network errors
- `resultiter/` - Neo4jResultIterator for database results
- `store/` - Storage-related interfaces
- `publisher/` - Event publishing interfaces
- `typer/` - Type identification interface

## Development Workflow

### Making Changes to Web UI

@@ -233,6 +233,21 @@ func ServeRequested() (requested bool) {
    return
}

// VersionRequested checks if the first command line argument is "version" and returns
// whether the version should be printed and the program should exit.
//
// Return Values
//   - requested: true if the 'version' subcommand was provided, false otherwise.
func VersionRequested() (requested bool) {
    if len(os.Args) > 1 {
        switch strings.ToLower(os.Args[1]) {
        case "version", "-v", "--v", "-version", "--version":
            requested = true
        }
    }
    return
}

// KV is a key/value pair.
type KV struct{ Key, Value string }

@@ -364,7 +379,7 @@ func PrintHelp(cfg *C, printer io.Writer) {
    )
    _, _ = fmt.Fprintf(
        printer,
        `Usage: %s [env|help|identity|serve]
        `Usage: %s [env|help|identity|serve|version]

- env: print environment variables configuring %s
- help: print this help text
@@ -372,6 +387,7 @@ func PrintHelp(cfg *C, printer io.Writer) {
- serve: start ephemeral relay with RAM-based storage at /dev/shm/orlyserve
  listening on 0.0.0.0:10547 with 'none' ACL mode (open relay)
  useful for testing and benchmarking
- version: print version and exit (also: -v, --v, -version, --version)

`,
        cfg.AppName, cfg.AppName,

@@ -12,6 +12,7 @@ import (
    "time"

    "github.com/gorilla/websocket"
    "next.orly.dev/pkg/interfaces/neterr"
)

var (
@@ -90,7 +91,7 @@ func main() {
    if ctx.Err() != nil {
        return
    }
    if netErr, ok := err.(interface{ Timeout() bool }); ok && netErr.Timeout() {
    if netErr, ok := err.(neterr.TimeoutError); ok && netErr.Timeout() {
        continue
    }
    log.Printf("Read error: %v", err)

@@ -13,6 +13,7 @@ import (
    "time"

    "github.com/gorilla/websocket"
    "next.orly.dev/pkg/interfaces/neterr"
)

var (
@@ -123,7 +124,7 @@ func main() {
    }

    // Check for timeout errors (these are expected during idle periods)
    if netErr, ok := err.(interface{ Timeout() bool }); ok && netErr.Timeout() {
    if netErr, ok := err.(neterr.TimeoutError); ok && netErr.Timeout() {
        consecutiveTimeouts++
        if consecutiveTimeouts >= maxConsecutiveTimeouts {
            log.Printf("Too many consecutive read timeouts (%d), connection may be dead", consecutiveTimeouts)

2 go.mod
@@ -30,6 +30,7 @@ require (
require (
    github.com/BurntSushi/toml v1.5.0 // indirect
    github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 // indirect
    github.com/aperturerobotics/go-indexeddb v0.2.3 // indirect
    github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect
    github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
    github.com/bytedance/sonic v1.13.1 // indirect
@@ -50,6 +51,7 @@ require (
    github.com/golang/protobuf v1.5.4 // indirect
    github.com/google/flatbuffers v25.9.23+incompatible // indirect
    github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
    github.com/hack-pad/safejs v0.1.1 // indirect
    github.com/josharian/intern v1.0.0 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/klauspost/cpuid/v2 v2.3.0 // indirect

4 go.sum
@@ -8,6 +8,8 @@ github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNN
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3/go.mod h1:we0YA5CsBbH5+/NUzC/AlMmxaDtWlXeNsqrwXjTzmzA=
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
github.com/aperturerobotics/go-indexeddb v0.2.3 h1:DfquIk9YEZjWD/lJyBWZWGCtRga43/a96bx0Ulv9VhQ=
github.com/aperturerobotics/go-indexeddb v0.2.3/go.mod h1:JV1XngOCCui7zrMSyRz+Wvz00nUSfotRKZqJzWpl5fQ=
github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
@@ -99,6 +101,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hack-pad/safejs v0.1.1 h1:d5qPO0iQ7h2oVtpzGnLExE+Wn9AtytxIfltcS2b9KD8=
github.com/hack-pad/safejs v0.1.1/go.mod h1:HdS+bKF1NrE72VoXZeWzxFOVQVUSqZJAG0xNCnb+Tio=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=

7 main.go
@@ -31,6 +31,13 @@ import (
func main() {
    runtime.GOMAXPROCS(128)
    debug.SetGCPercent(10)

    // Handle 'version' subcommand early, before any other initialization
    if config.VersionRequested() {
        fmt.Println(version.V)
        os.Exit(0)
    }

    var err error
    var cfg *config.C
    if cfg, err = config.New(); chk.T(err) {

@@ -2,20 +2,20 @@ package acl

import (
    "git.mleku.dev/mleku/nostr/encoders/event"
    "next.orly.dev/pkg/interfaces/acl"
    acliface "next.orly.dev/pkg/interfaces/acl"
    "next.orly.dev/pkg/utils/atomic"
)

var Registry = &S{}

type S struct {
    ACL []acl.I
    ACL []acliface.I
    Active atomic.String
}

type A struct{ S }

func (s *S) Register(i acl.I) {
func (s *S) Register(i acliface.I) {
    (*s).ACL = append((*s).ACL, i)
}

@@ -85,9 +85,7 @@ func (s *S) CheckPolicy(ev *event.E) (allowed bool, err error) {
    for _, i := range s.ACL {
        if i.Type() == s.Active.Load() {
            // Check if the ACL implementation has a CheckPolicy method
            if policyChecker, ok := i.(interface {
                CheckPolicy(ev *event.E) (allowed bool, err error)
            }); ok {
            if policyChecker, ok := i.(acliface.PolicyChecker); ok {
                return policyChecker.CheckPolicy(ev)
            }
            // If no CheckPolicy method, default to allowing

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (
@@ -72,8 +74,14 @@ func NewDatabaseWithConfig(
        return nil, fmt.Errorf("neo4j database backend not available (import _ \"next.orly.dev/pkg/neo4j\")")
    }
    return newNeo4jDatabase(ctx, cancel, cfg)
case "wasmdb", "indexeddb", "wasm":
    // Use the wasmdb implementation (IndexedDB backend for WebAssembly)
    if newWasmDBDatabase == nil {
        return nil, fmt.Errorf("wasmdb database backend not available (import _ \"next.orly.dev/pkg/wasmdb\")")
    }
    return newWasmDBDatabase(ctx, cancel, cfg)
default:
    return nil, fmt.Errorf("unsupported database type: %s (supported: badger, dgraph, neo4j)", dbType)
    return nil, fmt.Errorf("unsupported database type: %s (supported: badger, dgraph, neo4j, wasmdb)", dbType)
}
}

@@ -96,3 +104,13 @@ var newNeo4jDatabase func(context.Context, context.CancelFunc, *DatabaseConfig)
func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
    newNeo4jDatabase = factory
}

// newWasmDBDatabase creates a wasmdb database instance (IndexedDB backend for WebAssembly)
// This is defined here to avoid import cycles
var newWasmDBDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)

// RegisterWasmDBFactory registers the wasmdb database factory
// This is called from the wasmdb package's init() function
func RegisterWasmDBFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
    newWasmDBDatabase = factory
}

115 pkg/database/factory_wasm.go Normal file
@@ -0,0 +1,115 @@
//go:build js && wasm

package database

import (
    "context"
    "fmt"
    "strings"
    "time"
)

// DatabaseConfig holds all database configuration options that can be passed
// to any database backend. Each backend uses the relevant fields for its type.
// This centralizes configuration instead of having each backend read env vars directly.
type DatabaseConfig struct {
    // Common settings for all backends
    DataDir  string
    LogLevel string

    // Badger-specific settings (not available in WASM)
    BlockCacheMB         int           // ORLY_DB_BLOCK_CACHE_MB
    IndexCacheMB         int           // ORLY_DB_INDEX_CACHE_MB
    QueryCacheSizeMB     int           // ORLY_QUERY_CACHE_SIZE_MB
    QueryCacheMaxAge     time.Duration // ORLY_QUERY_CACHE_MAX_AGE
    InlineEventThreshold int           // ORLY_INLINE_EVENT_THRESHOLD

    // DGraph-specific settings
    DgraphURL string // ORLY_DGRAPH_URL

    // Neo4j-specific settings
    Neo4jURI      string // ORLY_NEO4J_URI
    Neo4jUser     string // ORLY_NEO4J_USER
    Neo4jPassword string // ORLY_NEO4J_PASSWORD
}

// NewDatabase creates a database instance based on the specified type.
// Supported types in WASM: "wasmdb", "dgraph", "neo4j"
// Note: "badger" is not available in WASM builds due to filesystem dependencies
func NewDatabase(
    ctx context.Context,
    cancel context.CancelFunc,
    dbType string,
    dataDir string,
    logLevel string,
) (Database, error) {
    // Create a default config for backward compatibility with existing callers
    cfg := &DatabaseConfig{
        DataDir:  dataDir,
        LogLevel: logLevel,
    }
    return NewDatabaseWithConfig(ctx, cancel, dbType, cfg)
}

// NewDatabaseWithConfig creates a database instance with full configuration.
// This is the preferred method when you have access to the app config.
func NewDatabaseWithConfig(
    ctx context.Context,
    cancel context.CancelFunc,
    dbType string,
    cfg *DatabaseConfig,
) (Database, error) {
    switch strings.ToLower(dbType) {
    case "wasmdb", "indexeddb", "wasm", "badger", "":
        // In WASM builds, default to wasmdb (IndexedDB backend)
        // "badger" is mapped to wasmdb since Badger is not available
        if newWasmDBDatabase == nil {
            return nil, fmt.Errorf("wasmdb database backend not available (import _ \"next.orly.dev/pkg/wasmdb\")")
        }
        return newWasmDBDatabase(ctx, cancel, cfg)
    case "dgraph":
        // Use the dgraph implementation (HTTP-based, works in WASM)
        if newDgraphDatabase == nil {
            return nil, fmt.Errorf("dgraph database backend not available (import _ \"next.orly.dev/pkg/dgraph\")")
        }
        return newDgraphDatabase(ctx, cancel, cfg)
    case "neo4j":
        // Use the neo4j implementation (HTTP-based, works in WASM)
        if newNeo4jDatabase == nil {
            return nil, fmt.Errorf("neo4j database backend not available (import _ \"next.orly.dev/pkg/neo4j\")")
        }
        return newNeo4jDatabase(ctx, cancel, cfg)
    default:
        return nil, fmt.Errorf("unsupported database type: %s (supported in WASM: wasmdb, dgraph, neo4j)", dbType)
    }
}

// newDgraphDatabase creates a dgraph database instance
// This is defined here to avoid import cycles
var newDgraphDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)

// RegisterDgraphFactory registers the dgraph database factory
// This is called from the dgraph package's init() function
func RegisterDgraphFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
    newDgraphDatabase = factory
}

// newNeo4jDatabase creates a neo4j database instance
// This is defined here to avoid import cycles
var newNeo4jDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)

// RegisterNeo4jFactory registers the neo4j database factory
// This is called from the neo4j package's init() function
func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
    newNeo4jDatabase = factory
}

// newWasmDBDatabase creates a wasmdb database instance (IndexedDB backend for WebAssembly)
// This is defined here to avoid import cycles
var newWasmDBDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)

// RegisterWasmDBFactory registers the wasmdb database factory
// This is called from the wasmdb package's init() function
func RegisterWasmDBFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
    newWasmDBDatabase = factory
}
@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

// Package database provides shared import utilities for events
package database

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (
@@ -11,13 +13,6 @@ import (
    "git.mleku.dev/mleku/nostr/encoders/hex"
)

// NIP43Membership represents membership metadata for NIP-43
type NIP43Membership struct {
    Pubkey     []byte
    AddedAt    time.Time
    InviteCode string
}

// Database key prefixes for NIP-43
const (
    nip43MemberPrefix = "nip43:member:"

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (

@@ -1,3 +1,5 @@
//go:build !(js && wasm)

package database

import (
@@ -12,13 +14,6 @@ import (
    "github.com/dgraph-io/badger/v4"
)

type Subscription struct {
    TrialEnd       time.Time `json:"trial_end"`
    PaidUntil      time.Time `json:"paid_until"`
    BlossomLevel   string    `json:"blossom_level,omitempty"` // Service level name (e.g., "basic", "premium")
    BlossomStorage int64     `json:"blossom_storage,omitempty"` // Storage quota in MB
}

func (d *D) GetSubscription(pubkey []byte) (*Subscription, error) {
    key := fmt.Sprintf("sub:%s", hex.EncodeToString(pubkey))
    var sub *Subscription
@@ -122,13 +117,6 @@ func (d *D) ExtendSubscription(pubkey []byte, days int) error {
    )
}

type Payment struct {
    Amount    int64     `json:"amount"`
    Timestamp time.Time `json:"timestamp"`
    Invoice   string    `json:"invoice"`
    Preimage  string    `json:"preimage"`
}

func (d *D) RecordPayment(
    pubkey []byte, amount int64, invoice, preimage string,
) error {

26 pkg/database/types.go Normal file
@@ -0,0 +1,26 @@
package database

import "time"

// Subscription represents a user's subscription status
type Subscription struct {
    TrialEnd       time.Time `json:"trial_end"`
    PaidUntil      time.Time `json:"paid_until"`
    BlossomLevel   string    `json:"blossom_level,omitempty"` // Service level name (e.g., "basic", "premium")
    BlossomStorage int64     `json:"blossom_storage,omitempty"` // Storage quota in MB
}

// Payment represents a recorded payment
type Payment struct {
    Amount    int64     `json:"amount"`
    Timestamp time.Time `json:"timestamp"`
    Invoice   string    `json:"invoice"`
    Preimage  string    `json:"preimage"`
}

// NIP43Membership represents membership metadata for NIP-43
type NIP43Membership struct {
    Pubkey     []byte
    AddedAt    time.Time
    InviteCode string
}
@@ -2,6 +2,7 @@
package acl

import (
    "git.mleku.dev/mleku/nostr/encoders/event"
    "next.orly.dev/pkg/interfaces/typer"
)

@@ -31,3 +32,9 @@ type I interface {
    Syncer()
    typer.T
}

// PolicyChecker is an optional interface that ACL implementations can implement
// to provide custom event policy checking beyond basic access level checks.
type PolicyChecker interface {
    CheckPolicy(ev *event.E) (allowed bool, err error)
}

8 pkg/interfaces/neterr/neterr.go Normal file
@@ -0,0 +1,8 @@
// Package neterr defines interfaces for network error handling.
package neterr

// TimeoutError is an interface for errors that can indicate a timeout.
// This is compatible with net.Error's Timeout() method.
type TimeoutError interface {
    Timeout() bool
}
16 pkg/interfaces/resultiter/resultiter.go Normal file
@@ -0,0 +1,16 @@
// Package resultiter defines interfaces for iterating over database query results.
package resultiter

import (
    "context"

    "github.com/neo4j/neo4j-go-driver/v5/neo4j"
)

// Neo4jResultIterator defines the interface for iterating over Neo4j query results.
// This is implemented by both neo4j.Result and CollectedResult types.
type Neo4jResultIterator interface {
    Next(context.Context) bool
    Record() *neo4j.Record
    Err() error
}
@@ -3,10 +3,11 @@ package neo4j

import (
    "context"
    "fmt"
    "time"

    "next.orly.dev/pkg/database/indexes/types"
    "git.mleku.dev/mleku/nostr/encoders/event"
    "git.mleku.dev/mleku/nostr/encoders/hex"
    "next.orly.dev/pkg/database/indexes/types"
)

// DeleteEvent deletes an event by its ID
@@ -39,10 +40,60 @@ func (n *N) DeleteEventBySerial(c context.Context, ser *types.Uint40, ev *event.
    return nil
}

// DeleteExpired deletes expired events (stub implementation)
// DeleteExpired deletes expired events based on NIP-40 expiration tags
// Events with an expiration property > 0 and <= current time are deleted
func (n *N) DeleteExpired() {
    // This would need to implement expiration logic based on event.expiration tag (NIP-40)
    // For now, this is a no-op
    ctx := context.Background()
    now := time.Now().Unix()

    // Query for expired events (expiration > 0 means it has an expiration, and <= now means it's expired)
    cypher := `
MATCH (e:Event)
WHERE e.expiration > 0 AND e.expiration <= $now
RETURN e.serial AS serial, e.id AS id
LIMIT 1000`

    params := map[string]any{"now": now}

    result, err := n.ExecuteRead(ctx, cypher, params)
    if err != nil {
        n.Logger.Warningf("failed to query expired events: %v", err)
        return
    }

    // Collect serials to delete
    var deleteCount int
    for result.Next(ctx) {
        record := result.Record()
        if record == nil {
            continue
        }

        idRaw, found := record.Get("id")
        if !found {
            continue
        }

        idStr, ok := idRaw.(string)
        if !ok {
            continue
        }

        // Delete the expired event
        deleteCypher := "MATCH (e:Event {id: $id}) DETACH DELETE e"
        deleteParams := map[string]any{"id": idStr}

        if _, err := n.ExecuteWrite(ctx, deleteCypher, deleteParams); err != nil {
            n.Logger.Warningf("failed to delete expired event %s: %v", idStr[:16], err)
            continue
        }

        deleteCount++
    }

    if deleteCount > 0 {
        n.Logger.Infof("deleted %d expired events", deleteCount)
    }
}
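
An editorial aside on the hunk above: the loop issues one `DETACH DELETE` round-trip per expired event. If that ever becomes a bottleneck, Cypher can scope and delete the whole batch in one write — a hedged sketch, not part of this commit, assuming `ExecuteWrite` accepts the same cypher/params signature used above:

```go
// deleteExpiredBatched is a possible variant (sketch only): delete up
// to 1000 expired events in a single round-trip. WITH ... LIMIT scopes
// the batch before DETACH DELETE removes the nodes and their edges.
func (n *N) deleteExpiredBatched() {
    ctx := context.Background()
    cypher := `
MATCH (e:Event)
WHERE e.expiration > 0 AND e.expiration <= $now
WITH e LIMIT 1000
DETACH DELETE e`
    params := map[string]any{"now": time.Now().Unix()}
    if _, err := n.ExecuteWrite(ctx, cypher, params); err != nil {
        n.Logger.Warningf("failed to delete expired events: %v", err)
    }
}
```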

// ProcessDelete processes a kind 5 deletion event

555 pkg/neo4j/delete_test.go Normal file
@@ -0,0 +1,555 @@
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/filter"
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
"git.mleku.dev/mleku/nostr/encoders/kind"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
"git.mleku.dev/mleku/nostr/encoders/timestamp"
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
)
|
||||
|
||||
func TestDeleteEvent(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create signer: %v", err)
|
||||
}
|
||||
if err := signer.Generate(); err != nil {
|
||||
t.Fatalf("Failed to generate keypair: %v", err)
|
||||
}
|
||||
|
||||
// Create and save event
|
||||
ev := event.New()
|
||||
ev.Pubkey = signer.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
ev.Kind = 1
|
||||
ev.Content = []byte("Event to be deleted")
|
||||
|
||||
if err := ev.Sign(signer); err != nil {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Verify event exists
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(ev.ID),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query event: %v", err)
|
||||
}
|
||||
if len(evs) != 1 {
|
||||
t.Fatalf("Expected 1 event before deletion, got %d", len(evs))
|
||||
}
|
||||
|
||||
// Delete the event
|
||||
if err := db.DeleteEvent(ctx, ev.ID[:]); err != nil {
|
||||
t.Fatalf("Failed to delete event: %v", err)
|
||||
}
|
||||
|
||||
// Verify event is deleted
|
||||
evs, err = db.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(ev.ID),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query after deletion: %v", err)
|
||||
}
|
||||
if len(evs) != 0 {
|
||||
t.Fatalf("Expected 0 events after deletion, got %d", len(evs))
|
||||
}
|
||||
|
||||
t.Logf("✓ DeleteEvent successfully removed event")
|
||||
}
|
||||
|
||||
func TestDeleteEventBySerial(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create signer: %v", err)
|
||||
}
|
||||
if err := signer.Generate(); err != nil {
|
||||
t.Fatalf("Failed to generate keypair: %v", err)
|
||||
}
|
||||
|
||||
// Create and save event
|
||||
ev := event.New()
|
||||
ev.Pubkey = signer.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
ev.Kind = 1
|
||||
ev.Content = []byte("Event to be deleted by serial")
|
||||
|
||||
if err := ev.Sign(signer); err != nil {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Get serial
|
||||
serial, err := db.GetSerialById(ev.ID[:])
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serial: %v", err)
|
||||
}
|
||||
|
||||
// Delete by serial
|
||||
if err := db.DeleteEventBySerial(ctx, serial, ev); err != nil {
|
||||
t.Fatalf("Failed to delete event by serial: %v", err)
|
||||
}
|
||||
|
||||
// Verify event is deleted
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(ev.ID),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query after deletion: %v", err)
|
||||
}
|
||||
if len(evs) != 0 {
|
||||
t.Fatalf("Expected 0 events after deletion, got %d", len(evs))
|
||||
}
|
||||
|
||||
t.Logf("✓ DeleteEventBySerial successfully removed event")
|
||||
}
|
||||
|
||||
func TestProcessDelete_AuthorCanDeleteOwnEvent(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create signer: %v", err)
|
||||
}
|
||||
if err := signer.Generate(); err != nil {
|
||||
t.Fatalf("Failed to generate keypair: %v", err)
|
||||
}
|
||||
|
||||
// Create and save original event
|
||||
originalEvent := event.New()
|
||||
originalEvent.Pubkey = signer.Pub()
|
||||
originalEvent.CreatedAt = timestamp.Now().V
|
||||
originalEvent.Kind = 1
|
||||
originalEvent.Content = []byte("This event will be deleted via kind 5")
|
||||
|
||||
if err := originalEvent.Sign(signer); err != nil {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, originalEvent); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Create kind 5 deletion event
|
||||
deleteEvent := event.New()
|
||||
deleteEvent.Pubkey = signer.Pub() // Same author
|
||||
deleteEvent.CreatedAt = timestamp.Now().V + 1
|
||||
deleteEvent.Kind = kind.Deletion.K
|
||||
deleteEvent.Content = []byte("Deleting my event")
|
||||
deleteEvent.Tags = tag.NewS(
|
||||
tag.NewFromAny("e", hex.Enc(originalEvent.ID[:])),
|
||||
)
|
||||
|
||||
if err := deleteEvent.Sign(signer); err != nil {
|
||||
t.Fatalf("Failed to sign delete event: %v", err)
|
||||
}
|
||||
|
||||
// Process deletion (no admins)
|
||||
if err := db.ProcessDelete(deleteEvent, nil); err != nil {
|
||||
t.Fatalf("Failed to process delete: %v", err)
|
||||
}
|
||||
|
||||
// Verify original event is deleted
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(originalEvent.ID),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query after deletion: %v", err)
|
||||
}
|
||||
if len(evs) != 0 {
|
||||
t.Fatalf("Expected 0 events after deletion, got %d", len(evs))
|
||||
}
|
||||
|
||||
t.Logf("✓ ProcessDelete allowed author to delete own event")
|
||||
}
|
||||
|
||||
func TestProcessDelete_OtherUserCannotDelete(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
|
||||
alice, _ := p8k.New()
|
||||
alice.Generate()
|
||||
|
||||
bob, _ := p8k.New()
|
||||
bob.Generate()
|
||||
|
||||
// Alice creates an event
|
||||
aliceEvent := event.New()
|
||||
aliceEvent.Pubkey = alice.Pub()
|
||||
aliceEvent.CreatedAt = timestamp.Now().V
|
||||
aliceEvent.Kind = 1
|
||||
aliceEvent.Content = []byte("Alice's event")
|
||||
|
||||
if err := aliceEvent.Sign(alice); err != nil {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, aliceEvent); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Bob tries to delete Alice's event
|
||||
deleteEvent := event.New()
|
||||
deleteEvent.Pubkey = bob.Pub() // Different author
|
||||
deleteEvent.CreatedAt = timestamp.Now().V + 1
|
||||
deleteEvent.Kind = kind.Deletion.K
|
||||
deleteEvent.Tags = tag.NewS(
|
||||
tag.NewFromAny("e", hex.Enc(aliceEvent.ID[:])),
|
||||
)
|
||||
|
||||
if err := deleteEvent.Sign(bob); err != nil {
|
||||
t.Fatalf("Failed to sign delete event: %v", err)
|
||||
}
|
||||
|
||||
// Process deletion (Bob is not an admin)
|
||||
_ = db.ProcessDelete(deleteEvent, nil)
|
||||
|
||||
// Verify Alice's event still exists
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(aliceEvent.ID),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query: %v", err)
|
||||
}
|
||||
if len(evs) != 1 {
|
||||
t.Fatalf("Expected Alice's event to still exist, got %d events", len(evs))
|
||||
}
|
||||
|
||||
t.Logf("✓ ProcessDelete correctly prevented unauthorized deletion")
|
||||
}
|
||||
|
||||
func TestProcessDelete_AdminCanDeleteAnyEvent(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
|
||||
alice, _ := p8k.New()
|
||||
alice.Generate()
|
||||
|
||||
admin, _ := p8k.New()
|
||||
admin.Generate()
|
||||
|
||||
// Alice creates an event
|
||||
aliceEvent := event.New()
|
||||
aliceEvent.Pubkey = alice.Pub()
|
||||
aliceEvent.CreatedAt = timestamp.Now().V
|
||||
aliceEvent.Kind = 1
|
||||
aliceEvent.Content = []byte("Alice's event to be deleted by admin")
|
||||
|
||||
if err := aliceEvent.Sign(alice); err != nil {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, aliceEvent); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Admin creates deletion event
|
||||
deleteEvent := event.New()
|
||||
deleteEvent.Pubkey = admin.Pub()
|
||||
deleteEvent.CreatedAt = timestamp.Now().V + 1
|
||||
deleteEvent.Kind = kind.Deletion.K
|
||||
deleteEvent.Tags = tag.NewS(
|
||||
tag.NewFromAny("e", hex.Enc(aliceEvent.ID[:])),
|
||||
)
|
||||
|
||||
if err := deleteEvent.Sign(admin); err != nil {
|
||||
t.Fatalf("Failed to sign delete event: %v", err)
|
||||
}
|
||||
|
||||
// Process deletion with admin pubkey
|
||||
adminPubkeys := [][]byte{admin.Pub()}
|
||||
if err := db.ProcessDelete(deleteEvent, adminPubkeys); err != nil {
|
||||
t.Fatalf("Failed to process delete: %v", err)
|
||||
}
|
||||
|
||||
// Verify Alice's event is deleted
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(aliceEvent.ID),
|
||||
})
|
||||
	if err != nil {
		t.Fatalf("Failed to query: %v", err)
	}
	if len(evs) != 0 {
		t.Fatalf("Expected Alice's event to be deleted, got %d events", len(evs))
	}

	t.Logf("✓ ProcessDelete allowed admin to delete event")
}

func TestCheckForDeleted(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create target event
	targetEvent := event.New()
	targetEvent.Pubkey = signer.Pub()
	targetEvent.CreatedAt = timestamp.Now().V
	targetEvent.Kind = 1
	targetEvent.Content = []byte("Target event")

	if err := targetEvent.Sign(signer); err != nil {
		t.Fatalf("Failed to sign target event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, targetEvent); err != nil {
		t.Fatalf("Failed to save target event: %v", err)
	}

	// Check that event is not deleted (no deletion event exists)
	err = db.CheckForDeleted(targetEvent, nil)
	if err != nil {
		t.Fatalf("Expected no error for non-deleted event, got: %v", err)
	}

	// Create deletion event that references target
	deleteEvent := event.New()
	deleteEvent.Pubkey = signer.Pub()
	deleteEvent.CreatedAt = timestamp.Now().V + 1
	deleteEvent.Kind = kind.Deletion.K
	deleteEvent.Tags = tag.NewS(
		tag.NewFromAny("e", hex.Enc(targetEvent.ID[:])),
	)

	if err := deleteEvent.Sign(signer); err != nil {
		t.Fatalf("Failed to sign delete event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, deleteEvent); err != nil {
		t.Fatalf("Failed to save delete event: %v", err)
	}

	// Now check should return error (event has been deleted)
	err = db.CheckForDeleted(targetEvent, nil)
	if err == nil {
		t.Fatal("Expected error for deleted event")
	}

	t.Logf("✓ CheckForDeleted correctly detected deletion event")
}

func TestReplaceableEventDeletion(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create replaceable event (kind 0 - profile)
	profileEvent := event.New()
	profileEvent.Pubkey = signer.Pub()
	profileEvent.CreatedAt = timestamp.Now().V
	profileEvent.Kind = 0
	profileEvent.Content = []byte(`{"name":"Test User"}`)

	if err := profileEvent.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, profileEvent); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}

	// Verify event exists
	evs, err := db.QueryEvents(ctx, &filter.F{
		Kinds:   kind.NewS(kind.New(0)),
		Authors: tag.NewFromBytesSlice(signer.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query: %v", err)
	}
	if len(evs) != 1 {
		t.Fatalf("Expected 1 profile event, got %d", len(evs))
	}

	// Create a newer replaceable event (replaces the old one)
	newerProfileEvent := event.New()
	newerProfileEvent.Pubkey = signer.Pub()
	newerProfileEvent.CreatedAt = timestamp.Now().V + 100
	newerProfileEvent.Kind = 0
	newerProfileEvent.Content = []byte(`{"name":"Updated User"}`)

	if err := newerProfileEvent.Sign(signer); err != nil {
		t.Fatalf("Failed to sign newer event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, newerProfileEvent); err != nil {
		t.Fatalf("Failed to save newer event: %v", err)
	}

	// Query should return only the newer event
	evs, err = db.QueryEvents(ctx, &filter.F{
		Kinds:   kind.NewS(kind.New(0)),
		Authors: tag.NewFromBytesSlice(signer.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query: %v", err)
	}
	if len(evs) != 1 {
		t.Fatalf("Expected 1 profile event after replacement, got %d", len(evs))
	}

	if hex.Enc(evs[0].ID[:]) != hex.Enc(newerProfileEvent.ID[:]) {
		t.Fatal("Expected newer profile event to be returned")
	}

	t.Logf("✓ Replaceable event correctly replaced by newer version")
}
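The lookup that TestCheckForDeleted exercises maps naturally onto a single Cypher read. The sketch below is illustrative only: the kind-5 usage follows NIP-09, but the `e_tags` list property is an assumption about the node schema, not the relay's actual implementation.

```go
// Sketch only: a deletion lookup like the one TestCheckForDeleted exercises.
// Assumes deletion events are stored as kind-5 Event nodes and that their
// "e" tag values are denormalized into an e_tags list property.
const checkDeletedCypher = `
MATCH (d:Event {kind: 5, pubkey: $author})
WHERE $targetId IN d.e_tags
RETURN d.id
LIMIT 1`
```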
570
pkg/neo4j/expiration_test.go
Normal file
@@ -0,0 +1,570 @@
package neo4j

import (
	"bytes"
	"context"
	"encoding/json"
	"os"
	"testing"
	"time"

	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/filter"
	"git.mleku.dev/mleku/nostr/encoders/hex"
	"git.mleku.dev/mleku/nostr/encoders/kind"
	"git.mleku.dev/mleku/nostr/encoders/tag"
	"git.mleku.dev/mleku/nostr/encoders/timestamp"
	"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)

func TestExpiration_SaveEventWithExpiration(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create event with expiration tag (expires in 1 hour)
	futureExpiration := time.Now().Add(1 * time.Hour).Unix()

	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Event with expiration")
	ev.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.From(futureExpiration).String()))

	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}

	// Query the event to verify it was saved
	evs, err := db.QueryEvents(ctx, &filter.F{
		Ids: tag.NewFromBytesSlice(ev.ID),
	})
	if err != nil {
		t.Fatalf("Failed to query event: %v", err)
	}

	if len(evs) != 1 {
		t.Fatalf("Expected 1 event, got %d", len(evs))
	}

	t.Logf("✓ Event with expiration tag saved successfully")
}

func TestExpiration_DeleteExpiredEvents(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create an expired event (expired 1 hour ago)
	pastExpiration := time.Now().Add(-1 * time.Hour).Unix()

	expiredEv := event.New()
	expiredEv.Pubkey = signer.Pub()
	expiredEv.CreatedAt = timestamp.Now().V - 7200 // 2 hours ago
	expiredEv.Kind = 1
	expiredEv.Content = []byte("Expired event")
	expiredEv.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.From(pastExpiration).String()))

	if err := expiredEv.Sign(signer); err != nil {
		t.Fatalf("Failed to sign expired event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, expiredEv); err != nil {
		t.Fatalf("Failed to save expired event: %v", err)
	}

	// Create a non-expired event (expires in 1 hour)
	futureExpiration := time.Now().Add(1 * time.Hour).Unix()

	validEv := event.New()
	validEv.Pubkey = signer.Pub()
	validEv.CreatedAt = timestamp.Now().V
	validEv.Kind = 1
	validEv.Content = []byte("Valid event")
	validEv.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.From(futureExpiration).String()))

	if err := validEv.Sign(signer); err != nil {
		t.Fatalf("Failed to sign valid event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, validEv); err != nil {
		t.Fatalf("Failed to save valid event: %v", err)
	}

	// Create an event without expiration
	permanentEv := event.New()
	permanentEv.Pubkey = signer.Pub()
	permanentEv.CreatedAt = timestamp.Now().V + 1
	permanentEv.Kind = 1
	permanentEv.Content = []byte("Permanent event (no expiration)")

	if err := permanentEv.Sign(signer); err != nil {
		t.Fatalf("Failed to sign permanent event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, permanentEv); err != nil {
		t.Fatalf("Failed to save permanent event: %v", err)
	}

	// Verify all 3 events exist
	evs, err := db.QueryEvents(ctx, &filter.F{
		Authors: tag.NewFromBytesSlice(signer.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query events: %v", err)
	}
	if len(evs) != 3 {
		t.Fatalf("Expected 3 events before deletion, got %d", len(evs))
	}

	// Run DeleteExpired
	db.DeleteExpired()

	// Verify only expired event was deleted
	evs, err = db.QueryEvents(ctx, &filter.F{
		Authors: tag.NewFromBytesSlice(signer.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query events after deletion: %v", err)
	}

	if len(evs) != 2 {
		t.Fatalf("Expected 2 events after deletion (expired removed), got %d", len(evs))
	}

	// Verify the correct events remain
	foundValid := false
	foundPermanent := false
	for _, ev := range evs {
		if hex.Enc(ev.ID[:]) == hex.Enc(validEv.ID[:]) {
			foundValid = true
		}
		if hex.Enc(ev.ID[:]) == hex.Enc(permanentEv.ID[:]) {
			foundPermanent = true
		}
	}

	if !foundValid {
		t.Fatal("Valid event (with future expiration) was incorrectly deleted")
	}
	if !foundPermanent {
		t.Fatal("Permanent event (no expiration) was incorrectly deleted")
	}

	t.Logf("✓ DeleteExpired correctly removed only expired events")
}

func TestExpiration_NoExpirationTag(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create event without expiration tag
	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Event without expiration")

	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}

	// Run DeleteExpired - event should not be deleted
	db.DeleteExpired()

	// Verify event still exists
	evs, err := db.QueryEvents(ctx, &filter.F{
		Ids: tag.NewFromBytesSlice(ev.ID),
	})
	if err != nil {
		t.Fatalf("Failed to query event: %v", err)
	}

	if len(evs) != 1 {
		t.Fatalf("Expected 1 event (no expiration should not be deleted), got %d", len(evs))
	}

	t.Logf("✓ Events without expiration tag are not deleted")
}

func TestExport_AllEvents(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create and save some events
	for i := 0; i < 5; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Test event for export")
		ev.Tags = tag.NewS(tag.NewFromAny("t", "test"))

		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}

		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
	}

	// Export all events
	var buf bytes.Buffer
	db.Export(ctx, &buf)

	// Parse the exported JSONL
	lines := bytes.Split(buf.Bytes(), []byte("\n"))
	validLines := 0
	for _, line := range lines {
		if len(line) == 0 {
			continue
		}
		var ev event.E
		if err := json.Unmarshal(line, &ev); err != nil {
			t.Fatalf("Failed to parse exported event: %v", err)
		}
		validLines++
	}

	if validLines != 5 {
		t.Fatalf("Expected 5 exported events, got %d", validLines)
	}

	t.Logf("✓ Export all events returned %d events in JSONL format", validLines)
}

func TestExport_FilterByPubkey(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	// Create two signers
	alice, _ := p8k.New()
	alice.Generate()

	bob, _ := p8k.New()
	bob.Generate()

	baseTs := timestamp.Now().V

	// Create events from Alice
	for i := 0; i < 3; i++ {
		ev := event.New()
		ev.Pubkey = alice.Pub()
		ev.CreatedAt = baseTs + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Alice's event")

		if err := ev.Sign(alice); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}

		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
	}

	// Create events from Bob
	for i := 0; i < 2; i++ {
		ev := event.New()
		ev.Pubkey = bob.Pub()
		ev.CreatedAt = baseTs + int64(i) + 10
		ev.Kind = 1
		ev.Content = []byte("Bob's event")

		if err := ev.Sign(bob); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}

		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
	}

	// Export only Alice's events
	var buf bytes.Buffer
	db.Export(ctx, &buf, alice.Pub())

	// Parse the exported JSONL
	lines := bytes.Split(buf.Bytes(), []byte("\n"))
	validLines := 0
	alicePubkey := hex.Enc(alice.Pub())
	for _, line := range lines {
		if len(line) == 0 {
			continue
		}
		var ev event.E
		if err := json.Unmarshal(line, &ev); err != nil {
			t.Fatalf("Failed to parse exported event: %v", err)
		}
		if hex.Enc(ev.Pubkey[:]) != alicePubkey {
			t.Fatalf("Exported event has wrong pubkey (expected Alice)")
		}
		validLines++
	}

	if validLines != 3 {
		t.Fatalf("Expected 3 events from Alice, got %d", validLines)
	}

	t.Logf("✓ Export with pubkey filter returned %d events from Alice only", validLines)
}

func TestExport_Empty(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	// Export from empty database
	var buf bytes.Buffer
	db.Export(ctx, &buf)

	// Should be empty or just whitespace
	content := bytes.TrimSpace(buf.Bytes())
	if len(content) != 0 {
		t.Fatalf("Expected empty export, got: %s", string(content))
	}

	t.Logf("✓ Export from empty database returns empty result")
}

func TestImportExport_RoundTrip(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, _ := p8k.New()
	signer.Generate()

	// Create original events
	originalEvents := make([]*event.E, 3)
	for i := 0; i < 3; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Round trip test event")
		ev.Tags = tag.NewS(tag.NewFromAny("t", "roundtrip"))

		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}

		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
		originalEvents[i] = ev
	}

	// Export events
	var buf bytes.Buffer
	db.Export(ctx, &buf)

	// Wipe database
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	// Verify database is empty
	evs, err := db.QueryEvents(ctx, &filter.F{
		Kinds: kind.NewS(kind.New(1)),
	})
	if err != nil {
		t.Fatalf("Failed to query events: %v", err)
	}
	if len(evs) != 0 {
		t.Fatalf("Expected 0 events after wipe, got %d", len(evs))
	}

	// Import events
	db.Import(bytes.NewReader(buf.Bytes()))

	// Verify events were restored
	evs, err = db.QueryEvents(ctx, &filter.F{
		Authors: tag.NewFromBytesSlice(signer.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query imported events: %v", err)
	}

	if len(evs) != 3 {
		t.Fatalf("Expected 3 imported events, got %d", len(evs))
	}

	// Verify event IDs match
	importedIDs := make(map[string]bool)
	for _, ev := range evs {
		importedIDs[hex.Enc(ev.ID[:])] = true
	}

	for _, orig := range originalEvents {
		if !importedIDs[hex.Enc(orig.ID[:])] {
			t.Fatalf("Original event %s not found after import", hex.Enc(orig.ID[:]))
		}
	}

	t.Logf("✓ Export/Import round trip preserved %d events correctly", len(evs))
}
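TestExpiration_DeleteExpiredEvents only pins down observable behaviour; a minimal Cypher write that would satisfy it is sketched below. The `expiration` property name is an assumption about how the tag is denormalized onto the node at save time.

```go
// Sketch only: a deletion pass consistent with the expiration tests above.
// Assumes the expiration tag value is stored as an e.expiration property;
// $now is the current Unix timestamp in seconds.
const deleteExpiredCypher = `
MATCH (e:Event)
WHERE e.expiration IS NOT NULL AND e.expiration < $now
DETACH DELETE e`
```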
502
pkg/neo4j/fetch-event_test.go
Normal file
@@ -0,0 +1,502 @@
package neo4j

import (
	"context"
	"os"
	"testing"

	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/filter"
	"git.mleku.dev/mleku/nostr/encoders/hex"
	"git.mleku.dev/mleku/nostr/encoders/tag"
	"git.mleku.dev/mleku/nostr/encoders/timestamp"
	"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
	"next.orly.dev/pkg/database/indexes/types"
)

func TestFetchEventBySerial(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create and save a test event
	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Test event for fetch by serial")

	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}

	// Get the serial for this event
	serial, err := db.GetSerialById(ev.ID[:])
	if err != nil {
		t.Fatalf("Failed to get serial by ID: %v", err)
	}

	// Fetch event by serial
	fetchedEvent, err := db.FetchEventBySerial(serial)
	if err != nil {
		t.Fatalf("Failed to fetch event by serial: %v", err)
	}

	if fetchedEvent == nil {
		t.Fatal("Expected fetched event to be non-nil")
	}

	// Verify event properties
	if hex.Enc(fetchedEvent.ID[:]) != hex.Enc(ev.ID[:]) {
		t.Fatalf("Event ID mismatch: got %s, expected %s",
			hex.Enc(fetchedEvent.ID[:]), hex.Enc(ev.ID[:]))
	}

	if fetchedEvent.Kind != ev.Kind {
		t.Fatalf("Kind mismatch: got %d, expected %d", fetchedEvent.Kind, ev.Kind)
	}

	if hex.Enc(fetchedEvent.Pubkey[:]) != hex.Enc(ev.Pubkey[:]) {
		t.Fatalf("Pubkey mismatch")
	}

	if fetchedEvent.CreatedAt != ev.CreatedAt {
		t.Fatalf("CreatedAt mismatch: got %d, expected %d",
			fetchedEvent.CreatedAt, ev.CreatedAt)
	}

	t.Logf("✓ FetchEventBySerial returned correct event")
}

func TestFetchEventBySerial_NonExistent(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	// Try to fetch with non-existent serial
	nonExistentSerial := &types.Uint40{}
	nonExistentSerial.Set(0xFFFFFFFFFF) // Max value

	_, err = db.FetchEventBySerial(nonExistentSerial)
	if err == nil {
		t.Fatal("Expected error for non-existent serial")
	}

	t.Logf("✓ FetchEventBySerial correctly returned error for non-existent serial")
}

func TestFetchEventsBySerials(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create and save multiple events
	var serials []*types.Uint40
	eventIDs := make(map[uint64]string)

	for i := 0; i < 5; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Test event")

		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}

		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}

		serial, err := db.GetSerialById(ev.ID[:])
		if err != nil {
			t.Fatalf("Failed to get serial: %v", err)
		}

		serials = append(serials, serial)
		eventIDs[serial.Get()] = hex.Enc(ev.ID[:])
	}

	// Fetch all events by serials
	events, err := db.FetchEventsBySerials(serials)
	if err != nil {
		t.Fatalf("Failed to fetch events by serials: %v", err)
	}

	if len(events) != 5 {
		t.Fatalf("Expected 5 events, got %d", len(events))
	}

	// Verify each event
	for serial, expectedID := range eventIDs {
		ev, exists := events[serial]
		if !exists {
			t.Fatalf("Event with serial %d not found", serial)
		}
		if hex.Enc(ev.ID[:]) != expectedID {
			t.Fatalf("Event ID mismatch for serial %d", serial)
		}
	}

	t.Logf("✓ FetchEventsBySerials returned %d correct events", len(events))
}

func TestGetSerialById(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create and save event
	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Test event")

	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}

	// Get serial by ID
	serial, err := db.GetSerialById(ev.ID[:])
	if err != nil {
		t.Fatalf("Failed to get serial by ID: %v", err)
	}

	if serial == nil {
		t.Fatal("Expected serial to be non-nil")
	}

	if serial.Get() == 0 {
		t.Fatal("Expected non-zero serial")
	}

	t.Logf("✓ GetSerialById returned serial: %d", serial.Get())
}

func TestGetSerialById_NonExistent(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	// Try to get serial for non-existent event
	fakeID, _ := hex.Dec("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")

	_, err = db.GetSerialById(fakeID)
	if err == nil {
		t.Fatal("Expected error for non-existent event ID")
	}

	t.Logf("✓ GetSerialById correctly returned error for non-existent ID")
}

func TestGetSerialsByIds(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create and save multiple events
	ids := tag.NewS()
	for i := 0; i < 3; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Test event")

		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}

		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}

		ids.Append(tag.NewFromAny("", hex.Enc(ev.ID[:])))
	}

	// Get serials by IDs
	serials, err := db.GetSerialsByIds(ids)
	if err != nil {
		t.Fatalf("Failed to get serials by IDs: %v", err)
	}

	if len(serials) != 3 {
		t.Fatalf("Expected 3 serials, got %d", len(serials))
	}

	t.Logf("✓ GetSerialsByIds returned %d serials", len(serials))
}

func TestGetFullIdPubkeyBySerial(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create and save event
	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Test event")

	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}

	// Get serial
	serial, err := db.GetSerialById(ev.ID[:])
	if err != nil {
		t.Fatalf("Failed to get serial: %v", err)
	}

	// Get full ID and pubkey
	idPkTs, err := db.GetFullIdPubkeyBySerial(serial)
	if err != nil {
		t.Fatalf("Failed to get full ID and pubkey: %v", err)
	}

	if idPkTs == nil {
		t.Fatal("Expected non-nil result")
	}

	if hex.Enc(idPkTs.Id) != hex.Enc(ev.ID[:]) {
		t.Fatalf("ID mismatch")
	}

	if hex.Enc(idPkTs.Pub) != hex.Enc(ev.Pubkey[:]) {
		t.Fatalf("Pubkey mismatch")
	}

	if idPkTs.Ts != ev.CreatedAt {
		t.Fatalf("Timestamp mismatch")
	}

	t.Logf("✓ GetFullIdPubkeyBySerial returned correct data")
}

func TestQueryForSerials(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create and save events
	for i := 0; i < 5; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte("Test event")

		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event: %v", err)
		}

		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event: %v", err)
		}
	}

	// Query for serials
	serials, err := db.QueryForSerials(ctx, &filter.F{
		Authors: tag.NewFromBytesSlice(signer.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query for serials: %v", err)
	}

	if len(serials) != 5 {
		t.Fatalf("Expected 5 serials, got %d", len(serials))
	}

	t.Logf("✓ QueryForSerials returned %d serials", len(serials))
}
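The serial tests above all follow the same round trip; a compact usage sketch of that flow, using only the methods the tests call:

```go
// lookupBySerial is a usage sketch of the API exercised above: an event ID
// resolves to a compact Uint40 serial, which resolves back to the stored
// event. Both calls return an error when the ID or serial is unknown.
func lookupBySerial(db *N, id []byte) (*event.E, error) {
	serial, err := db.GetSerialById(id)
	if err != nil {
		return nil, err
	}
	return db.FetchEventBySerial(serial)
}
```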
@@ -8,6 +8,8 @@ import (
	"io"

	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/hex"
	"git.mleku.dev/mleku/nostr/encoders/tag"
)

// Import imports events from a reader (JSONL format)
@@ -16,12 +18,119 @@ func (n *N) Import(rr io.Reader) {
}

// Export exports events to a writer (JSONL format).
// If pubkeys are provided, only events from those authors are exported;
// otherwise all events are exported.
func (n *N) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
	// Query all events or events for specific pubkeys, then write as JSONL.
	var cypher string
	params := make(map[string]any)

	if len(pubkeys) > 0 {
		// Export events for specific pubkeys
		pubkeyStrings := make([]string, len(pubkeys))
		for i, pk := range pubkeys {
			pubkeyStrings[i] = hex.Enc(pk)
		}
		params["pubkeys"] = pubkeyStrings
		cypher = `
MATCH (e:Event)
WHERE e.pubkey IN $pubkeys
RETURN e.id AS id, e.kind AS kind, e.pubkey AS pubkey,
       e.created_at AS created_at, e.content AS content,
       e.sig AS sig, e.tags AS tags
ORDER BY e.created_at ASC`
	} else {
		// Export all events
		cypher = `
MATCH (e:Event)
RETURN e.id AS id, e.kind AS kind, e.pubkey AS pubkey,
       e.created_at AS created_at, e.content AS content,
       e.sig AS sig, e.tags AS tags
ORDER BY e.created_at ASC`
	}

	result, err := n.ExecuteRead(c, cypher, params)
	if err != nil {
		n.Logger.Warningf("failed to query events for export: %v", err)
		fmt.Fprintf(w, "# Export failed: %v\n", err)
		return
	}

	count := 0
	for result.Next(c) {
		record := result.Record()
		if record == nil {
			continue
		}

		// Build event from record
		ev := &event.E{}

		// Parse ID
		if idRaw, found := record.Get("id"); found {
			if idStr, ok := idRaw.(string); ok {
				if idBytes, err := hex.Dec(idStr); err == nil && len(idBytes) == 32 {
					copy(ev.ID[:], idBytes)
				}
			}
		}

		// Parse kind
		if kindRaw, found := record.Get("kind"); found {
			if kindVal, ok := kindRaw.(int64); ok {
				ev.Kind = uint16(kindVal)
			}
		}

		// Parse pubkey
		if pkRaw, found := record.Get("pubkey"); found {
			if pkStr, ok := pkRaw.(string); ok {
				if pkBytes, err := hex.Dec(pkStr); err == nil && len(pkBytes) == 32 {
					copy(ev.Pubkey[:], pkBytes)
				}
			}
		}

		// Parse created_at
		if tsRaw, found := record.Get("created_at"); found {
			if tsVal, ok := tsRaw.(int64); ok {
				ev.CreatedAt = tsVal
			}
		}

		// Parse content
		if contentRaw, found := record.Get("content"); found {
			if contentStr, ok := contentRaw.(string); ok {
				ev.Content = []byte(contentStr)
			}
		}

		// Parse sig
		if sigRaw, found := record.Get("sig"); found {
			if sigStr, ok := sigRaw.(string); ok {
				if sigBytes, err := hex.Dec(sigStr); err == nil && len(sigBytes) == 64 {
					copy(ev.Sig[:], sigBytes)
				}
			}
		}

		// Parse tags (stored as JSON string)
		if tagsRaw, found := record.Get("tags"); found {
			if tagsStr, ok := tagsRaw.(string); ok {
				ev.Tags = &tag.S{}
				if err := json.Unmarshal([]byte(tagsStr), ev.Tags); err != nil {
					n.Logger.Warningf("failed to unmarshal tags: %v", err)
				}
			}
		}

		// Write event as JSON line
		if evJSON, err := json.Marshal(ev); err == nil {
			fmt.Fprintf(w, "%s\n", evJSON)
			count++
		}
	}

	n.Logger.Infof("exported %d events", count)
}

// ImportEventsFromReader imports events from a reader
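A minimal usage sketch for the Export/Import pair above, assuming a connected `*N` handle; both methods log failures rather than returning errors, so the caller only wires up the streams:

```go
// roundTrip dumps every stored event as JSONL and feeds the same stream
// back in, mirroring what TestImportExport_RoundTrip verifies.
func roundTrip(ctx context.Context, db *N) {
	var buf bytes.Buffer
	db.Export(ctx, &buf)                    // one JSON event per line
	db.Import(bytes.NewReader(buf.Bytes())) // re-ingest the exported stream
}
```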
342
pkg/neo4j/nip43_test.go
Normal file
@@ -0,0 +1,342 @@
package neo4j

import (
	"context"
	"os"
	"testing"
	"time"

	"git.mleku.dev/mleku/nostr/encoders/hex"
	"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)

func TestNIP43_AddAndRemoveMember(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, _ := p8k.New()
	signer.Generate()
	pubkey := signer.Pub()

	// Add member
	inviteCode := "test-invite-123"
	if err := db.AddNIP43Member(pubkey, inviteCode); err != nil {
		t.Fatalf("Failed to add NIP-43 member: %v", err)
	}

	// Check membership
	isMember, err := db.IsNIP43Member(pubkey)
	if err != nil {
		t.Fatalf("Failed to check membership: %v", err)
	}
	if !isMember {
		t.Fatal("Expected pubkey to be a member")
	}

	// Get membership details
	membership, err := db.GetNIP43Membership(pubkey)
	if err != nil {
		t.Fatalf("Failed to get membership: %v", err)
	}
	if membership.InviteCode != inviteCode {
		t.Fatalf("Invite code mismatch: got %s, expected %s", membership.InviteCode, inviteCode)
	}

	// Remove member
	if err := db.RemoveNIP43Member(pubkey); err != nil {
		t.Fatalf("Failed to remove member: %v", err)
	}

	// Verify no longer a member
	isMember, _ = db.IsNIP43Member(pubkey)
	if isMember {
		t.Fatal("Expected pubkey to not be a member after removal")
	}

	t.Logf("✓ NIP-43 add and remove member works correctly")
}

func TestNIP43_GetAllMembers(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	// Add multiple members
	var pubkeys [][]byte
	for i := 0; i < 3; i++ {
		signer, _ := p8k.New()
		signer.Generate()
		pubkey := signer.Pub()
		pubkeys = append(pubkeys, pubkey)

		if err := db.AddNIP43Member(pubkey, "invite"+string(rune('A'+i))); err != nil {
			t.Fatalf("Failed to add member %d: %v", i, err)
		}
	}

	// Get all members
	members, err := db.GetAllNIP43Members()
	if err != nil {
		t.Fatalf("Failed to get all members: %v", err)
	}

	if len(members) != 3 {
		t.Fatalf("Expected 3 members, got %d", len(members))
	}

	// Verify all added pubkeys are in the members list
	memberMap := make(map[string]bool)
	for _, m := range members {
		memberMap[hex.Enc(m)] = true
	}

	for i, pk := range pubkeys {
		if !memberMap[hex.Enc(pk)] {
			t.Fatalf("Member %d not found in list", i)
		}
	}

	t.Logf("✓ GetAllNIP43Members returned %d members", len(members))
}

func TestNIP43_InviteCode(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	// Store valid invite code (expires in 1 hour)
	validCode := "valid-code-123"
	expiresAt := time.Now().Add(1 * time.Hour)
	if err := db.StoreInviteCode(validCode, expiresAt); err != nil {
		t.Fatalf("Failed to store invite code: %v", err)
	}

	// Validate the code
	isValid, err := db.ValidateInviteCode(validCode)
	if err != nil {
		t.Fatalf("Failed to validate invite code: %v", err)
	}
	if !isValid {
		t.Fatal("Expected valid invite code to be valid")
	}

	// Test non-existent code
	isValid, err = db.ValidateInviteCode("non-existent-code")
	if err != nil {
		t.Fatalf("Failed to validate non-existent code: %v", err)
	}
	if isValid {
		t.Fatal("Expected non-existent code to be invalid")
	}

	// Delete the invite code
	if err := db.DeleteInviteCode(validCode); err != nil {
		t.Fatalf("Failed to delete invite code: %v", err)
	}

	// Verify code is no longer valid
	isValid, _ = db.ValidateInviteCode(validCode)
	if isValid {
		t.Fatal("Expected deleted code to be invalid")
	}

	t.Logf("✓ NIP-43 invite code operations work correctly")
}

func TestNIP43_ExpiredInviteCode(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	// Store expired invite code (expired 1 hour ago)
	expiredCode := "expired-code-123"
	expiresAt := time.Now().Add(-1 * time.Hour)
	if err := db.StoreInviteCode(expiredCode, expiresAt); err != nil {
		t.Fatalf("Failed to store expired invite code: %v", err)
	}

	// Validate should return false for expired code
	isValid, err := db.ValidateInviteCode(expiredCode)
	if err != nil {
		t.Fatalf("Failed to validate expired code: %v", err)
	}
	if isValid {
		t.Fatal("Expected expired code to be invalid")
	}

	t.Logf("✓ Expired invite code correctly detected as invalid")
}

func TestNIP43_DuplicateMember(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, _ := p8k.New()
	signer.Generate()
	pubkey := signer.Pub()

	// Add member first time
	if err := db.AddNIP43Member(pubkey, "invite1"); err != nil {
		t.Fatalf("Failed to add member: %v", err)
	}

	// Add same member again (should not error, just update)
	if err := db.AddNIP43Member(pubkey, "invite2"); err != nil {
		t.Fatalf("Failed to re-add member: %v", err)
	}

	// Check membership still exists
	isMember, _ := db.IsNIP43Member(pubkey)
	if !isMember {
		t.Fatal("Expected pubkey to still be a member")
	}

	// Get all members should have only 1 entry
	members, _ := db.GetAllNIP43Members()
	if len(members) != 1 {
		t.Fatalf("Expected 1 member, got %d", len(members))
	}

	t.Logf("✓ Duplicate member handling works correctly")
}

func TestNIP43_MembershipPersistence(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, _ := p8k.New()
	signer.Generate()
	pubkey := signer.Pub()

	// Add member
	inviteCode := "persistence-test"
	if err := db.AddNIP43Member(pubkey, inviteCode); err != nil {
		t.Fatalf("Failed to add member: %v", err)
	}

	// Get membership and verify all fields
	membership, err := db.GetNIP43Membership(pubkey)
	if err != nil {
		t.Fatalf("Failed to get membership: %v", err)
	}

	if membership.InviteCode != inviteCode {
		t.Fatalf("InviteCode mismatch")
	}

	if membership.AddedAt.IsZero() {
		t.Fatal("AddedAt should not be zero")
	}

	// Verify the pubkey in membership matches
	if hex.Enc(membership.Pubkey[:]) != hex.Enc(pubkey) {
		t.Fatal("Pubkey mismatch in membership")
	}

	t.Logf("✓ NIP-43 membership persistence verified")
}
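The expiry behaviour these invite-code tests pin down could be enforced in a single read query. A sketch follows; the `InviteCode` label and the `code`/`expires_at` properties are assumed names, not the relay's confirmed schema.

```go
// Sketch only: invite-code validation consistent with the tests above.
// A code is valid when a node with that code exists and has not expired;
// $now is the current Unix timestamp in seconds.
const validateInviteCypher = `
MATCH (i:InviteCode {code: $code})
WHERE i.expires_at > $now
RETURN i.code
LIMIT 1`
```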
@@ -5,12 +5,12 @@ import (
	"fmt"
	"strings"

	"github.com/neo4j/neo4j-go-driver/v5/neo4j"
	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/filter"
	"git.mleku.dev/mleku/nostr/encoders/hex"
	"git.mleku.dev/mleku/nostr/encoders/tag"
	"next.orly.dev/pkg/database/indexes/types"
	"next.orly.dev/pkg/interfaces/resultiter"
	"next.orly.dev/pkg/interfaces/store"
)

@@ -186,14 +186,10 @@ func (n *N) parseEventsFromResult(result any) ([]*event.E, error) {
	events := make([]*event.E, 0)
	ctx := context.Background()

	// Type assert to the result iterator interface
	resultIter, ok := result.(resultiter.Neo4jResultIterator)
	if !ok {
		return nil, fmt.Errorf("invalid result type: expected resultiter.Neo4jResultIterator")
	}

	// Iterate through result records

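The hunk above swaps an inline interface assertion for a named one. A plausible shape for `resultiter.Neo4jResultIterator`, inferred directly from the inline interface it replaces (the real package may carry more methods):

```go
package resultiter

import (
	"context"

	"github.com/neo4j/neo4j-go-driver/v5/neo4j"
)

// Neo4jResultIterator is inferred from the inline assertion replaced above.
type Neo4jResultIterator interface {
	Next(ctx context.Context) bool // advance; false when records are exhausted
	Record() *neo4j.Record         // current record, valid after Next returns true
	Err() error                    // iteration error, if any
}
```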
452
pkg/neo4j/query-events_test.go
Normal file
@@ -0,0 +1,452 @@
package neo4j

import (
	"context"
	"os"
	"testing"

	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/filter"
	"git.mleku.dev/mleku/nostr/encoders/hex"
	"git.mleku.dev/mleku/nostr/encoders/kind"
	"git.mleku.dev/mleku/nostr/encoders/tag"
	"git.mleku.dev/mleku/nostr/encoders/timestamp"
	"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)

// setupTestDatabase creates a fresh Neo4j database connection for testing
func setupTestDatabase(t *testing.T) (*N, context.Context, context.CancelFunc) {
	t.Helper()

	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		cancel()
		t.Fatalf("Failed to create database: %v", err)
	}

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		db.Close()
		cancel()
		t.Fatalf("Failed to wipe database: %v", err)
	}

	return db, ctx, cancel
}

// createTestSigner creates a new signer for test events
func createTestSigner(t *testing.T) *p8k.Signer {
	t.Helper()

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}
	return signer
}

// createAndSaveEvent creates a signed event and saves it to the database
func createAndSaveEvent(t *testing.T, ctx context.Context, db *N, signer *p8k.Signer, k uint16, content string, tags *tag.S, ts int64) *event.E {
	t.Helper()

	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = ts
	ev.Kind = k
	ev.Content = []byte(content)
	ev.Tags = tags

	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}

	return ev
}

func TestQueryEventsByID(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()

	signer := createTestSigner(t)

	// Create and save a test event
	ev := createAndSaveEvent(t, ctx, db, signer, 1, "Test event for ID query", nil, timestamp.Now().V)

	// Query by ID
	evs, err := db.QueryEvents(ctx, &filter.F{
		Ids: tag.NewFromBytesSlice(ev.ID),
	})
	if err != nil {
		t.Fatalf("Failed to query events by ID: %v", err)
	}

	if len(evs) != 1 {
		t.Fatalf("Expected 1 event, got %d", len(evs))
	}

	if hex.Enc(evs[0].ID[:]) != hex.Enc(ev.ID[:]) {
		t.Fatalf("Event ID mismatch: got %s, expected %s",
			hex.Enc(evs[0].ID[:]), hex.Enc(ev.ID[:]))
	}

	t.Logf("✓ Query by ID returned correct event")
}

func TestQueryEventsByKind(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()

	signer := createTestSigner(t)
	baseTs := timestamp.Now().V

	// Create events of different kinds
	createAndSaveEvent(t, ctx, db, signer, 1, "Kind 1 event A", nil, baseTs)
	createAndSaveEvent(t, ctx, db, signer, 1, "Kind 1 event B", nil, baseTs+1)
	createAndSaveEvent(t, ctx, db, signer, 7, "Kind 7 reaction", nil, baseTs+2)
	createAndSaveEvent(t, ctx, db, signer, 30023, "Kind 30023 article", nil, baseTs+3)

	// Query for kind 1
	evs, err := db.QueryEvents(ctx, &filter.F{
		Kinds: kind.NewS(kind.New(1)),
	})
	if err != nil {
		t.Fatalf("Failed to query events by kind: %v", err)
	}

	if len(evs) != 2 {
		t.Fatalf("Expected 2 kind 1 events, got %d", len(evs))
	}

	for _, ev := range evs {
		if ev.Kind != 1 {
			t.Fatalf("Expected kind 1, got %d", ev.Kind)
		}
	}

	t.Logf("✓ Query by kind returned %d correct events", len(evs))
}

func TestQueryEventsByAuthor(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()

	alice := createTestSigner(t)
	bob := createTestSigner(t)
	baseTs := timestamp.Now().V

	// Create events from different authors
	createAndSaveEvent(t, ctx, db, alice, 1, "Alice's event 1", nil, baseTs)
	createAndSaveEvent(t, ctx, db, alice, 1, "Alice's event 2", nil, baseTs+1)
	createAndSaveEvent(t, ctx, db, bob, 1, "Bob's event", nil, baseTs+2)

	// Query for Alice's events
	evs, err := db.QueryEvents(ctx, &filter.F{
		Authors: tag.NewFromBytesSlice(alice.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query events by author: %v", err)
	}

	if len(evs) != 2 {
		t.Fatalf("Expected 2 events from Alice, got %d", len(evs))
	}

	alicePubkey := hex.Enc(alice.Pub())
	for _, ev := range evs {
		if hex.Enc(ev.Pubkey[:]) != alicePubkey {
			t.Fatalf("Expected author %s, got %s", alicePubkey, hex.Enc(ev.Pubkey[:]))
		}
	}

	t.Logf("✓ Query by author returned %d correct events", len(evs))
}

func TestQueryEventsByTimeRange(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()

	signer := createTestSigner(t)
	baseTs := timestamp.Now().V

	// Create events at different times
	createAndSaveEvent(t, ctx, db, signer, 1, "Old event", nil, baseTs-7200)    // 2 hours ago
	createAndSaveEvent(t, ctx, db, signer, 1, "Recent event", nil, baseTs-1800) // 30 min ago
	createAndSaveEvent(t, ctx, db, signer, 1, "Current event", nil, baseTs)

	// Query for events in the last hour
	since := &timestamp.T{V: baseTs - 3600}
	evs, err := db.QueryEvents(ctx, &filter.F{
		Since: since,
	})
	if err != nil {
		t.Fatalf("Failed to query events by time range: %v", err)
	}

	if len(evs) != 2 {
		t.Fatalf("Expected 2 events in last hour, got %d", len(evs))
	}

	for _, ev := range evs {
		if ev.CreatedAt < since.V {
			t.Fatalf("Event created_at %d is before since %d", ev.CreatedAt, since.V)
		}
	}

	t.Logf("✓ Query by time range returned %d correct events", len(evs))
}

func TestQueryEventsByTag(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()

	signer := createTestSigner(t)
	baseTs := timestamp.Now().V

	// Create events with tags
	createAndSaveEvent(t, ctx, db, signer, 1, "Bitcoin post",
		tag.NewS(tag.NewFromAny("t", "bitcoin")), baseTs)
	createAndSaveEvent(t, ctx, db, signer, 1, "Nostr post",
		tag.NewS(tag.NewFromAny("t", "nostr")), baseTs+1)
	createAndSaveEvent(t, ctx, db, signer, 1, "Bitcoin and Nostr post",
		tag.NewS(tag.NewFromAny("t", "bitcoin"), tag.NewFromAny("t", "nostr")), baseTs+2)

	// Query for bitcoin tagged events
	evs, err := db.QueryEvents(ctx, &filter.F{
		Tags: tag.NewS(tag.NewFromAny("t", "bitcoin")),
	})
	if err != nil {
		t.Fatalf("Failed to query events by tag: %v", err)
	}

	if len(evs) != 2 {
		t.Fatalf("Expected 2 bitcoin-tagged events, got %d", len(evs))
	}

	t.Logf("✓ Query by tag returned %d correct events", len(evs))
}

func TestQueryEventsByKindAndAuthor(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()

	alice := createTestSigner(t)
	bob := createTestSigner(t)
	baseTs := timestamp.Now().V

	// Create events
	createAndSaveEvent(t, ctx, db, alice, 1, "Alice note", nil, baseTs)
	createAndSaveEvent(t, ctx, db, alice, 7, "Alice reaction", nil, baseTs+1)
	createAndSaveEvent(t, ctx, db, bob, 1, "Bob note", nil, baseTs+2)

	// Query for Alice's kind 1 events
	evs, err := db.QueryEvents(ctx, &filter.F{
		Kinds:   kind.NewS(kind.New(1)),
		Authors: tag.NewFromBytesSlice(alice.Pub()),
	})
	if err != nil {
		t.Fatalf("Failed to query events by kind and author: %v", err)
	}

	if len(evs) != 1 {
		t.Fatalf("Expected 1 kind 1 event from Alice, got %d", len(evs))
	}

	t.Logf("✓ Query by kind and author returned correct events")
}

func TestQueryEventsWithLimit(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()

	signer := createTestSigner(t)
	baseTs := timestamp.Now().V

	// Create many events
	for i := 0; i < 20; i++ {
		createAndSaveEvent(t, ctx, db, signer, 1, "Event", nil, baseTs+int64(i))
	}

	// Query with limit
	limit := 5
	evs, err := db.QueryEvents(ctx, &filter.F{
		Kinds: kind.NewS(kind.New(1)),
		Limit: limit,
	})
	if err != nil {
		t.Fatalf("Failed to query events with limit: %v", err)
	}

	if len(evs) != limit {
		t.Fatalf("Expected %d events with limit, got %d", limit, len(evs))
	}

	t.Logf("✓ Query with limit returned %d events", len(evs))
}

func TestQueryEventsOrderByCreatedAt(t *testing.T) {
	db, ctx, cancel := setupTestDatabase(t)
	defer db.Close()
	defer cancel()

	signer := createTestSigner(t)
	baseTs := timestamp.Now().V

	// Create events at different times
	createAndSaveEvent(t, ctx, db, signer, 1, "First", nil, baseTs)
	createAndSaveEvent(t, ctx, db, signer, 1, "Second", nil, baseTs+100)
	createAndSaveEvent(t, ctx, db, signer, 1, "Third", nil, baseTs+200)

	// Query and verify order (should be descending by created_at)
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1)),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query events: %v", err)
|
||||
}
|
||||
|
||||
if len(evs) < 2 {
|
||||
t.Fatalf("Expected at least 2 events, got %d", len(evs))
|
||||
}
|
||||
|
||||
// Verify descending order
|
||||
for i := 1; i < len(evs); i++ {
|
||||
if evs[i-1].CreatedAt < evs[i].CreatedAt {
|
||||
t.Fatalf("Events not in descending order: %d < %d at index %d",
|
||||
evs[i-1].CreatedAt, evs[i].CreatedAt, i)
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("✓ Query returned events in correct descending order")
|
||||
}
|
||||
|
||||
func TestQueryEventsEmpty(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
|
||||
// Query for non-existent kind
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(99999)),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query events: %v", err)
|
||||
}
|
||||
|
||||
if len(evs) != 0 {
|
||||
t.Fatalf("Expected 0 events, got %d", len(evs))
|
||||
}
|
||||
|
||||
t.Logf("✓ Query for non-existent kind returned empty result")
|
||||
}
|
||||
|
||||
func TestQueryEventsMultipleKinds(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
|
||||
signer := createTestSigner(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create events of different kinds
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Note", nil, baseTs)
|
||||
createAndSaveEvent(t, ctx, db, signer, 7, "Reaction", nil, baseTs+1)
|
||||
createAndSaveEvent(t, ctx, db, signer, 30023, "Article", nil, baseTs+2)
|
||||
|
||||
// Query for multiple kinds
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1), kind.New(7)),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query events: %v", err)
|
||||
}
|
||||
|
||||
if len(evs) != 2 {
|
||||
t.Fatalf("Expected 2 events (kind 1 and 7), got %d", len(evs))
|
||||
}
|
||||
|
||||
t.Logf("✓ Query for multiple kinds returned correct events")
|
||||
}
|
||||
|
||||
func TestQueryEventsMultipleAuthors(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
|
||||
alice := createTestSigner(t)
|
||||
bob := createTestSigner(t)
|
||||
charlie := createTestSigner(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create events from different authors
|
||||
createAndSaveEvent(t, ctx, db, alice, 1, "Alice", nil, baseTs)
|
||||
createAndSaveEvent(t, ctx, db, bob, 1, "Bob", nil, baseTs+1)
|
||||
createAndSaveEvent(t, ctx, db, charlie, 1, "Charlie", nil, baseTs+2)
|
||||
|
||||
// Query for Alice and Bob's events
|
||||
authors := tag.NewFromBytesSlice(alice.Pub())
|
||||
authors.Append(tag.NewFromBytesSlice(bob.Pub()).GetFirst(nil))
|
||||
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
Authors: authors,
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query events: %v", err)
|
||||
}
|
||||
|
||||
if len(evs) != 2 {
|
||||
t.Fatalf("Expected 2 events from Alice and Bob, got %d", len(evs))
|
||||
}
|
||||
|
||||
t.Logf("✓ Query for multiple authors returned correct events")
|
||||
}
|
||||
|
||||
func TestCountEvents(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
|
||||
signer := createTestSigner(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create events
|
||||
for i := 0; i < 5; i++ {
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Event", nil, baseTs+int64(i))
|
||||
}
|
||||
|
||||
// Count events
|
||||
count, err := db.CountEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1)),
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count events: %v", err)
|
||||
}
|
||||
|
||||
if count != 5 {
|
||||
t.Fatalf("Expected count 5, got %d", count)
|
||||
}
|
||||
|
||||
t.Logf("✓ Count events returned correct count: %d", count)
|
||||
}
|
||||
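The tests above lean on helpers defined elsewhere in the package (setupTestDatabase, createTestSigner, createAndSaveEvent). A minimal sketch of what the save helper presumably does follows; the *p8k.Signer parameter type is a guess at the concrete type returned by createTestSigner, and the real helper may differ.

```go
// Sketch of the createAndSaveEvent helper assumed by the tests above;
// illustrative only, the actual helper lives elsewhere in the package.
func createAndSaveEventSketch(
	t *testing.T, ctx context.Context, db *N, signer *p8k.Signer,
	k uint16, content string, tags *tag.S, createdAt int64,
) *event.E {
	t.Helper()
	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = createdAt
	ev.Kind = k
	ev.Content = []byte(content)
	ev.Tags = tags
	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}
	if _, err := db.SaveEvent(ctx, ev); err != nil {
		t.Fatalf("Failed to save event: %v", err)
	}
	return ev
}
```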
@@ -3,13 +3,19 @@ package neo4j
import (
	"context"
	"fmt"
	"strconv"

	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/filter"
	"git.mleku.dev/mleku/nostr/encoders/hex"
	"next.orly.dev/pkg/database/indexes/types"
)

// parseInt64 parses a string to int64
func parseInt64(s string) (int64, error) {
	return strconv.ParseInt(s, 10, 64)
}

// SaveEvent stores a Nostr event in the Neo4j database.
// It creates event nodes and relationships for authors, tags, and references.
// This method leverages Neo4j's graph capabilities to model Nostr's social graph naturally.
@@ -96,6 +102,17 @@ func (n *N) buildEventCreationCypher(ev *event.E, serial uint64) (string, map[st
	params["sig"] = hex.Enc(ev.Sig[:])
	params["pubkey"] = authorPubkey

	// Check for expiration tag (NIP-40)
	var expirationTs int64 = 0
	if ev.Tags != nil {
		if expTag := ev.Tags.GetFirst([]byte("expiration")); expTag != nil && len(expTag.T) >= 2 {
			if ts, err := parseInt64(string(expTag.T[1])); err == nil {
				expirationTs = ts
			}
		}
	}
	params["expiration"] = expirationTs

	// Serialize tags as JSON string for storage
	// Handle nil tags gracefully - nil means empty tags "[]"
	var tagsJSON []byte
@@ -112,7 +129,7 @@ func (n *N) buildEventCreationCypher(ev *event.E, serial uint64) (string, map[st
// Create or match author node
MERGE (a:Author {pubkey: $pubkey})

// Create event node with expiration for NIP-40 support
CREATE (e:Event {
  id: $eventId,
  serial: $serial,
@@ -121,7 +138,8 @@ CREATE (e:Event {
  content: $content,
  sig: $sig,
  pubkey: $pubkey,
  tags: $tags,
  expiration: $expiration
})

// Link event to author
@@ -154,13 +172,22 @@ CREATE (e)-[:AUTHORED_BY]->(a)
	paramName := fmt.Sprintf("eTag_%d", eTagIndex)
	params[paramName] = tagValue

	// Add WITH clause before OPTIONAL MATCH
	// This is required because:
	// 1. Cypher doesn't allow MATCH after CREATE without WITH
	// 2. Cypher doesn't allow MATCH after FOREACH without WITH
	// So we need WITH before EVERY OPTIONAL MATCH, not just the first
	if needsWithClause {
		cypher += `
// Carry forward event and author nodes for tag processing
WITH e, a
`
		needsWithClause = false
	} else {
		// After a FOREACH, we need WITH to transition back to MATCH
		cypher += `
WITH e, a
`
	}

	cypher += fmt.Sprintf(`
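Taken together, the hunks above make the generator emit a query shaped roughly like the following for an event with a single e-tag. This is an illustrative sketch assembled from the fragments shown; the kind/created_at field names and the FOREACH body are assumptions, not verbatim generator output.

```go
// Sketch of the Cypher the builder emits for one e-tag (illustrative only).
const exampleEventCypher = `
MERGE (a:Author {pubkey: $pubkey})
CREATE (e:Event {id: $eventId, serial: $serial, kind: $kind,
  created_at: $createdAt, content: $content, sig: $sig,
  pubkey: $pubkey, tags: $tags, expiration: $expiration})
CREATE (e)-[:AUTHORED_BY]->(a)
// WITH is mandatory here: Cypher forbids MATCH directly after CREATE
WITH e, a
OPTIONAL MATCH (ref:Event {id: $eTag_0})
// FOREACH emulates a conditional: one iteration when ref exists, zero otherwise
FOREACH (_ IN CASE WHEN ref IS NOT NULL THEN [1] ELSE [] END |
  CREATE (e)-[:REFERENCES]->(ref))
`
```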
824 pkg/neo4j/save-event_test.go Normal file
@@ -0,0 +1,824 @@
package neo4j

import (
	"context"
	"fmt"
	"os"
	"strings"
	"testing"

	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/hex"
	"git.mleku.dev/mleku/nostr/encoders/tag"
	"git.mleku.dev/mleku/nostr/encoders/timestamp"
	"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)

// TestCypherQueryGeneration_WithClause is a unit test that validates the WITH clause fix
// without requiring a Neo4j instance. This test verifies the generated Cypher string
// has correct syntax for different tag combinations.
func TestCypherQueryGeneration_WithClause(t *testing.T) {
	// Create a mock N struct - we only need it to call buildEventCreationCypher
	// No actual Neo4j connection is needed for this unit test
	n := &N{}

	// Generate test keypair
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	tests := []struct {
		name                string
		tags                *tag.S
		expectWithClause    bool
		expectOptionalMatch bool
		description         string
	}{
		{
			name:                "NoTags",
			tags:                nil,
			expectWithClause:    false,
			expectOptionalMatch: false,
			description:         "Event without tags",
		},
		{
			name: "OnlyPTags_NoWithNeeded",
			tags: tag.NewS(
				tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000001"),
				tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000002"),
			),
			expectWithClause:    false,
			expectOptionalMatch: false,
			description:         "p-tags use MERGE (not OPTIONAL MATCH), no WITH needed",
		},
		{
			name: "OnlyETags_WithRequired",
			tags: tag.NewS(
				tag.NewFromAny("e", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
				tag.NewFromAny("e", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
			),
			expectWithClause:    true,
			expectOptionalMatch: true,
			description:         "e-tags use OPTIONAL MATCH which requires WITH clause after CREATE",
		},
		{
			name: "ETagBeforePTag",
			tags: tag.NewS(
				tag.NewFromAny("e", "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
				tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000003"),
			),
			expectWithClause:    true,
			expectOptionalMatch: true,
			description:         "e-tag appearing first triggers WITH clause",
		},
		{
			name: "PTagBeforeETag",
			tags: tag.NewS(
				tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000004"),
				tag.NewFromAny("e", "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"),
			),
			expectWithClause:    true,
			expectOptionalMatch: true,
			description:         "WITH clause needed even when p-tag comes before e-tag",
		},
		{
			name: "GenericTagsBeforeETag",
			tags: tag.NewS(
				tag.NewFromAny("t", "nostr"),
				tag.NewFromAny("r", "https://example.com"),
				tag.NewFromAny("e", "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"),
			),
			expectWithClause:    true,
			expectOptionalMatch: true,
			description:         "WITH clause needed when e-tag follows generic tags",
		},
		{
			name: "OnlyGenericTags",
			tags: tag.NewS(
				tag.NewFromAny("t", "bitcoin"),
				tag.NewFromAny("d", "identifier"),
				tag.NewFromAny("r", "wss://relay.example.com"),
			),
			expectWithClause:    false,
			expectOptionalMatch: false,
			description:         "Generic tags use MERGE, no WITH needed",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create test event
			ev := event.New()
			ev.Pubkey = signer.Pub()
			ev.CreatedAt = timestamp.Now().V
			ev.Kind = 1
			ev.Content = []byte(fmt.Sprintf("Test content for %s", tt.name))
			ev.Tags = tt.tags

			if err := ev.Sign(signer); err != nil {
				t.Fatalf("Failed to sign event: %v", err)
			}

			// Generate Cypher query
			cypher, params := n.buildEventCreationCypher(ev, 12345)

			// Validate WITH clause presence
			hasWithClause := strings.Contains(cypher, "WITH e, a")
			if tt.expectWithClause && !hasWithClause {
				t.Errorf("%s: expected WITH clause but none found in Cypher:\n%s", tt.description, cypher)
			}
			if !tt.expectWithClause && hasWithClause {
				t.Errorf("%s: unexpected WITH clause in Cypher:\n%s", tt.description, cypher)
			}

			// Validate OPTIONAL MATCH presence
			hasOptionalMatch := strings.Contains(cypher, "OPTIONAL MATCH")
			if tt.expectOptionalMatch && !hasOptionalMatch {
				t.Errorf("%s: expected OPTIONAL MATCH but none found", tt.description)
			}
			if !tt.expectOptionalMatch && hasOptionalMatch {
				t.Errorf("%s: unexpected OPTIONAL MATCH found", tt.description)
			}

			// Validate WITH clause comes BEFORE first OPTIONAL MATCH (if both present)
			if hasWithClause && hasOptionalMatch {
				withIndex := strings.Index(cypher, "WITH e, a")
				optionalIndex := strings.Index(cypher, "OPTIONAL MATCH")
				if withIndex > optionalIndex {
					t.Errorf("%s: WITH clause must come BEFORE OPTIONAL MATCH.\nWITH at %d, OPTIONAL MATCH at %d\nCypher:\n%s",
						tt.description, withIndex, optionalIndex, cypher)
				}
			}

			// Validate parameters are set
			if params == nil {
				t.Error("params should not be nil")
			}

			// Validate basic required params exist
			if _, ok := params["eventId"]; !ok {
				t.Error("params should contain eventId")
			}
			if _, ok := params["serial"]; !ok {
				t.Error("params should contain serial")
			}

			t.Logf("✓ %s: WITH=%v, OPTIONAL_MATCH=%v", tt.name, hasWithClause, hasOptionalMatch)
		})
	}
}

// TestCypherQueryGeneration_MultipleETags verifies WITH clause is added exactly once
// even with multiple e-tags.
func TestCypherQueryGeneration_MultipleETags(t *testing.T) {
	n := &N{}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create event with many e-tags
	manyETags := tag.NewS()
	for i := 0; i < 10; i++ {
		manyETags.Append(tag.NewFromAny("e", fmt.Sprintf("%064x", i)))
	}

	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Event with many e-tags")
	ev.Tags = manyETags

	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	cypher, _ := n.buildEventCreationCypher(ev, 1)

	// Count WITH clauses - should be exactly 1
	withCount := strings.Count(cypher, "WITH e, a")
	if withCount != 1 {
		t.Errorf("Expected exactly 1 WITH clause, found %d\nCypher:\n%s", withCount, cypher)
	}

	// Count OPTIONAL MATCH - should match number of e-tags
	optionalMatchCount := strings.Count(cypher, "OPTIONAL MATCH")
	if optionalMatchCount != 10 {
		t.Errorf("Expected 10 OPTIONAL MATCH statements (one per e-tag), found %d", optionalMatchCount)
	}

	// Count FOREACH (which wraps the conditional relationship creation)
	foreachCount := strings.Count(cypher, "FOREACH")
	if foreachCount != 10 {
		t.Errorf("Expected 10 FOREACH blocks, found %d", foreachCount)
	}

	t.Logf("✓ WITH clause added once, followed by %d OPTIONAL MATCH + FOREACH pairs", optionalMatchCount)
}

// TestCypherQueryGeneration_CriticalBugScenario reproduces the exact bug scenario
// that was fixed: CREATE followed by OPTIONAL MATCH without WITH clause.
func TestCypherQueryGeneration_CriticalBugScenario(t *testing.T) {
	n := &N{}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// This is the exact scenario that caused the bug:
	// An event with just one e-tag should have:
	// 1. CREATE clause for the event
	// 2. WITH clause to carry forward variables
	// 3. OPTIONAL MATCH for the referenced event
	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Reply to an event")
	ev.Tags = tag.NewS(
		tag.NewFromAny("e", "1234567890123456789012345678901234567890123456789012345678901234"),
	)

	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	cypher, _ := n.buildEventCreationCypher(ev, 1)

	// The critical validation: WITH must appear between CREATE and OPTIONAL MATCH
	createIndex := strings.Index(cypher, "CREATE (e)-[:AUTHORED_BY]->(a)")
	withIndex := strings.Index(cypher, "WITH e, a")
	optionalMatchIndex := strings.Index(cypher, "OPTIONAL MATCH")

	if createIndex == -1 {
		t.Fatal("CREATE clause not found in Cypher")
	}
	if withIndex == -1 {
		t.Fatal("WITH clause not found in Cypher - THIS IS THE BUG!")
	}
	if optionalMatchIndex == -1 {
		t.Fatal("OPTIONAL MATCH not found in Cypher")
	}

	// Validate order: CREATE < WITH < OPTIONAL MATCH
	if !(createIndex < withIndex && withIndex < optionalMatchIndex) {
		t.Errorf("Invalid clause ordering. Expected: CREATE (%d) < WITH (%d) < OPTIONAL MATCH (%d)\nCypher:\n%s",
			createIndex, withIndex, optionalMatchIndex, cypher)
	}

	t.Log("✓ Critical bug scenario validated: WITH clause correctly placed between CREATE and OPTIONAL MATCH")
}

// TestBuildEventCreationCypher_WithClause validates the WITH clause fix for Cypher queries.
// The bug was that OPTIONAL MATCH cannot directly follow CREATE in Cypher - a WITH clause
// is required to carry forward bound variables (e, a) from the CREATE to the MATCH.
func TestBuildEventCreationCypher_WithClause(t *testing.T) {
	// Skip if Neo4j is not available
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	// Create test database
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	// Wait for database to be ready
	<-db.Ready()

	// Wipe database to ensure clean state
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	// Generate test keypair
	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Test cases for different tag combinations
	tests := []struct {
		name           string
		tags           *tag.S
		wantWithClause bool
		description    string
	}{
		{
			name:           "NoTags",
			tags:           nil,
			wantWithClause: false,
			description:    "Event without tags should not have WITH clause",
		},
		{
			name: "OnlyPTags",
			tags: tag.NewS(
				tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000001"),
				tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000002"),
			),
			wantWithClause: false,
			description:    "Event with only p-tags (MERGE) should not have WITH clause",
		},
		{
			name: "OnlyETags",
			tags: tag.NewS(
				tag.NewFromAny("e", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
				tag.NewFromAny("e", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
			),
			wantWithClause: true,
			description:    "Event with e-tags (OPTIONAL MATCH) MUST have WITH clause",
		},
		{
			name: "ETagFirst",
			tags: tag.NewS(
				tag.NewFromAny("e", "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
				tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000003"),
			),
			wantWithClause: true,
			description:    "Event with e-tag first MUST have WITH clause before OPTIONAL MATCH",
		},
		{
			name: "PTagFirst",
			tags: tag.NewS(
				tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000004"),
				tag.NewFromAny("e", "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"),
			),
			wantWithClause: true,
			description:    "Event with p-tag first still needs WITH clause before e-tag's OPTIONAL MATCH",
		},
		{
			name: "MixedTags",
			tags: tag.NewS(
				tag.NewFromAny("t", "nostr"),
				tag.NewFromAny("e", "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"),
				tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000005"),
				tag.NewFromAny("r", "https://example.com"),
			),
			wantWithClause: true,
			description:    "Mixed tags with e-tag requires WITH clause",
		},
		{
			name: "OnlyGenericTags",
			tags: tag.NewS(
				tag.NewFromAny("t", "bitcoin"),
				tag.NewFromAny("r", "wss://relay.example.com"),
				tag.NewFromAny("d", "identifier"),
			),
			wantWithClause: false,
			description:    "Generic tags (MERGE) don't require WITH clause",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Create event
			ev := event.New()
			ev.Pubkey = signer.Pub()
			ev.CreatedAt = timestamp.Now().V
			ev.Kind = 1
			ev.Content = []byte(fmt.Sprintf("Test content for %s", tt.name))
			ev.Tags = tt.tags

			if err := ev.Sign(signer); err != nil {
				t.Fatalf("Failed to sign event: %v", err)
			}

			// Build Cypher query
			cypher, params := db.buildEventCreationCypher(ev, 1)

			// Check if WITH clause is present
			hasWithClause := strings.Contains(cypher, "WITH e, a")

			if tt.wantWithClause && !hasWithClause {
				t.Errorf("%s: expected WITH clause but none found.\nCypher:\n%s", tt.description, cypher)
			}
			if !tt.wantWithClause && hasWithClause {
				t.Errorf("%s: unexpected WITH clause found.\nCypher:\n%s", tt.description, cypher)
			}

			// Verify Cypher syntax by executing it against Neo4j
			// This is the key test - invalid Cypher will fail here
			_, err := db.ExecuteWrite(ctx, cypher, params)
			if err != nil {
				t.Errorf("%s: Cypher query failed (invalid syntax): %v\nCypher:\n%s", tt.description, err, cypher)
			}
		})
	}
}

// TestSaveEvent_ETagReference tests that events with e-tags are saved correctly
// and the REFERENCES relationships are created when the referenced event exists.
func TestSaveEvent_ETagReference(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	// Generate keypairs
	alice, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := alice.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	bob, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := bob.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create a root event from Alice
	rootEvent := event.New()
	rootEvent.Pubkey = alice.Pub()
	rootEvent.CreatedAt = timestamp.Now().V
	rootEvent.Kind = 1
	rootEvent.Content = []byte("This is the root event")

	if err := rootEvent.Sign(alice); err != nil {
		t.Fatalf("Failed to sign root event: %v", err)
	}

	// Save root event
	exists, err := db.SaveEvent(ctx, rootEvent)
	if err != nil {
		t.Fatalf("Failed to save root event: %v", err)
	}
	if exists {
		t.Fatal("Root event should not exist yet")
	}

	rootEventID := hex.Enc(rootEvent.ID[:])

	// Create a reply from Bob that references the root event
	replyEvent := event.New()
	replyEvent.Pubkey = bob.Pub()
	replyEvent.CreatedAt = timestamp.Now().V + 1
	replyEvent.Kind = 1
	replyEvent.Content = []byte("This is a reply to Alice")
	replyEvent.Tags = tag.NewS(
		tag.NewFromAny("e", rootEventID, "", "root"),
		tag.NewFromAny("p", hex.Enc(alice.Pub())),
	)

	if err := replyEvent.Sign(bob); err != nil {
		t.Fatalf("Failed to sign reply event: %v", err)
	}

	// Save reply event - this exercises the WITH clause fix
	exists, err = db.SaveEvent(ctx, replyEvent)
	if err != nil {
		t.Fatalf("Failed to save reply event: %v", err)
	}
	if exists {
		t.Fatal("Reply event should not exist yet")
	}

	// Verify REFERENCES relationship was created
	cypher := `
MATCH (reply:Event {id: $replyId})-[:REFERENCES]->(root:Event {id: $rootId})
RETURN reply.id AS replyId, root.id AS rootId
`
	params := map[string]any{
		"replyId": hex.Enc(replyEvent.ID[:]),
		"rootId":  rootEventID,
	}

	result, err := db.ExecuteRead(ctx, cypher, params)
	if err != nil {
		t.Fatalf("Failed to query REFERENCES relationship: %v", err)
	}

	if !result.Next(ctx) {
		t.Error("Expected REFERENCES relationship between reply and root events")
	} else {
		record := result.Record()
		returnedReplyId := record.Values[0].(string)
		returnedRootId := record.Values[1].(string)
		t.Logf("✓ REFERENCES relationship verified: %s -> %s", returnedReplyId[:8], returnedRootId[:8])
	}

	// Verify MENTIONS relationship was also created for the p-tag
	mentionsCypher := `
MATCH (reply:Event {id: $replyId})-[:MENTIONS]->(author:Author {pubkey: $authorPubkey})
RETURN author.pubkey AS pubkey
`
	mentionsParams := map[string]any{
		"replyId":      hex.Enc(replyEvent.ID[:]),
		"authorPubkey": hex.Enc(alice.Pub()),
	}

	mentionsResult, err := db.ExecuteRead(ctx, mentionsCypher, mentionsParams)
	if err != nil {
		t.Fatalf("Failed to query MENTIONS relationship: %v", err)
	}

	if !mentionsResult.Next(ctx) {
		t.Error("Expected MENTIONS relationship for p-tag")
	} else {
		t.Logf("✓ MENTIONS relationship verified")
	}
}

// TestSaveEvent_ETagMissingReference tests that e-tags to non-existent events
// don't create broken relationships (OPTIONAL MATCH handles this gracefully).
func TestSaveEvent_ETagMissingReference(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create an event that references a non-existent event
	nonExistentEventID := "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"

	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Reply to ghost event")
	ev.Tags = tag.NewS(
		tag.NewFromAny("e", nonExistentEventID, "", "reply"),
	)

	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	// Save should succeed (OPTIONAL MATCH handles missing reference)
	exists, err := db.SaveEvent(ctx, ev)
	if err != nil {
		t.Fatalf("Failed to save event with missing reference: %v", err)
	}
	if exists {
		t.Fatal("Event should not exist yet")
	}

	// Verify event was saved
	checkCypher := "MATCH (e:Event {id: $id}) RETURN e.id AS id"
	checkParams := map[string]any{"id": hex.Enc(ev.ID[:])}

	result, err := db.ExecuteRead(ctx, checkCypher, checkParams)
	if err != nil {
		t.Fatalf("Failed to check event: %v", err)
	}

	if !result.Next(ctx) {
		t.Error("Event should have been saved despite missing reference")
	}

	// Verify no REFERENCES relationship was created (as the target doesn't exist)
	refCypher := `
MATCH (e:Event {id: $eventId})-[:REFERENCES]->(ref:Event)
RETURN count(ref) AS refCount
`
	refParams := map[string]any{"eventId": hex.Enc(ev.ID[:])}

	refResult, err := db.ExecuteRead(ctx, refCypher, refParams)
	if err != nil {
		t.Fatalf("Failed to check references: %v", err)
	}

	if refResult.Next(ctx) {
		count := refResult.Record().Values[0].(int64)
		if count > 0 {
			t.Errorf("Expected no REFERENCES relationship for non-existent event, got %d", count)
		} else {
			t.Logf("✓ Correctly handled missing reference (no relationship created)")
		}
	}
}

// TestSaveEvent_MultipleETags tests events with multiple e-tags.
func TestSaveEvent_MultipleETags(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Create three events first
	var eventIDs []string
	for i := 0; i < 3; i++ {
		ev := event.New()
		ev.Pubkey = signer.Pub()
		ev.CreatedAt = timestamp.Now().V + int64(i)
		ev.Kind = 1
		ev.Content = []byte(fmt.Sprintf("Event %d", i))

		if err := ev.Sign(signer); err != nil {
			t.Fatalf("Failed to sign event %d: %v", i, err)
		}

		if _, err := db.SaveEvent(ctx, ev); err != nil {
			t.Fatalf("Failed to save event %d: %v", i, err)
		}

		eventIDs = append(eventIDs, hex.Enc(ev.ID[:]))
	}

	// Create an event that references all three
	replyEvent := event.New()
	replyEvent.Pubkey = signer.Pub()
	replyEvent.CreatedAt = timestamp.Now().V + 10
	replyEvent.Kind = 1
	replyEvent.Content = []byte("This references multiple events")
	replyEvent.Tags = tag.NewS(
		tag.NewFromAny("e", eventIDs[0], "", "root"),
		tag.NewFromAny("e", eventIDs[1], "", "reply"),
		tag.NewFromAny("e", eventIDs[2], "", "mention"),
	)

	if err := replyEvent.Sign(signer); err != nil {
		t.Fatalf("Failed to sign reply event: %v", err)
	}

	// Save reply event - tests multiple OPTIONAL MATCH statements after WITH
	exists, err := db.SaveEvent(ctx, replyEvent)
	if err != nil {
		t.Fatalf("Failed to save multi-reference event: %v", err)
	}
	if exists {
		t.Fatal("Reply event should not exist yet")
	}

	// Verify all REFERENCES relationships were created
	cypher := `
MATCH (reply:Event {id: $replyId})-[:REFERENCES]->(ref:Event)
RETURN ref.id AS refId
`
	params := map[string]any{"replyId": hex.Enc(replyEvent.ID[:])}

	result, err := db.ExecuteRead(ctx, cypher, params)
	if err != nil {
		t.Fatalf("Failed to query REFERENCES relationships: %v", err)
	}

	referencedIDs := make(map[string]bool)
	for result.Next(ctx) {
		refID := result.Record().Values[0].(string)
		referencedIDs[refID] = true
	}

	if len(referencedIDs) != 3 {
		t.Errorf("Expected 3 REFERENCES relationships, got %d", len(referencedIDs))
	}

	for i, id := range eventIDs {
		if !referencedIDs[id] {
			t.Errorf("Missing REFERENCES relationship to event %d (%s)", i, id[:8])
		}
	}

	t.Logf("✓ All %d REFERENCES relationships created successfully", len(referencedIDs))
}

// TestBuildEventCreationCypher_CypherSyntaxValidation validates the generated Cypher
// is syntactically correct for all edge cases.
func TestBuildEventCreationCypher_CypherSyntaxValidation(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	signer, err := p8k.New()
	if err != nil {
		t.Fatalf("Failed to create signer: %v", err)
	}
	if err := signer.Generate(); err != nil {
		t.Fatalf("Failed to generate keypair: %v", err)
	}

	// Test many e-tags to ensure WITH clause is added only once
	manyETags := tag.NewS()
	for i := 0; i < 10; i++ {
		manyETags.Append(tag.NewFromAny("e", fmt.Sprintf("%064x", i)))
	}

	ev := event.New()
	ev.Pubkey = signer.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Kind = 1
	ev.Content = []byte("Event with many e-tags")
	ev.Tags = manyETags

	if err := ev.Sign(signer); err != nil {
		t.Fatalf("Failed to sign event: %v", err)
	}

	cypher, _ := db.buildEventCreationCypher(ev, 1)

	// Count occurrences of WITH clause - should be exactly 1
	withCount := strings.Count(cypher, "WITH e, a")
	if withCount != 1 {
		t.Errorf("Expected exactly 1 WITH clause, found %d\nCypher:\n%s", withCount, cypher)
	}

	// Count OPTIONAL MATCH statements - should equal number of e-tags
	optionalMatchCount := strings.Count(cypher, "OPTIONAL MATCH")
	if optionalMatchCount != 10 {
		t.Errorf("Expected 10 OPTIONAL MATCH statements, found %d", optionalMatchCount)
	}

	t.Logf("✓ WITH clause correctly added once, followed by %d OPTIONAL MATCH statements", optionalMatchCount)
}
@@ -125,6 +125,10 @@ func (n *N) applySchema(ctx context.Context) error {
	// Used for cursor-based pagination and sync operations
	"CREATE INDEX event_serial IF NOT EXISTS FOR (e:Event) ON (e.serial)",

	// OPTIONAL (NIP-40): Event.expiration for expired event cleanup
	// Used by DeleteExpired to efficiently find events past their expiration time
	"CREATE INDEX event_expiration IF NOT EXISTS FOR (e:Event) ON (e.expiration)",

	// ============================================================
	// === OPTIONAL: Social Graph Event Processing Indexes ===
	// Support tracking of processed social events for graph updates
@@ -230,6 +234,7 @@ func (n *N) dropAll(ctx context.Context) error {

	// OPTIONAL (Internal) indexes
	"DROP INDEX event_serial IF EXISTS",
	"DROP INDEX event_expiration IF EXISTS",

	// OPTIONAL (Social Graph) indexes
	"DROP INDEX processedSocialEvent_pubkey_kind IF EXISTS",
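The new event_expiration index exists to make NIP-40 cleanup cheap: a sweep can range-scan on the property instead of touching every Event node. A hypothetical sketch of the query shape DeleteExpired might run against it (the helper name and exact predicate are assumptions, not the actual implementation):

```go
// Hypothetical expiration sweep served by the event_expiration index;
// the real DeleteExpired implementation may differ.
func (n *N) deleteExpiredSketch(ctx context.Context, now int64) error {
	cypher := `
MATCH (e:Event)
WHERE e.expiration > 0 AND e.expiration < $now
DETACH DELETE e
`
	// ExecuteWrite is the same helper the tests in this change set use.
	_, err := n.ExecuteWrite(ctx, cypher, map[string]any{"now": now})
	return err
}
```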
@@ -236,6 +236,7 @@ func (p *SocialEventProcessor) processReport(ctx context.Context, ev *event.E) e
	}

	// Create REPORTS relationship
	// Note: WITH is required between CREATE and MERGE in Cypher
	cypher := `
// Create event tracking node
CREATE (evt:ProcessedSocialEvent {
@@ -248,6 +249,9 @@ func (p *SocialEventProcessor) processReport(ctx context.Context, ev *event.E) e
  superseded_by: null
})

// WITH required to transition from CREATE to MERGE
WITH evt

// Create or get reporter and reported users
MERGE (reporter:NostrUser {pubkey: $reporter_pubkey})
MERGE (reported:NostrUser {pubkey: $reported_pubkey})
@@ -293,12 +297,15 @@ type UpdateContactListParams struct {

// updateContactListGraph performs atomic graph update for contact list changes
func (p *SocialEventProcessor) updateContactListGraph(ctx context.Context, params UpdateContactListParams) error {
	// Note: WITH is required between CREATE and MERGE in Cypher
	cypher := `
// Mark old event as superseded (if exists)
OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
SET old.superseded_by = $new_event_id

// Create new event tracking node
// WITH required after OPTIONAL MATCH + SET before CREATE
WITH old
CREATE (new:ProcessedSocialEvent {
  event_id: $new_event_id,
  event_kind: 3,
@@ -309,6 +316,9 @@ func (p *SocialEventProcessor) updateContactListGraph(ctx context.Context, param
  superseded_by: null
})

// WITH required to transition from CREATE to MERGE
WITH new

// Get or create author node
MERGE (author:NostrUser {pubkey: $author_pubkey})

@@ -369,12 +379,15 @@ type UpdateMuteListParams struct {

// updateMuteListGraph performs atomic graph update for mute list changes
func (p *SocialEventProcessor) updateMuteListGraph(ctx context.Context, params UpdateMuteListParams) error {
	// Note: WITH is required between CREATE and MERGE in Cypher
	cypher := `
// Mark old event as superseded (if exists)
OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
SET old.superseded_by = $new_event_id

// Create new event tracking node
// WITH required after OPTIONAL MATCH + SET before CREATE
WITH old
CREATE (new:ProcessedSocialEvent {
  event_id: $new_event_id,
  event_kind: 10000,
@@ -385,6 +398,9 @@ func (p *SocialEventProcessor) updateMuteListGraph(ctx context.Context, params U
  superseded_by: null
})

// WITH required to transition from CREATE to MERGE
WITH new

// Get or create author node
MERGE (author:NostrUser {pubkey: $author_pubkey})
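All three handlers now share the same clause skeleton. A condensed, illustrative sketch of that ordering, with the handlers' own justifying comments inlined (not verbatim from any one handler):

```go
// Condensed skeleton of the clause ordering shared by processReport,
// updateContactListGraph, and updateMuteListGraph (illustrative only).
const supersedeSkeleton = `
OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
SET old.superseded_by = $new_event_id
WITH old  // per the handlers: WITH required after OPTIONAL MATCH + SET before CREATE
CREATE (new:ProcessedSocialEvent {event_id: $new_event_id})
WITH new  // WITH required to transition from CREATE to MERGE
MERGE (author:NostrUser {pubkey: $author_pubkey})
`
```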
436 pkg/neo4j/subscriptions_test.go Normal file
@@ -0,0 +1,436 @@
package neo4j

import (
	"context"
	"os"
	"testing"

	"git.mleku.dev/mleku/nostr/encoders/filter"
	"git.mleku.dev/mleku/nostr/encoders/kind"
	"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)

func TestSubscriptions_AddAndRemove(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	// Create a subscription
	subID := "test-sub-123"
	f := &filter.F{
		Kinds: kind.NewS(kind.New(1)),
	}

	// Add subscription
	db.AddSubscription(subID, f)

	// Get subscription count (should be 1)
	count := db.GetSubscriptionCount()
	if count != 1 {
		t.Fatalf("Expected 1 subscription, got %d", count)
	}

	// Remove subscription
	db.RemoveSubscription(subID)

	// Get subscription count (should be 0)
	count = db.GetSubscriptionCount()
	if count != 0 {
		t.Fatalf("Expected 0 subscriptions after removal, got %d", count)
	}

	t.Logf("✓ Subscription add/remove works correctly")
}

func TestSubscriptions_MultipleSubscriptions(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	// Add multiple subscriptions
	for i := 0; i < 5; i++ {
		subID := string(rune('A' + i))
		f := &filter.F{
			Kinds: kind.NewS(kind.New(uint16(i + 1))),
		}
		db.AddSubscription(subID, f)
	}

	// Get subscription count
	count := db.GetSubscriptionCount()
	if count != 5 {
		t.Fatalf("Expected 5 subscriptions, got %d", count)
	}

	// Remove some subscriptions
	db.RemoveSubscription("A")
	db.RemoveSubscription("C")

	count = db.GetSubscriptionCount()
	if count != 3 {
		t.Fatalf("Expected 3 subscriptions after removal, got %d", count)
	}

	// Clear all subscriptions
	db.ClearSubscriptions()

	count = db.GetSubscriptionCount()
	if count != 0 {
		t.Fatalf("Expected 0 subscriptions after clear, got %d", count)
	}

	t.Logf("✓ Multiple subscriptions managed correctly")
}

func TestSubscriptions_DuplicateID(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	subID := "duplicate-test"

	// Add first subscription
	f1 := &filter.F{
		Kinds: kind.NewS(kind.New(1)),
	}
	db.AddSubscription(subID, f1)

	// Add subscription with same ID (should replace)
	f2 := &filter.F{
		Kinds: kind.NewS(kind.New(7)),
	}
	db.AddSubscription(subID, f2)

	// Should still have only 1 subscription
	count := db.GetSubscriptionCount()
	if count != 1 {
		t.Fatalf("Expected 1 subscription (duplicate replaced), got %d", count)
	}

	t.Logf("✓ Duplicate subscription ID handling works correctly")
}

func TestSubscriptions_RemoveNonExistent(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	// Try to remove non-existent subscription (should not panic)
	db.RemoveSubscription("non-existent")

	// Should still have 0 subscriptions
	count := db.GetSubscriptionCount()
	if count != 0 {
		t.Fatalf("Expected 0 subscriptions, got %d", count)
	}

	t.Logf("✓ Removing non-existent subscription handled gracefully")
}

func TestMarkers_SetGetDelete(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	// Set a marker
	key := "test-marker"
	value := []byte("test-value-123")
	if err := db.SetMarker(key, value); err != nil {
		t.Fatalf("Failed to set marker: %v", err)
	}

	// Get the marker
	retrieved, err := db.GetMarker(key)
	if err != nil {
		t.Fatalf("Failed to get marker: %v", err)
	}
	if string(retrieved) != string(value) {
		t.Fatalf("Marker value mismatch: got %s, expected %s", string(retrieved), string(value))
	}

	// Update the marker
	newValue := []byte("updated-value")
	if err := db.SetMarker(key, newValue); err != nil {
		t.Fatalf("Failed to update marker: %v", err)
	}

	retrieved, err = db.GetMarker(key)
	if err != nil {
		t.Fatalf("Failed to get updated marker: %v", err)
	}
	if string(retrieved) != string(newValue) {
		t.Fatalf("Updated marker value mismatch")
	}

	// Delete the marker
	if err := db.DeleteMarker(key); err != nil {
		t.Fatalf("Failed to delete marker: %v", err)
	}

	// Verify marker is deleted
	_, err = db.GetMarker(key)
	if err == nil {
		t.Fatal("Expected error when getting deleted marker")
	}

	t.Logf("✓ Markers set/get/delete works correctly")
}

func TestMarkers_GetNonExistent(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	// Try to get non-existent marker
	_, err = db.GetMarker("non-existent-marker")
	if err == nil {
		t.Fatal("Expected error when getting non-existent marker")
	}

	t.Logf("✓ Getting non-existent marker returns error as expected")
}

func TestSerial_GetNextSerial(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	// Get first serial
	serial1, err := db.getNextSerial()
	if err != nil {
		t.Fatalf("Failed to get first serial: %v", err)
	}

	// Get second serial
	serial2, err := db.getNextSerial()
	if err != nil {
		t.Fatalf("Failed to get second serial: %v", err)
	}

	// Serial should increment
	if serial2 <= serial1 {
		t.Fatalf("Expected serial to increment: serial1=%d, serial2=%d", serial1, serial2)
	}

	// Get multiple more serials and verify they're all unique and increasing
	var serials []uint64
	for i := 0; i < 10; i++ {
		s, err := db.getNextSerial()
		if err != nil {
			t.Fatalf("Failed to get serial %d: %v", i, err)
		}
		serials = append(serials, s)
	}

	for i := 1; i < len(serials); i++ {
		if serials[i] <= serials[i-1] {
			t.Fatalf("Serials not increasing: %d <= %d", serials[i], serials[i-1])
		}
	}

	t.Logf("✓ Serial generation works correctly (generated %d unique serials)", len(serials)+2)
}

func TestDatabaseReady(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	// Wait for ready
	<-db.Ready()

	// Database should be ready now
	t.Logf("✓ Database ready signal works correctly")
}

func TestIdentity(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	// Get identity (creates if not exists)
	signer := db.Identity()
	if signer == nil {
		t.Fatal("Expected non-nil signer from Identity()")
	}

	// Get identity again (should return same one)
	signer2 := db.Identity()
	if signer2 == nil {
		t.Fatal("Expected non-nil signer from second Identity() call")
	}

	// Public keys should match
	pub1 := signer.Pub()
	pub2 := signer2.Pub()
	for i := range pub1 {
		if pub1[i] != pub2[i] {
			t.Fatal("Identity pubkeys don't match across calls")
		}
	}

	t.Logf("✓ Identity persistence works correctly")
}

func TestWipe(t *testing.T) {
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	if neo4jURI == "" {
		t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	tempDir := t.TempDir()
	db, err := New(ctx, cancel, tempDir, "debug")
	if err != nil {
		t.Fatalf("Failed to create database: %v", err)
	}
	defer db.Close()

	<-db.Ready()

	signer, _ := p8k.New()
	signer.Generate()

	// Add some data
	if err := db.AddNIP43Member(signer.Pub(), "test"); err != nil {
		t.Fatalf("Failed to add member: %v", err)
	}

	// Wipe the database
	if err := db.Wipe(); err != nil {
		t.Fatalf("Failed to wipe database: %v", err)
	}

	// Verify data is gone
	isMember, _ := db.IsNIP43Member(signer.Pub())
	if isMember {
		t.Fatal("Expected data to be wiped")
	}

	t.Logf("✓ Wipe clears database correctly")
}
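These tests treat subscriptions as an in-memory registry keyed by ID. A sketch of how an incoming event might be fanned out against that registry follows; the Matches helper on filter.F is an assumption made for illustration, not a confirmed part of the package's API.

```go
// Illustrative fan-out over the subscription registry exercised above;
// the filter-matching helper name is assumed, not the package's API.
func notifySubscribersSketch(subs map[string]*filter.F, ev *event.E,
	deliver func(subID string, ev *event.E)) {
	for id, f := range subs {
		if f.Matches(ev) { // assumes a Matches predicate on filter.F
			deliver(id, ev)
		}
	}
}
```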
@@ -1 +1 @@
-v0.31.9
+v0.32.0
614 pkg/wasmdb/delete-event.go Normal file
@@ -0,0 +1,614 @@
//go:build js && wasm

package wasmdb

import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"strconv"
	"time"

	"github.com/aperturerobotics/go-indexeddb/idb"
	"lol.mleku.dev/chk"
	"lol.mleku.dev/errorf"

	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/filter"
	hexenc "git.mleku.dev/mleku/nostr/encoders/hex"
	"git.mleku.dev/mleku/nostr/encoders/ints"
	"git.mleku.dev/mleku/nostr/encoders/kind"
	"git.mleku.dev/mleku/nostr/encoders/tag"
	"git.mleku.dev/mleku/nostr/encoders/tag/atag"
	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/database/indexes"
	"next.orly.dev/pkg/database/indexes/types"
	"next.orly.dev/pkg/interfaces/store"
	"next.orly.dev/pkg/utils"
)

// DeleteEvent removes an event from the database identified by `eid`.
func (w *W) DeleteEvent(c context.Context, eid []byte) (err error) {
	w.Logger.Warnf("deleting event %0x", eid)

	// Get the serial number for the event ID
	var ser *types.Uint40
	ser, err = w.GetSerialById(eid)
	if chk.E(err) {
		return
	}
	if ser == nil {
		// Event wasn't found, nothing to delete
		return
	}

	// Fetch the event to get its data
	var ev *event.E
	ev, err = w.FetchEventBySerial(ser)
	if chk.E(err) {
		return
	}
	if ev == nil {
		// Event wasn't found, nothing to delete
		return
	}

	if err = w.DeleteEventBySerial(c, ser, ev); chk.E(err) {
		return
	}
	return
}
|
||||
// DeleteEventBySerial removes an event and all its indexes by serial number.
|
||||
func (w *W) DeleteEventBySerial(c context.Context, ser *types.Uint40, ev *event.E) (err error) {
|
||||
w.Logger.Infof("DeleteEventBySerial: deleting event %0x (serial %d)", ev.ID, ser.Get())
|
||||
|
||||
// Get all indexes for the event
|
||||
var idxs [][]byte
|
||||
idxs, err = database.GetIndexesForEvent(ev, ser.Get())
|
||||
if chk.E(err) {
|
||||
w.Logger.Errorf("DeleteEventBySerial: failed to get indexes for event %0x: %v", ev.ID, err)
|
||||
return
|
||||
}
|
||||
w.Logger.Infof("DeleteEventBySerial: found %d indexes for event %0x", len(idxs), ev.ID)
|
||||
|
||||
// Collect all unique store names we need to access
|
||||
storeNames := make(map[string]struct{})
|
||||
for _, key := range idxs {
|
||||
if len(key) >= 3 {
|
||||
storeNames[string(key[:3])] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// Also include event stores
|
||||
storeNames[string(indexes.EventPrefix)] = struct{}{}
|
||||
storeNames[string(indexes.SmallEventPrefix)] = struct{}{}
|
||||
|
||||
// Convert to slice
|
||||
storeList := make([]string, 0, len(storeNames))
|
||||
for name := range storeNames {
|
||||
storeList = append(storeList, name)
|
||||
}
|
||||
|
||||
if len(storeList) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start a transaction to delete the event and all its indexes
|
||||
tx, err := w.db.Transaction(idb.TransactionReadWrite, storeList[0], storeList[1:]...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start delete transaction: %w", err)
|
||||
}
|
||||
|
||||
// Delete all indexes
|
||||
for _, key := range idxs {
|
||||
if len(key) < 3 {
|
||||
continue
|
||||
}
|
||||
storeName := string(key[:3])
|
||||
objStore, storeErr := tx.ObjectStore(storeName)
|
||||
if storeErr != nil {
|
||||
w.Logger.Warnf("DeleteEventBySerial: failed to get object store %s: %v", storeName, storeErr)
|
||||
continue
|
||||
}
|
||||
|
||||
keyJS := bytesToSafeValue(key)
|
||||
if _, delErr := objStore.Delete(keyJS); delErr != nil {
|
||||
w.Logger.Warnf("DeleteEventBySerial: failed to delete index from %s: %v", storeName, delErr)
|
||||
}
|
||||
}
|
||||
|
||||
// Delete from small event store
|
||||
sevKeyBuf := new(bytes.Buffer)
|
||||
if err = indexes.SmallEventEnc(ser).MarshalWrite(sevKeyBuf); err == nil {
|
||||
if objStore, storeErr := tx.ObjectStore(string(indexes.SmallEventPrefix)); storeErr == nil {
|
||||
// For small events, the key includes size and data, so we need to scan
|
||||
w.deleteKeysByPrefix(objStore, sevKeyBuf.Bytes())
|
||||
}
|
||||
}
|
||||
|
||||
// Delete from large event store
|
||||
evtKeyBuf := new(bytes.Buffer)
|
||||
if err = indexes.EventEnc(ser).MarshalWrite(evtKeyBuf); err == nil {
|
||||
if objStore, storeErr := tx.ObjectStore(string(indexes.EventPrefix)); storeErr == nil {
|
||||
keyJS := bytesToSafeValue(evtKeyBuf.Bytes())
|
||||
objStore.Delete(keyJS)
|
||||
}
|
||||
}
|
||||
|
||||
// Commit transaction
|
||||
if err = tx.Await(c); err != nil {
|
||||
return fmt.Errorf("failed to commit delete transaction: %w", err)
|
||||
}
|
||||
|
||||
w.Logger.Infof("DeleteEventBySerial: successfully deleted event %0x and all indexes", ev.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteKeysByPrefix deletes all keys starting with the given prefix from an object store
|
||||
func (w *W) deleteKeysByPrefix(store *idb.ObjectStore, prefix []byte) {
|
||||
cursorReq, err := store.OpenCursor(idb.CursorNext)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
var keysToDelete [][]byte
|
||||
cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
|
||||
keyVal, keyErr := cursor.Key()
|
||||
if keyErr != nil {
|
||||
return keyErr
|
||||
}
|
||||
|
||||
keyBytes := safeValueToBytes(keyVal)
|
||||
if len(keyBytes) >= len(prefix) && bytes.HasPrefix(keyBytes, prefix) {
|
||||
keysToDelete = append(keysToDelete, keyBytes)
|
||||
}
|
||||
|
||||
return cursor.Continue()
|
||||
})
|
||||
|
||||
// Delete collected keys
|
||||
for _, key := range keysToDelete {
|
||||
keyJS := bytesToSafeValue(key)
|
||||
store.Delete(keyJS)
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteExpired scans for events with expiration timestamps that have passed and deletes them.
|
||||
func (w *W) DeleteExpired() {
|
||||
now := time.Now().Unix()
|
||||
|
||||
// Open read transaction to find expired events
|
||||
tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.ExpirationPrefix))
|
||||
if err != nil {
|
||||
w.Logger.Warnf("DeleteExpired: failed to start transaction: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
objStore, err := tx.ObjectStore(string(indexes.ExpirationPrefix))
|
||||
if err != nil {
|
||||
w.Logger.Warnf("DeleteExpired: failed to get expiration store: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
var expiredSerials types.Uint40s
|
||||
|
||||
cursorReq, err := objStore.OpenCursor(idb.CursorNext)
|
||||
if err != nil {
|
||||
w.Logger.Warnf("DeleteExpired: failed to open cursor: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
|
||||
keyVal, keyErr := cursor.Key()
|
||||
if keyErr != nil {
|
||||
return keyErr
|
||||
}
|
||||
|
||||
keyBytes := safeValueToBytes(keyVal)
|
||||
if len(keyBytes) < 8 { // exp prefix (3) + expiration (variable) + serial (5)
|
||||
return cursor.Continue()
|
||||
}
|
||||
|
||||
// Parse expiration key: exp|expiration_timestamp|serial
|
||||
exp, ser := indexes.ExpirationVars()
|
||||
buf := bytes.NewBuffer(keyBytes)
|
||||
if err := indexes.ExpirationDec(exp, ser).UnmarshalRead(buf); err != nil {
|
||||
return cursor.Continue()
|
||||
}
|
||||
|
||||
if int64(exp.Get()) > now {
|
||||
// Not expired yet
|
||||
return cursor.Continue()
|
||||
}
|
||||
|
||||
expiredSerials = append(expiredSerials, ser)
|
||||
return cursor.Continue()
|
||||
})
|
||||
|
||||
// Delete expired events
|
||||
for _, ser := range expiredSerials {
|
||||
ev, fetchErr := w.FetchEventBySerial(ser)
|
||||
if fetchErr != nil || ev == nil {
|
||||
continue
|
||||
}
|
||||
if err := w.DeleteEventBySerial(context.Background(), ser, ev); err != nil {
|
||||
w.Logger.Warnf("DeleteExpired: failed to delete expired event: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessDelete processes a kind 5 deletion event, deleting referenced events.
|
||||
func (w *W) ProcessDelete(ev *event.E, admins [][]byte) (err error) {
|
||||
eTags := ev.Tags.GetAll([]byte("e"))
|
||||
aTags := ev.Tags.GetAll([]byte("a"))
|
||||
kTags := ev.Tags.GetAll([]byte("k"))
|
||||
|
||||
// Process e-tags: delete specific events by ID
|
||||
for _, eTag := range eTags {
|
||||
if eTag.Len() < 2 {
|
||||
continue
|
||||
}
|
||||
// Use ValueHex() to handle both binary and hex storage formats
|
||||
eventIdHex := eTag.ValueHex()
|
||||
if len(eventIdHex) != 64 { // hex encoded event ID
|
||||
continue
|
||||
}
|
||||
// Decode hex event ID
|
||||
var eid []byte
|
||||
if eid, err = hexenc.DecAppend(nil, eventIdHex); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
// Fetch the event to verify ownership
|
||||
var ser *types.Uint40
|
||||
if ser, err = w.GetSerialById(eid); chk.E(err) || ser == nil {
|
||||
continue
|
||||
}
|
||||
var targetEv *event.E
|
||||
if targetEv, err = w.FetchEventBySerial(ser); chk.E(err) || targetEv == nil {
|
||||
continue
|
||||
}
|
||||
// Only allow users to delete their own events
|
||||
if !utils.FastEqual(targetEv.Pubkey, ev.Pubkey) {
|
||||
continue
|
||||
}
|
||||
// Delete the event
|
||||
if err = w.DeleteEvent(context.Background(), eid); chk.E(err) {
|
||||
w.Logger.Warnf("failed to delete event %x via e-tag: %v", eid, err)
|
||||
continue
|
||||
}
|
||||
w.Logger.Debugf("deleted event %x via e-tag deletion", eid)
|
||||
}
|
||||
|
||||
// Process a-tags: delete addressable events by kind:pubkey:d-tag
|
||||
for _, aTag := range aTags {
|
||||
if aTag.Len() < 2 {
|
||||
continue
|
||||
}
|
||||
// Parse the 'a' tag value: kind:pubkey:d-tag (for parameterized) or kind:pubkey (for regular)
|
||||
split := bytes.Split(aTag.Value(), []byte{':'})
|
||||
if len(split) < 2 {
|
||||
continue
|
||||
}
|
||||
// Parse the kind
|
||||
kindStr := string(split[0])
|
||||
kindInt, parseErr := strconv.Atoi(kindStr)
|
||||
if parseErr != nil {
|
||||
continue
|
||||
}
|
||||
kk := kind.New(uint16(kindInt))
|
||||
// Parse the pubkey
|
||||
var pk []byte
|
||||
if pk, err = hexenc.DecAppend(nil, split[1]); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
// Only allow users to delete their own events
|
||||
if !utils.FastEqual(pk, ev.Pubkey) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Build filter for events to delete
|
||||
delFilter := &filter.F{
|
||||
Authors: tag.NewFromBytesSlice(pk),
|
||||
Kinds: kind.NewS(kk),
|
||||
}
|
||||
|
||||
// For parameterized replaceable events, add d-tag filter
|
||||
if kind.IsParameterizedReplaceable(kk.K) && len(split) >= 3 {
|
||||
dValue := split[2]
|
||||
delFilter.Tags = tag.NewS(tag.NewFromAny([]byte("d"), dValue))
|
||||
}
|
||||
|
||||
// Find matching events
|
||||
var idxs []database.Range
|
||||
if idxs, err = database.GetIndexesFromFilter(delFilter); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
var sers types.Uint40s
|
||||
for _, idx := range idxs {
|
||||
var s types.Uint40s
|
||||
if s, err = w.GetSerialsByRange(idx); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
sers = append(sers, s...)
|
||||
}
|
||||
|
||||
// Delete events older than the deletion event
|
||||
if len(sers) > 0 {
|
||||
var idPkTss []*store.IdPkTs
|
||||
var tmp []*store.IdPkTs
|
||||
if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
idPkTss = append(idPkTss, tmp...)
|
||||
// Sort by timestamp
|
||||
sort.Slice(idPkTss, func(i, j int) bool {
|
||||
return idPkTss[i].Ts > idPkTss[j].Ts
|
||||
})
|
||||
for _, v := range idPkTss {
|
||||
if v.Ts < ev.CreatedAt {
|
||||
if err = w.DeleteEvent(context.Background(), v.Id); chk.E(err) {
|
||||
w.Logger.Warnf("failed to delete event %x via a-tag: %v", v.Id, err)
|
||||
continue
|
||||
}
|
||||
w.Logger.Debugf("deleted event %x via a-tag deletion", v.Id)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If there are no e or a tags, delete all replaceable events of the kinds
|
||||
// specified by the k tags for the pubkey of the delete event.
|
||||
if len(eTags) == 0 && len(aTags) == 0 {
|
||||
// Parse the kind tags
|
||||
var kinds []*kind.K
|
||||
for _, k := range kTags {
|
||||
kv := k.Value()
|
||||
iv := ints.New(0)
|
||||
if _, err = iv.Unmarshal(kv); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
kinds = append(kinds, kind.New(iv.N))
|
||||
}
|
||||
|
||||
var idxs []database.Range
|
||||
if idxs, err = database.GetIndexesFromFilter(
|
||||
&filter.F{
|
||||
Authors: tag.NewFromBytesSlice(ev.Pubkey),
|
||||
Kinds: kind.NewS(kinds...),
|
||||
},
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
var sers types.Uint40s
|
||||
for _, idx := range idxs {
|
||||
var s types.Uint40s
|
||||
if s, err = w.GetSerialsByRange(idx); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sers = append(sers, s...)
|
||||
}
|
||||
|
||||
if len(sers) > 0 {
|
||||
var idPkTss []*store.IdPkTs
|
||||
var tmp []*store.IdPkTs
|
||||
if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
|
||||
return
|
||||
}
|
||||
idPkTss = append(idPkTss, tmp...)
|
||||
// Sort by timestamp
|
||||
sort.Slice(idPkTss, func(i, j int) bool {
|
||||
return idPkTss[i].Ts > idPkTss[j].Ts
|
||||
})
|
||||
for _, v := range idPkTss {
|
||||
if v.Ts < ev.CreatedAt {
|
||||
if err = w.DeleteEvent(context.Background(), v.Id); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CheckForDeleted checks if the event has been deleted, and returns an error with
|
||||
// prefix "blocked:" if it is. This function also allows designating admin
|
||||
// pubkeys that may also delete the event.
|
||||
func (w *W) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
|
||||
keys := append([][]byte{ev.Pubkey}, admins...)
|
||||
authors := tag.NewFromBytesSlice(keys...)
|
||||
|
||||
// If the event is addressable, check for a deletion event with the same
|
||||
// kind/pubkey/dtag
|
||||
if kind.IsParameterizedReplaceable(ev.Kind) {
|
||||
var idxs []database.Range
|
||||
// Construct an a-tag
|
||||
t := ev.Tags.GetFirst([]byte("d"))
|
||||
var dTagValue []byte
|
||||
if t != nil {
|
||||
dTagValue = t.Value()
|
||||
}
|
||||
a := atag.T{
|
||||
Kind: kind.New(ev.Kind),
|
||||
Pubkey: ev.Pubkey,
|
||||
DTag: dTagValue,
|
||||
}
|
||||
at := a.Marshal(nil)
|
||||
if idxs, err = database.GetIndexesFromFilter(
|
||||
&filter.F{
|
||||
Authors: authors,
|
||||
Kinds: kind.NewS(kind.Deletion),
|
||||
Tags: tag.NewS(tag.NewFromAny("#a", at)),
|
||||
},
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
var sers types.Uint40s
|
||||
for _, idx := range idxs {
|
||||
var s types.Uint40s
|
||||
if s, err = w.GetSerialsByRange(idx); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sers = append(sers, s...)
|
||||
}
|
||||
|
||||
if len(sers) > 0 {
|
||||
var idPkTss []*store.IdPkTs
|
||||
var tmp []*store.IdPkTs
|
||||
if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
|
||||
return
|
||||
}
|
||||
idPkTss = append(idPkTss, tmp...)
|
||||
// Find the newest deletion timestamp
|
||||
maxTs := idPkTss[0].Ts
|
||||
for i := 1; i < len(idPkTss); i++ {
|
||||
if idPkTss[i].Ts > maxTs {
|
||||
maxTs = idPkTss[i].Ts
|
||||
}
|
||||
}
|
||||
if ev.CreatedAt < maxTs {
|
||||
err = errorf.E(
|
||||
"blocked: %0x was deleted by address %s because it is older than the delete: event: %d delete: %d",
|
||||
ev.ID, at, ev.CreatedAt, maxTs,
|
||||
)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// If the event is replaceable, check if there is a deletion event newer
|
||||
// than the event
|
||||
if kind.IsReplaceable(ev.Kind) {
|
||||
var idxs []database.Range
|
||||
if idxs, err = database.GetIndexesFromFilter(
|
||||
&filter.F{
|
||||
Authors: tag.NewFromBytesSlice(ev.Pubkey),
|
||||
Kinds: kind.NewS(kind.Deletion),
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("#k", fmt.Sprint(ev.Kind)),
|
||||
),
|
||||
},
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
var sers types.Uint40s
|
||||
for _, idx := range idxs {
|
||||
var s types.Uint40s
|
||||
if s, err = w.GetSerialsByRange(idx); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sers = append(sers, s...)
|
||||
}
|
||||
|
||||
if len(sers) > 0 {
|
||||
var idPkTss []*store.IdPkTs
|
||||
var tmp []*store.IdPkTs
|
||||
if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
|
||||
return
|
||||
}
|
||||
idPkTss = append(idPkTss, tmp...)
|
||||
// Find the newest deletion
|
||||
maxTs := idPkTss[0].Ts
|
||||
maxId := idPkTss[0].Id
|
||||
for i := 1; i < len(idPkTss); i++ {
|
||||
if idPkTss[i].Ts > maxTs {
|
||||
maxTs = idPkTss[i].Ts
|
||||
maxId = idPkTss[i].Id
|
||||
}
|
||||
}
|
||||
if ev.CreatedAt < maxTs {
|
||||
err = fmt.Errorf(
|
||||
"blocked: %0x was deleted: the event is older than the delete event %0x: event: %d delete: %d",
|
||||
ev.ID, maxId, ev.CreatedAt, maxTs,
|
||||
)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// This type of delete can also use an a tag to specify kind and author
|
||||
idxs = nil
|
||||
a := atag.T{
|
||||
Kind: kind.New(ev.Kind),
|
||||
Pubkey: ev.Pubkey,
|
||||
}
|
||||
at := a.Marshal(nil)
|
||||
if idxs, err = database.GetIndexesFromFilter(
|
||||
&filter.F{
|
||||
Authors: authors,
|
||||
Kinds: kind.NewS(kind.Deletion),
|
||||
Tags: tag.NewS(tag.NewFromAny("#a", at)),
|
||||
},
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
sers = nil
|
||||
for _, idx := range idxs {
|
||||
var s types.Uint40s
|
||||
if s, err = w.GetSerialsByRange(idx); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sers = append(sers, s...)
|
||||
}
|
||||
|
||||
if len(sers) > 0 {
|
||||
var idPkTss []*store.IdPkTs
|
||||
var tmp []*store.IdPkTs
|
||||
if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
|
||||
return
|
||||
}
|
||||
idPkTss = append(idPkTss, tmp...)
|
||||
// Find the newest deletion
|
||||
maxTs := idPkTss[0].Ts
|
||||
for i := 1; i < len(idPkTss); i++ {
|
||||
if idPkTss[i].Ts > maxTs {
|
||||
maxTs = idPkTss[i].Ts
|
||||
}
|
||||
}
|
||||
if ev.CreatedAt < maxTs {
|
||||
err = errorf.E(
|
||||
"blocked: %0x was deleted by address %s because it is older than the delete: event: %d delete: %d",
|
||||
ev.ID, at, ev.CreatedAt, maxTs,
|
||||
)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise check for a delete by event id
|
||||
var idxs []database.Range
|
||||
if idxs, err = database.GetIndexesFromFilter(
|
||||
&filter.F{
|
||||
Authors: authors,
|
||||
Kinds: kind.NewS(kind.Deletion),
|
||||
Tags: tag.NewS(
|
||||
tag.NewFromAny("e", hexenc.Enc(ev.ID)),
|
||||
),
|
||||
},
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
for _, idx := range idxs {
|
||||
var s types.Uint40s
|
||||
if s, err = w.GetSerialsByRange(idx); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if len(s) > 0 {
|
||||
// Any e-tag deletion found means the exact event was deleted
|
||||
err = errorf.E("blocked: %0x has been deleted", ev.ID)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
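For orientation, a minimal sketch of where these methods sit in a relay's write path. `handleIncoming` is a hypothetical wrapper, not part of this commit; it assumes `w` is an initialized `*W` and that the caller has already verified the event signature:

```go
// Sketch only: hypothetical glue code, not part of the commit above.
func handleIncoming(w *W, ev *event.E, admins [][]byte) error {
    if ev.Kind == 5 { // kind 5 is the NIP-09 deletion event
        // Apply the deletion event's e/a/k tags against stored events.
        return w.ProcessDelete(ev, admins)
    }
    // For ordinary events, refuse ones already covered by a stored deletion;
    // the returned error carries the "blocked:" prefix for the relay's OK reply.
    if err := w.CheckForDeleted(ev, admins); err != nil {
        return err
    }
    _, err := w.SaveEvent(context.Background(), ev)
    return err
}
```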
pkg/wasmdb/fetch-event.go (new file, 256 lines)
@@ -0,0 +1,256 @@

//go:build js && wasm

package wasmdb

import (
    "bytes"
    "errors"

    "github.com/aperturerobotics/go-indexeddb/idb"
    "lol.mleku.dev/chk"

    "git.mleku.dev/mleku/nostr/encoders/event"
    "next.orly.dev/pkg/database/indexes"
    "next.orly.dev/pkg/database/indexes/types"
    "next.orly.dev/pkg/interfaces/store"
)

// FetchEventBySerial retrieves an event by its serial number
func (w *W) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
    if ser == nil {
        return nil, errors.New("nil serial")
    }

    // First try small event store (sev prefix)
    ev, err = w.fetchSmallEvent(ser)
    if err == nil && ev != nil {
        return ev, nil
    }

    // Then try large event store (evt prefix)
    ev, err = w.fetchLargeEvent(ser)
    if err == nil && ev != nil {
        return ev, nil
    }

    return nil, errors.New("event not found")
}

// fetchSmallEvent fetches an event from the small event store
func (w *W) fetchSmallEvent(ser *types.Uint40) (*event.E, error) {
    // Build the key prefix
    keyBuf := new(bytes.Buffer)
    if err := indexes.SmallEventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
        return nil, err
    }
    prefix := keyBuf.Bytes()

    // Open transaction
    tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.SmallEventPrefix))
    if err != nil {
        return nil, err
    }

    store, err := tx.ObjectStore(string(indexes.SmallEventPrefix))
    if err != nil {
        return nil, err
    }

    // Use cursor to find matching key
    cursorReq, err := store.OpenCursor(idb.CursorNext)
    if err != nil {
        return nil, err
    }

    var foundEvent *event.E
    err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
        keyVal, keyErr := cursor.Key()
        if keyErr != nil {
            return keyErr
        }

        keyBytes := safeValueToBytes(keyVal)
        if len(keyBytes) >= len(prefix) && bytes.HasPrefix(keyBytes, prefix) {
            // Found matching key
            // Format: sev|serial(5)|size(2)|data(variable)
            if len(keyBytes) > 10 { // 3 + 5 + 2 = 10 minimum
                sizeOffset := 8 // 3 prefix + 5 serial
                if len(keyBytes) > sizeOffset+2 {
                    size := int(keyBytes[sizeOffset])<<8 | int(keyBytes[sizeOffset+1])
                    dataStart := sizeOffset + 2
                    if len(keyBytes) >= dataStart+size {
                        eventData := keyBytes[dataStart : dataStart+size]
                        ev := new(event.E)
                        if unmarshalErr := ev.UnmarshalBinary(bytes.NewReader(eventData)); unmarshalErr == nil {
                            foundEvent = ev
                            return errors.New("found") // Stop iteration
                        }
                    }
                }
            }
        }

        return cursor.Continue()
    })

    if foundEvent != nil {
        return foundEvent, nil
    }
    if err != nil && err.Error() != "found" {
        return nil, err
    }

    return nil, errors.New("small event not found")
}

// fetchLargeEvent fetches an event from the large event store
func (w *W) fetchLargeEvent(ser *types.Uint40) (*event.E, error) {
    // Build the key
    keyBuf := new(bytes.Buffer)
    if err := indexes.EventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
        return nil, err
    }

    // Open transaction
    tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.EventPrefix))
    if err != nil {
        return nil, err
    }

    store, err := tx.ObjectStore(string(indexes.EventPrefix))
    if err != nil {
        return nil, err
    }

    // Get the value directly
    keyJS := bytesToSafeValue(keyBuf.Bytes())
    req, err := store.Get(keyJS)
    if err != nil {
        return nil, err
    }

    val, err := req.Await(w.ctx)
    if err != nil {
        return nil, err
    }

    if val.IsUndefined() || val.IsNull() {
        return nil, errors.New("large event not found")
    }

    eventData := safeValueToBytes(val)
    if len(eventData) == 0 {
        return nil, errors.New("empty event data")
    }

    ev := new(event.E)
    if err := ev.UnmarshalBinary(bytes.NewReader(eventData)); err != nil {
        return nil, err
    }

    return ev, nil
}

// FetchEventsBySerials retrieves multiple events by their serial numbers
func (w *W) FetchEventsBySerials(serials []*types.Uint40) (events map[uint64]*event.E, err error) {
    events = make(map[uint64]*event.E)

    for _, ser := range serials {
        if ser == nil {
            continue
        }
        ev, fetchErr := w.FetchEventBySerial(ser)
        if fetchErr == nil && ev != nil {
            events[ser.Get()] = ev
        }
    }

    return events, nil
}

// GetFullIdPubkeyBySerial retrieves the ID, pubkey hash, and timestamp for a serial
func (w *W) GetFullIdPubkeyBySerial(ser *types.Uint40) (fidpk *store.IdPkTs, err error) {
    if ser == nil {
        return nil, errors.New("nil serial")
    }

    // Build the prefix to search for
    keyBuf := new(bytes.Buffer)
    indexes.FullIdPubkeyEnc(ser, nil, nil, nil).MarshalWrite(keyBuf)
    prefix := keyBuf.Bytes()[:8] // 3 prefix + 5 serial

    // Search in the fpc object store
    tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.FullIdPubkeyPrefix))
    if err != nil {
        return nil, err
    }

    objStore, err := tx.ObjectStore(string(indexes.FullIdPubkeyPrefix))
    if err != nil {
        return nil, err
    }

    // Use cursor to find matching key
    cursorReq, err := objStore.OpenCursor(idb.CursorNext)
    if err != nil {
        return nil, err
    }

    err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
        keyVal, keyErr := cursor.Key()
        if keyErr != nil {
            return keyErr
        }

        keyBytes := safeValueToBytes(keyVal)
        if len(keyBytes) >= len(prefix) && bytes.HasPrefix(keyBytes, prefix) {
            // Found matching key
            // Format: fpc|serial(5)|id(32)|pubkey_hash(8)|timestamp(8)
            if len(keyBytes) >= 56 { // 3 + 5 + 32 + 8 + 8 = 56
                fidpk = &store.IdPkTs{
                    Id:  make([]byte, 32),
                    Pub: make([]byte, 8),
                    Ts:  0,
                }
                copy(fidpk.Id, keyBytes[8:40])
                copy(fidpk.Pub, keyBytes[40:48])
                // Parse timestamp (big-endian uint64)
                var ts int64
                for i := 0; i < 8; i++ {
                    ts = (ts << 8) | int64(keyBytes[48+i])
                }
                fidpk.Ts = ts
                fidpk.Ser = ser.Get()
                return errors.New("found") // Stop iteration
            }
        }

        return cursor.Continue()
    })

    if fidpk != nil {
        return fidpk, nil
    }
    if err != nil && err.Error() != "found" {
        return nil, err
    }

    return nil, errors.New("full id pubkey not found")
}

// GetFullIdPubkeyBySerials retrieves ID/pubkey/timestamp for multiple serials
func (w *W) GetFullIdPubkeyBySerials(sers []*types.Uint40) (fidpks []*store.IdPkTs, err error) {
    fidpks = make([]*store.IdPkTs, 0, len(sers))

    for _, ser := range sers {
        if ser == nil {
            continue
        }
        fidpk, fetchErr := w.GetFullIdPubkeyBySerial(ser)
        if fetchErr == nil && fidpk != nil {
            fidpks = append(fidpks, fidpk)
        }
    }

    return fidpks, nil
}
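A short sketch of how the batch helpers above compose; `inspectSerials` is hypothetical and assumes `w` is an initialized `*W`:

```go
// Sketch only: fetching full events and their index metadata together.
func inspectSerials(w *W, sers []*types.Uint40) {
    evs, _ := w.FetchEventsBySerials(sers)       // map keyed by serial value
    metas, _ := w.GetFullIdPubkeyBySerials(sers) // id, pubkey hash, timestamp
    for _, m := range metas {
        if ev, ok := evs[m.Ser]; ok {
            _ = ev // full event; m.Id, m.Pub, and m.Ts hold the index metadata
        }
    }
}
```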
pkg/wasmdb/helpers.go (new file, 162 lines)
@@ -0,0 +1,162 @@

//go:build js && wasm

package wasmdb

import (
    "syscall/js"

    "github.com/hack-pad/safejs"
)

// safeValueToBytes converts a safejs.Value to a []byte
// This handles Uint8Array, ArrayBuffer, and strings from IndexedDB
func safeValueToBytes(val safejs.Value) []byte {
    if val.IsUndefined() || val.IsNull() {
        return nil
    }

    // Get global Uint8Array and ArrayBuffer constructors
    uint8ArrayType := safejs.MustGetGlobal("Uint8Array")
    arrayBufferType := safejs.MustGetGlobal("ArrayBuffer")

    // Check if it's a Uint8Array
    isUint8Array, _ := val.InstanceOf(uint8ArrayType)
    if isUint8Array {
        length, err := val.Length()
        if err != nil {
            return nil
        }
        buf := make([]byte, length)
        // Copy bytes - we need to iterate since safejs doesn't have CopyBytesToGo
        for i := 0; i < length; i++ {
            elem, err := val.Index(i)
            if err != nil {
                return nil
            }
            intVal, err := elem.Int()
            if err != nil {
                return nil
            }
            buf[i] = byte(intVal)
        }
        return buf
    }

    // Check if it's an ArrayBuffer
    isArrayBuffer, _ := val.InstanceOf(arrayBufferType)
    if isArrayBuffer {
        // Create a Uint8Array view of the ArrayBuffer
        uint8Array, err := uint8ArrayType.New(val)
        if err != nil {
            return nil
        }
        return safeValueToBytes(uint8Array)
    }

    // Try to treat it as a typed array-like object
    length, err := val.Length()
    if err == nil && length > 0 {
        buf := make([]byte, length)
        for i := 0; i < length; i++ {
            elem, err := val.Index(i)
            if err != nil {
                return nil
            }
            intVal, err := elem.Int()
            if err != nil {
                return nil
            }
            buf[i] = byte(intVal)
        }
        return buf
    }

    // Last resort: check if it's a string (for string keys in IndexedDB)
    if val.Type() == safejs.TypeString {
        str, err := val.String()
        if err == nil {
            return []byte(str)
        }
    }

    return nil
}

// bytesToSafeValue converts a []byte to a safejs.Value (Uint8Array)
func bytesToSafeValue(buf []byte) safejs.Value {
    if buf == nil {
        return safejs.Null()
    }

    uint8Array := js.Global().Get("Uint8Array").New(len(buf))
    js.CopyBytesToJS(uint8Array, buf)
    return safejs.Safe(uint8Array)
}

// cryptoRandom fills the provided byte slice with cryptographically secure random bytes
// using the Web Crypto API (crypto.getRandomValues) or Node.js crypto.randomFillSync
func cryptoRandom(buf []byte) error {
    if len(buf) == 0 {
        return nil
    }

    // First try browser's crypto.getRandomValues
    crypto := js.Global().Get("crypto")
    if crypto.IsUndefined() {
        // Fallback to msCrypto for older IE
        crypto = js.Global().Get("msCrypto")
    }

    if !crypto.IsUndefined() {
        // Try getRandomValues (browser API)
        getRandomValues := crypto.Get("getRandomValues")
        if !getRandomValues.IsUndefined() && getRandomValues.Type() == js.TypeFunction {
            // Create a Uint8Array to receive random bytes
            uint8Array := js.Global().Get("Uint8Array").New(len(buf))

            // Call crypto.getRandomValues - may throw in Node.js
            defer func() {
                // Recover from panic if this method doesn't work
                recover()
            }()
            getRandomValues.Invoke(uint8Array)

            // Copy the random bytes to our Go slice
            js.CopyBytesToGo(buf, uint8Array)
            return nil
        }

        // Try randomFillSync (Node.js API)
        randomFillSync := crypto.Get("randomFillSync")
        if !randomFillSync.IsUndefined() && randomFillSync.Type() == js.TypeFunction {
            uint8Array := js.Global().Get("Uint8Array").New(len(buf))
            randomFillSync.Invoke(uint8Array)
            js.CopyBytesToGo(buf, uint8Array)
            return nil
        }
    }

    // Try to load Node.js crypto module via require
    requireFunc := js.Global().Get("require")
    if !requireFunc.IsUndefined() && requireFunc.Type() == js.TypeFunction {
        nodeCrypto := requireFunc.Invoke("crypto")
        if !nodeCrypto.IsUndefined() {
            randomFillSync := nodeCrypto.Get("randomFillSync")
            if !randomFillSync.IsUndefined() && randomFillSync.Type() == js.TypeFunction {
                uint8Array := js.Global().Get("Uint8Array").New(len(buf))
                randomFillSync.Invoke(uint8Array)
                js.CopyBytesToGo(buf, uint8Array)
                return nil
            }
        }
    }

    return errNoCryptoAPI
}

// errNoCryptoAPI is returned when the Web Crypto API is not available
type cryptoAPIError struct{}

func (cryptoAPIError) Error() string { return "Web Crypto API not available" }

var errNoCryptoAPI = cryptoAPIError{}
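The two conversion helpers are intended to round-trip losslessly; a minimal sketch (assuming a js/wasm build and a `bytes` import, neither shown in the file above):

```go
// Sketch only: []byte -> Uint8Array -> []byte should be lossless.
func roundTripExample() bool {
    in := []byte{0x01, 0x02, 0x03}
    out := safeValueToBytes(bytesToSafeValue(in))
    return bytes.Equal(in, out) // expected to hold
}
```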
pkg/wasmdb/import-export.go (new file, 293 lines)
@@ -0,0 +1,293 @@

//go:build js && wasm

package wasmdb

import (
    "bufio"
    "bytes"
    "context"
    "encoding/json"
    "io"

    "github.com/aperturerobotics/go-indexeddb/idb"
    "lol.mleku.dev/chk"

    "git.mleku.dev/mleku/nostr/encoders/event"
    "git.mleku.dev/mleku/nostr/encoders/filter"
    "git.mleku.dev/mleku/nostr/encoders/tag"
    "next.orly.dev/pkg/database"
    "next.orly.dev/pkg/database/indexes"
    "next.orly.dev/pkg/database/indexes/types"
)

// Import reads events from a JSONL reader and imports them into the database
func (w *W) Import(rr io.Reader) {
    ctx := context.Background()
    scanner := bufio.NewScanner(rr)
    // Increase buffer size for large events
    buf := make([]byte, 1024*1024) // 1MB buffer
    scanner.Buffer(buf, len(buf))

    imported := 0
    for scanner.Scan() {
        line := scanner.Bytes()
        if len(line) == 0 {
            continue
        }

        ev := event.New()
        if err := json.Unmarshal(line, ev); err != nil {
            w.Logger.Warnf("Import: failed to unmarshal event: %v", err)
            continue
        }

        if _, err := w.SaveEvent(ctx, ev); err != nil {
            w.Logger.Debugf("Import: failed to save event: %v", err)
            continue
        }
        imported++
    }

    if err := scanner.Err(); err != nil {
        w.Logger.Errorf("Import: scanner error: %v", err)
    }

    w.Logger.Infof("Import: imported %d events", imported)
}

// Export writes events to a JSONL writer, optionally filtered by pubkeys
func (w *W) Export(c context.Context, wr io.Writer, pubkeys ...[]byte) {
    var evs event.S
    var err error

    // Query events
    if len(pubkeys) > 0 {
        // Export only events from specified pubkeys
        for _, pk := range pubkeys {
            // Get all serials for this pubkey
            serials, err := w.GetSerialsByPubkey(pk)
            if err != nil {
                w.Logger.Warnf("Export: failed to get serials for pubkey: %v", err)
                continue
            }

            for _, ser := range serials {
                ev, err := w.FetchEventBySerial(ser)
                if err != nil || ev == nil {
                    continue
                }
                evs = append(evs, ev)
            }
        }
    } else {
        // Export all events
        evs, err = w.getAllEvents(c)
        if err != nil {
            w.Logger.Errorf("Export: failed to get all events: %v", err)
            return
        }
    }

    // Write events as JSONL
    exported := 0
    for _, ev := range evs {
        data, err := json.Marshal(ev)
        if err != nil {
            w.Logger.Warnf("Export: failed to marshal event: %v", err)
            continue
        }
        wr.Write(data)
        wr.Write([]byte("\n"))
        exported++
    }

    w.Logger.Infof("Export: exported %d events", exported)
}

// ImportEventsFromReader imports events from a JSONL reader with context support
func (w *W) ImportEventsFromReader(ctx context.Context, rr io.Reader) error {
    scanner := bufio.NewScanner(rr)
    buf := make([]byte, 1024*1024)
    scanner.Buffer(buf, len(buf))

    imported := 0
    for scanner.Scan() {
        select {
        case <-ctx.Done():
            w.Logger.Infof("ImportEventsFromReader: cancelled after %d events", imported)
            return ctx.Err()
        default:
        }

        line := scanner.Bytes()
        if len(line) == 0 {
            continue
        }

        ev := event.New()
        if err := json.Unmarshal(line, ev); err != nil {
            w.Logger.Warnf("ImportEventsFromReader: failed to unmarshal: %v", err)
            continue
        }

        if _, err := w.SaveEvent(ctx, ev); err != nil {
            w.Logger.Debugf("ImportEventsFromReader: failed to save: %v", err)
            continue
        }
        imported++
    }

    if err := scanner.Err(); err != nil {
        return err
    }

    w.Logger.Infof("ImportEventsFromReader: imported %d events", imported)
    return nil
}

// ImportEventsFromStrings imports events from JSON strings with policy checking
func (w *W) ImportEventsFromStrings(
    ctx context.Context,
    eventJSONs []string,
    policyManager interface {
        CheckPolicy(action string, ev *event.E, pubkey []byte, remote string) (bool, error)
    },
) error {
    imported := 0

    for _, jsonStr := range eventJSONs {
        select {
        case <-ctx.Done():
            w.Logger.Infof("ImportEventsFromStrings: cancelled after %d events", imported)
            return ctx.Err()
        default:
        }

        ev := event.New()
        if err := json.Unmarshal([]byte(jsonStr), ev); err != nil {
            w.Logger.Warnf("ImportEventsFromStrings: failed to unmarshal: %v", err)
            continue
        }

        // Check policy if manager is provided
        if policyManager != nil {
            allowed, err := policyManager.CheckPolicy("write", ev, ev.Pubkey, "import")
            if err != nil || !allowed {
                w.Logger.Debugf("ImportEventsFromStrings: policy rejected event")
                continue
            }
        }

        if _, err := w.SaveEvent(ctx, ev); err != nil {
            w.Logger.Debugf("ImportEventsFromStrings: failed to save: %v", err)
            continue
        }
        imported++
    }

    w.Logger.Infof("ImportEventsFromStrings: imported %d events", imported)
    return nil
}

// GetSerialsByPubkey returns all event serials for a given pubkey
func (w *W) GetSerialsByPubkey(pubkey []byte) ([]*types.Uint40, error) {
    // Build range for pubkey index
    idx, err := database.GetIndexesFromFilter(&filter.F{
        Authors: tag.NewFromBytesSlice(pubkey),
    })
    if chk.E(err) {
        return nil, err
    }

    var serials []*types.Uint40
    for _, r := range idx {
        sers, err := w.GetSerialsByRange(r)
        if err != nil {
            continue
        }
        serials = append(serials, sers...)
    }

    return serials, nil
}

// getAllEvents retrieves all events from the database
func (w *W) getAllEvents(c context.Context) (event.S, error) {
    // Scan through the small event store and large event store
    var events event.S

    // Get events from small event store
    sevEvents, err := w.scanEventStore(string(indexes.SmallEventPrefix), true)
    if err == nil {
        events = append(events, sevEvents...)
    }

    // Get events from large event store
    evtEvents, err := w.scanEventStore(string(indexes.EventPrefix), false)
    if err == nil {
        events = append(events, evtEvents...)
    }

    return events, nil
}

// scanEventStore scans an event store and returns all events
func (w *W) scanEventStore(storeName string, isSmallEvent bool) (event.S, error) {
    tx, err := w.db.Transaction(idb.TransactionReadOnly, storeName)
    if err != nil {
        return nil, err
    }

    store, err := tx.ObjectStore(storeName)
    if err != nil {
        return nil, err
    }

    var events event.S

    cursorReq, err := store.OpenCursor(idb.CursorNext)
    if err != nil {
        return nil, err
    }

    err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
        var eventData []byte

        if isSmallEvent {
            // Small events: data is embedded in the key
            keyVal, keyErr := cursor.Key()
            if keyErr != nil {
                return keyErr
            }
            keyBytes := safeValueToBytes(keyVal)
            // Format: sev|serial|size_uint16|event_data
            if len(keyBytes) > 10 { // 3 + 5 + 2 minimum
                sizeOffset := 8 // 3 prefix + 5 serial
                if len(keyBytes) > sizeOffset+2 {
                    size := int(keyBytes[sizeOffset])<<8 | int(keyBytes[sizeOffset+1])
                    if len(keyBytes) >= sizeOffset+2+size {
                        eventData = keyBytes[sizeOffset+2 : sizeOffset+2+size]
                    }
                }
            }
        } else {
            // Large events: data is in the value
            val, valErr := cursor.Value()
            if valErr != nil {
                return valErr
            }
            eventData = safeValueToBytes(val)
        }

        if len(eventData) > 0 {
            ev := event.New()
            if err := ev.UnmarshalBinary(bytes.NewReader(eventData)); err == nil {
                events = append(events, ev)
            }
        }

        return cursor.Continue()
    })

    return events, err
}
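Since `Export` writes one JSON event per line and `Import` consumes the same framing, the pair can back up and restore a database in memory. A hedged sketch (`backupRestore` is hypothetical and assumes `w` is an initialized `*W`):

```go
// Sketch only: JSONL round trip through the import/export pair.
func backupRestore(w *W) {
    var backup bytes.Buffer
    w.Export(context.Background(), &backup) // all events, one JSON object per line
    // ... later, possibly on a fresh instance:
    w.Import(&backup)
}
```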
pkg/wasmdb/logger.go (new file, 75 lines)
@@ -0,0 +1,75 @@

//go:build js && wasm

package wasmdb

import (
    "fmt"
    "syscall/js"
    "time"

    "lol.mleku.dev"
)

// logger provides logging functionality for the wasmdb package
// It outputs to the browser console via console.log/warn/error
type logger struct {
    level int
}

// NewLogger creates a new logger with the specified level
func NewLogger(level int) *logger {
    return &logger{level: level}
}

// SetLogLevel changes the logging level
func (l *logger) SetLogLevel(level int) {
    l.level = level
}

// formatMessage creates a formatted log message with timestamp
func (l *logger) formatMessage(level, format string, args ...interface{}) string {
    msg := fmt.Sprintf(format, args...)
    return fmt.Sprintf("[%s] [wasmdb] [%s] %s",
        time.Now().Format("15:04:05.000"),
        level,
        msg,
    )
}

// Debugf logs a debug message
func (l *logger) Debugf(format string, args ...interface{}) {
    if l.level <= lol.Debug {
        msg := l.formatMessage("DEBUG", format, args...)
        js.Global().Get("console").Call("log", msg)
    }
}

// Infof logs an info message
func (l *logger) Infof(format string, args ...interface{}) {
    if l.level <= lol.Info {
        msg := l.formatMessage("INFO", format, args...)
        js.Global().Get("console").Call("log", msg)
    }
}

// Warnf logs a warning message
func (l *logger) Warnf(format string, args ...interface{}) {
    if l.level <= lol.Warn {
        msg := l.formatMessage("WARN", format, args...)
        js.Global().Get("console").Call("warn", msg)
    }
}

// Errorf logs an error message
func (l *logger) Errorf(format string, args ...interface{}) {
    if l.level <= lol.Error {
        msg := l.formatMessage("ERROR", format, args...)
        js.Global().Get("console").Call("error", msg)
    }
}

// Fatalf logs a fatal message (does not exit in WASM)
func (l *logger) Fatalf(format string, args ...interface{}) {
    msg := l.formatMessage("FATAL", format, args...)
    js.Global().Get("console").Call("error", msg)
}
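Usage is conventional; a sketch (how the `lol` package orders its level constants is an assumption here, since the `<=` checks above only fix the direction of filtering):

```go
// Sketch only: console logging at a chosen verbosity.
func loggerExample() {
    log := NewLogger(lol.Info)
    log.Infof("imported %d events", 42)
    // Whether this line is suppressed depends on how lol orders its
    // level constants; the methods above print when level <= threshold.
    log.Debugf("diagnostic detail")
}
```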
pkg/wasmdb/nip43.go (new file, 213 lines)
@@ -0,0 +1,213 @@

//go:build js && wasm

package wasmdb

import (
    "bytes"
    "encoding/binary"
    "errors"
    "time"

    "github.com/aperturerobotics/go-indexeddb/idb"
    "github.com/hack-pad/safejs"

    "next.orly.dev/pkg/database"
)

const (
    // NIP43StoreName is the object store for NIP-43 membership
    NIP43StoreName = "nip43"

    // InvitesStoreName is the object store for invite codes
    InvitesStoreName = "invites"
)

// AddNIP43Member adds a pubkey as a NIP-43 member with the given invite code
func (w *W) AddNIP43Member(pubkey []byte, inviteCode string) error {
    if len(pubkey) != 32 {
        return errors.New("invalid pubkey length")
    }

    // Create membership record
    membership := &database.NIP43Membership{
        Pubkey:     make([]byte, 32),
        InviteCode: inviteCode,
        AddedAt:    time.Now(),
    }
    copy(membership.Pubkey, pubkey)

    // Serialize membership
    data := w.serializeNIP43Membership(membership)

    // Store using pubkey as key
    return w.setStoreValue(NIP43StoreName, string(pubkey), data)
}

// RemoveNIP43Member removes a pubkey from NIP-43 membership
func (w *W) RemoveNIP43Member(pubkey []byte) error {
    return w.deleteStoreValue(NIP43StoreName, string(pubkey))
}

// IsNIP43Member checks if a pubkey is a NIP-43 member
func (w *W) IsNIP43Member(pubkey []byte) (isMember bool, err error) {
    data, err := w.getStoreValue(NIP43StoreName, string(pubkey))
    if err != nil {
        return false, nil // Not found is not an error, just not a member
    }
    return data != nil, nil
}

// GetNIP43Membership returns the full membership details for a pubkey
func (w *W) GetNIP43Membership(pubkey []byte) (*database.NIP43Membership, error) {
    data, err := w.getStoreValue(NIP43StoreName, string(pubkey))
    if err != nil {
        return nil, err
    }
    if data == nil {
        return nil, errors.New("membership not found")
    }

    return w.deserializeNIP43Membership(data)
}

// GetAllNIP43Members returns all NIP-43 member pubkeys
func (w *W) GetAllNIP43Members() ([][]byte, error) {
    tx, err := w.db.Transaction(idb.TransactionReadOnly, NIP43StoreName)
    if err != nil {
        return nil, err
    }

    store, err := tx.ObjectStore(NIP43StoreName)
    if err != nil {
        return nil, err
    }

    var members [][]byte

    cursorReq, err := store.OpenCursor(idb.CursorNext)
    if err != nil {
        return nil, err
    }

    err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
        keyVal, keyErr := cursor.Key()
        if keyErr != nil {
            return keyErr
        }

        // Key is the pubkey stored as string
        keyBytes := safeValueToBytes(keyVal)
        if len(keyBytes) == 32 {
            pubkey := make([]byte, 32)
            copy(pubkey, keyBytes)
            members = append(members, pubkey)
        }

        return cursor.Continue()
    })

    if err != nil && err.Error() != "found" {
        return nil, err
    }

    return members, nil
}

// StoreInviteCode stores an invite code with expiration time
func (w *W) StoreInviteCode(code string, expiresAt time.Time) error {
    // Serialize expiration time as unix timestamp
    data := make([]byte, 8)
    binary.BigEndian.PutUint64(data, uint64(expiresAt.Unix()))

    return w.setStoreValue(InvitesStoreName, code, data)
}

// ValidateInviteCode checks if an invite code is valid (exists and not expired)
func (w *W) ValidateInviteCode(code string) (valid bool, err error) {
    data, err := w.getStoreValue(InvitesStoreName, code)
    if err != nil {
        return false, nil
    }
    if data == nil || len(data) < 8 {
        return false, nil
    }

    // Check expiration
    expiresAt := time.Unix(int64(binary.BigEndian.Uint64(data)), 0)
    if time.Now().After(expiresAt) {
        return false, nil
    }

    return true, nil
}

// DeleteInviteCode removes an invite code
func (w *W) DeleteInviteCode(code string) error {
    return w.deleteStoreValue(InvitesStoreName, code)
}

// PublishNIP43MembershipEvent is a no-op in WASM (events are handled by the relay)
func (w *W) PublishNIP43MembershipEvent(kind int, pubkey []byte) error {
    // In WASM context, this would typically be handled by the client
    // This is a no-op implementation
    return nil
}

// serializeNIP43Membership converts a membership to bytes for storage
func (w *W) serializeNIP43Membership(m *database.NIP43Membership) []byte {
    buf := new(bytes.Buffer)

    // Write pubkey (32 bytes)
    buf.Write(m.Pubkey)

    // Write AddedAt as unix timestamp (8 bytes)
    ts := make([]byte, 8)
    binary.BigEndian.PutUint64(ts, uint64(m.AddedAt.Unix()))
    buf.Write(ts)

    // Write invite code length (4 bytes) + invite code
    codeBytes := []byte(m.InviteCode)
    codeLen := make([]byte, 4)
    binary.BigEndian.PutUint32(codeLen, uint32(len(codeBytes)))
    buf.Write(codeLen)
    buf.Write(codeBytes)

    return buf.Bytes()
}

// deserializeNIP43Membership converts bytes back to a membership
func (w *W) deserializeNIP43Membership(data []byte) (*database.NIP43Membership, error) {
    if len(data) < 44 { // 32 + 8 + 4 minimum
        return nil, errors.New("invalid membership data")
    }

    m := &database.NIP43Membership{}

    // Read pubkey
    m.Pubkey = make([]byte, 32)
    copy(m.Pubkey, data[:32])

    // Read AddedAt
    m.AddedAt = time.Unix(int64(binary.BigEndian.Uint64(data[32:40])), 0)

    // Read invite code
    codeLen := binary.BigEndian.Uint32(data[40:44])
    if len(data) < int(44+codeLen) {
        return nil, errors.New("invalid invite code length")
    }
    m.InviteCode = string(data[44 : 44+codeLen])

    return m, nil
}

// Helper to convert safejs.Value to string for keys
func safeValueToString(v safejs.Value) string {
    if v.IsUndefined() || v.IsNull() {
        return ""
    }
    str, err := v.String()
    if err != nil {
        return ""
    }
    return str
}
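Taken together, the invite helpers support a simple admission flow; a sketch (`admitWithInvite` is hypothetical, assuming `w` is an initialized `*W` and the code string was generated elsewhere):

```go
// Sketch only: issue an invite, validate it, then record the member.
func admitWithInvite(w *W, code string, pubkey []byte) error {
    if err := w.StoreInviteCode(code, time.Now().Add(24*time.Hour)); err != nil {
        return err
    }
    ok, _ := w.ValidateInviteCode(code) // false once expired or deleted
    if !ok {
        return errors.New("invite not valid")
    }
    return w.AddNIP43Member(pubkey, code) // pubkey must be 32 bytes
}
```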
pkg/wasmdb/query-events.go (new file, 767 lines)
@@ -0,0 +1,767 @@

//go:build js && wasm

package wasmdb

import (
    "bytes"
    "context"
    "errors"
    "sort"
    "strconv"
    "time"

    "github.com/aperturerobotics/go-indexeddb/idb"
    "lol.mleku.dev/chk"
    sha256 "github.com/minio/sha256-simd"

    "git.mleku.dev/mleku/nostr/encoders/event"
    "git.mleku.dev/mleku/nostr/encoders/filter"
    "git.mleku.dev/mleku/nostr/encoders/hex"
    "git.mleku.dev/mleku/nostr/encoders/ints"
    "git.mleku.dev/mleku/nostr/encoders/kind"
    "git.mleku.dev/mleku/nostr/encoders/tag"
    "next.orly.dev/pkg/database"
    "next.orly.dev/pkg/database/indexes/types"
    "next.orly.dev/pkg/interfaces/store"
    "next.orly.dev/pkg/utils"
)

// CheckExpiration checks if an event has expired based on its "expiration" tag
func CheckExpiration(ev *event.E) (expired bool) {
    var err error
    expTag := ev.Tags.GetFirst([]byte("expiration"))
    if expTag != nil {
        expTS := ints.New(0)
        if _, err = expTS.Unmarshal(expTag.Value()); err == nil {
            if int64(expTS.N) < time.Now().Unix() {
                return true
            }
        }
    }
    return
}

// GetSerialsByRange retrieves serials from an index range using cursor iteration.
// The index keys must end with a 5-byte serial number.
func (w *W) GetSerialsByRange(idx database.Range) (sers types.Uint40s, err error) {
    if len(idx.Start) < 3 {
        return nil, errors.New("invalid range: start key too short")
    }

    // Extract the object store name from the 3-byte prefix
    storeName := string(idx.Start[:3])

    // Open a read transaction
    tx, err := w.db.Transaction(idb.TransactionReadOnly, storeName)
    if err != nil {
        return nil, err
    }

    objStore, err := tx.ObjectStore(storeName)
    if err != nil {
        return nil, err
    }

    // Open cursor in reverse order (newest first like Badger)
    cursorReq, err := objStore.OpenCursor(idb.CursorPrevious)
    if err != nil {
        return nil, err
    }

    // Pre-allocate slice
    sers = make(types.Uint40s, 0, 100)

    // Create end boundary with 0xff suffix for inclusive range
    endBoundary := make([]byte, len(idx.End)+5)
    copy(endBoundary, idx.End)
    for i := len(idx.End); i < len(endBoundary); i++ {
        endBoundary[i] = 0xff
    }

    err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
        keyVal, keyErr := cursor.Key()
        if keyErr != nil {
            return keyErr
        }

        key := safeValueToBytes(keyVal)
        if len(key) < 8 { // minimum: 3 prefix + 5 serial
            return cursor.Continue()
        }

        // Check if key is within range
        keyWithoutSerial := key[:len(key)-5]

        // Compare with start (lower bound)
        cmp := bytes.Compare(keyWithoutSerial, idx.Start)
        if cmp < 0 {
            // Key is before range start, stop iteration
            return errors.New("done")
        }

        // Compare with end boundary
        if bytes.Compare(key, endBoundary) > 0 {
            // Key is after range end, continue to find keys in range
            return cursor.Continue()
        }

        // Extract serial from last 5 bytes
        ser := new(types.Uint40)
        if err := ser.UnmarshalRead(bytes.NewReader(key[len(key)-5:])); err == nil {
            sers = append(sers, ser)
        }

        return cursor.Continue()
    })

    if err != nil && err.Error() != "done" {
        return nil, err
    }

    // Sort by serial (ascending)
    sort.Slice(sers, func(i, j int) bool {
        return sers[i].Get() < sers[j].Get()
    })

    return sers, nil
}
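The inclusive upper bound in `GetSerialsByRange` comes from padding `idx.End` with five `0xff` bytes: every stored key is an index prefix followed by a 5-byte serial, so any key whose prefix is at or below `idx.End` compares at or below the padded boundary. A self-contained sketch of that comparison (the two-byte prefix is hypothetical):

```go
// Sketch only: why 0xff padding makes the End bound inclusive.
func boundaryExample() bool {
    end := []byte{0x61, 0x62}                              // hypothetical idx.End prefix
    key := append(append([]byte{}, end...), 0, 0, 0, 0, 7) // prefix + serial 7
    boundary := append(append([]byte{}, end...), 0xff, 0xff, 0xff, 0xff, 0xff)
    return bytes.Compare(key, boundary) <= 0 // true for every possible serial
}
```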
// QueryForIds retrieves IdPkTs records based on a filter.
|
||||
// Results are sorted by timestamp in reverse chronological order.
|
||||
func (w *W) QueryForIds(c context.Context, f *filter.F) (idPkTs []*store.IdPkTs, err error) {
|
||||
if f.Ids != nil && f.Ids.Len() > 0 {
|
||||
err = errors.New("query for Ids is invalid for a filter with Ids")
|
||||
return
|
||||
}
|
||||
|
||||
var idxs []database.Range
|
||||
if idxs, err = database.GetIndexesFromFilter(f); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
var results []*store.IdPkTs
|
||||
results = make([]*store.IdPkTs, 0, len(idxs)*100)
|
||||
|
||||
// Track match counts for search ranking
|
||||
counts := make(map[uint64]int)
|
||||
|
||||
for _, idx := range idxs {
|
||||
var founds types.Uint40s
|
||||
if founds, err = w.GetSerialsByRange(idx); err != nil {
|
||||
w.Logger.Warnf("QueryForIds: GetSerialsByRange error: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
var tmp []*store.IdPkTs
|
||||
if tmp, err = w.GetFullIdPubkeyBySerials(founds); err != nil {
|
||||
w.Logger.Warnf("QueryForIds: GetFullIdPubkeyBySerials error: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Track match counts for search queries
|
||||
if len(f.Search) > 0 {
|
||||
for _, v := range tmp {
|
||||
counts[v.Ser]++
|
||||
}
|
||||
}
|
||||
results = append(results, tmp...)
|
||||
}
|
||||
|
||||
// Deduplicate results
|
||||
seen := make(map[uint64]struct{}, len(results))
|
||||
idPkTs = make([]*store.IdPkTs, 0, len(results))
|
||||
for _, idpk := range results {
|
||||
if _, ok := seen[idpk.Ser]; !ok {
|
||||
seen[idpk.Ser] = struct{}{}
|
||||
idPkTs = append(idPkTs, idpk)
|
||||
}
|
||||
}
|
||||
|
||||
// For search queries combined with other filters, verify matches
|
||||
if len(f.Search) > 0 && ((f.Authors != nil && f.Authors.Len() > 0) ||
|
||||
(f.Kinds != nil && f.Kinds.Len() > 0) ||
|
||||
(f.Tags != nil && f.Tags.Len() > 0)) {
|
||||
// Build serial list for fetching
|
||||
serials := make([]*types.Uint40, 0, len(idPkTs))
|
||||
for _, v := range idPkTs {
|
||||
s := new(types.Uint40)
|
||||
s.Set(v.Ser)
|
||||
serials = append(serials, s)
|
||||
}
|
||||
|
||||
var evs map[uint64]*event.E
|
||||
if evs, err = w.FetchEventsBySerials(serials); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
filtered := make([]*store.IdPkTs, 0, len(idPkTs))
|
||||
for _, v := range idPkTs {
|
||||
ev, ok := evs[v.Ser]
|
||||
if !ok || ev == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
matchesAll := true
|
||||
if f.Authors != nil && f.Authors.Len() > 0 && !f.Authors.Contains(ev.Pubkey) {
|
||||
matchesAll = false
|
||||
}
|
||||
if matchesAll && f.Kinds != nil && f.Kinds.Len() > 0 && !f.Kinds.Contains(ev.Kind) {
|
||||
matchesAll = false
|
||||
}
|
||||
if matchesAll && f.Tags != nil && f.Tags.Len() > 0 {
|
||||
tagOK := true
|
||||
for _, t := range *f.Tags {
|
||||
if t.Len() < 2 {
|
||||
continue
|
||||
}
|
||||
key := t.Key()
|
||||
values := t.T[1:]
|
||||
if !ev.Tags.ContainsAny(key, values) {
|
||||
tagOK = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if !tagOK {
|
||||
matchesAll = false
|
||||
}
|
||||
}
|
||||
if matchesAll {
|
||||
filtered = append(filtered, v)
|
||||
}
|
||||
}
|
||||
idPkTs = filtered
|
||||
}
|
||||
|
||||
// Sort by timestamp (newest first)
|
||||
if len(f.Search) == 0 {
|
||||
sort.Slice(idPkTs, func(i, j int) bool {
|
||||
return idPkTs[i].Ts > idPkTs[j].Ts
|
||||
})
|
||||
} else {
|
||||
// Search ranking: blend match count with recency
|
||||
var maxCount int
|
||||
var minTs, maxTs int64
|
||||
if len(idPkTs) > 0 {
|
||||
minTs, maxTs = idPkTs[0].Ts, idPkTs[0].Ts
|
||||
}
|
||||
for _, v := range idPkTs {
|
||||
if c := counts[v.Ser]; c > maxCount {
|
||||
maxCount = c
|
||||
}
|
||||
if v.Ts < minTs {
|
||||
minTs = v.Ts
|
||||
}
|
||||
if v.Ts > maxTs {
|
||||
maxTs = v.Ts
|
||||
}
|
||||
}
|
||||
tsSpan := maxTs - minTs
|
||||
if tsSpan <= 0 {
|
||||
tsSpan = 1
|
||||
}
|
||||
if maxCount <= 0 {
|
||||
maxCount = 1
|
||||
}
|
||||
sort.Slice(idPkTs, func(i, j int) bool {
|
||||
ci := float64(counts[idPkTs[i].Ser]) / float64(maxCount)
|
||||
cj := float64(counts[idPkTs[j].Ser]) / float64(maxCount)
|
||||
ai := float64(idPkTs[i].Ts-minTs) / float64(tsSpan)
|
||||
aj := float64(idPkTs[j].Ts-minTs) / float64(tsSpan)
|
||||
si := 0.5*ci + 0.5*ai
|
||||
sj := 0.5*cj + 0.5*aj
|
||||
if si == sj {
|
||||
return idPkTs[i].Ts > idPkTs[j].Ts
|
||||
}
|
||||
return si > sj
|
||||
})
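		// Worked example (illustrative): with maxCount = 4, an event matched by
		// 2 search terms has ci = 2/4 = 0.5; if its timestamp sits 80% of the
		// way through the result span, ai = 0.8, so its score is
		// 0.5*0.5 + 0.5*0.8 = 0.65. An event with all 4 matches at the oldest
		// timestamp scores 0.5*1.0 + 0.5*0.0 = 0.5, so the fresher partial
		// match ranks first.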
	}

	// Apply limit
	if f.Limit != nil && len(idPkTs) > int(*f.Limit) {
		idPkTs = idPkTs[:*f.Limit]
	}

	return
}

// QueryForSerials takes a filter and returns matching event serials
func (w *W) QueryForSerials(c context.Context, f *filter.F) (sers types.Uint40s, err error) {
	var founds []*types.Uint40
	var idPkTs []*store.IdPkTs

	if f.Ids != nil && f.Ids.Len() > 0 {
		// Use batch lookup for IDs
		var serialMap map[string]*types.Uint40
		if serialMap, err = w.GetSerialsByIds(f.Ids); chk.E(err) {
			return
		}
		for _, ser := range serialMap {
			founds = append(founds, ser)
		}
		var tmp []*store.IdPkTs
		if tmp, err = w.GetFullIdPubkeyBySerials(founds); chk.E(err) {
			return
		}
		idPkTs = append(idPkTs, tmp...)
	} else {
		if idPkTs, err = w.QueryForIds(c, f); chk.E(err) {
			return
		}
	}

	// Extract serials
	for _, idpk := range idPkTs {
		ser := new(types.Uint40)
		if err = ser.Set(idpk.Ser); chk.E(err) {
			continue
		}
		sers = append(sers, ser)
	}
	return
}

// QueryEvents queries events based on a filter
func (w *W) QueryEvents(c context.Context, f *filter.F) (evs event.S, err error) {
	return w.QueryEventsWithOptions(c, f, true, false)
}

// QueryAllVersions queries events and returns all versions of replaceable events
func (w *W) QueryAllVersions(c context.Context, f *filter.F) (evs event.S, err error) {
	return w.QueryEventsWithOptions(c, f, true, true)
}

// QueryEventsWithOptions queries events with additional options for deletion and versioning
func (w *W) QueryEventsWithOptions(c context.Context, f *filter.F, includeDeleteEvents bool, showAllVersions bool) (evs event.S, err error) {
	wantMultipleVersions := showAllVersions || (f.Limit != nil && *f.Limit > 1)

	var expDeletes types.Uint40s
	var expEvs event.S

	// Handle ID-based queries
	if f.Ids != nil && f.Ids.Len() > 0 {
		w.Logger.Debugf("QueryEvents: ids path, count=%d", f.Ids.Len())

		serials, idErr := w.GetSerialsByIds(f.Ids)
		if idErr != nil {
			w.Logger.Warnf("QueryEvents: error looking up ids: %v", idErr)
		}

		// Convert to slice for batch fetch
		var serialsSlice []*types.Uint40
		idHexToSerial := make(map[uint64]string, len(serials))
		for idHex, ser := range serials {
			serialsSlice = append(serialsSlice, ser)
			idHexToSerial[ser.Get()] = idHex
		}

		// Batch fetch events
		var fetchedEvents map[uint64]*event.E
		if fetchedEvents, err = w.FetchEventsBySerials(serialsSlice); err != nil {
			w.Logger.Warnf("QueryEvents: batch fetch failed: %v", err)
			return
		}

		// Process fetched events
		for serialValue, ev := range fetchedEvents {
			idHex := idHexToSerial[serialValue]

			ser := new(types.Uint40)
			if err = ser.Set(serialValue); err != nil {
				continue
			}

			// Check expiration
			if CheckExpiration(ev) {
				w.Logger.Debugf("QueryEvents: id=%s filtered out due to expiration", idHex)
				expDeletes = append(expDeletes, ser)
				expEvs = append(expEvs, ev)
				continue
			}

			// Check for deletion
			if derr := w.CheckForDeleted(ev, nil); derr != nil {
				w.Logger.Debugf("QueryEvents: id=%s filtered out due to deletion: %v", idHex, derr)
				continue
			}

			evs = append(evs, ev)
		}

		// Sort and apply limit
		sort.Slice(evs, func(i, j int) bool {
			return evs[i].CreatedAt > evs[j].CreatedAt
		})
		if f.Limit != nil && len(evs) > int(*f.Limit) {
			evs = evs[:*f.Limit]
		}
	} else {
		// Non-IDs path
		var idPkTs []*store.IdPkTs
		if idPkTs, err = w.QueryForIds(c, f); chk.E(err) {
			return
		}

		// Maps for replaceable event handling
		replaceableEvents := make(map[string]*event.E)
		replaceableEventVersions := make(map[string]event.S)
		paramReplaceableEvents := make(map[string]map[string]*event.E)
		paramReplaceableEventVersions := make(map[string]map[string]event.S)
		var regularEvents event.S

		// Deletion tracking maps
		deletionsByKindPubkey := make(map[string]bool)
		deletionsByKindPubkeyDTag := make(map[string]map[string]int64)
		deletedEventIds := make(map[string]bool)

		// Query for deletion events if we have authors
		if f.Authors != nil && f.Authors.Len() > 0 {
			deletionFilter := &filter.F{
				Kinds:   kind.NewS(kind.New(5)),
				Authors: f.Authors,
			}
			var deletionIdPkTs []*store.IdPkTs
			if deletionIdPkTs, err = w.QueryForIds(c, deletionFilter); err == nil {
				idPkTs = append(idPkTs, deletionIdPkTs...)
			}
		}

		// Prepare serials for batch fetch
		var allSerials []*types.Uint40
		serialToIdPk := make(map[uint64]*store.IdPkTs, len(idPkTs))
		for _, idpk := range idPkTs {
			ser := new(types.Uint40)
			if err = ser.Set(idpk.Ser); err != nil {
				continue
			}
			allSerials = append(allSerials, ser)
			serialToIdPk[ser.Get()] = idpk
		}

		// Batch fetch all events
		var allEvents map[uint64]*event.E
		if allEvents, err = w.FetchEventsBySerials(allSerials); err != nil {
			w.Logger.Warnf("QueryEvents: batch fetch failed in non-IDs path: %v", err)
			return
		}

		// First pass: collect deletion events
		for serialValue, ev := range allEvents {
			ser := new(types.Uint40)
			if err = ser.Set(serialValue); err != nil {
				continue
			}

			if CheckExpiration(ev) {
				expDeletes = append(expDeletes, ser)
				expEvs = append(expEvs, ev)
				continue
			}

			if ev.Kind == kind.Deletion.K {
				// Process e-tags and a-tags for deletion tracking
				aTags := ev.Tags.GetAll([]byte("a"))
				for _, aTag := range aTags {
					if aTag.Len() < 2 {
						continue
					}
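					// a-tag values use the NIP-33 coordinate form
					// "kind:pubkey[:d-tag]", e.g. "30023:<hex pubkey>:my-article";
					// the split below parses that shape.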
					split := bytes.Split(aTag.Value(), []byte{':'})
					if len(split) < 2 {
						continue
					}
					kindInt, parseErr := strconv.Atoi(string(split[0]))
					if parseErr != nil {
						continue
					}
					kk := kind.New(uint16(kindInt))
					if !kind.IsReplaceable(kk.K) {
						continue
					}
					var pk []byte
					if pk, err = hex.DecAppend(nil, split[1]); err != nil {
						continue
					}
					if !utils.FastEqual(pk, ev.Pubkey) {
						continue
					}
					key := hex.Enc(pk) + ":" + strconv.Itoa(int(kk.K))

					if kind.IsParameterizedReplaceable(kk.K) {
						if len(split) < 3 {
							continue
						}
						if _, exists := deletionsByKindPubkeyDTag[key]; !exists {
							deletionsByKindPubkeyDTag[key] = make(map[string]int64)
						}
						dValue := string(split[2])
						if ts, ok := deletionsByKindPubkeyDTag[key][dValue]; !ok || ev.CreatedAt > ts {
							deletionsByKindPubkeyDTag[key][dValue] = ev.CreatedAt
						}
					} else {
						deletionsByKindPubkey[key] = true
					}
				}

				// Process e-tags for specific event deletions
				eTags := ev.Tags.GetAll([]byte("e"))
				for _, eTag := range eTags {
					eTagHex := eTag.ValueHex()
					if len(eTagHex) != 64 {
						continue
					}
					evId := make([]byte, sha256.Size)
					if _, hexErr := hex.DecBytes(evId, eTagHex); hexErr != nil {
						continue
					}

					// Look for target in current batch
					var targetEv *event.E
					for _, candidateEv := range allEvents {
						if utils.FastEqual(candidateEv.ID, evId) {
							targetEv = candidateEv
							break
						}
					}

					// Try to fetch if not in batch
					if targetEv == nil {
						ser, serErr := w.GetSerialById(evId)
						if serErr != nil || ser == nil {
							continue
						}
						targetEv, serErr = w.FetchEventBySerial(ser)
						if serErr != nil || targetEv == nil {
							continue
						}
					}

					if !utils.FastEqual(targetEv.Pubkey, ev.Pubkey) {
						continue
					}
					deletedEventIds[hex.Enc(targetEv.ID)] = true
				}
			}
		}

		// Second pass: process all events, filtering deleted ones
		for _, ev := range allEvents {
			// Tag filter verification
			if f.Tags != nil && f.Tags.Len() > 0 {
				tagMatches := 0
				for _, filterTag := range *f.Tags {
					if filterTag.Len() >= 2 {
						filterKey := filterTag.Key()
						var actualKey []byte
						if len(filterKey) == 2 && filterKey[0] == '#' {
							actualKey = filterKey[1:]
						} else {
							actualKey = filterKey
						}
						eventHasTag := false
						if ev.Tags != nil {
							for _, eventTag := range *ev.Tags {
								if eventTag.Len() >= 2 && bytes.Equal(eventTag.Key(), actualKey) {
									for _, filterValue := range filterTag.T[1:] {
										if database.TagValuesMatchUsingTagMethods(eventTag, filterValue) {
											eventHasTag = true
											break
										}
									}
									if eventHasTag {
										break
									}
								}
							}
						}
						if eventHasTag {
							tagMatches++
						}
					}
				}
				if tagMatches < f.Tags.Len() {
					continue
				}
			}

			// Skip deletion events unless explicitly requested
			if ev.Kind == kind.Deletion.K {
				kind5Requested := false
				if f.Kinds != nil && f.Kinds.Len() > 0 {
					for i := 0; i < f.Kinds.Len(); i++ {
						if f.Kinds.K[i].K == kind.Deletion.K {
							kind5Requested = true
							break
						}
					}
				}
				if !kind5Requested {
					continue
				}
			}

			// Check if event ID is in filter
			isIdInFilter := false
			if f.Ids != nil && f.Ids.Len() > 0 {
				for i := 0; i < f.Ids.Len(); i++ {
					if utils.FastEqual(ev.ID, (*f.Ids).T[i]) {
						isIdInFilter = true
						break
					}
				}
			}

			// Check if specifically deleted
			eventIdHex := hex.Enc(ev.ID)
			if deletedEventIds[eventIdHex] {
				continue
			}

			// Handle replaceable events
			if kind.IsReplaceable(ev.Kind) {
				key := hex.Enc(ev.Pubkey) + ":" + strconv.Itoa(int(ev.Kind))
				if deletionsByKindPubkey[key] && !isIdInFilter {
					continue
				} else if wantMultipleVersions {
					replaceableEventVersions[key] = append(replaceableEventVersions[key], ev)
				} else {
					existing, exists := replaceableEvents[key]
					if !exists || ev.CreatedAt > existing.CreatedAt {
						replaceableEvents[key] = ev
					}
				}
			} else if kind.IsParameterizedReplaceable(ev.Kind) {
				key := hex.Enc(ev.Pubkey) + ":" + strconv.Itoa(int(ev.Kind))
				dTag := ev.Tags.GetFirst([]byte("d"))
				var dValue string
				if dTag != nil && dTag.Len() > 1 {
					dValue = string(dTag.Value())
				}

				if deletionMap, exists := deletionsByKindPubkeyDTag[key]; exists {
					if delTs, ok := deletionMap[dValue]; ok && ev.CreatedAt < delTs && !isIdInFilter {
						continue
					}
				}

				if wantMultipleVersions {
					if _, exists := paramReplaceableEventVersions[key]; !exists {
						paramReplaceableEventVersions[key] = make(map[string]event.S)
					}
					paramReplaceableEventVersions[key][dValue] = append(paramReplaceableEventVersions[key][dValue], ev)
				} else {
					if _, exists := paramReplaceableEvents[key]; !exists {
						paramReplaceableEvents[key] = make(map[string]*event.E)
					}
					existing, exists := paramReplaceableEvents[key][dValue]
					if !exists || ev.CreatedAt > existing.CreatedAt {
						paramReplaceableEvents[key][dValue] = ev
					}
				}
			} else {
				regularEvents = append(regularEvents, ev)
			}
		}

		// Collect results
		if wantMultipleVersions {
			for _, versions := range replaceableEventVersions {
				sort.Slice(versions, func(i, j int) bool {
					return versions[i].CreatedAt > versions[j].CreatedAt
				})
				limit := len(versions)
				if f.Limit != nil && int(*f.Limit) < limit {
					limit = int(*f.Limit)
				}
				for i := 0; i < limit; i++ {
					evs = append(evs, versions[i])
				}
			}
		} else {
			for _, ev := range replaceableEvents {
				evs = append(evs, ev)
			}
		}

		if wantMultipleVersions {
			for _, dTagMap := range paramReplaceableEventVersions {
				for _, versions := range dTagMap {
					sort.Slice(versions, func(i, j int) bool {
						return versions[i].CreatedAt > versions[j].CreatedAt
					})
					limit := len(versions)
					if f.Limit != nil && int(*f.Limit) < limit {
						limit = int(*f.Limit)
					}
					for i := 0; i < limit; i++ {
						evs = append(evs, versions[i])
					}
				}
			}
		} else {
			for _, innerMap := range paramReplaceableEvents {
				for _, ev := range innerMap {
					evs = append(evs, ev)
				}
			}
		}

		evs = append(evs, regularEvents...)

		// Sort and limit
		sort.Slice(evs, func(i, j int) bool {
			return evs[i].CreatedAt > evs[j].CreatedAt
		})
		if f.Limit != nil && len(evs) > int(*f.Limit) {
			evs = evs[:*f.Limit]
		}

		// Delete expired events in background
		go func() {
			for i, ser := range expDeletes {
				w.DeleteEventBySerial(context.Background(), ser, expEvs[i])
			}
		}()
	}

	return
}

// QueryDeleteEventsByTargetId queries for delete events targeting a specific event ID
func (w *W) QueryDeleteEventsByTargetId(c context.Context, targetEventId []byte) (evs event.S, err error) {
	f := &filter.F{
		Kinds: kind.NewS(kind.Deletion),
		Tags: tag.NewS(
			tag.NewFromAny("#e", hex.Enc(targetEventId)),
		),
	}
	return w.QueryEventsWithOptions(c, f, true, false)
}

// CountEvents counts events matching a filter
func (w *W) CountEvents(c context.Context, f *filter.F) (count int, approx bool, err error) {
	approx = false
	if f == nil {
		return 0, false, nil
	}

	// For ID-based queries, count resolved IDs
	if f.Ids != nil && f.Ids.Len() > 0 {
		serials, idErr := w.GetSerialsByIds(f.Ids)
		if idErr != nil {
			return 0, false, idErr
		}
		return len(serials), false, nil
	}

	// For other queries, get serials and count
	var sers types.Uint40s
	if sers, err = w.QueryForSerials(c, f); err != nil {
		return 0, false, err
	}

	return len(sers), false, nil
}

// GetSerialsFromFilter is an alias for QueryForSerials for interface compatibility
func (w *W) GetSerialsFromFilter(f *filter.F) (serials types.Uint40s, err error) {
	return w.QueryForSerials(w.ctx, f)
}

pkg/wasmdb/run-tests.sh (new executable file, 26 lines)
@@ -0,0 +1,26 @@
#!/bin/bash

# Run wasmdb tests using Node.js with fake-indexeddb
# This script builds the test binary and runs it in Node.js

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TESTDATA_DIR="$SCRIPT_DIR/testdata"
WASM_FILE="$TESTDATA_DIR/wasmdb_test.wasm"

# Ensure Node.js dependencies are installed
if [ ! -d "$TESTDATA_DIR/node_modules" ]; then
    echo "Installing Node.js dependencies..."
    cd "$TESTDATA_DIR"
    npm install
    cd - > /dev/null
fi

# Build the test binary
echo "Building WASM test binary..."
GOOS=js GOARCH=wasm CGO_ENABLED=0 go test -c -o "$WASM_FILE" "$SCRIPT_DIR"

# Run the tests
echo "Running tests in Node.js..."
node "$TESTDATA_DIR/run_wasm_tests.mjs" "$WASM_FILE" "$@"

pkg/wasmdb/save-event.go (new file, 423 lines)
@@ -0,0 +1,423 @@
//go:build js && wasm

package wasmdb

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"strings"

	"github.com/aperturerobotics/go-indexeddb/idb"
	"github.com/hack-pad/safejs"
	"lol.mleku.dev/chk"

	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/hex"
	"git.mleku.dev/mleku/nostr/encoders/kind"
	"git.mleku.dev/mleku/nostr/encoders/tag"
	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/database/indexes"
	"next.orly.dev/pkg/database/indexes/types"
)

var (
	// ErrOlderThanExisting is returned when a candidate event is older than an existing replaceable/addressable event.
	ErrOlderThanExisting = errors.New("older than existing event")
	// ErrMissingDTag is returned when a parameterized replaceable event lacks the required 'd' tag.
	ErrMissingDTag = errors.New("event is missing a d tag identifier")
)

// SaveEvent saves an event to the database, generating all necessary indexes.
func (w *W) SaveEvent(c context.Context, ev *event.E) (replaced bool, err error) {
	if ev == nil {
		err = errors.New("nil event")
		return
	}

	// Reject ephemeral events (kinds 20000-29999) - they should never be stored
	if ev.Kind >= 20000 && ev.Kind <= 29999 {
		err = errors.New("blocked: ephemeral events should not be stored")
		return
	}

	// Validate kind 3 (follow list) events have at least one p tag
	if ev.Kind == 3 {
		hasPTag := false
		if ev.Tags != nil {
			for _, t := range *ev.Tags {
				if t != nil && t.Len() >= 2 {
					key := t.Key()
					if len(key) == 1 && key[0] == 'p' {
						hasPTag = true
						break
					}
				}
			}
		}
		if !hasPTag {
			w.Logger.Warnf("SaveEvent: rejecting kind 3 event without p tags from pubkey %x", ev.Pubkey)
			err = errors.New("blocked: kind 3 follow list events must have at least one p tag")
			return
		}
	}

	// Check if the event already exists
	var ser *types.Uint40
	if ser, err = w.GetSerialById(ev.ID); err == nil && ser != nil {
		err = errors.New("blocked: event already exists: " + hex.Enc(ev.ID[:]))
		return
	}

	// If the error is "id not found", we can proceed
	if err != nil && strings.Contains(err.Error(), "id not found") {
		err = nil
	} else if err != nil {
		return
	}

	// Check for replacement - only validate, don't delete old events
	if kind.IsReplaceable(ev.Kind) || kind.IsParameterizedReplaceable(ev.Kind) {
		var werr error
		if replaced, _, werr = w.WouldReplaceEvent(ev); werr != nil {
			if errors.Is(werr, ErrOlderThanExisting) {
				if kind.IsReplaceable(ev.Kind) {
					err = errors.New("blocked: event is older than existing replaceable event")
				} else {
					err = errors.New("blocked: event is older than existing addressable event")
				}
				return
			}
			if errors.Is(werr, ErrMissingDTag) {
				err = ErrMissingDTag
				return
			}
			return
		}
	}

	// Get the next sequence number for the event
	serial, err := w.nextEventSerial()
	if err != nil {
		return
	}

	// Generate all indexes for the event
	idxs, err := database.GetIndexesForEvent(ev, serial)
	if err != nil {
		return
	}

	// Serialize event to binary
	eventDataBuf := new(bytes.Buffer)
	ev.MarshalBinary(eventDataBuf)
	eventData := eventDataBuf.Bytes()

	// Determine storage strategy
	smallEventThreshold := 1024 // Could be made configurable
	isSmallEvent := len(eventData) <= smallEventThreshold
	isReplaceableEvent := kind.IsReplaceable(ev.Kind)
	isAddressableEvent := kind.IsParameterizedReplaceable(ev.Kind)

	// Create serial type
	ser = new(types.Uint40)
	if err = ser.Set(serial); chk.E(err) {
		return
	}

	// Start a transaction to save the event and all its indexes
	// We need to include all object stores we'll write to
	storesToWrite := []string{
		string(indexes.IdPrefix),
		string(indexes.FullIdPubkeyPrefix),
		string(indexes.CreatedAtPrefix),
		string(indexes.PubkeyPrefix),
		string(indexes.KindPrefix),
		string(indexes.KindPubkeyPrefix),
		string(indexes.TagPrefix),
		string(indexes.TagKindPrefix),
		string(indexes.TagPubkeyPrefix),
		string(indexes.TagKindPubkeyPrefix),
		string(indexes.WordPrefix),
	}

	// Add event storage store
	if isSmallEvent {
		storesToWrite = append(storesToWrite, string(indexes.SmallEventPrefix))
	} else {
		storesToWrite = append(storesToWrite, string(indexes.EventPrefix))
	}

	// Add specialized stores if needed
	if isAddressableEvent && isSmallEvent {
		storesToWrite = append(storesToWrite, string(indexes.AddressableEventPrefix))
	} else if isReplaceableEvent && isSmallEvent {
		storesToWrite = append(storesToWrite, string(indexes.ReplaceableEventPrefix))
	}

	// Start transaction
	tx, err := w.db.Transaction(idb.TransactionReadWrite, storesToWrite[0], storesToWrite[1:]...)
	if err != nil {
		return false, fmt.Errorf("failed to start transaction: %w", err)
	}

	// Save each index to its respective object store
	for _, key := range idxs {
		if len(key) < 3 {
			continue
		}
		// Extract store name from 3-byte prefix
		storeName := string(key[:3])

		store, storeErr := tx.ObjectStore(storeName)
		if storeErr != nil {
			w.Logger.Warnf("SaveEvent: failed to get object store %s: %v", storeName, storeErr)
			continue
		}

		// Use the full key as the IndexedDB key, empty value
		keyJS := bytesToSafeValue(key)
		_, putErr := store.PutKey(keyJS, safejs.Null())
		if putErr != nil {
			w.Logger.Warnf("SaveEvent: failed to put index %s: %v", storeName, putErr)
		}
	}
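
	// Note: index records are key-only - the full key encodes all index data
	// and the value is null, mirroring the Badger layout where indexes live
	// entirely in the key space.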

	// Store the event data
	if isSmallEvent {
		// Small event: store inline with sev prefix
		// Format: sev|serial|size_uint16|event_data
		keyBuf := new(bytes.Buffer)
		if err = indexes.SmallEventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
			return
		}
		// Append size as uint16 big-endian
		sizeBytes := []byte{byte(len(eventData) >> 8), byte(len(eventData))}
		keyBuf.Write(sizeBytes)
		keyBuf.Write(eventData)

		store, storeErr := tx.ObjectStore(string(indexes.SmallEventPrefix))
		if storeErr == nil {
			keyJS := bytesToSafeValue(keyBuf.Bytes())
			store.PutKey(keyJS, safejs.Null())
		}
	} else {
		// Large event: store separately with evt prefix
		keyBuf := new(bytes.Buffer)
		if err = indexes.EventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
			return
		}

		store, storeErr := tx.ObjectStore(string(indexes.EventPrefix))
		if storeErr == nil {
			keyJS := bytesToSafeValue(keyBuf.Bytes())
			valueJS := bytesToSafeValue(eventData)
			store.PutKey(keyJS, valueJS)
		}
	}
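
	// Note: small events are embedded in the IndexedDB key itself
	// (prefix|serial|size|data) with a null value, while large events keep the
	// serialized payload in the record value under a compact evt key.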

	// Store specialized keys for replaceable/addressable events
	if isAddressableEvent && isSmallEvent {
		dTag := ev.Tags.GetFirst([]byte("d"))
		if dTag != nil {
			pubHash := new(types.PubHash)
			pubHash.FromPubkey(ev.Pubkey)
			kindVal := new(types.Uint16)
			kindVal.Set(ev.Kind)
			dTagHash := new(types.Ident)
			dTagHash.FromIdent(dTag.Value())

			keyBuf := new(bytes.Buffer)
			if err = indexes.AddressableEventEnc(pubHash, kindVal, dTagHash).MarshalWrite(keyBuf); chk.E(err) {
				return
			}
			sizeBytes := []byte{byte(len(eventData) >> 8), byte(len(eventData))}
			keyBuf.Write(sizeBytes)
			keyBuf.Write(eventData)

			store, storeErr := tx.ObjectStore(string(indexes.AddressableEventPrefix))
			if storeErr == nil {
				keyJS := bytesToSafeValue(keyBuf.Bytes())
				store.PutKey(keyJS, safejs.Null())
			}
		}
	} else if isReplaceableEvent && isSmallEvent {
		pubHash := new(types.PubHash)
		pubHash.FromPubkey(ev.Pubkey)
		kindVal := new(types.Uint16)
		kindVal.Set(ev.Kind)

		keyBuf := new(bytes.Buffer)
		if err = indexes.ReplaceableEventEnc(pubHash, kindVal).MarshalWrite(keyBuf); chk.E(err) {
			return
		}
		sizeBytes := []byte{byte(len(eventData) >> 8), byte(len(eventData))}
		keyBuf.Write(sizeBytes)
		keyBuf.Write(eventData)

		store, storeErr := tx.ObjectStore(string(indexes.ReplaceableEventPrefix))
		if storeErr == nil {
			keyJS := bytesToSafeValue(keyBuf.Bytes())
			store.PutKey(keyJS, safejs.Null())
		}
	}

	// Commit transaction
	if err = tx.Await(c); err != nil {
		return false, fmt.Errorf("failed to commit transaction: %w", err)
	}

	w.Logger.Debugf("SaveEvent: saved event %x (kind %d, %d bytes, %d indexes)",
		ev.ID[:8], ev.Kind, len(eventData), len(idxs))

	return
}

// WouldReplaceEvent checks if the provided event would replace existing events
func (w *W) WouldReplaceEvent(ev *event.E) (bool, types.Uint40s, error) {
	// Only relevant for replaceable or parameterized replaceable kinds
	if !(kind.IsReplaceable(ev.Kind) || kind.IsParameterizedReplaceable(ev.Kind)) {
		return false, nil, nil
	}

	// Build filter for existing events
	var f interface{}
	if kind.IsReplaceable(ev.Kind) {
		// For now, simplified check - would need full filter implementation
		return false, nil, nil
	} else {
		// Parameterized replaceable requires 'd' tag
		dTag := ev.Tags.GetFirst([]byte("d"))
		if dTag == nil {
			return false, nil, ErrMissingDTag
		}
		// Simplified - full implementation would query existing events
		_ = f
	}

	// Simplified implementation - assume no conflicts for now
	// Full implementation would query the database and compare timestamps
	return false, nil, nil
}

// GetSerialById looks up the serial number for an event ID
func (w *W) GetSerialById(id []byte) (ser *types.Uint40, err error) {
	if len(id) != 32 {
		return nil, errors.New("invalid event ID length")
	}

	// Create ID hash
	idHash := new(types.IdHash)
	if err = idHash.FromId(id); chk.E(err) {
		return nil, err
	}

	// Build the prefix to search for
	keyBuf := new(bytes.Buffer)
	indexes.IdEnc(idHash, nil).MarshalWrite(keyBuf)
	prefix := keyBuf.Bytes()[:11] // 3 prefix + 8 id hash

	// Search in the eid object store
	tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.IdPrefix))
	if err != nil {
		return nil, err
	}

	store, err := tx.ObjectStore(string(indexes.IdPrefix))
	if err != nil {
		return nil, err
	}

	// Use cursor to find matching key
	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}

	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}

		keyBytes := safeValueToBytes(keyVal)
		if len(keyBytes) >= len(prefix) && bytes.HasPrefix(keyBytes, prefix) {
			// Found matching key, extract serial from last 5 bytes
			if len(keyBytes) >= 16 { // 3 + 8 + 5
				ser = new(types.Uint40)
				ser.UnmarshalRead(bytes.NewReader(keyBytes[11:16]))
				return errors.New("found") // Stop iteration
			}
		}

		return cursor.Continue()
	})

	if ser != nil {
		return ser, nil
	}
	if err != nil && err.Error() != "found" {
		return nil, err
	}

	return nil, errors.New("id not found in database")
}
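
// Note: the lookup above scans the eid store with an unbounded cursor and
// stops at the first prefix match; a cursor bounded to the 11-byte prefix
// could avoid visiting unrelated keys (left as a possible optimization,
// since it depends on the key-range helpers available in the idb bindings).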

// GetSerialsByIds looks up serial numbers for multiple event IDs
func (w *W) GetSerialsByIds(ids *tag.T) (serials map[string]*types.Uint40, err error) {
	serials = make(map[string]*types.Uint40)

	if ids == nil {
		return
	}

	for i := 1; i < ids.Len(); i++ {
		idBytes := ids.T[i]
		if len(idBytes) == 64 {
			// Hex encoded ID
			var decoded []byte
			decoded, err = hex.Dec(string(idBytes))
			if err != nil {
				continue
			}
			idBytes = decoded
		}

		if len(idBytes) == 32 {
			var ser *types.Uint40
			ser, err = w.GetSerialById(idBytes)
			if err == nil && ser != nil {
				serials[hex.Enc(idBytes)] = ser
			}
		}
	}

	err = nil
	return
}

// GetSerialsByIdsWithFilter looks up serial numbers with a filter function
func (w *W) GetSerialsByIdsWithFilter(ids *tag.T, fn func(ev *event.E, ser *types.Uint40) bool) (serials map[string]*types.Uint40, err error) {
	allSerials, err := w.GetSerialsByIds(ids)
	if err != nil {
		return nil, err
	}

	if fn == nil {
		return allSerials, nil
	}

	serials = make(map[string]*types.Uint40)
	for idHex, ser := range allSerials {
		ev, fetchErr := w.FetchEventBySerial(ser)
		if fetchErr != nil {
			continue
		}
		if fn(ev, ser) {
			serials[idHex] = ser
		}
	}

	return serials, nil
}

pkg/wasmdb/subscriptions.go (new file, 332 lines)
@@ -0,0 +1,332 @@
//go:build js && wasm

package wasmdb

import (
	"bytes"
	"encoding/binary"
	"encoding/json"
	"errors"
	"time"

	"github.com/aperturerobotics/go-indexeddb/idb"

	"next.orly.dev/pkg/database"
)

const (
	// SubscriptionsStoreName is the object store for payment subscriptions
	SubscriptionsStoreName = "subscriptions"

	// PaymentsPrefix is the key prefix for payment records
	PaymentsPrefix = "payment:"
)

// GetSubscription retrieves a subscription for a pubkey
func (w *W) GetSubscription(pubkey []byte) (*database.Subscription, error) {
	key := "sub:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return nil, err
	}
	if data == nil {
		return nil, nil
	}

	return w.deserializeSubscription(data)
}

// IsSubscriptionActive checks if a pubkey has an active subscription.
// If no subscription exists, it creates a 30-day trial.
func (w *W) IsSubscriptionActive(pubkey []byte) (bool, error) {
	key := "sub:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return false, err
	}

	now := time.Now()

	if data == nil {
		// Create new trial subscription
		sub := &database.Subscription{
			TrialEnd: now.AddDate(0, 0, 30),
		}
		subData := w.serializeSubscription(sub)
		if err := w.setStoreValue(SubscriptionsStoreName, key, subData); err != nil {
			return false, err
		}
		return true, nil
	}

	sub, err := w.deserializeSubscription(data)
	if err != nil {
		return false, err
	}

	// Active if within trial or paid period
	return now.Before(sub.TrialEnd) || (!sub.PaidUntil.IsZero() && now.Before(sub.PaidUntil)), nil
}
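
// Note: the active check above has a write side effect - a missing record
// creates and persists a 30-day trial on first read, so callers implicitly
// enroll unknown pubkeys.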

// ExtendSubscription extends a subscription by the given number of days
func (w *W) ExtendSubscription(pubkey []byte, days int) error {
	if days <= 0 {
		return errors.New("invalid days")
	}

	key := "sub:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return err
	}

	now := time.Now()
	var sub *database.Subscription

	if data == nil {
		// Create new subscription
		sub = &database.Subscription{
			PaidUntil: now.AddDate(0, 0, days),
		}
	} else {
		sub, err = w.deserializeSubscription(data)
		if err != nil {
			return err
		}
		// Extend from current paid date if still active, otherwise from now
		extendFrom := now
		if !sub.PaidUntil.IsZero() && sub.PaidUntil.After(now) {
			extendFrom = sub.PaidUntil
		}
		sub.PaidUntil = extendFrom.AddDate(0, 0, days)
	}

	// Serialize and store
	subData := w.serializeSubscription(sub)
	return w.setStoreValue(SubscriptionsStoreName, key, subData)
}

// RecordPayment records a payment for a pubkey
func (w *W) RecordPayment(pubkey []byte, amount int64, invoice, preimage string) error {
	now := time.Now()
	payment := &database.Payment{
		Amount:    amount,
		Timestamp: now,
		Invoice:   invoice,
		Preimage:  preimage,
	}

	data := w.serializePayment(payment)

	// Create unique key with timestamp
	key := PaymentsPrefix + string(pubkey) + ":" + now.Format(time.RFC3339Nano)
	return w.setStoreValue(SubscriptionsStoreName, key, data)
}

// GetPaymentHistory retrieves all payments for a pubkey
func (w *W) GetPaymentHistory(pubkey []byte) ([]database.Payment, error) {
	prefix := PaymentsPrefix + string(pubkey) + ":"

	tx, err := w.db.Transaction(idb.TransactionReadOnly, SubscriptionsStoreName)
	if err != nil {
		return nil, err
	}

	store, err := tx.ObjectStore(SubscriptionsStoreName)
	if err != nil {
		return nil, err
	}

	var payments []database.Payment

	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}

	prefixBytes := []byte(prefix)

	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}

		keyBytes := safeValueToBytes(keyVal)
		if bytes.HasPrefix(keyBytes, prefixBytes) {
			val, valErr := cursor.Value()
			if valErr != nil {
				return valErr
			}
			valBytes := safeValueToBytes(val)
			if payment, err := w.deserializePayment(valBytes); err == nil {
				payments = append(payments, *payment)
			}
		}

		return cursor.Continue()
	})

	if err != nil {
		return nil, err
	}

	return payments, nil
}

// ExtendBlossomSubscription extends a blossom subscription with storage quota
func (w *W) ExtendBlossomSubscription(pubkey []byte, level string, storageMB int64, days int) error {
	if days <= 0 {
		return errors.New("invalid days")
	}

	key := "sub:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return err
	}

	now := time.Now()
	var sub *database.Subscription

	if data == nil {
		sub = &database.Subscription{
			PaidUntil:      now.AddDate(0, 0, days),
			BlossomLevel:   level,
			BlossomStorage: storageMB,
		}
	} else {
		sub, err = w.deserializeSubscription(data)
		if err != nil {
			return err
		}

		// Extend from current paid date if still active
		extendFrom := now
		if !sub.PaidUntil.IsZero() && sub.PaidUntil.After(now) {
			extendFrom = sub.PaidUntil
		}
		sub.PaidUntil = extendFrom.AddDate(0, 0, days)

		// Set level and accumulate storage
		sub.BlossomLevel = level
		if sub.BlossomStorage > 0 && sub.PaidUntil.After(now) {
			sub.BlossomStorage += storageMB
		} else {
			sub.BlossomStorage = storageMB
		}
	}

	subData := w.serializeSubscription(sub)
	return w.setStoreValue(SubscriptionsStoreName, key, subData)
}

// GetBlossomStorageQuota returns the storage quota for a pubkey
func (w *W) GetBlossomStorageQuota(pubkey []byte) (quotaMB int64, err error) {
	sub, err := w.GetSubscription(pubkey)
	if err != nil {
		return 0, err
	}
	if sub == nil {
		return 0, nil
	}
	// Only return quota if subscription is active
	if sub.PaidUntil.IsZero() || time.Now().After(sub.PaidUntil) {
		return 0, nil
	}
	return sub.BlossomStorage, nil
}

// IsFirstTimeUser checks if a pubkey is a first-time user (no subscription history)
func (w *W) IsFirstTimeUser(pubkey []byte) (bool, error) {
	key := "firstlogin:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return false, err
	}

	if data == nil {
		// First time - record the login
		now := time.Now()
		loginData, _ := json.Marshal(map[string]interface{}{
			"first_login": now,
		})
		_ = w.setStoreValue(SubscriptionsStoreName, key, loginData)
		return true, nil
	}

	return false, nil
}

// serializeSubscription converts a subscription to bytes using JSON
func (w *W) serializeSubscription(s *database.Subscription) []byte {
	data, _ := json.Marshal(s)
	return data
}

// deserializeSubscription converts bytes to a subscription
func (w *W) deserializeSubscription(data []byte) (*database.Subscription, error) {
	s := &database.Subscription{}
	if err := json.Unmarshal(data, s); err != nil {
		return nil, err
	}
	return s, nil
}

// serializePayment converts a payment to bytes
func (w *W) serializePayment(p *database.Payment) []byte {
	buf := new(bytes.Buffer)

	// Amount (8 bytes)
	amt := make([]byte, 8)
	binary.BigEndian.PutUint64(amt, uint64(p.Amount))
	buf.Write(amt)

	// Timestamp (8 bytes)
	ts := make([]byte, 8)
	binary.BigEndian.PutUint64(ts, uint64(p.Timestamp.Unix()))
	buf.Write(ts)

	// Invoice length (4 bytes) + Invoice
	invBytes := []byte(p.Invoice)
	invLen := make([]byte, 4)
	binary.BigEndian.PutUint32(invLen, uint32(len(invBytes)))
	buf.Write(invLen)
	buf.Write(invBytes)

	// Preimage length (4 bytes) + Preimage
	preBytes := []byte(p.Preimage)
	preLen := make([]byte, 4)
	binary.BigEndian.PutUint32(preLen, uint32(len(preBytes)))
	buf.Write(preLen)
	buf.Write(preBytes)

	return buf.Bytes()
}

// deserializePayment converts bytes to a payment
func (w *W) deserializePayment(data []byte) (*database.Payment, error) {
	if len(data) < 24 { // 8 + 8 + 4 + 4 minimum
		return nil, errors.New("invalid payment data")
	}

	p := &database.Payment{}

	p.Amount = int64(binary.BigEndian.Uint64(data[0:8]))
	p.Timestamp = time.Unix(int64(binary.BigEndian.Uint64(data[8:16])), 0)

	invLen := binary.BigEndian.Uint32(data[16:20])
	if len(data) < int(20+invLen+4) {
		return nil, errors.New("invalid invoice length")
	}
	p.Invoice = string(data[20 : 20+invLen])

	offset := 20 + invLen
	preLen := binary.BigEndian.Uint32(data[offset : offset+4])
	if len(data) < int(offset+4+preLen) {
		return nil, errors.New("invalid preimage length")
	}
	p.Preimage = string(data[offset+4 : offset+4+preLen])

	return p, nil
}
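
// Payment record layout (as produced by serializePayment above):
//
//	offset 0..7    amount, big-endian uint64
//	offset 8..15   unix timestamp, big-endian uint64
//	offset 16..19  invoice length n, big-endian uint32
//	offset 20..    n invoice bytes
//	then           preimage length m (uint32) followed by m preimage bytes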

pkg/wasmdb/testdata/package-lock.json (new file, generated, vendored, 24 lines)
@@ -0,0 +1,24 @@
{
  "name": "wasmdb-test",
  "version": "1.0.0",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "wasmdb-test",
      "version": "1.0.0",
      "dependencies": {
        "fake-indexeddb": "^6.0.0"
      }
    },
    "node_modules/fake-indexeddb": {
      "version": "6.2.5",
      "resolved": "https://registry.npmjs.org/fake-indexeddb/-/fake-indexeddb-6.2.5.tgz",
      "integrity": "sha512-CGnyrvbhPlWYMngksqrSSUT1BAVP49dZocrHuK0SvtR0D5TMs5wP0o3j7jexDJW01KSadjBp1M/71o/KR3nD1w==",
      "license": "Apache-2.0",
      "engines": {
        "node": ">=18"
      }
    }
  }
}

pkg/wasmdb/testdata/package.json (new file, vendored, 12 lines)
@@ -0,0 +1,12 @@
{
  "name": "wasmdb-test",
  "version": "1.0.0",
  "description": "Node.js test harness for wasmdb WASM tests",
  "type": "module",
  "scripts": {
    "test": "node run_wasm_tests.mjs"
  },
  "dependencies": {
    "fake-indexeddb": "^6.0.0"
  }
}

pkg/wasmdb/wasmdb.go (new file, 572 lines)
@@ -0,0 +1,572 @@
//go:build js && wasm

// Package wasmdb provides a WebAssembly-compatible database implementation
// using IndexedDB as the storage backend. It replicates the Badger database's
// index schema for full query compatibility.
//
// This implementation uses aperturerobotics/go-indexeddb (a fork of hack-pad/go-indexeddb)
// which provides full IndexedDB bindings with cursor/range support and transaction retry
// mechanisms to handle IndexedDB's transaction expiration issues in Go WASM.
//
// Architecture:
//   - Each index type (evt, eid, kc-, pc-, etc.) maps to an IndexedDB object store
//   - Keys are binary-encoded using the same format as the Badger implementation
//   - Range queries use IndexedDB cursors with KeyRange bounds
//   - Serial numbers are managed using a dedicated "meta" object store
package wasmdb

import (
	"context"
	"encoding/binary"
	"errors"
	"fmt"
	"sync"

	"github.com/aperturerobotics/go-indexeddb/idb"
	"github.com/hack-pad/safejs"
	"lol.mleku.dev"
	"lol.mleku.dev/chk"

	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/filter"
	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/database/indexes"
)

const (
	// DatabaseName is the IndexedDB database name
	DatabaseName = "orly-nostr-relay"

	// DatabaseVersion is incremented when schema changes require migration
	DatabaseVersion = 1

	// MetaStoreName holds metadata like serial counters
	MetaStoreName = "meta"

	// EventSerialKey is the key for the event serial counter in meta store
	EventSerialKey = "event_serial"

	// PubkeySerialKey is the key for the pubkey serial counter in meta store
	PubkeySerialKey = "pubkey_serial"

	// RelayIdentityKey is the key for the relay identity secret
	RelayIdentityKey = "relay_identity"
)

// Object store names matching Badger index prefixes
var objectStoreNames = []string{
	MetaStoreName,
	string(indexes.EventPrefix),            // "evt" - full events
	string(indexes.SmallEventPrefix),       // "sev" - small events inline
	string(indexes.ReplaceableEventPrefix), // "rev" - replaceable events
	string(indexes.AddressableEventPrefix), // "aev" - addressable events
	string(indexes.IdPrefix),               // "eid" - event ID index
	string(indexes.FullIdPubkeyPrefix),     // "fpc" - full ID + pubkey + timestamp
	string(indexes.CreatedAtPrefix),        // "c--" - created_at index
	string(indexes.KindPrefix),             // "kc-" - kind index
	string(indexes.PubkeyPrefix),           // "pc-" - pubkey index
	string(indexes.KindPubkeyPrefix),       // "kpc" - kind + pubkey index
	string(indexes.TagPrefix),              // "tc-" - tag index
	string(indexes.TagKindPrefix),          // "tkc" - tag + kind index
	string(indexes.TagPubkeyPrefix),        // "tpc" - tag + pubkey index
	string(indexes.TagKindPubkeyPrefix),    // "tkp" - tag + kind + pubkey index
	string(indexes.WordPrefix),             // "wrd" - word search index
	string(indexes.ExpirationPrefix),       // "exp" - expiration index
	string(indexes.VersionPrefix),          // "ver" - schema version
	string(indexes.PubkeySerialPrefix),     // "pks" - pubkey serial index
	string(indexes.SerialPubkeyPrefix),     // "spk" - serial to pubkey
	string(indexes.EventPubkeyGraphPrefix), // "epg" - event-pubkey graph
	string(indexes.PubkeyEventGraphPrefix), // "peg" - pubkey-event graph
	"markers",       // metadata key-value storage
	"subscriptions", // payment subscriptions
	"nip43",         // NIP-43 membership
	"invites",       // invite codes
}
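
// Note: one object store per Badger key prefix keeps the binary key encoding
// unchanged, so a Badger prefix scan translates directly into a cursor over
// the corresponding store.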

// W implements the database.Database interface using IndexedDB
type W struct {
	ctx    context.Context
	cancel context.CancelFunc

	dataDir string // Not really used in WASM, but kept for interface compatibility
	Logger  *logger

	db    *idb.Database
	dbMu  sync.RWMutex
	ready chan struct{}

	// Serial counters (cached in memory, persisted to IndexedDB)
	eventSerial  uint64
	pubkeySerial uint64
	serialMu     sync.Mutex
}

// Ensure W implements database.Database interface at compile time
var _ database.Database = (*W)(nil)

// init registers the wasmdb database factory
func init() {
	database.RegisterWasmDBFactory(func(
		ctx context.Context,
		cancel context.CancelFunc,
		cfg *database.DatabaseConfig,
	) (database.Database, error) {
		return NewWithConfig(ctx, cancel, cfg)
	})
}

// NewWithConfig creates a new IndexedDB-based database instance
func NewWithConfig(
	ctx context.Context, cancel context.CancelFunc, cfg *database.DatabaseConfig,
) (*W, error) {
	w := &W{
		ctx:     ctx,
		cancel:  cancel,
		dataDir: cfg.DataDir,
		Logger:  NewLogger(lol.GetLogLevel(cfg.LogLevel)),
		ready:   make(chan struct{}),
	}

	// Open or create the IndexedDB database
	if err := w.openDatabase(); err != nil {
		return nil, fmt.Errorf("failed to open IndexedDB: %w", err)
	}

	// Load serial counters from storage
	if err := w.loadSerialCounters(); err != nil {
		return nil, fmt.Errorf("failed to load serial counters: %w", err)
	}

	// Start warmup goroutine
	go w.warmup()

	// Setup shutdown handler
	go func() {
		<-w.ctx.Done()
		w.cancel()
		w.Close()
	}()

	return w, nil
}

// New creates a new IndexedDB-based database instance with default configuration
func New(
	ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
) (*W, error) {
	cfg := &database.DatabaseConfig{
		DataDir:  dataDir,
		LogLevel: logLevel,
	}
	return NewWithConfig(ctx, cancel, cfg)
}

// openDatabase opens or creates the IndexedDB database with all required object stores
func (w *W) openDatabase() error {
	w.dbMu.Lock()
	defer w.dbMu.Unlock()

	// Get the IndexedDB factory (panics if not available)
	factory := idb.Global()

	// Open the database with upgrade handler
	openReq, err := factory.Open(w.ctx, DatabaseName, DatabaseVersion, func(db *idb.Database, oldVersion, newVersion uint) error {
		// This is called when the database needs to be created or upgraded
		w.Logger.Infof("IndexedDB upgrade: version %d -> %d", oldVersion, newVersion)

		// Create all object stores
		for _, storeName := range objectStoreNames {
			// Check if store already exists
			if !w.hasObjectStore(db, storeName) {
				// Create object store without auto-increment (we manage keys manually)
				opts := idb.ObjectStoreOptions{}
				if _, err := db.CreateObjectStore(storeName, opts); err != nil {
					return fmt.Errorf("failed to create object store %s: %w", storeName, err)
				}
				w.Logger.Debugf("created object store: %s", storeName)
			}
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to open IndexedDB: %w", err)
	}

	db, err := openReq.Await(w.ctx)
	if err != nil {
		return fmt.Errorf("failed to await IndexedDB open: %w", err)
	}

	w.db = db
	return nil
}

// hasObjectStore checks if an object store exists in the database
func (w *W) hasObjectStore(db *idb.Database, name string) bool {
	names, err := db.ObjectStoreNames()
	if err != nil {
		return false
	}
	for _, n := range names {
		if n == name {
			return true
		}
	}
	return false
}

// loadSerialCounters loads the event and pubkey serial counters from IndexedDB
func (w *W) loadSerialCounters() error {
	w.serialMu.Lock()
	defer w.serialMu.Unlock()

	// Load event serial
	eventSerialBytes, err := w.getMeta(EventSerialKey)
	if err != nil {
		return err
	}
	if eventSerialBytes != nil && len(eventSerialBytes) == 8 {
		w.eventSerial = binary.BigEndian.Uint64(eventSerialBytes)
	}

	// Load pubkey serial
	pubkeySerialBytes, err := w.getMeta(PubkeySerialKey)
	if err != nil {
		return err
	}
	if pubkeySerialBytes != nil && len(pubkeySerialBytes) == 8 {
		w.pubkeySerial = binary.BigEndian.Uint64(pubkeySerialBytes)
	}

	w.Logger.Infof("loaded serials: event=%d, pubkey=%d", w.eventSerial, w.pubkeySerial)
	return nil
}

// getMeta retrieves a value from the meta object store
func (w *W) getMeta(key string) ([]byte, error) {
	tx, err := w.db.Transaction(idb.TransactionReadOnly, MetaStoreName)
	if err != nil {
		return nil, err
	}

	store, err := tx.ObjectStore(MetaStoreName)
	if err != nil {
		return nil, err
	}

	keyVal, err := safejs.ValueOf(key)
	if err != nil {
		return nil, err
	}

	req, err := store.Get(keyVal)
	if err != nil {
		return nil, err
	}

	val, err := req.Await(w.ctx)
	if err != nil {
		// Key not found is not an error
		return nil, nil
	}

	if val.IsUndefined() || val.IsNull() {
		return nil, nil
	}

	// Convert safejs.Value to []byte
	return safeValueToBytes(val), nil
}

// setMeta stores a value in the meta object store
func (w *W) setMeta(key string, value []byte) error {
	tx, err := w.db.Transaction(idb.TransactionReadWrite, MetaStoreName)
	if err != nil {
		return err
	}

	store, err := tx.ObjectStore(MetaStoreName)
	if err != nil {
		return err
	}

	// Convert value to Uint8Array for IndexedDB storage
	valueJS := bytesToSafeValue(value)

	// Put with key - using PutKey since we're managing keys
	keyVal, err := safejs.ValueOf(key)
	if err != nil {
		return err
	}

	_, err = store.PutKey(keyVal, valueJS)
	if err != nil {
		return err
	}

	return tx.Await(w.ctx)
}

// nextEventSerial returns the next event serial number and persists it
func (w *W) nextEventSerial() (uint64, error) {
	w.serialMu.Lock()
	defer w.serialMu.Unlock()

	w.eventSerial++
	serial := w.eventSerial

	// Persist to IndexedDB
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, serial)
	if err := w.setMeta(EventSerialKey, buf); err != nil {
		return 0, err
	}

	return serial, nil
}
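
// Note: each serial allocation performs its own read-write transaction so the
// counter survives page reloads; this favors durability over write throughput,
// which suits the single-writer WASM environment.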
|
||||
|
||||
// nextPubkeySerial returns the next pubkey serial number and persists it
|
||||
func (w *W) nextPubkeySerial() (uint64, error) {
|
||||
w.serialMu.Lock()
|
||||
defer w.serialMu.Unlock()
|
||||
|
||||
w.pubkeySerial++
|
||||
serial := w.pubkeySerial
|
||||
|
||||
// Persist to IndexedDB
|
||||
buf := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(buf, serial)
|
||||
if err := w.setMeta(PubkeySerialKey, buf); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return serial, nil
|
||||
}

// warmup performs database warmup and closes the ready channel when complete
func (w *W) warmup() {
    defer close(w.ready)
    // IndexedDB is ready immediately after opening
    w.Logger.Infof("IndexedDB database warmup complete, ready to serve requests")
}

// Path returns the database path (not used in WASM)
func (w *W) Path() string { return w.dataDir }

// Init initializes the database (no-op, done in New)
func (w *W) Init(path string) error { return nil }

// Sync flushes pending writes (IndexedDB handles persistence automatically)
func (w *W) Sync() error { return nil }

// Close closes the database
func (w *W) Close() error {
    w.dbMu.Lock()
    defer w.dbMu.Unlock()

    if w.db != nil {
        w.db.Close()
        w.db = nil
    }
    return nil
}

// Wipe removes all data and recreates object stores
func (w *W) Wipe() error {
    w.dbMu.Lock()
    defer w.dbMu.Unlock()

    // Close the current database
    if w.db != nil {
        w.db.Close()
        w.db = nil
    }

    // Delete the database
    factory := idb.Global()
    delReq, err := factory.DeleteDatabase(DatabaseName)
    if err != nil {
        return fmt.Errorf("failed to delete IndexedDB: %w", err)
    }
    if err := delReq.Await(w.ctx); err != nil {
        return fmt.Errorf("failed to await IndexedDB delete: %w", err)
    }

    // Reset serial counters
    w.serialMu.Lock()
    w.eventSerial = 0
    w.pubkeySerial = 0
    w.serialMu.Unlock()

    // Reopen the database (this will recreate all object stores).
    // dbMu is released around openDatabase and re-acquired afterwards so
    // the deferred unlock above stays balanced.
    w.dbMu.Unlock()
    err = w.openDatabase()
    w.dbMu.Lock()

    return err
}
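
A usage sketch of the wipe-then-reuse sequence, assuming Wipe leaves freshly recreated, empty stores behind; it leans on the marker helpers defined further down, and the function itself is illustrative only:

```go
// exampleWipe is a hypothetical sketch of Wipe's expected semantics:
// previously stored values are gone and the reopened stores are usable.
func exampleWipe(w *W) error {
    if err := w.SetMarker("before-wipe", []byte{1}); err != nil {
        return err
    }
    if err := w.Wipe(); err != nil {
        return err
    }
    if w.HasMarker("before-wipe") {
        return errors.New("expected marker to be wiped")
    }
    // The recreated stores accept writes again.
    return w.SetMarker("after-wipe", []byte{1})
}
```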

// SetLogLevel sets the logging level
func (w *W) SetLogLevel(level string) {
    w.Logger.SetLogLevel(lol.GetLogLevel(level))
}

// Ready returns a channel that closes when the database is ready
func (w *W) Ready() <-chan struct{} { return w.ready }

// RunMigrations runs database migrations (handled by IndexedDB upgrade)
func (w *W) RunMigrations() {}

// EventIdsBySerial retrieves event IDs by serial range
func (w *W) EventIdsBySerial(start uint64, count int) ([]uint64, error) {
    return nil, errors.New("not implemented")
}

// Query cache methods (simplified for WASM - no caching)
func (w *W) GetCachedJSON(f *filter.F) ([][]byte, bool) { return nil, false }
func (w *W) CacheMarshaledJSON(f *filter.F, marshaledJSON [][]byte) {}
func (w *W) GetCachedEvents(f *filter.F) (event.S, bool) { return nil, false }
func (w *W) CacheEvents(f *filter.F, events event.S) {}
func (w *W) InvalidateQueryCache() {}

// Placeholder implementations for the remaining interface methods:
// query methods are implemented in query-events.go, delete methods in
// delete-event.go, and Import, Export, and ImportEvents in import-export.go.

func (w *W) GetRelayIdentitySecret() (skb []byte, err error) {
    return w.getMeta(RelayIdentityKey)
}

func (w *W) SetRelayIdentitySecret(skb []byte) error {
    return w.setMeta(RelayIdentityKey, skb)
}

func (w *W) GetOrCreateRelayIdentitySecret() (skb []byte, err error) {
    skb, err = w.GetRelayIdentitySecret()
    if err != nil {
        return nil, err
    }
    if skb != nil {
        return skb, nil
    }
    // Generate a new secret key (32 random bytes).
    // In WASM, this uses crypto.getRandomValues.
    skb = make([]byte, 32)
    if err := cryptoRandom(skb); err != nil {
        return nil, err
    }
    if err := w.SetRelayIdentitySecret(skb); err != nil {
        return nil, err
    }
    return skb, nil
}
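
`cryptoRandom` is defined elsewhere in the package; as a point of reference, under `GOOS=js` the Go runtime already backs `crypto/rand` with `crypto.getRandomValues`, so an equivalent sketch (an assumption about the helper's behavior, not its actual body) is simply:

```go
import "crypto/rand"

// cryptoRandomSketch fills buf with random bytes; under GOOS=js the Go
// runtime routes crypto/rand reads through crypto.getRandomValues.
func cryptoRandomSketch(buf []byte) error {
    _, err := rand.Read(buf)
    return err
}
```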

func (w *W) SetMarker(key string, value []byte) error {
    return w.setStoreValue("markers", key, value)
}

func (w *W) GetMarker(key string) (value []byte, err error) {
    return w.getStoreValue("markers", key)
}

func (w *W) HasMarker(key string) bool {
    val, err := w.GetMarker(key)
    return err == nil && val != nil
}

func (w *W) DeleteMarker(key string) error {
    return w.deleteStoreValue("markers", key)
}
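
The marker helpers read naturally as one-shot flags; a hypothetical run-once guard built on top of them (the name and shape are illustrative, not part of this diff):

```go
// runOnce executes fn exactly once across restarts, using a marker as
// the persisted "already done" flag.
func runOnce(w *W, name string, fn func() error) error {
    if w.HasMarker(name) {
        return nil // already completed on a previous run
    }
    if err := fn(); err != nil {
        return err
    }
    return w.SetMarker(name, []byte{1})
}
```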

// Subscription methods are implemented in subscriptions.go
// NIP-43 methods are implemented in nip43.go

// Helper methods for object store operations

func (w *W) setStoreValue(storeName, key string, value []byte) error {
    tx, err := w.db.Transaction(idb.TransactionReadWrite, storeName)
    if err != nil {
        return err
    }

    store, err := tx.ObjectStore(storeName)
    if err != nil {
        return err
    }

    keyVal, err := safejs.ValueOf(key)
    if err != nil {
        return err
    }

    valueJS := bytesToSafeValue(value)

    _, err = store.PutKey(keyVal, valueJS)
    if err != nil {
        return err
    }

    return tx.Await(w.ctx)
}

func (w *W) getStoreValue(storeName, key string) ([]byte, error) {
    tx, err := w.db.Transaction(idb.TransactionReadOnly, storeName)
    if err != nil {
        return nil, err
    }

    store, err := tx.ObjectStore(storeName)
    if err != nil {
        return nil, err
    }

    keyVal, err := safejs.ValueOf(key)
    if err != nil {
        return nil, err
    }

    req, err := store.Get(keyVal)
    if err != nil {
        return nil, err
    }

    val, err := req.Await(w.ctx)
    if err != nil {
        // An await failure is treated as a missing value, not an error
        return nil, nil
    }

    if val.IsUndefined() || val.IsNull() {
        return nil, nil
    }

    return safeValueToBytes(val), nil
}

func (w *W) deleteStoreValue(storeName, key string) error {
    tx, err := w.db.Transaction(idb.TransactionReadWrite, storeName)
    if err != nil {
        return err
    }

    store, err := tx.ObjectStore(storeName)
    if err != nil {
        return err
    }

    keyVal, err := safejs.ValueOf(key)
    if err != nil {
        return err
    }

    _, err = store.Delete(keyVal)
    if err != nil {
        return err
    }

    return tx.Await(w.ctx)
}
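
These helpers assume the named object store already exists (stores are presumably created in the IndexedDB upgrade callback, outside this excerpt). A hypothetical round-trip exercising all three:

```go
// exampleStoreRoundTrip is an illustrative sketch: put, get, delete,
// then confirm the value reads back as nil (absent).
func exampleStoreRoundTrip(w *W) error {
    const store, key = "markers", "probe"
    if err := w.setStoreValue(store, key, []byte("v1")); err != nil {
        return err
    }
    val, err := w.getStoreValue(store, key)
    if err != nil || val == nil {
        return errors.New("expected value after put")
    }
    if err := w.deleteStoreValue(store, key); err != nil {
        return err
    }
    val, _ = w.getStoreValue(store, key)
    if val != nil {
        return errors.New("expected nil after delete")
    }
    return nil
}
```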

// Keep the chk import referenced while no checks are active in this file
var _ = chk.E
pkg/wasmdb/wasmdb_test.go (new file, 1739 lines)
File diff suppressed because it is too large
@@ -7,9 +7,10 @@ import (
 	"time"

 	"github.com/gorilla/websocket"
-	"lol.mleku.dev/errorf"
 	"git.mleku.dev/mleku/nostr/encoders/event"
 	"git.mleku.dev/mleku/nostr/encoders/hex"
+	"lol.mleku.dev/errorf"
+	"next.orly.dev/pkg/interfaces/neterr"
 )

 // Client wraps a WebSocket connection to a relay for testing.
@@ -118,7 +119,7 @@ func (c *Client) readLoop() {
 		default:
 		}
 		// Check if it's a timeout - connection might still be alive
-		if netErr, ok := err.(interface{ Timeout() bool }); ok && netErr.Timeout() {
+		if netErr, ok := err.(neterr.TimeoutError); ok && netErr.Timeout() {
 			// Pong handler should have extended deadline, but if we timeout,
 			// reset it and continue - connection might still be alive
 			// This can happen during idle periods when no messages are received
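
The change replaces an inline `interface{ Timeout() bool }` assertion with a named interface from `next.orly.dev/pkg/interfaces/neterr`. That package is not shown in this diff; presumably it captures the same contract `net.Error` exposes, along the lines of this sketch:

```go
// Hypothetical reconstruction of the neterr package, for illustration only.
package neterr

// TimeoutError matches errors that can report whether they were caused
// by a timeout, mirroring the Timeout method of net.Error.
type TimeoutError interface {
    error
    Timeout() bool
}
```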