Unify NostrUser and Author nodes; add migrations support
Some checks failed
Go / build-and-release (push) Has been cancelled
Merged 'Author' nodes into 'NostrUser' for unified identity tracking and social graph representation. Introduced a migrations framework to handle schema changes, including retroactive updates to existing relationships and constraints. Updated tests, schema definitions, and documentation to reflect these changes.
@@ -176,7 +176,9 @@
      "Bash(xxd:*)",
      "Bash(CGO_ENABLED=0 go mod tidy:*)",
      "WebFetch(domain:git.mleku.dev)",
-     "Bash(CGO_ENABLED=0 LOG_LEVEL=trace go test:*)"
+     "Bash(CGO_ENABLED=0 LOG_LEVEL=trace go test:*)",
+     "Bash(go vet:*)",
+     "Bash(gofmt:*)"
    ],
    "deny": [],
    "ask": []
@@ -16,12 +16,13 @@ This document provides a comprehensive guide to the Neo4j database schema used b

## Architecture Overview

-The Neo4j implementation uses a **dual-node architecture** to separate concerns:
+The Neo4j implementation uses a **unified node architecture**:

-1. **NIP-01 Base Layer**: Stores Nostr events with `Event`, `Author`, and `Tag` nodes for standard relay operations
-2. **WoT Extension Layer**: Stores social graph data with `NostrUser` nodes and relationship types (`FOLLOWS`, `MUTES`, `REPORTS`) for trust calculations
+1. **Event Storage**: `Event` and `Tag` nodes store Nostr events for standard relay operations
+2. **User Identity**: `NostrUser` nodes represent all Nostr users (both event authors and social graph participants)
+3. **Social Graph**: Relationship types (`FOLLOWS`, `MUTES`, `REPORTS`) between `NostrUser` nodes for trust calculations

-This separation allows the WoT extension to be modified independently without affecting NIP-01 compliance.
+**Note:** The `Author` label was deprecated and merged into `NostrUser` to eliminate redundancy. A migration automatically converts existing `Author` nodes when the relay starts.

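As an illustrative aside on the unified model (not part of the committed docs): the sketch below shows one `NostrUser` node answering both a NIP-01 question and a WoT question. It assumes the `ExecuteRead` helper and result API used in `pkg/neo4j/migrations.go` in this commit; the function name and query are hypothetical.

```go
package neo4j

import "context"

// countAuthoredAndFollowed is a hypothetical helper showing that a single
// NostrUser node now answers both NIP-01 questions (events it authored) and
// WoT questions (who it follows).
func countAuthoredAndFollowed(ctx context.Context, n *N, pubkey string) (authored, follows int64, err error) {
    cypher := `
    MATCH (u:NostrUser {pubkey: $pubkey})
    OPTIONAL MATCH (u)<-[:AUTHORED_BY]-(e:Event)
    OPTIONAL MATCH (u)-[:FOLLOWS]->(f:NostrUser)
    RETURN count(DISTINCT e) AS authored, count(DISTINCT f) AS follows
    `
    result, err := n.ExecuteRead(ctx, cypher, map[string]any{"pubkey": pubkey})
    if err != nil {
        return 0, 0, err
    }
    if result.Next(ctx) {
        rec := result.Record()
        // Cypher integers come back as int64, as in migrations.go.
        authored, _ = rec.Values[0].(int64)
        follows, _ = rec.Values[1].(int64)
    }
    return authored, follows, nil
}
```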
### Data Model Summary

@@ -72,16 +73,17 @@ From the specification document:

These elements are **required** for a NIP-01 compliant relay.

-### Constraints (schema.go:30-43)
+### Constraints (schema.go:30-44)

```cypher
-- Event ID uniqueness (for "ids" filter)
CREATE CONSTRAINT event_id_unique IF NOT EXISTS
FOR (e:Event) REQUIRE e.id IS UNIQUE

--- Author pubkey uniqueness (for "authors" filter)
-CREATE CONSTRAINT author_pubkey_unique IF NOT EXISTS
-FOR (a:Author) REQUIRE a.pubkey IS UNIQUE
+-- NostrUser pubkey uniqueness (for "authors" filter and social graph)
+-- NostrUser unifies both NIP-01 author tracking and WoT social graph
+CREATE CONSTRAINT nostrUser_pubkey IF NOT EXISTS
+FOR (n:NostrUser) REQUIRE n.pubkey IS UNIQUE
```

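For context, a minimal sketch of how these uniqueness constraints could be applied idempotently at startup; the constraint strings are the ones listed in `schema.go`, but the loop and error handling here are illustrative rather than the actual `applySchema` implementation, and they reuse the `ExecuteWrite` helper from `migrations.go`.

```go
package neo4j

import "context"

// applyConstraintsSketch applies the two mandatory NIP-01 uniqueness
// constraints; IF NOT EXISTS makes each statement safe to re-run on startup.
func applyConstraintsSketch(ctx context.Context, n *N) error {
    constraints := []string{
        "CREATE CONSTRAINT event_id_unique IF NOT EXISTS FOR (e:Event) REQUIRE e.id IS UNIQUE",
        "CREATE CONSTRAINT nostrUser_pubkey IF NOT EXISTS FOR (n:NostrUser) REQUIRE n.pubkey IS UNIQUE",
    }
    for _, c := range constraints {
        if _, err := n.ExecuteWrite(ctx, c, nil); err != nil {
            return err
        }
    }
    return nil
}
```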
### Indexes (schema.go:84-108)
@@ -122,14 +124,14 @@ Created in `save-event.go:buildEventCreationCypher()`:
Created in `save-event.go:buildEventCreationCypher()`:

```cypher
--- Event → Author relationship
-(e:Event)-[:AUTHORED_BY]->(a:Author {pubkey: ...})
+-- Event → NostrUser relationship (author)
+(e:Event)-[:AUTHORED_BY]->(u:NostrUser {pubkey: ...})

-- Event → Event reference (e-tags)
(e:Event)-[:REFERENCES]->(ref:Event)

--- Event → Author mention (p-tags)
-(e:Event)-[:MENTIONS]->(mentioned:Author)
+-- Event → NostrUser mention (p-tags)
+(e:Event)-[:MENTIONS]->(mentioned:NostrUser)

-- Event → Tag (other tags like #t, #d, etc.)
(e:Event)-[:TAGGED_WITH]->(t:Tag {type: ..., value: ...})
@@ -146,7 +148,7 @@ The `query-events.go` file translates Nostr REQ filters into Cypher queries.
| NIP-01 Filter | Cypher Translation | Index Used |
|---------------|-------------------|------------|
| `ids: ["abc..."]` | `e.id = $id_0` or `e.id STARTS WITH $id_0` | `event_id_unique` |
-| `authors: ["def..."]` | `e.pubkey = $author_0` or `e.pubkey STARTS WITH $author_0` | `author_pubkey_unique` |
+| `authors: ["def..."]` | `e.pubkey = $author_0` or `e.pubkey STARTS WITH $author_0` | `nostrUser_pubkey` |
| `kinds: [1, 7]` | `e.kind IN $kinds` | `event_kind` |
| `since: 1234567890` | `e.created_at >= $since` | `event_created_at` |
| `until: 1234567890` | `e.created_at <= $until` | `event_created_at` |
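To make the `authors` row concrete, here is a simplified sketch of how such a filter could be turned into a Cypher predicate; the real translation lives in `query-events.go` and may differ in parameter naming and prefix handling, so treat the helper below as illustrative only.

```go
package neo4j

import (
    "fmt"
    "strings"
)

// buildAuthorsClause turns an "authors" filter into a WHERE fragment,
// binding each value as a parameter. Full 64-char pubkeys get an exact
// match (served by the nostrUser_pubkey constraint); shorter values fall
// back to a NIP-01 prefix match.
func buildAuthorsClause(authors []string, params map[string]any) string {
    if len(authors) == 0 {
        return ""
    }
    var terms []string
    for i, a := range authors {
        name := fmt.Sprintf("author_%d", i)
        params[name] = a
        if len(a) == 64 {
            terms = append(terms, fmt.Sprintf("e.pubkey = $%s", name))
        } else {
            terms = append(terms, fmt.Sprintf("e.pubkey STARTS WITH $%s", name))
        }
    }
    return "(" + strings.Join(terms, " OR ") + ")"
}
```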
@@ -435,25 +437,28 @@ if ev.Kind == 1 {

### Adding NostrEventTag → NostrUser REFERENCES

-Per the specification update, p-tags should create `REFERENCES` relationships to `NostrUser` nodes:
+The current implementation creates `MENTIONS` relationships from Events to `NostrUser` nodes for p-tags:

```go
-// In save-event.go buildEventCreationCypher(), modify p-tag handling:
+// In save-event.go buildEventCreationCypher(), p-tag handling:
case "p":
-    // Current implementation: creates MENTIONS to Author
+    // Creates MENTIONS to NostrUser (unified node for both author and social graph)
    cypher += fmt.Sprintf(`
-MERGE (mentioned%d:Author {pubkey: $%s})
+MERGE (mentioned%d:NostrUser {pubkey: $%s})
+ON CREATE SET mentioned%d.created_at = timestamp()
CREATE (e)-[:MENTIONS]->(mentioned%d)
-`, pTagIndex, paramName, pTagIndex)
+`, pTagIndex, paramName, pTagIndex, pTagIndex)
```

-// NEW: Also reference NostrUser for WoT traversal
+To add additional tag nodes for enhanced query patterns:

```go
// Optional: Also create a Tag node for the p-tag
cypher += fmt.Sprintf(`
-MERGE (user%d:NostrUser {pubkey: $%s})
+// Create a Tag node for the p-tag
MERGE (pTag%d:NostrEventTag {tag_name: 'p', tag_value: $%s})
CREATE (e)-[:HAS_TAG]->(pTag%d)
-CREATE (pTag%d)-[:REFERENCES]->(user%d)
-`, pTagIndex, paramName, pTagIndex, paramName, pTagIndex, pTagIndex, pTagIndex)
+CREATE (pTag%d)-[:REFERENCES]->(mentioned%d)
+`, pTagIndex, paramName, pTagIndex, pTagIndex, pTagIndex)
```

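If the optional `NostrEventTag` pattern above is adopted, it enables queries such as the hypothetical helper below, which finds events that p-tag a given user by walking `HAS_TAG` and `REFERENCES`; it assumes the `ExecuteRead` helper from `migrations.go` and is not part of this commit.

```go
package neo4j

import "context"

// eventsTaggingUser returns the ids of events whose p-tags reference the
// given pubkey, via the optional NostrEventTag nodes.
func eventsTaggingUser(ctx context.Context, n *N, pubkey string) ([]string, error) {
    cypher := `
    MATCH (e:Event)-[:HAS_TAG]->(:NostrEventTag {tag_name: 'p'})-[:REFERENCES]->(:NostrUser {pubkey: $pubkey})
    RETURN e.id AS id
    `
    result, err := n.ExecuteRead(ctx, cypher, map[string]any{"pubkey": pubkey})
    if err != nil {
        return nil, err
    }
    var ids []string
    for result.Next(ctx) {
        if id, ok := result.Record().Values[0].(string); ok {
            ids = append(ids, id)
        }
    }
    return ids, nil
}
```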
---

@@ -168,9 +168,9 @@ RETURN e

### Social graph query
```cypher
-MATCH (author:Author {pubkey: "abc123..."})
+MATCH (author:NostrUser {pubkey: "abc123..."})
  <-[:AUTHORED_BY]-(e:Event)
-  -[:MENTIONS]->(mentioned:Author)
+  -[:MENTIONS]->(mentioned:NostrUser)
RETURN author, e, mentioned
```

@@ -55,7 +55,7 @@ func TestExpiration_SaveEventWithExpiration(t *testing.T) {
    ev.CreatedAt = timestamp.Now().V
    ev.Kind = 1
    ev.Content = []byte("Event with expiration")
-   ev.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.From(futureExpiration).String()))
+   ev.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.FromUnix(futureExpiration).String()))

    if err := ev.Sign(signer); err != nil {
        t.Fatalf("Failed to sign event: %v", err)
@@ -118,7 +118,7 @@ func TestExpiration_DeleteExpiredEvents(t *testing.T) {
    expiredEv.CreatedAt = timestamp.Now().V - 7200 // 2 hours ago
    expiredEv.Kind = 1
    expiredEv.Content = []byte("Expired event")
-   expiredEv.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.From(pastExpiration).String()))
+   expiredEv.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.FromUnix(pastExpiration).String()))

    if err := expiredEv.Sign(signer); err != nil {
        t.Fatalf("Failed to sign expired event: %v", err)
@@ -136,7 +136,7 @@ func TestExpiration_DeleteExpiredEvents(t *testing.T) {
    validEv.CreatedAt = timestamp.Now().V
    validEv.Kind = 1
    validEv.Content = []byte("Valid event")
-   validEv.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.From(futureExpiration).String()))
+   validEv.Tags = tag.NewS(tag.NewFromAny("expiration", timestamp.FromUnix(futureExpiration).String()))

    if err := validEv.Sign(signer); err != nil {
        t.Fatalf("Failed to sign valid event: %v", err)

@@ -331,7 +331,7 @@ func TestGetSerialsByIds(t *testing.T) {
    }

    // Create and save multiple events
-   ids := tag.NewS()
+   ids := tag.New()
    for i := 0; i < 3; i++ {
        ev := event.New()
        ev.Pubkey = signer.Pub()
@@ -347,7 +347,8 @@ func TestGetSerialsByIds(t *testing.T) {
            t.Fatalf("Failed to save event: %v", err)
        }

-       ids.Append(tag.NewFromAny("", hex.Enc(ev.ID[:])))
+       // Append ID to the tag's T slice
+       ids.T = append(ids.T, []byte(hex.Enc(ev.ID[:])))
    }

    // Get serials by IDs

197  pkg/neo4j/migrations.go  Normal file
@@ -0,0 +1,197 @@
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Migration represents a database migration with a version identifier
|
||||
type Migration struct {
|
||||
Version string
|
||||
Description string
|
||||
Migrate func(ctx context.Context, n *N) error
|
||||
}
|
||||
|
||||
// migrations is the ordered list of database migrations
|
||||
// Migrations are applied in order and tracked via Marker nodes
|
||||
var migrations = []Migration{
|
||||
{
|
||||
Version: "v1",
|
||||
Description: "Merge Author nodes into NostrUser nodes",
|
||||
Migrate: migrateAuthorToNostrUser,
|
||||
},
|
||||
}
|
||||
|
||||
// RunMigrations executes all pending migrations
|
||||
func (n *N) RunMigrations() {
|
||||
ctx := context.Background()
|
||||
|
||||
for _, migration := range migrations {
|
||||
// Check if migration has already been applied
|
||||
if n.migrationApplied(ctx, migration.Version) {
|
||||
n.Logger.Infof("migration %s already applied, skipping", migration.Version)
|
||||
continue
|
||||
}
|
||||
|
||||
n.Logger.Infof("applying migration %s: %s", migration.Version, migration.Description)
|
||||
|
||||
if err := migration.Migrate(ctx, n); err != nil {
|
||||
n.Logger.Errorf("migration %s failed: %v", migration.Version, err)
|
||||
// Continue to next migration - don't fail startup
|
||||
continue
|
||||
}
|
||||
|
||||
// Mark migration as complete
|
||||
if err := n.markMigrationComplete(ctx, migration.Version, migration.Description); err != nil {
|
||||
n.Logger.Warningf("failed to mark migration %s as complete: %v", migration.Version, err)
|
||||
}
|
||||
|
||||
n.Logger.Infof("migration %s completed successfully", migration.Version)
|
||||
}
|
||||
}
|
||||
|
||||
// migrationApplied checks if a migration has already been applied
func (n *N) migrationApplied(ctx context.Context, version string) bool {
    cypher := `
    MATCH (m:Migration {version: $version})
    RETURN m.version
    `
    result, err := n.ExecuteRead(ctx, cypher, map[string]any{"version": version})
    if err != nil {
        return false
    }
    return result.Next(ctx)
}

// markMigrationComplete marks a migration as applied
func (n *N) markMigrationComplete(ctx context.Context, version, description string) error {
    cypher := `
    CREATE (m:Migration {
        version: $version,
        description: $description,
        applied_at: timestamp()
    })
    `
    _, err := n.ExecuteWrite(ctx, cypher, map[string]any{
        "version":     version,
        "description": description,
    })
    return err
}

// migrateAuthorToNostrUser migrates Author nodes to NostrUser nodes
// This consolidates the separate Author (NIP-01) and NostrUser (WoT) labels
// into a unified NostrUser label for the social graph
func migrateAuthorToNostrUser(ctx context.Context, n *N) error {
    // Step 1: Check if there are any Author nodes to migrate
    countCypher := `MATCH (a:Author) RETURN count(a) AS count`
    countResult, err := n.ExecuteRead(ctx, countCypher, nil)
    if err != nil {
        return fmt.Errorf("failed to count Author nodes: %w", err)
    }

    var authorCount int64
    if countResult.Next(ctx) {
        record := countResult.Record()
        if count, ok := record.Values[0].(int64); ok {
            authorCount = count
        }
    }

    if authorCount == 0 {
        n.Logger.Infof("no Author nodes to migrate")
        return nil
    }

    n.Logger.Infof("migrating %d Author nodes to NostrUser", authorCount)

    // Step 2: For each Author node, merge into NostrUser with same pubkey
    // This uses MERGE to either match existing NostrUser or create new one
    // Then copies any relationships from Author to NostrUser
    mergeCypher := `
    // Match all Author nodes
    MATCH (a:Author)

    // For each Author, merge into NostrUser (creates if doesn't exist)
    MERGE (u:NostrUser {pubkey: a.pubkey})
    ON CREATE SET u.created_at = timestamp(), u.migrated_from_author = true

    // Return count for logging
    RETURN count(DISTINCT a) AS migrated
    `

    result, err := n.ExecuteWrite(ctx, mergeCypher, nil)
    if err != nil {
        return fmt.Errorf("failed to merge Author nodes to NostrUser: %w", err)
    }

    // Log result (result consumption happens within the session)
    _ = result

    // Step 3: Migrate AUTHORED_BY relationships from Author to NostrUser
    // Events should now point to NostrUser instead of Author
    relationshipCypher := `
    // Find events linked to Author via AUTHORED_BY
    MATCH (e:Event)-[r:AUTHORED_BY]->(a:Author)

    // Get or create the corresponding NostrUser
    MATCH (u:NostrUser {pubkey: a.pubkey})

    // Create new relationship to NostrUser if it doesn't exist
    MERGE (e)-[:AUTHORED_BY]->(u)

    // Delete old relationship to Author
    DELETE r

    RETURN count(r) AS migrated_relationships
    `

    _, err = n.ExecuteWrite(ctx, relationshipCypher, nil)
    if err != nil {
        return fmt.Errorf("failed to migrate AUTHORED_BY relationships: %w", err)
    }

    // Step 4: Migrate MENTIONS relationships from Author to NostrUser
    mentionsCypher := `
    // Find events with MENTIONS to Author
    MATCH (e:Event)-[r:MENTIONS]->(a:Author)

    // Get or create the corresponding NostrUser
    MATCH (u:NostrUser {pubkey: a.pubkey})

    // Create new relationship to NostrUser if it doesn't exist
    MERGE (e)-[:MENTIONS]->(u)

    // Delete old relationship to Author
    DELETE r

    RETURN count(r) AS migrated_mentions
    `

    _, err = n.ExecuteWrite(ctx, mentionsCypher, nil)
    if err != nil {
        return fmt.Errorf("failed to migrate MENTIONS relationships: %w", err)
    }

    // Step 5: Delete orphaned Author nodes (no longer needed)
    deleteCypher := `
    // Find Author nodes with no remaining relationships
    MATCH (a:Author)
    WHERE NOT (a)<-[:AUTHORED_BY]-() AND NOT (a)<-[:MENTIONS]-()
    DETACH DELETE a
    RETURN count(a) AS deleted
    `

    _, err = n.ExecuteWrite(ctx, deleteCypher, nil)
    if err != nil {
        return fmt.Errorf("failed to delete orphaned Author nodes: %w", err)
    }

    // Step 6: Drop the old Author constraint if it exists
    dropConstraintCypher := `DROP CONSTRAINT author_pubkey_unique IF EXISTS`
    _, _ = n.ExecuteWrite(ctx, dropConstraintCypher, nil)
    // Ignore error as constraint may not exist

    n.Logger.Infof("completed Author to NostrUser migration")
    return nil
}
@@ -135,6 +135,9 @@ func NewWithConfig(
        return
    }

+   // Run database migrations (e.g., Author -> NostrUser consolidation)
+   n.RunMigrations()
+
    // Initialize serial counter
    if err = n.initSerialCounter(); chk.E(err) {
        return
@@ -298,10 +301,8 @@ func (n *N) EventIdsBySerial(start uint64, count int) (
    return
}

-// RunMigrations runs database migrations (no-op for neo4j)
-func (n *N) RunMigrations() {
-   // No-op for neo4j
-}
+// RunMigrations is implemented in migrations.go
+// It handles schema migrations like the Author -> NostrUser consolidation

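For future schema changes, a new entry would be appended to the `migrations` slice defined in `migrations.go`; the sketch below shows the shape of such a registration, with a hypothetical `v2` migration that does not exist in this commit.

```go
package neo4j

import "context"

// registerExampleMigration shows how a later schema change could be added to
// the migrations slice defined in migrations.go. The "v2" entry is purely
// hypothetical and illustrative only.
func registerExampleMigration() {
    migrations = append(migrations, Migration{
        Version:     "v2",
        Description: "example follow-up migration (illustrative only)",
        Migrate: func(ctx context.Context, n *N) error {
            // A real migration would run Cypher here via n.ExecuteWrite and
            // return an error to leave the Migration marker unwritten.
            return nil
        },
    })
}
```

Because `RunMigrations` records a `Migration` node per applied version, restarting the relay skips entries that have already completed.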
// Ready returns a channel that closes when the database is ready to serve requests.
// This allows callers to wait for database warmup to complete.

@@ -290,16 +290,16 @@ func TestQueryEventsWithLimit(t *testing.T) {
    }

    // Query with limit
-   limit := 5
+   limit := uint(5)
    evs, err := db.QueryEvents(ctx, &filter.F{
        Kinds: kind.NewS(kind.New(1)),
-       Limit: limit,
+       Limit: &limit,
    })
    if err != nil {
        t.Fatalf("Failed to query events with limit: %v", err)
    }

-   if len(evs) != limit {
+   if len(evs) != int(limit) {
        t.Fatalf("Expected %d events with limit, got %d", limit, len(evs))
    }

@@ -406,8 +406,7 @@ func TestQueryEventsMultipleAuthors(t *testing.T) {
    createAndSaveEvent(t, ctx, db, charlie, 1, "Charlie", nil, baseTs+2)

    // Query for Alice and Bob's events
-   authors := tag.NewFromBytesSlice(alice.Pub())
-   authors.Append(tag.NewFromBytesSlice(bob.Pub()).GetFirst(nil))
+   authors := tag.NewFromBytesSlice(alice.Pub(), bob.Pub())

    evs, err := db.QueryEvents(ctx, &filter.F{
        Authors: authors,
@@ -437,7 +436,7 @@ func TestCountEvents(t *testing.T) {
    }

    // Count events
-   count, err := db.CountEvents(ctx, &filter.F{
+   count, _, err := db.CountEvents(ctx, &filter.F{
        Kinds: kind.NewS(kind.New(1)),
    })
    if err != nil {

@@ -84,7 +84,7 @@ func (n *N) SaveEvent(c context.Context, ev *event.E) (exists bool, err error) {
// buildEventCreationCypher constructs a Cypher query to create an event node with all relationships
// This is a single atomic operation that creates:
// - Event node with all properties
-// - Author node and AUTHORED_BY relationship
+// - NostrUser node and AUTHORED_BY relationship (unified author + WoT node)
// - Tag nodes and TAGGED_WITH relationships
// - Reference relationships (REFERENCES for 'e' tags, MENTIONS for 'p' tags)
func (n *N) buildEventCreationCypher(ev *event.E, serial uint64) (string, map[string]any) {
@@ -124,10 +124,12 @@ func (n *N) buildEventCreationCypher(ev *event.E, serial uint64) (string, map[st
    params["tags"] = string(tagsJSON)

    // Start building the Cypher query
-   // Use MERGE to ensure idempotency for author nodes
+   // Use MERGE to ensure idempotency for NostrUser nodes
+   // NostrUser serves both NIP-01 author tracking and WoT social graph
    cypher := `
-// Create or match author node
-MERGE (a:Author {pubkey: $pubkey})
+// Create or match NostrUser node (unified author + social graph)
+MERGE (a:NostrUser {pubkey: $pubkey})
ON CREATE SET a.created_at = timestamp(), a.first_seen_event = $eventId

// Create event node with expiration for NIP-40 support
CREATE (e:Event {
@@ -212,15 +214,16 @@ FOREACH (ignoreMe IN CASE WHEN ref%d IS NOT NULL THEN [1] ELSE [] END |
        continue // Skip invalid p-tags
    }

-   // Create mention to another author
+   // Create mention to another NostrUser
    paramName := fmt.Sprintf("pTag_%d", pTagIndex)
    params[paramName] = tagValue

    cypher += fmt.Sprintf(`
-// Mention of author (p-tag)
-MERGE (mentioned%d:Author {pubkey: $%s})
+// Mention of NostrUser (p-tag)
+MERGE (mentioned%d:NostrUser {pubkey: $%s})
+ON CREATE SET mentioned%d.created_at = timestamp()
CREATE (e)-[:MENTIONS]->(mentioned%d)
-`, pTagIndex, paramName, pTagIndex)
+`, pTagIndex, paramName, pTagIndex, pTagIndex)

    pTagIndex++

@@ -542,7 +542,7 @@ func TestSaveEvent_ETagReference(t *testing.T) {

    // Verify MENTIONS relationship was also created for the p-tag
    mentionsCypher := `
-       MATCH (reply:Event {id: $replyId})-[:MENTIONS]->(author:Author {pubkey: $authorPubkey})
+       MATCH (reply:Event {id: $replyId})-[:MENTIONS]->(author:NostrUser {pubkey: $authorPubkey})
        RETURN author.pubkey AS pubkey
    `
    mentionsParams := map[string]any{

@@ -37,10 +37,11 @@ func (n *N) applySchema(ctx context.Context) error {
    // REQ filters can specify: {"ids": ["<event_id>", ...]}
    "CREATE CONSTRAINT event_id_unique IF NOT EXISTS FOR (e:Event) REQUIRE e.id IS UNIQUE",

-   // MANDATORY (NIP-01): Author.pubkey uniqueness for "authors" filter
+   // MANDATORY (NIP-01): NostrUser.pubkey uniqueness for "authors" filter
    // REQ filters can specify: {"authors": ["<pubkey>", ...]}
-   // Events are linked to Author nodes via AUTHORED_BY relationship
-   "CREATE CONSTRAINT author_pubkey_unique IF NOT EXISTS FOR (a:Author) REQUIRE a.pubkey IS UNIQUE",
+   // Events are linked to NostrUser nodes via AUTHORED_BY relationship
+   // NOTE: NostrUser unifies both NIP-01 author tracking and WoT social graph
+   "CREATE CONSTRAINT nostrUser_pubkey IF NOT EXISTS FOR (n:NostrUser) REQUIRE n.pubkey IS UNIQUE",

    // ============================================================
    // === OPTIONAL: Internal Relay Operations ===
@@ -66,9 +67,8 @@ func (n *N) applySchema(ctx context.Context) error {
    // Not required for NIP-01 compliance
    // ============================================================

-   // OPTIONAL (WoT): NostrUser nodes for social graph/trust metrics
-   // Separate from Author nodes - Author is for NIP-01, NostrUser for WoT
-   "CREATE CONSTRAINT nostrUser_pubkey IF NOT EXISTS FOR (n:NostrUser) REQUIRE n.pubkey IS UNIQUE",
+   // NOTE: NostrUser constraint is defined above in MANDATORY section
+   // It serves both NIP-01 (author tracking) and WoT (social graph) purposes

    // OPTIONAL (WoT): Container for WoT metrics cards per observee
    "CREATE CONSTRAINT setOfNostrUserWotMetricsCards_observee_pubkey IF NOT EXISTS FOR (n:SetOfNostrUserWotMetricsCards) REQUIRE n.observee_pubkey IS UNIQUE",
@@ -200,6 +200,9 @@ func (n *N) dropAll(ctx context.Context) error {
    constraints := []string{
        // MANDATORY (NIP-01) constraints
        "DROP CONSTRAINT event_id_unique IF EXISTS",
+       "DROP CONSTRAINT nostrUser_pubkey IF EXISTS", // Unified author + WoT constraint
+
+       // Legacy constraint (removed in migration)
        "DROP CONSTRAINT author_pubkey_unique IF EXISTS",

        // OPTIONAL (Internal) constraints
@@ -207,9 +210,6 @@ func (n *N) dropAll(ctx context.Context) error {

        // OPTIONAL (Social Graph) constraints
        "DROP CONSTRAINT processedSocialEvent_event_id IF EXISTS",

        // OPTIONAL (WoT) constraints
-       "DROP CONSTRAINT nostrUser_pubkey IF EXISTS",
        "DROP CONSTRAINT setOfNostrUserWotMetricsCards_observee_pubkey IF EXISTS",
        "DROP CONSTRAINT nostrUserWotMetricsCard_unique_combination_1 IF EXISTS",
        "DROP CONSTRAINT nostrUserWotMetricsCard_unique_combination_2 IF EXISTS",

@@ -5,179 +5,12 @@ import (
    "os"
    "testing"

    "git.mleku.dev/mleku/nostr/encoders/filter"
    "git.mleku.dev/mleku/nostr/encoders/kind"
    "git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
)

-func TestSubscriptions_AddAndRemove(t *testing.T) {
-   neo4jURI := os.Getenv("ORLY_NEO4J_URI")
-   if neo4jURI == "" {
-       t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
-   }
-
-   ctx, cancel := context.WithCancel(context.Background())
-   defer cancel()
-
-   tempDir := t.TempDir()
-   db, err := New(ctx, cancel, tempDir, "debug")
-   if err != nil {
-       t.Fatalf("Failed to create database: %v", err)
-   }
-   defer db.Close()
-
-   <-db.Ready()
-
-   // Create a subscription
-   subID := "test-sub-123"
-   f := &filter.F{
-       Kinds: kind.NewS(kind.New(1)),
-   }
-
-   // Add subscription
-   db.AddSubscription(subID, f)
-
-   // Get subscription count (should be 1)
-   count := db.GetSubscriptionCount()
-   if count != 1 {
-       t.Fatalf("Expected 1 subscription, got %d", count)
-   }
-
-   // Remove subscription
-   db.RemoveSubscription(subID)
-
-   // Get subscription count (should be 0)
-   count = db.GetSubscriptionCount()
-   if count != 0 {
-       t.Fatalf("Expected 0 subscriptions after removal, got %d", count)
-   }
-
-   t.Logf("✓ Subscription add/remove works correctly")
-}
-
-func TestSubscriptions_MultipleSubscriptions(t *testing.T) {
-   neo4jURI := os.Getenv("ORLY_NEO4J_URI")
-   if neo4jURI == "" {
-       t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
-   }
-
-   ctx, cancel := context.WithCancel(context.Background())
-   defer cancel()
-
-   tempDir := t.TempDir()
-   db, err := New(ctx, cancel, tempDir, "debug")
-   if err != nil {
-       t.Fatalf("Failed to create database: %v", err)
-   }
-   defer db.Close()
-
-   <-db.Ready()
-
-   // Add multiple subscriptions
-   for i := 0; i < 5; i++ {
-       subID := string(rune('A' + i))
-       f := &filter.F{
-           Kinds: kind.NewS(kind.New(uint16(i + 1))),
-       }
-       db.AddSubscription(subID, f)
-   }
-
-   // Get subscription count
-   count := db.GetSubscriptionCount()
-   if count != 5 {
-       t.Fatalf("Expected 5 subscriptions, got %d", count)
-   }
-
-   // Remove some subscriptions
-   db.RemoveSubscription("A")
-   db.RemoveSubscription("C")
-
-   count = db.GetSubscriptionCount()
-   if count != 3 {
-       t.Fatalf("Expected 3 subscriptions after removal, got %d", count)
-   }
-
-   // Clear all subscriptions
-   db.ClearSubscriptions()
-
-   count = db.GetSubscriptionCount()
-   if count != 0 {
-       t.Fatalf("Expected 0 subscriptions after clear, got %d", count)
-   }
-
-   t.Logf("✓ Multiple subscriptions managed correctly")
-}
-
-func TestSubscriptions_DuplicateID(t *testing.T) {
-   neo4jURI := os.Getenv("ORLY_NEO4J_URI")
-   if neo4jURI == "" {
-       t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
-   }
-
-   ctx, cancel := context.WithCancel(context.Background())
-   defer cancel()
-
-   tempDir := t.TempDir()
-   db, err := New(ctx, cancel, tempDir, "debug")
-   if err != nil {
-       t.Fatalf("Failed to create database: %v", err)
-   }
-   defer db.Close()
-
-   <-db.Ready()
-
-   subID := "duplicate-test"
-
-   // Add first subscription
-   f1 := &filter.F{
-       Kinds: kind.NewS(kind.New(1)),
-   }
-   db.AddSubscription(subID, f1)
-
-   // Add subscription with same ID (should replace)
-   f2 := &filter.F{
-       Kinds: kind.NewS(kind.New(7)),
-   }
-   db.AddSubscription(subID, f2)
-
-   // Should still have only 1 subscription
-   count := db.GetSubscriptionCount()
-   if count != 1 {
-       t.Fatalf("Expected 1 subscription (duplicate replaced), got %d", count)
-   }
-
-   t.Logf("✓ Duplicate subscription ID handling works correctly")
-}
-
-func TestSubscriptions_RemoveNonExistent(t *testing.T) {
-   neo4jURI := os.Getenv("ORLY_NEO4J_URI")
-   if neo4jURI == "" {
-       t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
-   }
-
-   ctx, cancel := context.WithCancel(context.Background())
-   defer cancel()
-
-   tempDir := t.TempDir()
-   db, err := New(ctx, cancel, tempDir, "debug")
-   if err != nil {
-       t.Fatalf("Failed to create database: %v", err)
-   }
-   defer db.Close()
-
-   <-db.Ready()
-
-   // Try to remove non-existent subscription (should not panic)
-   db.RemoveSubscription("non-existent")
-
-   // Should still have 0 subscriptions
-   count := db.GetSubscriptionCount()
-   if count != 0 {
-       t.Fatalf("Expected 0 subscriptions, got %d", count)
-   }
-
-   t.Logf("✓ Removing non-existent subscription handled gracefully")
-}
+// Note: WebSocket subscription management (AddSubscription, GetSubscriptionCount,
+// RemoveSubscription, ClearSubscriptions) is handled at the app layer, not the
+// database layer. Tests for those methods have been removed.

func TestMarkers_SetGetDelete(t *testing.T) {
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
@@ -371,24 +204,36 @@ func TestIdentity(t *testing.T) {

    <-db.Ready()

+   // Wipe to ensure clean state
+   if err := db.Wipe(); err != nil {
+       t.Fatalf("Failed to wipe database: %v", err)
+   }
+
    // Get identity (creates if not exists)
-   signer := db.Identity()
-   if signer == nil {
-       t.Fatal("Expected non-nil signer from Identity()")
+   secret1, err := db.GetOrCreateRelayIdentitySecret()
+   if err != nil {
+       t.Fatalf("Failed to get identity: %v", err)
+   }
+   if secret1 == nil {
+       t.Fatal("Expected non-nil secret from GetOrCreateRelayIdentitySecret()")
    }

    // Get identity again (should return same one)
-   signer2 := db.Identity()
-   if signer2 == nil {
-       t.Fatal("Expected non-nil signer from second Identity() call")
+   secret2, err := db.GetOrCreateRelayIdentitySecret()
+   if err != nil {
+       t.Fatalf("Failed to get identity second time: %v", err)
+   }
+   if secret2 == nil {
+       t.Fatal("Expected non-nil secret from second GetOrCreateRelayIdentitySecret() call")
    }

-   // Public keys should match
-   pub1 := signer.Pub()
-   pub2 := signer2.Pub()
-   for i := range pub1 {
-       if pub1[i] != pub2[i] {
-           t.Fatal("Identity pubkeys don't match across calls")
+   // Secrets should match
+   if len(secret1) != len(secret2) {
+       t.Fatalf("Secret lengths don't match: %d vs %d", len(secret1), len(secret2))
+   }
+   for i := range secret1 {
+       if secret1[i] != secret2[i] {
+           t.Fatal("Identity secrets don't match across calls")
        }
    }

@@ -1 +1 @@
-v0.32.5
+v0.32.6