Refactor Neo4j tests and improve tag handling in Cypher
Replaces the outdated Neo4j test setup with a robust TestMain, a shared test database, and utility functions for test data and migrations. Improves Cypher generation for e-tags, p-tags, and other tags so the emitted queries are valid Neo4j Cypher. Adds an integration test script and updates the benchmark reports for the Badger backend.
@@ -25,7 +25,15 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
|
||||
log.I.F("HandleDelete: processing delete event %0x from pubkey %0x", env.E.ID, env.E.Pubkey)
|
||||
log.I.F("HandleDelete: delete event tags: %d tags", len(*env.E.Tags))
|
||||
for i, t := range *env.E.Tags {
|
||||
log.I.F("HandleDelete: tag %d: %s = %s", i, string(t.Key()), string(t.Value()))
|
||||
// Use ValueHex() for e/p tags to properly display binary-encoded values
|
||||
key := string(t.Key())
|
||||
var val string
|
||||
if key == "e" || key == "p" {
|
||||
val = string(t.ValueHex()) // Properly converts binary to hex
|
||||
} else {
|
||||
val = string(t.Value())
|
||||
}
|
||||
log.I.F("HandleDelete: tag %d: %s = %s", i, key, val)
|
||||
}
|
||||
|
||||
// Debug: log admin and owner lists
|
||||
@@ -142,27 +150,21 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {
|
||||
// if e tags are found, delete them if the author is signer, or one of
|
||||
// the owners is signer
|
||||
if utils.FastEqual(t.Key(), []byte("e")) {
|
||||
// First try binary format (optimized storage for e-tags)
|
||||
var dst []byte
|
||||
if binVal := t.ValueBinary(); binVal != nil {
|
||||
dst = binVal
|
||||
log.I.F("HandleDelete: processing binary e-tag event ID: %0x", dst)
|
||||
} else {
|
||||
// Fall back to hex decoding for non-binary values
|
||||
val := t.Value()
|
||||
if len(val) == 0 {
|
||||
log.W.F("HandleDelete: empty e-tag value")
|
||||
continue
|
||||
}
|
||||
log.I.F("HandleDelete: processing e-tag with value: %s", string(val))
|
||||
if b, e := hex.Dec(string(val)); chk.E(e) {
|
||||
log.E.F("HandleDelete: failed to decode hex event ID %s: %v", string(val), e)
|
||||
continue
|
||||
} else {
|
||||
dst = b
|
||||
log.I.F("HandleDelete: decoded event ID: %0x", dst)
|
||||
}
|
||||
// Use ValueHex() which properly handles both binary-encoded and hex string formats
|
||||
hexVal := t.ValueHex()
|
||||
if len(hexVal) == 0 {
|
||||
log.W.F("HandleDelete: empty e-tag value")
|
||||
continue
|
||||
}
|
||||
log.I.F("HandleDelete: processing e-tag event ID: %s", string(hexVal))
|
||||
|
||||
// Decode hex to binary for filter
|
||||
dst, e := hex.Dec(string(hexVal))
|
||||
if chk.E(e) {
|
||||
log.E.F("HandleDelete: failed to decode event ID %s: %v", string(hexVal), e)
|
||||
continue
|
||||
}
|
||||
|
||||
f := &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(dst),
|
||||
}
|
||||
|
||||
@@ -11,7 +11,12 @@
|
||||
"Bash(docker compose:*)",
|
||||
"Bash(./run-benchmark.sh:*)",
|
||||
"Bash(tee:*)",
|
||||
"Bash(sudo rm:*)"
|
||||
"Bash(sudo rm:*)",
|
||||
"Bash(go mod tidy:*)",
|
||||
"Bash(docker run --rm -v \"/home/mleku/src/next.orly.dev/cmd/benchmark/data:/data\" --user root alpine sh -c \"rm -rf /data/* /data/.[!.]*\")",
|
||||
"Bash(head:*)",
|
||||
"Bash(cat:*)",
|
||||
"Bash(chmod:*)"
|
||||
],
|
||||
"deny": [],
|
||||
"ask": []
|
||||
|
||||
@@ -18,8 +18,11 @@ RUN git clone https://git.nostrdev.com/mleku/next.orly.dev.git . && \
|
||||
echo "Building benchmark from ORLY version: ${LATEST_TAG}" && \
|
||||
git checkout "${LATEST_TAG}"
|
||||
|
||||
# Download dependencies
|
||||
RUN go mod download
|
||||
# Remove local replace directives and update to released version, then download dependencies
|
||||
RUN sed -i '/^replace .* => \/home/d' go.mod && \
|
||||
sed -i 's/git.mleku.dev\/mleku\/nostr v1.0.7/git.mleku.dev\/mleku\/nostr v1.0.8/' go.mod && \
|
||||
go mod tidy && \
|
||||
go mod download
|
||||
|
||||
# Build the benchmark tool with CGO disabled (uses purego for crypto)
|
||||
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o benchmark ./cmd/benchmark
|
||||
|
||||
@@ -19,8 +19,11 @@ RUN git clone https://git.nostrdev.com/mleku/next.orly.dev.git . && \
|
||||
echo "Building ORLY version: ${LATEST_TAG}" && \
|
||||
git checkout "${LATEST_TAG}"
|
||||
|
||||
# Download dependencies
|
||||
RUN go mod download
|
||||
# Remove local replace directives and update to released version, then download dependencies
|
||||
RUN sed -i '/^replace .* => \/home/d' go.mod && \
|
||||
sed -i 's/git.mleku.dev\/mleku\/nostr v1.0.7/git.mleku.dev\/mleku\/nostr v1.0.8/' go.mod && \
|
||||
go mod tidy && \
|
||||
go mod download
|
||||
|
||||
# Build the relay with CGO disabled (uses purego for crypto)
|
||||
# Include debug symbols for profiling
|
||||
|
||||
go.mod (4 changed lines)
@@ -3,7 +3,7 @@ module next.orly.dev
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
git.mleku.dev/mleku/nostr v1.0.7
|
||||
git.mleku.dev/mleku/nostr v1.0.8
|
||||
github.com/adrg/xdg v0.5.3
|
||||
github.com/aperturerobotics/go-indexeddb v0.2.3
|
||||
github.com/dgraph-io/badger/v4 v4.8.0
|
||||
@@ -82,5 +82,3 @@ require (
|
||||
)
|
||||
|
||||
retract v1.0.3
|
||||
|
||||
replace git.mleku.dev/mleku/nostr => /home/mleku/src/git.mleku.dev/mleku/nostr
|
||||
|
||||
go.sum (2 changed lines)
@@ -1,3 +1,5 @@
|
||||
git.mleku.dev/mleku/nostr v1.0.8 h1:YYREdIxobEqYkzxQ7/5ALACPzLkiHW+CTira+VvSQZk=
|
||||
git.mleku.dev/mleku/nostr v1.0.8/go.mod h1:iYTlg2WKJXJ0kcsM6QBGOJ0UDiJidMgL/i64cHyPjZc=
|
||||
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNNZqGeYq4PnYOlwlOVIvSyNaIy0ykg=
|
||||
|
||||
pkg/neo4j/docker-compose.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
|
||||
# Docker Compose file for Neo4j test database
|
||||
# Usage: docker compose up -d && go test ./pkg/neo4j/... && docker compose down
|
||||
services:
|
||||
neo4j-test:
|
||||
image: neo4j:5.15.0-community
|
||||
container_name: neo4j-test
|
||||
ports:
|
||||
- "7687:7687" # Bolt protocol
|
||||
- "7474:7474" # HTTP (browser interface)
|
||||
environment:
|
||||
- NEO4J_AUTH=neo4j/testpassword
|
||||
- NEO4J_PLUGINS=["apoc"]
|
||||
- NEO4J_dbms_security_procedures_unrestricted=apoc.*
|
||||
- NEO4J_dbms_memory_heap_initial__size=512m
|
||||
- NEO4J_dbms_memory_heap_max__size=1g
|
||||
- NEO4J_dbms_memory_pagecache_size=512m
|
||||
healthcheck:
|
||||
test: ["CMD", "cypher-shell", "-u", "neo4j", "-p", "testpassword", "RETURN 1"]
|
||||
interval: 5s
|
||||
timeout: 10s
|
||||
retries: 10
|
||||
start_period: 30s
|
||||
tmpfs:
|
||||
- /data # Use tmpfs for faster tests
|
||||
pkg/neo4j/hex_utils_test.go (new file, 277 lines)
@@ -0,0 +1,277 @@
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
)
|
||||
|
||||
// TestIsBinaryEncoded tests the IsBinaryEncoded function
|
||||
func TestIsBinaryEncoded(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input []byte
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "Valid binary encoded (33 bytes with null terminator)",
|
||||
input: append(make([]byte, 32), 0),
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Invalid - 32 bytes without terminator",
|
||||
input: make([]byte, 32),
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Invalid - 33 bytes without null terminator",
|
||||
input: append(make([]byte, 32), 1),
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Invalid - 64 bytes (hex string)",
|
||||
input: []byte("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Invalid - empty",
|
||||
input: []byte{},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Invalid - too short",
|
||||
input: []byte{0, 1, 2, 3},
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := IsBinaryEncoded(tt.input)
|
||||
if result != tt.expected {
|
||||
t.Errorf("IsBinaryEncoded(%v) = %v, want %v", tt.input, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestNormalizePubkeyHex tests the NormalizePubkeyHex function
|
||||
func TestNormalizePubkeyHex(t *testing.T) {
|
||||
// Create a 32-byte test value
|
||||
testBytes := make([]byte, 32)
|
||||
testBytes[31] = 0x01 // Set last byte to 1
|
||||
|
||||
// Create binary-encoded version (33 bytes with null terminator)
|
||||
binaryEncoded := append(testBytes, 0)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
input []byte
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "Binary encoded to hex",
|
||||
input: binaryEncoded,
|
||||
expected: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
},
|
||||
{
|
||||
name: "Lowercase hex passthrough",
|
||||
input: []byte("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
expected: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
},
|
||||
{
|
||||
name: "Uppercase hex to lowercase",
|
||||
input: []byte("ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789"),
|
||||
expected: "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
|
||||
},
|
||||
{
|
||||
name: "Mixed case hex to lowercase",
|
||||
input: []byte("AbCdEf0123456789AbCdEf0123456789AbCdEf0123456789AbCdEf0123456789"),
|
||||
expected: "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
|
||||
},
|
||||
{
|
||||
name: "Prefix hex (shorter than 64)",
|
||||
input: []byte("ABCD"),
|
||||
expected: "abcd",
|
||||
},
|
||||
{
|
||||
name: "Empty input",
|
||||
input: []byte{},
|
||||
expected: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := NormalizePubkeyHex(tt.input)
|
||||
if result != tt.expected {
|
||||
t.Errorf("NormalizePubkeyHex(%v) = %q, want %q", tt.input, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestExtractPTagValue tests the ExtractPTagValue function
|
||||
func TestExtractPTagValue(t *testing.T) {
|
||||
// Create a valid pubkey hex string
|
||||
validHex := "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
tag *tag.T
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "Nil tag",
|
||||
tag: nil,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "Empty tag",
|
||||
tag: &tag.T{T: [][]byte{}},
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "Tag with only key",
|
||||
tag: &tag.T{T: [][]byte{[]byte("p")}},
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "Valid p-tag with hex value",
|
||||
tag: &tag.T{T: [][]byte{
|
||||
[]byte("p"),
|
||||
[]byte(validHex),
|
||||
}},
|
||||
expected: validHex,
|
||||
},
|
||||
{
|
||||
name: "P-tag with uppercase hex",
|
||||
tag: &tag.T{T: [][]byte{
|
||||
[]byte("p"),
|
||||
[]byte("ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789"),
|
||||
}},
|
||||
expected: "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := ExtractPTagValue(tt.tag)
|
||||
if result != tt.expected {
|
||||
t.Errorf("ExtractPTagValue() = %q, want %q", result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestExtractETagValue tests the ExtractETagValue function
|
||||
func TestExtractETagValue(t *testing.T) {
|
||||
// Create a valid event ID hex string
|
||||
validHex := "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
tag *tag.T
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "Nil tag",
|
||||
tag: nil,
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "Empty tag",
|
||||
tag: &tag.T{T: [][]byte{}},
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "Tag with only key",
|
||||
tag: &tag.T{T: [][]byte{[]byte("e")}},
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "Valid e-tag with hex value",
|
||||
tag: &tag.T{T: [][]byte{
|
||||
[]byte("e"),
|
||||
[]byte(validHex),
|
||||
}},
|
||||
expected: validHex,
|
||||
},
|
||||
{
|
||||
name: "E-tag with uppercase hex",
|
||||
tag: &tag.T{T: [][]byte{
|
||||
[]byte("e"),
|
||||
[]byte("1234567890ABCDEF1234567890ABCDEF1234567890ABCDEF1234567890ABCDEF"),
|
||||
}},
|
||||
expected: "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := ExtractETagValue(tt.tag)
|
||||
if result != tt.expected {
|
||||
t.Errorf("ExtractETagValue() = %q, want %q", result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestIsValidHexPubkey tests the IsValidHexPubkey function
|
||||
func TestIsValidHexPubkey(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "Valid lowercase hex",
|
||||
input: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Valid uppercase hex",
|
||||
input: "ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Valid mixed case hex",
|
||||
input: "AbCdEf0123456789AbCdEf0123456789AbCdEf0123456789AbCdEf0123456789",
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Too short",
|
||||
input: "0000000000000000000000000000000000000000000000000000000000000",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Too long",
|
||||
input: "00000000000000000000000000000000000000000000000000000000000000001",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Contains non-hex character",
|
||||
input: "000000000000000000000000000000000000000000000000000000000000000g",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Empty string",
|
||||
input: "",
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Contains space",
|
||||
input: "0000000000000000000000000000000000000000000000000000000000000 01",
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := IsValidHexPubkey(tt.input)
|
||||
if result != tt.expected {
|
||||
t.Errorf("IsValidHexPubkey(%q) = %v, want %v", tt.input, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
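The helpers exercised above (IsBinaryEncoded, NormalizePubkeyHex, IsValidHexPubkey) are not shown in this diff. The sketch below is inferred purely from these test cases; it uses the standard library's encoding/hex and strings packages rather than the project's own encoders, and is an approximation of the expected behaviour, not the committed implementation.

package neo4j

import (
	"encoding/hex"
	"strings"
)

// IsBinaryEncoded reports whether b looks like the library's binary tag
// form implied by the tests: 32 payload bytes followed by a 0x00 terminator.
func IsBinaryEncoded(b []byte) bool {
	return len(b) == 33 && b[32] == 0
}

// NormalizePubkeyHex converts either representation to lowercase hex:
// binary-encoded values are hex-encoded; anything else (full value or
// prefix) is treated as a hex string and lowercased.
func NormalizePubkeyHex(b []byte) string {
	if len(b) == 0 {
		return ""
	}
	if IsBinaryEncoded(b) {
		return hex.EncodeToString(b[:32])
	}
	return strings.ToLower(string(b))
}

// IsValidHexPubkey reports whether s is exactly 64 hex characters; per the
// tests above, upper and mixed case are accepted here even though the
// migration queries treat only lowercase as canonical.
func IsValidHexPubkey(s string) bool {
	if len(s) != 64 {
		return false
	}
	for _, c := range s {
		switch {
		case c >= '0' && c <= '9', c >= 'a' && c <= 'f', c >= 'A' && c <= 'F':
		default:
			return false
		}
	}
	return true
}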
@@ -20,6 +20,11 @@ var migrations = []Migration{
|
||||
Description: "Merge Author nodes into NostrUser nodes",
|
||||
Migrate: migrateAuthorToNostrUser,
|
||||
},
|
||||
{
|
||||
Version: "v2",
|
||||
Description: "Clean up binary-encoded pubkeys and event IDs to lowercase hex",
|
||||
Migrate: migrateBinaryToHex,
|
||||
},
|
||||
}
|
||||
|
||||
// RunMigrations executes all pending migrations
|
||||
@@ -195,3 +200,146 @@ func migrateAuthorToNostrUser(ctx context.Context, n *N) error {
|
||||
n.Logger.Infof("completed Author to NostrUser migration")
|
||||
return nil
|
||||
}
|
||||
|
||||
// migrateBinaryToHex cleans up any binary-encoded pubkeys and event IDs
|
||||
// The nostr library stores e/p tag values in binary format (33 bytes with null terminator),
|
||||
// but Neo4j should store them as lowercase hex strings for consistent querying.
|
||||
// This migration:
|
||||
// 1. Finds NostrUser nodes with invalid (non-hex) pubkeys and deletes them
|
||||
// 2. Finds Event nodes with invalid pubkeys/IDs and deletes them
|
||||
// 3. Finds Tag nodes (type 'e' or 'p') with invalid values and deletes them
|
||||
// 4. Cleans up MENTIONS relationships pointing to invalid NostrUser nodes
|
||||
func migrateBinaryToHex(ctx context.Context, n *N) error {
|
||||
// Step 1: Count problematic nodes before cleanup
|
||||
n.Logger.Infof("scanning for binary-encoded values in Neo4j...")
|
||||
|
||||
// Check for NostrUser nodes with invalid pubkeys (not 64 char hex)
|
||||
// A valid hex pubkey is exactly 64 lowercase hex characters
|
||||
countInvalidUsersCypher := `
|
||||
MATCH (u:NostrUser)
|
||||
WHERE size(u.pubkey) <> 64
|
||||
OR NOT u.pubkey =~ '^[0-9a-f]{64}$'
|
||||
RETURN count(u) AS count
|
||||
`
|
||||
result, err := n.ExecuteRead(ctx, countInvalidUsersCypher, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to count invalid NostrUser nodes: %w", err)
|
||||
}
|
||||
|
||||
var invalidUserCount int64
|
||||
if result.Next(ctx) {
|
||||
if count, ok := result.Record().Values[0].(int64); ok {
|
||||
invalidUserCount = count
|
||||
}
|
||||
}
|
||||
n.Logger.Infof("found %d NostrUser nodes with invalid pubkeys", invalidUserCount)
|
||||
|
||||
// Check for Event nodes with invalid pubkeys or IDs
|
||||
countInvalidEventsCypher := `
|
||||
MATCH (e:Event)
|
||||
WHERE (size(e.pubkey) <> 64 OR NOT e.pubkey =~ '^[0-9a-f]{64}$')
|
||||
OR (size(e.id) <> 64 OR NOT e.id =~ '^[0-9a-f]{64}$')
|
||||
RETURN count(e) AS count
|
||||
`
|
||||
result, err = n.ExecuteRead(ctx, countInvalidEventsCypher, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to count invalid Event nodes: %w", err)
|
||||
}
|
||||
|
||||
var invalidEventCount int64
|
||||
if result.Next(ctx) {
|
||||
if count, ok := result.Record().Values[0].(int64); ok {
|
||||
invalidEventCount = count
|
||||
}
|
||||
}
|
||||
n.Logger.Infof("found %d Event nodes with invalid pubkeys or IDs", invalidEventCount)
|
||||
|
||||
// Check for Tag nodes (e/p type) with invalid values
|
||||
countInvalidTagsCypher := `
|
||||
MATCH (t:Tag)
|
||||
WHERE t.type IN ['e', 'p']
|
||||
AND (size(t.value) <> 64 OR NOT t.value =~ '^[0-9a-f]{64}$')
|
||||
RETURN count(t) AS count
|
||||
`
|
||||
result, err = n.ExecuteRead(ctx, countInvalidTagsCypher, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to count invalid Tag nodes: %w", err)
|
||||
}
|
||||
|
||||
var invalidTagCount int64
|
||||
if result.Next(ctx) {
|
||||
if count, ok := result.Record().Values[0].(int64); ok {
|
||||
invalidTagCount = count
|
||||
}
|
||||
}
|
||||
n.Logger.Infof("found %d Tag nodes (e/p type) with invalid values", invalidTagCount)
|
||||
|
||||
// If nothing to clean up, we're done
|
||||
if invalidUserCount == 0 && invalidEventCount == 0 && invalidTagCount == 0 {
|
||||
n.Logger.Infof("no binary-encoded values found, migration complete")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Step 2: Delete invalid NostrUser nodes and their relationships
|
||||
if invalidUserCount > 0 {
|
||||
n.Logger.Infof("deleting %d invalid NostrUser nodes...", invalidUserCount)
|
||||
deleteInvalidUsersCypher := `
|
||||
MATCH (u:NostrUser)
|
||||
WHERE size(u.pubkey) <> 64
|
||||
OR NOT u.pubkey =~ '^[0-9a-f]{64}$'
|
||||
DETACH DELETE u
|
||||
`
|
||||
_, err = n.ExecuteWrite(ctx, deleteInvalidUsersCypher, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete invalid NostrUser nodes: %w", err)
|
||||
}
|
||||
n.Logger.Infof("deleted %d invalid NostrUser nodes", invalidUserCount)
|
||||
}
|
||||
|
||||
// Step 3: Delete invalid Event nodes and their relationships
|
||||
if invalidEventCount > 0 {
|
||||
n.Logger.Infof("deleting %d invalid Event nodes...", invalidEventCount)
|
||||
deleteInvalidEventsCypher := `
|
||||
MATCH (e:Event)
|
||||
WHERE (size(e.pubkey) <> 64 OR NOT e.pubkey =~ '^[0-9a-f]{64}$')
|
||||
OR (size(e.id) <> 64 OR NOT e.id =~ '^[0-9a-f]{64}$')
|
||||
DETACH DELETE e
|
||||
`
|
||||
_, err = n.ExecuteWrite(ctx, deleteInvalidEventsCypher, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete invalid Event nodes: %w", err)
|
||||
}
|
||||
n.Logger.Infof("deleted %d invalid Event nodes", invalidEventCount)
|
||||
}
|
||||
|
||||
// Step 4: Delete invalid Tag nodes (e/p type) and their relationships
|
||||
if invalidTagCount > 0 {
|
||||
n.Logger.Infof("deleting %d invalid Tag nodes...", invalidTagCount)
|
||||
deleteInvalidTagsCypher := `
|
||||
MATCH (t:Tag)
|
||||
WHERE t.type IN ['e', 'p']
|
||||
AND (size(t.value) <> 64 OR NOT t.value =~ '^[0-9a-f]{64}$')
|
||||
DETACH DELETE t
|
||||
`
|
||||
_, err = n.ExecuteWrite(ctx, deleteInvalidTagsCypher, nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete invalid Tag nodes: %w", err)
|
||||
}
|
||||
n.Logger.Infof("deleted %d invalid Tag nodes", invalidTagCount)
|
||||
}
|
||||
|
||||
// Step 5: Clean up any orphaned MENTIONS/REFERENCES relationships
|
||||
// These would be relationships pointing to nodes we just deleted
|
||||
cleanupOrphanedCypher := `
|
||||
// Clean up any ProcessedSocialEvent nodes with invalid pubkeys
|
||||
MATCH (p:ProcessedSocialEvent)
|
||||
WHERE size(p.pubkey) <> 64
|
||||
OR NOT p.pubkey =~ '^[0-9a-f]{64}$'
|
||||
DETACH DELETE p
|
||||
`
|
||||
_, _ = n.ExecuteWrite(ctx, cleanupOrphanedCypher, nil)
|
||||
// Ignore errors - best effort cleanup
|
||||
|
||||
n.Logger.Infof("binary-to-hex migration completed successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
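RunMigrations itself is outside the hunks shown here. Based on the Migration struct above and the migrationApplied/markMigrationComplete helpers used by the tests below, its control flow is presumably close to the following sketch (the method name and error wrapping are illustrative; context and fmt are assumed to already be imported in the real file):

// runMigrationsSketch is an illustrative approximation of RunMigrations:
// skip versions already recorded, run the Migrate func, record completion.
func (n *N) runMigrationsSketch(ctx context.Context) error {
	for _, m := range migrations {
		if n.migrationApplied(ctx, m.Version) {
			continue // already applied, e.g. marked in a previous run
		}
		n.Logger.Infof("running migration %s: %s", m.Version, m.Description)
		if err := m.Migrate(ctx, n); err != nil {
			return fmt.Errorf("migration %s failed: %w", m.Version, err)
		}
		if err := n.markMigrationComplete(ctx, m.Version, m.Description); err != nil {
			return fmt.Errorf("failed to mark migration %s complete: %w", m.Version, err)
		}
	}
	return nil
}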
pkg/neo4j/migrations_test.go (new file, 302 lines)
@@ -0,0 +1,302 @@
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestMigrationV2_CleanupBinaryEncodedValues tests that migration v2 properly
|
||||
// cleans up binary-encoded pubkeys and event IDs
|
||||
func TestMigrationV2_CleanupBinaryEncodedValues(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create some valid NostrUser nodes (should NOT be deleted)
|
||||
validPubkeys := []string{
|
||||
"0000000000000000000000000000000000000000000000000000000000000001",
|
||||
"abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
|
||||
}
|
||||
for _, pk := range validPubkeys {
|
||||
setupInvalidNostrUser(t, pk) // Using setupInvalidNostrUser to create directly
|
||||
}
|
||||
|
||||
// Create some invalid NostrUser nodes (should be deleted)
|
||||
invalidPubkeys := []string{
|
||||
"binary\x00garbage\x00data", // Binary garbage
|
||||
"ABCDEF", // Too short
|
||||
"GGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGGG", // Non-hex chars
|
||||
string(append(make([]byte, 32), 0)), // 33-byte binary format
|
||||
}
|
||||
for _, pk := range invalidPubkeys {
|
||||
setupInvalidNostrUser(t, pk)
|
||||
}
|
||||
|
||||
// Verify invalid nodes exist before migration
|
||||
invalidCountBefore := countInvalidNostrUsers(t)
|
||||
if invalidCountBefore != 4 {
|
||||
t.Errorf("Expected 4 invalid NostrUsers before migration, got %d", invalidCountBefore)
|
||||
}
|
||||
|
||||
totalBefore := countNodes(t, "NostrUser")
|
||||
if totalBefore != 6 {
|
||||
t.Errorf("Expected 6 total NostrUsers before migration, got %d", totalBefore)
|
||||
}
|
||||
|
||||
// Run the migration
|
||||
err := migrateBinaryToHex(ctx, testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("Migration failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify invalid nodes were deleted
|
||||
invalidCountAfter := countInvalidNostrUsers(t)
|
||||
if invalidCountAfter != 0 {
|
||||
t.Errorf("Expected 0 invalid NostrUsers after migration, got %d", invalidCountAfter)
|
||||
}
|
||||
|
||||
// Verify valid nodes were NOT deleted
|
||||
totalAfter := countNodes(t, "NostrUser")
|
||||
if totalAfter != 2 {
|
||||
t.Errorf("Expected 2 valid NostrUsers after migration, got %d", totalAfter)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMigrationV2_CleanupInvalidEvents tests that migration v2 properly
|
||||
// cleans up Event nodes with invalid pubkeys or IDs
|
||||
func TestMigrationV2_CleanupInvalidEvents(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create valid events
|
||||
validEventID := "1111111111111111111111111111111111111111111111111111111111111111"
|
||||
validPubkey := "0000000000000000000000000000000000000000000000000000000000000001"
|
||||
setupTestEvent(t, validEventID, validPubkey, 1, "[]")
|
||||
|
||||
// Create invalid events directly
|
||||
setupInvalidEvent(t, "invalid_id", validPubkey) // Invalid ID
|
||||
setupInvalidEvent(t, validEventID+"2", "invalid_pubkey") // Invalid pubkey (different ID to avoid duplicate)
|
||||
setupInvalidEvent(t, "TOOSHORT", "binary\x00garbage") // Both invalid
|
||||
|
||||
// Count events before migration
|
||||
eventsBefore := countNodes(t, "Event")
|
||||
if eventsBefore != 4 {
|
||||
t.Errorf("Expected 4 Events before migration, got %d", eventsBefore)
|
||||
}
|
||||
|
||||
// Run the migration
|
||||
err := migrateBinaryToHex(ctx, testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("Migration failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify only valid event remains
|
||||
eventsAfter := countNodes(t, "Event")
|
||||
if eventsAfter != 1 {
|
||||
t.Errorf("Expected 1 valid Event after migration, got %d", eventsAfter)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMigrationV2_CleanupInvalidTags tests that migration v2 properly
|
||||
// cleans up Tag nodes (e/p type) with invalid values
|
||||
func TestMigrationV2_CleanupInvalidTags(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create valid tags
|
||||
validHex := "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"
|
||||
setupInvalidTag(t, "e", validHex) // Valid e-tag
|
||||
setupInvalidTag(t, "p", validHex) // Valid p-tag
|
||||
setupInvalidTag(t, "t", "topic") // Non e/p tag (should not be affected)
|
||||
|
||||
// Create invalid e/p tags
|
||||
setupInvalidTag(t, "e", "binary\x00garbage") // Invalid e-tag
|
||||
setupInvalidTag(t, "p", "TOOSHORT") // Invalid p-tag (too short)
|
||||
setupInvalidTag(t, "e", string(append(make([]byte, 32), 0))) // Binary encoded
|
||||
|
||||
// Count tags before migration
|
||||
tagsBefore := countNodes(t, "Tag")
|
||||
if tagsBefore != 6 {
|
||||
t.Errorf("Expected 6 Tags before migration, got %d", tagsBefore)
|
||||
}
|
||||
|
||||
invalidBefore := countInvalidTags(t)
|
||||
if invalidBefore != 3 {
|
||||
t.Errorf("Expected 3 invalid e/p Tags before migration, got %d", invalidBefore)
|
||||
}
|
||||
|
||||
// Run the migration
|
||||
err := migrateBinaryToHex(ctx, testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("Migration failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify invalid tags were deleted
|
||||
invalidAfter := countInvalidTags(t)
|
||||
if invalidAfter != 0 {
|
||||
t.Errorf("Expected 0 invalid e/p Tags after migration, got %d", invalidAfter)
|
||||
}
|
||||
|
||||
// Verify valid tags remain (2 e/p valid + 1 t-tag)
|
||||
tagsAfter := countNodes(t, "Tag")
|
||||
if tagsAfter != 3 {
|
||||
t.Errorf("Expected 3 Tags after migration, got %d", tagsAfter)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMigrationV2_Idempotent tests that migration v2 can be run multiple times safely
|
||||
func TestMigrationV2_Idempotent(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create only valid data
|
||||
validPubkey := "0000000000000000000000000000000000000000000000000000000000000001"
|
||||
validEventID := "1111111111111111111111111111111111111111111111111111111111111111"
|
||||
setupTestEvent(t, validEventID, validPubkey, 1, "[]")
|
||||
|
||||
countBefore := countNodes(t, "Event")
|
||||
|
||||
// Run migration first time
|
||||
err := migrateBinaryToHex(ctx, testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("First migration run failed: %v", err)
|
||||
}
|
||||
|
||||
countAfterFirst := countNodes(t, "Event")
|
||||
if countAfterFirst != countBefore {
|
||||
t.Errorf("First migration changed valid event count: before=%d, after=%d", countBefore, countAfterFirst)
|
||||
}
|
||||
|
||||
// Run migration second time
|
||||
err = migrateBinaryToHex(ctx, testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("Second migration run failed: %v", err)
|
||||
}
|
||||
|
||||
countAfterSecond := countNodes(t, "Event")
|
||||
if countAfterSecond != countBefore {
|
||||
t.Errorf("Second migration changed valid event count: before=%d, after=%d", countBefore, countAfterSecond)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMigrationV2_NoDataDoesNotFail tests that migration v2 succeeds with empty database
|
||||
func TestMigrationV2_NoDataDoesNotFail(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up completely
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Run migration on empty database - should not fail
|
||||
err := migrateBinaryToHex(ctx, testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("Migration on empty database failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMigrationMarking tests that migrations are properly tracked
|
||||
func TestMigrationMarking(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Verify migration v2 has not been applied
|
||||
if testDB.migrationApplied(ctx, "v2") {
|
||||
t.Error("Migration v2 should not be applied before test")
|
||||
}
|
||||
|
||||
// Mark migration as complete
|
||||
err := testDB.markMigrationComplete(ctx, "v2", "Test migration")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to mark migration complete: %v", err)
|
||||
}
|
||||
|
||||
// Verify migration is now marked as applied
|
||||
if !testDB.migrationApplied(ctx, "v2") {
|
||||
t.Error("Migration v2 should be applied after marking")
|
||||
}
|
||||
|
||||
// Clean up
|
||||
cleanTestDatabase()
|
||||
}
|
||||
|
||||
// TestMigrationV1_AuthorToNostrUserMerge tests the author migration
|
||||
func TestMigrationV1_AuthorToNostrUserMerge(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Create some Author nodes (legacy format)
|
||||
authorPubkeys := []string{
|
||||
"0000000000000000000000000000000000000000000000000000000000000001",
|
||||
"0000000000000000000000000000000000000000000000000000000000000002",
|
||||
}
|
||||
|
||||
for _, pk := range authorPubkeys {
|
||||
cypher := `CREATE (a:Author {pubkey: $pubkey})`
|
||||
_, err := testDB.ExecuteWrite(ctx, cypher, map[string]any{"pubkey": pk})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create Author node: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify Author nodes exist
|
||||
authorCount := countNodes(t, "Author")
|
||||
if authorCount != 2 {
|
||||
t.Errorf("Expected 2 Author nodes, got %d", authorCount)
|
||||
}
|
||||
|
||||
// Run migration
|
||||
err := migrateAuthorToNostrUser(ctx, testDB)
|
||||
if err != nil {
|
||||
t.Fatalf("Migration failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify NostrUser nodes were created
|
||||
nostrUserCount := countNodes(t, "NostrUser")
|
||||
if nostrUserCount != 2 {
|
||||
t.Errorf("Expected 2 NostrUser nodes after migration, got %d", nostrUserCount)
|
||||
}
|
||||
|
||||
// Verify Author nodes were deleted (they should have no relationships after migration)
|
||||
authorCountAfter := countNodes(t, "Author")
|
||||
if authorCountAfter != 0 {
|
||||
t.Errorf("Expected 0 Author nodes after migration, got %d", authorCountAfter)
|
||||
}
|
||||
}
|
||||
@@ -55,43 +55,71 @@ func (n *N) buildCypherQuery(f *filter.F, includeDeleteEvents bool) (string, map
|
||||
matchClause := "MATCH (e:Event)"
|
||||
|
||||
// IDs filter - uses exact match or prefix matching
|
||||
if len(f.Ids.T) > 0 {
|
||||
idConditions := make([]string, len(f.Ids.T))
|
||||
// Note: IDs can be either binary (32 bytes) or hex strings (64 chars)
|
||||
// We need to normalize to lowercase hex for consistent Neo4j matching
|
||||
if f.Ids != nil && len(f.Ids.T) > 0 {
|
||||
idConditions := make([]string, 0, len(f.Ids.T))
|
||||
for i, id := range f.Ids.T {
|
||||
if len(id) == 0 {
|
||||
continue // Skip empty IDs
|
||||
}
|
||||
paramName := fmt.Sprintf("id_%d", i)
|
||||
hexID := hex.Enc(id)
|
||||
|
||||
// Normalize to lowercase hex using our utility function
|
||||
// This handles both binary-encoded IDs and hex string IDs (including uppercase)
|
||||
hexID := NormalizePubkeyHex(id)
|
||||
if hexID == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle prefix matching for partial IDs
|
||||
if len(id) < 32 { // Full event ID is 32 bytes (64 hex chars)
|
||||
idConditions[i] = fmt.Sprintf("e.id STARTS WITH $%s", paramName)
|
||||
// After normalization, check hex length (should be 64 for full ID)
|
||||
if len(hexID) < 64 {
|
||||
idConditions = append(idConditions, fmt.Sprintf("e.id STARTS WITH $%s", paramName))
|
||||
} else {
|
||||
idConditions[i] = fmt.Sprintf("e.id = $%s", paramName)
|
||||
idConditions = append(idConditions, fmt.Sprintf("e.id = $%s", paramName))
|
||||
}
|
||||
params[paramName] = hexID
|
||||
}
|
||||
whereClauses = append(whereClauses, "("+strings.Join(idConditions, " OR ")+")")
|
||||
if len(idConditions) > 0 {
|
||||
whereClauses = append(whereClauses, "("+strings.Join(idConditions, " OR ")+")")
|
||||
}
|
||||
}
|
||||
|
||||
// Authors filter - supports prefix matching for partial pubkeys
|
||||
if len(f.Authors.T) > 0 {
|
||||
authorConditions := make([]string, len(f.Authors.T))
|
||||
// Note: Authors can be either binary (32 bytes) or hex strings (64 chars)
|
||||
// We need to normalize to lowercase hex for consistent Neo4j matching
|
||||
if f.Authors != nil && len(f.Authors.T) > 0 {
|
||||
authorConditions := make([]string, 0, len(f.Authors.T))
|
||||
for i, author := range f.Authors.T {
|
||||
if len(author) == 0 {
|
||||
continue // Skip empty authors
|
||||
}
|
||||
paramName := fmt.Sprintf("author_%d", i)
|
||||
hexAuthor := hex.Enc(author)
|
||||
|
||||
// Normalize to lowercase hex using our utility function
|
||||
// This handles both binary-encoded pubkeys and hex string pubkeys (including uppercase)
|
||||
hexAuthor := NormalizePubkeyHex(author)
|
||||
if hexAuthor == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle prefix matching for partial pubkeys
|
||||
if len(author) < 32 { // Full pubkey is 32 bytes (64 hex chars)
|
||||
authorConditions[i] = fmt.Sprintf("e.pubkey STARTS WITH $%s", paramName)
|
||||
// After normalization, check hex length (should be 64 for full pubkey)
|
||||
if len(hexAuthor) < 64 {
|
||||
authorConditions = append(authorConditions, fmt.Sprintf("e.pubkey STARTS WITH $%s", paramName))
|
||||
} else {
|
||||
authorConditions[i] = fmt.Sprintf("e.pubkey = $%s", paramName)
|
||||
authorConditions = append(authorConditions, fmt.Sprintf("e.pubkey = $%s", paramName))
|
||||
}
|
||||
params[paramName] = hexAuthor
|
||||
}
|
||||
whereClauses = append(whereClauses, "("+strings.Join(authorConditions, " OR ")+")")
|
||||
if len(authorConditions) > 0 {
|
||||
whereClauses = append(whereClauses, "("+strings.Join(authorConditions, " OR ")+")")
|
||||
}
|
||||
}
|
||||
|
||||
// Kinds filter - matches event types
|
||||
if len(f.Kinds.K) > 0 {
|
||||
if f.Kinds != nil && len(f.Kinds.K) > 0 {
|
||||
kinds := make([]int64, len(f.Kinds.K))
|
||||
for i, k := range f.Kinds.K {
|
||||
kinds[i] = int64(k.K)
|
||||
|
||||
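The net effect of the ID/author changes above: every value is first normalized to lowercase hex, empty values are skipped, and the prefix-versus-exact decision is now made on the hex length rather than the raw byte length. A hypothetical standalone helper capturing that decision for IDs (the name idCondition does not exist in the codebase; fmt is assumed imported):

// idCondition shows how one normalized ID becomes a Cypher condition.
// A full event ID is 64 hex characters; anything shorter is a prefix.
func idCondition(paramName, hexID string) string {
	if len(hexID) < 64 {
		return fmt.Sprintf("e.id STARTS WITH $%s", paramName) // prefix match
	}
	return fmt.Sprintf("e.id = $%s", paramName) // exact match
}

// e.g. idCondition("id_0", "abcd") -> "e.id STARTS WITH $id_0"
//      idCondition("id_1", <64 hex chars>) -> "e.id = $id_1"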
pkg/neo4j/query_events_test.go (new file, 314 lines)
@@ -0,0 +1,314 @@
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/filter"
|
||||
"git.mleku.dev/mleku/nostr/encoders/kind"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
"git.mleku.dev/mleku/nostr/encoders/timestamp"
|
||||
)
|
||||
|
||||
// Valid test pubkeys and event IDs (64-character lowercase hex)
|
||||
const (
|
||||
validPubkey1 = "0000000000000000000000000000000000000000000000000000000000000001"
|
||||
validPubkey2 = "0000000000000000000000000000000000000000000000000000000000000002"
|
||||
validPubkey3 = "0000000000000000000000000000000000000000000000000000000000000003"
|
||||
validEventID1 = "1111111111111111111111111111111111111111111111111111111111111111"
|
||||
validEventID2 = "2222222222222222222222222222222222222222222222222222222222222222"
|
||||
validEventID3 = "3333333333333333333333333333333333333333333333333333333333333333"
|
||||
)
|
||||
|
||||
// TestQueryEventsWithNilFilter tests that QueryEvents handles nil filter fields gracefully
|
||||
// This test covers the nil pointer fix in query-events.go
|
||||
func TestQueryEventsWithNilFilter(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
// Setup some test events
|
||||
setupTestEvent(t, validEventID1, validPubkey1, 1, "[]")
|
||||
setupTestEvent(t, validEventID2, validPubkey2, 1, "[]")
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Test 1: Completely empty filter (all nil fields)
|
||||
t.Run("EmptyFilter", func(t *testing.T) {
|
||||
f := &filter.F{}
|
||||
events, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents with empty filter should not panic: %v", err)
|
||||
}
|
||||
if len(events) == 0 {
|
||||
t.Error("Expected to find events with empty filter")
|
||||
}
|
||||
})
|
||||
|
||||
// Test 2: Filter with nil Ids
|
||||
t.Run("NilIds", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Ids: nil, // Explicitly nil
|
||||
}
|
||||
_, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents with nil Ids should not panic: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 3: Filter with nil Authors
|
||||
t.Run("NilAuthors", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Authors: nil, // Explicitly nil
|
||||
}
|
||||
_, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents with nil Authors should not panic: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 4: Filter with nil Kinds
|
||||
t.Run("NilKinds", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Kinds: nil, // Explicitly nil
|
||||
}
|
||||
_, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents with nil Kinds should not panic: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 5: Filter with empty Ids slice
|
||||
t.Run("EmptyIds", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Ids: &tag.S{T: [][]byte{}},
|
||||
}
|
||||
_, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents with empty Ids should not panic: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 6: Filter with empty Authors slice
|
||||
t.Run("EmptyAuthors", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Authors: &tag.S{T: [][]byte{}},
|
||||
}
|
||||
_, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents with empty Authors should not panic: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test 7: Filter with empty Kinds slice
|
||||
t.Run("EmptyKinds", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Kinds: &kind.S{K: []*kind.T{}},
|
||||
}
|
||||
_, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents with empty Kinds should not panic: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestQueryEventsWithValidFilters tests that QueryEvents works correctly with valid filters
|
||||
func TestQueryEventsWithValidFilters(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
// Setup test events
|
||||
setupTestEvent(t, validEventID1, validPubkey1, 1, "[]")
|
||||
setupTestEvent(t, validEventID2, validPubkey2, 3, "[]")
|
||||
setupTestEvent(t, validEventID3, validPubkey1, 1, "[]")
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Test 1: Filter by ID
|
||||
t.Run("FilterByID", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Ids: tag.NewFromBytesSlice([]byte(validEventID1)),
|
||||
}
|
||||
events, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents failed: %v", err)
|
||||
}
|
||||
if len(events) != 1 {
|
||||
t.Errorf("Expected 1 event, got %d", len(events))
|
||||
}
|
||||
})
|
||||
|
||||
// Test 2: Filter by Author
|
||||
t.Run("FilterByAuthor", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Authors: tag.NewFromBytesSlice([]byte(validPubkey1)),
|
||||
}
|
||||
events, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents failed: %v", err)
|
||||
}
|
||||
if len(events) != 2 {
|
||||
t.Errorf("Expected 2 events from pubkey1, got %d", len(events))
|
||||
}
|
||||
})
|
||||
|
||||
// Test 3: Filter by Kind
|
||||
t.Run("FilterByKind", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1)),
|
||||
}
|
||||
events, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents failed: %v", err)
|
||||
}
|
||||
if len(events) != 2 {
|
||||
t.Errorf("Expected 2 kind-1 events, got %d", len(events))
|
||||
}
|
||||
})
|
||||
|
||||
// Test 4: Combined filters (kind + author)
|
||||
t.Run("FilterByKindAndAuthor", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1)),
|
||||
Authors: tag.NewFromBytesSlice([]byte(validPubkey1)),
|
||||
}
|
||||
events, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents failed: %v", err)
|
||||
}
|
||||
if len(events) != 2 {
|
||||
t.Errorf("Expected 2 kind-1 events from pubkey1, got %d", len(events))
|
||||
}
|
||||
})
|
||||
|
||||
// Test 5: Filter with limit
|
||||
t.Run("FilterWithLimit", func(t *testing.T) {
|
||||
limit := 1
|
||||
f := &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1)),
|
||||
Limit: &limit,
|
||||
}
|
||||
events, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents failed: %v", err)
|
||||
}
|
||||
if len(events) != 1 {
|
||||
t.Errorf("Expected 1 event due to limit, got %d", len(events))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestBuildCypherQueryWithNilFields tests the buildCypherQuery function with nil fields
|
||||
func TestBuildCypherQueryWithNilFields(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Test that buildCypherQuery doesn't panic with nil fields
|
||||
t.Run("AllNilFields", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Ids: nil,
|
||||
Authors: nil,
|
||||
Kinds: nil,
|
||||
Since: nil,
|
||||
Until: nil,
|
||||
Tags: nil,
|
||||
Limit: nil,
|
||||
}
|
||||
cypher, params := testDB.buildCypherQuery(f, false)
|
||||
if cypher == "" {
|
||||
t.Error("Expected non-empty Cypher query")
|
||||
}
|
||||
if params == nil {
|
||||
t.Error("Expected non-nil params map")
|
||||
}
|
||||
})
|
||||
|
||||
// Test with empty slices
|
||||
t.Run("EmptySlices", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Ids: &tag.S{T: [][]byte{}},
|
||||
Authors: &tag.S{T: [][]byte{}},
|
||||
Kinds: &kind.S{K: []*kind.T{}},
|
||||
}
|
||||
cypher, params := testDB.buildCypherQuery(f, false)
|
||||
if cypher == "" {
|
||||
t.Error("Expected non-empty Cypher query")
|
||||
}
|
||||
if params == nil {
|
||||
t.Error("Expected non-nil params map")
|
||||
}
|
||||
})
|
||||
|
||||
// Test with time filters
|
||||
t.Run("TimeFilters", func(t *testing.T) {
|
||||
since := timestamp.Now()
|
||||
until := timestamp.Now()
|
||||
f := &filter.F{
|
||||
Since: &since,
|
||||
Until: &until,
|
||||
}
|
||||
cypher, params := testDB.buildCypherQuery(f, false)
|
||||
if _, ok := params["since"]; !ok {
|
||||
t.Error("Expected 'since' param")
|
||||
}
|
||||
if _, ok := params["until"]; !ok {
|
||||
t.Error("Expected 'until' param")
|
||||
}
|
||||
_ = cypher
|
||||
})
|
||||
}
|
||||
|
||||
// TestQueryEventsUppercaseHexNormalization tests that uppercase hex in filters is normalized
|
||||
func TestQueryEventsUppercaseHexNormalization(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
// Setup test event with lowercase pubkey (as Neo4j stores)
|
||||
lowercasePubkey := "abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"
|
||||
lowercaseEventID := "fedcba9876543210fedcba9876543210fedcba9876543210fedcba9876543210"
|
||||
setupTestEvent(t, lowercaseEventID, lowercasePubkey, 1, "[]")
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Test query with uppercase pubkey - should be normalized and still match
|
||||
t.Run("UppercaseAuthor", func(t *testing.T) {
|
||||
uppercasePubkey := "ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789"
|
||||
f := &filter.F{
|
||||
Authors: tag.NewFromBytesSlice([]byte(uppercasePubkey)),
|
||||
}
|
||||
events, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents failed: %v", err)
|
||||
}
|
||||
if len(events) != 1 {
|
||||
t.Errorf("Expected to find 1 event with uppercase pubkey filter, got %d", len(events))
|
||||
}
|
||||
})
|
||||
|
||||
// Test query with uppercase event ID - should be normalized and still match
|
||||
t.Run("UppercaseEventID", func(t *testing.T) {
|
||||
uppercaseEventID := "FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210FEDCBA9876543210"
|
||||
f := &filter.F{
|
||||
Ids: tag.NewFromBytesSlice([]byte(uppercaseEventID)),
|
||||
}
|
||||
events, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
t.Fatalf("QueryEvents failed: %v", err)
|
||||
}
|
||||
if len(events) != 1 {
|
||||
t.Errorf("Expected to find 1 event with uppercase ID filter, got %d", len(events))
|
||||
}
|
||||
})
|
||||
}
|
||||
pkg/neo4j/run-tests.sh (new executable file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
#!/bin/bash
|
||||
# Run Neo4j integration tests with Docker
|
||||
# Usage: ./run-tests.sh
|
||||
|
||||
set -e
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
cd "$SCRIPT_DIR"
|
||||
|
||||
echo "Starting Neo4j test database..."
|
||||
docker compose up -d
|
||||
|
||||
echo "Waiting for Neo4j to be ready..."
|
||||
for i in {1..30}; do
|
||||
if docker compose exec -T neo4j-test cypher-shell -u neo4j -p testpassword "RETURN 1" > /dev/null 2>&1; then
|
||||
echo "Neo4j is ready!"
|
||||
break
|
||||
fi
|
||||
if [ $i -eq 30 ]; then
|
||||
echo "Timeout waiting for Neo4j"
|
||||
docker compose logs
|
||||
docker compose down
|
||||
exit 1
|
||||
fi
|
||||
echo "Waiting... ($i/30)"
|
||||
sleep 2
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "Running tests..."
|
||||
echo "================="
|
||||
|
||||
# Set environment variables for tests
|
||||
export NEO4J_TEST_URI="bolt://localhost:7687"
|
||||
export NEO4J_TEST_USER="neo4j"
|
||||
export NEO4J_TEST_PASSWORD="testpassword"
|
||||
|
||||
# Run tests with verbose output
|
||||
cd ../..
|
||||
CGO_ENABLED=0 go test -v ./pkg/neo4j/... -count=1
|
||||
TEST_EXIT_CODE=$?
|
||||
|
||||
cd "$SCRIPT_DIR"
|
||||
|
||||
echo ""
|
||||
echo "================="
|
||||
echo "Stopping Neo4j test database..."
|
||||
docker compose down
|
||||
|
||||
exit $TEST_EXIT_CODE
|
||||
@@ -158,6 +158,14 @@ CREATE (e)-[:AUTHORED_BY]->(a)
|
||||
// This is required because Cypher doesn't allow MATCH after CREATE without WITH
|
||||
needsWithClause := true
|
||||
|
||||
// Collect all e-tags, p-tags, and other tags first so we can generate proper Cypher
|
||||
// Neo4j requires WITH clauses between certain clause types (FOREACH -> MATCH/MERGE)
|
||||
type tagInfo struct {
|
||||
tagType string
|
||||
value string
|
||||
}
|
||||
var eTags, pTags, otherTags []tagInfo
|
||||
|
||||
// Only process tags if they exist
|
||||
if ev.Tags != nil {
|
||||
for _, tagItem := range *ev.Tags {
|
||||
@@ -168,30 +176,39 @@ CREATE (e)-[:AUTHORED_BY]->(a)
|
||||
tagType := string(tagItem.T[0])
|
||||
|
||||
switch tagType {
|
||||
case "e": // Event reference - creates REFERENCES relationship
|
||||
// Use ExtractETagValue to handle binary encoding and normalize to lowercase hex
|
||||
case "e": // Event reference
|
||||
tagValue := ExtractETagValue(tagItem)
|
||||
if tagValue == "" {
|
||||
continue // Skip invalid e-tags
|
||||
if tagValue != "" {
|
||||
eTags = append(eTags, tagInfo{"e", tagValue})
|
||||
}
|
||||
case "p": // Pubkey mention
|
||||
tagValue := ExtractPTagValue(tagItem)
|
||||
if tagValue != "" {
|
||||
pTags = append(pTags, tagInfo{"p", tagValue})
|
||||
}
|
||||
default: // Other tags
|
||||
tagValue := string(tagItem.T[1])
|
||||
otherTags = append(otherTags, tagInfo{tagType, tagValue})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create reference to another event (if it exists)
|
||||
paramName := fmt.Sprintf("eTag_%d", eTagIndex)
|
||||
params[paramName] = tagValue
|
||||
// Generate Cypher for e-tags (OPTIONAL MATCH + FOREACH pattern)
|
||||
// These need WITH clause before first one, and WITH after all FOREACHes
|
||||
for i, tag := range eTags {
|
||||
paramName := fmt.Sprintf("eTag_%d", eTagIndex)
|
||||
params[paramName] = tag.value
|
||||
|
||||
// Add WITH clause before first OPTIONAL MATCH only
|
||||
// This is required because Cypher doesn't allow MATCH after CREATE without WITH.
|
||||
// However, you CAN chain multiple OPTIONAL MATCH + FOREACH pairs without
|
||||
// additional WITH clauses between them - Cypher allows OPTIONAL MATCH after FOREACH.
|
||||
if needsWithClause {
|
||||
cypher += `
|
||||
// Add WITH clause before first OPTIONAL MATCH only
|
||||
if needsWithClause {
|
||||
cypher += `
|
||||
// Carry forward event and author nodes for tag processing
|
||||
WITH e, a
|
||||
`
|
||||
needsWithClause = false
|
||||
}
|
||||
needsWithClause = false
|
||||
}
|
||||
|
||||
cypher += fmt.Sprintf(`
|
||||
cypher += fmt.Sprintf(`
|
||||
// Reference to event (e-tag)
|
||||
OPTIONAL MATCH (ref%d:Event {id: $%s})
|
||||
FOREACH (ignoreMe IN CASE WHEN ref%d IS NOT NULL THEN [1] ELSE [] END |
|
||||
@@ -199,47 +216,64 @@ FOREACH (ignoreMe IN CASE WHEN ref%d IS NOT NULL THEN [1] ELSE [] END |
|
||||
)
|
||||
`, eTagIndex, paramName, eTagIndex, eTagIndex)
|
||||
|
||||
eTagIndex++
|
||||
eTagIndex++
|
||||
|
||||
case "p": // Pubkey mention - creates MENTIONS relationship
|
||||
// Use ExtractPTagValue to handle binary encoding and normalize to lowercase hex
|
||||
tagValue := ExtractPTagValue(tagItem)
|
||||
if tagValue == "" {
|
||||
continue // Skip invalid p-tags
|
||||
}
|
||||
// After the last e-tag FOREACH, add WITH clause if there are p-tags or other tags
|
||||
if i == len(eTags)-1 && (len(pTags) > 0 || len(otherTags) > 0) {
|
||||
cypher += `
|
||||
// Required WITH after FOREACH before MERGE/MATCH
|
||||
WITH e, a
|
||||
`
|
||||
}
|
||||
}
|
||||
|
||||
// Create mention to another NostrUser
|
||||
paramName := fmt.Sprintf("pTag_%d", pTagIndex)
|
||||
params[paramName] = tagValue
|
||||
// Generate Cypher for p-tags (MERGE pattern)
|
||||
for _, tag := range pTags {
|
||||
paramName := fmt.Sprintf("pTag_%d", pTagIndex)
|
||||
params[paramName] = tag.value
|
||||
|
||||
cypher += fmt.Sprintf(`
|
||||
// If no e-tags were processed, we still need the initial WITH
|
||||
if needsWithClause {
|
||||
cypher += `
|
||||
// Carry forward event and author nodes for tag processing
|
||||
WITH e, a
|
||||
`
|
||||
needsWithClause = false
|
||||
}
|
||||
|
||||
cypher += fmt.Sprintf(`
|
||||
// Mention of NostrUser (p-tag)
|
||||
MERGE (mentioned%d:NostrUser {pubkey: $%s})
|
||||
ON CREATE SET mentioned%d.created_at = timestamp()
|
||||
CREATE (e)-[:MENTIONS]->(mentioned%d)
|
||||
`, pTagIndex, paramName, pTagIndex, pTagIndex)
|
||||
|
||||
pTagIndex++
|
||||
pTagIndex++
|
||||
}
|
||||
|
||||
default: // Other tags - creates Tag nodes and TAGGED_WITH relationships
|
||||
// For non-e/p tags, use direct string conversion (no binary encoding)
|
||||
tagValue := string(tagItem.T[1])
|
||||
// Generate Cypher for other tags (MERGE pattern)
|
||||
for _, tag := range otherTags {
|
||||
typeParam := fmt.Sprintf("tagType_%d", tagNodeIndex)
|
||||
valueParam := fmt.Sprintf("tagValue_%d", tagNodeIndex)
|
||||
params[typeParam] = tag.tagType
|
||||
params[valueParam] = tag.value
|
||||
|
||||
// Create tag node and relationship
|
||||
typeParam := fmt.Sprintf("tagType_%d", tagNodeIndex)
|
||||
valueParam := fmt.Sprintf("tagValue_%d", tagNodeIndex)
|
||||
params[typeParam] = tagType
|
||||
params[valueParam] = tagValue
|
||||
// If no e-tags or p-tags were processed, we still need the initial WITH
|
||||
if needsWithClause {
|
||||
cypher += `
|
||||
// Carry forward event and author nodes for tag processing
|
||||
WITH e, a
|
||||
`
|
||||
needsWithClause = false
|
||||
}
|
||||
|
||||
cypher += fmt.Sprintf(`
|
||||
cypher += fmt.Sprintf(`
|
||||
// Generic tag relationship
|
||||
MERGE (tag%d:Tag {type: $%s, value: $%s})
|
||||
CREATE (e)-[:TAGGED_WITH]->(tag%d)
|
||||
`, tagNodeIndex, typeParam, valueParam, tagNodeIndex)
|
||||
|
||||
tagNodeIndex++
|
||||
}
|
||||
}
|
||||
tagNodeIndex++
|
||||
}
|
||||
|
||||
// Return the created event
|
||||
|
||||
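For orientation, the reordered generator above should emit Cypher of roughly the following shape for an event with one e-tag, one p-tag, and one other tag. This is an illustrative sketch assembled from the templates in this hunk; the FOREACH body and the final RETURN are filled in from comments elsewhere in the diff (the REFERENCES relationship name comes from the switch-case comment, not from code shown here).

// exampleTagCypher is illustrative only, not captured generator output.
const exampleTagCypher = `
CREATE (e:Event { ... })
CREATE (e)-[:AUTHORED_BY]->(a)
// Carry forward event and author nodes for tag processing
WITH e, a
// Reference to event (e-tag)
OPTIONAL MATCH (ref0:Event {id: $eTag_0})
FOREACH (ignoreMe IN CASE WHEN ref0 IS NOT NULL THEN [1] ELSE [] END |
  CREATE (e)-[:REFERENCES]->(ref0)
)
// Required WITH after FOREACH before MERGE/MATCH
WITH e, a
// Mention of NostrUser (p-tag)
MERGE (mentioned0:NostrUser {pubkey: $pTag_0})
ON CREATE SET mentioned0.created_at = timestamp()
CREATE (e)-[:MENTIONS]->(mentioned0)
// Generic tag relationship
MERGE (tag0:Tag {type: $tagType_0, value: $tagValue_0})
CREATE (e)-[:TAGGED_WITH]->(tag0)
RETURN e
`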
@@ -1,15 +1,246 @@
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"next.orly.dev/pkg/database"
|
||||
)
|
||||
|
||||
// skipIfNeo4jNotAvailable skips the test if Neo4j is not available
|
||||
func skipIfNeo4jNotAvailable(t *testing.T) {
|
||||
// Check if Neo4j connection details are provided
|
||||
uri := os.Getenv("ORLY_NEO4J_URI")
|
||||
if uri == "" {
|
||||
t.Skip("Neo4j not available (set ORLY_NEO4J_URI to enable tests)")
|
||||
// testDB is the shared database instance for tests
|
||||
var testDB *N
|
||||
|
||||
// TestMain sets up and tears down the test database
|
||||
func TestMain(m *testing.M) {
|
||||
// Skip integration tests if NEO4J_TEST_URI is not set
|
||||
neo4jURI := os.Getenv("NEO4J_TEST_URI")
|
||||
if neo4jURI == "" {
|
||||
neo4jURI = "bolt://localhost:7687"
|
||||
}
|
||||
neo4jUser := os.Getenv("NEO4J_TEST_USER")
|
||||
if neo4jUser == "" {
|
||||
neo4jUser = "neo4j"
|
||||
}
|
||||
neo4jPassword := os.Getenv("NEO4J_TEST_PASSWORD")
|
||||
if neo4jPassword == "" {
|
||||
neo4jPassword = "testpassword"
|
||||
}
|
||||
|
||||
// Try to connect to Neo4j
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cfg := &database.DatabaseConfig{
|
||||
DataDir: os.TempDir(),
|
||||
Neo4jURI: neo4jURI,
|
||||
Neo4jUser: neo4jUser,
|
||||
Neo4jPassword: neo4jPassword,
|
||||
}
|
||||
|
||||
var err error
|
||||
testDB, err = NewWithConfig(ctx, cancel, cfg)
|
||||
if err != nil {
|
||||
// If Neo4j is not available, skip integration tests
|
||||
os.Stderr.WriteString("Neo4j not available, skipping integration tests: " + err.Error() + "\n")
|
||||
os.Stderr.WriteString("Start Neo4j with: docker compose -f pkg/neo4j/docker-compose.yaml up -d\n")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
// Wait for database to be ready
|
||||
select {
|
||||
case <-testDB.Ready():
|
||||
// Database is ready
|
||||
case <-time.After(30 * time.Second):
|
||||
os.Stderr.WriteString("Timeout waiting for Neo4j to be ready\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Clean database before running tests
|
||||
cleanTestDatabase()
|
||||
|
||||
// Run tests
|
||||
code := m.Run()
|
||||
|
||||
// Clean up
|
||||
cleanTestDatabase()
|
||||
testDB.Close()
|
||||
cancel()
|
||||
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
// cleanTestDatabase removes all nodes and relationships
|
||||
func cleanTestDatabase() {
|
||||
ctx := context.Background()
|
||||
// Delete all nodes and relationships
|
||||
_, _ = testDB.ExecuteWrite(ctx, "MATCH (n) DETACH DELETE n", nil)
|
||||
// Clear migration markers so migrations can run fresh
|
||||
_, _ = testDB.ExecuteWrite(ctx, "MATCH (m:Migration) DELETE m", nil)
|
||||
}
|
||||
|
||||
// setupTestEvent creates a test event directly in Neo4j for testing queries
|
||||
func setupTestEvent(t *testing.T, eventID, pubkey string, kind int64, tags string) {
|
||||
t.Helper()
|
||||
ctx := context.Background()
|
||||
|
||||
cypher := `
|
||||
MERGE (a:NostrUser {pubkey: $pubkey})
|
||||
CREATE (e:Event {
|
||||
id: $eventId,
|
||||
serial: $serial,
|
||||
kind: $kind,
|
||||
created_at: $createdAt,
|
||||
content: $content,
|
||||
sig: $sig,
|
||||
pubkey: $pubkey,
|
||||
tags: $tags,
|
||||
expiration: 0
|
||||
})
|
||||
CREATE (e)-[:AUTHORED_BY]->(a)
|
||||
`
|
||||
|
||||
params := map[string]any{
|
||||
"eventId": eventID,
|
||||
"serial": time.Now().UnixNano(),
|
||||
"kind": kind,
|
||||
"createdAt": time.Now().Unix(),
|
||||
"content": "test content",
|
||||
"sig": "0000000000000000000000000000000000000000000000000000000000000000" +
|
||||
"0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"pubkey": pubkey,
|
||||
"tags": tags,
|
||||
}
|
||||
|
||||
_, err := testDB.ExecuteWrite(ctx, cypher, params)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup test event: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// setupInvalidNostrUser creates a NostrUser with an invalid (binary) pubkey for testing migrations
|
||||
func setupInvalidNostrUser(t *testing.T, invalidPubkey string) {
|
||||
t.Helper()
|
||||
ctx := context.Background()
|
||||
|
||||
cypher := `CREATE (u:NostrUser {pubkey: $pubkey, created_at: timestamp()})`
|
||||
params := map[string]any{"pubkey": invalidPubkey}
|
||||
|
||||
_, err := testDB.ExecuteWrite(ctx, cypher, params)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup invalid NostrUser: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// setupInvalidEvent creates an Event with an invalid pubkey/ID for testing migrations
|
||||
func setupInvalidEvent(t *testing.T, invalidID, invalidPubkey string) {
|
||||
t.Helper()
|
||||
ctx := context.Background()
|
||||
|
||||
cypher := `
|
||||
CREATE (e:Event {
|
||||
id: $id,
|
||||
pubkey: $pubkey,
|
||||
kind: 1,
|
||||
created_at: timestamp(),
|
||||
content: 'test',
|
||||
sig: 'invalid',
|
||||
tags: '[]',
|
||||
serial: $serial,
|
||||
expiration: 0
|
||||
})
|
||||
`
|
||||
params := map[string]any{
|
||||
"id": invalidID,
|
||||
"pubkey": invalidPubkey,
|
||||
"serial": time.Now().UnixNano(),
|
||||
}
|
||||
|
||||
_, err := testDB.ExecuteWrite(ctx, cypher, params)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup invalid Event: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// setupInvalidTag creates a Tag node with invalid value for testing migrations
|
||||
func setupInvalidTag(t *testing.T, tagType string, invalidValue string) {
|
||||
t.Helper()
|
||||
ctx := context.Background()
|
||||
|
||||
cypher := `CREATE (tag:Tag {type: $type, value: $value})`
|
||||
params := map[string]any{
|
||||
"type": tagType,
|
||||
"value": invalidValue,
|
||||
}
|
||||
|
||||
_, err := testDB.ExecuteWrite(ctx, cypher, params)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to setup invalid Tag: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// countNodes counts nodes with a given label
|
||||
func countNodes(t *testing.T, label string) int64 {
|
||||
t.Helper()
|
||||
ctx := context.Background()
|
||||
|
||||
cypher := "MATCH (n:" + label + ") RETURN count(n) AS count"
|
||||
result, err := testDB.ExecuteRead(ctx, cypher, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count nodes: %v", err)
|
||||
}
|
||||
|
||||
if result.Next(ctx) {
|
||||
if count, ok := result.Record().Values[0].(int64); ok {
|
||||
return count
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// countInvalidNostrUsers counts NostrUser nodes with invalid pubkeys
|
||||
func countInvalidNostrUsers(t *testing.T) int64 {
|
||||
t.Helper()
|
||||
ctx := context.Background()
|
||||
|
||||
cypher := `
|
||||
MATCH (u:NostrUser)
|
||||
WHERE size(u.pubkey) <> 64
|
||||
OR NOT u.pubkey =~ '^[0-9a-f]{64}$'
|
||||
RETURN count(u) AS count
|
||||
`
|
||||
result, err := testDB.ExecuteRead(ctx, cypher, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count invalid NostrUsers: %v", err)
|
||||
}
|
||||
|
||||
if result.Next(ctx) {
|
||||
if count, ok := result.Record().Values[0].(int64); ok {
|
||||
return count
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// countInvalidTags counts Tag nodes (e/p type) with invalid values
|
||||
func countInvalidTags(t *testing.T) int64 {
|
||||
t.Helper()
|
||||
ctx := context.Background()
|
||||
|
||||
cypher := `
|
||||
MATCH (t:Tag)
|
||||
WHERE t.type IN ['e', 'p']
|
||||
AND (size(t.value) <> 64 OR NOT t.value =~ '^[0-9a-f]{64}$')
|
||||
RETURN count(t) AS count
|
||||
`
|
||||
result, err := testDB.ExecuteRead(ctx, cypher, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count invalid Tags: %v", err)
|
||||
}
|
||||
|
||||
if result.Next(ctx) {
|
||||
if count, ok := result.Record().Values[0].(int64); ok {
|
||||
return count
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||