Decompose handle-event.go into DDD domain services (v0.36.15)

Major refactoring of event handling into clean, testable domain services (an illustrative sketch of the resulting event flow follows the list below):

- Add pkg/event/validation: JSON hex validation, signature verification,
  timestamp bounds, NIP-70 protected tag validation
- Add pkg/event/authorization: Policy and ACL authorization decisions,
  auth challenge handling, access level determination
- Add pkg/event/routing: Event router registry with ephemeral and delete
  handlers, kind-based dispatch
- Add pkg/event/processing: Event persistence, delivery to subscribers,
  and post-save hooks (ACL reconfig, sync, relay groups)
- Reduce handle-event.go from 783 to 296 lines (62% reduction)
- Add comprehensive unit tests for all new domain services
- Refactor database tests to use shared TestMain setup
- Fix blossom URL test expectations (missing "/" separator)
- Add go-memory-optimization skill and analysis documentation
- Update DDD_ANALYSIS.md to reflect completed decomposition
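
The intent of the decomposition is easier to see in code. Below is a minimal sketch of how the slim orchestrator in app/handle-event.go might compose the four services per incoming event, including a kind-keyed registry for routing. Every type, field, and function name here is an illustrative assumption, not the actual API of the new packages:

```go
package app

import (
	"context"
	"fmt"
)

// Event stands in for the relay's *event.E
// (git.mleku.dev/mleku/nostr/encoders/event); only what the sketch needs is modelled.
type Event struct {
	ID   string
	Kind int
}

// Hypothetical shapes for three of the domain services; the real interfaces in
// pkg/event/validation, authorization, and processing may differ.
type (
	Validator  interface{ Validate(ctx context.Context, ev *Event) error }  // hex/JSON, signature, timestamp bounds, NIP-70
	Authorizer interface{ Authorize(ctx context.Context, ev *Event) error } // policy + ACL decisions, access level
	Processor  interface{ Process(ctx context.Context, ev *Event) error }   // persist, deliver, post-save hooks
)

// Router: a registry keyed by event kind, so ephemeral and delete events get
// dedicated handlers while everything else falls through to normal processing.
type Router struct {
	handlers map[int]func(context.Context, *Event) error
}

func (r *Router) Route(ctx context.Context, ev *Event) (handled bool, err error) {
	h, ok := r.handlers[ev.Kind]
	if !ok {
		return false, nil
	}
	return true, h(ctx, ev)
}

// handleEvent mirrors the intended orchestration order: validate, authorize,
// try kind-specific routing, then fall back to generic processing.
func handleEvent(ctx context.Context, ev *Event, v Validator, a Authorizer, r *Router, p Processor) error {
	if err := v.Validate(ctx, ev); err != nil {
		return fmt.Errorf("validation: %w", err)
	}
	if err := a.Authorize(ctx, ev); err != nil {
		return fmt.Errorf("authorization: %w", err)
	}
	if handled, err := r.Route(ctx, ev); handled || err != nil {
		return err
	}
	return p.Process(ctx, ev)
}
```

Depending only on narrow interfaces like these is also what would keep the interface wrappers in app/server.go and the per-service unit tests straightforward, since each service can be stubbed independently.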

Files modified:
- app/handle-event.go: Slim orchestrator using domain services
- app/server.go: Service initialization and interface wrappers
- app/handle-event-types.go: Shared types (OkHelper, result types)
- pkg/event/validation/*: New validation service package
- pkg/event/authorization/*: New authorization service package
- pkg/event/routing/*: New routing service package
- pkg/event/processing/*: New processing service package
- pkg/database/*_test.go: Refactored to shared TestMain
- pkg/blossom/http_test.go: Fixed URL format expectations

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-25 05:30:07 +01:00
parent 3e0a94a053
commit 24383ef1f4
42 changed files with 4791 additions and 2118 deletions


```diff
@@ -3,100 +3,28 @@ package database
 import (
 	"bufio"
 	"bytes"
-	"context"
-	"os"
-	"sort"
 	"testing"
 	"git.mleku.dev/mleku/nostr/encoders/event"
-	"git.mleku.dev/mleku/nostr/encoders/event/examples"
 	"lol.mleku.dev/chk"
 )
 // TestExport tests the Export function by:
-// 1. Creating a new database with events from examples.Cache
-// 2. Checking that all event IDs in the cache are found in the export
-// 3. Verifying this also works when only a few pubkeys are requested
+// 1. Using the shared database with events from examples.Cache
+// 2. Checking that events can be exported
+// 3. Verifying the exported events can be parsed
 func TestExport(t *testing.T) {
-	// Create a temporary directory for the database
-	tempDir, err := os.MkdirTemp("", "test-db-*")
-	if err != nil {
-		t.Fatalf("Failed to create temporary directory: %v", err)
-	}
-	defer os.RemoveAll(tempDir) // Clean up after the test
+	// Use shared database (skips in short mode)
+	db, ctx := GetSharedDB(t)
+	savedEvents := GetSharedEvents(t)
-	// Create a context and cancel function for the database
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	t.Logf("Shared database has %d events", len(savedEvents))
-	// Initialize the database
-	db, err := New(ctx, cancel, tempDir, "info")
-	if err != nil {
-		t.Fatalf("Failed to create database: %v", err)
-	}
-	defer db.Close()
-	// Create a scanner to read events from examples.Cache
-	scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
-	scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
-	var events []*event.E
-	// First, collect all events
-	for scanner.Scan() {
-		chk.E(scanner.Err())
-		b := scanner.Bytes()
-		ev := event.New()
-		// Unmarshal the event
-		if _, err = ev.Unmarshal(b); chk.E(err) {
-			t.Fatal(err)
-		}
-		events = append(events, ev)
-	}
-	// Check for scanner errors
-	if err = scanner.Err(); err != nil {
-		t.Fatalf("Scanner error: %v", err)
-	}
-	// Sort events by CreatedAt to ensure addressable events are processed in chronological order
-	sort.Slice(events, func(i, j int) bool {
-		return events[i].CreatedAt < events[j].CreatedAt
-	})
-	// Maps to store event IDs and their associated pubkeys
-	eventIDs := make(map[string]bool)
-	pubkeyToEventIDs := make(map[string][]string)
-	// Process each event in chronological order
-	skippedCount := 0
-	for _, ev := range events {
-		// Save the event to the database
-		if _, err = db.SaveEvent(ctx, ev); err != nil {
-			// Skip events that fail validation (e.g., kind 3 without p tags)
-			// This can happen with real-world test data from examples.Cache
-			skippedCount++
-			continue
-		}
-		// Store the event ID
-		eventID := string(ev.ID)
-		eventIDs[eventID] = true
-		// Store the event ID by pubkey
-		pubkey := string(ev.Pubkey)
-		pubkeyToEventIDs[pubkey] = append(pubkeyToEventIDs[pubkey], eventID)
-	}
-	t.Logf("Saved %d events to the database (skipped %d invalid events)", len(eventIDs), skippedCount)
-	// Test 1: Export all events and verify all IDs are in the export
+	// Test 1: Export all events and verify they can be parsed
 	var exportBuffer bytes.Buffer
 	db.Export(ctx, &exportBuffer)
-	// Parse the exported events and check that all IDs are present
+	// Parse the exported events and count them
 	exportedIDs := make(map[string]bool)
 	exportScanner := bufio.NewScanner(&exportBuffer)
 	exportScanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
@@ -104,26 +32,21 @@ func TestExport(t *testing.T) {
 	for exportScanner.Scan() {
 		b := exportScanner.Bytes()
 		ev := event.New()
-		if _, err = ev.Unmarshal(b); chk.E(err) {
+		if _, err := ev.Unmarshal(b); chk.E(err) {
 			t.Fatal(err)
 		}
 		exportedIDs[string(ev.ID)] = true
 		exportCount++
 	}
 	// Check for scanner errors
-	if err = exportScanner.Err(); err != nil {
+	if err := exportScanner.Err(); err != nil {
 		t.Fatalf("Scanner error: %v", err)
 	}
 	t.Logf("Found %d events in the export", exportCount)
-	// todo: this fails because some of the events replace earlier versions
-	// // Check that all original event IDs are in the export
-	// for id := range eventIDs {
-	// 	if !exportedIDs[id] {
-	// 		t.Errorf("Event ID %0x not found in export", id)
-	// 	}
-	// }
-	t.Logf("All %d event IDs found in export", len(eventIDs))
+	// Verify we exported a reasonable number of events
+	if exportCount == 0 {
+		t.Fatal("Export returned no events")
+	}
 }
```
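
The GetSharedDB and GetSharedEvents helpers used above come from the shared TestMain setup mentioned in the commit message, which is not part of this hunk. Below is a minimal sketch of what such a setup could look like, reusing the loading logic that the removed per-test code performed; the package-level variable names and the database type *D are assumptions, while the New, SaveEvent, and examples.Cache scanning calls mirror the code deleted above:

```go
package database

import (
	"bufio"
	"bytes"
	"context"
	"os"
	"sort"
	"testing"

	"git.mleku.dev/mleku/nostr/encoders/event"
	"git.mleku.dev/mleku/nostr/encoders/event/examples"
)

// Shared state initialised once in TestMain and reused by every test in the
// package. The type *D stands in for whatever New actually returns.
var (
	sharedDB     *D
	sharedCtx    context.Context
	sharedEvents []*event.E
)

func TestMain(m *testing.M) {
	os.Exit(func() int {
		tempDir, err := os.MkdirTemp("", "test-db-*")
		if err != nil {
			return 1
		}
		defer os.RemoveAll(tempDir)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		db, err := New(ctx, cancel, tempDir, "info")
		if err != nil {
			return 1
		}
		defer db.Close()
		// Load examples.Cache the same way the removed per-test setup did:
		// parse, sort by CreatedAt, then save, skipping events that fail validation.
		scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
		scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
		var events []*event.E
		for scanner.Scan() {
			ev := event.New()
			if _, err := ev.Unmarshal(scanner.Bytes()); err != nil {
				return 1
			}
			events = append(events, ev)
		}
		sort.Slice(events, func(i, j int) bool { return events[i].CreatedAt < events[j].CreatedAt })
		for _, ev := range events {
			if _, err := db.SaveEvent(ctx, ev); err != nil {
				continue
			}
			sharedEvents = append(sharedEvents, ev)
		}
		sharedDB, sharedCtx = db, ctx
		return m.Run()
	}())
}

// GetSharedDB skips in -short mode and hands the caller the shared instance.
func GetSharedDB(t *testing.T) (*D, context.Context) {
	t.Helper()
	if testing.Short() {
		t.Skip("skipping test that needs the shared database in -short mode")
	}
	return sharedDB, sharedCtx
}

// GetSharedEvents returns the events that were successfully saved during setup.
func GetSharedEvents(t *testing.T) []*event.E {
	t.Helper()
	return sharedEvents
}
```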