diff --git a/.claude/commands/release.md b/.claude/commands/release.md
new file mode 100644
index 00000000..1e414366
--- /dev/null
+++ b/.claude/commands/release.md
@@ -0,0 +1,40 @@
+# Release Command
+
+Create a new version tag and optionally push it to the remote.
+
+## Arguments
+- `major` - Increment major version (e.g., v0.1.1 -> v1.0.0)
+- `minor` - Increment minor version (e.g., v0.1.1 -> v0.2.0)
+- (default) - Increment patch version (e.g., v0.1.1 -> v0.1.2)
+- `--push` - Push the tag to remote after creating
+
+## Instructions
+
+1. Get the latest version tag:
+ ```bash
+ git tag -l 'v*' --sort=-v:refname | head -1
+ ```
+
+2. If no tags exist, start with v0.1.0 as the base (next will be v0.1.1)
+
+3. Parse the version and increment based on the argument:
+ - Extract major, minor, patch from the tag (e.g., v1.2.3 -> 1, 2, 3)
+ - If argument is `major`: increment major, reset minor and patch to 0
+ - If argument is `minor`: increment minor, reset patch to 0
+ - Otherwise (default): increment patch
+
+4. Create the new tag:
+ ```bash
+ git tag -a v{VERSION} -m "Release v{VERSION}"
+ ```
+
+5. Show the created tag and recent commits since the last tag
+
+6. If `--push` was specified, push the tag:
+ ```bash
+ git push origin v{VERSION}
+ ```
+
+7. Display the new version and instructions for pushing if not auto-pushed
+
+$ARGUMENTS
diff --git a/.claude/skills/applesauce-core/SKILL.md b/.claude/skills/applesauce-core/SKILL.md
new file mode 100644
index 00000000..78307113
--- /dev/null
+++ b/.claude/skills/applesauce-core/SKILL.md
@@ -0,0 +1,634 @@
+---
+name: applesauce-core
+description: This skill should be used when working with applesauce-core library for Nostr client development, including event stores, queries, observables, and client utilities. Provides comprehensive knowledge of applesauce patterns for building reactive Nostr applications.
+---
+
+# applesauce-core Skill
+
+This skill provides comprehensive knowledge and patterns for working with applesauce-core, a library that provides reactive utilities and patterns for building Nostr clients.
+
+## When to Use This Skill
+
+Use this skill when:
+- Building reactive Nostr applications
+- Managing event stores and caches
+- Working with observable patterns for Nostr
+- Implementing real-time updates
+- Building timeline and feed views
+- Managing replaceable events
+- Working with profiles and metadata
+- Creating efficient Nostr queries
+
+## Core Concepts
+
+### applesauce-core Overview
+
+applesauce-core provides:
+- **Event stores** - Reactive event caching and management
+- **Queries** - Declarative event querying patterns
+- **Observables** - RxJS-based reactive patterns
+- **Profile helpers** - Profile metadata management
+- **Timeline utilities** - Feed and timeline building
+- **NIP helpers** - NIP-specific utilities
+
+### Installation
+
+```bash
+npm install applesauce-core
+```
+
+### Basic Architecture
+
+applesauce-core is built on reactive principles:
+- Events are stored in reactive stores
+- Queries return observables that update when new events arrive
+- Components subscribe to observables for real-time updates
+
+## Event Store
+
+### Creating an Event Store
+
+```javascript
+import { EventStore } from 'applesauce-core';
+
+// Create event store
+const eventStore = new EventStore();
+
+// Add events
+eventStore.add(event1);
+eventStore.add(event2);
+
+// Add multiple events
+eventStore.addMany([event1, event2, event3]);
+
+// Check if event exists
+const exists = eventStore.has(eventId);
+
+// Get event by ID
+const event = eventStore.get(eventId);
+
+// Remove event
+eventStore.remove(eventId);
+
+// Clear all events
+eventStore.clear();
+```
+
+### Event Store Queries
+
+```javascript
+// Get all events
+const allEvents = eventStore.getAll();
+
+// Get events by filter
+const filtered = eventStore.filter({
+ kinds: [1],
+ authors: [pubkey]
+});
+
+// Get events by author
+const authorEvents = eventStore.getByAuthor(pubkey);
+
+// Get events by kind
+const textNotes = eventStore.getByKind(1);
+```
+
+### Replaceable Events
+
+applesauce-core handles replaceable events automatically:
+
+```javascript
+// For kind 0 (profile), only latest is kept
+eventStore.add(profileEvent1); // stored
+eventStore.add(profileEvent2); // replaces if newer
+
+// For parameterized replaceable (30000-39999)
+eventStore.add(articleEvent); // keyed by author + kind + d-tag
+
+// Get replaceable event
+const profile = eventStore.getReplaceable(0, pubkey);
+const article = eventStore.getReplaceable(30023, pubkey, 'article-slug');
+```
+
+## Queries
+
+### Query Patterns
+
+```javascript
+import { createQuery } from 'applesauce-core';
+
+// Create a query
+const query = createQuery(eventStore, {
+ kinds: [1],
+ limit: 50
+});
+
+// Subscribe to query results
+query.subscribe(events => {
+ console.log('Current events:', events);
+});
+
+// Query updates automatically when new events added
+eventStore.add(newEvent); // Subscribers notified
+```
+
+### Timeline Query
+
+```javascript
+import { TimelineQuery } from 'applesauce-core';
+
+// Create timeline for user's notes
+const timeline = new TimelineQuery(eventStore, {
+ kinds: [1],
+ authors: [userPubkey]
+});
+
+// Get observable of timeline
+const timeline$ = timeline.events$;
+
+// Subscribe
+timeline$.subscribe(events => {
+ // Events sorted by created_at, newest first
+ renderTimeline(events);
+});
+```
+
+### Profile Query
+
+```javascript
+import { ProfileQuery } from 'applesauce-core';
+
+// Query profile metadata
+const profileQuery = new ProfileQuery(eventStore, pubkey);
+
+// Get observable
+const profile$ = profileQuery.profile$;
+
+profile$.subscribe(profile => {
+ if (profile) {
+ console.log('Name:', profile.name);
+ console.log('Picture:', profile.picture);
+ }
+});
+```
+
+## Observables
+
+### Working with RxJS
+
+applesauce-core uses RxJS observables:
+
+```javascript
+import { map, filter, distinctUntilChanged } from 'rxjs/operators';
+
+// Transform query results
+const names$ = profileQuery.profile$.pipe(
+ filter(profile => profile !== null),
+ map(profile => profile.name),
+ distinctUntilChanged()
+);
+
+// Combine multiple observables
+import { combineLatest } from 'rxjs';
+
+const combined$ = combineLatest([
+ timeline$,
+ profile$
+]).pipe(
+ map(([events, profile]) => ({
+ events,
+ authorName: profile?.name
+ }))
+);
+```
+
+### Creating Custom Observables
+
+```javascript
+import { Observable } from 'rxjs';
+
+function createEventObservable(store, filter) {
+ return new Observable(subscriber => {
+ // Initial emit
+ subscriber.next(store.filter(filter));
+
+ // Subscribe to store changes
+ const unsubscribe = store.onChange(() => {
+ subscriber.next(store.filter(filter));
+ });
+
+ // Cleanup
+ return () => unsubscribe();
+ });
+}
+```
+
+## Profile Helpers
+
+### Profile Metadata
+
+```javascript
+import { parseProfile, ProfileContent } from 'applesauce-core';
+
+// Parse kind 0 content
+const profileEvent = await getProfileEvent(pubkey);
+const profile = parseProfile(profileEvent);
+
+// Profile fields
+console.log(profile.name); // Display name
+console.log(profile.about); // Bio
+console.log(profile.picture); // Avatar URL
+console.log(profile.banner); // Banner image URL
+console.log(profile.nip05); // NIP-05 identifier
+console.log(profile.lud16); // Lightning address
+console.log(profile.website); // Website URL
+```
+
+### Profile Store
+
+```javascript
+import { ProfileStore } from 'applesauce-core';
+
+const profileStore = new ProfileStore(eventStore);
+
+// Get profile observable
+const profile$ = profileStore.getProfile(pubkey);
+
+// Get multiple profiles
+const profiles$ = profileStore.getProfiles([pubkey1, pubkey2]);
+
+// Request profile load (triggers fetch if not cached)
+profileStore.requestProfile(pubkey);
+```
+
+## Timeline Utilities
+
+### Building Feeds
+
+```javascript
+import { Timeline } from 'applesauce-core';
+
+// Create timeline
+const timeline = new Timeline(eventStore);
+
+// Add filter
+timeline.setFilter({
+ kinds: [1, 6],
+ authors: followedPubkeys
+});
+
+// Get events observable
+const events$ = timeline.events$;
+
+// Load more (pagination)
+timeline.loadMore(50);
+
+// Refresh (get latest)
+timeline.refresh();
+```
+
+### Thread Building
+
+```javascript
+import { ThreadBuilder } from 'applesauce-core';
+
+// Build thread from root event
+const thread = new ThreadBuilder(eventStore, rootEventId);
+
+// Get thread observable
+const thread$ = thread.thread$;
+
+thread$.subscribe(threadData => {
+ console.log('Root:', threadData.root);
+ console.log('Replies:', threadData.replies);
+ console.log('Reply count:', threadData.replyCount);
+});
+```
+
+### Reactions and Zaps
+
+```javascript
+import { ReactionStore, ZapStore } from 'applesauce-core';
+
+// Reactions
+const reactionStore = new ReactionStore(eventStore);
+const reactions$ = reactionStore.getReactions(eventId);
+
+reactions$.subscribe(reactions => {
+ console.log('Likes:', reactions.likes);
+ console.log('Custom:', reactions.custom);
+});
+
+// Zaps
+const zapStore = new ZapStore(eventStore);
+const zaps$ = zapStore.getZaps(eventId);
+
+zaps$.subscribe(zaps => {
+ console.log('Total sats:', zaps.totalAmount);
+ console.log('Zap count:', zaps.count);
+});
+```
+
+## NIP Helpers
+
+### NIP-05 Verification
+
+```javascript
+import { verifyNip05 } from 'applesauce-core';
+
+// Verify NIP-05
+const result = await verifyNip05('alice@example.com', expectedPubkey);
+
+if (result.valid) {
+ console.log('NIP-05 verified');
+} else {
+ console.log('Verification failed:', result.error);
+}
+```
+
+### NIP-10 Reply Parsing
+
+```javascript
+import { parseReplyTags } from 'applesauce-core';
+
+// Parse reply structure
+const parsed = parseReplyTags(event);
+
+console.log('Root event:', parsed.root);
+console.log('Reply to:', parsed.reply);
+console.log('Mentions:', parsed.mentions);
+```
+
+### NIP-65 Relay Lists
+
+```javascript
+import { parseRelayList } from 'applesauce-core';
+
+// Parse relay list event (kind 10002)
+const relays = parseRelayList(relayListEvent);
+
+console.log('Read relays:', relays.read);
+console.log('Write relays:', relays.write);
+```
+
+## Integration with nostr-tools
+
+### Using with SimplePool
+
+```javascript
+import { SimplePool } from 'nostr-tools';
+import { EventStore } from 'applesauce-core';
+
+const pool = new SimplePool();
+const eventStore = new EventStore();
+
+// Load events into store
+pool.subscribeMany(relays, [filter], {
+ onevent(event) {
+ eventStore.add(event);
+ }
+});
+
+// Query store reactively
+const timeline$ = createTimelineQuery(eventStore, filter);
+```
+
+### Publishing Events
+
+```javascript
+import { finalizeEvent } from 'nostr-tools';
+
+// Create event
+const event = finalizeEvent({
+ kind: 1,
+ content: 'Hello!',
+ created_at: Math.floor(Date.now() / 1000),
+ tags: []
+}, secretKey);
+
+// Add to local store immediately (optimistic update)
+eventStore.add(event);
+
+// Publish to relays
+await pool.publish(relays, event);
+```
+
+## Svelte Integration
+
+### Using in Svelte Components
+
+```svelte
+<script>
+  import { onDestroy } from 'svelte';
+
+  let events = [];
+  const subscription = timeline.events$.subscribe(value => (events = value));
+  onDestroy(() => subscription.unsubscribe());
+</script>
+
+{#each events as event}
+  <article>
+    {event.content}
+  </article>
+{/each}
+```
+
+### Svelte Store Adapter
+
+```javascript
+import { readable } from 'svelte/store';
+
+// Convert RxJS observable to Svelte store
+function fromObservable(observable, initialValue) {
+ return readable(initialValue, set => {
+ const subscription = observable.subscribe(set);
+ return () => subscription.unsubscribe();
+ });
+}
+
+// Usage
+const events$ = timeline.events$;
+const eventsStore = fromObservable(events$, []);
+```
+
+```svelte
+<script>
+  // eventsStore is the Svelte store created with fromObservable above
+</script>
+{#each $eventsStore as event}
+ {event.content}
+{/each}
+```
+
+## Best Practices
+
+### Store Management
+
+1. **Single store instance** - Use one EventStore per app
+2. **Clear stale data** - Implement cache limits
+3. **Handle replaceable events** - Let store manage deduplication
+4. **Unsubscribe** - Clean up subscriptions on component destroy
+
+### Query Optimization
+
+1. **Use specific filters** - Narrow queries perform better
+2. **Limit results** - Use limit for initial loads
+3. **Cache queries** - Reuse query instances
+4. **Debounce updates** - Throttle rapid changes
+
+### Memory Management
+
+1. **Limit store size** - Implement LRU or time-based eviction
+2. **Clean up observables** - Unsubscribe when done
+3. **Use weak references** - For profile caches
+4. **Paginate large feeds** - Don't load everything at once
+
+### Reactive Patterns
+
+1. **Prefer observables** - Over imperative queries
+2. **Use operators** - Transform data with RxJS
+3. **Combine streams** - For complex views
+4. **Handle loading states** - Show placeholders
+
+## Common Patterns
+
+### Event Deduplication
+
+```javascript
+// EventStore handles deduplication automatically
+eventStore.add(event1);
+eventStore.add(event1); // No duplicate
+
+// For manual deduplication
+const seen = new Set();
+events.filter(e => {
+ if (seen.has(e.id)) return false;
+ seen.add(e.id);
+ return true;
+});
+```
+
+### Optimistic Updates
+
+```javascript
+async function publishNote(content) {
+ // Create event
+ const event = await createEvent(content);
+
+ // Add to store immediately (optimistic)
+ eventStore.add(event);
+
+ try {
+ // Publish to relays
+ await pool.publish(relays, event);
+ } catch (error) {
+ // Remove on failure
+ eventStore.remove(event.id);
+ throw error;
+ }
+}
+```
+
+### Loading States
+
+```javascript
+import { BehaviorSubject, combineLatest } from 'rxjs';
+
+const loading$ = new BehaviorSubject(true);
+const events$ = timeline.events$;
+
+const state$ = combineLatest([loading$, events$]).pipe(
+ map(([loading, events]) => ({
+ loading,
+ events,
+ empty: !loading && events.length === 0
+ }))
+);
+
+// Start loading
+loading$.next(true);
+await loadEvents();
+loading$.next(false);
+```
+
+### Infinite Scroll
+
+```javascript
+function createInfiniteScroll(timeline, pageSize = 50) {
+ let loading = false;
+
+ async function loadMore() {
+ if (loading) return;
+
+ loading = true;
+ await timeline.loadMore(pageSize);
+ loading = false;
+ }
+
+ function onScroll(event) {
+ const { scrollTop, scrollHeight, clientHeight } = event.target;
+ if (scrollHeight - scrollTop <= clientHeight * 1.5) {
+ loadMore();
+ }
+ }
+
+ return { loadMore, onScroll };
+}
+```
+
+## Troubleshooting
+
+### Common Issues
+
+**Events not updating:**
+- Check subscription is active
+- Verify events are being added to store
+- Ensure filter matches events
+
+**Memory growing:**
+- Implement store size limits
+- Clean up subscriptions
+- Use weak references where appropriate
+
+**Slow queries:**
+- Add indexes for common queries
+- Use more specific filters
+- Implement pagination
+
+**Stale data:**
+- Implement refresh mechanisms
+- Set up real-time subscriptions
+- Handle replaceable event updates
+
+## References
+
+- **applesauce GitHub**: https://github.com/hzrd149/applesauce
+- **RxJS Documentation**: https://rxjs.dev
+- **nostr-tools**: https://github.com/nbd-wtf/nostr-tools
+- **Nostr Protocol**: https://github.com/nostr-protocol/nostr
+
+## Related Skills
+
+- **nostr-tools** - Lower-level Nostr operations
+- **applesauce-signers** - Event signing abstractions
+- **svelte** - Building reactive UIs
+- **nostr** - Nostr protocol fundamentals
diff --git a/.claude/skills/applesauce-signers/SKILL.md b/.claude/skills/applesauce-signers/SKILL.md
new file mode 100644
index 00000000..40d07e6e
--- /dev/null
+++ b/.claude/skills/applesauce-signers/SKILL.md
@@ -0,0 +1,757 @@
+---
+name: applesauce-signers
+description: This skill should be used when working with applesauce-signers library for Nostr event signing, including NIP-07 browser extensions, NIP-46 remote signing, and custom signer implementations. Provides comprehensive knowledge of signing patterns and signer abstractions.
+---
+
+# applesauce-signers Skill
+
+This skill provides comprehensive knowledge and patterns for working with applesauce-signers, a library that provides signing abstractions for Nostr applications.
+
+## When to Use This Skill
+
+Use this skill when:
+- Implementing event signing in Nostr applications
+- Integrating with NIP-07 browser extensions
+- Working with NIP-46 remote signers
+- Building custom signer implementations
+- Managing signing sessions
+- Handling signing requests and permissions
+- Implementing multi-signer support
+
+## Core Concepts
+
+### applesauce-signers Overview
+
+applesauce-signers provides:
+- **Signer abstraction** - Unified interface for different signers
+- **NIP-07 integration** - Browser extension support
+- **NIP-46 support** - Remote signing (Nostr Connect)
+- **Simple signers** - Direct key signing
+- **Permission handling** - Manage signing requests
+- **Observable patterns** - Reactive signing states
+
+### Installation
+
+```bash
+npm install applesauce-signers
+```
+
+### Signer Interface
+
+All signers implement a common interface:
+
+```typescript
+interface Signer {
+ // Get public key
+ getPublicKey(): Promise<string>;
+
+ // Sign event
+ signEvent(event: UnsignedEvent): Promise<NostrEvent>;
+
+ // Encrypt (NIP-04)
+ nip04Encrypt?(pubkey: string, plaintext: string): Promise<string>;
+ nip04Decrypt?(pubkey: string, ciphertext: string): Promise<string>;
+
+ // Encrypt (NIP-44)
+ nip44Encrypt?(pubkey: string, plaintext: string): Promise<string>;
+ nip44Decrypt?(pubkey: string, ciphertext: string): Promise<string>;
+}
+```
+
+## Simple Signer
+
+### Using Secret Key
+
+```javascript
+import { SimpleSigner } from 'applesauce-signers';
+import { generateSecretKey } from 'nostr-tools';
+
+// Create signer with existing key
+const signer = new SimpleSigner(secretKey);
+
+// Or generate new key
+const newSecretKey = generateSecretKey();
+const newSigner = new SimpleSigner(newSecretKey);
+
+// Get public key
+const pubkey = await signer.getPublicKey();
+
+// Sign event
+const unsignedEvent = {
+ kind: 1,
+ content: 'Hello Nostr!',
+ created_at: Math.floor(Date.now() / 1000),
+ tags: []
+};
+
+const signedEvent = await signer.signEvent(unsignedEvent);
+```
+
+### NIP-04 Encryption
+
+```javascript
+// Encrypt message
+const ciphertext = await signer.nip04Encrypt(
+ recipientPubkey,
+ 'Secret message'
+);
+
+// Decrypt message
+const plaintext = await signer.nip04Decrypt(
+ senderPubkey,
+ ciphertext
+);
+```
+
+### NIP-44 Encryption
+
+```javascript
+// Encrypt with NIP-44 (preferred)
+const ciphertext = await signer.nip44Encrypt(
+ recipientPubkey,
+ 'Secret message'
+);
+
+// Decrypt
+const plaintext = await signer.nip44Decrypt(
+ senderPubkey,
+ ciphertext
+);
+```
+
+## NIP-07 Signer
+
+### Browser Extension Integration
+
+```javascript
+import { Nip07Signer } from 'applesauce-signers';
+
+// Check if extension is available
+if (window.nostr) {
+ const signer = new Nip07Signer();
+
+ // Get public key (may prompt user)
+ const pubkey = await signer.getPublicKey();
+
+ // Sign event (prompts user)
+ const signedEvent = await signer.signEvent(unsignedEvent);
+}
+```
+
+### Handling Extension Availability
+
+```javascript
+function getAvailableSigner() {
+ if (typeof window !== 'undefined' && window.nostr) {
+ return new Nip07Signer();
+ }
+ return null;
+}
+
+// Wait for extension to load
+async function waitForExtension(timeout = 3000) {
+ const start = Date.now();
+
+ while (Date.now() - start < timeout) {
+ if (window.nostr) {
+ return new Nip07Signer();
+ }
+ await new Promise(r => setTimeout(r, 100));
+ }
+
+ return null;
+}
+```
+
+### Extension Permissions
+
+```javascript
+// Some extensions support granular permissions
+const signer = new Nip07Signer();
+
+// Request specific permissions
+try {
+ // This varies by extension
+ await window.nostr.enable();
+} catch (error) {
+ console.log('User denied permission');
+}
+```
+
+## NIP-46 Remote Signer
+
+### Nostr Connect
+
+```javascript
+import { Nip46Signer } from 'applesauce-signers';
+
+// Create remote signer
+const signer = new Nip46Signer({
+ // Remote signer's pubkey
+ remotePubkey: signerPubkey,
+
+ // Relays for communication
+ relays: ['wss://relay.example.com'],
+
+ // Local secret key for encryption
+ localSecretKey: localSecretKey,
+
+ // Optional: custom client name
+ clientName: 'My Nostr App'
+});
+
+// Connect to remote signer
+await signer.connect();
+
+// Get public key
+const pubkey = await signer.getPublicKey();
+
+// Sign event
+const signedEvent = await signer.signEvent(unsignedEvent);
+
+// Disconnect when done
+signer.disconnect();
+```
+
+### Connection URL
+
+```javascript
+// Parse nostrconnect:// URL
+function parseNostrConnectUrl(url) {
+ const parsed = new URL(url);
+
+ return {
+ pubkey: parsed.pathname.replace('//', ''),
+ relay: parsed.searchParams.get('relay'),
+ secret: parsed.searchParams.get('secret')
+ };
+}
+
+// Create signer from URL
+const { pubkey, relay, secret } = parseNostrConnectUrl(connectUrl);
+
+const signer = new Nip46Signer({
+ remotePubkey: pubkey,
+ relays: [relay],
+ localSecretKey: generateSecretKey(),
+ secret: secret
+});
+```
+
+### Bunker URL
+
+```javascript
+// Parse bunker:// URL (NIP-46)
+function parseBunkerUrl(url) {
+ const parsed = new URL(url);
+
+ return {
+ pubkey: parsed.pathname.replace('//', ''),
+ relays: parsed.searchParams.getAll('relay'),
+ secret: parsed.searchParams.get('secret')
+ };
+}
+
+const { pubkey, relays, secret } = parseBunkerUrl(bunkerUrl);
+```
+
+## Signer Management
+
+### Signer Store
+
+```javascript
+import { SignerStore } from 'applesauce-signers';
+
+const signerStore = new SignerStore();
+
+// Set active signer
+signerStore.setSigner(signer);
+
+// Get active signer
+const activeSigner = signerStore.getSigner();
+
+// Clear signer (logout)
+signerStore.clearSigner();
+
+// Observable for signer changes
+signerStore.signer$.subscribe(signer => {
+ if (signer) {
+ console.log('Logged in');
+ } else {
+ console.log('Logged out');
+ }
+});
+```
+
+### Multi-Account Support
+
+```javascript
+class AccountManager {
+ constructor() {
+ this.accounts = new Map();
+ this.activeAccount = null;
+ }
+
+ addAccount(pubkey, signer) {
+ this.accounts.set(pubkey, signer);
+ }
+
+ removeAccount(pubkey) {
+ this.accounts.delete(pubkey);
+ if (this.activeAccount === pubkey) {
+ this.activeAccount = null;
+ }
+ }
+
+ switchAccount(pubkey) {
+ if (this.accounts.has(pubkey)) {
+ this.activeAccount = pubkey;
+ return this.accounts.get(pubkey);
+ }
+ return null;
+ }
+
+ getActiveSigner() {
+ return this.activeAccount
+ ? this.accounts.get(this.activeAccount)
+ : null;
+ }
+}
+```
+
+## Custom Signers
+
+### Implementing a Custom Signer
+
+```javascript
+class CustomSigner {
+ constructor(options) {
+ this.options = options;
+ }
+
+ async getPublicKey() {
+ // Return public key
+ return this.options.pubkey;
+ }
+
+ async signEvent(event) {
+ // Implement signing logic
+ // Could call external API, hardware wallet, etc.
+
+ const signedEvent = await this.externalSign(event);
+ return signedEvent;
+ }
+
+ async nip04Encrypt(pubkey, plaintext) {
+ // Implement NIP-04 encryption
+ throw new Error('NIP-04 not supported');
+ }
+
+ async nip04Decrypt(pubkey, ciphertext) {
+ throw new Error('NIP-04 not supported');
+ }
+
+ async nip44Encrypt(pubkey, plaintext) {
+ // Implement NIP-44 encryption
+ throw new Error('NIP-44 not supported');
+ }
+
+ async nip44Decrypt(pubkey, ciphertext) {
+ throw new Error('NIP-44 not supported');
+ }
+}
+```
+
+### Hardware Wallet Signer
+
+```javascript
+class HardwareWalletSigner {
+ constructor(devicePath) {
+ this.devicePath = devicePath;
+ }
+
+ async connect() {
+ // Connect to hardware device
+ this.device = await connectToDevice(this.devicePath);
+ }
+
+ async getPublicKey() {
+ // Get public key from device
+ return await this.device.getNostrPubkey();
+ }
+
+ async signEvent(event) {
+ // Sign on device (user confirms on device)
+ const signature = await this.device.signNostrEvent(event);
+
+ return {
+ ...event,
+ pubkey: await this.getPublicKey(),
+ id: getEventHash(event),
+ sig: signature
+ };
+ }
+}
+```
+
+### Read-Only Signer
+
+```javascript
+class ReadOnlySigner {
+ constructor(pubkey) {
+ this.pubkey = pubkey;
+ }
+
+ async getPublicKey() {
+ return this.pubkey;
+ }
+
+ async signEvent(event) {
+ throw new Error('Read-only mode: cannot sign events');
+ }
+
+ async nip04Encrypt(pubkey, plaintext) {
+ throw new Error('Read-only mode: cannot encrypt');
+ }
+
+ async nip04Decrypt(pubkey, ciphertext) {
+ throw new Error('Read-only mode: cannot decrypt');
+ }
+}
+```
+
+## Signing Utilities
+
+### Event Creation Helper
+
+```javascript
+async function createAndSignEvent(signer, template) {
+ const pubkey = await signer.getPublicKey();
+
+ const event = {
+ ...template,
+ pubkey,
+ created_at: template.created_at || Math.floor(Date.now() / 1000)
+ };
+
+ return await signer.signEvent(event);
+}
+
+// Usage
+const signedNote = await createAndSignEvent(signer, {
+ kind: 1,
+ content: 'Hello!',
+ tags: []
+});
+```
+
+### Batch Signing
+
+```javascript
+async function signEvents(signer, events) {
+ const signed = [];
+
+ for (const event of events) {
+ const signedEvent = await signer.signEvent(event);
+ signed.push(signedEvent);
+ }
+
+ return signed;
+}
+
+// With parallelization (if signer supports)
+async function signEventsParallel(signer, events) {
+ return Promise.all(
+ events.map(event => signer.signEvent(event))
+ );
+}
+```
+
+## Svelte Integration
+
+### Signer Context
+
+```svelte
+<script>
+  import { setContext } from 'svelte';
+  import { SignerStore } from 'applesauce-signers';
+
+  const signerStore = new SignerStore();
+  setContext('signer', signerStore);
+</script>
+
+<slot />
+```
+
+```svelte
+<script>
+  import { getContext } from 'svelte';
+
+  const signerStore = getContext('signer');
+</script>
+```
+
+### Login Component
+
+```svelte
+<script>
+  import { getContext } from 'svelte';
+  import { Nip07Signer } from 'applesauce-signers';
+
+  const signerStore = getContext('signer');
+  const signer = signerStore.signer$;
+
+  function loginWithExtension() {
+    signerStore.setSigner(new Nip07Signer());
+  }
+
+  function logout() {
+    signerStore.clearSigner();
+  }
+</script>
+
+{#if $signer}
+  <button on:click={logout}>Logout</button>
+{:else}
+  <button on:click={loginWithExtension}>
+    Login with Browser Extension
+  </button>
+{/if}
+```
+
+## Best Practices
+
+### Security
+
+1. **Never store secret keys in plain text** - Use secure storage
+2. **Prefer NIP-07** - Let extensions manage keys
+3. **Clear keys on logout** - Don't leave in memory
+4. **Validate before signing** - Check event content
+
+### User Experience
+
+1. **Show signing status** - Loading states
+2. **Handle rejections gracefully** - User may cancel
+3. **Provide fallbacks** - Multiple login options
+4. **Remember preferences** - Store signer type
+
+### Error Handling
+
+```javascript
+async function safeSign(signer, event) {
+ try {
+ return await signer.signEvent(event);
+ } catch (error) {
+ if (error.message.includes('rejected')) {
+ console.log('User rejected signing');
+ return null;
+ }
+ if (error.message.includes('timeout')) {
+ console.log('Signing timed out');
+ return null;
+ }
+ throw error;
+ }
+}
+```
+
+### Permission Checking
+
+```javascript
+function hasEncryptionSupport(signer) {
+ return typeof signer.nip04Encrypt === 'function' ||
+ typeof signer.nip44Encrypt === 'function';
+}
+
+function getEncryptionMethod(signer) {
+ // Prefer NIP-44
+ if (typeof signer.nip44Encrypt === 'function') {
+ return 'nip44';
+ }
+ if (typeof signer.nip04Encrypt === 'function') {
+ return 'nip04';
+ }
+ return null;
+}
+```
+
+## Common Patterns
+
+### Signer Detection
+
+```javascript
+async function detectSigners() {
+ const available = [];
+
+ // Check NIP-07
+ if (typeof window !== 'undefined' && window.nostr) {
+ available.push({
+ type: 'nip07',
+ name: 'Browser Extension',
+ create: () => new Nip07Signer()
+ });
+ }
+
+ // Check stored credentials
+ const storedKey = localStorage.getItem('nsec');
+ if (storedKey) {
+ available.push({
+ type: 'stored',
+ name: 'Saved Key',
+ create: () => new SimpleSigner(storedKey)
+ });
+ }
+
+ return available;
+}
+```
+
+### Auto-Reconnect for NIP-46
+
+```javascript
+class ReconnectingNip46Signer {
+ constructor(options) {
+ this.options = options;
+ this.signer = null;
+ }
+
+ async connect() {
+ this.signer = new Nip46Signer(this.options);
+ await this.signer.connect();
+ }
+
+ async signEvent(event) {
+ try {
+ return await this.signer.signEvent(event);
+ } catch (error) {
+ if (error.message.includes('disconnected')) {
+ await this.connect();
+ return await this.signer.signEvent(event);
+ }
+ throw error;
+ }
+ }
+}
+```
+
+### Signer Type Persistence
+
+```javascript
+const SIGNER_KEY = 'nostr_signer_type';
+
+function saveSigner(type, data) {
+ localStorage.setItem(SIGNER_KEY, JSON.stringify({ type, data }));
+}
+
+async function restoreSigner() {
+ const saved = localStorage.getItem(SIGNER_KEY);
+ if (!saved) return null;
+
+ const { type, data } = JSON.parse(saved);
+
+ switch (type) {
+ case 'nip07':
+ if (window.nostr) {
+ return new Nip07Signer();
+ }
+ break;
+ case 'simple':
+ // Don't store secret keys!
+ break;
+ case 'nip46':
+ const signer = new Nip46Signer(data);
+ await signer.connect();
+ return signer;
+ }
+
+ return null;
+}
+```
+
+## Troubleshooting
+
+### Common Issues
+
+**Extension not detected:**
+- Wait for page load
+- Check window.nostr exists
+- Verify extension is enabled
+
+**Signing rejected:**
+- User cancelled in extension
+- Handle gracefully with error message
+
+**NIP-46 connection fails:**
+- Check relay is accessible
+- Verify remote signer is online
+- Check secret matches
+
+**Encryption not supported:**
+- Check signer has encrypt methods
+- Fall back to alternative method
+- Show user appropriate error
+
+## References
+
+- **applesauce GitHub**: https://github.com/hzrd149/applesauce
+- **NIP-07 Specification**: https://github.com/nostr-protocol/nips/blob/master/07.md
+- **NIP-46 Specification**: https://github.com/nostr-protocol/nips/blob/master/46.md
+- **nostr-tools**: https://github.com/nbd-wtf/nostr-tools
+
+## Related Skills
+
+- **nostr-tools** - Event creation and signing utilities
+- **applesauce-core** - Event stores and queries
+- **nostr** - Nostr protocol fundamentals
+- **svelte** - Building Nostr UIs
diff --git a/.claude/skills/cypher/SKILL.md b/.claude/skills/cypher/SKILL.md
new file mode 100644
index 00000000..e8ae91b2
--- /dev/null
+++ b/.claude/skills/cypher/SKILL.md
@@ -0,0 +1,395 @@
+---
+name: cypher
+description: This skill should be used when writing, debugging, or discussing Neo4j Cypher queries. Provides comprehensive knowledge of Cypher syntax, query patterns, performance optimization, and common mistakes. Particularly useful for translating between domain models and graph queries.
+---
+
+# Neo4j Cypher Query Language
+
+## Purpose
+
+This skill provides expert-level guidance for writing Neo4j Cypher queries, including syntax, patterns, performance optimization, and common pitfalls. It is particularly tuned for the patterns used in this ORLY Nostr relay codebase.
+
+## When to Use
+
+Activate this skill when:
+- Writing Cypher queries for Neo4j
+- Debugging Cypher syntax errors
+- Optimizing query performance
+- Translating Nostr filter queries to Cypher
+- Working with graph relationships and traversals
+- Creating or modifying schema (indexes, constraints)
+
+## Core Cypher Syntax
+
+### Clause Order (CRITICAL)
+
+Cypher requires clauses in a specific order. Violating this causes syntax errors:
+
+```cypher
+// CORRECT order of clauses
+MATCH (n:Label) // 1. Pattern matching
+WHERE n.prop = value // 2. Filtering
+WITH n, count(*) AS cnt // 3. Intermediate results (resets scope)
+OPTIONAL MATCH (n)-[r]-() // 4. Optional patterns
+CREATE (m:NewNode) // 5. Node/relationship creation
+SET n.prop = value // 6. Property updates
+DELETE r // 7. Deletions
+RETURN n.prop AS result // 8. Return clause
+ORDER BY result DESC // 9. Ordering
+SKIP 10 LIMIT 20 // 10. Pagination
+```
+
+### The WITH Clause (CRITICAL)
+
+The `WITH` clause is required to transition between certain operations:
+
+**Rule: Cannot use MATCH after CREATE without WITH**
+
+```cypher
+// WRONG - MATCH after CREATE without WITH
+CREATE (e:Event {id: $id})
+MATCH (ref:Event {id: $refId}) // ERROR!
+CREATE (e)-[:REFERENCES]->(ref)
+
+// CORRECT - Use WITH to carry variables forward
+CREATE (e:Event {id: $id})
+WITH e
+MATCH (ref:Event {id: $refId})
+CREATE (e)-[:REFERENCES]->(ref)
+```
+
+**Rule: WITH resets the scope**
+
+Variables not included in WITH are no longer accessible:
+
+```cypher
+// WRONG - 'a' is lost after WITH
+MATCH (a:Author), (e:Event)
+WITH e
+WHERE a.pubkey = $pubkey // ERROR: 'a' not defined
+
+// CORRECT - Include all needed variables
+MATCH (a:Author), (e:Event)
+WITH a, e
+WHERE a.pubkey = $pubkey
+```
+
+### Node and Relationship Patterns
+
+```cypher
+// Nodes
+(n) // Anonymous node
+(n:Label) // Labeled node
+(n:Label {prop: value}) // Node with properties
+(n:Label:OtherLabel) // Multiple labels
+
+// Relationships
+-[r]-> // Directed, anonymous
+-[r:TYPE]-> // Typed relationship
+-[r:TYPE {prop: value}]-> // With properties
+-[r:TYPE|OTHER]-> // Multiple types (OR)
+-[*1..3]-> // Variable length (1 to 3 hops)
+-[*]-> // Any number of hops
+```
+
+### MERGE vs CREATE
+
+**CREATE**: Always creates new nodes/relationships (may create duplicates)
+
+```cypher
+CREATE (n:Event {id: $id}) // Creates even if id exists
+```
+
+**MERGE**: Finds or creates (idempotent)
+
+```cypher
+MERGE (n:Event {id: $id}) // Finds existing or creates new
+ON CREATE SET n.created = timestamp()
+ON MATCH SET n.accessed = timestamp()
+```
+
+**Best Practice**: Use MERGE for reference nodes, CREATE for unique events
+
+```cypher
+// Reference nodes - use MERGE (idempotent)
+MERGE (author:Author {pubkey: $pubkey})
+
+// Unique events - use CREATE (after checking existence)
+CREATE (e:Event {id: $eventId, ...})
+```
+
+### OPTIONAL MATCH
+
+Returns NULL for non-matching patterns (like LEFT JOIN):
+
+```cypher
+// Find events, with or without tags
+MATCH (e:Event)
+OPTIONAL MATCH (e)-[:TAGGED_WITH]->(t:Tag)
+RETURN e.id, collect(t.value) AS tags
+```
+
+### Conditional Creation with FOREACH
+
+To conditionally create relationships:
+
+```cypher
+// FOREACH trick for conditional operations ('e' must already be bound by an earlier MATCH/CREATE)
+OPTIONAL MATCH (ref:Event {id: $refId})
+FOREACH (ignoreMe IN CASE WHEN ref IS NOT NULL THEN [1] ELSE [] END |
+ CREATE (e)-[:REFERENCES]->(ref)
+)
+```
+
+### Aggregation Functions
+
+```cypher
+count(*) // Count all rows
+count(n) // Count non-null values
+count(DISTINCT n) // Count unique values
+collect(n) // Collect into list
+collect(DISTINCT n) // Collect unique values
+sum(n.value) // Sum values
+avg(n.value) // Average
+min(n.value), max(n.value) // Min/max
+```
+
+### String Operations
+
+```cypher
+// String matching
+WHERE n.name STARTS WITH 'prefix'
+WHERE n.name ENDS WITH 'suffix'
+WHERE n.name CONTAINS 'substring'
+WHERE n.name =~ 'regex.*pattern' // Regex
+
+// String functions
+toLower(str), toUpper(str)
+trim(str), ltrim(str), rtrim(str)
+substring(str, start, length)
+replace(str, search, replacement)
+```
+
+### List Operations
+
+```cypher
+// IN clause
+WHERE n.kind IN [1, 7, 30023]
+WHERE n.pubkey IN $pubkeyList
+
+// List comprehension
+[x IN list WHERE x > 0 | x * 2]
+
+// UNWIND - expand list into rows
+UNWIND $pubkeys AS pubkey
+MERGE (u:User {pubkey: pubkey})
+```
+
+### Parameters
+
+Always use parameters for values (security + performance):
+
+```cypher
+// CORRECT - parameterized
+MATCH (e:Event {id: $eventId})
+WHERE e.kind IN $kinds
+
+// WRONG - string interpolation (SQL injection risk!)
+MATCH (e:Event {id: '" + eventId + "'})
+```
+
+## Schema Management
+
+### Constraints
+
+```cypher
+// Uniqueness constraint (also creates index)
+CREATE CONSTRAINT event_id_unique IF NOT EXISTS
+FOR (e:Event) REQUIRE e.id IS UNIQUE
+
+// Composite uniqueness
+CREATE CONSTRAINT card_unique IF NOT EXISTS
+FOR (c:Card) REQUIRE (c.customer_id, c.observee_pubkey) IS UNIQUE
+
+// Drop constraint
+DROP CONSTRAINT event_id_unique IF EXISTS
+```
+
+### Indexes
+
+```cypher
+// Single property index
+CREATE INDEX event_kind IF NOT EXISTS FOR (e:Event) ON (e.kind)
+
+// Composite index
+CREATE INDEX event_kind_created IF NOT EXISTS
+FOR (e:Event) ON (e.kind, e.created_at)
+
+// Drop index
+DROP INDEX event_kind IF EXISTS
+```
+
+## Common Query Patterns
+
+### Find with Filter
+
+```cypher
+// Multiple conditions with OR
+MATCH (e:Event)
+WHERE e.kind IN $kinds
+ AND (e.id = $id1 OR e.id = $id2)
+ AND e.created_at >= $since
+RETURN e
+ORDER BY e.created_at DESC
+LIMIT $limit
+```
+
+### Graph Traversal
+
+```cypher
+// Find events by author
+MATCH (e:Event)-[:AUTHORED_BY]->(a:Author {pubkey: $pubkey})
+RETURN e
+
+// Find followers of a user
+MATCH (follower:NostrUser)-[:FOLLOWS]->(user:NostrUser {pubkey: $pubkey})
+RETURN follower.pubkey
+
+// Find mutual follows (friends)
+MATCH (a:NostrUser {pubkey: $pubkeyA})-[:FOLLOWS]->(b:NostrUser)
+WHERE (b)-[:FOLLOWS]->(a)
+RETURN b.pubkey AS mutual_friend
+```
+
+### Upsert Pattern
+
+```cypher
+MERGE (n:Node {key: $key})
+ON CREATE SET
+ n.created_at = timestamp(),
+ n.value = $value
+ON MATCH SET
+ n.updated_at = timestamp(),
+ n.value = $value
+RETURN n
+```
+
+### Batch Processing with UNWIND
+
+```cypher
+// Create multiple nodes from list
+UNWIND $items AS item
+CREATE (n:Node {id: item.id, value: item.value})
+
+// Create relationships from list
+MATCH (author:NostrUser {pubkey: $author_pubkey})
+UNWIND $follows AS followed_pubkey
+MERGE (followed:NostrUser {pubkey: followed_pubkey})
+MERGE (author)-[:FOLLOWS]->(followed)
+
+## Performance Optimization
+
+### Index Usage
+
+1. **Start with indexed properties** - Begin MATCH with most selective indexed field
+2. **Use composite indexes** - For queries filtering on multiple properties
+3. **Profile queries** - Use `PROFILE` prefix to see execution plan
+
+```cypher
+PROFILE MATCH (e:Event {kind: 1})
+WHERE e.created_at > $since
+RETURN e LIMIT 100
+```
+
+### Query Optimization Tips
+
+1. **Filter early** - Put WHERE conditions close to MATCH
+2. **Limit early** - Use LIMIT as early as possible
+3. **Avoid Cartesian products** - Connect patterns or use WITH
+4. **Use parameters** - Enables query plan caching
+
+```cypher
+// GOOD - Filter and limit early
+MATCH (e:Event)
+WHERE e.kind IN $kinds AND e.created_at >= $since
+WITH e ORDER BY e.created_at DESC LIMIT 100
+OPTIONAL MATCH (e)-[:TAGGED_WITH]->(t:Tag)
+RETURN e, collect(t)
+
+// BAD - Late filtering
+MATCH (e:Event), (t:Tag)
+WHERE e.kind IN $kinds
+RETURN e, t LIMIT 100
+```
+
+## Reference Materials
+
+For detailed information, consult the reference files:
+
+- **references/syntax-reference.md** - Complete Cypher syntax guide with all clause types, operators, and functions
+- **references/common-patterns.md** - Project-specific patterns for ORLY Nostr relay including event storage, tag queries, and social graph traversals
+- **references/common-mistakes.md** - Frequent Cypher errors and how to avoid them
+
+## ORLY-Specific Patterns
+
+This codebase uses these specific Cypher patterns:
+
+### Event Storage Pattern
+
+```cypher
+// Create event with author relationship
+MERGE (a:Author {pubkey: $pubkey})
+CREATE (e:Event {
+ id: $eventId,
+ serial: $serial,
+ kind: $kind,
+ created_at: $createdAt,
+ content: $content,
+ sig: $sig,
+ pubkey: $pubkey,
+ tags: $tags
+})
+CREATE (e)-[:AUTHORED_BY]->(a)
+```
+
+### Tag Query Pattern
+
+```cypher
+// Query events by tag (Nostr # filter)
+MATCH (e:Event)-[:TAGGED_WITH]->(t:Tag {type: $tagType})
+WHERE t.value IN $tagValues
+RETURN e
+ORDER BY e.created_at DESC
+LIMIT $limit
+```
+
+### Social Graph Pattern
+
+```cypher
+// Process contact list with diff-based updates
+// Mark old as superseded
+OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
+SET old.superseded_by = $new_event_id
+
+// Create tracking node
+CREATE (new:ProcessedSocialEvent {
+ event_id: $new_event_id,
+ event_kind: 3,
+ pubkey: $author_pubkey,
+ created_at: $created_at,
+ processed_at: timestamp()
+})
+
+// Update relationships
+MERGE (author:NostrUser {pubkey: $author_pubkey})
+WITH author
+UNWIND $added_follows AS followed_pubkey
+MERGE (followed:NostrUser {pubkey: followed_pubkey})
+MERGE (author)-[:FOLLOWS]->(followed)
+```
+
+## Official Resources
+
+- Neo4j Cypher Manual: https://neo4j.com/docs/cypher-manual/current/
+- Cypher Cheat Sheet: https://neo4j.com/docs/cypher-cheat-sheet/current/
+- Query Tuning: https://neo4j.com/docs/cypher-manual/current/query-tuning/
\ No newline at end of file
diff --git a/.claude/skills/cypher/references/common-mistakes.md b/.claude/skills/cypher/references/common-mistakes.md
new file mode 100644
index 00000000..a61efe97
--- /dev/null
+++ b/.claude/skills/cypher/references/common-mistakes.md
@@ -0,0 +1,381 @@
+# Common Cypher Mistakes and How to Avoid Them
+
+## Clause Ordering Errors
+
+### MATCH After CREATE Without WITH
+
+**Error**: `Invalid input 'MATCH': expected ... WITH`
+
+```cypher
+// WRONG
+CREATE (e:Event {id: $id})
+MATCH (ref:Event {id: $refId}) // ERROR!
+CREATE (e)-[:REFERENCES]->(ref)
+
+// CORRECT - Use WITH to transition
+CREATE (e:Event {id: $id})
+WITH e
+MATCH (ref:Event {id: $refId})
+CREATE (e)-[:REFERENCES]->(ref)
+```
+
+**Rule**: After CREATE, you must use WITH before MATCH.
+
+### WHERE After WITH Without Carrying Variables
+
+**Error**: `Variable 'x' not defined`
+
+```cypher
+// WRONG - 'a' is lost
+MATCH (a:Author), (e:Event)
+WITH e
+WHERE a.pubkey = $pubkey // ERROR: 'a' not in scope
+
+// CORRECT - Include all needed variables
+MATCH (a:Author), (e:Event)
+WITH a, e
+WHERE a.pubkey = $pubkey
+```
+
+**Rule**: WITH resets the scope. Include all variables you need.
+
+### ORDER BY Without Aliased Return
+
+**Issue**: `ORDER BY` after `WITH`/`RETURN` can only reference projected expressions in some contexts (e.g. after aggregation); aliasing the expression avoids surprises.
+
+```cypher
+// WRONG in some contexts
+RETURN n.name
+ORDER BY n.name
+
+// SAFER - Use alias
+RETURN n.name AS name
+ORDER BY name
+```
+
+## MERGE Mistakes
+
+### MERGE on Complex Pattern Creates Duplicates
+
+```cypher
+// DANGEROUS - May create duplicate nodes
+MERGE (a:Person {name: 'Alice'})-[:KNOWS]->(b:Person {name: 'Bob'})
+
+// CORRECT - MERGE nodes separately first
+MERGE (a:Person {name: 'Alice'})
+MERGE (b:Person {name: 'Bob'})
+MERGE (a)-[:KNOWS]->(b)
+```
+
+**Rule**: MERGE simple patterns, not complex ones.
+
+### MERGE Without Unique Property
+
+```cypher
+// DANGEROUS - Will keep creating nodes
+MERGE (p:Person) // No unique identifier!
+SET p.name = 'Alice'
+
+// CORRECT - Provide unique key
+MERGE (p:Person {email: $email})
+SET p.name = 'Alice'
+```
+
+**Rule**: MERGE must have properties that uniquely identify the node.
+
+### Missing ON CREATE/ON MATCH
+
+```cypher
+// LOSES context of whether new or existing
+MERGE (p:Person {id: $id})
+SET p.updated_at = timestamp() // Always runs
+
+// BETTER - Handle each case
+MERGE (p:Person {id: $id})
+ON CREATE SET p.created_at = timestamp()
+ON MATCH SET p.updated_at = timestamp()
+```
+
+## NULL Handling Errors
+
+### Comparing with NULL
+
+```cypher
+// WRONG - NULL = NULL is NULL, not true
+WHERE n.email = null // Never matches!
+
+// CORRECT
+WHERE n.email IS NULL
+WHERE n.email IS NOT NULL
+```
+
+### NULL in Aggregations
+
+```cypher
+// count(NULL) returns 0, collect(NULL) includes NULL
+MATCH (n:Person)
+OPTIONAL MATCH (n)-[:BOUGHT]->(p:Product)
+RETURN n.name, count(p) // count ignores NULL
+```
+
+### NULL Propagation in Expressions
+
+```cypher
+// Any operation with NULL returns NULL
+WHERE n.age + 1 > 21 // If n.age is NULL, whole expression is NULL (falsy)
+
+// Handle with coalesce
+WHERE coalesce(n.age, 0) + 1 > 21
+```
+
+## List and IN Clause Errors
+
+### Empty List in IN
+
+```cypher
+// An empty list never matches
+WHERE n.kind IN [] // Always false
+
+// Check for empty list in application code before query
+// Or use CASE:
+WHERE CASE WHEN size($kinds) > 0 THEN n.kind IN $kinds ELSE true END
+```
+
+### IN with NULL Values
+
+```cypher
+// NULL in the list causes issues
+WHERE n.id IN [1, NULL, 3] // NULL is never equal to anything
+
+// Filter NULLs in application code
+```
+
+## Relationship Pattern Errors
+
+### Forgetting Direction
+
+```cypher
+// WRONG - Matches in either direction (direction not constrained)
+MATCH (a)-[:FOLLOWS]-(b) // Undirected!
+
+// CORRECT - Specify direction
+MATCH (a)-[:FOLLOWS]->(b) // a follows b
+MATCH (a)<-[:FOLLOWS]-(b) // b follows a
+```
+
+### Variable-Length Without Bounds
+
+```cypher
+// DANGEROUS - Potentially explosive
+MATCH (a)-[*]->(b) // Any length path!
+
+// SAFE - Set bounds
+MATCH (a)-[*1..3]->(b) // 1 to 3 hops max
+```
+
+### Creating Duplicate Relationships
+
+```cypher
+// May create duplicates
+CREATE (a)-[:KNOWS]->(b)
+
+// Idempotent
+MERGE (a)-[:KNOWS]->(b)
+```
+
+## Performance Mistakes
+
+### Cartesian Products
+
+```cypher
+// WRONG - Cartesian product
+MATCH (a:Person), (b:Product)
+WHERE a.id = $personId AND b.id = $productId
+CREATE (a)-[:BOUGHT]->(b)
+
+// CORRECT - Single pattern or sequential
+MATCH (a:Person {id: $personId})
+MATCH (b:Product {id: $productId})
+CREATE (a)-[:BOUGHT]->(b)
+```
+
+### Late Filtering
+
+```cypher
+// SLOW - Filters after collecting everything
+MATCH (e:Event)
+WITH e
+WHERE e.kind = 1 // Should be in MATCH or right after
+
+// FAST - Filter early
+MATCH (e:Event)
+WHERE e.kind = 1
+```
+
+### Missing LIMIT with ORDER BY
+
+```cypher
+// SLOW - Sorts all results
+MATCH (e:Event)
+RETURN e
+ORDER BY e.created_at DESC
+
+// FAST - Limits result set
+MATCH (e:Event)
+RETURN e
+ORDER BY e.created_at DESC
+LIMIT 100
+```
+
+### Unparameterized Queries
+
+```cypher
+// WRONG - No query plan caching, injection risk
+MATCH (e:Event {id: '" + eventId + "'})
+
+// CORRECT - Use parameters
+MATCH (e:Event {id: $eventId})
+```
+
+## String Comparison Errors
+
+### Case Sensitivity
+
+```cypher
+// Cypher strings are case-sensitive
+WHERE n.name = 'alice' // Won't match 'Alice'
+
+// Use toLower/toUpper for case-insensitive
+WHERE toLower(n.name) = toLower($name)
+
+// Or use regex with (?i)
+WHERE n.name =~ '(?i)alice'
+```
+
+### LIKE vs CONTAINS
+
+```cypher
+// There's no LIKE in Cypher
+WHERE n.name LIKE '%alice%' // ERROR!
+
+// Use CONTAINS, STARTS WITH, ENDS WITH
+WHERE n.name CONTAINS 'alice'
+WHERE n.name STARTS WITH 'ali'
+WHERE n.name ENDS WITH 'ice'
+
+// Or regex for complex patterns
+WHERE n.name =~ '.*ali.*ce.*'
+```
+
+## Index Mistakes
+
+### Constraint vs Index
+
+```cypher
+// Constraint (also creates index, enforces uniqueness)
+CREATE CONSTRAINT foo IF NOT EXISTS FOR (n:Node) REQUIRE n.id IS UNIQUE
+
+// Index only (no uniqueness enforcement)
+CREATE INDEX bar IF NOT EXISTS FOR (n:Node) ON (n.id)
+```
+
+### Index Not Used
+
+```cypher
+// Index on n.id won't help here
+WHERE toLower(n.id) = $id // Function applied to indexed property!
+
+// Store lowercase if needed, or create computed property
+```
+
+### Wrong Composite Index Order
+
+```cypher
+// Index on (kind, created_at) won't help query by created_at alone
+MATCH (e:Event) WHERE e.created_at > $since // Index not used
+
+// Either create single-property index or query by kind too
+CREATE INDEX event_created_at FOR (e:Event) ON (e.created_at)
+```
+
+## Transaction Errors
+
+### Read After Write in Same Transaction
+
+```cypher
+// In Neo4j, reads in a transaction see the writes
+// But be careful with external processes
+CREATE (n:Node {id: 'new'})
+WITH n
+MATCH (m:Node {id: 'new'}) // Will find 'n'
+```
+
+### Locks and Deadlocks
+
+```cypher
+// MERGE takes locks; avoid complex patterns that might deadlock
+// Bad: two MERGEs on same labels in different order
+Session 1: MERGE (a:Person {id: 1}) MERGE (b:Person {id: 2})
+Session 2: MERGE (b:Person {id: 2}) MERGE (a:Person {id: 1}) // Potential deadlock
+
+// Good: consistent ordering
+Session 1: MERGE (a:Person {id: 1}) MERGE (b:Person {id: 2})
+Session 2: MERGE (a:Person {id: 1}) MERGE (b:Person {id: 2})
+```
+
+## Type Coercion Issues
+
+### Integer vs String
+
+```cypher
+// Types must match
+WHERE n.id = 123 // Won't match if n.id is "123"
+WHERE n.id = '123' // Won't match if n.id is 123
+
+// Use appropriate parameter types from Go
+params["id"] = int64(123) // For integer
+params["id"] = "123" // For string
+```
+
+### Boolean Handling
+
+```cypher
+// Neo4j booleans vs strings
+WHERE n.active = true // Boolean
+WHERE n.active = 'true' // String - different!
+```
+
+## Delete Errors
+
+### Delete Node With Relationships
+
+```cypher
+// ERROR - Node still has relationships
+MATCH (n:Person {id: $id})
+DELETE n
+
+// CORRECT - Delete relationships first
+MATCH (n:Person {id: $id})
+DETACH DELETE n
+```
+
+### Optional Match and Delete
+
+```cypher
+// WRONG - DELETE NULL causes no error but also doesn't help
+OPTIONAL MATCH (n:Node {id: $id})
+DELETE n // If n is NULL, nothing happens silently
+
+// Better - Check existence first or handle in application
+MATCH (n:Node {id: $id})
+DELETE n
+```
+
+## Debugging Tips
+
+1. **Use EXPLAIN** to see query plan without executing
+2. **Use PROFILE** to see actual execution metrics
+3. **Break complex queries** into smaller parts to isolate issues
+4. **Check parameter types** - mismatched types are a common issue
+5. **Verify indexes exist** with `SHOW INDEXES`
+6. **Check constraints** with `SHOW CONSTRAINTS`
diff --git a/.claude/skills/cypher/references/common-patterns.md b/.claude/skills/cypher/references/common-patterns.md
new file mode 100644
index 00000000..5a53ee91
--- /dev/null
+++ b/.claude/skills/cypher/references/common-patterns.md
@@ -0,0 +1,397 @@
+# Common Cypher Patterns for ORLY Nostr Relay
+
+This reference contains project-specific Cypher patterns used in the ORLY Nostr relay's Neo4j backend.
+
+## Schema Overview
+
+### Node Types
+
+| Label | Purpose | Key Properties |
+|-------|---------|----------------|
+| `Event` | Nostr events (NIP-01) | `id`, `kind`, `pubkey`, `created_at`, `content`, `sig`, `tags`, `serial` |
+| `Author` | Event authors (for NIP-01 queries) | `pubkey` |
+| `Tag` | Generic tags | `type`, `value` |
+| `NostrUser` | Social graph users (WoT) | `pubkey`, `name`, `about`, `picture`, `nip05` |
+| `ProcessedSocialEvent` | Social event tracking | `event_id`, `event_kind`, `pubkey`, `superseded_by` |
+| `Marker` | Internal state markers | `key`, `value` |
+
+### Relationship Types
+
+| Type | From | To | Purpose |
+|------|------|-----|---------|
+| `AUTHORED_BY` | Event | Author | Links event to author |
+| `TAGGED_WITH` | Event | Tag | Links event to tags |
+| `REFERENCES` | Event | Event | e-tag references |
+| `MENTIONS` | Event | Author | p-tag mentions |
+| `FOLLOWS` | NostrUser | NostrUser | Contact list (kind 3) |
+| `MUTES` | NostrUser | NostrUser | Mute list (kind 10000) |
+| `REPORTS` | NostrUser | NostrUser | Reports (kind 1984) |
+
+## Event Storage Patterns
+
+### Create Event with Full Relationships
+
+This pattern creates an event and all related nodes/relationships atomically:
+
+```cypher
+// 1. Create or get author
+MERGE (a:Author {pubkey: $pubkey})
+
+// 2. Create event node
+CREATE (e:Event {
+ id: $eventId,
+ serial: $serial,
+ kind: $kind,
+ created_at: $createdAt,
+ content: $content,
+ sig: $sig,
+ pubkey: $pubkey,
+ tags: $tagsJson // JSON string for full tag data
+})
+
+// 3. Link to author
+CREATE (e)-[:AUTHORED_BY]->(a)
+
+// 4. Process e-tags (event references)
+WITH e, a
+OPTIONAL MATCH (ref0:Event {id: $eTag_0})
+FOREACH (_ IN CASE WHEN ref0 IS NOT NULL THEN [1] ELSE [] END |
+ CREATE (e)-[:REFERENCES]->(ref0)
+)
+
+// 5. Process p-tags (mentions)
+WITH e, a
+MERGE (mentioned0:Author {pubkey: $pTag_0})
+CREATE (e)-[:MENTIONS]->(mentioned0)
+
+// 6. Process other tags
+WITH e, a
+MERGE (tag0:Tag {type: $tagType_0, value: $tagValue_0})
+CREATE (e)-[:TAGGED_WITH]->(tag0)
+
+RETURN e.id AS id
+```
+
+### Check Event Existence
+
+```cypher
+MATCH (e:Event {id: $id})
+RETURN e.id AS id
+LIMIT 1
+```
+
+### Get Next Serial Number
+
+```cypher
+MERGE (m:Marker {key: 'serial'})
+ON CREATE SET m.value = 1
+ON MATCH SET m.value = m.value + 1
+RETURN m.value AS serial
+```
+
+## Query Patterns
+
+### Basic Filter Query (NIP-01)
+
+```cypher
+MATCH (e:Event)
+WHERE e.kind IN $kinds
+ AND e.pubkey IN $authors
+ AND e.created_at >= $since
+ AND e.created_at <= $until
+RETURN e.id AS id,
+ e.kind AS kind,
+ e.created_at AS created_at,
+ e.content AS content,
+ e.sig AS sig,
+ e.pubkey AS pubkey,
+ e.tags AS tags,
+ e.serial AS serial
+ORDER BY e.created_at DESC
+LIMIT $limit
+```
+
+### Query by Event ID (with prefix support)
+
+```cypher
+// Exact match
+MATCH (e:Event {id: $id})
+RETURN e
+
+// Prefix match
+MATCH (e:Event)
+WHERE e.id STARTS WITH $idPrefix
+RETURN e
+```
+
+### Query by Tag (# filter)
+
+```cypher
+MATCH (e:Event)-[:TAGGED_WITH]->(t:Tag)
+WHERE t.type = $tagType AND t.value IN $tagValues
+RETURN DISTINCT e
+ORDER BY e.created_at DESC
+LIMIT $limit
+```
+
+### Count Events
+
+```cypher
+MATCH (e:Event)
+WHERE e.kind IN $kinds
+RETURN count(e) AS count
+```
+
+### Query Delete Events Targeting an Event
+
+```cypher
+MATCH (target:Event {id: $targetId})
+MATCH (e:Event {kind: 5})-[:REFERENCES]->(target)
+RETURN e
+ORDER BY e.created_at DESC
+```
+
+### Replaceable Event Check (kinds 0, 3, 10000-19999)
+
+```cypher
+MATCH (e:Event {kind: $kind, pubkey: $pubkey})
+WHERE e.created_at < $newCreatedAt
+RETURN e.serial AS serial
+ORDER BY e.created_at DESC
+```
+
+### Parameterized Replaceable Event Check (kinds 30000-39999)
+
+```cypher
+MATCH (e:Event {kind: $kind, pubkey: $pubkey})-[:TAGGED_WITH]->(t:Tag {type: 'd', value: $dValue})
+WHERE e.created_at < $newCreatedAt
+RETURN e.serial AS serial
+ORDER BY e.created_at DESC
+```
+
+## Social Graph Patterns
+
+### Update Profile (Kind 0)
+
+```cypher
+MERGE (user:NostrUser {pubkey: $pubkey})
+ON CREATE SET
+ user.created_at = timestamp(),
+ user.first_seen_event = $event_id
+ON MATCH SET
+ user.last_profile_update = $created_at
+SET
+ user.name = $name,
+ user.about = $about,
+ user.picture = $picture,
+ user.nip05 = $nip05,
+ user.lud16 = $lud16,
+ user.display_name = $display_name
+```
+
+### Contact List Update (Kind 3) - Diff-Based
+
+```cypher
+// Mark old event as superseded
+OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
+SET old.superseded_by = $new_event_id
+
+// Create new event tracking
+CREATE (new:ProcessedSocialEvent {
+ event_id: $new_event_id,
+ event_kind: 3,
+ pubkey: $author_pubkey,
+ created_at: $created_at,
+ processed_at: timestamp(),
+ relationship_count: $total_follows,
+ superseded_by: null
+})
+
+// Get or create author
+MERGE (author:NostrUser {pubkey: $author_pubkey})
+
+// Update unchanged relationships to new event
+WITH author
+OPTIONAL MATCH (author)-[unchanged:FOLLOWS]->(followed:NostrUser)
+WHERE unchanged.created_by_event = $old_event_id
+ AND NOT followed.pubkey IN $removed_follows
+SET unchanged.created_by_event = $new_event_id,
+ unchanged.created_at = $created_at
+
+// Remove old relationships for removed follows
+WITH author
+OPTIONAL MATCH (author)-[old_follows:FOLLOWS]->(followed:NostrUser)
+WHERE old_follows.created_by_event = $old_event_id
+ AND followed.pubkey IN $removed_follows
+DELETE old_follows
+
+// Create new relationships for added follows
+WITH author
+UNWIND $added_follows AS followed_pubkey
+MERGE (followed:NostrUser {pubkey: followed_pubkey})
+MERGE (author)-[new_follows:FOLLOWS]->(followed)
+ON CREATE SET
+ new_follows.created_by_event = $new_event_id,
+ new_follows.created_at = $created_at,
+ new_follows.relay_received_at = timestamp()
+ON MATCH SET
+ new_follows.created_by_event = $new_event_id,
+ new_follows.created_at = $created_at
+```
+
+### Create Report (Kind 1984)
+
+```cypher
+// Create tracking node
+CREATE (evt:ProcessedSocialEvent {
+ event_id: $event_id,
+ event_kind: 1984,
+ pubkey: $reporter_pubkey,
+ created_at: $created_at,
+ processed_at: timestamp(),
+ relationship_count: 1,
+ superseded_by: null
+})
+
+// Create users and relationship
+MERGE (reporter:NostrUser {pubkey: $reporter_pubkey})
+MERGE (reported:NostrUser {pubkey: $reported_pubkey})
+CREATE (reporter)-[:REPORTS {
+ created_by_event: $event_id,
+ created_at: $created_at,
+ relay_received_at: timestamp(),
+ report_type: $report_type
+}]->(reported)
+```
+
+### Get Latest Social Event for Pubkey
+
+```cypher
+MATCH (evt:ProcessedSocialEvent {pubkey: $pubkey, event_kind: $kind})
+WHERE evt.superseded_by IS NULL
+RETURN evt.event_id AS event_id,
+ evt.created_at AS created_at,
+ evt.relationship_count AS relationship_count
+ORDER BY evt.created_at DESC
+LIMIT 1
+```
+
+### Get Follows for Event
+
+```cypher
+MATCH (author:NostrUser)-[f:FOLLOWS]->(followed:NostrUser)
+WHERE f.created_by_event = $event_id
+RETURN collect(followed.pubkey) AS pubkeys
+```
+
+## WoT Query Patterns
+
+### Find Mutual Follows
+
+```cypher
+MATCH (a:NostrUser {pubkey: $pubkeyA})-[:FOLLOWS]->(b:NostrUser)
+WHERE (b)-[:FOLLOWS]->(a)
+RETURN b.pubkey AS mutual_friend
+```
+
+### Find Followers
+
+```cypher
+MATCH (follower:NostrUser)-[:FOLLOWS]->(user:NostrUser {pubkey: $pubkey})
+RETURN follower.pubkey, follower.name
+```
+
+### Find Following
+
+```cypher
+MATCH (user:NostrUser {pubkey: $pubkey})-[:FOLLOWS]->(following:NostrUser)
+RETURN following.pubkey, following.name
+```
+
+### Hop Distance (Trust Path)
+
+```cypher
+MATCH (start:NostrUser {pubkey: $startPubkey})
+MATCH (end:NostrUser {pubkey: $endPubkey})
+MATCH path = shortestPath((start)-[:FOLLOWS*..6]->(end))
+RETURN length(path) AS hops, [n IN nodes(path) | n.pubkey] AS path
+```
+
+### Second-Degree Connections
+
+```cypher
+MATCH (me:NostrUser {pubkey: $myPubkey})-[:FOLLOWS]->(:NostrUser)-[:FOLLOWS]->(suggested:NostrUser)
+WHERE NOT (me)-[:FOLLOWS]->(suggested)
+ AND suggested.pubkey <> $myPubkey
+RETURN suggested.pubkey, count(*) AS commonFollows
+ORDER BY commonFollows DESC
+LIMIT 20
+```
+
+## Schema Management Patterns
+
+### Create Constraint
+
+```cypher
+CREATE CONSTRAINT event_id_unique IF NOT EXISTS
+FOR (e:Event) REQUIRE e.id IS UNIQUE
+```
+
+### Create Index
+
+```cypher
+CREATE INDEX event_kind IF NOT EXISTS
+FOR (e:Event) ON (e.kind)
+```
+
+### Create Composite Index
+
+```cypher
+CREATE INDEX event_kind_created_at IF NOT EXISTS
+FOR (e:Event) ON (e.kind, e.created_at)
+```
+
+### Drop All Data (Testing Only)
+
+```cypher
+MATCH (n) DETACH DELETE n
+```
+
+## Performance Patterns
+
+### Use EXPLAIN/PROFILE
+
+```cypher
+// See query plan without running
+EXPLAIN MATCH (e:Event) WHERE e.kind = 1 RETURN e
+
+// Run and see actual metrics
+PROFILE MATCH (e:Event) WHERE e.kind = 1 RETURN e
+```
+
+### Batch Import with UNWIND
+
+```cypher
+UNWIND $events AS evt
+CREATE (e:Event {
+ id: evt.id,
+ kind: evt.kind,
+ pubkey: evt.pubkey,
+ created_at: evt.created_at,
+ content: evt.content,
+ sig: evt.sig,
+ tags: evt.tags
+})
+```
+
+### Efficient Pagination
+
+```cypher
+// Use indexed ORDER BY with WHERE for cursor-based pagination
+MATCH (e:Event)
+WHERE e.kind = 1 AND e.created_at < $cursor
+RETURN e
+ORDER BY e.created_at DESC
+LIMIT 20
+```
diff --git a/.claude/skills/cypher/references/syntax-reference.md b/.claude/skills/cypher/references/syntax-reference.md
new file mode 100644
index 00000000..ebb94b2b
--- /dev/null
+++ b/.claude/skills/cypher/references/syntax-reference.md
@@ -0,0 +1,540 @@
+# Cypher Syntax Reference
+
+Complete syntax reference for Neo4j Cypher query language.
+
+## Clause Reference
+
+### Reading Clauses
+
+#### MATCH
+
+Finds patterns in the graph.
+
+```cypher
+// Basic node match
+MATCH (n:Label)
+
+// Match with properties
+MATCH (n:Label {key: value})
+
+// Match relationships
+MATCH (a)-[r:RELATES_TO]->(b)
+
+// Match path
+MATCH path = (a)-[*1..3]->(b)
+```
+
+#### OPTIONAL MATCH
+
+Like MATCH but returns NULL for non-matches (LEFT OUTER JOIN).
+
+```cypher
+MATCH (a:Person)
+OPTIONAL MATCH (a)-[:KNOWS]->(b:Person)
+RETURN a.name, b.name // b.name may be NULL
+```
+
+#### WHERE
+
+Filters results.
+
+```cypher
+// Comparison operators
+WHERE n.age > 21
+WHERE n.age >= 21
+WHERE n.age < 65
+WHERE n.age <= 65
+WHERE n.name = 'Alice'
+WHERE n.name <> 'Bob'
+
+// Boolean operators
+WHERE n.age > 21 AND n.active = true
+WHERE n.age < 18 OR n.age > 65
+WHERE NOT n.deleted
+
+// NULL checks
+WHERE n.email IS NULL
+WHERE n.email IS NOT NULL
+
+// Pattern predicates
+WHERE (n)-[:KNOWS]->(:Person)
+WHERE NOT (n)-[:BLOCKED]->()
+WHERE EXISTS { (n)-[:FOLLOWS]->() } // exists(pattern) is removed in Neo4j 5
+
+// String predicates
+WHERE n.name STARTS WITH 'A'
+WHERE n.name ENDS WITH 'son'
+WHERE n.name CONTAINS 'li'
+WHERE n.name =~ '(?i)alice.*' // Case-insensitive regex
+
+// List predicates
+WHERE n.status IN ['active', 'pending']
+WHERE any(x IN n.tags WHERE x = 'important')
+WHERE all(x IN n.scores WHERE x > 50)
+WHERE none(x IN n.errors WHERE x IS NOT NULL)
+WHERE single(x IN n.items WHERE x.primary = true)
+```
+
+### Writing Clauses
+
+#### CREATE
+
+Creates nodes and relationships.
+
+```cypher
+// Create node
+CREATE (n:Label {key: value})
+
+// Create multiple nodes
+CREATE (a:Person {name: 'Alice'}), (b:Person {name: 'Bob'})
+
+// Create relationship
+CREATE (a)-[r:KNOWS {since: 2020}]->(b)
+
+// Create path
+CREATE p = (a)-[:KNOWS]->(b)-[:KNOWS]->(c)
+```
+
+#### MERGE
+
+Find or create pattern. **Critical for idempotency**.
+
+```cypher
+// MERGE node
+MERGE (n:Label {key: $uniqueKey})
+
+// MERGE with ON CREATE / ON MATCH
+MERGE (n:Person {email: $email})
+ON CREATE SET n.created = timestamp(), n.name = $name
+ON MATCH SET n.accessed = timestamp()
+
+// MERGE relationship (both nodes must exist or be in scope)
+MERGE (a)-[r:KNOWS]->(b)
+ON CREATE SET r.since = date()
+```
+
+**MERGE Gotcha**: MERGE on a pattern locks the entire pattern. For relationships, MERGE each node first:
+
+```cypher
+// CORRECT
+MERGE (a:Person {id: $id1})
+MERGE (b:Person {id: $id2})
+MERGE (a)-[:KNOWS]->(b)
+
+// RISKY - may create duplicate nodes
+MERGE (a:Person {id: $id1})-[:KNOWS]->(b:Person {id: $id2})
+```
+
+#### SET
+
+Updates properties.
+
+```cypher
+// Set single property
+SET n.name = 'Alice'
+
+// Set multiple properties
+SET n.name = 'Alice', n.age = 30
+
+// Set from map (replaces all properties)
+SET n = {name: 'Alice', age: 30}
+
+// Set from map (adds/updates, keeps existing)
+SET n += {name: 'Alice'}
+
+// Set label
+SET n:NewLabel
+
+// Remove property
+SET n.obsolete = null
+```
+
+#### DELETE / DETACH DELETE
+
+Removes nodes and relationships.
+
+```cypher
+// Delete relationship
+MATCH (a)-[r:KNOWS]->(b)
+DELETE r
+
+// Delete node (must have no relationships)
+MATCH (n:Orphan)
+DELETE n
+
+// Delete node and all relationships
+MATCH (n:Person {name: 'Bob'})
+DETACH DELETE n
+```
+
+#### REMOVE
+
+Removes properties and labels.
+
+```cypher
+// Remove property
+REMOVE n.temporary
+
+// Remove label
+REMOVE n:OldLabel
+```
+
+### Projection Clauses
+
+#### RETURN
+
+Specifies output.
+
+```cypher
+// Return nodes
+RETURN n
+
+// Return properties
+RETURN n.name, n.age
+
+// Return with alias
+RETURN n.name AS name, n.age AS age
+
+// Return all
+RETURN *
+
+// Return distinct
+RETURN DISTINCT n.category
+
+// Return expression
+RETURN n.price * n.quantity AS total
+```
+
+#### WITH
+
+Passes results between query parts. **Critical for multi-part queries**.
+
+```cypher
+// Filter and pass
+MATCH (n:Person)
+WITH n WHERE n.age > 21
+RETURN n
+
+// Aggregate and continue
+MATCH (n:Person)-[:BOUGHT]->(p:Product)
+WITH n, count(p) AS purchases
+WHERE purchases > 5
+RETURN n.name, purchases
+
+// Order and limit mid-query
+MATCH (n:Person)
+WITH n ORDER BY n.age DESC LIMIT 10
+MATCH (n)-[:LIVES_IN]->(c:City)
+RETURN n.name, c.name
+```
+
+**WITH resets scope**: Variables not listed in WITH are no longer available.
+
+#### ORDER BY
+
+Sorts results.
+
+```cypher
+ORDER BY n.name // Ascending (default)
+ORDER BY n.name ASC // Explicit ascending
+ORDER BY n.name DESC // Descending
+ORDER BY n.lastName, n.firstName // Multiple fields
+ORDER BY n.priority DESC, n.name // Mixed
+```
+
+#### SKIP and LIMIT
+
+Pagination.
+
+```cypher
+// Skip first 10
+SKIP 10
+
+// Return only 20
+LIMIT 20
+
+// Pagination
+ORDER BY n.created_at DESC
+SKIP $offset LIMIT $pageSize
+```
+
+### Sub-queries
+
+#### CALL (Subquery)
+
+Execute subquery for each row.
+
+```cypher
+MATCH (p:Person)
+CALL {
+ WITH p
+ MATCH (p)-[:BOUGHT]->(prod:Product)
+ RETURN count(prod) AS purchaseCount
+}
+RETURN p.name, purchaseCount
+```
+
+#### UNION
+
+Combine results from multiple queries.
+
+```cypher
+MATCH (n:Person) RETURN n.name AS name
+UNION
+MATCH (n:Company) RETURN n.name AS name
+
+// UNION ALL keeps duplicates
+MATCH (n:Person) RETURN n.name AS name
+UNION ALL
+MATCH (n:Company) RETURN n.name AS name
+```
+
+### Control Flow
+
+#### FOREACH
+
+Iterate over list, execute updates.
+
+```cypher
+// Set property on path nodes
+MATCH path = (a)-[*]->(b)
+FOREACH (n IN nodes(path) | SET n.visited = true)
+
+// Conditional operation (common pattern)
+OPTIONAL MATCH (target:Node {id: $id})
+FOREACH (_ IN CASE WHEN target IS NOT NULL THEN [1] ELSE [] END |
+ CREATE (source)-[:LINKS_TO]->(target)
+)
+```
+
+#### CASE
+
+Conditional expressions.
+
+```cypher
+// Simple CASE
+RETURN CASE n.status
+ WHEN 'active' THEN 'A'
+ WHEN 'pending' THEN 'P'
+ ELSE 'X'
+END AS code
+
+// Generic CASE
+RETURN CASE
+ WHEN n.age < 18 THEN 'minor'
+ WHEN n.age < 65 THEN 'adult'
+ ELSE 'senior'
+END AS category
+```
+
+## Operators
+
+### Comparison
+
+| Operator | Description |
+|----------|-------------|
+| `=` | Equal |
+| `<>` | Not equal |
+| `<` | Less than |
+| `>` | Greater than |
+| `<=` | Less than or equal |
+| `>=` | Greater than or equal |
+| `IS NULL` | Is null |
+| `IS NOT NULL` | Is not null |
+
+### Boolean
+
+| Operator | Description |
+|----------|-------------|
+| `AND` | Logical AND |
+| `OR` | Logical OR |
+| `NOT` | Logical NOT |
+| `XOR` | Exclusive OR |
+
+### String
+
+| Operator | Description |
+|----------|-------------|
+| `STARTS WITH` | Prefix match |
+| `ENDS WITH` | Suffix match |
+| `CONTAINS` | Substring match |
+| `=~` | Regex match |
+
+### List
+
+| Operator | Description |
+|----------|-------------|
+| `IN` | List membership |
+| `+` | List concatenation |
+
+### Mathematical
+
+| Operator | Description |
+|----------|-------------|
+| `+` | Addition |
+| `-` | Subtraction |
+| `*` | Multiplication |
+| `/` | Division |
+| `%` | Modulo |
+| `^` | Exponentiation |
+
+## Functions
+
+### Aggregation
+
+```cypher
+count(*) // Count rows
+count(n) // Count non-null
+count(DISTINCT n) // Count unique
+sum(n.value) // Sum
+avg(n.value) // Average
+min(n.value) // Minimum
+max(n.value) // Maximum
+collect(n) // Collect to list
+collect(DISTINCT n) // Collect unique
+stdev(n.value) // Standard deviation
+percentileCont(n.value, 0.5) // Median
+```
+
+### Scalar
+
+```cypher
+// Type functions
+id(n) // Internal node ID (deprecated, use elementId)
+elementId(n) // Element ID string
+labels(n) // Node labels
+type(r) // Relationship type
+properties(n) // Property map
+
+// Math
+abs(x)
+ceil(x)
+floor(x)
+round(x)
+sign(x)
+sqrt(x)
+rand() // Random 0-1
+
+// String
+size(str) // String length
+toLower(str)
+toUpper(str)
+trim(str)
+ltrim(str)
+rtrim(str)
+replace(str, from, to)
+substring(str, start, len)
+left(str, len)
+right(str, len)
+split(str, delimiter)
+reverse(str)
+toString(val)
+
+// Null handling
+coalesce(val1, val2, ...) // First non-null
+nullIf(val1, val2) // NULL if equal
+
+// Type conversion
+toInteger(val)
+toFloat(val)
+toBoolean(val)
+toString(val)
+```
+
+### List Functions
+
+```cypher
+size(list) // List length
+head(list) // First element
+tail(list) // All but first
+last(list) // Last element
+range(start, end) // Create range [start..end]
+range(start, end, step)
+reverse(list)
+keys(map) // Map keys as list
+values(map) // Map values as list
+
+// List predicates
+any(x IN list WHERE predicate)
+all(x IN list WHERE predicate)
+none(x IN list WHERE predicate)
+single(x IN list WHERE predicate)
+
+// List manipulation
+[x IN list WHERE predicate] // Filter
+[x IN list | expression] // Map
+[x IN list WHERE pred | expr] // Filter and map
+reduce(s = initial, x IN list | s + x) // Reduce
+```
+
+### Path Functions
+
+```cypher
+nodes(path) // Nodes in path
+relationships(path) // Relationships in path
+length(path) // Number of relationships
+shortestPath((a)-[*]-(b))
+allShortestPaths((a)-[*]-(b))
+```
+
+### Temporal Functions
+
+```cypher
+timestamp() // Current Unix timestamp (ms)
+datetime() // Current datetime
+date() // Current date
+time() // Current time
+duration({days: 1, hours: 12})
+
+// Components
+datetime().year
+datetime().month
+datetime().day
+datetime().hour
+
+// Parsing
+date('2024-01-15')
+datetime('2024-01-15T10:30:00Z')
+```
+
+### Spatial Functions
+
+```cypher
+point({x: 1, y: 2})
+point({latitude: 37.5, longitude: -122.4})
+point.distance(point1, point2) // Neo4j 5 (top-level distance() removed)
+```
+
+## Comments
+
+```cypher
+// Single line comment
+
+/* Multi-line
+ comment */
+```
+
+## Transaction Control
+
+```cypher
+// In procedures/transactions
+:begin
+:commit
+:rollback
+```
+
+## Parameter Syntax
+
+```cypher
+// Parameter reference
+$paramName
+
+// In properties
+{key: $value}
+
+// In WHERE
+WHERE n.id = $id
+
+// In expressions
+RETURN $multiplier * n.value
+```
diff --git a/.claude/skills/distributed-systems/SKILL.md b/.claude/skills/distributed-systems/SKILL.md
new file mode 100644
index 00000000..c5af8b33
--- /dev/null
+++ b/.claude/skills/distributed-systems/SKILL.md
@@ -0,0 +1,1115 @@
+---
+name: distributed-systems
+description: This skill should be used when designing or implementing distributed systems, understanding consensus protocols (Paxos, Raft, PBFT, Nakamoto, PnyxDB), analyzing CAP theorem trade-offs, implementing logical clocks (Lamport, Vector, ITC), or building fault-tolerant architectures. Provides comprehensive knowledge of consensus algorithms, Byzantine fault tolerance, adversarial oracle protocols, replication strategies, causality tracking, and distributed system design principles.
+---
+
+# Distributed Systems
+
+This skill provides deep knowledge of distributed systems design, consensus protocols, fault tolerance, and the fundamental trade-offs in building reliable distributed architectures.
+
+## When to Use This Skill
+
+- Designing distributed databases or storage systems
+- Implementing consensus protocols (Raft, Paxos, PBFT, Nakamoto, PnyxDB)
+- Analyzing system trade-offs using CAP theorem
+- Building fault-tolerant or Byzantine fault-tolerant systems
+- Understanding replication and consistency models
+- Implementing causality tracking with logical clocks
+- Building blockchain consensus mechanisms
+- Designing decentralized oracle systems
+- Understanding adversarial attack vectors in distributed systems
+
+## CAP Theorem
+
+### The Fundamental Trade-off
+
+The CAP theorem, introduced by Eric Brewer in 2000, states that a distributed data store cannot simultaneously provide more than two of:
+
+1. **Consistency (C)**: Every read receives the most recent write or an error
+2. **Availability (A)**: Every request receives a non-error response (without guarantee of most recent data)
+3. **Partition Tolerance (P)**: System continues operating despite network partitions
+
+### Why P is Non-Negotiable
+
+In any distributed system over a network:
+- Network partitions **will** occur (cable cuts, router failures, congestion)
+- A system that isn't partition-tolerant isn't truly distributed
+- The real choice is between **CP** and **AP** during partitions
+
+### System Classifications
+
+#### CP Systems (Consistency + Partition Tolerance)
+
+**Behavior during partition**: Refuses some requests to maintain consistency.
+
+**Examples**:
+- MongoDB (with majority write concern)
+- HBase
+- Zookeeper
+- etcd
+
+**Use when**:
+- Correctness is paramount (financial systems)
+- Stale reads are unacceptable
+- Brief unavailability is tolerable
+
+#### AP Systems (Availability + Partition Tolerance)
+
+**Behavior during partition**: Continues serving requests, may return stale data.
+
+**Examples**:
+- Cassandra
+- DynamoDB
+- CouchDB
+- Riak
+
+**Use when**:
+- High availability is critical
+- Eventual consistency is acceptable
+- Shopping carts, social media feeds
+
+#### CA Systems
+
+**Theoretical only**: Cannot exist in distributed systems because partitions are inevitable.
+
+Single-node databases are technically CA but aren't distributed.
+
+### PACELC Extension
+
+PACELC extends CAP to address normal operation:
+
+> If there is a **P**artition, choose between **A**vailability and **C**onsistency.
+> **E**lse (normal operation), choose between **L**atency and **C**onsistency.
+
+| System | P: A or C | E: L or C |
+|--------|-----------|-----------|
+| DynamoDB | A | L |
+| Cassandra | A | L |
+| MongoDB | C | C |
+| PNUTS | C | L |
+
+## Consistency Models
+
+### Strong Consistency
+
+Every read returns the most recent write. Achieved through:
+- Single leader with synchronous replication
+- Consensus protocols (Paxos, Raft)
+
+**Trade-off**: Higher latency, lower availability during failures.
+
+### Eventual Consistency
+
+If no new updates, all replicas eventually converge to the same state.
+
+**Variants**:
+- **Causal consistency**: Preserves causally related operations order
+- **Read-your-writes**: Clients see their own writes
+- **Monotonic reads**: Never see older data after seeing newer
+- **Session consistency**: Consistency within a session
+
+### Linearizability
+
+Operations appear instantaneous at some point between invocation and response.
+
+**Provides**:
+- Single-object operations appear atomic
+- Real-time ordering guarantees
+- Foundation for distributed locks, leader election
+
+### Serializability
+
+Transactions appear to execute in some serial order.
+
+**Note**: Linearizability ≠ Serializability
+- Linearizability: Single-operation recency guarantee
+- Serializability: Multi-operation isolation guarantee
+
+## Consensus Protocols
+
+### The Consensus Problem
+
+Getting distributed nodes to agree on a single value despite failures.
+
+**Requirements**:
+1. **Agreement**: All correct nodes decide on the same value
+2. **Validity**: Decided value was proposed by some node
+3. **Termination**: All correct nodes eventually decide
+
+### Paxos
+
+Developed by Leslie Lamport (1989/1998), foundational consensus algorithm.
+
+#### Roles
+
+- **Proposers**: Propose values
+- **Acceptors**: Vote on proposals
+- **Learners**: Learn decided values
+
+#### Basic Protocol (Single-Decree)
+
+**Phase 1a: Prepare**
+```
+Proposer → Acceptors: PREPARE(n)
+ - n is unique proposal number
+```
+
+**Phase 1b: Promise**
+```
+Acceptor → Proposer: PROMISE(n, accepted_proposal)
+ - If n > highest_seen: promise to ignore lower proposals
+ - Return previously accepted proposal if any
+```
+
+**Phase 2a: Accept**
+```
+Proposer → Acceptors: ACCEPT(n, v)
+ - v = value from highest accepted proposal, or proposer's own value
+```
+
+**Phase 2b: Accepted**
+```
+Acceptor → Learners: ACCEPTED(n, v)
+ - If n >= highest_promised: accept the proposal
+```
+
+**Decision**: Value is decided when majority of acceptors accept it.
+
+#### Multi-Paxos
+
+Optimization for sequences of values:
+- Elect stable leader
+- Skip Phase 1 for subsequent proposals
+- Significantly reduces message complexity
+
+#### Strengths and Weaknesses
+
+**Strengths**:
+- Proven correct
+- Tolerates f failures with 2f+1 nodes
+- Foundation for many systems
+
+**Weaknesses**:
+- Complex to implement correctly
+- No specified leader election
+- Performance requires Multi-Paxos optimizations
+
+### Raft
+
+Designed by Diego Ongaro and John Ousterhout (2013) for understandability.
+
+#### Key Design Principles
+
+1. **Decomposition**: Separates leader election, log replication, safety
+2. **State reduction**: Minimizes states to consider
+3. **Strong leader**: All writes through leader
+
+#### Server States
+
+- **Leader**: Handles all client requests, replicates log
+- **Follower**: Passive, responds to leader and candidates
+- **Candidate**: Trying to become leader
+
+#### Leader Election
+
+```
+1. Follower times out (no heartbeat from leader)
+2. Becomes Candidate, increments term, votes for self
+3. Requests votes from other servers
+4. Wins with majority votes → becomes Leader
+5. Loses (another leader) → becomes Follower
+6. Timeout → starts new election
+```
+
+**Safety**: Only candidates with up-to-date logs can win.
+
+#### Log Replication
+
+```
+1. Client sends command to Leader
+2. Leader appends to local log
+3. Leader sends AppendEntries to Followers
+4. On majority acknowledgment: entry is committed
+5. Leader applies to state machine, responds to client
+6. Followers apply committed entries
+```
+
+#### Log Matching Property
+
+If two logs contain entry with same index and term:
+- Entries are identical
+- All preceding entries are identical
+
+#### Term
+
+Logical clock that increases with each election:
+- Detects stale leaders
+- Resolves conflicts
+- Included in all messages
+
+#### Comparison with Paxos
+
+| Aspect | Paxos | Raft |
+|--------|-------|------|
+| Understandability | Complex | Designed for clarity |
+| Leader | Optional (Multi-Paxos) | Required |
+| Log gaps | Allowed | Not allowed |
+| Membership changes | Complex | Joint consensus |
+| Implementations | Many variants | Consistent |
+
+### PBFT (Practical Byzantine Fault Tolerance)
+
+Developed by Castro and Liskov (1999) for Byzantine faults.
+
+#### Byzantine Faults
+
+Nodes can behave arbitrarily:
+- Crash
+- Send incorrect messages
+- Collude maliciously
+- Act inconsistently to different nodes
+
+#### Fault Tolerance
+
+Tolerates f Byzantine faults with **3f+1** nodes.
+
+**Why 3f+1?**
+- Progress must be possible after hearing from only n - f nodes (f may be unresponsive)
+- Among those n - f replies, up to f could come from Byzantine nodes
+- Honest replies must outnumber Byzantine ones: (n - f) - f > f, so n > 3f, i.e. n >= 3f + 1
+
+#### Protocol Phases
+
+**Normal Operation** (leader is honest):
+
+```
+1. REQUEST: Client → Primary (leader)
+2. PRE-PREPARE: Primary → All replicas
+ - Primary assigns sequence number
+3. PREPARE: Each replica → All replicas
+ - Validates pre-prepare
+4. COMMIT: Each replica → All replicas
+ - After receiving 2f+1 prepares
+5. REPLY: Each replica → Client
+ - After receiving 2f+1 commits
+```
+
+**Client waits for f+1 matching replies**.
+
+#### View Change
+
+When primary appears faulty:
+1. Replicas timeout waiting for primary
+2. Broadcast VIEW-CHANGE with prepared certificates
+3. New primary collects 2f+1 view-changes
+4. Broadcasts NEW-VIEW with proof
+5. System resumes with new primary
+
+#### Message Complexity
+
+- **Normal case**: O(n²) messages per request
+- **View change**: O(n³) messages
+
+**Scalability challenge**: Quadratic messaging limits cluster size.
+
+#### Optimizations
+
+- **Speculative execution**: Execute before commit
+- **Batching**: Group multiple requests
+- **Signatures**: Use MACs instead of digital signatures
+- **Threshold signatures**: Reduce signature overhead
+
+### Modern BFT Variants
+
+#### HotStuff (2019)
+
+- Linear message complexity O(n)
+- Used in LibraBFT (Diem), other blockchains
+- Three-phase protocol with threshold signatures
+
+#### Tendermint
+
+- Blockchain-focused BFT
+- Integrated with Cosmos SDK
+- Immediate finality
+
+#### QBFT (Quorum BFT)
+
+- Enterprise-focused (ConsenSys/JPMorgan)
+- Enhanced IBFT for Ethereum-based systems
+
+### Nakamoto Consensus
+
+The consensus mechanism powering Bitcoin, introduced by Satoshi Nakamoto (2008).
+
+#### Core Innovation
+
+Combines three elements:
+1. **Proof-of-Work (PoW)**: Cryptographic puzzle for block creation
+2. **Longest Chain Rule**: Fork resolution by accumulated work
+3. **Probabilistic Finality**: Security increases with confirmations
+
+#### How It Works
+
+```
+1. Transactions broadcast to network
+2. Miners collect transactions into blocks
+3. Miners race to solve PoW puzzle:
+ - Find nonce such that Hash(block_header) < target
+ - Difficulty adjusts to maintain ~10 min block time
+4. First miner to solve broadcasts block
+5. Other nodes verify and append to longest chain
+6. Miner receives block reward + transaction fees
+```
+
+#### Longest Chain Rule
+
+When forks occur:
+```
+Chain A: [genesis] → [1] → [2] → [3]
+Chain B: [genesis] → [1] → [2'] → [3'] → [4']
+
+Nodes follow Chain B (more accumulated work)
+Chain A blocks become "orphaned"
+```
+
+**Note**: Actually "most accumulated work" not "most blocks"—a chain with fewer but harder blocks wins.
+
+#### Security Model
+
+**Honest Majority Assumption**: Protocol secure if honest mining power > 50%.
+
+Formal analysis (Ren 2019):
+```
+Safe if: g²α > β
+
+Where:
+ α = honest mining rate
+ β = adversarial mining rate
+ g = growth rate accounting for network delay
+ Δ = maximum network delay
+```
+
+**Implications**:
+- Larger block interval → more security margin
+- Higher network delay → need more honest majority
+- 10-minute block time provides safety margin for global network
+
+#### Probabilistic Finality
+
+No instant finality—deeper blocks are exponentially harder to reverse:
+
+| Confirmations | Attack Probability (30% attacker) |
+|---------------|-----------------------------------|
+| 1 | ~50% |
+| 3 | ~12% |
+| 6 | ~0.2% |
+| 12 | ~0.003% |
+
+**Convention**: 6 confirmations (~1 hour) considered "final" for Bitcoin.
+
+#### Attacks
+
+**51% Attack**: Attacker with majority hashrate can:
+- Double-spend transactions
+- Prevent confirmations
+- NOT: steal funds, change consensus rules, create invalid transactions
+
+**Selfish Mining**: Strategic block withholding to waste honest miners' work.
+- Profitable with < 50% hashrate under certain conditions
+- Mitigated by network propagation improvements
+
+**Long-Range Attacks**: Not applicable to PoW (unlike PoS).
+
+#### Trade-offs vs Traditional BFT
+
+| Aspect | Nakamoto | Classical BFT |
+|--------|----------|---------------|
+| Finality | Probabilistic | Immediate |
+| Throughput | Low (~7 TPS) | Higher |
+| Participants | Permissionless | Permissioned |
+| Energy | High (PoW) | Low |
+| Fault tolerance | 50% hashrate | 33% nodes |
+| Scalability | Global | Limited nodes |
+
+### PnyxDB: Leaderless Democratic BFT
+
+Developed by Bonniot, Neumann, and Taïani (2019) for consortia applications.
+
+#### Key Innovation: Conditional Endorsements
+
+Unlike leader-based BFT, PnyxDB uses **leaderless quorums** with conditional endorsements:
+- Endorsements track conflicts between transactions
+- If transactions commute (no conflicting operations), quorums built independently
+- Non-commuting transactions handled via Byzantine Veto Procedure (BVP)
+
+#### Transaction Lifecycle
+
+```
+1. Client broadcasts transaction to endorsers
+2. Endorsers evaluate against application-defined policies
+3. If no conflicts: endorser sends acknowledgment
+4. If conflicts detected: conditional endorsement specifying
+ which transactions must NOT be committed for this to be valid
+5. Transaction commits when quorum of valid endorsements collected
+6. BVP resolves conflicting transactions
+```
+
+#### Byzantine Veto Procedure (BVP)
+
+Ensures termination with conflicting transactions:
+- Transactions have deadlines
+- Conflicting endorsements trigger resolution loop
+- Protocol guarantees exit when deadline passes
+- At most f Byzantine nodes tolerated with n endorsers
+
+#### Application-Level Voting
+
+Unique feature: nodes can endorse or reject transactions based on **application-defined policies** without compromising consistency.
+
+Use cases:
+- Consortium governance decisions
+- Policy-based access control
+- Democratic decision making
+
+#### Performance
+
+Compared to BFT-SMaRt and Tendermint:
+- **11x faster** commit latencies
+- **< 5 seconds** in worldwide geo-distributed deployment
+- Tested with **180 nodes**
+
+#### Implementation
+
+- Written in Go (requires Go 1.11+)
+- Uses gossip broadcast for message propagation
+- Web-of-trust node authentication
+- Scales to hundreds/thousands of nodes
+
+## Replication Strategies
+
+### Single-Leader Replication
+
+```
+Clients → Leader → Followers
+```
+
+**Pros**: Simple, strong consistency possible
+**Cons**: Leader bottleneck, failover complexity
+
+#### Synchronous vs Asynchronous
+
+| Type | Durability | Latency | Availability |
+|------|------------|---------|--------------|
+| Synchronous | Guaranteed | High | Lower |
+| Asynchronous | At-risk | Low | Higher |
+| Semi-synchronous | Balanced | Medium | Medium |
+
+### Multi-Leader Replication
+
+Multiple nodes accept writes, replicate to each other.
+
+**Use cases**:
+- Multi-datacenter deployment
+- Clients with offline operation
+
+**Challenges**:
+- Write conflicts
+- Conflict resolution complexity
+
+#### Conflict Resolution
+
+- **Last-write-wins (LWW)**: Timestamp-based, may lose data
+- **Application-specific**: Custom merge logic
+- **CRDTs**: Mathematically guaranteed convergence
+
+### Leaderless Replication
+
+Any node can accept reads and writes.
+
+**Examples**: Dynamo, Cassandra, Riak
+
+#### Quorum Reads/Writes
+
+```
+n = total replicas
+w = write quorum (nodes that must acknowledge write)
+r = read quorum (nodes that must respond to read)
+
+For strong consistency: w + r > n
+```
+
+**Common configurations**:
+- n=3, w=2, r=2: Tolerates 1 failure
+- n=5, w=3, r=3: Tolerates 2 failures
+
+#### Sloppy Quorums and Hinted Handoff
+
+During partitions:
+- Write to available nodes (even if not home replicas)
+- "Hints" stored for unavailable nodes
+- Hints replayed when nodes recover
+
+## Failure Modes
+
+### Crash Failures
+
+Node stops responding. Simplest failure model.
+
+**Detection**: Heartbeats, timeouts
+**Tolerance**: 2f+1 nodes for f failures (Paxos, Raft)
+
+### Byzantine Failures
+
+Arbitrary behavior including malicious.
+
+**Detection**: Difficult without redundancy
+**Tolerance**: 3f+1 nodes for f failures (PBFT)
+
+### Network Partitions
+
+Nodes can't communicate with some other nodes.
+
+**Impact**: Forces CP vs AP choice
+**Recovery**: Reconciliation after partition heals
+
+### Split Brain
+
+Multiple nodes believe they are leader.
+
+**Prevention**:
+- Fencing (STONITH: Shoot The Other Node In The Head)
+- Quorum-based leader election
+- Lease-based leadership
+
+## Design Patterns
+
+### State Machine Replication
+
+Replicate deterministic state machine across nodes:
+1. All replicas start in same state
+2. Apply same commands in same order
+3. All reach same final state
+
+**Requires**: Total order broadcast (consensus)
+
+### Chain Replication
+
+```
+Head → Node2 → Node3 → ... → Tail
+```
+
+- Writes enter at head, propagate down chain
+- Reads served by tail (strongly consistent)
+- Simple, high throughput
+
+### Primary-Backup
+
+Primary handles all operations, synchronously replicates to backups.
+
+**Failover**: Backup promoted to primary on failure.
+
+### Quorum Systems
+
+Intersecting sets ensure consistency:
+- Any read quorum intersects any write quorum
+- Guarantees reads see latest write
+
+## Balancing Trade-offs
+
+### Identifying Critical Requirements
+
+1. **Correctness requirements**
+ - Is data loss acceptable?
+ - Can operations be reordered?
+ - Are conflicts resolvable?
+
+2. **Availability requirements**
+ - What's acceptable downtime?
+ - Geographic distribution needs?
+ - Partition recovery strategy?
+
+3. **Performance requirements**
+ - Latency targets?
+ - Throughput needs?
+ - Consistency cost tolerance?
+
+### Vulnerability Mitigation by Protocol
+
+#### Paxos/Raft (Crash Fault Tolerant)
+
+**Vulnerabilities**:
+- Leader failure causes brief unavailability
+- Split-brain without proper fencing
+- Slow follower impacts commit latency (sync replication)
+
+**Mitigations**:
+- Fast leader election (pre-voting)
+- Quorum-based fencing
+- Flexible quorum configurations
+- Learner nodes for read scaling
+
+#### PBFT (Byzantine Fault Tolerant)
+
+**Vulnerabilities**:
+- O(n²) messages limit scalability
+- View change is expensive
+- Requires 3f+1 nodes (more infrastructure)
+
+**Mitigations**:
+- Batching and pipelining
+- Optimistic execution (HotStuff)
+- Threshold signatures
+- Hierarchical consensus for scaling
+
+### Choosing the Right Protocol
+
+| Scenario | Recommended | Rationale |
+|----------|-------------|-----------|
+| Internal infrastructure | Raft | Simple, well-understood |
+| High consistency needs | Raft/Paxos | Proven correctness |
+| Public/untrusted network | PBFT variant | Byzantine tolerance |
+| Blockchain | HotStuff/Tendermint | Linear complexity BFT |
+| Eventually consistent | Dynamo-style | High availability |
+| Global distribution | Multi-leader + CRDTs | Partition tolerance |
+
+## Implementation Considerations
+
+### Timeouts
+
+- **Heartbeat interval**: 100-300ms typical
+- **Election timeout**: 10x heartbeat (avoid split votes)
+- **Request timeout**: Application-dependent
+
+### Persistence
+
+What must be persisted before acknowledgment:
+- **Raft**: Current term, voted-for, log entries
+- **PBFT**: View number, prepared/committed certificates
+
+### Membership Changes
+
+Dynamic cluster membership:
+- **Raft**: Joint consensus (old + new config)
+- **Paxos**: α-reconfiguration
+- **PBFT**: View change with new configuration
+
+### Testing
+
+- **Jepsen**: Distributed systems testing framework
+- **Chaos engineering**: Intentional failure injection
+- **Formal verification**: TLA+, Coq proofs
+
+## Adversarial Oracle Protocols
+
+Oracles bridge on-chain smart contracts with off-chain data, but introduce trust assumptions into trustless systems.
+
+### The Oracle Problem
+
+**Definition**: The security, authenticity, and trust conflict between third-party oracles and the trustless execution of smart contracts.
+
+**Core Challenge**: Blockchains cannot verify correctness of external data. Oracles become:
+- Single points of failure
+- Targets for manipulation
+- Trust assumptions in "trustless" systems
+
+### Attack Vectors
+
+#### Price Oracle Manipulation
+
+**Flash Loan Attacks**:
+```
+1. Borrow large amount via flash loan (no collateral)
+2. Manipulate price on DEX (large trade)
+3. Oracle reads manipulated price
+4. Smart contract executes with wrong price
+5. Profit from arbitrage/liquidation
+6. Repay flash loan in same transaction
+```
+
+**Notable Example**: Harvest Finance ($30M+ loss, 2020)
+
+#### Data Source Attacks
+
+- **Compromised API**: Single data source manipulation
+- **Front-running**: Oracle updates exploited before on-chain
+- **Liveness attacks**: Preventing oracle updates
+- **Bribery**: Incentivizing oracle operators to lie
+
+#### Economic Attacks
+
+**Cost of Corruption Analysis**:
+```
+If oracle controls value V:
+ - Attack profit: V
+ - Attack cost: oracle stake + reputation
+ - Rational to attack if: profit > cost
+```
+
+**Implication**: Oracles must have stake > value they secure.
+
+### Decentralized Oracle Networks (DONs)
+
+#### Chainlink Model
+
+**Multi-layer Security**:
+```
+1. Multiple independent data sources
+2. Multiple independent node operators
+3. Aggregation (median, weighted average)
+4. Reputation system
+5. Cryptoeconomic incentives (staking)
+```
+
+**Data Aggregation**:
+```
+Nodes: [Oracle₁: $100, Oracle₂: $101, Oracle₃: $150, Oracle₄: $100]
+Median: $100.50
+Outlier (Oracle₃) has minimal impact
+```
+
+#### Reputation and Staking
+
+```
+Node reputation based on:
+ - Historical accuracy
+ - Response time
+ - Uptime
+ - Stake amount
+
+Job assignment weighted by reputation
+Slashing for misbehavior
+```
+
+### Oracle Design Patterns
+
+#### Time-Weighted Average Price (TWAP)
+
+Resist single-block manipulation:
+```
+TWAP = Σ(price_i × duration_i) / total_duration
+
+Example over 1 hour:
+ - 30 min at $100: 30 × 100 = 3000
+ - 20 min at $101: 20 × 101 = 2020
+ - 10 min at $150 (manipulation): 10 × 150 = 1500
+ TWAP = 6520 / 60 = $108.67 (vs $150 spot)
+```
+
+#### Commit-Reveal Schemes
+
+Prevent front-running oracle updates:
+```
+Phase 1 (Commit):
+ - Oracle commits: hash(price || salt)
+ - Cannot be read by others
+
+Phase 2 (Reveal):
+ - Oracle reveals: price, salt
+ - Contract verifies hash matches
+ - All oracles reveal simultaneously
+```
+
+#### Schelling Points
+
+Game-theoretic oracle coordination:
+```
+1. Multiple oracles submit answers
+2. Consensus answer determined
+3. Oracles matching consensus rewarded
+4. Outliers penalized
+
+Assumption: Honest answer is "obvious" Schelling point
+```
+
+### Trusted Execution Environments (TEEs)
+
+Hardware-based oracle security:
+```
+TEE (Intel SGX, ARM TrustZone):
+ - Isolated execution environment
+ - Code attestation
+ - Protected memory
+ - External data fetching inside enclave
+```
+
+**Benefits**:
+- Verifiable computation
+- Protected from host machine
+- Cryptographic proofs of execution
+
+**Limitations**:
+- Hardware trust assumption
+- Side-channel attacks possible
+- Intel SGX vulnerabilities discovered
+
+### Oracle Types by Data Source
+
+| Type | Source | Trust Model | Use Case |
+|------|--------|-------------|----------|
+| Price feeds | Exchanges | Multiple sources | DeFi |
+| Randomness | VRF/DRAND | Cryptographic | Gaming, NFTs |
+| Event outcomes | Manual report | Reputation | Prediction markets |
+| Cross-chain | Other blockchains | Bridge security | Interoperability |
+| Computation | Off-chain compute | Verifiable | Complex logic |
+
+### Defense Mechanisms
+
+1. **Diversification**: Multiple independent oracles
+2. **Economic security**: Stake > protected value
+3. **Time delays**: Allow dispute periods
+4. **Circuit breakers**: Pause on anomalous data
+5. **TWAP**: Resist flash manipulation
+6. **Commit-reveal**: Prevent front-running
+7. **Reputation**: Long-term incentives
+
+### Hybrid Approaches
+
+**Optimistic Oracles**:
+```
+1. Oracle posts answer + bond
+2. Dispute window (e.g., 2 hours)
+3. If disputed: escalate to arbitration
+4. If not disputed: answer accepted
+5. Incorrect oracle loses bond
+```
+
+**Examples**: UMA Protocol, Optimistic Oracle
+
+## Causality and Logical Clocks
+
+Physical clocks cannot reliably order events in distributed systems due to clock drift and synchronization issues. Logical clocks provide ordering based on causality.
+
+### The Happened-Before Relation
+
+Defined by Leslie Lamport (1978):
+
+Event a **happened-before** event b (a → b) if:
+1. a and b are in the same process, and a comes before b
+2. a is a send event and b is the corresponding receive
+3. There exists c such that a → c and c → b (transitivity)
+
+If neither a → b nor b → a, events are **concurrent** (a || b).
+
+### Lamport Clocks
+
+Simple scalar timestamps providing partial ordering.
+
+**Rules**:
+```
+1. Each process maintains counter C
+2. Before each event: C = C + 1
+3. Send message m with timestamp C
+4. On receive: C = max(C, message_timestamp) + 1
+```
+
+**Properties**:
+- If a → b, then C(a) < C(b)
+- **Limitation**: C(a) < C(b) does NOT imply a → b
+- Cannot detect concurrent events
+
+**Use cases**:
+- Total ordering with tie-breaker (process ID)
+- Distributed snapshots
+- Simple event ordering
+
+### Vector Clocks
+
+Array of counters, one per process. Captures full causality.
+
+**Structure** (for n processes):
+```
+VC[1..n] where VC[i] is process i's logical time
+```
+
+**Rules** (at process i):
+```
+1. Before each event: VC[i] = VC[i] + 1
+2. Send message with full vector VC
+3. On receive from j:
+ for k in 1..n:
+ VC[k] = max(VC[k], received_VC[k])
+ VC[i] = VC[i] + 1
+```
+
+**Comparison** (for vectors V1 and V2):
+```
+V1 = V2 iff ∀i: V1[i] = V2[i]
+V1 ≤ V2 iff ∀i: V1[i] ≤ V2[i]
+V1 < V2 iff V1 ≤ V2 and V1 ≠ V2
+V1 || V2 iff NOT(V1 ≤ V2) and NOT(V2 ≤ V1) # concurrent
+```
+
+**Properties**:
+- a → b iff VC(a) < VC(b)
+- a || b iff VC(a) || VC(b)
+- **Full causality detection**
+
+**Trade-off**: O(n) space per event, where n = number of processes.
+
+### Interval Tree Clocks (ITC)
+
+Developed by Almeida, Baquero, and Fonte (2008) for dynamic systems.
+
+**Problem with Vector Clocks**:
+- Static: size fixed to max number of processes
+- ID retirement requires global coordination
+- Unsuitable for high-churn systems (P2P)
+
+**ITC Solution**:
+- Binary tree structure for ID space
+- Dynamic ID allocation and deallocation
+- Localized fork/join operations
+
+**Core Operations**:
+
+```
+fork(id): Split ID into two children
+ - Parent retains left half
+ - New process gets right half
+
+join(id1, id2): Merge two IDs
+ - Combine ID trees
+ - Localized operation, no global coordination
+
+event(id, stamp): Increment logical clock
+peek(id, stamp): Read without increment
+```
+
+**ID Space Representation**:
+```
+ 1 # Full ID space
+ / \
+ 0 1 # After one fork
+ / \
+ 0 1 # After another fork (left child)
+```
+
+**Stamp (Clock) Representation**:
+- Tree structure mirrors ID space
+- Each node has base value + optional children
+- Efficient representation of sparse vectors
+
+**Example**:
+```
+Initial: id=(1), stamp=0
+Fork: id1=(1,0), stamp1=0
+ id2=(0,1), stamp2=0
+Event at id1: stamp1=(0,(1,0))
+Join id1+id2: id=(1), stamp=max of both
+```
+
+**Advantages over Vector Clocks**:
+- Constant-size representation possible
+- Dynamic membership without global state
+- Efficient ID garbage collection
+- Causality preserved across reconfigurations
+
+**Use cases**:
+- Peer-to-peer systems
+- Mobile/ad-hoc networks
+- Systems with frequent node join/leave
+
+### Version Vectors
+
+Specialization of vector clocks for tracking data versions.
+
+**Difference from Vector Clocks**:
+- Vector clocks: track all events
+- Version vectors: track data updates only
+
+**Usage in Dynamo-style systems**:
+```
+Client reads with version vector V1
+Client writes with version vector V2
+Server compares:
+ - If V1 < current: stale read, conflict possible
+ - If V1 = current: safe update
+ - If V1 || current: concurrent writes, need resolution
+```
+
+### Hybrid Logical Clocks (HLC)
+
+Combines physical and logical time.
+
+**Structure**:
+```
+HLC = (physical_time, logical_counter)
+```
+
+**Rules**:
+```
+1. On local/send event:
+ pt = physical_clock()
+ if pt > l:
+ l = pt
+ c = 0
+ else:
+ c = c + 1
+ return (l, c)
+
+2. On receive with timestamp (l', c'):
+ pt = physical_clock()
+ if pt > l and pt > l':
+ l = pt
+ c = 0
+ elif l' > l:
+ l = l'
+ c = c' + 1
+ elif l > l':
+ c = c + 1
+ else: # l = l'
+ c = max(c, c') + 1
+ return (l, c)
+```
+
+**Properties**:
+- Bounded drift from physical time
+- Captures causality like Lamport clocks
+- Timestamps comparable to wall-clock time
+- Used in CockroachDB, Google Spanner
+
+### Comparison of Logical Clocks
+
+| Clock Type | Space | Causality | Concurrency | Dynamic |
+|------------|-------|-----------|-------------|---------|
+| Lamport | O(1) | Partial | No | Yes |
+| Vector | O(n) | Full | Yes | No |
+| ITC | O(log n)* | Full | Yes | Yes |
+| HLC | O(1) | Partial | No | Yes |
+
+*ITC space varies based on tree structure
+
+### Practical Applications
+
+**Conflict Detection** (Vector Clocks):
+```
+if V1 < V2:
+    # v1 is ancestor of v2, no conflict
+elif V1 > V2:
+    # v2 is ancestor of v1, no conflict
+elif V1 == V2:
+    # Same version, no conflict
+else: # V1 || V2
+    # Concurrent updates, need conflict resolution
+
+**Causal Broadcast**:
+```
+Deliver message m with VC only when:
+1. VC[sender] = local_VC[sender] + 1 (next expected from sender)
+2. ∀j ≠ sender: VC[j] ≤ local_VC[j] (all causal deps satisfied)
+```
+
+**Snapshot Algorithms**:
+```
+Consistent cut: set of events S where
+ if e ∈ S and f → e, then f ∈ S
+Vector clocks make this efficiently verifiable
+```
+
+## References
+
+For detailed protocol specifications and proofs, see:
+- `references/consensus-protocols.md` - Detailed protocol descriptions
+- `references/consistency-models.md` - Formal consistency definitions
+- `references/failure-scenarios.md` - Failure mode analysis
+- `references/logical-clocks.md` - Clock algorithms and implementations
diff --git a/.claude/skills/distributed-systems/references/consensus-protocols.md b/.claude/skills/distributed-systems/references/consensus-protocols.md
new file mode 100644
index 00000000..a3bfb071
--- /dev/null
+++ b/.claude/skills/distributed-systems/references/consensus-protocols.md
@@ -0,0 +1,610 @@
+# Consensus Protocols - Detailed Reference
+
+Complete specifications and implementation details for major consensus protocols.
+
+## Paxos Complete Specification
+
+### Proposal Numbers
+
+Proposal numbers must be:
+- **Unique**: No two proposers use the same number
+- **Totally ordered**: Any two can be compared
+
+**Implementation**: `(round_number, proposer_id)` where proposer_id breaks ties.
+
+### Single-Decree Paxos State
+
+**Proposer state**:
+```
+proposal_number: int
+value: any
+```
+
+**Acceptor state (persistent)**:
+```
+highest_promised: int # Highest proposal number promised
+accepted_proposal: int # Number of accepted proposal (0 if none)
+accepted_value: any # Value of accepted proposal (null if none)
+```
+
+### Message Format
+
+**Prepare** (Phase 1a):
+```
+{
+ type: "PREPARE",
+ proposal_number: n
+}
+```
+
+**Promise** (Phase 1b):
+```
+{
+ type: "PROMISE",
+ proposal_number: n,
+ accepted_proposal: m, # null if nothing accepted
+ accepted_value: v # null if nothing accepted
+}
+```
+
+**Accept** (Phase 2a):
+```
+{
+ type: "ACCEPT",
+ proposal_number: n,
+ value: v
+}
+```
+
+**Accepted** (Phase 2b):
+```
+{
+ type: "ACCEPTED",
+ proposal_number: n,
+ value: v
+}
+```
+
+### Proposer Algorithm
+
+```
+function propose(value):
+ n = generate_proposal_number()
+
+ # Phase 1: Prepare
+ promises = []
+ for acceptor in acceptors:
+ send PREPARE(n) to acceptor
+
+ wait until |promises| > |acceptors|/2 or timeout
+
+ if timeout:
+ return FAILED
+
+ # Choose value
+ highest = max(promises, key=p.accepted_proposal)
+ if highest.accepted_value is not null:
+ value = highest.accepted_value
+
+ # Phase 2: Accept
+ accepts = []
+ for acceptor in acceptors:
+ send ACCEPT(n, value) to acceptor
+
+ wait until |accepts| > |acceptors|/2 or timeout
+
+ if timeout:
+ return FAILED
+
+ return SUCCESS(value)
+```
+
+### Acceptor Algorithm
+
+```
+on receive PREPARE(n):
+ if n > highest_promised:
+ highest_promised = n
+ persist(highest_promised)
+ reply PROMISE(n, accepted_proposal, accepted_value)
+ else:
+ # Optionally reply NACK(highest_promised)
+ ignore or reject
+
+on receive ACCEPT(n, v):
+ if n >= highest_promised:
+ highest_promised = n
+ accepted_proposal = n
+ accepted_value = v
+ persist(highest_promised, accepted_proposal, accepted_value)
+ reply ACCEPTED(n, v)
+ else:
+ ignore or reject
+```
+
+### Multi-Paxos Optimization
+
+**Stable leader**:
+```
+# Leader election (using Paxos or other method)
+leader = elect_leader()
+
+# Leader's Phase 1 for all future instances
+leader sends PREPARE(n) for instance range [i, ∞)
+
+# For each command:
+function propose_as_leader(value, instance):
+ # Skip Phase 1 if already leader
+ for acceptor in acceptors:
+ send ACCEPT(n, value, instance) to acceptor
+ wait for majority ACCEPTED
+ return SUCCESS
+```
+
+### Paxos Safety Proof Sketch
+
+**Invariant**: If a value v is chosen for instance i, no other value can be chosen.
+
+**Proof**:
+1. Value chosen → accepted by majority with proposal n
+2. Any higher proposal n' must contact majority
+3. Majorities intersect → at least one acceptor has accepted v
+4. New proposer adopts v (or higher already-accepted value)
+5. By induction, all future proposals use v
+
+## Raft Complete Specification
+
+### State
+
+**All servers (persistent)**:
+```
+currentTerm: int # Latest term seen
+votedFor: ServerId # Candidate voted for in current term (null if none)
+log[]: LogEntry # Log entries
+```
+
+**All servers (volatile)**:
+```
+commitIndex: int # Highest log index known to be committed
+lastApplied: int # Highest log index applied to state machine
+```
+
+**Leader (volatile, reinitialized after election)**:
+```
+nextIndex[]: int # For each server, next log index to send
+matchIndex[]: int # For each server, highest log index replicated
+```
+
+**LogEntry**:
+```
+{
+ term: int,
+ command: any
+}
+```
+
+### RequestVote RPC
+
+**Request**:
+```
+{
+ term: int, # Candidate's term
+ candidateId: ServerId, # Candidate requesting vote
+ lastLogIndex: int, # Index of candidate's last log entry
+ lastLogTerm: int # Term of candidate's last log entry
+}
+```
+
+**Response**:
+```
+{
+ term: int, # currentTerm, for candidate to update itself
+ voteGranted: bool # True if candidate received vote
+}
+```
+
+**Receiver implementation**:
+```
+on receive RequestVote(term, candidateId, lastLogIndex, lastLogTerm):
+ if term < currentTerm:
+ return {term: currentTerm, voteGranted: false}
+
+ if term > currentTerm:
+ currentTerm = term
+ votedFor = null
+ convert to follower
+
+ # Check if candidate's log is at least as up-to-date as ours
+ ourLastTerm = log[len(log)-1].term if log else 0
+ ourLastIndex = len(log) - 1
+
+ logOK = (lastLogTerm > ourLastTerm) or
+ (lastLogTerm == ourLastTerm and lastLogIndex >= ourLastIndex)
+
+ if (votedFor is null or votedFor == candidateId) and logOK:
+ votedFor = candidateId
+ persist(currentTerm, votedFor)
+ reset election timer
+ return {term: currentTerm, voteGranted: true}
+
+ return {term: currentTerm, voteGranted: false}
+```
+
+### AppendEntries RPC
+
+**Request**:
+```
+{
+ term: int, # Leader's term
+ leaderId: ServerId, # For follower to redirect clients
+ prevLogIndex: int, # Index of log entry preceding new ones
+ prevLogTerm: int, # Term of prevLogIndex entry
+ entries[]: LogEntry, # Log entries to store (empty for heartbeat)
+ leaderCommit: int # Leader's commitIndex
+}
+```
+
+**Response**:
+```
+{
+ term: int, # currentTerm, for leader to update itself
+ success: bool # True if follower had matching prevLog entry
+}
+```
+
+**Receiver implementation**:
+```
+on receive AppendEntries(term, leaderId, prevLogIndex, prevLogTerm, entries, leaderCommit):
+ if term < currentTerm:
+ return {term: currentTerm, success: false}
+
+ reset election timer
+
+ if term > currentTerm:
+ currentTerm = term
+ votedFor = null
+
+ convert to follower
+
+ # Check log consistency
+ if prevLogIndex >= len(log) or
+ (prevLogIndex >= 0 and log[prevLogIndex].term != prevLogTerm):
+ return {term: currentTerm, success: false}
+
+ # Append new entries (handling conflicts)
+ for i, entry in enumerate(entries):
+ index = prevLogIndex + 1 + i
+ if index < len(log):
+ if log[index].term != entry.term:
+ # Delete conflicting entry and all following
+ log = log[:index]
+ log.append(entry)
+ else:
+ log.append(entry)
+
+ persist(currentTerm, votedFor, log)
+
+ # Update commit index
+ if leaderCommit > commitIndex:
+ commitIndex = min(leaderCommit, len(log) - 1)
+
+ return {term: currentTerm, success: true}
+```
+
+### Leader Behavior
+
+```
+on becoming leader:
+ for each server:
+ nextIndex[server] = len(log)
+ matchIndex[server] = 0
+
+ start sending heartbeats
+
+on receiving client command:
+ append entry to local log
+ persist log
+ send AppendEntries to all followers
+
+on receiving AppendEntries response from server:
+ if response.success:
+ matchIndex[server] = prevLogIndex + len(entries)
+ nextIndex[server] = matchIndex[server] + 1
+
+ # Update commit index
+ for N from commitIndex+1 to len(log)-1:
+ if log[N].term == currentTerm and
+ |{s : matchIndex[s] >= N}| > |servers|/2:
+ commitIndex = N
+ else:
+ nextIndex[server] = max(1, nextIndex[server] - 1)
+ retry AppendEntries with lower prevLogIndex
+
+on commitIndex update:
+ while lastApplied < commitIndex:
+ lastApplied++
+ apply log[lastApplied].command to state machine
+```
+
+### Election Timeout
+
+```
+on election timeout (follower or candidate):
+ currentTerm++
+ convert to candidate
+ votedFor = self
+ persist(currentTerm, votedFor)
+ reset election timer
+ votes = 1 # Vote for self
+
+ for each server except self:
+ send RequestVote(currentTerm, self, lastLogIndex, lastLogTerm)
+
+ wait for responses or timeout:
+ if received votes > |servers|/2:
+ become leader
+ if received AppendEntries from valid leader:
+ become follower
+ if timeout:
+ start new election
+```
+
+## PBFT Complete Specification
+
+### Message Types
+
+**REQUEST**:
+```
+{
+ type: "REQUEST",
+ operation: o, # Operation to execute
+ timestamp: t, # Client timestamp (for reply matching)
+ client: c # Client identifier
+}
+```
+
+**PRE-PREPARE**:
+```
+{
+ type: "PRE-PREPARE",
+ view: v, # Current view number
+ sequence: n, # Sequence number
+ digest: d, # Hash of request
+ request: m # The request message
+}
+signature(primary)
+```
+
+**PREPARE**:
+```
+{
+ type: "PREPARE",
+ view: v,
+ sequence: n,
+ digest: d,
+ replica: i # Sending replica
+}
+signature(replica_i)
+```
+
+**COMMIT**:
+```
+{
+ type: "COMMIT",
+ view: v,
+ sequence: n,
+ digest: d,
+ replica: i
+}
+signature(replica_i)
+```
+
+**REPLY**:
+```
+{
+ type: "REPLY",
+ view: v,
+ timestamp: t,
+ client: c,
+ replica: i,
+ result: r # Execution result
+}
+signature(replica_i)
+```
+
+### Replica State
+
+```
+view: int # Current view
+sequence: int # Last assigned sequence number (primary)
+log[]: {request, prepares, commits, state} # Log of requests
+prepared_certificates: {} # Prepared certificates (2f+1 prepares)
+committed_certificates: {} # Committed certificates (2f+1 commits)
+h: int # Low water mark
+H: int # High water mark (h + L)
+```
+
+### Normal Operation Protocol
+
+**Primary (replica p = v mod n)**:
+```
+on receive REQUEST(m) from client:
+ if not primary for current view:
+ forward to primary
+ return
+
+ n = assign_sequence_number()
+ d = hash(m)
+
+ broadcast PRE-PREPARE(v, n, d, m) to all replicas
+ add to log
+```
+
+**All replicas**:
+```
+on receive PRE-PREPARE(v, n, d, m) from primary:
+ if v != current_view:
+ ignore
+ if already accepted pre-prepare for (v, n) with different digest:
+ ignore
+ if not in_view_as_backup(v):
+ ignore
+ if not h < n <= H:
+ ignore # Outside sequence window
+
+ # Valid pre-prepare
+ add to log
+ broadcast PREPARE(v, n, d, i) to all replicas
+
+on receive PREPARE(v, n, d, j) from replica j:
+ if v != current_view:
+ ignore
+
+ add to log[n].prepares
+
+ if |log[n].prepares| >= 2f and not already_prepared(v, n, d):
+ # Prepared certificate complete
+ mark as prepared
+ broadcast COMMIT(v, n, d, i) to all replicas
+
+on receive COMMIT(v, n, d, j) from replica j:
+ if v != current_view:
+ ignore
+
+ add to log[n].commits
+
+ if |log[n].commits| >= 2f + 1 and prepared(v, n, d):
+ # Committed certificate complete
+ if all entries < n are committed:
+ execute(m)
+ send REPLY(v, t, c, i, result) to client
+```
+
+### View Change Protocol
+
+**Timeout trigger**:
+```
+on request timeout (no progress):
+ view_change_timeout++
+ broadcast VIEW-CHANGE(v+1, n, C, P, i)
+
+ where:
+ n = last stable checkpoint sequence number
+ C = checkpoint certificate (2f+1 checkpoint messages)
+ P = set of prepared certificates for messages after n
+```
+
+**VIEW-CHANGE**:
+```
+{
+ type: "VIEW-CHANGE",
+ view: v, # New view number
+ sequence: n, # Checkpoint sequence
+ checkpoints: C, # Checkpoint certificate
+ prepared: P, # Set of prepared certificates
+ replica: i
+}
+signature(replica_i)
+```
+
+**New primary (p' = v mod n)**:
+```
+on receive 2f VIEW-CHANGE for view v:
+ V = set of valid view-change messages
+
+ # Compute O: set of requests to re-propose
+ O = {}
+ for seq in max_checkpoint_seq(V) to max_seq(V):
+ if exists prepared certificate for seq in V:
+ O[seq] = request from certificate
+ else:
+ O[seq] = null-request # No-op
+
+ broadcast NEW-VIEW(v, V, O)
+
+ # Re-run protocol for requests in O
+ for seq, request in O:
+ if request != null:
+ send PRE-PREPARE(v, seq, hash(request), request)
+```
+
+**NEW-VIEW**:
+```
+{
+ type: "NEW-VIEW",
+ view: v,
+ view_changes: V, # 2f+1 view-change messages
+ pre_prepares: O # Set of pre-prepare messages
+}
+signature(primary)
+```
+
+### Checkpointing
+
+Periodic stable checkpoints to garbage collect logs:
+
+```
+every K requests:
+ state_hash = hash(state_machine_state)
+ broadcast CHECKPOINT(n, state_hash, i)
+
+on receive 2f+1 CHECKPOINT for (n, d):
+ if all digests match:
+ create stable checkpoint
+ h = n # Move low water mark
+ garbage_collect(entries < n)
+```
+
+## HotStuff Protocol
+
+Linear complexity BFT using threshold signatures.
+
+### Key Innovation
+
+- **Three-phase voting**: prepare → pre-commit → commit, followed by a final decide step
+- **Pipelining**: Next proposal starts before current finishes
+- **Threshold signatures**: O(n) total messages instead of O(n²)
+
+### Message Flow
+
+```
+Phase 1 (Prepare):
+ Leader: broadcast PREPARE(v, node)
+ Replicas: sign and send partial signature to leader
+ Leader: aggregate into prepare certificate QC
+
+Phase 2 (Pre-commit):
+ Leader: broadcast PRE-COMMIT(v, QC_prepare)
+ Replicas: sign and send partial signature
+ Leader: aggregate into pre-commit certificate
+
+Phase 3 (Commit):
+ Leader: broadcast COMMIT(v, QC_precommit)
+ Replicas: sign and send partial signature
+ Leader: aggregate into commit certificate
+
+Phase 4 (Decide):
+ Leader: broadcast DECIDE(v, QC_commit)
+ Replicas: execute and commit
+```
+
+### Pipelining
+
+```
+Block k: [prepare] [pre-commit] [commit] [decide]
+Block k+1: [prepare] [pre-commit] [commit] [decide]
+Block k+2: [prepare] [pre-commit] [commit] [decide]
+```
+
+Each phase of block k+1 piggybacks on messages for block k.
+
+## Protocol Comparison Matrix
+
+| Feature | Paxos | Raft | PBFT | HotStuff |
+|---------|-------|------|------|----------|
+| Fault model | Crash | Crash | Byzantine | Byzantine |
+| Fault tolerance | f with 2f+1 | f with 2f+1 | f with 3f+1 | f with 3f+1 |
+| Message complexity | O(n) | O(n) | O(n²) | O(n) |
+| Leader required | No (helps) | Yes | Yes | Yes |
+| Phases | 2 | 2 | 3 | 3 |
+| View change | Complex | Simple | Complex | Simple |
diff --git a/.claude/skills/distributed-systems/references/logical-clocks.md b/.claude/skills/distributed-systems/references/logical-clocks.md
new file mode 100644
index 00000000..c8b09faf
--- /dev/null
+++ b/.claude/skills/distributed-systems/references/logical-clocks.md
@@ -0,0 +1,610 @@
+# Logical Clocks - Implementation Reference
+
+Detailed implementations and algorithms for causality tracking.
+
+## Lamport Clock Implementation
+
+### Data Structure
+
+```go
+type LamportClock struct {
+ counter uint64
+ mu sync.Mutex
+}
+
+func NewLamportClock() *LamportClock {
+ return &LamportClock{counter: 0}
+}
+```
+
+### Operations
+
+```go
+// Tick increments clock for local event
+func (c *LamportClock) Tick() uint64 {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.counter++
+ return c.counter
+}
+
+// Send returns timestamp for outgoing message
+func (c *LamportClock) Send() uint64 {
+ return c.Tick()
+}
+
+// Receive updates clock based on incoming message timestamp
+func (c *LamportClock) Receive(msgTime uint64) uint64 {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if msgTime > c.counter {
+ c.counter = msgTime
+ }
+ c.counter++
+ return c.counter
+}
+
+// Time returns current clock value without incrementing
+func (c *LamportClock) Time() uint64 {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.counter
+}
+```
+
+### Usage Example
+
+```go
+// Process A
+clockA := NewLamportClock()
+e1 := clockA.Tick() // Event 1: time=1
+msgTime := clockA.Send() // Send: time=2
+
+// Process B
+clockB := NewLamportClock()
+e2 := clockB.Tick() // Event 2: time=1
+e3 := clockB.Receive(msgTime) // Receive: time=3 (max(1,2)+1)
+```
+
+## Vector Clock Implementation
+
+### Data Structure
+
+```go
+type VectorClock struct {
+ clocks map[string]uint64 // processID -> logical time
+ self string // this process's ID
+ mu sync.RWMutex
+}
+
+func NewVectorClock(processID string, allProcesses []string) *VectorClock {
+ clocks := make(map[string]uint64)
+ for _, p := range allProcesses {
+ clocks[p] = 0
+ }
+ return &VectorClock{
+ clocks: clocks,
+ self: processID,
+ }
+}
+```
+
+### Operations
+
+```go
+// Tick increments own clock
+func (vc *VectorClock) Tick() map[string]uint64 {
+ vc.mu.Lock()
+ defer vc.mu.Unlock()
+
+ vc.clocks[vc.self]++
+ return vc.copy()
+}
+
+// Send returns copy of vector for message
+func (vc *VectorClock) Send() map[string]uint64 {
+ return vc.Tick()
+}
+
+// Receive merges incoming vector and increments
+func (vc *VectorClock) Receive(incoming map[string]uint64) map[string]uint64 {
+ vc.mu.Lock()
+ defer vc.mu.Unlock()
+
+ // Merge: take max of each component
+ for pid, time := range incoming {
+ if time > vc.clocks[pid] {
+ vc.clocks[pid] = time
+ }
+ }
+
+ // Increment own clock
+ vc.clocks[vc.self]++
+ return vc.copy()
+}
+
+// copy returns a copy of the vector
+func (vc *VectorClock) copy() map[string]uint64 {
+ result := make(map[string]uint64)
+ for k, v := range vc.clocks {
+ result[k] = v
+ }
+ return result
+}
+```
+
+### Comparison Functions
+
+```go
+// Compare returns ordering relationship between two vectors
+type Ordering int
+
+const (
+ Equal Ordering = iota // V1 == V2
+ HappenedBefore // V1 < V2
+ HappenedAfter // V1 > V2
+ Concurrent // V1 || V2
+)
+
+func Compare(v1, v2 map[string]uint64) Ordering {
+ less := false
+ greater := false
+
+ // Get all keys
+ allKeys := make(map[string]bool)
+ for k := range v1 {
+ allKeys[k] = true
+ }
+ for k := range v2 {
+ allKeys[k] = true
+ }
+
+ for k := range allKeys {
+ t1 := v1[k] // 0 if not present
+ t2 := v2[k]
+
+ if t1 < t2 {
+ less = true
+ }
+ if t1 > t2 {
+ greater = true
+ }
+ }
+
+ if !less && !greater {
+ return Equal
+ }
+ if less && !greater {
+ return HappenedBefore
+ }
+ if greater && !less {
+ return HappenedAfter
+ }
+ return Concurrent
+}
+
+// IsConcurrent checks if two events are concurrent
+func IsConcurrent(v1, v2 map[string]uint64) bool {
+ return Compare(v1, v2) == Concurrent
+}
+
+// HappenedBefore checks if v1 -> v2 (v1 causally precedes v2)
+func HappenedBefore(v1, v2 map[string]uint64) bool {
+ return Compare(v1, v2) == HappenedBefore
+}
+```
+
+## Interval Tree Clock Implementation
+
+### Data Structures
+
+```go
+// ID represents the identity tree
+type ID struct {
+ IsLeaf bool
+ Value int // 0 or 1 for leaves
+ Left *ID // nil for leaves
+ Right *ID
+}
+
+// Stamp represents the event tree
+type Stamp struct {
+ Base int
+ Left *Stamp // nil for leaf stamps
+ Right *Stamp
+}
+
+// ITC combines ID and Stamp
+type ITC struct {
+ ID *ID
+ Stamp *Stamp
+}
+```
+
+### ID Operations
+
+```go
+// NewSeedID creates initial full ID (1)
+func NewSeedID() *ID {
+ return &ID{IsLeaf: true, Value: 1}
+}
+
+// Fork splits an ID into two
+func (id *ID) Fork() (*ID, *ID) {
+ if id.IsLeaf {
+ if id.Value == 0 {
+ // Cannot fork zero ID
+ return &ID{IsLeaf: true, Value: 0},
+ &ID{IsLeaf: true, Value: 0}
+ }
+ // Split full ID into left and right halves
+ return &ID{
+ IsLeaf: false,
+ Left: &ID{IsLeaf: true, Value: 1},
+ Right: &ID{IsLeaf: true, Value: 0},
+ },
+ &ID{
+ IsLeaf: false,
+ Left: &ID{IsLeaf: true, Value: 0},
+ Right: &ID{IsLeaf: true, Value: 1},
+ }
+ }
+
+ // Fork from non-leaf: give half to each
+ if id.Left.IsLeaf && id.Left.Value == 0 {
+ // Left is zero, fork right
+ newRight1, newRight2 := id.Right.Fork()
+ return &ID{IsLeaf: false, Left: id.Left, Right: newRight1},
+ &ID{IsLeaf: false, Left: &ID{IsLeaf: true, Value: 0}, Right: newRight2}
+ }
+ if id.Right.IsLeaf && id.Right.Value == 0 {
+ // Right is zero, fork left
+ newLeft1, newLeft2 := id.Left.Fork()
+ return &ID{IsLeaf: false, Left: newLeft1, Right: id.Right},
+ &ID{IsLeaf: false, Left: newLeft2, Right: &ID{IsLeaf: true, Value: 0}}
+ }
+
+ // Both have IDs, split
+ return &ID{IsLeaf: false, Left: id.Left, Right: &ID{IsLeaf: true, Value: 0}},
+ &ID{IsLeaf: false, Left: &ID{IsLeaf: true, Value: 0}, Right: id.Right}
+}
+
+// Join merges two IDs
+func Join(id1, id2 *ID) *ID {
+ if id1.IsLeaf && id1.Value == 0 {
+ return id2
+ }
+ if id2.IsLeaf && id2.Value == 0 {
+ return id1
+ }
+ if id1.IsLeaf && id2.IsLeaf && id1.Value == 1 && id2.Value == 1 {
+ return &ID{IsLeaf: true, Value: 1}
+ }
+
+ // Normalize to non-leaf
+ left1 := id1.Left
+ right1 := id1.Right
+ left2 := id2.Left
+ right2 := id2.Right
+
+ if id1.IsLeaf {
+ left1 = id1
+ right1 = id1
+ }
+ if id2.IsLeaf {
+ left2 = id2
+ right2 = id2
+ }
+
+ newLeft := Join(left1, left2)
+ newRight := Join(right1, right2)
+
+ return normalize(&ID{IsLeaf: false, Left: newLeft, Right: newRight})
+}
+
+func normalize(id *ID) *ID {
+ if !id.IsLeaf {
+ if id.Left.IsLeaf && id.Right.IsLeaf &&
+ id.Left.Value == id.Right.Value {
+ return &ID{IsLeaf: true, Value: id.Left.Value}
+ }
+ }
+ return id
+}
+```
+
+### Stamp Operations
+
+```go
+// NewStamp creates initial stamp (0)
+func NewStamp() *Stamp {
+ return &Stamp{Base: 0}
+}
+
+// Event increments the stamp for the given ID
+func Event(id *ID, stamp *Stamp) *Stamp {
+ if id.IsLeaf {
+ if id.Value == 1 {
+ return &Stamp{Base: stamp.Base + 1}
+ }
+ return stamp // Cannot increment with zero ID
+ }
+
+ // Non-leaf ID: fill where we have ID
+ if id.Left.IsLeaf && id.Left.Value == 1 {
+ // Have left ID, increment left
+ newLeft := Event(&ID{IsLeaf: true, Value: 1}, getLeft(stamp))
+ return normalizeStamp(&Stamp{
+ Base: stamp.Base,
+ Left: newLeft,
+ Right: getRight(stamp),
+ })
+ }
+ if id.Right.IsLeaf && id.Right.Value == 1 {
+ newRight := Event(&ID{IsLeaf: true, Value: 1}, getRight(stamp))
+ return normalizeStamp(&Stamp{
+ Base: stamp.Base,
+ Left: getLeft(stamp),
+ Right: newRight,
+ })
+ }
+
+ // Both non-zero, choose lower side
+ leftMax := maxStamp(getLeft(stamp))
+ rightMax := maxStamp(getRight(stamp))
+
+ if leftMax <= rightMax {
+ return normalizeStamp(&Stamp{
+ Base: stamp.Base,
+ Left: Event(id.Left, getLeft(stamp)),
+ Right: getRight(stamp),
+ })
+ }
+ return normalizeStamp(&Stamp{
+ Base: stamp.Base,
+ Left: getLeft(stamp),
+ Right: Event(id.Right, getRight(stamp)),
+ })
+}
+
+func getLeft(s *Stamp) *Stamp {
+ if s.Left == nil {
+ return &Stamp{Base: 0}
+ }
+ return s.Left
+}
+
+func getRight(s *Stamp) *Stamp {
+ if s.Right == nil {
+ return &Stamp{Base: 0}
+ }
+ return s.Right
+}
+
+func maxStamp(s *Stamp) int {
+ if s.Left == nil && s.Right == nil {
+ return s.Base
+ }
+ left := 0
+ right := 0
+ if s.Left != nil {
+ left = maxStamp(s.Left)
+ }
+ if s.Right != nil {
+ right = maxStamp(s.Right)
+ }
+ max := left
+ if right > max {
+ max = right
+ }
+ return s.Base + max
+}
+
+// JoinStamps merges two stamps
+func JoinStamps(s1, s2 *Stamp) *Stamp {
+ // Take max at each level
+ base := s1.Base
+ if s2.Base > base {
+ base = s2.Base
+ }
+
+ // Adjust for base difference
+ adj1 := s1.Base
+ adj2 := s2.Base
+
+ return normalizeStamp(&Stamp{
+ Base: base,
+ Left: joinStampsRecursive(s1.Left, s2.Left, adj1-base, adj2-base),
+ Right: joinStampsRecursive(s1.Right, s2.Right, adj1-base, adj2-base),
+ })
+}
+
+func normalizeStamp(s *Stamp) *Stamp {
+ if s.Left == nil && s.Right == nil {
+ return s
+ }
+ if s.Left != nil && s.Right != nil {
+ if s.Left.Base > 0 && s.Right.Base > 0 {
+ min := s.Left.Base
+ if s.Right.Base < min {
+ min = s.Right.Base
+ }
+ return &Stamp{
+ Base: s.Base + min,
+ Left: &Stamp{Base: s.Left.Base - min, Left: s.Left.Left, Right: s.Left.Right},
+ Right: &Stamp{Base: s.Right.Base - min, Left: s.Right.Left, Right: s.Right.Right},
+ }
+ }
+ }
+ return s
+}
+```
+
+## Hybrid Logical Clock Implementation
+
+```go
+type HLC struct {
+ l int64 // logical component (physical time)
+ c int64 // counter
+ mu sync.Mutex
+}
+
+func NewHLC() *HLC {
+ return &HLC{l: 0, c: 0}
+}
+
+type HLCTimestamp struct {
+ L int64
+ C int64
+}
+
+func (hlc *HLC) physicalTime() int64 {
+ return time.Now().UnixNano()
+}
+
+// Now returns current HLC timestamp for local/send event
+func (hlc *HLC) Now() HLCTimestamp {
+ hlc.mu.Lock()
+ defer hlc.mu.Unlock()
+
+ pt := hlc.physicalTime()
+
+ if pt > hlc.l {
+ hlc.l = pt
+ hlc.c = 0
+ } else {
+ hlc.c++
+ }
+
+ return HLCTimestamp{L: hlc.l, C: hlc.c}
+}
+
+// Update updates HLC based on received timestamp
+func (hlc *HLC) Update(received HLCTimestamp) HLCTimestamp {
+ hlc.mu.Lock()
+ defer hlc.mu.Unlock()
+
+ pt := hlc.physicalTime()
+
+ if pt > hlc.l && pt > received.L {
+ hlc.l = pt
+ hlc.c = 0
+ } else if received.L > hlc.l {
+ hlc.l = received.L
+ hlc.c = received.C + 1
+ } else if hlc.l > received.L {
+ hlc.c++
+ } else { // hlc.l == received.L
+ if received.C > hlc.c {
+ hlc.c = received.C + 1
+ } else {
+ hlc.c++
+ }
+ }
+
+ return HLCTimestamp{L: hlc.l, C: hlc.c}
+}
+
+// Compare compares two HLC timestamps
+func (t1 HLCTimestamp) Compare(t2 HLCTimestamp) int {
+ if t1.L < t2.L {
+ return -1
+ }
+ if t1.L > t2.L {
+ return 1
+ }
+ if t1.C < t2.C {
+ return -1
+ }
+ if t1.C > t2.C {
+ return 1
+ }
+ return 0
+}
+```
+
+## Causal Broadcast Implementation
+
+```go
+type CausalBroadcast struct {
+ vc *VectorClock
+ pending []PendingMessage
+ deliver func(Message)
+ mu sync.Mutex
+}
+
+type PendingMessage struct {
+    Msg       Message
+    Sender    string
+    Timestamp map[string]uint64
+}
+
+func NewCausalBroadcast(processID string, processes []string, deliver func(Message)) *CausalBroadcast {
+    return &CausalBroadcast{
+        vc:      NewVectorClock(processID, processes),
+        pending: make([]PendingMessage, 0),
+        deliver: deliver,
+    }
+}
+
+// Broadcast sends a message to all processes
+func (cb *CausalBroadcast) Broadcast(msg Message) map[string]uint64 {
+    cb.mu.Lock()
+    defer cb.mu.Unlock()
+
+    timestamp := cb.vc.Send()
+    // Actual network broadcast would happen here
+    return timestamp
+}
+
+// Receive handles an incoming message
+func (cb *CausalBroadcast) Receive(msg Message, sender string, timestamp map[string]uint64) {
+    cb.mu.Lock()
+    defer cb.mu.Unlock()
+
+    // Add to pending
+    cb.pending = append(cb.pending, PendingMessage{Msg: msg, Sender: sender, Timestamp: timestamp})
+
+    // Try to deliver pending messages
+    cb.tryDeliver()
+}
+
+func (cb *CausalBroadcast) tryDeliver() {
+    changed := true
+    for changed {
+        changed = false
+
+        for i, pending := range cb.pending {
+            if cb.canDeliver(pending.Sender, pending.Timestamp) {
+                // Deliver message
+                cb.vc.Receive(pending.Timestamp)
+                cb.deliver(pending.Msg)
+
+                // Remove from pending
+                cb.pending = append(cb.pending[:i], cb.pending[i+1:]...)
+                changed = true
+                break
+            }
+        }
+    }
+}
+
+func (cb *CausalBroadcast) canDeliver(sender string, msgVC map[string]uint64) bool {
+    currentVC := cb.vc.clocks
+
+    for pid, msgTime := range msgVC {
+        if pid == sender {
+            // Must be the next expected message from the sender
+            if msgTime != currentVC[pid]+1 {
+                return false
+            }
+        } else {
+            // All other causal dependencies must be satisfied
+            if msgTime > currentVC[pid] {
+                return false
+            }
+        }
+    }
+    return true
+}
+```
diff --git a/.claude/skills/domain-driven-design/SKILL.md b/.claude/skills/domain-driven-design/SKILL.md
new file mode 100644
index 00000000..6b7534ec
--- /dev/null
+++ b/.claude/skills/domain-driven-design/SKILL.md
@@ -0,0 +1,166 @@
+---
+name: domain-driven-design
+description: This skill should be used when designing software architecture, modeling domains, reviewing code for DDD compliance, identifying bounded contexts, designing aggregates, or discussing strategic and tactical DDD patterns. Provides comprehensive Domain-Driven Design principles, axioms, heuristics, and anti-patterns for building maintainable, domain-centric software systems.
+---
+
+# Domain-Driven Design
+
+## Overview
+
+Domain-Driven Design (DDD) is an approach to software development that centers the design on the core business domain. This skill provides principles, patterns, and heuristics for both strategic design (system boundaries and relationships) and tactical design (code-level patterns).
+
+## When to Apply This Skill
+
+- Designing new systems or features with complex business logic
+- Identifying and defining bounded contexts
+- Modeling aggregates, entities, and value objects
+- Reviewing code for DDD pattern compliance
+- Decomposing monoliths into services
+- Establishing ubiquitous language with domain experts
+
+## Core Axioms
+
+### Axiom 1: The Domain is Supreme
+
+Software exists to solve domain problems. Technical decisions serve the domain, not vice versa. When technical elegance conflicts with domain clarity, domain clarity wins.
+
+### Axiom 2: Language Creates Reality
+
+The ubiquitous language shapes how teams think about the domain. Ambiguous language creates ambiguous software. Invest heavily in precise terminology.
+
+### Axiom 3: Boundaries Enable Autonomy
+
+Explicit boundaries (bounded contexts) allow teams to evolve independently. The cost of integration is worth the benefit of isolation.
+
+### Axiom 4: Models are Imperfect Approximations
+
+No model captures all domain complexity. Accept that models simplify reality. Refine models continuously as understanding deepens.
+
+## Strategic Design Quick Reference
+
+| Pattern | Purpose | Key Heuristic |
+|---------|---------|---------------|
+| **Bounded Context** | Define linguistic/model boundaries | One team, one language, one model |
+| **Context Map** | Document context relationships | Make implicit integrations explicit |
+| **Subdomain** | Classify domain areas by value | Core (invest), Supporting (adequate), Generic (outsource) |
+| **Ubiquitous Language** | Shared vocabulary | If experts don't use the term, neither should code |
+
+For detailed strategic patterns, consult `references/strategic-patterns.md`.
+
+## Tactical Design Quick Reference
+
+| Pattern | Purpose | Key Heuristic |
+|---------|---------|---------------|
+| **Entity** | Identity-tracked object | "Same identity = same thing" regardless of attributes |
+| **Value Object** | Immutable, identity-less | Equality by value, always immutable, self-validating |
+| **Aggregate** | Consistency boundary | Small aggregates, reference by ID, one transaction = one aggregate |
+| **Domain Event** | Record state changes | Past tense naming, immutable, contains all relevant data |
+| **Repository** | Collection abstraction | One per aggregate root, domain-focused interface |
+| **Domain Service** | Stateless operations | When logic doesn't belong to any single entity |
+| **Factory** | Complex object creation | When construction logic is complex or variable |
+
+For detailed tactical patterns, consult `references/tactical-patterns.md`.
+
+## Essential Heuristics
+
+### Aggregate Design Heuristics
+
+1. **Protect business invariants inside aggregate boundaries** - If two pieces of data must be consistent, they belong in the same aggregate
+2. **Design small aggregates** - Large aggregates cause concurrency issues and slow performance
+3. **Reference other aggregates by identity only** - Never hold direct object references across aggregate boundaries
+4. **Update one aggregate per transaction** - Eventual consistency across aggregates using domain events
+5. **Aggregate roots are the only entry point** - External code never reaches inside to manipulate child entities
+
+### Bounded Context Heuristics
+
+1. **Linguistic boundaries** - When the same word means different things, you have different contexts
+2. **Team boundaries** - One context per team enables autonomy
+3. **Process boundaries** - Different business processes often indicate different contexts
+4. **Data ownership** - Each context owns its data; no shared databases
+
+### Modeling Heuristics
+
+1. **Nouns → Entities or Value Objects** - Things with identity become entities; descriptive things become value objects
+2. **Verbs → Domain Services or Methods** - Actions become methods on entities or stateless services
+3. **Business rules → Invariants** - Rules the domain must always satisfy become aggregate invariants
+4. **Events in domain expert language → Domain Events** - "When X happens" becomes a domain event
+
+## Decision Guides
+
+### Entity vs Value Object
+
+```
+Does this thing have a lifecycle and identity that matters?
+├─ YES → Is identity based on an ID (not attributes)?
+│ ├─ YES → Entity
+│ └─ NO → Reconsider; might be Value Object with natural key
+└─ NO → Value Object
+```
+
+### Where Does This Logic Belong?
+
+```
+Is this logic stateless?
+├─ NO → Does it belong to a single aggregate?
+│ ├─ YES → Method on the aggregate/entity
+│ └─ NO → Reconsider aggregate boundaries
+└─ YES → Does it coordinate multiple aggregates?
+ ├─ YES → Application Service
+ └─ NO → Does it represent a domain concept?
+ ├─ YES → Domain Service
+ └─ NO → Infrastructure Service
+```
+
+### Should This Be a Separate Bounded Context?
+
+```
+Do different stakeholders use different language for this?
+├─ YES → Separate bounded context
+└─ NO → Does a different team own this?
+ ├─ YES → Separate bounded context
+ └─ NO → Would a separate model reduce complexity?
+ ├─ YES → Consider separation (but weigh integration cost)
+ └─ NO → Keep in current context
+```
+
+## Anti-Patterns Overview
+
+| Anti-Pattern | Description | Fix |
+|--------------|-------------|-----|
+| **Anemic Domain Model** | Entities with only getters/setters | Move behavior into domain objects |
+| **Big Ball of Mud** | No clear boundaries | Identify bounded contexts |
+| **Smart UI** | Business logic in presentation layer | Extract domain layer |
+| **Database-Driven Design** | Model follows database schema | Model follows domain, map to database |
+| **Leaky Abstractions** | Infrastructure concerns in domain | Dependency inversion, ports and adapters |
+| **God Aggregate** | One aggregate does everything | Split by invariant boundaries |
+| **Premature Abstraction** | Abstracting before understanding | Concrete first, abstract when patterns emerge |
+
+For detailed anti-patterns and remediation, consult `references/anti-patterns.md`.
+
+## Implementation Checklist
+
+When implementing DDD in a codebase:
+
+- [ ] Ubiquitous language documented and used consistently in code
+- [ ] Bounded contexts identified with clear boundaries
+- [ ] Context map documenting integration patterns
+- [ ] Aggregates designed small with clear invariants
+- [ ] Entities have behavior, not just data
+- [ ] Value objects are immutable and self-validating
+- [ ] Domain events capture important state changes
+- [ ] Repositories abstract persistence for aggregate roots
+- [ ] No business logic in application services (orchestration only)
+- [ ] No infrastructure concerns in domain layer
+
+## Resources
+
+### references/
+
+- `strategic-patterns.md` - Detailed strategic DDD patterns including bounded contexts, context maps, subdomain classification, and ubiquitous language
+- `tactical-patterns.md` - Detailed tactical DDD patterns including entities, value objects, aggregates, domain events, repositories, and services
+- `anti-patterns.md` - Common DDD anti-patterns, how to identify them, and remediation strategies
+
+To search references for specific topics:
+- Bounded contexts: `grep -i "bounded context" references/`
+- Aggregate design: `grep -i "aggregate" references/`
+- Value objects: `grep -i "value object" references/`
diff --git a/.claude/skills/domain-driven-design/references/anti-patterns.md b/.claude/skills/domain-driven-design/references/anti-patterns.md
new file mode 100644
index 00000000..62a45b3c
--- /dev/null
+++ b/.claude/skills/domain-driven-design/references/anti-patterns.md
@@ -0,0 +1,853 @@
+# DDD Anti-Patterns
+
+This reference documents common anti-patterns encountered when implementing Domain-Driven Design, how to identify them, and remediation strategies.
+
+## Anemic Domain Model
+
+### Description
+
+Entities that are mere data containers with getters and setters, while all business logic lives in "service" classes. The domain model looks like a relational database schema mapped to objects.
+
+### Symptoms
+
+- Entities with only get/set methods and no behavior
+- Service classes with methods like `orderService.calculateTotal(order)`
+- Business rules scattered across multiple services
+- Heavy use of DTOs that mirror entity structure
+- "Transaction scripts" in application services
+
+### Example
+
+```typescript
+// ANTI-PATTERN: Anemic domain model
+class Order {
+ id: string;
+ customerId: string;
+ items: OrderItem[];
+ status: string;
+ total: number;
+
+ // Only data access, no behavior
+ getId(): string { return this.id; }
+ setStatus(status: string): void { this.status = status; }
+ getItems(): OrderItem[] { return this.items; }
+ setTotal(total: number): void { this.total = total; }
+}
+
+class OrderService {
+ // All logic external to the entity
+ calculateTotal(order: Order): number {
+ let total = 0;
+ for (const item of order.getItems()) {
+ total += item.price * item.quantity;
+ }
+ order.setTotal(total);
+ return total;
+ }
+
+ canShip(order: Order): boolean {
+ return order.status === 'PAID' && order.getItems().length > 0;
+ }
+
+ ship(order: Order, trackingNumber: string): void {
+ if (!this.canShip(order)) {
+ throw new Error('Cannot ship order');
+ }
+ order.setStatus('SHIPPED');
+ order.trackingNumber = trackingNumber;
+ }
+}
+```
+
+### Remediation
+
+```typescript
+// CORRECT: Rich domain model
+class Order {
+  private _id: OrderId;
+  private _items: OrderItem[];
+  private _status: OrderStatus;
+  private _trackingNumber?: TrackingNumber;
+
+ // Behavior lives in the entity
+ get total(): Money {
+ return this._items.reduce(
+ (sum, item) => sum.add(item.subtotal()),
+ Money.zero()
+ );
+ }
+
+ canShip(): boolean {
+ return this._status === OrderStatus.Paid && this._items.length > 0;
+ }
+
+ ship(trackingNumber: TrackingNumber): void {
+ if (!this.canShip()) {
+ throw new OrderNotShippableError(this._id, this._status);
+ }
+ this._status = OrderStatus.Shipped;
+ this._trackingNumber = trackingNumber;
+ }
+
+ addItem(item: OrderItem): void {
+ this.ensureCanModify();
+ this._items.push(item);
+ }
+}
+
+// Application service is thin - only orchestration
+class OrderApplicationService {
+  async shipOrder(orderId: OrderId, trackingNumber: TrackingNumber): Promise<void> {
+ const order = await this.orderRepository.findById(orderId);
+ order.ship(trackingNumber); // Domain logic in entity
+ await this.orderRepository.save(order);
+ }
+}
+```
+
+### Root Causes
+
+- Developers treating objects as data structures
+- Thinking in terms of database tables
+- Copying patterns from CRUD applications
+- Misunderstanding "service" to mean "all logic goes here"
+
+## God Aggregate
+
+### Description
+
+An aggregate that has grown to encompass too much. It handles multiple concerns, has many child entities, and becomes a performance and concurrency bottleneck.
+
+### Symptoms
+
+- Aggregates with 10+ child entity types
+- Long load times due to eager loading everything
+- Frequent optimistic concurrency conflicts
+- Methods that only touch a small subset of the aggregate
+- Difficulty reasoning about invariants
+
+### Example
+
+```typescript
+// ANTI-PATTERN: God aggregate
+class Customer {
+ private _id: CustomerId;
+ private _profile: CustomerProfile;
+ private _addresses: Address[];
+ private _paymentMethods: PaymentMethod[];
+ private _orders: Order[]; // History of all orders!
+ private _wishlist: WishlistItem[];
+ private _reviews: Review[];
+ private _loyaltyPoints: LoyaltyAccount;
+ private _preferences: Preferences;
+ private _notifications: Notification[];
+ private _supportTickets: SupportTicket[];
+
+ // Loading this customer loads EVERYTHING
+ // Updating preferences causes concurrency conflict with order placement
+}
+```
+
+### Remediation
+
+```typescript
+// CORRECT: Small, focused aggregates
+class Customer {
+ private _id: CustomerId;
+ private _profile: CustomerProfile;
+ private _defaultAddressId: AddressId;
+ private _membershipTier: MembershipTier;
+}
+
+class CustomerAddressBook {
+ private _customerId: CustomerId;
+ private _addresses: Address[];
+}
+
+class ShoppingCart {
+ private _customerId: CustomerId; // Reference by ID
+ private _items: CartItem[];
+}
+
+class Wishlist {
+ private _customerId: CustomerId; // Reference by ID
+ private _items: WishlistItem[];
+}
+
+class LoyaltyAccount {
+ private _customerId: CustomerId; // Reference by ID
+ private _points: Points;
+ private _transactions: LoyaltyTransaction[];
+}
+```
+
+### Identification Heuristic
+
+Ask: "Do all these things need to be immediately consistent?" If the answer is no, they probably belong in separate aggregates.
+
+## Aggregate Reference Violation
+
+### Description
+
+Aggregates holding direct object references to other aggregates instead of referencing by identity. Creates implicit coupling and makes it impossible to reason about transactional boundaries.
+
+### Symptoms
+
+- Navigation from one aggregate to another: `order.customer.address`
+- Loading an aggregate brings in connected aggregates
+- Unclear what gets saved when calling `save()`
+- Difficulty implementing eventual consistency
+
+### Example
+
+```typescript
+// ANTI-PATTERN: Direct reference
+class Order {
+ private customer: Customer; // Direct reference!
+ private shippingAddress: Address;
+
+ getCustomerEmail(): string {
+ return this.customer.email; // Navigating through!
+ }
+
+ validate(): void {
+ // Touching another aggregate's data
+ if (this.customer.creditLimit < this.total) {
+ throw new Error('Credit limit exceeded');
+ }
+ }
+}
+```
+
+### Remediation
+
+```typescript
+// CORRECT: Reference by identity
+class Order {
+ private _customerId: CustomerId; // ID only!
+ private _shippingAddress: Address; // Value object copied at order time
+
+ // If customer data is needed, it must be explicitly loaded
+ static create(
+ customerId: CustomerId,
+ shippingAddress: Address,
+ creditLimit: Money // Passed in, not navigated to
+ ): Order {
+ return new Order(customerId, shippingAddress, creditLimit);
+ }
+}
+
+// Application service coordinates loading if needed
+class OrderApplicationService {
+  async getOrderWithCustomerDetails(orderId: OrderId): Promise<OrderDetails> {
+ const order = await this.orderRepository.findById(orderId);
+ const customer = await this.customerRepository.findById(order.customerId);
+
+ return new OrderDetails(order, customer);
+ }
+}
+```
+
+## Smart UI
+
+### Description
+
+Business logic embedded directly in the user interface layer. Controllers, presenters, or UI components contain domain rules.
+
+### Symptoms
+
+- Validation logic in form handlers
+- Business calculations in controllers
+- State machines in UI components
+- Domain rules duplicated across different UI views
+- "If we change the UI framework, we lose the business logic"
+
+### Example
+
+```typescript
+// ANTI-PATTERN: Smart UI
+class OrderController {
+ submitOrder(request: Request): Response {
+ const cart = request.body;
+
+ // Business logic in controller!
+ let total = 0;
+ for (const item of cart.items) {
+ total += item.price * item.quantity;
+ }
+
+ // Discount rules in controller!
+ if (cart.items.length > 10) {
+ total *= 0.9; // 10% bulk discount
+ }
+
+ if (total > 1000 && !this.hasValidPaymentMethod(cart.customerId)) {
+ return Response.error('Orders over $1000 require verified payment');
+ }
+
+ // More business rules...
+ const order = {
+ customerId: cart.customerId,
+ items: cart.items,
+ total: total,
+ status: 'PENDING'
+ };
+
+ this.database.insert('orders', order);
+ return Response.ok(order);
+ }
+}
+```
+
+### Remediation
+
+```typescript
+// CORRECT: UI delegates to domain
+class OrderController {
+ submitOrder(request: Request): Response {
+ const command = new PlaceOrderCommand(
+ request.body.customerId,
+ request.body.items
+ );
+
+ try {
+ const orderId = this.orderApplicationService.placeOrder(command);
+ return Response.ok({ orderId });
+ } catch (error) {
+ if (error instanceof DomainError) {
+ return Response.badRequest(error.message);
+ }
+ throw error;
+ }
+ }
+}
+
+// Domain logic in domain layer
+class Order {
+ private calculateTotal(): Money {
+ const subtotal = this._items.reduce(
+ (sum, item) => sum.add(item.subtotal()),
+ Money.zero()
+ );
+ return this._discountPolicy.apply(subtotal, this._items.length);
+ }
+}
+
+class BulkDiscountPolicy implements DiscountPolicy {
+ apply(subtotal: Money, itemCount: number): Money {
+ if (itemCount > 10) {
+ return subtotal.multiply(0.9);
+ }
+ return subtotal;
+ }
+}
+```
+
+## Database-Driven Design
+
+### Description
+
+The domain model is derived from the database schema rather than from domain concepts. Tables become classes; foreign keys become object references; database constraints become business rules.
+
+### Symptoms
+
+- Class names match table names exactly
+- Foreign key relationships drive object graph
+- ID fields everywhere, even where identity doesn't matter
+- `nullable` database columns drive optional properties
+- Domain model changes require database migration first
+
+### Example
+
+```typescript
+// ANTI-PATTERN: Database-driven model
+// Mirrors database schema exactly
+class orders {
+ order_id: number;
+ customer_id: number;
+ order_date: Date;
+ status_cd: string;
+ shipping_address_id: number;
+ billing_address_id: number;
+ total_amt: number;
+ tax_amt: number;
+ created_ts: Date;
+ updated_ts: Date;
+}
+
+class order_items {
+ order_item_id: number;
+ order_id: number;
+ product_id: number;
+ quantity: number;
+ unit_price: number;
+ discount_pct: number;
+}
+```
+
+### Remediation
+
+```typescript
+// CORRECT: Domain-driven model
+class Order {
+ private readonly _id: OrderId;
+ private _status: OrderStatus;
+ private _items: OrderItem[];
+ private _shippingAddress: Address; // Value object, not FK
+ private _billingAddress: Address;
+
+ // Domain behavior, not database structure
+ get total(): Money {
+ return this._items.reduce(
+ (sum, item) => sum.add(item.lineTotal()),
+ Money.zero()
+ );
+ }
+
+ ship(trackingNumber: TrackingNumber): void {
+ // Business logic
+ }
+}
+
+// Mapping is infrastructure concern
+class OrderRepository {
+  async save(order: Order): Promise<void> {
+ // Map rich domain object to database tables
+ await this.db.query(
+ 'INSERT INTO orders (id, status, shipping_street, shipping_city...) VALUES (...)'
+ );
+ }
+}
+```
+
+### Key Principle
+
+The domain model reflects how domain experts think, not how data is stored. Persistence is an infrastructure detail.
+
+## Leaky Abstractions
+
+### Description
+
+Infrastructure concerns bleeding into the domain layer. Domain objects depend on frameworks, databases, or external services.
+
+### Symptoms
+
+- Domain entities with ORM decorators
+- Repository interfaces returning database-specific types
+- Domain services making HTTP calls
+- Framework annotations on domain objects
+- `import { Entity } from 'typeorm'` in domain layer
+
+### Example
+
+```typescript
+// ANTI-PATTERN: Infrastructure leaking into domain
+import { Entity, Column, PrimaryColumn, ManyToOne } from 'typeorm';
+import { IsEmail, IsNotEmpty } from 'class-validator';
+
+@Entity('customers') // ORM in domain!
+export class Customer {
+ @PrimaryColumn()
+ id: string;
+
+ @Column()
+ @IsNotEmpty() // Validation framework in domain!
+ name: string;
+
+ @Column()
+ @IsEmail()
+ email: string;
+
+ @ManyToOne(() => Subscription) // ORM relationship in domain!
+ subscription: Subscription;
+}
+
+// Domain service calling external API directly
+class ShippingCostService {
+  async calculateCost(order: Order): Promise<number> {
+    // HTTP call in domain!
+    const response = await fetch('https://shipping-api.com/rates', {
+      body: JSON.stringify(order)
+    });
+    const data = await response.json();
+    return data.cost;
+  }
+}
+```
+
+### Remediation
+
+```typescript
+// CORRECT: Clean domain layer
+// Domain object - no framework dependencies
+class Customer {
+ private constructor(
+ private readonly _id: CustomerId,
+ private readonly _name: CustomerName,
+ private readonly _email: Email
+ ) {}
+
+ static create(name: string, email: string): Customer {
+ return new Customer(
+ CustomerId.generate(),
+ CustomerName.create(name), // Self-validating value object
+ Email.create(email) // Self-validating value object
+ );
+ }
+}
+
+// Port (interface) defined in domain
+interface ShippingRateProvider {
+  getRate(destination: Address, weight: Weight): Promise<Money>;
+}
+
+// Domain service uses port
+class ShippingCostCalculator {
+ constructor(private rateProvider: ShippingRateProvider) {}
+
+  async calculate(order: Order): Promise<Money> {
+ return this.rateProvider.getRate(
+ order.shippingAddress,
+ order.totalWeight()
+ );
+ }
+}
+
+// Adapter (infrastructure) implements port
+class ShippingApiRateProvider implements ShippingRateProvider {
+  async getRate(destination: Address, weight: Weight): Promise<Money> {
+ const response = await fetch('https://shipping-api.com/rates', {
+ body: JSON.stringify({ destination, weight })
+ });
+ const data = await response.json();
+ return Money.of(data.cost, Currency.USD);
+ }
+}
+```
+
+## Shared Database
+
+### Description
+
+Multiple bounded contexts accessing the same database tables. Changes in one context break others. No clear data ownership.
+
+### Symptoms
+
+- Multiple services querying the same tables
+- Fear of schema changes because "something else might break"
+- Unclear which service is authoritative for data
+- Cross-context joins in queries
+- Database triggers coordinating contexts
+
+### Example
+
+```typescript
+// ANTI-PATTERN: Shared database
+// Sales context
+class SalesOrderService {
+ async getOrder(orderId: string) {
+ return this.db.query(`
+ SELECT o.*, c.name, c.email, p.name as product_name
+ FROM orders o
+ JOIN customers c ON o.customer_id = c.id
+ JOIN products p ON o.product_id = p.id
+ WHERE o.id = ?
+ `, [orderId]);
+ }
+}
+
+// Shipping context - same tables!
+class ShippingService {
+ async getOrdersToShip() {
+ return this.db.query(`
+ SELECT o.*, c.address
+ FROM orders o
+ JOIN customers c ON o.customer_id = c.id
+ WHERE o.status = 'PAID'
+ `);
+ }
+
+ async markShipped(orderId: string) {
+ // Directly modifying shared table
+ await this.db.query(
+ "UPDATE orders SET status = 'SHIPPED' WHERE id = ?",
+ [orderId]
+ );
+ }
+}
+```
+
+### Remediation
+
+```typescript
+// CORRECT: Each context owns its data
+// Sales context - owns order creation
+class SalesOrderRepository {
+  async save(order: SalesOrder): Promise<void> {
+ await this.salesDb.query('INSERT INTO sales_orders...');
+
+ // Publish event for other contexts
+ await this.eventPublisher.publish(
+ new OrderPlaced(order.id, order.customerId, order.items)
+ );
+ }
+}
+
+// Shipping context - owns its projection
+class ShippingOrderProjection {
+ // Handles events to build local projection
+  async handleOrderPlaced(event: OrderPlaced): Promise<void> {
+ await this.shippingDb.query(`
+ INSERT INTO shipments (order_id, customer_id, status)
+ VALUES (?, ?, 'PENDING')
+ `, [event.orderId, event.customerId]);
+ }
+}
+
+class ShipmentRepository {
+  async findPendingShipments(): Promise<Shipment[]> {
+ // Queries only shipping context's data
+ return this.shippingDb.query(
+ "SELECT * FROM shipments WHERE status = 'PENDING'"
+ );
+ }
+}
+```
+
+## Premature Abstraction
+
+### Description
+
+Creating abstractions, interfaces, and frameworks before understanding the problem space. Often justified as "flexibility for the future."
+
+### Symptoms
+
+- Interfaces with single implementations
+- Generic frameworks solving hypothetical problems
+- Heavy use of design patterns without clear benefit
+- Configuration systems for things that never change
+- "We might need this someday"
+
+### Example
+
+```typescript
+// ANTI-PATTERN: Premature abstraction
+interface IOrderProcessor<TOrder> {
+  process(order: TOrder): Promise<OrderResult>;
+}
+
+interface IOrderValidator<TOrder> {
+  validate(order: TOrder): ValidationResult;
+}
+
+interface IOrderPersister<TOrder> {
+  persist(order: TOrder): Promise<void>;
+}
+
+abstract class AbstractOrderProcessor<TOrder>
+  implements IOrderProcessor<TOrder> {
+
+  constructor(
+    protected validator: IOrderValidator<TOrder>,
+    protected persister: IOrderPersister<TOrder>,
+    protected notifier: INotificationService,
+    protected logger: ILogger,
+    protected metrics: IMetricsCollector
+  ) {}
+
+  async process(order: TOrder): Promise<OrderResult> {
+    this.logger.log('Processing order');
+    this.metrics.increment('orders.processed');
+
+    const validation = this.validator.validate(order);
+    if (!validation.isValid) {
+      throw new ValidationException(validation.errors);
+    }
+
+    const result = await this.doProcess(order);
+    await this.persister.persist(order);
+    await this.notifier.notify(order);
+
+    return result;
+  }
+
+  protected abstract doProcess(order: TOrder): Promise<OrderResult>;
+}
+
+// Only one concrete implementation ever created
+class StandardOrderProcessor extends AbstractOrderProcessor<Order> {
+  protected async doProcess(order: Order): Promise<OrderResult> {
+    // The actual logic is trivial
+    return new OrderResult(order.id);
+  }
+}
+```
+
+### Remediation
+
+```typescript
+// CORRECT: Concrete first, abstract when patterns emerge
+class OrderService {
+  async placeOrder(command: PlaceOrderCommand): Promise<OrderId> {
+ const order = Order.create(command);
+
+ if (!order.isValid()) {
+ throw new InvalidOrderError(order.validationErrors());
+ }
+
+ await this.orderRepository.save(order);
+
+ return order.id;
+ }
+}
+
+// Only add abstraction when you have multiple implementations
+// and understand the variation points
+```
+
+### Heuristic
+
+Wait until you have three similar implementations before abstracting. The right abstraction will be obvious then.
+
+## Big Ball of Mud
+
+### Description
+
+A system without clear architectural boundaries. Everything depends on everything. Changes ripple unpredictably.
+
+### Symptoms
+
+- No clear module boundaries
+- Circular dependencies
+- Any change might break anything
+- "Only Bob understands how this works"
+- Integration tests are the only reliable tests
+- Fear of refactoring
+
+### Identification
+
+```
+# Circular dependency example
+OrderService → CustomerService → PaymentService → OrderService
+```
+
+### Remediation Strategy
+
+1. **Identify implicit contexts** - Find clusters of related functionality
+2. **Define explicit boundaries** - Create modules/packages with clear interfaces
+3. **Break cycles** - Introduce events or shared kernel for circular dependencies
+4. **Enforce boundaries** - Use architectural tests, linting rules
+
+```typescript
+// Step 1: Identify boundaries
+// sales/ - order creation, pricing
+// fulfillment/ - shipping, tracking
+// customer/ - customer management
+// shared/ - shared kernel (Money, Address)
+
+// Step 2: Define public interfaces
+// sales/index.ts
+export { OrderService } from './application/OrderService';
+export { OrderPlaced, OrderCancelled } from './domain/events';
+// Internal types not exported
+
+// Step 3: Break cycles with events
+class OrderService {
+  async placeOrder(command: PlaceOrderCommand): Promise<OrderId> {
+ const order = Order.create(command);
+ await this.orderRepository.save(order);
+
+ // Instead of calling PaymentService directly
+ await this.eventPublisher.publish(new OrderPlaced(order));
+
+ return order.id;
+ }
+}
+
+class PaymentEventHandler {
+  async handleOrderPlaced(event: OrderPlaced): Promise<void> {
+ await this.paymentService.collectPayment(event.orderId, event.total);
+ }
+}
+```
+
+## CRUD-Driven Development
+
+### Description
+
+Treating all domain operations as Create, Read, Update, Delete operations. Loses domain intent and behavior.
+
+### Symptoms
+
+- Endpoints like `PUT /orders/{id}` that accept any field changes
+- Service methods like `updateOrder(orderId, updates)`
+- Domain events named `OrderUpdated` instead of `OrderShipped`
+- No validation of state transitions
+- Business operations hidden behind generic updates
+
+### Example
+
+```typescript
+// ANTI-PATTERN: CRUD-driven
+class OrderController {
+ @Put('/orders/:id')
+  async updateOrder(id: string, body: Partial<Order>) {
+ // Any field can be updated!
+ return this.orderService.update(id, body);
+ }
+}
+
+class OrderService {
+  async update(id: string, updates: Partial<Order>): Promise<Order> {
+ const order = await this.repo.findById(id);
+ Object.assign(order, updates); // Blindly apply updates
+ return this.repo.save(order);
+ }
+}
+```
+
+### Remediation
+
+```typescript
+// CORRECT: Intent-revealing operations
+class OrderController {
+ @Post('/orders/:id/ship')
+ async shipOrder(id: string, body: ShipOrderRequest) {
+ return this.orderService.ship(id, body.trackingNumber);
+ }
+
+ @Post('/orders/:id/cancel')
+ async cancelOrder(id: string, body: CancelOrderRequest) {
+ return this.orderService.cancel(id, body.reason);
+ }
+}
+
+class OrderService {
+  async ship(orderId: OrderId, trackingNumber: TrackingNumber): Promise<void> {
+ const order = await this.repo.findById(orderId);
+ order.ship(trackingNumber); // Domain logic with validation
+ await this.repo.save(order);
+ await this.publish(new OrderShipped(orderId, trackingNumber));
+ }
+
+  async cancel(orderId: OrderId, reason: CancellationReason): Promise<void> {
+ const order = await this.repo.findById(orderId);
+ order.cancel(reason); // Validates cancellation is allowed
+ await this.repo.save(order);
+ await this.publish(new OrderCancelled(orderId, reason));
+ }
+}
+```
+
+## Summary: Detection Checklist
+
+| Anti-Pattern | Key Question |
+|--------------|--------------|
+| Anemic Domain Model | Do entities have behavior or just data? |
+| God Aggregate | Does everything need immediate consistency? |
+| Aggregate Reference Violation | Are aggregates holding other aggregates? |
+| Smart UI | Would changing UI framework lose business logic? |
+| Database-Driven Design | Does model match tables or domain concepts? |
+| Leaky Abstractions | Does domain code import infrastructure? |
+| Shared Database | Do multiple contexts write to same tables? |
+| Premature Abstraction | Are there interfaces with single implementations? |
+| Big Ball of Mud | Can any change break anything? |
+| CRUD-Driven Development | Are operations generic updates or domain intents? |
diff --git a/.claude/skills/domain-driven-design/references/strategic-patterns.md b/.claude/skills/domain-driven-design/references/strategic-patterns.md
new file mode 100644
index 00000000..bcf132dd
--- /dev/null
+++ b/.claude/skills/domain-driven-design/references/strategic-patterns.md
@@ -0,0 +1,358 @@
+# Strategic DDD Patterns
+
+Strategic DDD patterns address the large-scale structure of a system: how to divide it into bounded contexts, how those contexts relate, and how to prioritize investment across subdomains.
+
+## Bounded Context
+
+### Definition
+
+A Bounded Context is an explicit boundary within which a domain model exists. Inside the boundary, all terms have specific, unambiguous meanings. The same term may mean different things in different bounded contexts.
+
+### Why It Matters
+
+- **Linguistic clarity** - "Customer" in Sales means something different than "Customer" in Shipping
+- **Model isolation** - Changes to one model don't cascade across the system
+- **Team autonomy** - Teams can work independently within their context
+- **Focused complexity** - Each context solves one set of problems well
+
+### Identification Heuristics
+
+1. **Language divergence** - When stakeholders use the same word differently, there's a context boundary
+2. **Department boundaries** - Organizational structure often mirrors domain structure
+3. **Process boundaries** - End-to-end business processes often define context edges
+4. **Data ownership** - Who is the authoritative source for this data?
+5. **Change frequency** - Parts that change together should stay together
+
+### Example: E-Commerce Platform
+
+| Context | "Order" means... | "Product" means... |
+|---------|------------------|-------------------|
+| **Catalog** | N/A | Displayable item with description, images, categories |
+| **Inventory** | N/A | Stock keeping unit with quantity and location |
+| **Sales** | Shopping cart ready for checkout | Line item with price |
+| **Fulfillment** | Shipment to be picked and packed | Physical item to ship |
+| **Billing** | Invoice to collect payment | Taxable good |
+
+### Implementation Patterns
+
+#### Separate Deployables
+Each bounded context as its own service/application.
+
+```
+catalog-service/
+├── src/domain/Product.ts
+└── src/infrastructure/CatalogRepository.ts
+
+sales-service/
+├── src/domain/Product.ts # Different model!
+└── src/domain/Order.ts
+```
+
+#### Module Boundaries
+Bounded contexts as modules within a monolith.
+
+```
+src/
+├── catalog/
+│ └── domain/Product.ts
+├── sales/
+│ └── domain/Product.ts # Different model!
+└── shared/
+ └── kernel/Money.ts # Shared kernel
+```
+
+## Context Map
+
+### Definition
+
+A Context Map is a visual and documented representation of how bounded contexts relate to each other. It makes integration patterns explicit.
+
+### Integration Patterns
+
+#### Partnership
+
+Two contexts develop together with mutual dependencies. Changes are coordinated.
+
+```
+┌─────────────┐ Partnership ┌─────────────┐
+│ Catalog │◄──────────────────►│ Inventory │
+└─────────────┘ └─────────────┘
+```
+
+**Use when**: Two teams must succeed or fail together.
+
+#### Shared Kernel
+
+A small, shared model that multiple contexts depend on. Changes require agreement from all consumers.
+
+```
+┌─────────────┐ ┌─────────────┐
+│ Sales │ │ Billing │
+└──────┬──────┘ └──────┬──────┘
+ │ │
+ └─────────► Money ◄──────────────┘
+ (shared kernel)
+```
+
+**Use when**: Core concepts genuinely need the same model.
+**Danger**: Creates coupling. Keep shared kernels minimal.
+
+#### Customer-Supplier
+
+Upstream context (supplier) provides data/services; downstream context (customer) consumes. Supplier considers customer needs.
+
+```
+┌─────────────┐ ┌─────────────┐
+│ Catalog │───── supplies ────►│ Sales │
+│ (upstream) │ │ (downstream)│
+└─────────────┘ └─────────────┘
+```
+
+**Use when**: One context clearly serves another, and the supplier is responsive.
+
+#### Conformist
+
+Downstream adopts upstream's model without negotiation. Upstream doesn't accommodate downstream needs.
+
+```
+┌─────────────┐ ┌─────────────┐
+│ External │───── dictates ────►│ Our App │
+│ API │ │ (conformist)│
+└─────────────┘ └─────────────┘
+```
+
+**Use when**: Upstream won't change (third-party API), and their model is acceptable.
+
+#### Anti-Corruption Layer (ACL)
+
+Translation layer that protects a context from external models. Transforms data at the boundary.
+
+```
+┌─────────────┐ ┌───────┐ ┌─────────────┐
+│ Legacy │───────►│ ACL │───────►│ New System │
+│ System │ └───────┘ └─────────────┘
+```
+
+**Use when**: Upstream model would pollute downstream; translation is worth the cost.
+
+```typescript
+// Anti-Corruption Layer example
+class LegacyOrderAdapter {
+ constructor(private legacyApi: LegacyOrderApi) {}
+
+ translateOrder(legacyOrder: LegacyOrder): Order {
+ return new Order({
+ id: OrderId.from(legacyOrder.order_num),
+ customer: this.translateCustomer(legacyOrder.cust_data),
+ items: legacyOrder.line_items.map(this.translateLineItem),
+ // Transform legacy status codes to domain concepts
+ status: this.mapStatus(legacyOrder.stat_cd),
+ });
+ }
+
+ private mapStatus(legacyCode: string): OrderStatus {
+    const mapping: Record<string, OrderStatus> = {
+ 'OP': OrderStatus.Open,
+ 'SH': OrderStatus.Shipped,
+ 'CL': OrderStatus.Closed,
+ };
+ return mapping[legacyCode] ?? OrderStatus.Unknown;
+ }
+}
+```
+
+#### Open Host Service
+
+A context provides a well-defined protocol/API for others to consume.
+
+```
+ ┌─────────────┐
+ ┌──────────►│ Reports │
+ │ └─────────────┘
+┌───────┴───────┐ ┌─────────────┐
+│ Catalog API │──►│ Search │
+│ (open host) │ └─────────────┘
+└───────┬───────┘ ┌─────────────┐
+ └──────────►│ Partner │
+ └─────────────┘
+```
+
+**Use when**: Multiple downstream contexts need access; worth investing in a stable API.
+
+#### Published Language
+
+A shared language format (schema) for communication between contexts. Often combined with Open Host Service.
+
+Examples: JSON schemas, Protocol Buffers, GraphQL schemas, industry standards (HL7 for healthcare).
+
+#### Separate Ways
+
+Contexts have no integration. Each solves its needs independently.
+
+**Use when**: Integration cost exceeds benefit; duplication is acceptable.
+
+### Context Map Notation
+
+```
+┌───────────────────────────────────────────────────────────────┐
+│ CONTEXT MAP │
+├───────────────────────────────────────────────────────────────┤
+│ │
+│ ┌─────────┐ Partnership ┌─────────┐ │
+│ │ Sales │◄────────────────────────────►│Inventory│ │
+│ │ (U,D) │ │ (U,D) │ │
+│ └────┬────┘ └────┬────┘ │
+│ │ │ │
+│ │ Customer/Supplier │ │
+│ ▼ │ │
+│ ┌─────────┐ │ │
+│ │ Billing │◄──────────────────────────────────┘ │
+│ │ (D) │ Conformist │
+│ └─────────┘ │
+│ │
+│ Legend: U = Upstream, D = Downstream │
+└───────────────────────────────────────────────────────────────┘
+```
+
+## Subdomain Classification
+
+### Core Domain
+
+The essential differentiator. This is where competitive advantage lives.
+
+**Characteristics**:
+- Unique to this business
+- Complex, requires deep expertise
+- Frequently changing as business evolves
+- Worth significant investment
+
+**Strategy**: Build in-house with best talent. Invest heavily in modeling.
+
+### Supporting Subdomain
+
+Necessary for the business but not a differentiator.
+
+**Characteristics**:
+- Important but not unique
+- Moderate complexity
+- Changes less frequently
+- Custom implementation needed
+
+**Strategy**: Build with adequate (not exceptional) investment. May outsource.
+
+### Generic Subdomain
+
+Solved problems with off-the-shelf solutions.
+
+**Characteristics**:
+- Common across industries
+- Well-understood solutions exist
+- Rarely changes
+- Not a differentiator
+
+**Strategy**: Buy or use open-source. Don't reinvent.
+
+### Example: E-Commerce Platform
+
+| Subdomain | Type | Strategy |
+|-----------|------|----------|
+| Product Recommendation Engine | Core | In-house, top talent |
+| Inventory Management | Supporting | Build, adequate investment |
+| Payment Processing | Generic | Third-party (Stripe, etc.) |
+| User Authentication | Generic | Third-party or standard library |
+| Shipping Logistics | Supporting | Build or integrate vendor |
+| Customer Analytics | Core | In-house, strategic investment |
+
+## Ubiquitous Language
+
+### Definition
+
+A common language shared by developers and domain experts. It appears in conversations, documentation, and code.
+
+### Building Ubiquitous Language
+
+1. **Listen to experts** - Use their terminology, not technical jargon
+2. **Challenge vague terms** - "Process the order" → What exactly happens?
+3. **Document glossary** - Maintain a living dictionary
+4. **Enforce in code** - Class and method names use the language
+5. **Refine continuously** - Language evolves with understanding
+
+### Language in Code
+
+```typescript
+// Bad: Technical terms
+class OrderProcessor {
+ handleOrderCreation(data: OrderData): void {
+ this.validateData(data);
+ this.persistToDatabase(data);
+ this.sendNotification(data);
+ }
+}
+
+// Good: Ubiquitous language
+class OrderTaker {
+ placeOrder(cart: ShoppingCart): PlacedOrder {
+ const order = cart.checkout();
+ order.confirmWith(this.paymentGateway);
+ this.orderRepository.save(order);
+ this.domainEvents.publish(new OrderPlaced(order));
+ return order;
+ }
+}
+```
+
+### Glossary Example
+
+| Term | Definition | Context |
+|------|------------|---------|
+| **Order** | A confirmed purchase with payment collected | Sales |
+| **Shipment** | Physical package(s) sent to fulfill an order | Fulfillment |
+| **SKU** | Stock Keeping Unit; unique identifier for inventory | Inventory |
+| **Cart** | Uncommitted collection of items a customer intends to buy | Sales |
+| **Listing** | Product displayed for purchase in the catalog | Catalog |
+
+### Anti-Pattern: Technical Language Leakage
+
+```typescript
+// Bad: Database terminology leaks into domain
+order.setForeignKeyCustomerId(customerId);
+order.persist();
+
+// Bad: HTTP concerns leak into domain
+order.deserializeFromJson(request.body);
+order.setHttpStatus(200);
+
+// Good: Domain language only
+order.placeFor(customer);
+orderRepository.save(order);
+```
+
+## Strategic Design Decisions
+
+### When to Split a Bounded Context
+
+Split when:
+- Different parts need to evolve at different speeds
+- Different teams need ownership
+- Model complexity is becoming unmanageable
+- Language conflicts are emerging within the context
+
+Don't split when:
+- Transaction boundaries would become awkward
+- Integration cost outweighs isolation benefit
+- Single team can handle the complexity
+
+### When to Merge Bounded Contexts
+
+Merge when:
+- Integration overhead is excessive
+- Same team owns both
+- Models are converging naturally
+- Separate contexts create artificial complexity
+
+### Dealing with Legacy Systems
+
+1. **Bubble context** - New bounded context with ACL to legacy
+2. **Strangler fig** - Gradually replace legacy feature by feature
+3. **Conformist** - Accept legacy model if acceptable
+4. **Separate ways** - Rebuild independently, migrate data later
diff --git a/.claude/skills/domain-driven-design/references/tactical-patterns.md b/.claude/skills/domain-driven-design/references/tactical-patterns.md
new file mode 100644
index 00000000..ae543b1c
--- /dev/null
+++ b/.claude/skills/domain-driven-design/references/tactical-patterns.md
@@ -0,0 +1,927 @@
+# Tactical DDD Patterns
+
+Tactical DDD patterns are code-level building blocks for implementing a rich domain model. They help express domain concepts in code that mirrors how domain experts think.
+
+## Entity
+
+### Definition
+
+An object defined by its identity rather than its attributes. Two entities with the same attribute values but different identities are different things.
+
+### Characteristics
+
+- Has a unique identifier that persists through state changes
+- Identity established at creation, immutable thereafter
+- Equality based on identity, not attribute values
+- Has a lifecycle (created, modified, potentially deleted)
+- Contains behavior relevant to the domain concept it represents
+
+### When to Use
+
+- The object represents something tracked over time
+- "Is this the same one?" is a meaningful question
+- The object needs to be referenced from other parts of the system
+- State changes are important to track
+
+### Implementation
+
+```typescript
+// Entity with identity and behavior
+class Order {
+ private readonly _id: OrderId;
+ private _status: OrderStatus;
+ private _items: OrderItem[];
+ private _shippingAddress: Address;
+
+ constructor(id: OrderId, items: OrderItem[], shippingAddress: Address) {
+ this._id = id;
+ this._items = items;
+ this._shippingAddress = shippingAddress;
+ this._status = OrderStatus.Pending;
+ }
+
+ get id(): OrderId {
+ return this._id;
+ }
+
+ // Behavior, not just data access
+ confirm(): void {
+ if (this._items.length === 0) {
+ throw new EmptyOrderError(this._id);
+ }
+ this._status = OrderStatus.Confirmed;
+ }
+
+ ship(trackingNumber: TrackingNumber): void {
+ if (this._status !== OrderStatus.Confirmed) {
+ throw new InvalidOrderStateError(this._id, this._status, 'ship');
+ }
+ this._status = OrderStatus.Shipped;
+ // Domain event raised
+ }
+
+ addItem(item: OrderItem): void {
+ if (this._status !== OrderStatus.Pending) {
+ throw new OrderModificationError(this._id);
+ }
+ this._items.push(item);
+ }
+
+ // Identity-based equality
+ equals(other: Order): boolean {
+ return this._id.equals(other._id);
+ }
+}
+
+// Strongly-typed identity
+class OrderId {
+ constructor(private readonly value: string) {
+ if (!value || value.trim() === '') {
+ throw new InvalidOrderIdError();
+ }
+ }
+
+ equals(other: OrderId): boolean {
+ return this.value === other.value;
+ }
+
+ toString(): string {
+ return this.value;
+ }
+}
+```
+
+### Entity vs Data Structure
+
+```typescript
+// Bad: Anemic entity (data structure)
+class Order {
+ id: string;
+ status: string;
+ items: Item[];
+
+ // Only getters/setters, no behavior
+}
+
+// Good: Rich entity with behavior
+class Order {
+ private _id: OrderId;
+ private _status: OrderStatus;
+ private _items: OrderItem[];
+
+ confirm(): void { /* enforces rules */ }
+ cancel(reason: CancellationReason): void { /* enforces rules */ }
+ addItem(item: OrderItem): void { /* enforces rules */ }
+}
+```
+
+## Value Object
+
+### Definition
+
+An object defined entirely by its attributes. Two value objects with the same attributes are interchangeable. Has no identity.
+
+### Characteristics
+
+- Immutable - once created, never changes
+- Equality based on attributes, not identity
+- Self-validating - always in a valid state
+- Side-effect free - methods return new instances
+- Conceptually whole - attributes form a complete concept
+
+### When to Use
+
+- The concept has no lifecycle or identity
+- "Are these the same?" means "do they have the same values?"
+- Measurement, description, or quantification
+- Combinations of attributes that belong together
+
+### Implementation
+
+```typescript
+// Value Object: Money
+class Money {
+ private constructor(
+ private readonly amount: number,
+ private readonly currency: Currency
+ ) {}
+
+ // Factory method with validation
+ static of(amount: number, currency: Currency): Money {
+ if (amount < 0) {
+ throw new NegativeMoneyError(amount);
+ }
+ return new Money(amount, currency);
+ }
+
+ // Immutable operations - return new instances
+ add(other: Money): Money {
+ this.ensureSameCurrency(other);
+ return Money.of(this.amount + other.amount, this.currency);
+ }
+
+ subtract(other: Money): Money {
+ this.ensureSameCurrency(other);
+ return Money.of(this.amount - other.amount, this.currency);
+ }
+
+ multiply(factor: number): Money {
+ return Money.of(this.amount * factor, this.currency);
+ }
+
+ // Value-based equality
+ equals(other: Money): boolean {
+ return this.amount === other.amount &&
+ this.currency.equals(other.currency);
+ }
+
+ private ensureSameCurrency(other: Money): void {
+ if (!this.currency.equals(other.currency)) {
+ throw new CurrencyMismatchError(this.currency, other.currency);
+ }
+ }
+}
+
+// Value Object: Address
+class Address {
+ private constructor(
+ readonly street: string,
+ readonly city: string,
+ readonly postalCode: string,
+ readonly country: Country
+ ) {}
+
+ static create(street: string, city: string, postalCode: string, country: Country): Address {
+ if (!street || !city || !postalCode) {
+ throw new InvalidAddressError();
+ }
+ if (!country.validatePostalCode(postalCode)) {
+ throw new InvalidPostalCodeError(postalCode, country);
+ }
+ return new Address(street, city, postalCode, country);
+ }
+
+ // Returns new instance with modified value
+ withStreet(newStreet: string): Address {
+ return Address.create(newStreet, this.city, this.postalCode, this.country);
+ }
+
+ equals(other: Address): boolean {
+ return this.street === other.street &&
+ this.city === other.city &&
+ this.postalCode === other.postalCode &&
+ this.country.equals(other.country);
+ }
+}
+
+// Value Object: DateRange
+class DateRange {
+ private constructor(
+ readonly start: Date,
+ readonly end: Date
+ ) {}
+
+ static create(start: Date, end: Date): DateRange {
+ if (end < start) {
+ throw new InvalidDateRangeError(start, end);
+ }
+ return new DateRange(start, end);
+ }
+
+ contains(date: Date): boolean {
+ return date >= this.start && date <= this.end;
+ }
+
+ overlaps(other: DateRange): boolean {
+ return this.start <= other.end && this.end >= other.start;
+ }
+
+ durationInDays(): number {
+ return Math.floor((this.end.getTime() - this.start.getTime()) / (1000 * 60 * 60 * 24));
+ }
+}
+```
+
+### Common Value Objects
+
+| Domain | Value Objects |
+|--------|--------------|
+| **E-commerce** | Money, Price, Quantity, SKU, Address, PhoneNumber |
+| **Healthcare** | BloodPressure, Dosage, DateRange, PatientId |
+| **Finance** | AccountNumber, IBAN, TaxId, Percentage |
+| **Shipping** | Weight, Dimensions, TrackingNumber, PostalCode |
+| **General** | Email, URL, PhoneNumber, Name, Coordinates |
+
+## Aggregate
+
+### Definition
+
+A cluster of entities and value objects with defined boundaries. Has an aggregate root entity that serves as the single entry point. External objects can only reference the root.
+
+### Characteristics
+
+- Defines a transactional consistency boundary
+- Aggregate root is the only externally accessible object
+- Enforces invariants across the cluster
+- Loaded and saved as a unit
+- Other aggregates referenced by identity only
+
+### Design Rules
+
+1. **Protect invariants** - All rules that must be consistent are inside the boundary
+2. **Small aggregates** - Prefer single-entity aggregates; add children only when invariants require
+3. **Reference by identity** - Never hold direct references to other aggregates
+4. **Update one per transaction** - Eventual consistency between aggregates
+5. **Design around invariants** - Identify what must be immediately consistent
+
+### Implementation
+
+```typescript
+// Aggregate: Order (root) with OrderItems (child entities)
+class Order {
+ private readonly _id: OrderId;
+  private _items: Map<ProductId, OrderItem>;
+ private _status: OrderStatus;
+
+ // Invariant: Order total cannot exceed credit limit
+ private _creditLimit: Money;
+
+ private constructor(
+ id: OrderId,
+ creditLimit: Money
+ ) {
+ this._id = id;
+ this._items = new Map();
+ this._status = OrderStatus.Draft;
+ this._creditLimit = creditLimit;
+ }
+
+ static create(id: OrderId, creditLimit: Money): Order {
+ return new Order(id, creditLimit);
+ }
+
+ // All modifications go through aggregate root
+ addItem(productId: ProductId, quantity: Quantity, unitPrice: Money): void {
+ this.ensureCanModify();
+
+ const newItem = OrderItem.create(productId, quantity, unitPrice);
+ const projectedTotal = this.calculateTotalWith(newItem);
+
+ // Invariant enforcement
+ if (projectedTotal.isGreaterThan(this._creditLimit)) {
+ throw new CreditLimitExceededError(projectedTotal, this._creditLimit);
+ }
+
+ this._items.set(productId, newItem);
+ }
+
+ removeItem(productId: ProductId): void {
+ this.ensureCanModify();
+ this._items.delete(productId);
+ }
+
+ updateItemQuantity(productId: ProductId, newQuantity: Quantity): void {
+ this.ensureCanModify();
+
+ const item = this._items.get(productId);
+ if (!item) {
+ throw new ItemNotFoundError(productId);
+ }
+
+ const updatedItem = item.withQuantity(newQuantity);
+ const projectedTotal = this.calculateTotalWithUpdate(productId, updatedItem);
+
+ if (projectedTotal.isGreaterThan(this._creditLimit)) {
+ throw new CreditLimitExceededError(projectedTotal, this._creditLimit);
+ }
+
+ this._items.set(productId, updatedItem);
+ }
+
+ submit(): OrderSubmitted {
+ if (this._items.size === 0) {
+ throw new EmptyOrderError();
+ }
+ this._status = OrderStatus.Submitted;
+
+ return new OrderSubmitted(this._id, this.total(), new Date());
+ }
+
+ // Read-only access to child entities
+  get items(): ReadonlyArray<OrderItem> {
+ return Array.from(this._items.values());
+ }
+
+ total(): Money {
+ return this.items.reduce(
+ (sum, item) => sum.add(item.subtotal()),
+ Money.zero(Currency.USD)
+ );
+ }
+
+ private ensureCanModify(): void {
+ if (this._status !== OrderStatus.Draft) {
+ throw new OrderNotModifiableError(this._id, this._status);
+ }
+ }
+
+ private calculateTotalWith(newItem: OrderItem): Money {
+ return this.total().add(newItem.subtotal());
+ }
+
+ private calculateTotalWithUpdate(productId: ProductId, updatedItem: OrderItem): Money {
+ const currentItem = this._items.get(productId)!;
+ return this.total().subtract(currentItem.subtotal()).add(updatedItem.subtotal());
+ }
+}
+
+// Child entity - only accessible through aggregate root
+class OrderItem {
+ private constructor(
+ private readonly _productId: ProductId,
+ private _quantity: Quantity,
+ private readonly _unitPrice: Money
+ ) {}
+
+ static create(productId: ProductId, quantity: Quantity, unitPrice: Money): OrderItem {
+ return new OrderItem(productId, quantity, unitPrice);
+ }
+
+ get productId(): ProductId { return this._productId; }
+ get quantity(): Quantity { return this._quantity; }
+ get unitPrice(): Money { return this._unitPrice; }
+
+ subtotal(): Money {
+ return this._unitPrice.multiply(this._quantity.value);
+ }
+
+ withQuantity(newQuantity: Quantity): OrderItem {
+ return new OrderItem(this._productId, newQuantity, this._unitPrice);
+ }
+}
+```
+
+### Aggregate Reference Patterns
+
+```typescript
+// Bad: Direct object reference across aggregates
+class Order {
+ private customer: Customer; // Holds the entire aggregate!
+}
+
+// Good: Reference by identity
+class Order {
+ private customerId: CustomerId;
+
+ // If customer data needed, load separately
+ getCustomerAddress(customerRepository: CustomerRepository): Address {
+ const customer = customerRepository.findById(this.customerId);
+ return customer.shippingAddress;
+ }
+}
+```
+
+## Domain Event
+
+### Definition
+
+A record of something significant that happened in the domain. Captures state changes that domain experts care about.
+
+### Characteristics
+
+- Named in past tense (OrderPlaced, PaymentReceived)
+- Immutable - records historical fact
+- Contains all relevant data about what happened
+- Published after state change is committed
+- May trigger reactions in same or different bounded contexts
+
+### When to Use
+
+- Domain experts talk about "when X happens, Y should happen"
+- Need to communicate changes across aggregate boundaries
+- Maintaining an audit trail
+- Implementing eventual consistency
+- Integration with other bounded contexts
+
+### Implementation
+
+```typescript
+// Base domain event
+abstract class DomainEvent {
+ readonly occurredAt: Date;
+ readonly eventId: string;
+
+ constructor() {
+ this.occurredAt = new Date();
+ this.eventId = generateUUID();
+ }
+
+ abstract get eventType(): string;
+}
+
+// Specific domain events
+class OrderPlaced extends DomainEvent {
+ constructor(
+ readonly orderId: OrderId,
+ readonly customerId: CustomerId,
+ readonly totalAmount: Money,
+    readonly items: ReadonlyArray<OrderItemSnapshot>
+ ) {
+ super();
+ }
+
+ get eventType(): string {
+ return 'order.placed';
+ }
+}
+
+class OrderShipped extends DomainEvent {
+ constructor(
+ readonly orderId: OrderId,
+ readonly trackingNumber: TrackingNumber,
+ readonly carrier: string,
+ readonly estimatedDelivery: Date
+ ) {
+ super();
+ }
+
+ get eventType(): string {
+ return 'order.shipped';
+ }
+}
+
+class PaymentReceived extends DomainEvent {
+ constructor(
+ readonly orderId: OrderId,
+ readonly amount: Money,
+ readonly paymentMethod: PaymentMethod,
+ readonly transactionId: string
+ ) {
+ super();
+ }
+
+ get eventType(): string {
+ return 'payment.received';
+ }
+}
+
+// Entity raising events
+class Order {
+ private _domainEvents: DomainEvent[] = [];
+
+ submit(): void {
+ // State change
+ this._status = OrderStatus.Submitted;
+
+ // Raise event
+ this._domainEvents.push(
+ new OrderPlaced(
+ this._id,
+ this._customerId,
+ this.total(),
+ this.itemSnapshots()
+ )
+ );
+ }
+
+ pullDomainEvents(): DomainEvent[] {
+ const events = [...this._domainEvents];
+ this._domainEvents = [];
+ return events;
+ }
+}
+
+// Event handler
+class OrderPlacedHandler {
+ constructor(
+ private inventoryService: InventoryService,
+ private emailService: EmailService
+ ) {}
+
+  async handle(event: OrderPlaced): Promise<void> {
+ // Reserve inventory (different aggregate)
+ await this.inventoryService.reserveItems(event.items);
+
+ // Send confirmation email
+ await this.emailService.sendOrderConfirmation(
+ event.customerId,
+ event.orderId,
+ event.totalAmount
+ );
+ }
+}
+```
+
+### Event Publishing Patterns
+
+```typescript
+// Pattern 1: Collect and dispatch after save
+class OrderApplicationService {
+  async placeOrder(command: PlaceOrderCommand): Promise<OrderId> {
+ const order = Order.create(command);
+
+ await this.orderRepository.save(order);
+
+ // Dispatch events after successful save
+ const events = order.pullDomainEvents();
+ await this.eventDispatcher.dispatchAll(events);
+
+ return order.id;
+ }
+}
+
+// Pattern 2: Outbox pattern (reliable publishing)
+class OrderApplicationService {
+  async placeOrder(command: PlaceOrderCommand): Promise<OrderId> {
+    let order!: Order;
+    await this.unitOfWork.transaction(async () => {
+      order = Order.create(command);
+      await this.orderRepository.save(order);
+
+      // Save events to outbox in same transaction
+      const events = order.pullDomainEvents();
+      await this.outbox.saveEvents(events);
+    });
+
+    // Separate process publishes from outbox
+    return order.id;
+  }
+}
+```
+
+## Repository
+
+### Definition
+
+Mediates between the domain and data mapping layers. Provides collection-like interface for accessing aggregates.
+
+### Characteristics
+
+- One repository per aggregate root
+- Interface defined in domain layer, implementation in infrastructure
+- Returns fully reconstituted aggregates
+- Abstracts persistence concerns from domain
+
+### Interface Design
+
+```typescript
+// Domain layer interface
+interface OrderRepository {
+  findById(id: OrderId): Promise<Order | null>;
+  save(order: Order): Promise<void>;
+  delete(order: Order): Promise<void>;
+
+  // Domain-specific queries
+  findPendingOrdersFor(customerId: CustomerId): Promise<Order[]>;
+  findOrdersToShipBefore(deadline: Date): Promise<Order[]>;
+}
+
+// Infrastructure implementation
+class PostgresOrderRepository implements OrderRepository {
+ constructor(private db: Database) {}
+
+  async findById(id: OrderId): Promise<Order | null> {
+ const row = await this.db.query(
+ 'SELECT * FROM orders WHERE id = $1',
+ [id.toString()]
+ );
+
+ if (!row) return null;
+
+ const items = await this.db.query(
+ 'SELECT * FROM order_items WHERE order_id = $1',
+ [id.toString()]
+ );
+
+ return this.reconstitute(row, items);
+ }
+
+  async save(order: Order): Promise<void> {
+ await this.db.transaction(async (tx) => {
+ await tx.query(
+ 'INSERT INTO orders (id, status, customer_id) VALUES ($1, $2, $3) ON CONFLICT (id) DO UPDATE SET status = $2',
+ [order.id.toString(), order.status, order.customerId.toString()]
+ );
+
+ // Save items
+ for (const item of order.items) {
+ await tx.query(
+ 'INSERT INTO order_items (order_id, product_id, quantity, unit_price) VALUES ($1, $2, $3, $4) ON CONFLICT DO UPDATE...',
+ [order.id.toString(), item.productId.toString(), item.quantity.value, item.unitPrice.amount]
+ );
+ }
+ });
+ }
+
+ private reconstitute(orderRow: any, itemRows: any[]): Order {
+ // Rebuild aggregate from persistence data
+ return Order.reconstitute({
+ id: OrderId.from(orderRow.id),
+ status: OrderStatus[orderRow.status],
+ customerId: CustomerId.from(orderRow.customer_id),
+ items: itemRows.map(row => OrderItem.reconstitute({
+ productId: ProductId.from(row.product_id),
+ quantity: Quantity.of(row.quantity),
+ unitPrice: Money.of(row.unit_price, Currency.USD)
+ }))
+ });
+ }
+}
+```
+
+### Repository vs DAO
+
+```typescript
+// DAO: Data-centric, returns raw data
+interface OrderDao {
+  findById(id: string): Promise<OrderRow | null>;
+  findItems(orderId: string): Promise<ItemRow[]>;
+  insert(row: OrderRow): Promise<void>;
+}
+
+// Repository: Domain-centric, returns aggregates
+interface OrderRepository {
+  findById(id: OrderId): Promise<Order | null>;
+  save(order: Order): Promise<void>;
+}
+```
+
+## Domain Service
+
+### Definition
+
+Stateless operations that represent domain concepts but don't naturally belong to any entity or value object.
+
+### When to Use
+
+- The operation involves multiple aggregates
+- The operation represents a domain concept
+- Putting the operation on an entity would create awkward dependencies
+- The operation is stateless
+
+### Examples
+
+```typescript
+// Domain Service: Transfer money between accounts
+class MoneyTransferService {
+ transfer(
+ from: Account,
+ to: Account,
+ amount: Money
+ ): TransferResult {
+ // Involves two aggregates
+ // Neither account should "own" this operation
+
+ if (!from.canWithdraw(amount)) {
+ return TransferResult.insufficientFunds();
+ }
+
+ from.withdraw(amount);
+ to.deposit(amount);
+
+ return TransferResult.success(
+ new MoneyTransferred(from.id, to.id, amount)
+ );
+ }
+}
+
+// Domain Service: Calculate shipping cost
+class ShippingCostCalculator {
+ constructor(
+ private rateProvider: ShippingRateProvider
+ ) {}
+
+ calculate(
+ items: OrderItem[],
+ destination: Address,
+ shippingMethod: ShippingMethod
+ ): Money {
+ const totalWeight = items.reduce(
+ (sum, item) => sum.add(item.weight),
+ Weight.zero()
+ );
+
+ const rate = this.rateProvider.getRate(
+ destination.country,
+ shippingMethod
+ );
+
+ return rate.calculateFor(totalWeight);
+ }
+}
+
+// Domain Service: Check inventory availability
+class InventoryAvailabilityService {
+ constructor(
+ private inventoryRepository: InventoryRepository
+ ) {}
+
+ checkAvailability(
+ items: Array<{ productId: ProductId; quantity: Quantity }>
+ ): AvailabilityResult {
+ const unavailable: ProductId[] = [];
+
+ for (const { productId, quantity } of items) {
+ const inventory = this.inventoryRepository.findByProductId(productId);
+ if (!inventory || !inventory.hasAvailable(quantity)) {
+ unavailable.push(productId);
+ }
+ }
+
+ return unavailable.length === 0
+ ? AvailabilityResult.allAvailable()
+ : AvailabilityResult.someUnavailable(unavailable);
+ }
+}
+```
+
+### Domain Service vs Application Service
+
+```typescript
+// Domain Service: Domain logic, domain types, stateless
+class PricingService {
+ calculateDiscountedPrice(product: Product, customer: Customer): Money {
+ const basePrice = product.price;
+ const discount = customer.membershipLevel.discountPercentage;
+ return basePrice.applyDiscount(discount);
+ }
+}
+
+// Application Service: Orchestration, use cases, transaction boundary
+class OrderApplicationService {
+  constructor(
+    private orderRepository: OrderRepository,
+    private customerRepository: CustomerRepository,
+    private productRepository: ProductRepository,
+    private pricingService: PricingService,
+    private eventPublisher: EventPublisher
+  ) {}
+
+  async createOrder(command: CreateOrderCommand): Promise<OrderId> {
+ const customer = await this.customerRepository.findById(command.customerId);
+ const order = Order.create(command.orderId, customer.id);
+
+ for (const item of command.items) {
+ const product = await this.productRepository.findById(item.productId);
+ const price = this.pricingService.calculateDiscountedPrice(product, customer);
+ order.addItem(item.productId, item.quantity, price);
+ }
+
+ await this.orderRepository.save(order);
+ await this.eventPublisher.publish(order.pullDomainEvents());
+
+ return order.id;
+ }
+}
+```
+
+## Factory
+
+### Definition
+
+Encapsulates complex object or aggregate creation logic. Creates objects in a valid state.
+
+### When to Use
+
+- Construction logic is complex
+- Multiple ways to create the same type of object
+- Creation involves other objects or services
+- Need to enforce invariants at creation time
+
+### Implementation
+
+```typescript
+// Factory as static method
+class Order {
+ static create(customerId: CustomerId, creditLimit: Money): Order {
+ return new Order(
+ OrderId.generate(),
+ customerId,
+ creditLimit,
+ OrderStatus.Draft,
+ []
+ );
+ }
+
+ static reconstitute(data: OrderData): Order {
+ // For rebuilding from persistence
+ return new Order(
+ data.id,
+ data.customerId,
+ data.creditLimit,
+ data.status,
+ data.items
+ );
+ }
+}
+
+// Factory as separate class
+class OrderFactory {
+ constructor(
+ private creditLimitService: CreditLimitService,
+ private idGenerator: IdGenerator
+ ) {}
+
+  async createForCustomer(customerId: CustomerId): Promise<Order> {
+ const creditLimit = await this.creditLimitService.getLimit(customerId);
+ const orderId = this.idGenerator.generate();
+
+ return Order.create(orderId, customerId, creditLimit);
+ }
+
+ createFromQuote(quote: Quote): Order {
+ const order = Order.create(
+ this.idGenerator.generate(),
+ quote.customerId,
+ quote.creditLimit
+ );
+
+ for (const item of quote.items) {
+ order.addItem(item.productId, item.quantity, item.agreedPrice);
+ }
+
+ return order;
+ }
+}
+
+// Builder pattern for complex construction
+class OrderBuilder {
+ private customerId?: CustomerId;
+ private items: OrderItemData[] = [];
+ private shippingAddress?: Address;
+ private billingAddress?: Address;
+
+ forCustomer(customerId: CustomerId): this {
+ this.customerId = customerId;
+ return this;
+ }
+
+ withItem(productId: ProductId, quantity: Quantity, price: Money): this {
+ this.items.push({ productId, quantity, price });
+ return this;
+ }
+
+ shippingTo(address: Address): this {
+ this.shippingAddress = address;
+ return this;
+ }
+
+ billingTo(address: Address): this {
+ this.billingAddress = address;
+ return this;
+ }
+
+ build(): Order {
+ if (!this.customerId) throw new Error('Customer required');
+ if (!this.shippingAddress) throw new Error('Shipping address required');
+ if (this.items.length === 0) throw new Error('At least one item required');
+
+ const order = Order.create(this.customerId);
+ order.setShippingAddress(this.shippingAddress);
+ order.setBillingAddress(this.billingAddress ?? this.shippingAddress);
+
+ for (const item of this.items) {
+ order.addItem(item.productId, item.quantity, item.price);
+ }
+
+ return order;
+ }
+}
+```
diff --git a/.claude/skills/elliptic-curves/SKILL.md b/.claude/skills/elliptic-curves/SKILL.md
new file mode 100644
index 00000000..82bab00f
--- /dev/null
+++ b/.claude/skills/elliptic-curves/SKILL.md
@@ -0,0 +1,369 @@
+---
+name: elliptic-curves
+description: This skill should be used when working with elliptic curve cryptography, implementing or debugging secp256k1 operations, understanding modular arithmetic and finite fields, or implementing signature schemes like ECDSA and Schnorr. Provides comprehensive knowledge of group theory foundations, curve mathematics, point multiplication algorithms, and cryptographic optimizations.
+---
+
+# Elliptic Curve Cryptography
+
+This skill provides deep knowledge of elliptic curve cryptography (ECC), with particular focus on the secp256k1 curve used in Bitcoin and Nostr, including the mathematical foundations and implementation considerations.
+
+## When to Use This Skill
+
+- Implementing or debugging elliptic curve operations
+- Working with secp256k1, ECDSA, or Schnorr signatures
+- Understanding modular arithmetic and finite field operations
+- Optimizing cryptographic code for performance
+- Analyzing security properties of curve-based cryptography
+
+## Mathematical Foundations
+
+### Groups in Cryptography
+
+A **group** is a set G with a binary operation (often denoted · or +) satisfying:
+
+1. **Closure**: For all a, b ∈ G, the result a · b is also in G
+2. **Associativity**: (a · b) · c = a · (b · c)
+3. **Identity**: There exists e ∈ G such that e · a = a · e = a
+4. **Inverse**: For each a ∈ G, there exists a⁻¹ such that a · a⁻¹ = e
+
+A **cyclic group** is generated by repeatedly applying the operation to a single element (the generator). The **order** of a group is the number of elements.
+
+**Why groups matter in cryptography**: The discrete logarithm problem—given g and gⁿ, find n—is computationally hard in certain groups, forming the security basis for ECC.
+
+### Modular Arithmetic
+
+Modular arithmetic constrains calculations to a finite range [0, p-1] for some modulus p:
+
+```
+a ≡ b (mod p) means p divides (a - b)
+
+Operations:
+- Addition: (a + b) mod p
+- Subtraction: (a - b + p) mod p
+- Multiplication: (a × b) mod p
+- Inverse: a⁻¹ where (a × a⁻¹) ≡ 1 (mod p)
+```
+
+**Computing modular inverse**:
+- **Fermat's Little Theorem**: If p is prime, a⁻¹ ≡ a^(p-2) (mod p)
+- **Extended Euclidean Algorithm**: More efficient for general cases
+- **SafeGCD Algorithm**: Constant-time, used in libsecp256k1
+
+### Finite Fields (Galois Fields)
+
+A **finite field** GF(p) or 𝔽ₚ is a field with a finite number of elements where:
+- p must be prime (or a prime power for extension fields)
+- All arithmetic operations are defined and produce elements within the field
+- Every non-zero element has a multiplicative inverse
+
+For cryptographic curves like secp256k1, the field is 𝔽ₚ where p is a 256-bit prime.
+
+**Key property**: The non-zero elements of a finite field form a cyclic group under multiplication.
+
+## Elliptic Curves
+
+### The Curve Equation
+
+An elliptic curve over a finite field 𝔽ₚ is defined by the Weierstrass equation:
+
+```
+y² = x³ + ax + b (mod p)
+```
+
+The curve must satisfy the non-singularity condition: 4a³ + 27b² ≠ 0
+
+### Points on the Curve
+
+A point P = (x, y) is on the curve if it satisfies the equation. The set of all points, plus a special "point at infinity" O (the identity element), forms an abelian group.
+
+### Point Operations
+
+**Point Addition (P + Q where P ≠ Q)**:
+```
+λ = (y₂ - y₁) / (x₂ - x₁) (mod p)
+x₃ = λ² - x₁ - x₂ (mod p)
+y₃ = λ(x₁ - x₃) - y₁ (mod p)
+```
+
+**Point Doubling (P + P = 2P)**:
+```
+λ = (3x₁² + a) / (2y₁) (mod p)
+x₃ = λ² - 2x₁ (mod p)
+y₃ = λ(x₁ - x₃) - y₁ (mod p)
+```
+
+**Point at Infinity**: Acts as the identity element; P + O = P for all P.
+
+**Point Negation**: -P = (x, -y) = (x, p - y)
+
+## The secp256k1 Curve
+
+### Parameters
+
+secp256k1 is defined by SECG (Standards for Efficient Cryptography Group):
+
+```
+Curve equation: y² = x³ + 7 (a = 0, b = 7)
+
+Prime modulus p:
+ 0xFFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE FFFFFC2F
+ = 2²⁵⁶ - 2³² - 977
+
+Group order n:
+ 0xFFFFFFFF FFFFFFFF FFFFFFFF FFFFFFFE BAAEDCE6 AF48A03B BFD25E8C D0364141
+
+Generator point G:
+ Gx = 0x79BE667E F9DCBBAC 55A06295 CE870B07 029BFCDB 2DCE28D9 59F2815B 16F81798
+ Gy = 0x483ADA77 26A3C465 5DA4FBFC 0E1108A8 FD17B448 A6855419 9C47D08F FB10D4B8
+
+Cofactor h = 1
+```
+
+### Why secp256k1?
+
+1. **Koblitz curve**: a = 0 enables faster computation (no ax term)
+2. **Special prime**: p = 2²⁵⁶ - 2³² - 977 allows efficient modular reduction
+3. **Deterministic construction**: Not randomly generated, reducing backdoor concerns
+4. **~30% faster** than random curves when fully optimized
+
+### Efficient Modular Reduction
+
+The special form of p enables fast reduction without general division:
+
+```
+For p = 2²⁵⁶ - 2³² - 977:
+To reduce a 512-bit number c = c_high × 2²⁵⁶ + c_low:
+ c ≡ c_low + c_high × 2³² + c_high × 977 (mod p)
+```
+
+## Point Multiplication Algorithms
+
+Scalar multiplication kP (computing P + P + ... + P, k times) is the core operation.
+
+### Double-and-Add (Binary Method)
+
+```
+Input: k (scalar), P (point)
+Output: kP
+
+R = O (point at infinity)
+for i from bit_length(k)-1 down to 0:
+ R = 2R # Point doubling
+ if bit i of k is 1:
+ R = R + P # Point addition
+return R
+```
+
+**Complexity**: O(log k) point operations
+**Vulnerability**: Timing side-channels (different branches for 0/1 bits)
+
+### Montgomery Ladder
+
+Constant-time algorithm that performs the same operations regardless of bit values:
+
+```
+Input: k (scalar), P (point)
+Output: kP
+
+R0 = O
+R1 = P
+for i from bit_length(k)-1 down to 0:
+ if bit i of k is 0:
+ R1 = R0 + R1
+ R0 = 2R0
+ else:
+ R0 = R0 + R1
+ R1 = 2R1
+return R0
+```
+
+**Advantage**: Resistant to simple power analysis and timing attacks.
+
+### Window Methods (w-NAF)
+
+Precompute small multiples of P, then process w bits at a time:
+
+```
+w-NAF representation reduces additions by ~1/3 compared to binary
+Precomputation table: [P, 3P, 5P, 7P, ...] for w=4
+```
+
+### Endomorphism Optimization (GLV Method)
+
+secp256k1 has an efficiently computable endomorphism φ where:
+```
+φ(x, y) = (βx, y) where β³ ≡ 1 (mod p)
+φ(P) = λP where λ³ ≡ 1 (mod n)
+```
+
+This allows splitting scalar k into k₁ + k₂λ with smaller k₁, k₂, reducing operations by ~33-50%.
+
+### Multi-Scalar Multiplication (Strauss-Shamir)
+
+For computing k₁P₁ + k₂P₂ (common in signature verification):
+
+```
+Process both scalars simultaneously, combining operations
+Reduces work compared to separate multiplications
+```
+
+## Coordinate Systems
+
+### Affine Coordinates
+
+Standard (x, y) representation. Requires modular inversion for each operation.
+
+### Projective Coordinates
+
+Represent (X:Y:Z) where x = X/Z, y = Y/Z:
+- Avoids inversions during intermediate computations
+- Only one inversion at the end to convert back to affine
+
+### Jacobian Coordinates
+
+Represent (X:Y:Z) where x = X/Z², y = Y/Z³:
+- Fastest for point doubling
+- Used extensively in libsecp256k1
+
+### López-Dahab Coordinates
+
+For curves over GF(2ⁿ), optimized for binary field arithmetic.
+
+## Signature Schemes
+
+### ECDSA (Elliptic Curve Digital Signature Algorithm)
+
+**Key Generation**:
+```
+Private key: d (random integer in [1, n-1])
+Public key: Q = dG
+```
+
+**Signing message m**:
+```
+1. Hash: e = H(m) truncated to curve order bit length
+2. Random: k ∈ [1, n-1]
+3. Compute: (x, y) = kG
+4. Calculate: r = x mod n (if r = 0, restart with new k)
+5. Calculate: s = k⁻¹(e + rd) mod n (if s = 0, restart)
+6. Signature: (r, s)
+```
+
+**Verification of signature (r, s) on message m**:
+```
+1. Check: r, s ∈ [1, n-1]
+2. Hash: e = H(m)
+3. Compute: w = s⁻¹ mod n
+4. Compute: u₁ = ew mod n, u₂ = rw mod n
+5. Compute: (x, y) = u₁G + u₂Q
+6. Valid if: r ≡ x (mod n)
+```
+
+**Security considerations**:
+- k MUST be unique per signature (reuse leaks private key)
+- Use RFC 6979 for deterministic k derivation
+
+### Schnorr Signatures (BIP-340)
+
+Simpler, more efficient, with provable security.
+
+**Signing message m**:
+```
+1. Nonce: k ∈ [1, n-1] (BIP-340 derives k deterministically from the secret key, auxiliary randomness, and the message rather than sampling it at random)
+2. Compute: R = kG
+3. Challenge: e = H(R || Q || m)
+4. Response: s = k + ed mod n
+5. Signature: (R, s) or (r_x, s) where r_x is x-coordinate of R
+```
+
+**Verification**:
+```
+1. Compute: e = H(R || Q || m)
+2. Check: sG = R + eQ
+```
+
+**Advantages over ECDSA**:
+- Linear: enables signature aggregation (MuSig)
+- Simpler verification (no modular inverse)
+- Batch verification support
+- Provably secure in Random Oracle Model
+
+## Implementation Considerations
+
+### Constant-Time Operations
+
+To prevent timing attacks:
+- Avoid branches dependent on secret data
+- Use constant-time comparison functions
+- Mask operations to hide data-dependent timing
+
+```go
+// BAD: Timing leak
+if secretBit == 1 {
+ doOperation()
+}
+
+// GOOD: Constant-time conditional
+result = conditionalSelect(secretBit, value1, value0)
+```
+
+### Memory Safety
+
+- Zeroize sensitive data after use
+- Avoid leaving secrets in registers or cache
+- Use secure memory allocation when available
+
+### Side-Channel Protections
+
+- **Timing attacks**: Use constant-time algorithms
+- **Power analysis**: Montgomery ladder, point blinding
+- **Cache attacks**: Avoid table lookups indexed by secrets
+
+### Random Number Generation
+
+- Use cryptographically secure RNG for k in ECDSA
+- Consider deterministic k (RFC 6979) for reproducibility
+- Validate output is in valid range [1, n-1]
+
+## libsecp256k1 Optimizations
+
+The Bitcoin Core library includes:
+
+1. **Field arithmetic**: 5×52-bit limbs for 64-bit platforms
+2. **Scalar arithmetic**: 4×64-bit representation
+3. **Endomorphism**: GLV decomposition enabled by default
+4. **Batch inversion**: Amortizes expensive inversions
+5. **SafeGCD**: Constant-time modular inverse
+6. **Precomputed tables**: For generator point multiplications
+
+## Security Properties
+
+### Discrete Logarithm Problem (DLP)
+
+Given P and Q = kP, finding k is computationally infeasible.
+
+**Best known attacks**:
+- Generic: Baby-step Giant-step, Pollard's rho: O(√n) operations
+- For secp256k1: ~2¹²⁸ operations (128-bit security)
+
+### Curve Security Criteria
+
+- Large prime order subgroup
+- Cofactor 1 (no small subgroup attacks)
+- Resistant to MOV attack (embedding degree)
+- Not anomalous (n ≠ p)
+
+## Common Pitfalls
+
+1. **k reuse in ECDSA**: Immediately leaks private key
+2. **Weak random k**: Partially leaks key over multiple signatures
+3. **Invalid curve points**: Validate points are on curve
+4. **Small subgroup attacks**: Check point order (cofactor = 1 helps)
+5. **Timing leaks**: Non-constant-time scalar multiplication
+
+## References
+
+For detailed implementations, see:
+- `references/secp256k1-parameters.md` - Full curve parameters
+- `references/algorithms.md` - Detailed algorithm pseudocode
+- `references/security.md` - Security analysis and attack vectors
diff --git a/.claude/skills/elliptic-curves/references/algorithms.md b/.claude/skills/elliptic-curves/references/algorithms.md
new file mode 100644
index 00000000..63ec1dd2
--- /dev/null
+++ b/.claude/skills/elliptic-curves/references/algorithms.md
@@ -0,0 +1,513 @@
+# Elliptic Curve Algorithms
+
+Detailed pseudocode for core elliptic curve operations.
+
+## Field Arithmetic
+
+### Modular Addition
+
+```
+function mod_add(a, b, p):
+ result = a + b
+ if result >= p:
+ result = result - p
+ return result
+```
+
+### Modular Subtraction
+
+```
+function mod_sub(a, b, p):
+ if a >= b:
+ return a - b
+ else:
+ return p - b + a
+```
+
+### Modular Multiplication
+
+For general case:
+```
+function mod_mul(a, b, p):
+ return (a * b) mod p
+```
+
+For secp256k1, the special prime shape 2²⁵⁶ - 2³² - 977 allows a fast pseudo-Mersenne reduction (cheaper than generic Barrett reduction):
+```
+function mod_mul_secp256k1(a, b):
+ # Compute full 512-bit product
+ product = a * b
+
+ # Split into high and low 256-bit parts
+ low = product & ((1 << 256) - 1)
+ high = product >> 256
+
+ # Reduce: result ≡ low + high * (2³² + 977) (mod p)
+ result = low + high * (1 << 32) + high * 977
+
+ # May need additional reduction
+ while result >= p:
+ result = result - p
+
+ return result
+```
+
+### Modular Inverse
+
+**Extended Euclidean Algorithm**:
+```
+function mod_inverse(a, p):
+ if a == 0:
+ error "No inverse exists for 0"
+
+ old_r, r = p, a
+ old_s, s = 0, 1
+
+ while r != 0:
+ quotient = old_r / r
+ old_r, r = r, old_r - quotient * r
+ old_s, s = s, old_s - quotient * s
+
+ if old_r != 1:
+ error "No inverse exists"
+
+ if old_s < 0:
+ old_s = old_s + p
+
+ return old_s
+```
+
+**Fermat's Little Theorem** (for prime p):
+```
+function mod_inverse_fermat(a, p):
+ return mod_exp(a, p - 2, p)
+```
+
+### Modular Exponentiation (Square-and-Multiply)
+
+```
+function mod_exp(base, exp, p):
+ result = 1
+ base = base mod p
+
+ while exp > 0:
+ if exp & 1: # exp is odd
+ result = (result * base) mod p
+ exp = exp >> 1
+ base = (base * base) mod p
+
+ return result
+```
+
+### Modular Square Root
+
+Because the secp256k1 field prime satisfies p ≡ 3 (mod 4), the general Tonelli-Shanks algorithm reduces to a single exponentiation:
+```
+function mod_sqrt(a, p):
+ # For p ≡ 3 (mod 4), sqrt(a) = a^((p+1)/4)
+ return mod_exp(a, (p + 1) / 4, p)
+```
+
+## Point Operations
+
+### Point Validation
+
+```
+function is_on_curve(P, a, b, p):
+ if P is infinity:
+ return true
+
+ x, y = P
+ left = (y * y) mod p
+ right = (x * x * x + a * x + b) mod p
+
+ return left == right
+```
+
+### Point Addition (Affine Coordinates)
+
+```
+function point_add(P, Q, a, p):
+ if P is infinity:
+ return Q
+ if Q is infinity:
+ return P
+
+ x1, y1 = P
+ x2, y2 = Q
+
+ if x1 == x2:
+ if y1 == mod_neg(y2, p): # P = -Q
+ return infinity
+ else: # P == Q
+ return point_double(P, a, p)
+
+ # λ = (y2 - y1) / (x2 - x1)
+ numerator = mod_sub(y2, y1, p)
+ denominator = mod_sub(x2, x1, p)
+ λ = mod_mul(numerator, mod_inverse(denominator, p), p)
+
+ # x3 = λ² - x1 - x2
+ x3 = mod_sub(mod_sub(mod_mul(λ, λ, p), x1, p), x2, p)
+
+ # y3 = λ(x1 - x3) - y1
+ y3 = mod_sub(mod_mul(λ, mod_sub(x1, x3, p), p), y1, p)
+
+ return (x3, y3)
+```
+
+### Point Doubling (Affine Coordinates)
+
+```
+function point_double(P, a, p):
+ if P is infinity:
+ return infinity
+
+ x, y = P
+
+ if y == 0:
+ return infinity
+
+ # λ = (3x² + a) / (2y)
+ numerator = mod_add(mod_mul(3, mod_mul(x, x, p), p), a, p)
+ denominator = mod_mul(2, y, p)
+ λ = mod_mul(numerator, mod_inverse(denominator, p), p)
+
+ # x3 = λ² - 2x
+ x3 = mod_sub(mod_mul(λ, λ, p), mod_mul(2, x, p), p)
+
+ # y3 = λ(x - x3) - y
+ y3 = mod_sub(mod_mul(λ, mod_sub(x, x3, p), p), y, p)
+
+ return (x3, y3)
+```
+
+### Point Negation
+
+```
+function point_negate(P, p):
+ if P is infinity:
+ return infinity
+
+ x, y = P
+ return (x, p - y)
+```
+
+## Scalar Multiplication
+
+### Double-and-Add (Left-to-Right)
+
+```
+function scalar_mult_double_add(k, P, a, p):
+ if k == 0 or P is infinity:
+ return infinity
+
+ if k < 0:
+ k = -k
+ P = point_negate(P, p)
+
+ R = infinity
+ bits = binary_representation(k) # MSB first
+
+ for bit in bits:
+ R = point_double(R, a, p)
+ if bit == 1:
+ R = point_add(R, P, a, p)
+
+ return R
+```
+
+### Montgomery Ladder (Constant-Time)
+
+```
+function scalar_mult_montgomery(k, P, a, p):
+ R0 = infinity
+ R1 = P
+
+ bits = binary_representation(k) # MSB first
+
+ for bit in bits:
+ if bit == 0:
+ R1 = point_add(R0, R1, a, p)
+ R0 = point_double(R0, a, p)
+ else:
+ R0 = point_add(R0, R1, a, p)
+ R1 = point_double(R1, a, p)
+
+ return R0
+```
+
+### w-NAF Scalar Multiplication
+
+```
+function compute_wNAF(k, w):
+ # Convert scalar to width-w Non-Adjacent Form
+ naf = []
+
+ while k > 0:
+ if k & 1: # k is odd
+ # Get w-bit window
+ digit = k mod (1 << w)
+ if digit >= (1 << (w-1)):
+ digit = digit - (1 << w)
+ naf.append(digit)
+ k = k - digit
+ else:
+ naf.append(0)
+ k = k >> 1
+
+ return naf
+
+function scalar_mult_wNAF(k, P, w, a, p):
+ # Precompute odd multiples: [P, 3P, 5P, ..., (2^(w-1)-1)P]
+ precomp = [P]
+ P2 = point_double(P, a, p)
+    for i in range(1, 1 << (w - 2)):  # table holds the 2^(w-2) odd multiples P, 3P, ..., (2^(w-1)-1)P
+ precomp.append(point_add(precomp[-1], P2, a, p))
+
+ # Convert k to w-NAF
+ naf = compute_wNAF(k, w)
+
+ # Compute scalar multiplication
+ R = infinity
+ for i in range(len(naf) - 1, -1, -1):
+ R = point_double(R, a, p)
+ digit = naf[i]
+ if digit > 0:
+ R = point_add(R, precomp[(digit - 1) / 2], a, p)
+ elif digit < 0:
+ R = point_add(R, point_negate(precomp[(-digit - 1) / 2], p), a, p)
+
+ return R
+```
+
+### Shamir's Trick (Multi-Scalar)
+
+For computing k₁P + k₂Q efficiently:
+
+```
+function multi_scalar_mult(k1, P, k2, Q, a, p):
+ # Precompute P + Q
+ PQ = point_add(P, Q, a, p)
+
+ # Get binary representations (same length, padded)
+ bits1 = binary_representation(k1)
+ bits2 = binary_representation(k2)
+ max_len = max(len(bits1), len(bits2))
+ bits1 = pad_left(bits1, max_len)
+ bits2 = pad_left(bits2, max_len)
+
+ R = infinity
+
+ for i in range(max_len):
+ R = point_double(R, a, p)
+
+ b1, b2 = bits1[i], bits2[i]
+
+ if b1 == 1 and b2 == 1:
+ R = point_add(R, PQ, a, p)
+ elif b1 == 1:
+ R = point_add(R, P, a, p)
+ elif b2 == 1:
+ R = point_add(R, Q, a, p)
+
+ return R
+```
+
+## Jacobian Coordinates
+
+More efficient for repeated operations.
+
+### Conversion
+
+```
+# Affine to Jacobian
+function affine_to_jacobian(P):
+ if P is infinity:
+ return (1, 1, 0) # Jacobian infinity
+ x, y = P
+ return (x, y, 1)
+
+# Jacobian to Affine
+function jacobian_to_affine(P, p):
+ X, Y, Z = P
+ if Z == 0:
+ return infinity
+
+ Z_inv = mod_inverse(Z, p)
+ Z_inv2 = mod_mul(Z_inv, Z_inv, p)
+ Z_inv3 = mod_mul(Z_inv2, Z_inv, p)
+
+ x = mod_mul(X, Z_inv2, p)
+ y = mod_mul(Y, Z_inv3, p)
+
+ return (x, y)
+```
+
+### Point Doubling (Jacobian)
+
+For curve y² = x³ + 7 (a = 0):
+
+```
+function jacobian_double(P, p):
+ X, Y, Z = P
+
+ if Y == 0:
+ return (1, 1, 0) # infinity
+
+ # For a = 0: M = 3*X²
+ S = mod_mul(4, mod_mul(X, mod_mul(Y, Y, p), p), p)
+ M = mod_mul(3, mod_mul(X, X, p), p)
+
+ X3 = mod_sub(mod_mul(M, M, p), mod_mul(2, S, p), p)
+    Y3 = mod_sub(mod_mul(M, mod_sub(S, X3, p), p),
+                 mod_mul(8, mod_mul(mod_mul(Y, Y, p), mod_mul(Y, Y, p), p), p), p)
+ Z3 = mod_mul(2, mod_mul(Y, Z, p), p)
+
+ return (X3, Y3, Z3)
+```
+
+### Point Addition (Jacobian + Affine)
+
+Mixed addition is faster when one point is in affine:
+
+```
+function jacobian_add_affine(P, Q, p):
+ # P in Jacobian (X1, Y1, Z1), Q in affine (x2, y2)
+ X1, Y1, Z1 = P
+ x2, y2 = Q
+
+ if Z1 == 0:
+ return affine_to_jacobian(Q)
+
+ Z1Z1 = mod_mul(Z1, Z1, p)
+ U2 = mod_mul(x2, Z1Z1, p)
+ S2 = mod_mul(y2, mod_mul(Z1, Z1Z1, p), p)
+
+ H = mod_sub(U2, X1, p)
+ HH = mod_mul(H, H, p)
+ I = mod_mul(4, HH, p)
+ J = mod_mul(H, I, p)
+ r = mod_mul(2, mod_sub(S2, Y1, p), p)
+ V = mod_mul(X1, I, p)
+
+ X3 = mod_sub(mod_sub(mod_mul(r, r, p), J, p), mod_mul(2, V, p), p)
+ Y3 = mod_sub(mod_mul(r, mod_sub(V, X3, p), p), mod_mul(2, mod_mul(Y1, J, p), p), p)
+    Z3 = mod_sub(mod_sub(mod_mul(mod_add(Z1, H, p), mod_add(Z1, H, p), p),
+                 Z1Z1, p), HH, p)
+
+ return (X3, Y3, Z3)
+```
+
+## GLV Endomorphism (secp256k1)
+
+### Scalar Decomposition
+
+```
+# Constants for secp256k1
+LAMBDA = 0x5363AD4CC05C30E0A5261C028812645A122E22EA20816678DF02967C1B23BD72
+BETA = 0x7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE
+
+# Decomposition coefficients (see references/secp256k1-parameters.md)
+A1 = 0x3086D221A7D46BCDE86C90E49284EB15
+B1 = -0xE4437ED6010E88286F547FA90ABFE4C3
+A2 = 0x114CA50F7A8E2F3F657C1108D9D44CFD8
+B2 = A1
+
+function glv_decompose(k, n):
+ # Compute c1 = round(b2 * k / n)
+ # Compute c2 = round(-b1 * k / n)
+ c1 = (B2 * k + n // 2) // n
+ c2 = (-B1 * k + n // 2) // n
+
+ # k1 = k - c1*A1 - c2*A2
+ # k2 = -c1*B1 - c2*B2
+ k1 = k - c1 * A1 - c2 * A2
+ k2 = -c1 * B1 - c2 * B2
+
+ return (k1, k2)
+
+function glv_scalar_mult(k, P, p, n):
+ k1, k2 = glv_decompose(k, n)
+
+ # Compute endomorphism: φ(P) = (β*x, y)
+ x, y = P
+ phi_P = (mod_mul(BETA, x, p), y)
+
+ # Use Shamir's trick: k1*P + k2*φ(P)
+ return multi_scalar_mult(k1, P, k2, phi_P, 0, p)
+```
+
+## Batch Inversion
+
+Amortize expensive inversions over multiple points:
+
+```
+function batch_invert(values, p):
+ n = len(values)
+ if n == 0:
+ return []
+
+ # Compute cumulative products
+ products = [values[0]]
+ for i in range(1, n):
+ products.append(mod_mul(products[-1], values[i], p))
+
+ # Invert the final product
+ inv = mod_inverse(products[-1], p)
+
+ # Compute individual inverses
+ inverses = [0] * n
+ for i in range(n - 1, 0, -1):
+ inverses[i] = mod_mul(inv, products[i - 1], p)
+ inv = mod_mul(inv, values[i], p)
+ inverses[0] = inv
+
+ return inverses
+```
+
+## Key Generation
+
+```
+function generate_keypair(G, n, p):
+ # Generate random private key
+ d = random_integer(1, n - 1)
+
+ # Compute public key
+ Q = scalar_mult(d, G)
+
+ return (d, Q)
+```
+
+## Point Compression/Decompression
+
+```
+function compress_point(P, p):
+ if P is infinity:
+ return bytes([0x00])
+
+ x, y = P
+ prefix = 0x02 if (y % 2 == 0) else 0x03
+ return bytes([prefix]) + x.to_bytes(32, 'big')
+
+function decompress_point(compressed, a, b, p):
+ prefix = compressed[0]
+
+ if prefix == 0x00:
+ return infinity
+
+ x = int.from_bytes(compressed[1:], 'big')
+
+ # Compute y² = x³ + ax + b
+ y_squared = mod_add(mod_add(mod_mul(x, mod_mul(x, x, p), p),
+ mod_mul(a, x, p), p), b, p)
+
+ # Compute y = sqrt(y²)
+ y = mod_sqrt(y_squared, p)
+
+ # Select correct y based on prefix
+ if (prefix == 0x02) != (y % 2 == 0):
+ y = p - y
+
+ return (x, y)
+```
\ No newline at end of file
diff --git a/.claude/skills/elliptic-curves/references/secp256k1-parameters.md b/.claude/skills/elliptic-curves/references/secp256k1-parameters.md
new file mode 100644
index 00000000..a8ed0561
--- /dev/null
+++ b/.claude/skills/elliptic-curves/references/secp256k1-parameters.md
@@ -0,0 +1,194 @@
+# secp256k1 Complete Parameters
+
+## Curve Definition
+
+**Name**: secp256k1 (Standards for Efficient Cryptography, prime field, 256-bit, Koblitz curve #1)
+
+**Equation**: y² = x³ + 7 (mod p)
+
+This is the short Weierstrass form with coefficients a = 0, b = 7.
+
+## Field Parameters
+
+### Prime Modulus p
+
+```
+Decimal:
+115792089237316195423570985008687907853269984665640564039457584007908834671663
+
+Hexadecimal:
+0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
+
+Binary representation:
+2²⁵⁶ - 2³² - 2⁹ - 2⁸ - 2⁷ - 2⁶ - 2⁴ - 1
+= 2²⁵⁶ - 2³² - 977
+```
+
+**Special form benefits**:
+- Efficient modular reduction using: c mod p = c_low + c_high × (2³² + 977)
+- Near-Mersenne prime enables fast arithmetic
+
+### Group Order n
+
+```
+Decimal:
+115792089237316195423570985008687907852837564279074904382605163141518161494337
+
+Hexadecimal:
+0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
+```
+
+The number of points on the curve, including the point at infinity.
+
+### Cofactor h
+
+```
+h = 1
+```
+
+Cofactor 1 means the group order n equals the curve order, simplifying security analysis and eliminating small subgroup attacks.
+
+## Generator Point G
+
+### Compressed Form
+
+```
+02 79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
+```
+
+The 02 prefix indicates the y-coordinate is even.
+
+### Uncompressed Form
+
+```
+04 79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
+ 483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
+```
+
+### Individual Coordinates
+
+**Gx**:
+```
+Decimal:
+55066263022277343669578718895168534326250603453777594175500187360389116729240
+
+Hexadecimal:
+0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
+```
+
+**Gy**:
+```
+Decimal:
+32670510020758816978083085130507043184471273380659243275938904335757337482424
+
+Hexadecimal:
+0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8
+```
+
+## Endomorphism Parameters
+
+secp256k1 has an efficiently computable endomorphism φ: (x, y) → (βx, y).
+
+### β (Beta)
+
+```
+Hexadecimal:
+0x7AE96A2B657C07106E64479EAC3434E99CF0497512F58995C1396C28719501EE
+
+Property: β³ ≡ 1 (mod p)
+```
+
+### λ (Lambda)
+
+```
+Hexadecimal:
+0x5363AD4CC05C30E0A5261C028812645A122E22EA20816678DF02967C1B23BD72
+
+Property: λ³ ≡ 1 (mod n)
+Relationship: φ(P) = λP for all points P
+```
+
+### GLV Decomposition Constants
+
+For splitting scalar k into k₁ + k₂λ:
+
+```
+a₁ = 0x3086D221A7D46BCDE86C90E49284EB15
+b₁ = -0xE4437ED6010E88286F547FA90ABFE4C3
+a₂ = 0x114CA50F7A8E2F3F657C1108D9D44CFD8
+b₂ = a₁
+```
+
+## Derived Constants
+
+### Field Characteristics
+
+```
+(p + 1) / 4 = 0x3FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFBFFFFF0C
+Used for computing modular square roots via Tonelli-Shanks shortcut
+```
+
+### Order Characteristics
+
+```
+(n - 1) / 2 = 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0
+Used in low-S normalization for ECDSA signatures
+```
+
+## Validation Formulas
+
+### Point on Curve Check
+
+For point (x, y), verify:
+```
+y² ≡ x³ + 7 (mod p)
+```
+
+### Generator Verification
+
+Verify G is on curve:
+```
+Gy² mod p and Gx³ + 7 mod p must evaluate to the same field element
+```
+
+### Order Verification
+
+Verify nG = O (point at infinity):
+```
+Computing n × G should yield the identity element
+```
+
+## Bit Lengths
+
+| Parameter | Bits | Bytes |
+|-----------|------|-------|
+| p (prime) | 256 | 32 |
+| n (order) | 256 | 32 |
+| Private key | 256 | 32 |
+| Public key (compressed) | 257 | 33 |
+| Public key (uncompressed) | 513 | 65 |
+| ECDSA signature | 512 | 64 |
+| Schnorr signature | 512 | 64 |
+
+## Security Level
+
+- **Equivalent symmetric key strength**: 128 bits
+- **Best known attack complexity**: ~2¹²⁸ operations (Pollard's rho)
+- **Safe until**: Quantum computers with ~1500+ logical qubits
+
+## ASN.1 OID
+
+```
+1.3.132.0.10
+iso(1) identified-organization(3) certicom(132) curve(0) secp256k1(10)
+```
+
+## Comparison with Other Curves
+
+| Curve | Field Size | Security | Speed | Use Case |
+|-------|------------|----------|-------|----------|
+| secp256k1 | 256-bit | 128-bit | Fast (Koblitz) | Bitcoin, Nostr |
+| secp256r1 (P-256) | 256-bit | 128-bit | Moderate | TLS, general |
+| Curve25519 | 255-bit | ~128-bit | Very fast | Modern crypto |
+| secp384r1 (P-384) | 384-bit | 192-bit | Slower | High security |
diff --git a/.claude/skills/elliptic-curves/references/security.md b/.claude/skills/elliptic-curves/references/security.md
new file mode 100644
index 00000000..8c241bfd
--- /dev/null
+++ b/.claude/skills/elliptic-curves/references/security.md
@@ -0,0 +1,291 @@
+# Elliptic Curve Security Analysis
+
+Security properties, attack vectors, and mitigations for elliptic curve cryptography.
+
+## The Discrete Logarithm Problem (ECDLP)
+
+### Definition
+
+Given points P and Q = kP on an elliptic curve, find the scalar k.
+
+**Security assumption**: For properly chosen curves, this problem is computationally infeasible.
+
+### Best Known Attacks
+
+#### Generic Attacks (Work on Any Group)
+
+| Attack | Complexity | Notes |
+|--------|------------|-------|
+| Baby-step Giant-step | O(√n) space and time | Requires √n storage |
+| Pollard's rho | O(√n) time, O(1) space | Practical for large groups |
+| Pollard's lambda | O(√n) | When k is in known range |
+| Pohlig-Hellman | O(√p) where p is largest prime factor | Exploits factorization of n |
+
+For secp256k1 (n ≈ 2²⁵⁶):
+- Generic attack complexity: ~2¹²⁸ operations
+- Equivalent to 128-bit symmetric security
+
+#### Curve-Specific Attacks
+
+| Attack | Applicable When | Mitigation |
+|--------|-----------------|------------|
+| MOV/FR reduction | Low embedding degree | Use curves with high embedding degree |
+| Anomalous curve attack | n = p | Ensure n ≠ p |
+| GHS attack | Extension field curves | Use prime field curves |
+
+**secp256k1 is immune to all known curve-specific attacks**.
+
+## Side-Channel Attacks
+
+### Timing Attacks
+
+**Vulnerability**: Execution time varies based on secret data.
+
+**Examples**:
+- Conditional branches on secret bits
+- Early exit conditions
+- Variable-time modular operations
+
+**Mitigations**:
+- Constant-time algorithms (Montgomery ladder)
+- Fixed execution paths
+- Dummy operations to equalize timing
+
+### Power Analysis
+
+**Simple Power Analysis (SPA)**: Single trace reveals operations.
+- Double-and-add visible as different power signatures
+- Mitigation: Montgomery ladder (uniform operations)
+
+**Differential Power Analysis (DPA)**: Statistical analysis of many traces.
+- Mitigation: Point blinding, scalar blinding
+
+### Cache Attacks
+
+**FLUSH+RELOAD Attack**:
+```
+1. Attacker flushes cache line containing lookup table
+2. Victim performs table lookup based on secret
+3. Attacker measures reload time to determine which entry was accessed
+```
+
+**Mitigations**:
+- Avoid secret-dependent table lookups
+- Use constant-time table access patterns
+- Scatter tables to prevent cache line sharing
+
+### Electromagnetic (EM) Attacks
+
+Similar to power analysis but captures electromagnetic emissions.
+
+**Mitigations**:
+- Shielding
+- Same algorithmic protections as power analysis
+
+## Implementation Vulnerabilities
+
+### k-Reuse in ECDSA
+
+**The Sony PS3 Hack (2010)**:
+
+If the same k is used for two signatures (r₁, s₁) and (r₂, s₂) on messages m₁ and m₂:
+
+```
+s₁ = k⁻¹(e₁ + rd) mod n
+s₂ = k⁻¹(e₂ + rd) mod n
+
+Since k is the same:
+s₁ - s₂ = k⁻¹(e₁ - e₂) mod n
+k = (e₁ - e₂)(s₁ - s₂)⁻¹ mod n
+
+Once k is known:
+d = (s₁k - e₁)r⁻¹ mod n
+```
+
+**Mitigation**: Use deterministic k (RFC 6979).
+
+### Weak Random k
+
+Even with unique k values, if the RNG is biased:
+- Lattice-based attacks can recover private key
+- Only ~1% bias in k can be exploitable with enough signatures
+
+**Mitigations**:
+- Use cryptographically secure RNG
+- Use deterministic k (RFC 6979)
+- Verify k is in valid range [1, n-1]
+
+### Invalid Curve Attacks
+
+**Attack**: Attacker provides point not on the curve.
+- Point may be on a weaker curve
+- Operations may leak information
+
+**Mitigation**: Always validate points are on curve:
+```
+Verify: y² ≡ x³ + ax + b (mod p)
+```
+
+### Small Subgroup Attacks
+
+**Attack**: If cofactor h > 1, points of small order exist.
+- Attacker sends point of small order
+- Response reveals private key mod (small order)
+
+**Mitigation**:
+- Use curves with cofactor 1 (secp256k1 has h = 1)
+- Multiply received points by cofactor
+- Validate point order
+
+### Fault Attacks
+
+**Attack**: Induce computational errors (voltage glitches, radiation).
+- Corrupted intermediate values may leak information
+- Differential fault analysis can recover keys
+
+**Mitigations**:
+- Redundant computations with comparison
+- Verify final results
+- Hardware protections
+
+## Signature Malleability
+
+### ECDSA Malleability
+
+Given valid signature (r, s), signature (r, n - s) is also valid for the same message.
+
+**Impact**: Transaction ID malleability (historical Bitcoin issue)
+
+**Mitigation**: Enforce low-S normalization:
+```
+if s > n/2:
+ s = n - s
+```
+
+### Schnorr Non-Malleability
+
+BIP-340 Schnorr signatures are non-malleable by design:
+- Use x-only public keys
+- Deterministic nonce derivation
+
+## Quantum Threats
+
+### Shor's Algorithm
+
+**Threat**: Polynomial-time discrete log on quantum computers.
+- Requires ~1500-2000 logical qubits for secp256k1
+- Current quantum computers: <100 noisy qubits
+
+**Timeline**: Estimated 10-20+ years for cryptographically relevant quantum computers.
+
+### Migration Strategy
+
+1. **Monitor**: Track quantum computing progress
+2. **Prepare**: Develop post-quantum alternatives
+3. **Hybrid**: Use classical + post-quantum in transition
+4. **Migrate**: Full transition when necessary
+
+### Post-Quantum Alternatives
+
+- Lattice-based signatures (CRYSTALS-Dilithium)
+- Hash-based signatures (SPHINCS+)
+- Code-based cryptography
+
+## Best Practices
+
+### Key Generation
+
+```
+DO:
+- Use cryptographically secure RNG
+- Validate private key is in [1, n-1]
+- Verify public key is on curve
+- Verify public key is not point at infinity
+
+DON'T:
+- Use predictable seeds
+- Use truncated random values
+- Skip validation
+```
+
+### Signature Generation
+
+```
+DO:
+- Use RFC 6979 for deterministic k
+- Validate all inputs
+- Use constant-time operations
+- Clear sensitive memory after use
+
+DON'T:
+- Reuse k values
+- Use weak/biased RNG
+- Skip low-S normalization (ECDSA)
+```
+
+### Signature Verification
+
+```
+DO:
+- Validate r, s are in [1, n-1]
+- Validate public key is on curve
+- Validate public key is not infinity
+- Use batch verification when possible
+
+DON'T:
+- Skip any validation steps
+- Accept malformed signatures
+```
+
+### Public Key Handling
+
+```
+DO:
+- Validate received points are on curve
+- Check point is not infinity
+- Prefer compressed format for storage
+
+DON'T:
+- Accept unvalidated points
+- Skip curve membership check
+```
+
+## Security Checklist
+
+### Implementation Review
+
+- [ ] All scalar multiplications are constant-time
+- [ ] No secret-dependent branches
+- [ ] No secret-indexed table lookups
+- [ ] Memory is zeroized after use
+- [ ] Random k uses CSPRNG or RFC 6979
+- [ ] All received points are validated
+- [ ] Private keys are in valid range
+- [ ] Signatures use low-S normalization
+
+### Operational Security
+
+- [ ] Private keys stored securely (HSM, secure enclave)
+- [ ] Key derivation uses proper KDF
+- [ ] Backups are encrypted
+- [ ] Key rotation policy exists
+- [ ] Audit logging enabled
+- [ ] Incident response plan exists
+
+## Security Levels Comparison
+
+| Curve | Bits | Symmetric Equivalent | RSA Equivalent |
+|-------|------|---------------------|----------------|
+| secp192r1 | 192 | 96 | 1536 |
+| secp224r1 | 224 | 112 | 2048 |
+| secp256k1 | 256 | 128 | 3072 |
+| secp384r1 | 384 | 192 | 7680 |
+| secp521r1 | 521 | 256 | 15360 |
+
+## References
+
+- NIST SP 800-57: Recommendation for Key Management
+- SEC 1: Elliptic Curve Cryptography
+- RFC 6979: Deterministic Usage of DSA and ECDSA
+- BIP-340: Schnorr Signatures for secp256k1
+- SafeCurves: Choosing Safe Curves for Elliptic-Curve Cryptography
diff --git a/.claude/skills/go-memory-optimization/SKILL.md b/.claude/skills/go-memory-optimization/SKILL.md
new file mode 100644
index 00000000..f5ed5810
--- /dev/null
+++ b/.claude/skills/go-memory-optimization/SKILL.md
@@ -0,0 +1,478 @@
+---
+name: go-memory-optimization
+description: This skill should be used when optimizing Go code for memory efficiency, reducing GC pressure, implementing object pooling, analyzing escape behavior, choosing between fixed-size arrays and slices, designing worker pools, or profiling memory allocations. Provides comprehensive knowledge of Go's memory model, stack vs heap allocation, sync.Pool patterns, goroutine reuse, and GC tuning.
+---
+
+# Go Memory Optimization
+
+## Overview
+
+This skill provides guidance on optimizing Go programs for memory efficiency and reduced garbage collection overhead. Topics include stack allocation semantics, fixed-size types, escape analysis, object pooling, goroutine management, and GC tuning.
+
+## Core Principles
+
+### The Allocation Hierarchy
+
+Prefer allocations in this order (fastest to slowest):
+
+1. **Stack allocation** - Zero GC cost, automatic cleanup on function return
+2. **Pooled objects** - Amortized allocation cost via sync.Pool
+3. **Pre-allocated buffers** - Single allocation, reused across operations
+4. **Heap allocation** - GC-managed, use when lifetime exceeds function scope
+
+### When Optimization Matters
+
+Focus memory optimization efforts on:
+- Hot paths executed thousands/millions of times per second
+- Large objects (>32KB) that stress the GC
+- Long-running services where GC pauses affect latency
+- Memory-constrained environments
+
+Avoid premature optimization. Profile first with `go tool pprof` to identify actual bottlenecks.
+
+## Fixed-Size Types vs Slices
+
+### Stack Allocation with Arrays
+
+Arrays with known compile-time size can be stack-allocated, avoiding heap entirely:
+
+```go
+// HEAP: slice header + backing array escape to heap
+func processSlice() []byte {
+ data := make([]byte, 32)
+ // ... use data
+ return data // escapes
+}
+
+// STACK: fixed array stays on stack if doesn't escape
+func processArray() {
+ var data [32]byte // stack-allocated
+ // ... use data
+} // automatically cleaned up
+```
+
+### Fixed-Size Binary Types Pattern
+
+Define types with explicit sizes for protocol fields, cryptographic values, and identifiers:
+
+```go
+// Binary types enforce length and enable stack allocation
+type EventID [32]byte // SHA256 hash
+type Pubkey [32]byte // Schnorr public key
+type Signature [64]byte // Schnorr signature
+
+// Methods operate on value receivers when size permits
+func (id EventID) Hex() string {
+ return hex.EncodeToString(id[:])
+}
+
+func (id EventID) IsZero() bool {
+ return id == EventID{} // efficient zero-value comparison
+}
+```
+
+### Size Thresholds
+
+| Size | Recommendation |
+|------|----------------|
+| ≤64 bytes | Pass by value, stack-friendly |
+| 65-128 bytes | Consider context; value for read-only, pointer for mutation |
+| >128 bytes | Pass by pointer to avoid copy overhead |
+
+### Array to Slice Conversion
+
+Convert fixed arrays to slices only at API boundaries:
+
+```go
+type Hash [32]byte
+
+func (h Hash) Bytes() []byte {
+ return h[:] // creates slice header, array stays on stack if h does
+}
+
+// Prefer methods that accept arrays directly
+func VerifySignature(pubkey Pubkey, msg []byte, sig Signature) bool {
+ // pubkey and sig are stack-allocated in caller
+}
+```
+
+## Escape Analysis
+
+### Understanding Escape
+
+Variables "escape" to the heap when the compiler cannot prove their lifetime is bounded by the stack frame. Check escape behavior with:
+
+```bash
+go build -gcflags="-m -m" ./...
+```
+
+### Common Escape Causes
+
+```go
+// 1. Returning pointers to local variables
+func escapes() *int {
+ x := 42
+ return &x // x escapes
+}
+
+// 2. Storing in interface{}
+func escapes(x int) interface{} {
+ return x // x escapes (boxed)
+}
+
+// 3. Closures capturing by reference
+func escapes() func() int {
+ x := 42
+ return func() int { return x } // x escapes
+}
+
+// 4. Slice/map with unknown capacity
+func escapes(n int) []byte {
+ return make([]byte, n) // escapes (size unknown at compile time)
+}
+
+// 5. Sending pointers to channels
+func escapes(ch chan *int) {
+ x := 42
+ ch <- &x // x escapes
+}
+```
+
+### Preventing Escape
+
+```go
+// 1. Accept pointers, don't return them
+func noEscape(result *[32]byte) {
+ // caller owns memory, function fills it
+ copy(result[:], computeHash())
+}
+
+// 2. Use fixed-size arrays
+func noEscape() {
+ var buf [1024]byte // known size, stack-allocated
+ process(buf[:])
+}
+
+// 3. Preallocate with known capacity
+func noEscape() {
+ buf := make([]byte, 0, 1024) // may stay on stack
+ // ... append up to 1024 bytes
+}
+
+// 4. Avoid interface{} on hot paths
+func noEscape(x int) int {
+ return x * 2 // no boxing
+}
+```
+
+## sync.Pool Usage
+
+### Basic Pattern
+
+```go
+var bufferPool = sync.Pool{
+ New: func() interface{} {
+ return make([]byte, 0, 4096)
+ },
+}
+
+func processRequest(data []byte) {
+ buf := bufferPool.Get().([]byte)
+ buf = buf[:0] // reset length, keep capacity
+ defer bufferPool.Put(buf)
+
+ // use buf...
+}
+```
+
+### Typed Pool Wrapper
+
+```go
+type BufferPool struct {
+ pool sync.Pool
+ size int
+}
+
+func NewBufferPool(size int) *BufferPool {
+ return &BufferPool{
+ pool: sync.Pool{
+ New: func() interface{} {
+ b := make([]byte, size)
+ return &b
+ },
+ },
+ size: size,
+ }
+}
+
+func (p *BufferPool) Get() *[]byte {
+ return p.pool.Get().(*[]byte)
+}
+
+func (p *BufferPool) Put(b *[]byte) {
+ if b == nil || cap(*b) < p.size {
+ return // don't pool undersized buffers
+ }
+ *b = (*b)[:p.size] // reset to full size
+ p.pool.Put(b)
+}
+```
+
+### Pool Anti-Patterns
+
+```go
+// BAD: Pool of pointers to small values (overhead exceeds benefit)
+var intPool = sync.Pool{New: func() interface{} { return new(int) }}
+
+// BAD: Not resetting state before Put
+bufPool.Put(buf) // may contain sensitive data
+
+// BAD: Pooling objects with goroutine-local state
+var connPool = sync.Pool{...} // connections are stateful
+
+// BAD: Assuming pooled objects persist (GC clears pools)
+obj := pool.Get()
+// ... long delay
+pool.Put(obj) // pool may have been emptied by GC meanwhile; holding objects this long defeats pooling
+```
+
+### When to Use sync.Pool
+
+| Use Case | Pool? | Reason |
+|----------|-------|--------|
+| Buffers in HTTP handlers | Yes | High allocation rate, short lifetime |
+| Encoder/decoder state | Yes | Expensive to initialize |
+| Small values (<64 bytes) | No | Pointer overhead exceeds benefit |
+| Long-lived objects | No | Pools are for short-lived reuse |
+| Objects with cleanup needs | No | Pool provides no finalization |
+
+## Goroutine Pooling
+
+### Worker Pool Pattern
+
+```go
+type WorkerPool struct {
+ jobs chan func()
+ workers int
+ wg sync.WaitGroup
+}
+
+func NewWorkerPool(workers, queueSize int) *WorkerPool {
+ p := &WorkerPool{
+ jobs: make(chan func(), queueSize),
+ workers: workers,
+ }
+ p.wg.Add(workers)
+ for i := 0; i < workers; i++ {
+ go p.worker()
+ }
+ return p
+}
+
+func (p *WorkerPool) worker() {
+ defer p.wg.Done()
+ for job := range p.jobs {
+ job()
+ }
+}
+
+func (p *WorkerPool) Submit(job func()) {
+ p.jobs <- job
+}
+
+func (p *WorkerPool) Shutdown() {
+ close(p.jobs)
+ p.wg.Wait()
+}
+```
+
+### Bounded Concurrency with Semaphore
+
+```go
+type Semaphore struct {
+ sem chan struct{}
+}
+
+func NewSemaphore(n int) *Semaphore {
+ return &Semaphore{sem: make(chan struct{}, n)}
+}
+
+func (s *Semaphore) Acquire() { s.sem <- struct{}{} }
+func (s *Semaphore) Release() { <-s.sem }
+
+// Usage
+sem := NewSemaphore(runtime.GOMAXPROCS(0))
+for _, item := range items {
+ sem.Acquire()
+ go func(it Item) {
+ defer sem.Release()
+ process(it)
+ }(item)
+}
+```
+
+### Goroutine Reuse Benefits
+
+| Metric | Spawn per request | Worker pool |
+|--------|-------------------|-------------|
+| Goroutine creation | O(n) | O(workers) |
+| Stack allocation | 2KB × n | 2KB × workers |
+| Scheduler overhead | Higher | Lower |
+| GC pressure | Higher | Lower |
+
+## Reducing GC Pressure
+
+### Allocation Reduction Strategies
+
+```go
+// 1. Reuse buffers across iterations
+buf := make([]byte, 0, 4096)
+for _, item := range items {
+ buf = buf[:0] // reset without reallocation
+ buf = processItem(buf, item)
+}
+
+// 2. Preallocate slices with known length
+result := make([]Item, 0, len(input)) // avoid append reallocations
+for _, in := range input {
+ result = append(result, transform(in))
+}
+
+// 3. Struct embedding instead of pointer fields
+type Event struct {
+ ID [32]byte // embedded, not *[32]byte
+ Pubkey [32]byte // single allocation for entire struct
+ Signature [64]byte
+ Content string // only string data on heap
+}
+
+// 4. String interning for repeated values
+var kindStrings = map[int]string{
+ 0: "set_metadata",
+ 1: "text_note",
+ // ...
+}
+```
+
+### GC Tuning
+
+```go
+import "runtime/debug"
+
+func init() {
+ // GOGC: target heap growth percentage (default 100)
+ // Lower = more frequent GC, less memory
+ // Higher = less frequent GC, more memory
+ debug.SetGCPercent(50) // GC when heap grows 50%
+
+ // GOMEMLIMIT: soft memory limit (Go 1.19+)
+ // GC becomes more aggressive as limit approaches
+ debug.SetMemoryLimit(512 << 20) // 512MB limit
+}
+```
+
+Environment variables:
+
+```bash
+GOGC=50 # More aggressive GC
+GOMEMLIMIT=512MiB # Soft memory limit
+GODEBUG=gctrace=1 # GC trace output
+```
+
+### Arena Allocation (Go 1.20+, experimental)
+
+```go
+//go:build goexperiment.arenas
+
+import "arena"
+
+func processLargeDataset(data []byte) Result {
+ a := arena.NewArena()
+ defer a.Free() // bulk free all allocations
+
+ // All allocations from arena are freed together
+ items := arena.MakeSlice[Item](a, 0, 1000)
+ // ... process
+
+ // Copy result out before Free
+ return copyResult(items)
+}
+```
+
+## Memory Profiling
+
+### Heap Profile
+
+```go
+import "runtime/pprof"
+
+func captureHeapProfile() {
+ f, _ := os.Create("heap.prof")
+ defer f.Close()
+ runtime.GC() // get accurate picture
+ pprof.WriteHeapProfile(f)
+}
+```
+
+```bash
+go tool pprof -http=:8080 heap.prof
+go tool pprof -alloc_space heap.prof # total allocations
+go tool pprof -inuse_space heap.prof # current usage
+```
+
+### Allocation Benchmarks
+
+```go
+func BenchmarkAllocation(b *testing.B) {
+ b.ReportAllocs()
+ for i := 0; i < b.N; i++ {
+ result := processData(input)
+ _ = result
+ }
+}
+```
+
+Output interpretation:
+
+```
+BenchmarkAllocation-8 1000000 1234 ns/op 256 B/op 3 allocs/op
+ ↑ ↑
+ bytes/op allocations/op
+```
+
+### Live Memory Monitoring
+
+```go
+func printMemStats() {
+ var m runtime.MemStats
+ runtime.ReadMemStats(&m)
+ fmt.Printf("Alloc: %d MB\n", m.Alloc/1024/1024)
+ fmt.Printf("TotalAlloc: %d MB\n", m.TotalAlloc/1024/1024)
+ fmt.Printf("Sys: %d MB\n", m.Sys/1024/1024)
+ fmt.Printf("NumGC: %d\n", m.NumGC)
+ fmt.Printf("GCPause: %v\n", time.Duration(m.PauseNs[(m.NumGC+255)%256]))
+}
+```
+
+## Common Patterns Reference
+
+For detailed code examples and patterns, see `references/patterns.md`:
+
+- Buffer pool implementations
+- Zero-allocation JSON encoding
+- Memory-efficient string building
+- Slice capacity management
+- Struct layout optimization
+
+## Checklist for Memory-Critical Code
+
+1. [ ] Profile before optimizing (`go tool pprof`)
+2. [ ] Check escape analysis output (`-gcflags="-m"`)
+3. [ ] Use fixed-size arrays for known-size data
+4. [ ] Implement sync.Pool for frequently allocated objects
+5. [ ] Preallocate slices with known capacity
+6. [ ] Reuse buffers instead of allocating new ones
+7. [ ] Consider struct field ordering for alignment
+8. [ ] Benchmark with `-benchmem` flag
+9. [ ] Set appropriate GOGC/GOMEMLIMIT for production
+10. [ ] Monitor GC behavior with GODEBUG=gctrace=1
diff --git a/.claude/skills/go-memory-optimization/references/patterns.md b/.claude/skills/go-memory-optimization/references/patterns.md
new file mode 100644
index 00000000..199704af
--- /dev/null
+++ b/.claude/skills/go-memory-optimization/references/patterns.md
@@ -0,0 +1,594 @@
+# Go Memory Optimization Patterns
+
+Detailed code examples and patterns for memory-efficient Go programming.
+
+## Buffer Pool Implementations
+
+### Tiered Buffer Pool
+
+For workloads with varying buffer sizes:
+
+```go
+type TieredPool struct {
+ small sync.Pool // 1KB
+ medium sync.Pool // 16KB
+ large sync.Pool // 256KB
+}
+
+func NewTieredPool() *TieredPool {
+ return &TieredPool{
+ small: sync.Pool{New: func() interface{} { return make([]byte, 1024) }},
+ medium: sync.Pool{New: func() interface{} { return make([]byte, 16384) }},
+ large: sync.Pool{New: func() interface{} { return make([]byte, 262144) }},
+ }
+}
+
+func (p *TieredPool) Get(size int) []byte {
+ switch {
+ case size <= 1024:
+ return p.small.Get().([]byte)[:size]
+ case size <= 16384:
+ return p.medium.Get().([]byte)[:size]
+ case size <= 262144:
+ return p.large.Get().([]byte)[:size]
+ default:
+ return make([]byte, size) // too large for pool
+ }
+}
+
+func (p *TieredPool) Put(b []byte) {
+ switch cap(b) {
+ case 1024:
+ p.small.Put(b[:cap(b)])
+ case 16384:
+ p.medium.Put(b[:cap(b)])
+ case 262144:
+ p.large.Put(b[:cap(b)])
+ }
+ // Non-standard sizes are not pooled
+}
+```
+
+### bytes.Buffer Pool
+
+```go
+var bufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+func GetBuffer() *bytes.Buffer {
+ return bufferPool.Get().(*bytes.Buffer)
+}
+
+func PutBuffer(b *bytes.Buffer) {
+ b.Reset()
+ bufferPool.Put(b)
+}
+
+// Usage
+func processData(data []byte) string {
+ buf := GetBuffer()
+ defer PutBuffer(buf)
+
+ buf.WriteString("prefix:")
+ buf.Write(data)
+ buf.WriteString(":suffix")
+
+ return buf.String() // allocates new string
+}
+```
+
+## Zero-Allocation JSON Encoding
+
+### Pre-allocated Encoder
+
+```go
+type JSONEncoder struct {
+ buf []byte
+ scratch [64]byte // for number formatting
+}
+
+func (e *JSONEncoder) Reset() {
+ e.buf = e.buf[:0]
+}
+
+func (e *JSONEncoder) Bytes() []byte {
+ return e.buf
+}
+
+func (e *JSONEncoder) WriteString(s string) {
+ e.buf = append(e.buf, '"')
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ switch c {
+ case '"':
+ e.buf = append(e.buf, '\\', '"')
+ case '\\':
+ e.buf = append(e.buf, '\\', '\\')
+ case '\n':
+ e.buf = append(e.buf, '\\', 'n')
+ case '\r':
+ e.buf = append(e.buf, '\\', 'r')
+ case '\t':
+ e.buf = append(e.buf, '\\', 't')
+ default:
+ if c < 0x20 {
+ e.buf = append(e.buf, '\\', 'u', '0', '0',
+ hexDigits[c>>4], hexDigits[c&0xf])
+ } else {
+ e.buf = append(e.buf, c)
+ }
+ }
+ }
+ e.buf = append(e.buf, '"')
+}
+
+func (e *JSONEncoder) WriteInt(n int64) {
+ e.buf = strconv.AppendInt(e.buf, n, 10)
+}
+
+func (e *JSONEncoder) WriteHex(b []byte) {
+ e.buf = append(e.buf, '"')
+ for _, v := range b {
+ e.buf = append(e.buf, hexDigits[v>>4], hexDigits[v&0xf])
+ }
+ e.buf = append(e.buf, '"')
+}
+
+var hexDigits = [16]byte{'0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}
+```
+
+### Append-Based Encoding
+
+```go
+// AppendJSON appends JSON representation to dst, returning extended slice
+func (ev *Event) AppendJSON(dst []byte) []byte {
+ dst = append(dst, `{"id":"`...)
+ dst = appendHex(dst, ev.ID[:])
+ dst = append(dst, `","pubkey":"`...)
+ dst = appendHex(dst, ev.Pubkey[:])
+ dst = append(dst, `","created_at":`...)
+ dst = strconv.AppendInt(dst, ev.CreatedAt, 10)
+ dst = append(dst, `,"kind":`...)
+ dst = strconv.AppendInt(dst, int64(ev.Kind), 10)
+ dst = append(dst, `,"content":`...)
+ dst = appendJSONString(dst, ev.Content)
+ dst = append(dst, '}')
+ return dst
+}
+
+// Usage with pre-allocated buffer
+func encodeEvents(events []Event) []byte {
+ // Estimate size: ~500 bytes per event
+ buf := make([]byte, 0, len(events)*500)
+ buf = append(buf, '[')
+ for i, ev := range events {
+ if i > 0 {
+ buf = append(buf, ',')
+ }
+ buf = ev.AppendJSON(buf)
+ }
+ buf = append(buf, ']')
+ return buf
+}
+```
+
+## Memory-Efficient String Building
+
+### strings.Builder with Preallocation
+
+```go
+func buildQuery(parts []string) string {
+ // Calculate total length (never negative, so Grow is safe even for empty parts;
+ // counting one separator per part over-allocates by at most one byte)
+ total := 0
+ for _, p := range parts {
+ total += len(p) + 1
+
+ var b strings.Builder
+ b.Grow(total) // single allocation
+
+ for i, p := range parts {
+ if i > 0 {
+ b.WriteByte(',')
+ }
+ b.WriteString(p)
+ }
+ return b.String()
+}
+```
+
+### Avoiding String Concatenation
+
+```go
+// BAD: O(n^2) allocations
+func buildPath(parts []string) string {
+ result := ""
+ for _, p := range parts {
+ result += "/" + p // new allocation each iteration
+ }
+ return result
+}
+
+// GOOD: O(n) with single allocation
+func buildPath(parts []string) string {
+ if len(parts) == 0 {
+ return ""
+ }
+ n := len(parts) // for slashes
+ for _, p := range parts {
+ n += len(p)
+ }
+
+ b := make([]byte, 0, n)
+ for _, p := range parts {
+ b = append(b, '/')
+ b = append(b, p...)
+ }
+ return string(b)
+}
+```
+
+### Unsafe String/Byte Conversion
+
+```go
+import "unsafe"
+
+// Zero-allocation string to []byte (read-only!)
+func unsafeBytes(s string) []byte {
+ return unsafe.Slice(unsafe.StringData(s), len(s))
+}
+
+// Zero-allocation []byte to string (b must not be modified!)
+func unsafeString(b []byte) string {
+ return unsafe.String(unsafe.SliceData(b), len(b))
+}
+
+// Use when:
+// 1. Converting string for read-only operations (hashing, comparison)
+// 2. Returning []byte from buffer that won't be modified
+// 3. Performance-critical paths with careful ownership management
+```
+
+## Slice Capacity Management
+
+### Append Growth Patterns
+
+```go
+// Slice growth: 0 -> 1 -> 2 -> 4 -> 8 -> 16 -> 32 -> 64 -> ...
+// After 1024: grows by 25% each time
+
+// BAD: Unknown final size causes multiple reallocations
+func collectItems() []Item {
+ var items []Item
+ for item := range source {
+ items = append(items, item) // may reallocate multiple times
+ }
+ return items
+}
+
+// GOOD: Preallocate when size is known
+func collectItems(n int) []Item {
+ items := make([]Item, 0, n)
+ for item := range source {
+ items = append(items, item)
+ }
+ return items
+}
+
+// GOOD: Use slice header trick for uncertain sizes
+func collectItems() []Item {
+ items := make([]Item, 0, 32) // reasonable initial capacity
+ for item := range source {
+ items = append(items, item)
+ }
+ // Trim excess capacity if items will be long-lived
+ return items[:len(items):len(items)]
+}
+```
+
+### Slice Recycling
+
+```go
+// Reuse slice backing array
+func processInBatches(items []Item, batchSize int) {
+ batch := make([]Item, 0, batchSize)
+
+ for i, item := range items {
+ batch = append(batch, item)
+
+ if len(batch) == batchSize || i == len(items)-1 {
+ processBatch(batch)
+ batch = batch[:0] // reset length, keep capacity
+ }
+ }
+}
+```
+
+### Preventing Slice Memory Leaks
+
+```go
+// BAD: Subslice keeps entire backing array alive
+func getFirst10(data []byte) []byte {
+ return data[:10] // entire data array stays in memory
+}
+
+// GOOD: Copy to release original array
+func getFirst10(data []byte) []byte {
+ result := make([]byte, 10)
+ copy(result, data[:10])
+ return result
+}
+
+// Alternative: explicit capacity limit
+func getFirst10(data []byte) []byte {
+ return data[:10:10] // cap=10, can't accidentally grow into original
+}
+```
+
+## Struct Layout Optimization
+
+### Field Ordering for Alignment
+
+```go
+// BAD: 32 bytes due to padding
+type BadLayout struct {
+ a bool // 1 byte + 7 padding
+ b int64 // 8 bytes
+ c bool // 1 byte + 7 padding
+ d int64 // 8 bytes
+}
+
+// GOOD: 24 bytes with optimal ordering
+type GoodLayout struct {
+ b int64 // 8 bytes
+ d int64 // 8 bytes
+ a bool // 1 byte
+ c bool // 1 byte + 6 padding
+}
+
+// Rule: Order fields from largest to smallest alignment
+```
+
+### Checking Struct Size
+
+```go
+func init() {
+ // Compile-time size assertions
+ var _ [24]byte = [unsafe.Sizeof(GoodLayout{})]byte{}
+
+ // Or runtime check
+ if unsafe.Sizeof(Event{}) > 256 {
+ panic("Event struct too large")
+ }
+}
+```
+
+### Cache-Line Optimization
+
+```go
+const CacheLineSize = 64
+
+// Pad struct to prevent false sharing in concurrent access
+type PaddedCounter struct {
+ value uint64
+ _ [CacheLineSize - 8]byte // padding
+}
+
+type Counters struct {
+ reads PaddedCounter
+ writes PaddedCounter
+ // Each counter on separate cache line
+}
+```
+
+## Object Reuse Patterns
+
+### Reset Methods
+
+```go
+type Request struct {
+ Method string
+ Path string
+ Headers map[string]string
+ Body []byte
+}
+
+func (r *Request) Reset() {
+ r.Method = ""
+ r.Path = ""
+ // Reuse map, just clear entries
+ for k := range r.Headers {
+ delete(r.Headers, k)
+ }
+ r.Body = r.Body[:0]
+}
+
+var requestPool = sync.Pool{
+ New: func() interface{} {
+ return &Request{
+ Headers: make(map[string]string, 8),
+ Body: make([]byte, 0, 1024),
+ }
+ },
+}
+```
+
+### Flyweight Pattern
+
+```go
+// Share immutable parts across many instances
+type Event struct {
+ kind *Kind // shared, immutable
+ content string
+}
+
+type Kind struct {
+ ID int
+ Name string
+ Description string
+}
+
+var kindRegistry = map[int]*Kind{
+ 0: {0, "set_metadata", "User metadata"},
+ 1: {1, "text_note", "Text note"},
+ // ... pre-allocated, shared across all events
+}
+
+func NewEvent(kindID int, content string) Event {
+ return Event{
+ kind: kindRegistry[kindID], // no allocation
+ content: content,
+ }
+}
+```
+
+## Channel Patterns for Memory Efficiency
+
+### Buffered Channels as Object Pools
+
+```go
+type SimplePool struct {
+ pool chan *Buffer
+}
+
+func NewSimplePool(size int) *SimplePool {
+ p := &SimplePool{pool: make(chan *Buffer, size)}
+ for i := 0; i < size; i++ {
+ p.pool <- NewBuffer()
+ }
+ return p
+}
+
+func (p *SimplePool) Get() *Buffer {
+ select {
+ case b := <-p.pool:
+ return b
+ default:
+ return NewBuffer() // pool empty, allocate new
+ }
+}
+
+func (p *SimplePool) Put(b *Buffer) {
+ select {
+ case p.pool <- b:
+ default:
+ // pool full, let GC collect
+ }
+}
+```
+
+### Batch Processing Channels
+
+```go
+// Reduce channel overhead by batching
+func batchProcessor(input <-chan Item, batchSize int) <-chan []Item {
+ output := make(chan []Item)
+ go func() {
+ defer close(output)
+ batch := make([]Item, 0, batchSize)
+
+ for item := range input {
+ batch = append(batch, item)
+ if len(batch) == batchSize {
+ output <- batch
+ batch = make([]Item, 0, batchSize)
+ }
+ }
+ if len(batch) > 0 {
+ output <- batch
+ }
+ }()
+ return output
+}
+```
+
+## Advanced Techniques
+
+### Manual Memory Management with mmap
+
+```go
+import "golang.org/x/sys/unix"
+
+// Allocate memory outside Go heap
+func allocateMmap(size int) ([]byte, error) {
+ data, err := unix.Mmap(-1, 0, size,
+ unix.PROT_READ|unix.PROT_WRITE,
+ unix.MAP_ANON|unix.MAP_PRIVATE)
+ return data, err
+}
+
+func freeMmap(data []byte) error {
+ return unix.Munmap(data)
+}
+```
+
+### Inline Arrays in Structs
+
+```go
+// Small-size optimization: inline for small, pointer for large
+type SmallVec struct {
+ len int
+ small [8]int // inline storage for ≤8 elements
+ large []int // heap storage for >8 elements
+}
+
+func (v *SmallVec) Append(x int) {
+ if v.large != nil {
+ v.large = append(v.large, x)
+ v.len++
+ return
+ }
+ if v.len < 8 {
+ v.small[v.len] = x
+ v.len++
+ return
+ }
+ // Spill to heap
+ v.large = make([]int, 9, 16)
+ copy(v.large, v.small[:])
+ v.large[8] = x
+ v.len++
+}
+```
+
+### Bump Allocator
+
+```go
+// Simple arena-style allocator for batch allocations
+type BumpAllocator struct {
+ buf []byte
+ off int
+}
+
+func NewBumpAllocator(size int) *BumpAllocator {
+ return &BumpAllocator{buf: make([]byte, size)}
+}
+
+func (a *BumpAllocator) Alloc(size int) []byte {
+ if a.off+size > len(a.buf) {
+ panic("bump allocator exhausted")
+ }
+ b := a.buf[a.off : a.off+size]
+ a.off += size
+ return b
+}
+
+func (a *BumpAllocator) Reset() {
+ a.off = 0
+}
+
+// Usage: allocate many small objects, reset all at once
+func processBatch(items []Item) {
+ arena := NewBumpAllocator(1 << 20) // 1MB
+ defer arena.Reset()
+
+ for _, item := range items {
+ buf := arena.Alloc(item.Size())
+ item.Serialize(buf)
+ }
+}
+```
diff --git a/.claude/skills/golang/SKILL.md b/.claude/skills/golang/SKILL.md
new file mode 100644
index 00000000..d30ca64e
--- /dev/null
+++ b/.claude/skills/golang/SKILL.md
@@ -0,0 +1,268 @@
+---
+name: golang
+description: This skill should be used when writing, debugging, reviewing, or discussing Go (Golang) code. Provides comprehensive Go programming expertise including idiomatic patterns, standard library, concurrency, error handling, testing, and best practices based on official go.dev documentation.
+---
+
+# Go Programming Expert
+
+## Purpose
+
+This skill provides expert-level assistance with Go programming language development, covering language fundamentals, idiomatic patterns, concurrency, error handling, standard library usage, testing, and best practices.
+
+## When to Use
+
+Activate this skill when:
+- Writing Go code
+- Debugging Go programs
+- Reviewing Go code for best practices
+- Answering questions about Go language features
+- Implementing Go-specific patterns (goroutines, channels, interfaces)
+- Setting up Go projects and modules
+- Writing Go tests
+
+## Core Principles
+
+When writing Go code, always follow these principles:
+
+1. **Named Return Variables**: ALWAYS use named return variables and prefer naked returns for cleaner code
+2. **Error Handling**: Use `lol.mleku.dev/log` and the `chk/errorf` for error checking and creating new errors
+3. **Idiomatic Code**: Write clear, idiomatic Go code following Effective Go guidelines
+4. **Simplicity**: Favor simplicity and clarity over cleverness
+5. **Composition**: Prefer composition over inheritance
+6. **Explicit**: Be explicit rather than implicit
+
+## Key Go Concepts
+
+### Functions with Named Returns
+
+Always use named return values:
+```go
+func divide(a, b float64) (result float64, err error) {
+ if b == 0 {
+ err = errorf.New("division by zero")
+ return
+ }
+ result = a / b
+ return
+}
+```
+
+### Error Handling
+
+Use the specified error handling packages:
+```go
+import "lol.mleku.dev/log"
+
+// Error checking with chk
+if err := doSomething(); chk.E(err) {
+ return
+}
+
+// Creating errors with errorf
+err := errorf.New("something went wrong")
+err := errorf.Errorf("failed to process: %v", value)
+```
+
+### Interfaces and Composition
+
+Go uses implicit interface implementation:
+```go
+type Reader interface {
+ Read(p []byte) (n int, err error)
+}
+
+// Any type with a Read method implements Reader
+type File struct {
+ name string
+}
+
+func (f *File) Read(p []byte) (n int, err error) {
+ // Implementation
+ return
+}
+```
+
+### Interface Design - CRITICAL RULES
+
+**Rule 1: Define interfaces in a dedicated package (e.g., `pkg/interfaces/<name>/`)**
+- Interfaces provide isolation between packages and enable dependency inversion
+- Keeping interfaces in a dedicated package prevents circular dependencies
+- Each interface package should be minimal (just the interface, no implementations)
+
+**Rule 2: NEVER use type assertions with interface literals**
+- **NEVER** write `.(interface{ Method() Type })` - this is non-idiomatic and unmaintainable
+- Interface literals cannot be documented, tested for satisfaction, or reused
+
+```go
+// BAD - interface literal in type assertion (NEVER DO THIS)
+if checker, ok := obj.(interface{ Check() bool }); ok {
+ checker.Check()
+}
+
+// GOOD - use defined interface from dedicated package
+import "myproject/pkg/interfaces/checker"
+
+if c, ok := obj.(checker.Checker); ok {
+ c.Check()
+}
+```
+
+**Rule 3: Resolving Circular Dependencies**
+- If a circular dependency occurs, move the interface to `pkg/interfaces/`
+- The implementing type stays in its original package
+- The consuming code imports only the interface package
+- Pattern:
+ ```
+ pkg/interfaces/foo/ <- interface definition (no dependencies)
+ ↑ ↑
+ pkg/bar/ pkg/baz/
+ (implements) (consumes via interface)
+ ```
+
+**Rule 4: Verify interface satisfaction at compile time**
+```go
+// Add this line to ensure *MyType implements MyInterface
+var _ MyInterface = (*MyType)(nil)
+```
+
+### Concurrency
+
+Use goroutines and channels for concurrent programming:
+```go
+// Launch goroutine
+go doWork()
+
+// Channels
+ch := make(chan int, 10)
+ch <- 42
+value := <-ch
+
+// Select statement
+select {
+case msg := <-ch1:
+ // Handle
+case <-time.After(time.Second):
+ // Timeout
+}
+
+// Sync primitives
+var mu sync.Mutex
+mu.Lock()
+defer mu.Unlock()
+```
+
+### Testing
+
+Use table-driven tests as the default pattern:
+```go
+func TestAdd(t *testing.T) {
+ tests := []struct {
+ name string
+ a, b int
+ expected int
+ }{
+ {"positive", 2, 3, 5},
+ {"negative", -1, -1, -2},
+ {"zero", 0, 5, 5},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := Add(tt.a, tt.b)
+ if result != tt.expected {
+ t.Errorf("got %d, want %d", result, tt.expected)
+ }
+ })
+ }
+}
+```
+
+## Reference Materials
+
+For detailed information, consult the reference files:
+
+- **references/effective-go-summary.md** - Key points from Effective Go including formatting, naming, control structures, functions, data allocation, methods, interfaces, concurrency principles, and error handling philosophy
+
+- **references/common-patterns.md** - Practical Go patterns including:
+ - Design patterns (Functional Options, Builder, Singleton, Factory, Strategy)
+ - Concurrency patterns (Worker Pool, Pipeline, Fan-Out/Fan-In, Timeout, Rate Limiting, Circuit Breaker)
+ - Error handling patterns (Error Wrapping, Sentinel Errors, Custom Error Types)
+ - Resource management patterns
+ - Testing patterns
+
+- **references/quick-reference.md** - Quick syntax cheatsheet with common commands, format verbs, standard library snippets, and best practices checklist
+
+## Best Practices Summary
+
+1. **Naming Conventions**
+ - Use camelCase for variables and functions
+ - Use PascalCase for exported names
+ - Keep names short but descriptive
+ - Interface names often end in -er (Reader, Writer, Handler)
+
+2. **Error Handling**
+ - Always check errors
+ - Use named return values
+ - Use lol.mleku.dev/log and chk/errorf
+
+3. **Code Organization**
+ - One package per directory
+ - Use internal/ for non-exported packages
+ - Use cmd/ for applications
+ - Use pkg/ for reusable libraries
+
+4. **Concurrency**
+ - Don't communicate by sharing memory; share memory by communicating
+ - Always close channels from sender
+ - Use defer for cleanup
+
+5. **Documentation**
+ - Comment all exported names
+ - Start comments with the name being described
+ - Use godoc format
+
+6. **Configuration - CRITICAL**
+ - **NEVER** use `os.Getenv()` scattered throughout packages
+ - **ALWAYS** centralize environment variable parsing in a single config package (e.g., `app/config/`)
+ - Pass configuration via structs, not by reading environment directly
+ - This ensures discoverability, documentation, and testability of all config options
+
+7. **Constants - CRITICAL**
+ - **ALWAYS** define named constants for values used more than a few times
+ - **ALWAYS** define named constants if multiple packages depend on the same value
+ - Constants shared across packages belong in a dedicated package (e.g., `pkg/constants/`)
+ - Magic numbers and strings are forbidden
+ ```go
+ // BAD - magic number
+ if size > 1024 {
+
+ // GOOD - named constant
+ const MaxBufferSize = 1024
+ if size > MaxBufferSize {
+ ```
+
+## Common Commands
+
+```bash
+go run main.go # Run program
+go build # Compile
+go test # Run tests
+go test -v # Verbose tests
+go test -cover # Test coverage
+go test -race # Race detection
+go fmt # Format code
+go vet # Lint code
+go mod tidy # Clean dependencies
+go get package # Add dependency
+```
+
+## Official Resources
+
+All guidance is based on official Go documentation:
+- Go Website: https://go.dev
+- Documentation: https://go.dev/doc/
+- Effective Go: https://go.dev/doc/effective_go
+- Language Specification: https://go.dev/ref/spec
+- Standard Library: https://pkg.go.dev/std
+- Go Tour: https://go.dev/tour/
+
diff --git a/.claude/skills/golang/references/common-patterns.md b/.claude/skills/golang/references/common-patterns.md
new file mode 100644
index 00000000..1ecb3ca5
--- /dev/null
+++ b/.claude/skills/golang/references/common-patterns.md
@@ -0,0 +1,649 @@
+# Go Common Patterns and Idioms
+
+## Design Patterns
+
+### Functional Options Pattern
+
+Used for configuring objects with many optional parameters:
+
+```go
+type Server struct {
+ host string
+ port int
+ timeout time.Duration
+ maxConn int
+}
+
+type Option func(*Server)
+
+func WithHost(host string) Option {
+ return func(s *Server) {
+ s.host = host
+ }
+}
+
+func WithPort(port int) Option {
+ return func(s *Server) {
+ s.port = port
+ }
+}
+
+func WithTimeout(timeout time.Duration) Option {
+ return func(s *Server) {
+ s.timeout = timeout
+ }
+}
+
+func NewServer(opts ...Option) *Server {
+ // Set defaults
+ s := &Server{
+ host: "localhost",
+ port: 8080,
+ timeout: 30 * time.Second,
+ maxConn: 100,
+ }
+
+ // Apply options
+ for _, opt := range opts {
+ opt(s)
+ }
+
+ return s
+}
+
+// Usage
+srv := NewServer(
+ WithHost("example.com"),
+ WithPort(443),
+ WithTimeout(60 * time.Second),
+)
+```
+
+### Builder Pattern
+
+For complex object construction:
+
+```go
+type HTTPRequest struct {
+ method string
+ url string
+ headers map[string]string
+ body []byte
+}
+
+type RequestBuilder struct {
+ request *HTTPRequest
+}
+
+func NewRequestBuilder() *RequestBuilder {
+ return &RequestBuilder{
+ request: &HTTPRequest{
+ headers: make(map[string]string),
+ },
+ }
+}
+
+func (b *RequestBuilder) Method(method string) *RequestBuilder {
+ b.request.method = method
+ return b
+}
+
+func (b *RequestBuilder) URL(url string) *RequestBuilder {
+ b.request.url = url
+ return b
+}
+
+func (b *RequestBuilder) Header(key, value string) *RequestBuilder {
+ b.request.headers[key] = value
+ return b
+}
+
+func (b *RequestBuilder) Body(body []byte) *RequestBuilder {
+ b.request.body = body
+ return b
+}
+
+func (b *RequestBuilder) Build() *HTTPRequest {
+ return b.request
+}
+
+// Usage
+req := NewRequestBuilder().
+ Method("POST").
+ URL("https://api.example.com").
+ Header("Content-Type", "application/json").
+ Body([]byte(`{"key":"value"}`)).
+ Build()
+```
+
+### Singleton Pattern
+
+Thread-safe singleton using sync.Once:
+
+```go
+type Database struct {
+ conn *sql.DB
+}
+
+var (
+ instance *Database
+ once sync.Once
+)
+
+func GetDatabase() *Database {
+ once.Do(func() {
+ conn, err := sql.Open("postgres", "connection-string")
+ if err != nil {
+ log.Fatal(err)
+ }
+ instance = &Database{conn: conn}
+ })
+ return instance
+}
+```
+
+### Factory Pattern
+
+```go
+type Animal interface {
+ Speak() string
+}
+
+type Dog struct{}
+func (d Dog) Speak() string { return "Woof!" }
+
+type Cat struct{}
+func (c Cat) Speak() string { return "Meow!" }
+
+type AnimalFactory struct{}
+
+func (f *AnimalFactory) CreateAnimal(animalType string) Animal {
+ switch animalType {
+ case "dog":
+ return &Dog{}
+ case "cat":
+ return &Cat{}
+ default:
+ return nil
+ }
+}
+```
+
+### Strategy Pattern
+
+```go
+type PaymentStrategy interface {
+ Pay(amount float64) error
+}
+
+type CreditCard struct {
+ number string
+}
+
+func (c *CreditCard) Pay(amount float64) error {
+ fmt.Printf("Paying %.2f using credit card %s\n", amount, c.number)
+ return nil
+}
+
+type PayPal struct {
+ email string
+}
+
+func (p *PayPal) Pay(amount float64) error {
+ fmt.Printf("Paying %.2f using PayPal account %s\n", amount, p.email)
+ return nil
+}
+
+type PaymentContext struct {
+ strategy PaymentStrategy
+}
+
+func (pc *PaymentContext) SetStrategy(strategy PaymentStrategy) {
+ pc.strategy = strategy
+}
+
+func (pc *PaymentContext) ExecutePayment(amount float64) error {
+ return pc.strategy.Pay(amount)
+}
+```
+
+## Concurrency Patterns
+
+### Worker Pool
+
+```go
+func worker(id int, jobs <-chan Job, results chan<- Result) {
+ for job := range jobs {
+ result := processJob(job)
+ results <- result
+ }
+}
+
+func WorkerPool(numWorkers int, jobs []Job) []Result {
+ jobsChan := make(chan Job, len(jobs))
+ results := make(chan Result, len(jobs))
+
+ // Start workers
+ for w := 1; w <= numWorkers; w++ {
+ go worker(w, jobsChan, results)
+ }
+
+ // Send jobs
+ for _, job := range jobs {
+ jobsChan <- job
+ }
+ close(jobsChan)
+
+ // Collect results
+ var output []Result
+ for range jobs {
+ output = append(output, <-results)
+ }
+
+ return output
+}
+```
+
+### Pipeline Pattern
+
+```go
+func generator(nums ...int) <-chan int {
+ out := make(chan int)
+ go func() {
+ for _, n := range nums {
+ out <- n
+ }
+ close(out)
+ }()
+ return out
+}
+
+func square(in <-chan int) <-chan int {
+ out := make(chan int)
+ go func() {
+ for n := range in {
+ out <- n * n
+ }
+ close(out)
+ }()
+ return out
+}
+
+func main() {
+ // Create pipeline
+ c := generator(2, 3, 4)
+ out := square(c)
+
+ // Consume output
+ for result := range out {
+ fmt.Println(result)
+ }
+}
+```
+
+### Fan-Out, Fan-In
+
+```go
+func fanOut(in <-chan int, n int) []<-chan int {
+ channels := make([]<-chan int, n)
+ for i := 0; i < n; i++ {
+ channels[i] = worker(in)
+ }
+ return channels
+}
+
+func worker(in <-chan int) <-chan int {
+ out := make(chan int)
+ go func() {
+ for n := range in {
+ out <- expensiveOperation(n)
+ }
+ close(out)
+ }()
+ return out
+}
+
+func fanIn(channels ...<-chan int) <-chan int {
+ out := make(chan int)
+ var wg sync.WaitGroup
+
+ wg.Add(len(channels))
+ for _, c := range channels {
+ go func(ch <-chan int) {
+ defer wg.Done()
+ for n := range ch {
+ out <- n
+ }
+ }(c)
+ }
+
+ go func() {
+ wg.Wait()
+ close(out)
+ }()
+
+ return out
+}
+```
+
+### Timeout Pattern
+
+```go
+func DoWithTimeout(timeout time.Duration) (result string, err error) {
+ done := make(chan struct{})
+
+ go func() {
+ result = expensiveOperation()
+ close(done)
+ }()
+
+ select {
+ case <-done:
+ return result, nil
+ case <-time.After(timeout):
+ return "", fmt.Errorf("operation timed out after %v", timeout)
+ }
+}
+```
+
+### Graceful Shutdown
+
+```go
+func main() {
+ server := &http.Server{Addr: ":8080"}
+
+ // Start server in goroutine
+ go func() {
+ if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+ log.Fatalf("listen: %s\n", err)
+ }
+ }()
+
+ // Wait for interrupt signal
+ quit := make(chan os.Signal, 1)
+ signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
+ <-quit
+ log.Println("Shutting down server...")
+
+ // Graceful shutdown with timeout
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ if err := server.Shutdown(ctx); err != nil {
+ log.Fatal("Server forced to shutdown:", err)
+ }
+
+ log.Println("Server exiting")
+}
+```
+
+### Rate Limiting
+
+```go
+func rateLimiter(rate time.Duration) <-chan time.Time {
+	// Note: time.Tick never releases the underlying Ticker, so it is only
+	// appropriate for limiters that live for the whole program; otherwise
+	// prefer time.NewTicker and stop it when done.
+	return time.Tick(rate)
+}
+
+func main() {
+ limiter := rateLimiter(200 * time.Millisecond)
+
+ for req := range requests {
+ <-limiter // Wait for rate limiter
+ go handleRequest(req)
+ }
+}
+```
+
+### Circuit Breaker
+
+```go
+type CircuitBreaker struct {
+ maxFailures int
+ timeout time.Duration
+ failures int
+ lastFail time.Time
+ state string
+ mu sync.Mutex
+}
+
+func (cb *CircuitBreaker) Call(fn func() error) error {
+ cb.mu.Lock()
+ defer cb.mu.Unlock()
+
+ if cb.state == "open" {
+ if time.Since(cb.lastFail) > cb.timeout {
+ cb.state = "half-open"
+ } else {
+ return fmt.Errorf("circuit breaker is open")
+ }
+ }
+
+ err := fn()
+ if err != nil {
+ cb.failures++
+ cb.lastFail = time.Now()
+ if cb.failures >= cb.maxFailures {
+ cb.state = "open"
+ }
+ return err
+ }
+
+ cb.failures = 0
+ cb.state = "closed"
+ return nil
+}
+```
+
+## Error Handling Patterns
+
+### Error Wrapping
+
+```go
+func processFile(filename string) (err error) {
+ data, err := readFile(filename)
+ if err != nil {
+ return fmt.Errorf("failed to process file %s: %w", filename, err)
+ }
+
+ if err := validate(data); err != nil {
+ return fmt.Errorf("validation failed for %s: %w", filename, err)
+ }
+
+ return nil
+}
+```
+
+### Sentinel Errors
+
+```go
+var (
+ ErrNotFound = errors.New("not found")
+ ErrUnauthorized = errors.New("unauthorized")
+ ErrInvalidInput = errors.New("invalid input")
+)
+
+func FindUser(id int) (*User, error) {
+ user, exists := users[id]
+ if !exists {
+ return nil, ErrNotFound
+ }
+ return user, nil
+}
+
+// Check error
+user, err := FindUser(123)
+if errors.Is(err, ErrNotFound) {
+ // Handle not found
+}
+```
+
+### Custom Error Types
+
+```go
+type ValidationError struct {
+ Field string
+ Value interface{}
+ Err error
+}
+
+func (e *ValidationError) Error() string {
+ return fmt.Sprintf("validation failed for field %s with value %v: %v",
+ e.Field, e.Value, e.Err)
+}
+
+func (e *ValidationError) Unwrap() error {
+ return e.Err
+}
+
+// Usage
+var validErr *ValidationError
+if errors.As(err, &validErr) {
+ fmt.Printf("Field: %s\n", validErr.Field)
+}
+```
+
+## Resource Management Patterns
+
+### Defer for Cleanup
+
+```go
+func processFile(filename string) error {
+ file, err := os.Open(filename)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+
+ // Process file
+ return nil
+}
+```
+
+### Context for Cancellation
+
+```go
+func fetchData(ctx context.Context, url string) ([]byte, error) {
+ req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return io.ReadAll(resp.Body)
+}
+```
+
+### Sync.Pool for Object Reuse
+
+```go
+var bufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+func process() {
+ buf := bufferPool.Get().(*bytes.Buffer)
+ defer bufferPool.Put(buf)
+
+ buf.Reset()
+ // Use buffer
+}
+```
+
+## Testing Patterns
+
+### Table-Driven Tests
+
+```go
+func TestAdd(t *testing.T) {
+ tests := []struct {
+ name string
+ a, b int
+ expected int
+ }{
+ {"positive numbers", 2, 3, 5},
+ {"negative numbers", -1, -1, -2},
+ {"mixed signs", -5, 10, 5},
+ {"zeros", 0, 0, 0},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := Add(tt.a, tt.b)
+ if result != tt.expected {
+ t.Errorf("Add(%d, %d) = %d; want %d",
+ tt.a, tt.b, result, tt.expected)
+ }
+ })
+ }
+}
+```
+
+### Mock Interfaces
+
+```go
+type Database interface {
+ Get(key string) (string, error)
+ Set(key, value string) error
+}
+
+type MockDB struct {
+ data map[string]string
+}
+
+func (m *MockDB) Get(key string) (string, error) {
+ val, ok := m.data[key]
+ if !ok {
+ return "", errors.New("not found")
+ }
+ return val, nil
+}
+
+func (m *MockDB) Set(key, value string) error {
+ m.data[key] = value
+ return nil
+}
+
+func TestUserService(t *testing.T) {
+ mockDB := &MockDB{data: make(map[string]string)}
+ service := NewUserService(mockDB)
+ // Test service
+}
+```
+
+### Test Fixtures
+
+```go
+func setupTestDB(t *testing.T) (*sql.DB, func()) {
+ db, err := sql.Open("sqlite3", ":memory:")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Setup schema
+ _, err = db.Exec(schema)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ cleanup := func() {
+ db.Close()
+ }
+
+ return db, cleanup
+}
+
+func TestDatabase(t *testing.T) {
+ db, cleanup := setupTestDB(t)
+ defer cleanup()
+
+ // Run tests
+}
+```
+
diff --git a/.claude/skills/golang/references/effective-go-summary.md b/.claude/skills/golang/references/effective-go-summary.md
new file mode 100644
index 00000000..b138062b
--- /dev/null
+++ b/.claude/skills/golang/references/effective-go-summary.md
@@ -0,0 +1,423 @@
+# Effective Go - Key Points Summary
+
+Source: https://go.dev/doc/effective_go
+
+## Formatting
+
+- Use `gofmt` to automatically format your code
+- Indentation: use tabs
+- Line length: no strict limit, but keep reasonable
+- Parentheses: Go uses fewer parentheses than C/Java
+
+## Commentary
+
+- Every package should have a package comment
+- Every exported name should have a doc comment
+- Comments should be complete sentences
+- Start comments with the name of the element being described
+
+Example:
+```go
+// Package regexp implements regular expression search.
+package regexp
+
+// Compile parses a regular expression and returns, if successful,
+// a Regexp object that can be used to match against text.
+func Compile(str string) (*Regexp, error) {
+```
+
+## Names
+
+### Package Names
+- Short, concise, evocative
+- Lowercase, single-word
+- No underscores or mixedCaps
+- Avoid stuttering (e.g., `bytes.Buffer` not `bytes.ByteBuffer`)
+
+### Getters/Setters
+- Getter: `Owner()` not `GetOwner()`
+- Setter: `SetOwner()`
+
+### Interface Names
+- One-method interfaces use method name + -er suffix
+- Examples: `Reader`, `Writer`, `Formatter`, `CloseNotifier`
+
+### MixedCaps
+- Use `MixedCaps` or `mixedCaps` rather than underscores
+
+## Semicolons
+
+- Lexer automatically inserts semicolons
+- Never put opening brace on its own line
+
+## Control Structures
+
+### If
+```go
+if err := file.Chmod(0664); err != nil {
+ log.Print(err)
+ return err
+}
+```
+
+### Redeclaration
+```go
+f, err := os.Open(name)
+// err is declared here
+
+d, err := f.Stat()
+// err is redeclared here (same scope)
+```
+
+### For
+```go
+// Like a C for
+for init; condition; post { }
+
+// Like a C while
+for condition { }
+
+// Like a C for(;;)
+for { }
+
+// Range over array/slice/map/channel
+for key, value := range oldMap {
+ newMap[key] = value
+}
+
+// If you only need the key
+for key := range m {
+ // ...
+}
+
+// If you only need the value
+for _, value := range array {
+ // ...
+}
+```
+
+### Switch
+- No automatic fall through
+- Cases can be expressions
+- Can switch on no value (acts like if-else chain)
+
+```go
+switch {
+case '0' <= c && c <= '9':
+ return c - '0'
+case 'a' <= c && c <= 'f':
+ return c - 'a' + 10
+case 'A' <= c && c <= 'F':
+ return c - 'A' + 10
+}
+```
+
+### Type Switch
+```go
+switch t := value.(type) {
+case int:
+ fmt.Printf("int: %d\n", t)
+case string:
+ fmt.Printf("string: %s\n", t)
+default:
+ fmt.Printf("unexpected type %T\n", t)
+}
+```
+
+## Functions
+
+### Multiple Return Values
+```go
+func (file *File) Write(b []byte) (n int, err error) {
+ // ...
+}
+```
+
+### Named Result Parameters
+- Named results are initialized to zero values
+- Can be used for documentation
+- Enable naked returns
+
+```go
+func ReadFull(r Reader, buf []byte) (n int, err error) {
+ for len(buf) > 0 && err == nil {
+ var nr int
+ nr, err = r.Read(buf)
+ n += nr
+ buf = buf[nr:]
+ }
+ return
+}
+```
+
+### Defer
+- Schedules function call to run after surrounding function returns
+- LIFO order
+- Arguments evaluated when defer executes
+
+```go
+func trace(s string) string {
+ fmt.Println("entering:", s)
+ return s
+}
+
+func un(s string) {
+ fmt.Println("leaving:", s)
+}
+
+func a() {
+ defer un(trace("a"))
+ fmt.Println("in a")
+}
+```
+
+## Data
+
+### Allocation with new
+- `new(T)` allocates zeroed storage for new item of type T
+- Returns `*T`
+- Returns memory address of newly allocated zero value
+
+```go
+p := new(int) // p is *int, points to zeroed int
+```
+
+### Constructors and Composite Literals
+```go
+func NewFile(fd int, name string) *File {
+ if fd < 0 {
+ return nil
+ }
+ return &File{fd: fd, name: name}
+}
+```
+
+### Allocation with make
+- `make(T, args)` creates slices, maps, and channels only
+- Returns initialized (not zeroed) value of type T (not *T)
+
+```go
+make([]int, 10, 100) // slice: len=10, cap=100
+make(map[string]int) // map
+make(chan int, 10) // buffered channel
+```
+
+### Arrays
+- Arrays are values, not pointers
+- Passing array to function copies the entire array
+- Array size is part of its type
+
+### Slices
+- Hold references to underlying array
+- Can grow dynamically with `append`
+- Passing slice passes reference
+
+### Maps
+- Hold references to underlying data structure
+- Passing map passes reference
+- Zero value is `nil`
+
+### Printing
+- `%v` - default format
+- `%+v` - struct with field names
+- `%#v` - Go syntax representation
+- `%T` - type
+- `%q` - quoted string
+
+## Initialization
+
+### Constants
+- Created at compile time
+- Can only be numbers, characters, strings, or booleans
+
+### init Function
+- Each source file can have `init()` function
+- Called after package-level variables initialized
+- Used for setup that can't be expressed as declarations
+
+```go
+func init() {
+ // initialization code
+}
+```
+
+## Methods
+
+### Pointers vs. Values
+- Value methods can be invoked on pointers and values
+- Pointer methods can only be invoked on pointers
+
+Rule: Value methods can be called on both values and pointers, but pointer methods should only be called on pointers (though Go allows calling on addressable values).
+
+```go
+type ByteSlice []byte
+
+func (slice ByteSlice) Append(data []byte) []byte {
+ // ...
+}
+
+func (p *ByteSlice) Append(data []byte) {
+ slice := *p
+ // ...
+ *p = slice
+}
+```
+
+## Interfaces and Other Types
+
+### Interfaces
+- A type implements an interface by implementing its methods
+- No explicit declaration of intent
+
+### Type Assertions
+```go
+value, ok := str.(string)
+```
+
+### Type Switches
+```go
+switch v := value.(type) {
+case string:
+ // v is string
+case int:
+ // v is int
+}
+```
+
+### Generality
+- If a type exists only to implement an interface and will never have exported methods beyond that interface, there's no need to export the type itself
+
+## The Blank Identifier
+
+### Unused Imports and Variables
+```go
+import _ "net/http/pprof" // Import for side effects
+```
+
+### Interface Checks
+```go
+var _ json.Marshaler = (*RawMessage)(nil)
+```
+
+## Embedding
+
+### Composition, not Inheritance
+```go
+type ReadWriter struct {
+ *Reader // *bufio.Reader
+ *Writer // *bufio.Writer
+}
+```
+
+## Concurrency
+
+### Share by Communicating
+- Don't communicate by sharing memory; share memory by communicating
+- Use channels to pass ownership
+
+### Goroutines
+- Cheap: small initial stack
+- Multiplexed onto OS threads
+- Prefix function call with `go` keyword
+
+### Channels
+- Allocate with `make`
+- Unbuffered: synchronous
+- Buffered: asynchronous up to buffer size
+
+```go
+ci := make(chan int) // unbuffered
+cj := make(chan int, 0) // unbuffered
+cs := make(chan *os.File, 100) // buffered
+```
+
+### Channels of Channels
+```go
+type Request struct {
+ args []int
+ f func([]int) int
+ resultChan chan int
+}
+```
+
+### Parallelization
+```go
+var numCPU = runtime.NumCPU() // must be var: a function call is not a constant expression
+runtime.GOMAXPROCS(numCPU)
+```
+
+## Errors
+
+### Error Type
+```go
+type error interface {
+ Error() string
+}
+```
+
+### Custom Errors
+```go
+type PathError struct {
+ Op string
+ Path string
+ Err error
+}
+
+func (e *PathError) Error() string {
+ return e.Op + " " + e.Path + ": " + e.Err.Error()
+}
+```
+
+### Panic
+- Use for unrecoverable errors
+- Generally avoid in library code
+
+### Recover
+- Called inside deferred function
+- Stops panic sequence
+- Returns value passed to panic
+
+```go
+func server(workChan <-chan *Work) {
+ for work := range workChan {
+ go safelyDo(work)
+ }
+}
+
+func safelyDo(work *Work) {
+ defer func() {
+ if err := recover(); err != nil {
+ log.Println("work failed:", err)
+ }
+ }()
+ do(work)
+}
+```
+
+## A Web Server Example
+
+```go
+package main
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+)
+
+type Counter struct {
+ n int
+}
+
+func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+ ctr.n++
+ fmt.Fprintf(w, "counter = %d\n", ctr.n)
+}
+
+func main() {
+ ctr := new(Counter)
+ http.Handle("/counter", ctr)
+ log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```
+
diff --git a/.claude/skills/golang/references/quick-reference.md b/.claude/skills/golang/references/quick-reference.md
new file mode 100644
index 00000000..c2e2a650
--- /dev/null
+++ b/.claude/skills/golang/references/quick-reference.md
@@ -0,0 +1,528 @@
+# Go Quick Reference Cheat Sheet
+
+## Basic Syntax
+
+### Hello World
+```go
+package main
+
+import "fmt"
+
+func main() {
+ fmt.Println("Hello, World!")
+}
+```
+
+### Variables
+```go
+var name string = "John"
+var age int = 30
+var height = 5.9 // type inference
+
+// Short declaration (inside functions only)
+count := 42
+```
+
+### Constants
+```go
+const Pi = 3.14159
+const (
+ Sunday = iota // 0
+ Monday // 1
+ Tuesday // 2
+)
+```
+
+## Data Types
+
+### Basic Types
+```go
+bool // true, false
+string // "hello"
+int int8 int16 int32 int64
+uint uint8 uint16 uint32 uint64
+byte // alias for uint8
+rune // alias for int32 (Unicode)
+float32 float64
+complex64 complex128
+```
+
+### Composite Types
+```go
+// Array (fixed size)
+var arr [5]int
+
+// Slice (dynamic)
+slice := []int{1, 2, 3}
+slice = append(slice, 4)
+
+// Map
+m := make(map[string]int)
+m["key"] = 42
+
+// Struct
+type Person struct {
+ Name string
+ Age int
+}
+p := Person{Name: "Alice", Age: 30}
+
+// Pointer
+ptr := &p
+```
+
+## Functions
+
+```go
+// Basic function
+func add(a, b int) int {
+ return a + b
+}
+
+// Named returns (useful for documentation; keep naked returns to short functions)
+func divide(a, b float64) (result float64, err error) {
+ if b == 0 {
+ err = errors.New("division by zero")
+ return
+ }
+ result = a / b
+ return
+}
+
+// Variadic
+func sum(nums ...int) int {
+ total := 0
+ for _, n := range nums {
+ total += n
+ }
+ return total
+}
+
+// Multiple returns
+func swap(a, b int) (int, int) {
+ return b, a
+}
+```
+
+## Control Flow
+
+### If/Else
+```go
+if x > 0 {
+ // positive
+} else if x < 0 {
+ // negative
+} else {
+ // zero
+}
+
+// With initialization
+if err := doSomething(); err != nil {
+ return err
+}
+```
+
+### For Loops
+```go
+// Traditional for
+for i := 0; i < 10; i++ {
+ fmt.Println(i)
+}
+
+// While-style
+for condition {
+}
+
+// Infinite
+for {
+}
+
+// Range
+for i, v := range slice {
+ fmt.Printf("%d: %v\n", i, v)
+}
+
+for key, value := range myMap {
+ fmt.Printf("%s: %v\n", key, value)
+}
+```
+
+### Switch
+```go
+switch x {
+case 1:
+ fmt.Println("one")
+case 2, 3:
+ fmt.Println("two or three")
+default:
+ fmt.Println("other")
+}
+
+// Type switch
+switch v := i.(type) {
+case int:
+ fmt.Printf("int: %d\n", v)
+case string:
+ fmt.Printf("string: %s\n", v)
+}
+```
+
+## Methods & Interfaces
+
+### Methods
+```go
+type Rectangle struct {
+ Width, Height float64
+}
+
+// Value receiver
+func (r Rectangle) Area() float64 {
+ return r.Width * r.Height
+}
+
+// Pointer receiver
+func (r *Rectangle) Scale(factor float64) {
+ r.Width *= factor
+ r.Height *= factor
+}
+```
+
+### Interfaces
+```go
+type Shape interface {
+ Area() float64
+ Perimeter() float64
+}
+
+// Empty interface (any type)
+var x interface{} // or: var x any
+```
+
+## Concurrency
+
+### Goroutines
+```go
+go doSomething()
+
+go func() {
+ fmt.Println("In goroutine")
+}()
+```
+
+### Channels
+```go
+// Create
+ch := make(chan int) // unbuffered
+ch := make(chan int, 10) // buffered
+
+// Send & Receive
+ch <- 42 // send
+value := <-ch // receive
+
+// Close
+close(ch)
+
+// Check if closed
+value, ok := <-ch
+```
+
+### Select
+```go
+select {
+case msg := <-ch1:
+ fmt.Println("ch1:", msg)
+case msg := <-ch2:
+ fmt.Println("ch2:", msg)
+case <-time.After(1 * time.Second):
+ fmt.Println("timeout")
+default:
+ fmt.Println("no channel ready")
+}
+```
+
+### Sync Package
+```go
+// Mutex
+var mu sync.Mutex
+mu.Lock()
+defer mu.Unlock()
+
+// RWMutex
+var mu sync.RWMutex
+mu.RLock()
+defer mu.RUnlock()
+
+// WaitGroup
+var wg sync.WaitGroup
+wg.Add(1)
+go func() {
+ defer wg.Done()
+ // work
+}()
+wg.Wait()
+```
+
+## Error Handling
+
+```go
+// Create errors
+err := errors.New("error message")
+err := fmt.Errorf("failed: %w", originalErr)
+
+// Check errors
+if err != nil {
+ return err
+}
+
+// Custom error type
+type MyError struct {
+ Msg string
+}
+
+func (e *MyError) Error() string {
+ return e.Msg
+}
+
+// Error checking (Go 1.13+)
+if errors.Is(err, os.ErrNotExist) {
+ // handle
+}
+
+var pathErr *os.PathError
+if errors.As(err, &pathErr) {
+ // handle
+}
+```
+
+## Standard Library Snippets
+
+### fmt - Formatting
+```go
+fmt.Print("text")
+fmt.Println("text with newline")
+fmt.Printf("Name: %s, Age: %d\n", name, age)
+s := fmt.Sprintf("formatted %v", value)
+```
+
+### strings
+```go
+strings.Contains(s, substr)
+strings.HasPrefix(s, prefix)
+strings.Join([]string{"a", "b"}, ",")
+strings.Split(s, ",")
+strings.ToLower(s)
+strings.TrimSpace(s)
+```
+
+### strconv
+```go
+i, _ := strconv.Atoi("42")
+s := strconv.Itoa(42)
+f, _ := strconv.ParseFloat("3.14", 64)
+```
+
+### io
+```go
+io.Copy(dst, src)
+data, _ := io.ReadAll(r)
+io.WriteString(w, "data")
+```
+
+### os
+```go
+file, _ := os.Open("file.txt")
+defer file.Close()
+os.Getenv("PATH")
+os.Exit(1)
+```
+
+### net/http
+```go
+// Server
+http.HandleFunc("/", handler)
+http.ListenAndServe(":8080", nil)
+
+// Client
+resp, _ := http.Get("https://example.com")
+defer resp.Body.Close()
+```
+
+### encoding/json
+```go
+// Encode
+data, _ := json.Marshal(obj)
+
+// Decode
+json.Unmarshal(data, &obj)
+```
+
+### time
+```go
+now := time.Now()
+time.Sleep(5 * time.Second)
+now.Format("2006-01-02 15:04:05")
+time.Parse("2006-01-02", "2024-01-01")
+```
+
+## Testing
+
+### Basic Test
+```go
+// mycode_test.go
+package mypackage
+
+import "testing"
+
+func TestAdd(t *testing.T) {
+ result := Add(2, 3)
+ if result != 5 {
+ t.Errorf("got %d, want 5", result)
+ }
+}
+```
+
+### Table-Driven Test
+```go
+func TestAdd(t *testing.T) {
+ tests := []struct {
+ name string
+ a, b int
+ expected int
+ }{
+ {"positive", 2, 3, 5},
+ {"negative", -1, -1, -2},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := Add(tt.a, tt.b)
+ if result != tt.expected {
+ t.Errorf("got %d, want %d", result, tt.expected)
+ }
+ })
+ }
+}
+```
+
+### Benchmark
+```go
+func BenchmarkAdd(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ Add(2, 3)
+ }
+}
+```
+
+## Go Commands
+
+```bash
+# Run
+go run main.go
+
+# Build
+go build
+go build -o myapp
+
+# Test
+go test
+go test -v
+go test -cover
+go test -race
+
+# Format
+go fmt ./...
+gofmt -s -w .
+
+# Lint
+go vet ./...
+
+# Modules
+go mod init module-name
+go mod tidy
+go get package@version
+go get -u ./...
+
+# Install
+go install
+
+# Documentation
+go doc package.Function
+```
+
+## Common Patterns
+
+### Defer
+```go
+file, err := os.Open("file.txt")
+if err != nil {
+ return err
+}
+defer file.Close()
+```
+
+### Error Wrapping
+```go
+if err != nil {
+ return fmt.Errorf("failed to process: %w", err)
+}
+```
+
+### Context
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+defer cancel()
+```
+
+### Options Pattern
+```go
+type Option func(*Config)
+
+func WithPort(port int) Option {
+ return func(c *Config) {
+ c.port = port
+ }
+}
+
+func New(opts ...Option) *Server {
+ cfg := &Config{port: 8080}
+ for _, opt := range opts {
+ opt(cfg)
+ }
+ return &Server{cfg: cfg}
+}
+```
+
+## Format Verbs
+
+```go
+%v // default format
+%+v // struct with field names
+%#v // Go-syntax representation
+%T // type
+%t // bool
+%d // decimal integer
+%b // binary
+%o // octal
+%x // hex (lowercase)
+%X // hex (uppercase)
+%f // float
+%e // scientific notation
+%s // string
+%q // quoted string
+%p // pointer address
+%w // error wrapping
+```
+
+## Best Practices
+
+1. Use `gofmt` to format code
+2. Always check errors
+3. Use named return values
+4. Prefer composition over inheritance
+5. Use defer for cleanup
+6. Keep functions small and focused
+7. Write table-driven tests
+8. Document exported names
+9. Use interfaces for flexibility
+10. Follow Effective Go guidelines
+
diff --git a/.claude/skills/ndk/INDEX.md b/.claude/skills/ndk/INDEX.md
new file mode 100644
index 00000000..41d6d98e
--- /dev/null
+++ b/.claude/skills/ndk/INDEX.md
@@ -0,0 +1,286 @@
+# NDK (Nostr Development Kit) Claude Skill
+
+> **Comprehensive knowledge base for working with NDK in production applications**
+
+This Claude skill provides deep expertise in the Nostr Development Kit based on real-world usage patterns from the Plebeian Market application.
+
+## 📚 Documentation Structure
+
+```
+.claude/skills/ndk/
+├── INDEX.md # This file - Overview and getting started
+├── ndk-skill.md # Complete reference guide (18KB)
+├── quick-reference.md # Fast lookup for common tasks (7KB)
+├── troubleshooting.md # Common problems and solutions
+└── examples/ # Production code examples
+ ├── README.md
+ ├── 01-initialization.ts # NDK setup and connection
+ ├── 02-authentication.ts # NIP-07, NIP-46, private keys
+ ├── 03-publishing-events.ts # Creating and publishing events
+ ├── 04-querying-subscribing.ts # Fetching and real-time subs
+ └── 05-users-profiles.ts # User and profile management
+```
+
+## 🚀 Quick Start
+
+### For Quick Lookups
+Start with **`quick-reference.md`** for:
+- Common code snippets
+- Quick syntax reminders
+- Frequently used patterns
+
+### For Deep Learning
+Read **`ndk-skill.md`** for:
+- Complete API documentation
+- Best practices
+- Integration patterns
+- Performance optimization
+
+### For Problem Solving
+Check **`troubleshooting.md`** for:
+- Common error solutions
+- Performance tips
+- Testing strategies
+- Debug techniques
+
+### For Code Examples
+Browse **`examples/`** directory for:
+- Real production code
+- Full implementations
+- React integration patterns
+- Error handling examples
+
+## 📖 Core Topics Covered
+
+### 1. Initialization & Setup
+- Basic NDK initialization
+- Multiple instance patterns (main + zap relays)
+- Connection management with timeouts
+- Relay pool configuration
+- Connection status monitoring
+
+### 2. Authentication
+- **NIP-07**: Browser extension signers (Alby, nos2x)
+- **NIP-46**: Remote signers (Bunker)
+- **Private Keys**: Direct key management
+- Auto-login with localStorage
+- Multi-account session management
+
+### 3. Event Publishing
+- Basic text notes
+- Parameterized replaceable events (products, profiles)
+- Order and payment events
+- Batch publishing
+- Error handling patterns
+
+### 4. Querying & Subscriptions
+- One-time fetches with `fetchEvents()`
+- Real-time subscriptions
+- Tag filtering patterns
+- Time-range queries
+- Event monitoring
+- React Query integration
+
+### 5. User & Profile Management
+- Fetch profiles (npub, hex, NIP-05)
+- Update user profiles
+- Follow/unfollow operations
+- Batch profile loading
+- Profile caching strategies
+
+### 6. Advanced Patterns
+- Store-based NDK management
+- Query + subscription combination
+- Event parsing utilities
+- Memory leak prevention
+- Performance optimization
+
+## 🎯 Use Cases
+
+### Building a Nostr Client
+```typescript
+// Initialize
+const { ndk, isConnected } = await initializeNDK({
+ relays: ['wss://relay.damus.io', 'wss://nos.lol'],
+ timeoutMs: 10000
+})
+
+// Authenticate
+const { user } = await loginWithExtension(ndk)
+
+// Publish
+await publishBasicNote(ndk, 'Hello Nostr!')
+
+// Subscribe
+const sub = subscribeToNotes(ndk, user.pubkey, (event) => {
+ console.log('New note:', event.content)
+})
+```
+
+### Building a Marketplace
+```typescript
+// Publish product
+await publishProduct(ndk, {
+ slug: 'bitcoin-shirt',
+ title: 'Bitcoin T-Shirt',
+ price: 25,
+ currency: 'USD',
+ images: ['https://...']
+})
+
+// Create order
+await createOrder(ndk, {
+ orderId: uuidv4(),
+ sellerPubkey: merchant.pubkey,
+ productRef: '30402:pubkey:bitcoin-shirt',
+ quantity: 1,
+ totalAmount: '25.00'
+})
+
+// Monitor payment
+monitorPaymentReceipt(ndk, orderId, invoiceId, (preimage) => {
+ console.log('Payment confirmed!')
+})
+```
+
+### React Integration
+```typescript
+function Feed() {
+ const ndk = useNDK()
+ const { user } = useAuth()
+
+ // Query with real-time updates
+ const { data: notes } = useNotesWithSubscription(
+ ndk,
+ user.pubkey
+ )
+
+ return (
+   <div>
+     {notes?.map(note => (
+       <Note key={note.id} event={note} />
+     ))}
+   </div>
+ )
+}
+```
+
+## 🔍 Common Patterns Quick Reference
+
+### Safe NDK Access
+```typescript
+const ndk = ndkActions.getNDK()
+if (!ndk) throw new Error('NDK not initialized')
+```
+
+### Subscription Cleanup
+```typescript
+useEffect(() => {
+ const sub = ndk.subscribe(filter, { closeOnEose: false })
+ sub.on('event', handleEvent)
+ return () => sub.stop() // Critical!
+}, [ndk])
+```
+
+### Error Handling
+```typescript
+try {
+ await event.sign()
+ await event.publish()
+} catch (error) {
+ console.error('Publishing failed:', error)
+ throw new Error('Failed to publish. Check connection.')
+}
+```
+
+### Tag Filtering
+```typescript
+// ✅ Correct (note the # prefix for tag filters)
+{ kinds: [16], '#order': [orderId] }
+
+// ❌ Wrong
+{ kinds: [16], 'order': [orderId] }
+```
+
+## 🛠 Development Tools
+
+### VS Code Integration
+These skill files work with:
+- Cursor AI for code completion
+- Claude for code assistance
+- GitHub Copilot with context
+
+### Debugging Tips
+```typescript
+// Check connection
+console.log('Connected relays:',
+ Array.from(ndk.pool?.relays.values() || [])
+ .filter(r => r.status === 1)
+ .map(r => r.url)
+)
+
+// Verify signer
+console.log('Signer:', ndk.signer)
+console.log('Active user:', ndk.activeUser)
+
+// Event inspection
+console.log('Event:', {
+ id: event.id,
+ kind: event.kind,
+ tags: event.tags,
+ sig: event.sig
+})
+```
+
+## 📊 Statistics
+
+- **Total Documentation**: ~50KB
+- **Code Examples**: 5 complete modules
+- **Patterns Documented**: 50+
+- **Common Issues Covered**: 15+
+- **Based On**: Real production code
+
+## 🔗 Additional Resources
+
+### Official NDK Resources
+- **GitHub**: https://github.com/nostr-dev-kit/ndk
+- **Documentation**: https://ndk.fyi
+- **NPM**: `@nostr-dev-kit/ndk`
+
+### Nostr Protocol
+- **NIPs**: https://github.com/nostr-protocol/nips
+- **Nostr**: https://nostr.com
+
+### Related Tools
+- **TanStack Query**: React state management
+- **TanStack Router**: Type-safe routing
+- **Radix UI**: Accessible components
+
+## 💡 Tips for Using This Skill
+
+1. **Start Small**: Begin with quick-reference.md for syntax
+2. **Go Deep**: Read ndk-skill.md section by section
+3. **Copy Examples**: Use examples/ as templates
+4. **Debug Issues**: Check troubleshooting.md first
+5. **Stay Updated**: Patterns based on production usage
+
+## 🤝 Contributing
+
+This skill is maintained based on the Plebeian Market codebase. To improve it:
+
+1. Document new patterns you discover
+2. Add solutions to common problems
+3. Update examples with better approaches
+4. Keep synchronized with NDK updates
+
+## 📝 Version Info
+
+- **Skill Version**: 1.0.0
+- **NDK Version**: Latest (based on production usage)
+- **Last Updated**: November 2025
+- **Codebase**: Plebeian Market
+
+---
+
+**Ready to build with NDK?** Start with `quick-reference.md` or dive into `examples/01-initialization.ts`!
+
diff --git a/.claude/skills/ndk/README.md b/.claude/skills/ndk/README.md
new file mode 100644
index 00000000..2a13dc47
--- /dev/null
+++ b/.claude/skills/ndk/README.md
@@ -0,0 +1,38 @@
+# NDK (Nostr Development Kit) Claude Skill
+
+This skill provides comprehensive knowledge about working with the Nostr Development Kit (NDK) library.
+
+## Files
+
+- **ndk-skill.md** - Complete reference documentation with patterns from production usage
+- **quick-reference.md** - Quick lookup guide for common NDK tasks
+- **examples/** - Code examples extracted from the Plebeian Market codebase
+
+## Usage
+
+When working with NDK-related code, reference these documents to:
+- Understand initialization patterns
+- Learn authentication flows (NIP-07, NIP-46, private keys)
+- Implement event creation and publishing
+- Set up subscriptions for real-time updates
+- Query events with filters
+- Handle users and profiles
+- Integrate with TanStack Query
+
+## Key Topics Covered
+
+1. NDK Initialization & Configuration
+2. Authentication & Signers
+3. Event Creation & Publishing
+4. Querying Events
+5. Real-time Subscriptions
+6. User & Profile Management
+7. Tag Handling
+8. Replaceable Events
+9. Relay Management
+10. Integration with React/TanStack Query
+11. Error Handling & Best Practices
+12. Performance Optimization
+
+All examples are based on real production code from the Plebeian Market application.
+
diff --git a/.claude/skills/ndk/examples/01-initialization.ts b/.claude/skills/ndk/examples/01-initialization.ts
new file mode 100644
index 00000000..bada6d6b
--- /dev/null
+++ b/.claude/skills/ndk/examples/01-initialization.ts
@@ -0,0 +1,162 @@
+/**
+ * NDK Initialization Patterns
+ *
+ * Examples from: src/lib/stores/ndk.ts
+ */
+
+import NDK from '@nostr-dev-kit/ndk'
+
+// ============================================================
+// BASIC INITIALIZATION
+// ============================================================
+
+const basicInit = () => {
+ const ndk = new NDK({
+ explicitRelayUrls: ['wss://relay.damus.io', 'wss://relay.nostr.band']
+ })
+
+ return ndk
+}
+
+// ============================================================
+// PRODUCTION PATTERN - WITH MULTIPLE NDK INSTANCES
+// ============================================================
+
+const productionInit = (relays: string[], zapRelays: string[]) => {
+ // Main NDK instance for general operations
+ const ndk = new NDK({
+ explicitRelayUrls: relays
+ })
+
+ // Separate NDK for zap operations (performance optimization)
+ const zapNdk = new NDK({
+ explicitRelayUrls: zapRelays
+ })
+
+ return { ndk, zapNdk }
+}
+
+// ============================================================
+// CONNECTION WITH TIMEOUT
+// ============================================================
+
+const connectWithTimeout = async (
+ ndk: NDK,
+ timeoutMs: number = 10000
+): Promise<void> => {
+ // Create connection promise
+ const connectPromise = ndk.connect()
+
+ // Create timeout promise
+ const timeoutPromise = new Promise((_, reject) =>
+ setTimeout(() => reject(new Error('Connection timeout')), timeoutMs)
+ )
+
+ try {
+ // Race between connection and timeout
+ await Promise.race([connectPromise, timeoutPromise])
+ console.log('✅ NDK connected successfully')
+ } catch (error) {
+ if (error instanceof Error && error.message === 'Connection timeout') {
+ console.error('❌ Connection timed out after', timeoutMs, 'ms')
+ } else {
+ console.error('❌ Connection failed:', error)
+ }
+ throw error
+ }
+}
+
+// ============================================================
+// FULL INITIALIZATION FLOW
+// ============================================================
+
+interface InitConfig {
+ relays?: string[]
+ zapRelays?: string[]
+ timeoutMs?: number
+}
+
+const defaultRelays = [
+ 'wss://relay.damus.io',
+ 'wss://relay.nostr.band',
+ 'wss://nos.lol'
+]
+
+const defaultZapRelays = [
+ 'wss://relay.damus.io',
+ 'wss://nostr.wine'
+]
+
+const initializeNDK = async (config: InitConfig = {}) => {
+ const {
+ relays = defaultRelays,
+ zapRelays = defaultZapRelays,
+ timeoutMs = 10000
+ } = config
+
+ // Initialize instances
+ const ndk = new NDK({ explicitRelayUrls: relays })
+ const zapNdk = new NDK({ explicitRelayUrls: zapRelays })
+
+ // Connect with timeout protection
+ try {
+ await connectWithTimeout(ndk, timeoutMs)
+ await connectWithTimeout(zapNdk, timeoutMs)
+
+ return { ndk, zapNdk, isConnected: true }
+ } catch (error) {
+ return { ndk, zapNdk, isConnected: false, error }
+ }
+}
+
+// ============================================================
+// CHECKING CONNECTION STATUS
+// ============================================================
+
+const getConnectionStatus = (ndk: NDK) => {
+ const connectedRelays = Array.from(ndk.pool?.relays.values() || [])
+ .filter(relay => relay.status === 1)
+ .map(relay => relay.url)
+
+ const isConnected = connectedRelays.length > 0
+
+ return {
+ isConnected,
+ connectedRelays,
+ totalRelays: ndk.pool?.relays.size || 0
+ }
+}
+
+// ============================================================
+// USAGE EXAMPLE
+// ============================================================
+
+async function main() {
+ // Initialize
+ const { ndk, zapNdk, isConnected } = await initializeNDK({
+ relays: defaultRelays,
+ zapRelays: defaultZapRelays,
+ timeoutMs: 10000
+ })
+
+ if (!isConnected) {
+ console.error('Failed to connect to relays')
+ return
+ }
+
+ // Check status
+ const status = getConnectionStatus(ndk)
+ console.log('Connection status:', status)
+
+ // Ready to use
+ console.log('NDK ready for operations')
+}
+
+export {
+ basicInit,
+ productionInit,
+ connectWithTimeout,
+ initializeNDK,
+ getConnectionStatus
+}
+
diff --git a/.claude/skills/ndk/examples/02-authentication.ts b/.claude/skills/ndk/examples/02-authentication.ts
new file mode 100644
index 00000000..2356205a
--- /dev/null
+++ b/.claude/skills/ndk/examples/02-authentication.ts
@@ -0,0 +1,255 @@
+/**
+ * NDK Authentication Patterns
+ *
+ * Examples from: src/lib/stores/auth.ts
+ */
+
+import NDK from '@nostr-dev-kit/ndk'
+import { NDKNip07Signer, NDKPrivateKeySigner, NDKNip46Signer } from '@nostr-dev-kit/ndk'
+
+// ============================================================
+// NIP-07 - BROWSER EXTENSION SIGNER
+// ============================================================
+
+const loginWithExtension = async (ndk: NDK) => {
+ try {
+ // Create NIP-07 signer (browser extension like Alby, nos2x)
+ const signer = new NDKNip07Signer()
+
+ // Wait for signer to be ready
+ await signer.blockUntilReady()
+
+ // Set signer on NDK instance
+ ndk.signer = signer
+
+ // Get authenticated user
+ const user = await signer.user()
+
+ console.log('✅ Logged in via extension:', user.npub)
+ return { user, signer }
+ } catch (error) {
+ console.error('❌ Extension login failed:', error)
+ throw new Error('Failed to login with browser extension. Is it installed?')
+ }
+}
+
+// ============================================================
+// PRIVATE KEY SIGNER
+// ============================================================
+
+const loginWithPrivateKey = async (ndk: NDK, privateKeyHex: string) => {
+ try {
+ // Validate private key format (64 hex characters)
+ if (!/^[0-9a-f]{64}$/.test(privateKeyHex)) {
+ throw new Error('Invalid private key format')
+ }
+
+ // Create private key signer
+ const signer = new NDKPrivateKeySigner(privateKeyHex)
+
+ // Wait for signer to be ready
+ await signer.blockUntilReady()
+
+ // Set signer on NDK instance
+ ndk.signer = signer
+
+ // Get authenticated user
+ const user = await signer.user()
+
+ console.log('✅ Logged in with private key:', user.npub)
+ return { user, signer }
+ } catch (error) {
+ console.error('❌ Private key login failed:', error)
+ throw error
+ }
+}
+
+// ============================================================
+// NIP-46 - REMOTE SIGNER (BUNKER)
+// ============================================================
+
+const loginWithNip46 = async (
+ ndk: NDK,
+ bunkerUrl: string,
+ localPrivateKey?: string
+) => {
+ try {
+ // Create or use existing local signer
+ const localSigner = localPrivateKey
+ ? new NDKPrivateKeySigner(localPrivateKey)
+ : NDKPrivateKeySigner.generate()
+
+ // Create NIP-46 remote signer
+ const remoteSigner = new NDKNip46Signer(ndk, bunkerUrl, localSigner)
+
+ // Wait for signer to be ready (may require user approval)
+ await remoteSigner.blockUntilReady()
+
+ // Set signer on NDK instance
+ ndk.signer = remoteSigner
+
+ // Get authenticated user
+ const user = await remoteSigner.user()
+
+ console.log('✅ Logged in via NIP-46:', user.npub)
+
+ // Store local signer key for reconnection
+ return {
+ user,
+ signer: remoteSigner,
+ localSignerKey: localSigner.privateKey
+ }
+ } catch (error) {
+ console.error('❌ NIP-46 login failed:', error)
+ throw error
+ }
+}
+
+// ============================================================
+// AUTO-LOGIN FROM LOCAL STORAGE
+// ============================================================
+
+const STORAGE_KEYS = {
+ AUTO_LOGIN: 'nostr:auto-login',
+ LOCAL_SIGNER: 'nostr:local-signer',
+ BUNKER_URL: 'nostr:bunker-url',
+ ENCRYPTED_KEY: 'nostr:encrypted-key'
+}
+
+const getAuthFromStorage = async (ndk: NDK) => {
+ try {
+ // Check if auto-login is enabled
+ const autoLogin = localStorage.getItem(STORAGE_KEYS.AUTO_LOGIN)
+ if (autoLogin !== 'true') {
+ return null
+ }
+
+ // Try NIP-46 bunker connection
+ const privateKey = localStorage.getItem(STORAGE_KEYS.LOCAL_SIGNER)
+ const bunkerUrl = localStorage.getItem(STORAGE_KEYS.BUNKER_URL)
+
+ if (privateKey && bunkerUrl) {
+ return await loginWithNip46(ndk, bunkerUrl, privateKey)
+ }
+
+ // Try encrypted private key
+ const encryptedKey = localStorage.getItem(STORAGE_KEYS.ENCRYPTED_KEY)
+ if (encryptedKey) {
+ // Would need decryption password from user
+ return { needsPassword: true, encryptedKey }
+ }
+
+ // Fallback to extension
+ return await loginWithExtension(ndk)
+ } catch (error) {
+ console.error('Auto-login failed:', error)
+ return null
+ }
+}
+
+// ============================================================
+// SAVE AUTH TO STORAGE
+// ============================================================
+
+const saveAuthToStorage = (
+ method: 'extension' | 'private-key' | 'nip46',
+ data?: {
+ privateKey?: string
+ bunkerUrl?: string
+ encryptedKey?: string
+ }
+) => {
+ // Enable auto-login
+ localStorage.setItem(STORAGE_KEYS.AUTO_LOGIN, 'true')
+
+ if (method === 'nip46' && data?.privateKey && data?.bunkerUrl) {
+ localStorage.setItem(STORAGE_KEYS.LOCAL_SIGNER, data.privateKey)
+ localStorage.setItem(STORAGE_KEYS.BUNKER_URL, data.bunkerUrl)
+ } else if (method === 'private-key' && data?.encryptedKey) {
+ localStorage.setItem(STORAGE_KEYS.ENCRYPTED_KEY, data.encryptedKey)
+ }
+ // Extension doesn't need storage
+}
+
+// ============================================================
+// LOGOUT
+// ============================================================
+
+const logout = (ndk: NDK) => {
+ // Remove signer from NDK
+ ndk.signer = undefined
+
+ // Clear all auth storage
+ Object.values(STORAGE_KEYS).forEach(key => {
+ localStorage.removeItem(key)
+ })
+
+ console.log('✅ Logged out successfully')
+}
+
+// ============================================================
+// GET CURRENT USER
+// ============================================================
+
+const getCurrentUser = async (ndk: NDK) => {
+ if (!ndk.signer) {
+ return null
+ }
+
+ try {
+ const user = await ndk.signer.user()
+ return {
+ pubkey: user.pubkey,
+ npub: user.npub,
+ profile: await user.fetchProfile()
+ }
+ } catch (error) {
+ console.error('Failed to get current user:', error)
+ return null
+ }
+}
+
+// ============================================================
+// USAGE EXAMPLE
+// ============================================================
+
+async function authExample(ndk: NDK) {
+ // Try auto-login first
+ let auth = await getAuthFromStorage(ndk)
+
+ if (!auth) {
+ // Manual login options
+ console.log('Choose login method:')
+ console.log('1. Browser Extension (NIP-07)')
+ console.log('2. Private Key')
+ console.log('3. Remote Signer (NIP-46)')
+
+ // Example: login with extension
+ auth = await loginWithExtension(ndk)
+ saveAuthToStorage('extension')
+ }
+
+ if (auth && 'needsPassword' in auth) {
+ // Handle encrypted key case
+ console.log('Password required for encrypted key')
+ return
+ }
+
+ // Get current user info
+ const currentUser = await getCurrentUser(ndk)
+ console.log('Current user:', currentUser)
+
+ // Logout when done
+ // logout(ndk)
+}
+
+export {
+ loginWithExtension,
+ loginWithPrivateKey,
+ loginWithNip46,
+ getAuthFromStorage,
+ saveAuthToStorage,
+ logout,
+ getCurrentUser
+}
+
diff --git a/.claude/skills/ndk/examples/03-publishing-events.ts b/.claude/skills/ndk/examples/03-publishing-events.ts
new file mode 100644
index 00000000..bd068e43
--- /dev/null
+++ b/.claude/skills/ndk/examples/03-publishing-events.ts
@@ -0,0 +1,376 @@
+/**
+ * NDK Event Publishing Patterns
+ *
+ * Examples from: src/publish/orders.tsx, scripts/gen_products.ts
+ */
+
+import NDK, { NDKEvent, NDKTag } from '@nostr-dev-kit/ndk'
+
+// ============================================================
+// BASIC EVENT PUBLISHING
+// ============================================================
+
+const publishBasicNote = async (ndk: NDK, content: string) => {
+ // Create event
+ const event = new NDKEvent(ndk)
+ event.kind = 1 // Text note
+ event.content = content
+ event.tags = []
+
+ // Sign and publish
+ await event.sign()
+ await event.publish()
+
+ console.log('✅ Published note:', event.id)
+ return event.id
+}
+
+// ============================================================
+// EVENT WITH TAGS
+// ============================================================
+
+const publishNoteWithTags = async (
+ ndk: NDK,
+ content: string,
+ options: {
+ mentions?: string[] // pubkeys to mention
+ hashtags?: string[]
+ replyTo?: string // event ID
+ }
+) => {
+ const event = new NDKEvent(ndk)
+ event.kind = 1
+ event.content = content
+ event.tags = []
+
+ // Add mentions
+ if (options.mentions) {
+ options.mentions.forEach(pubkey => {
+ event.tags.push(['p', pubkey])
+ })
+ }
+
+ // Add hashtags
+ if (options.hashtags) {
+ options.hashtags.forEach(tag => {
+ event.tags.push(['t', tag])
+ })
+ }
+
+ // Add reply
+ if (options.replyTo) {
+ event.tags.push(['e', options.replyTo, '', 'reply'])
+ }
+
+ await event.sign()
+ await event.publish()
+
+ return event.id
+}
+
+// ============================================================
+// PRODUCT LISTING (PARAMETERIZED REPLACEABLE EVENT)
+// ============================================================
+
+interface ProductData {
+ slug: string // Unique identifier
+ title: string
+ description: string
+ price: number
+ currency: string
+ images: string[]
+ shippingRefs?: string[]
+ category?: string
+}
+
+const publishProduct = async (ndk: NDK, product: ProductData) => {
+ const event = new NDKEvent(ndk)
+ event.kind = 30402 // Product listing kind
+ event.content = product.description
+
+ // Build tags
+ event.tags = [
+ ['d', product.slug], // Unique identifier (required for replaceable)
+ ['title', product.title],
+ ['price', product.price.toString(), product.currency],
+ ]
+
+ // Add images
+ product.images.forEach(image => {
+ event.tags.push(['image', image])
+ })
+
+ // Add shipping options
+ if (product.shippingRefs) {
+ product.shippingRefs.forEach(ref => {
+ event.tags.push(['shipping', ref])
+ })
+ }
+
+ // Add category
+ if (product.category) {
+ event.tags.push(['t', product.category])
+ }
+
+ // Optional: set custom timestamp
+ event.created_at = Math.floor(Date.now() / 1000)
+
+ await event.sign()
+ await event.publish()
+
+ console.log('✅ Published product:', product.title)
+ return event.id
+}
+
+// ============================================================
+// ORDER CREATION EVENT
+// ============================================================
+
+interface OrderData {
+ orderId: string
+ sellerPubkey: string
+ productRef: string
+ quantity: number
+ totalAmount: string
+ currency: string
+ shippingRef?: string
+ shippingAddress?: string
+ email?: string
+ phone?: string
+ notes?: string
+}
+
+const createOrder = async (ndk: NDK, order: OrderData) => {
+ const event = new NDKEvent(ndk)
+ event.kind = 16 // Order processing kind
+ event.content = order.notes || ''
+
+ // Required tags per spec
+ event.tags = [
+ ['p', order.sellerPubkey],
+ ['subject', `Order ${order.orderId.substring(0, 8)}`],
+ ['type', 'order-creation'],
+ ['order', order.orderId],
+ ['amount', order.totalAmount],
+ ['item', order.productRef, order.quantity.toString()],
+ ]
+
+ // Optional tags
+ if (order.shippingRef) {
+ event.tags.push(['shipping', order.shippingRef])
+ }
+
+ if (order.shippingAddress) {
+ event.tags.push(['address', order.shippingAddress])
+ }
+
+ if (order.email) {
+ event.tags.push(['email', order.email])
+ }
+
+ if (order.phone) {
+ event.tags.push(['phone', order.phone])
+ }
+
+ try {
+ await event.sign()
+ await event.publish()
+
+ console.log('✅ Order created:', order.orderId)
+ return { success: true, eventId: event.id }
+ } catch (error) {
+ console.error('❌ Failed to create order:', error)
+ return { success: false, error }
+ }
+}
+
+// ============================================================
+// STATUS UPDATE EVENT
+// ============================================================
+
+const publishStatusUpdate = async (
+ ndk: NDK,
+ orderId: string,
+ recipientPubkey: string,
+ status: 'pending' | 'paid' | 'shipped' | 'delivered' | 'cancelled',
+ notes?: string
+) => {
+ const event = new NDKEvent(ndk)
+ event.kind = 16
+ event.content = notes || `Order status updated to ${status}`
+ event.tags = [
+ ['p', recipientPubkey],
+ ['subject', 'order-info'],
+ ['type', 'status-update'],
+ ['order', orderId],
+ ['status', status],
+ ]
+
+ await event.sign()
+ await event.publish()
+
+ return event.id
+}
+
+// ============================================================
+// BATCH PUBLISHING
+// ============================================================
+
+const publishMultipleEvents = async (
+ ndk: NDK,
+ events: Array<{ kind: number; content: string; tags: NDKTag[] }>
+) => {
+ const results = []
+
+ for (const eventData of events) {
+ try {
+ const event = new NDKEvent(ndk)
+ event.kind = eventData.kind
+ event.content = eventData.content
+ event.tags = eventData.tags
+
+ await event.sign()
+ await event.publish()
+
+ results.push({ success: true, eventId: event.id })
+ } catch (error) {
+ results.push({ success: false, error })
+ }
+ }
+
+ return results
+}
+
+// ============================================================
+// PUBLISH WITH CUSTOM SIGNER
+// ============================================================
+
+import { NDKSigner } from '@nostr-dev-kit/ndk'
+
+const publishWithCustomSigner = async (
+ ndk: NDK,
+ signer: NDKSigner,
+ eventData: { kind: number; content: string; tags: NDKTag[] }
+) => {
+ const event = new NDKEvent(ndk)
+ event.kind = eventData.kind
+ event.content = eventData.content
+ event.tags = eventData.tags
+
+ // Sign with specific signer (not ndk.signer)
+ await event.sign(signer)
+ await event.publish()
+
+ return event.id
+}
+
+// ============================================================
+// ERROR HANDLING PATTERN
+// ============================================================
+
+const publishWithErrorHandling = async (
+ ndk: NDK,
+ eventData: { kind: number; content: string; tags: NDKTag[] }
+) => {
+ // Validate NDK
+ if (!ndk) {
+ throw new Error('NDK not initialized')
+ }
+
+ // Validate signer
+ if (!ndk.signer) {
+ throw new Error('No active signer. Please login first.')
+ }
+
+ try {
+ const event = new NDKEvent(ndk)
+ event.kind = eventData.kind
+ event.content = eventData.content
+ event.tags = eventData.tags
+
+ // Sign
+ await event.sign()
+
+ // Verify signature
+ if (!event.sig) {
+ throw new Error('Event signing failed')
+ }
+
+ // Publish
+ await event.publish()
+
+ // Verify event ID
+ if (!event.id) {
+ throw new Error('Event ID not generated')
+ }
+
+ return {
+ success: true,
+ eventId: event.id,
+ pubkey: event.pubkey
+ }
+ } catch (error) {
+ console.error('Publishing failed:', error)
+
+ if (error instanceof Error) {
+ // Handle specific error types
+ if (error.message.includes('relay')) {
+ throw new Error('Failed to publish to relays. Check connection.')
+ }
+ if (error.message.includes('sign')) {
+ throw new Error('Failed to sign event. Check signer.')
+ }
+ }
+
+ throw error
+ }
+}
+
+// ============================================================
+// USAGE EXAMPLE
+// ============================================================
+
+async function publishingExample(ndk: NDK) {
+ // Simple note
+ await publishBasicNote(ndk, 'Hello Nostr!')
+
+ // Note with tags
+ await publishNoteWithTags(ndk, 'Check out this product!', {
+ hashtags: ['marketplace', 'nostr'],
+ mentions: ['pubkey123...']
+ })
+
+ // Product listing
+ await publishProduct(ndk, {
+ slug: 'bitcoin-tshirt',
+ title: 'Bitcoin T-Shirt',
+ description: 'High quality Bitcoin t-shirt',
+ price: 25,
+ currency: 'USD',
+ images: ['https://example.com/image.jpg'],
+ category: 'clothing'
+ })
+
+ // Order
+ await createOrder(ndk, {
+ orderId: 'order-123',
+ sellerPubkey: 'seller-pubkey',
+ productRef: '30402:pubkey:bitcoin-tshirt',
+ quantity: 1,
+ totalAmount: '25.00',
+ currency: 'USD',
+ email: 'customer@example.com'
+ })
+}
+
+export {
+ publishBasicNote,
+ publishNoteWithTags,
+ publishProduct,
+ createOrder,
+ publishStatusUpdate,
+ publishMultipleEvents,
+ publishWithCustomSigner,
+ publishWithErrorHandling
+}
+
diff --git a/.claude/skills/ndk/examples/04-querying-subscribing.ts b/.claude/skills/ndk/examples/04-querying-subscribing.ts
new file mode 100644
index 00000000..ff75e97d
--- /dev/null
+++ b/.claude/skills/ndk/examples/04-querying-subscribing.ts
@@ -0,0 +1,404 @@
+/**
+ * NDK Query and Subscription Patterns
+ *
+ * Examples from: src/queries/orders.tsx, src/queries/payment.tsx
+ */
+
+import NDK, { NDKEvent, NDKFilter, NDKSubscription } from '@nostr-dev-kit/ndk'
+
+// ============================================================
+// BASIC FETCH (ONE-TIME QUERY)
+// ============================================================
+
+const fetchNotes = async (ndk: NDK, authorPubkey: string, limit: number = 50) => {
+ const filter: NDKFilter = {
+ kinds: [1], // Text notes
+ authors: [authorPubkey],
+ limit
+ }
+
+  // fetchEvents returns a Set<NDKEvent>
+ const events = await ndk.fetchEvents(filter)
+
+ // Convert to array and sort by timestamp
+ const eventArray = Array.from(events).sort((a, b) =>
+ (b.created_at || 0) - (a.created_at || 0)
+ )
+
+ return eventArray
+}
+
+// ============================================================
+// FETCH WITH MULTIPLE FILTERS
+// ============================================================
+
+const fetchProductsByMultipleAuthors = async (
+ ndk: NDK,
+ pubkeys: string[]
+) => {
+ const filter: NDKFilter = {
+ kinds: [30402], // Product listings
+ authors: pubkeys,
+ limit: 100
+ }
+
+ const events = await ndk.fetchEvents(filter)
+ return Array.from(events)
+}
+
+// ============================================================
+// FETCH WITH TAG FILTERS
+// ============================================================
+
+const fetchOrderEvents = async (ndk: NDK, orderId: string) => {
+ const filter: NDKFilter = {
+ kinds: [16, 17], // Order and payment receipt
+ '#order': [orderId], // Tag filter (note the # prefix)
+ }
+
+ const events = await ndk.fetchEvents(filter)
+ return Array.from(events)
+}
+
+// ============================================================
+// FETCH WITH TIME RANGE
+// ============================================================
+
+const fetchRecentEvents = async (
+ ndk: NDK,
+ kind: number,
+ hoursAgo: number = 24
+) => {
+ const now = Math.floor(Date.now() / 1000)
+ const since = now - (hoursAgo * 3600)
+
+ const filter: NDKFilter = {
+ kinds: [kind],
+ since,
+ until: now,
+ limit: 100
+ }
+
+ const events = await ndk.fetchEvents(filter)
+ return Array.from(events)
+}
+
+// ============================================================
+// FETCH BY EVENT ID
+// ============================================================
+
+const fetchEventById = async (ndk: NDK, eventId: string) => {
+ const filter: NDKFilter = {
+ ids: [eventId]
+ }
+
+ const events = await ndk.fetchEvents(filter)
+
+ if (events.size === 0) {
+ return null
+ }
+
+ return Array.from(events)[0]
+}
+
+// ============================================================
+// BASIC SUBSCRIPTION (REAL-TIME)
+// ============================================================
+
+const subscribeToNotes = (
+ ndk: NDK,
+ authorPubkey: string,
+ onEvent: (event: NDKEvent) => void
+): NDKSubscription => {
+ const filter: NDKFilter = {
+ kinds: [1],
+ authors: [authorPubkey]
+ }
+
+ const subscription = ndk.subscribe(filter, {
+ closeOnEose: false // Keep open for real-time updates
+ })
+
+ // Event handler
+ subscription.on('event', (event: NDKEvent) => {
+ onEvent(event)
+ })
+
+ // EOSE (End of Stored Events) handler
+ subscription.on('eose', () => {
+ console.log('✅ Received all stored events')
+ })
+
+ return subscription
+}
+
+// ============================================================
+// SUBSCRIPTION WITH CLEANUP
+// ============================================================
+
+const createManagedSubscription = (
+ ndk: NDK,
+ filter: NDKFilter,
+ handlers: {
+ onEvent: (event: NDKEvent) => void
+ onEose?: () => void
+ onClose?: () => void
+ }
+) => {
+ const subscription = ndk.subscribe(filter, { closeOnEose: false })
+
+ subscription.on('event', handlers.onEvent)
+
+ if (handlers.onEose) {
+ subscription.on('eose', handlers.onEose)
+ }
+
+ if (handlers.onClose) {
+ subscription.on('close', handlers.onClose)
+ }
+
+ // Return cleanup function
+ return () => {
+ subscription.stop()
+ console.log('✅ Subscription stopped')
+ }
+}
+
+// ============================================================
+// MONITORING SPECIFIC EVENT
+// ============================================================
+
+const monitorPaymentReceipt = (
+ ndk: NDK,
+ orderId: string,
+ invoiceId: string,
+ onPaymentReceived: (preimage: string) => void
+): NDKSubscription => {
+ const sessionStart = Math.floor(Date.now() / 1000)
+
+ const filter: NDKFilter = {
+ kinds: [17], // Payment receipt
+ '#order': [orderId],
+ '#payment-request': [invoiceId],
+ since: sessionStart - 30 // 30 second buffer for clock skew
+ }
+
+ const subscription = ndk.subscribe(filter, { closeOnEose: false })
+
+ subscription.on('event', (event: NDKEvent) => {
+ // Verify event is recent
+ if (event.created_at && event.created_at < sessionStart - 30) {
+ console.log('⏰ Ignoring old receipt')
+ return
+ }
+
+ // Verify it's the correct invoice
+ const paymentRequestTag = event.tags.find(tag => tag[0] === 'payment-request')
+ if (paymentRequestTag?.[1] !== invoiceId) {
+ return
+ }
+
+ // Extract preimage
+ const paymentTag = event.tags.find(tag => tag[0] === 'payment')
+ const preimage = paymentTag?.[3] || 'external-payment'
+
+ console.log('✅ Payment received!')
+ subscription.stop()
+ onPaymentReceived(preimage)
+ })
+
+ return subscription
+}
+
+// ============================================================
+// REACT INTEGRATION PATTERN
+// ============================================================
+
+import { useEffect, useState } from 'react'
+
+function useOrderSubscription(ndk: NDK | null, orderId: string) {
+  const [events, setEvents] = useState<NDKEvent[]>([])
+ const [eosed, setEosed] = useState(false)
+
+ useEffect(() => {
+ if (!ndk || !orderId) return
+
+ const filter: NDKFilter = {
+ kinds: [16, 17],
+ '#order': [orderId]
+ }
+
+ const subscription = ndk.subscribe(filter, { closeOnEose: false })
+
+ subscription.on('event', (event: NDKEvent) => {
+ setEvents(prev => {
+ // Avoid duplicates
+ if (prev.some(e => e.id === event.id)) {
+ return prev
+ }
+ return [...prev, event].sort((a, b) =>
+ (a.created_at || 0) - (b.created_at || 0)
+ )
+ })
+ })
+
+ subscription.on('eose', () => {
+ setEosed(true)
+ })
+
+ // Cleanup on unmount
+ return () => {
+ subscription.stop()
+ }
+ }, [ndk, orderId])
+
+ return { events, eosed }
+}
+
+// ============================================================
+// REACT QUERY INTEGRATION
+// ============================================================
+
+import { useQuery, useQueryClient } from '@tanstack/react-query'
+
+// Query function
+const fetchProducts = async (ndk: NDK, pubkey: string) => {
+ if (!ndk) throw new Error('NDK not initialized')
+
+ const filter: NDKFilter = {
+ kinds: [30402],
+ authors: [pubkey]
+ }
+
+ const events = await ndk.fetchEvents(filter)
+ return Array.from(events)
+}
+
+// Hook with subscription for real-time updates
+function useProductsWithSubscription(ndk: NDK | null, pubkey: string) {
+ const queryClient = useQueryClient()
+
+ // Initial query
+ const query = useQuery({
+ queryKey: ['products', pubkey],
+ queryFn: () => fetchProducts(ndk!, pubkey),
+ enabled: !!ndk && !!pubkey,
+ staleTime: 30000
+ })
+
+ // Real-time subscription
+ useEffect(() => {
+ if (!ndk || !pubkey) return
+
+ const filter: NDKFilter = {
+ kinds: [30402],
+ authors: [pubkey]
+ }
+
+ const subscription = ndk.subscribe(filter, { closeOnEose: false })
+
+ subscription.on('event', () => {
+ // Invalidate query to trigger refetch
+ queryClient.invalidateQueries({ queryKey: ['products', pubkey] })
+ })
+
+ return () => {
+ subscription.stop()
+ }
+ }, [ndk, pubkey, queryClient])
+
+ return query
+}
+
+// ============================================================
+// ADVANCED: WAITING FOR SPECIFIC EVENT
+// ============================================================
+
+const waitForEvent = (
+ ndk: NDK,
+ filter: NDKFilter,
+ condition: (event: NDKEvent) => boolean,
+ timeoutMs: number = 30000
+): Promise<NDKEvent | null> => {
+ return new Promise((resolve) => {
+ const subscription = ndk.subscribe(filter, { closeOnEose: false })
+
+ // Timeout
+ const timeout = setTimeout(() => {
+ subscription.stop()
+ resolve(null)
+ }, timeoutMs)
+
+ // Event handler
+ subscription.on('event', (event: NDKEvent) => {
+ if (condition(event)) {
+ clearTimeout(timeout)
+ subscription.stop()
+ resolve(event)
+ }
+ })
+ })
+}
+
+// Usage example
+async function waitForPayment(ndk: NDK, orderId: string, invoiceId: string) {
+ const paymentEvent = await waitForEvent(
+ ndk,
+ {
+ kinds: [17],
+ '#order': [orderId],
+ since: Math.floor(Date.now() / 1000)
+ },
+ (event) => {
+ const tag = event.tags.find(t => t[0] === 'payment-request')
+ return tag?.[1] === invoiceId
+ },
+ 60000 // 60 second timeout
+ )
+
+ if (paymentEvent) {
+ console.log('✅ Payment confirmed!')
+ return paymentEvent
+ } else {
+ console.log('⏰ Payment timeout')
+ return null
+ }
+}
+
+// ============================================================
+// USAGE EXAMPLES
+// ============================================================
+
+async function queryExample(ndk: NDK) {
+ // Fetch notes
+ const notes = await fetchNotes(ndk, 'pubkey123', 50)
+ console.log(`Found ${notes.length} notes`)
+
+ // Subscribe to new notes
+ const cleanup = subscribeToNotes(ndk, 'pubkey123', (event) => {
+ console.log('New note:', event.content)
+ })
+
+ // Clean up after 60 seconds
+ setTimeout(cleanup, 60000)
+
+ // Monitor payment
+ monitorPaymentReceipt(ndk, 'order-123', 'invoice-456', (preimage) => {
+ console.log('Payment received:', preimage)
+ })
+}
+
+export {
+ fetchNotes,
+ fetchProductsByMultipleAuthors,
+ fetchOrderEvents,
+ fetchRecentEvents,
+ fetchEventById,
+ subscribeToNotes,
+ createManagedSubscription,
+ monitorPaymentReceipt,
+ useOrderSubscription,
+ useProductsWithSubscription,
+ waitForEvent
+}
+
diff --git a/.claude/skills/ndk/examples/05-users-profiles.ts b/.claude/skills/ndk/examples/05-users-profiles.ts
new file mode 100644
index 00000000..3a9beb65
--- /dev/null
+++ b/.claude/skills/ndk/examples/05-users-profiles.ts
@@ -0,0 +1,423 @@
+/**
+ * NDK User and Profile Handling
+ *
+ * Examples from: src/queries/profiles.tsx, src/components/Profile.tsx
+ */
+
+import NDK, { NDKUser, NDKUserProfile } from '@nostr-dev-kit/ndk'
+import { nip19 } from 'nostr-tools'
+
+// ============================================================
+// FETCH PROFILE BY NPUB
+// ============================================================
+
+/**
+ * Fetch a user's kind-0 profile given a bech32 npub.
+ *
+ * @param ndk - Connected NDK instance
+ * @param npub - Bech32-encoded public key ("npub1...")
+ * @returns The fetched profile, or null if lookup failed
+ */
+const fetchProfileByNpub = async (ndk: NDK, npub: string): Promise<NDKUserProfile | null> => {
+  try {
+    // Get user object from npub
+    const user = ndk.getUser({ npub })
+
+    // Fetch profile from relays
+    const profile = await user.fetchProfile()
+
+    return profile
+  } catch (error) {
+    console.error('Failed to fetch profile:', error)
+    return null
+  }
+}
+
+// ============================================================
+// FETCH PROFILE BY HEX PUBKEY
+// ============================================================
+
+/**
+ * Fetch a user's kind-0 profile given a raw hex pubkey.
+ *
+ * @param ndk - Connected NDK instance
+ * @param pubkey - 64-char hex-encoded public key
+ * @returns The fetched profile, or null if lookup failed
+ */
+const fetchProfileByPubkey = async (ndk: NDK, pubkey: string): Promise<NDKUserProfile | null> => {
+  try {
+    const user = ndk.getUser({ hexpubkey: pubkey })
+    const profile = await user.fetchProfile()
+
+    return profile
+  } catch (error) {
+    console.error('Failed to fetch profile:', error)
+    return null
+  }
+}
+
+// ============================================================
+// FETCH PROFILE BY NIP-05
+// ============================================================
+
+/**
+ * Fetch a user's kind-0 profile given a NIP-05 identifier
+ * ("name@domain"). Resolution queries the domain's
+ * /.well-known/nostr.json before any relay fetch happens.
+ *
+ * @param ndk - Connected NDK instance
+ * @param nip05 - NIP-05 identifier to resolve
+ * @returns The fetched profile, or null if resolution/fetch failed
+ */
+const fetchProfileByNip05 = async (ndk: NDK, nip05: string): Promise<NDKUserProfile | null> => {
+  try {
+    // Resolve NIP-05 identifier to user
+    const user = await ndk.getUserFromNip05(nip05)
+
+    if (!user) {
+      console.log('User not found for NIP-05:', nip05)
+      return null
+    }
+
+    // Fetch profile
+    const profile = await user.fetchProfile()
+
+    return profile
+  } catch (error) {
+    console.error('Failed to fetch profile by NIP-05:', error)
+    return null
+  }
+}
+
+// ============================================================
+// FETCH PROFILE BY ANY IDENTIFIER
+// ============================================================
+
+const fetchProfileByIdentifier = async (
+ ndk: NDK,
+ identifier: string
+): Promise<{ profile: NDKUserProfile | null; user: NDKUser | null }> => {
+ try {
+ // Check if it's a NIP-05 (contains @)
+ if (identifier.includes('@')) {
+ const user = await ndk.getUserFromNip05(identifier)
+ if (!user) return { profile: null, user: null }
+
+ const profile = await user.fetchProfile()
+ return { profile, user }
+ }
+
+ // Check if it's an npub
+ if (identifier.startsWith('npub')) {
+ const user = ndk.getUser({ npub: identifier })
+ const profile = await user.fetchProfile()
+ return { profile, user }
+ }
+
+ // Assume it's a hex pubkey
+ const user = ndk.getUser({ hexpubkey: identifier })
+ const profile = await user.fetchProfile()
+ return { profile, user }
+ } catch (error) {
+ console.error('Failed to fetch profile:', error)
+ return { profile: null, user: null }
+ }
+}
+
+// ============================================================
+// GET CURRENT USER
+// ============================================================
+
+/**
+ * Get the currently signed-in user from the active signer.
+ *
+ * @param ndk - NDK instance (may or may not have a signer configured)
+ * @returns The signer's user, or null if no signer is set or lookup failed
+ */
+const getCurrentUser = async (ndk: NDK): Promise<NDKUser | null> => {
+  if (!ndk.signer) {
+    console.log('No signer set')
+    return null
+  }
+
+  try {
+    const user = await ndk.signer.user()
+    return user
+  } catch (error) {
+    console.error('Failed to get current user:', error)
+    return null
+  }
+}
+
+// ============================================================
+// PROFILE DATA STRUCTURE
+// ============================================================
+
+// Shape of the raw kind-0 (metadata) content object. Both spellings of
+// several fields (displayName/display_name, picture/image) appear in the
+// wild, so all variants are modelled; the index signature keeps any
+// additional, non-standard fields clients may publish.
+interface ProfileData {
+  // Standard fields
+  name?: string
+  displayName?: string
+  display_name?: string
+  picture?: string
+  image?: string
+  banner?: string
+  about?: string
+
+  // Contact
+  nip05?: string
+  lud06?: string // LNURL
+  lud16?: string // Lightning address
+
+  // Social
+  website?: string
+
+  // Raw data
+  [key: string]: any
+}
+
+// ============================================================
+// EXTRACT PROFILE INFO
+// ============================================================
+
+/**
+ * Normalize an NDKUserProfile into a flat, display-friendly shape,
+ * coalescing the alias fields (displayName/display_name/name,
+ * picture/image, lud16/lud06). A missing profile yields anonymous
+ * defaults.
+ */
+const extractProfileInfo = (profile: NDKUserProfile | null) => {
+  if (!profile) {
+    return {
+      displayName: 'Anonymous',
+      avatar: null,
+      bio: null,
+      lightningAddress: null,
+      nip05: null
+    }
+  }
+
+  const displayName =
+    profile.displayName || profile.display_name || profile.name || 'Anonymous'
+
+  return {
+    displayName,
+    avatar: profile.picture || profile.image || null,
+    banner: profile.banner || null,
+    bio: profile.about || null,
+    lightningAddress: profile.lud16 || profile.lud06 || null,
+    nip05: profile.nip05 || null,
+    website: profile.website || null
+  }
+}
+
+// ============================================================
+// UPDATE PROFILE
+// ============================================================
+
+import { NDKEvent } from '@nostr-dev-kit/ndk'
+
+/**
+ * Publish an updated kind-0 (metadata) event for the signed-in user.
+ *
+ * Fetches the current profile first and shallow-merges the supplied
+ * fields over it, so fields not being changed are preserved.
+ *
+ * @param ndk - NDK instance with an active signer
+ * @param profileData - Partial profile fields to overwrite
+ * @returns The id of the published metadata event
+ * @throws Error if no signer is configured
+ */
+const updateProfile = async (ndk: NDK, profileData: Partial<NDKUserProfile>) => {
+  if (!ndk.signer) {
+    throw new Error('No signer available')
+  }
+
+  // Get current profile so unspecified fields survive the update
+  const currentUser = await ndk.signer.user()
+  const currentProfile = await currentUser.fetchProfile()
+
+  // Shallow merge: new fields win; spreading a null profile is a no-op
+  const updatedProfile = {
+    ...currentProfile,
+    ...profileData
+  }
+
+  // Create kind 0 (metadata) event; content is the JSON-encoded profile
+  const event = new NDKEvent(ndk)
+  event.kind = 0
+  event.content = JSON.stringify(updatedProfile)
+  event.tags = []
+
+  await event.sign()
+  await event.publish()
+
+  console.log('✅ Profile updated')
+  return event.id
+}
+
+// ============================================================
+// BATCH FETCH PROFILES
+// ============================================================
+
+const fetchMultipleProfiles = async (
+ ndk: NDK,
+ pubkeys: string[]
+): Promise