Compare commits
3 Commits
8bdf1fcd39
...
95271cbc81
| Author | SHA1 | Date | |
|---|---|---|---|
|
95271cbc81
|
|||
|
8ea91e39d8
|
|||
|
d3d2d6e643
|
@@ -15,7 +15,49 @@
|
||||
"Bash(./scripts/test.sh:*)",
|
||||
"Bash(./scripts/update-embedded-web.sh:*)",
|
||||
"Bash(bun run build:*)",
|
||||
"Bash(bun update:*)"
|
||||
"Bash(bun update:*)",
|
||||
"Bash(cat:*)",
|
||||
"Bash(git add:*)",
|
||||
"Bash(git commit:*)",
|
||||
"Bash(apt list:*)",
|
||||
"Bash(dpkg:*)",
|
||||
"Bash(find:*)",
|
||||
"Bash(metaflac --list --block-type=VORBIS_COMMENT:*)",
|
||||
"Bash(python3:*)",
|
||||
"Bash(pip3 show:*)",
|
||||
"Bash(pip3 install:*)",
|
||||
"Bash(lsusb:*)",
|
||||
"Bash(dmesg:*)",
|
||||
"Bash(adb devices:*)",
|
||||
"Bash(adb kill-server:*)",
|
||||
"Bash(adb start-server:*)",
|
||||
"Bash(adb shell:*)",
|
||||
"Bash(adb push:*)",
|
||||
"WebSearch",
|
||||
"WebFetch(domain:krosbits.in)",
|
||||
"WebFetch(domain:github.com)",
|
||||
"Bash(curl:*)",
|
||||
"Bash(adb install:*)",
|
||||
"WebFetch(domain:signal.org)",
|
||||
"WebFetch(domain:www.vet.minpolj.gov.rs)",
|
||||
"WebFetch(domain:www.mfa.gov.rs)",
|
||||
"Bash(adb uninstall:*)",
|
||||
"WebFetch(domain:apkpure.com)",
|
||||
"WebFetch(domain:claude.en.uptodown.com)",
|
||||
"WebFetch(domain:www.apkmirror.com)",
|
||||
"Bash(chmod:*)",
|
||||
"Bash(done)",
|
||||
"Bash(/home/mleku/src/next.orly.dev/scripts/test-neo4j-integration.sh:*)",
|
||||
"Bash(echo:*)",
|
||||
"Bash(go doc:*)",
|
||||
"Bash(git checkout:*)",
|
||||
"Bash(grep:*)",
|
||||
"Bash(lsblk:*)",
|
||||
"Bash(update-grub:*)",
|
||||
"Bash(go clean:*)",
|
||||
"Bash(go mod tidy:*)",
|
||||
"Bash(./scripts/test-neo4j-integration.sh:*)",
|
||||
"Bash(docker compose:*)"
|
||||
],
|
||||
"deny": [],
|
||||
"ask": []
|
||||
|
||||
634
.claude/skills/applesauce-core/SKILL.md
Normal file
634
.claude/skills/applesauce-core/SKILL.md
Normal file
@@ -0,0 +1,634 @@
|
||||
---
|
||||
name: applesauce-core
|
||||
description: This skill should be used when working with applesauce-core library for Nostr client development, including event stores, queries, observables, and client utilities. Provides comprehensive knowledge of applesauce patterns for building reactive Nostr applications.
|
||||
---
|
||||
|
||||
# applesauce-core Skill
|
||||
|
||||
This skill provides comprehensive knowledge and patterns for working with applesauce-core, a library that provides reactive utilities and patterns for building Nostr clients.
|
||||
|
||||
## When to Use This Skill
|
||||
|
||||
Use this skill when:
|
||||
- Building reactive Nostr applications
|
||||
- Managing event stores and caches
|
||||
- Working with observable patterns for Nostr
|
||||
- Implementing real-time updates
|
||||
- Building timeline and feed views
|
||||
- Managing replaceable events
|
||||
- Working with profiles and metadata
|
||||
- Creating efficient Nostr queries
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### applesauce-core Overview
|
||||
|
||||
applesauce-core provides:
|
||||
- **Event stores** - Reactive event caching and management
|
||||
- **Queries** - Declarative event querying patterns
|
||||
- **Observables** - RxJS-based reactive patterns
|
||||
- **Profile helpers** - Profile metadata management
|
||||
- **Timeline utilities** - Feed and timeline building
|
||||
- **NIP helpers** - NIP-specific utilities
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
npm install applesauce-core
|
||||
```
|
||||
|
||||
### Basic Architecture
|
||||
|
||||
applesauce-core is built on reactive principles:
|
||||
- Events are stored in reactive stores
|
||||
- Queries return observables that update when new events arrive
|
||||
- Components subscribe to observables for real-time updates
|
||||
|
||||
## Event Store
|
||||
|
||||
### Creating an Event Store
|
||||
|
||||
```javascript
|
||||
import { EventStore } from 'applesauce-core';
|
||||
|
||||
// Create event store
|
||||
const eventStore = new EventStore();
|
||||
|
||||
// Add events
|
||||
eventStore.add(event1);
|
||||
eventStore.add(event2);
|
||||
|
||||
// Add multiple events
|
||||
eventStore.addMany([event1, event2, event3]);
|
||||
|
||||
// Check if event exists
|
||||
const exists = eventStore.has(eventId);
|
||||
|
||||
// Get event by ID
|
||||
const event = eventStore.get(eventId);
|
||||
|
||||
// Remove event
|
||||
eventStore.remove(eventId);
|
||||
|
||||
// Clear all events
|
||||
eventStore.clear();
|
||||
```
|
||||
|
||||
### Event Store Queries
|
||||
|
||||
```javascript
|
||||
// Get all events
|
||||
const allEvents = eventStore.getAll();
|
||||
|
||||
// Get events by filter
|
||||
const filtered = eventStore.filter({
|
||||
kinds: [1],
|
||||
authors: [pubkey]
|
||||
});
|
||||
|
||||
// Get events by author
|
||||
const authorEvents = eventStore.getByAuthor(pubkey);
|
||||
|
||||
// Get events by kind
|
||||
const textNotes = eventStore.getByKind(1);
|
||||
```
|
||||
|
||||
### Replaceable Events
|
||||
|
||||
applesauce-core handles replaceable events automatically:
|
||||
|
||||
```javascript
|
||||
// For kind 0 (profile), only latest is kept
|
||||
eventStore.add(profileEvent1); // stored
|
||||
eventStore.add(profileEvent2); // replaces if newer
|
||||
|
||||
// For parameterized replaceable (30000-39999)
|
||||
eventStore.add(articleEvent); // keyed by author + kind + d-tag
|
||||
|
||||
// Get replaceable event
|
||||
const profile = eventStore.getReplaceable(0, pubkey);
|
||||
const article = eventStore.getReplaceable(30023, pubkey, 'article-slug');
|
||||
```
|
||||
|
||||
## Queries
|
||||
|
||||
### Query Patterns
|
||||
|
||||
```javascript
|
||||
import { createQuery } from 'applesauce-core';
|
||||
|
||||
// Create a query
|
||||
const query = createQuery(eventStore, {
|
||||
kinds: [1],
|
||||
limit: 50
|
||||
});
|
||||
|
||||
// Subscribe to query results
|
||||
query.subscribe(events => {
|
||||
console.log('Current events:', events);
|
||||
});
|
||||
|
||||
// Query updates automatically when new events added
|
||||
eventStore.add(newEvent); // Subscribers notified
|
||||
```
|
||||
|
||||
### Timeline Query
|
||||
|
||||
```javascript
|
||||
import { TimelineQuery } from 'applesauce-core';
|
||||
|
||||
// Create timeline for user's notes
|
||||
const timeline = new TimelineQuery(eventStore, {
|
||||
kinds: [1],
|
||||
authors: [userPubkey]
|
||||
});
|
||||
|
||||
// Get observable of timeline
|
||||
const timeline$ = timeline.events$;
|
||||
|
||||
// Subscribe
|
||||
timeline$.subscribe(events => {
|
||||
// Events sorted by created_at, newest first
|
||||
renderTimeline(events);
|
||||
});
|
||||
```
|
||||
|
||||
### Profile Query
|
||||
|
||||
```javascript
|
||||
import { ProfileQuery } from 'applesauce-core';
|
||||
|
||||
// Query profile metadata
|
||||
const profileQuery = new ProfileQuery(eventStore, pubkey);
|
||||
|
||||
// Get observable
|
||||
const profile$ = profileQuery.profile$;
|
||||
|
||||
profile$.subscribe(profile => {
|
||||
if (profile) {
|
||||
console.log('Name:', profile.name);
|
||||
console.log('Picture:', profile.picture);
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
## Observables
|
||||
|
||||
### Working with RxJS
|
||||
|
||||
applesauce-core uses RxJS observables:
|
||||
|
||||
```javascript
|
||||
import { map, filter, distinctUntilChanged } from 'rxjs/operators';
|
||||
|
||||
// Transform query results
|
||||
const names$ = profileQuery.profile$.pipe(
|
||||
filter(profile => profile !== null),
|
||||
map(profile => profile.name),
|
||||
distinctUntilChanged()
|
||||
);
|
||||
|
||||
// Combine multiple observables
|
||||
import { combineLatest } from 'rxjs';
|
||||
|
||||
const combined$ = combineLatest([
|
||||
timeline$,
|
||||
profile$
|
||||
]).pipe(
|
||||
map(([events, profile]) => ({
|
||||
events,
|
||||
authorName: profile?.name
|
||||
}))
|
||||
);
|
||||
```
|
||||
|
||||
### Creating Custom Observables
|
||||
|
||||
```javascript
|
||||
import { Observable } from 'rxjs';
|
||||
|
||||
function createEventObservable(store, filter) {
|
||||
return new Observable(subscriber => {
|
||||
// Initial emit
|
||||
subscriber.next(store.filter(filter));
|
||||
|
||||
// Subscribe to store changes
|
||||
const unsubscribe = store.onChange(() => {
|
||||
subscriber.next(store.filter(filter));
|
||||
});
|
||||
|
||||
// Cleanup
|
||||
return () => unsubscribe();
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
## Profile Helpers
|
||||
|
||||
### Profile Metadata
|
||||
|
||||
```javascript
|
||||
import { parseProfile, ProfileContent } from 'applesauce-core';
|
||||
|
||||
// Parse kind 0 content
|
||||
const profileEvent = await getProfileEvent(pubkey);
|
||||
const profile = parseProfile(profileEvent);
|
||||
|
||||
// Profile fields
|
||||
console.log(profile.name); // Display name
|
||||
console.log(profile.about); // Bio
|
||||
console.log(profile.picture); // Avatar URL
|
||||
console.log(profile.banner); // Banner image URL
|
||||
console.log(profile.nip05); // NIP-05 identifier
|
||||
console.log(profile.lud16); // Lightning address
|
||||
console.log(profile.website); // Website URL
|
||||
```
|
||||
|
||||
### Profile Store
|
||||
|
||||
```javascript
|
||||
import { ProfileStore } from 'applesauce-core';
|
||||
|
||||
const profileStore = new ProfileStore(eventStore);
|
||||
|
||||
// Get profile observable
|
||||
const profile$ = profileStore.getProfile(pubkey);
|
||||
|
||||
// Get multiple profiles
|
||||
const profiles$ = profileStore.getProfiles([pubkey1, pubkey2]);
|
||||
|
||||
// Request profile load (triggers fetch if not cached)
|
||||
profileStore.requestProfile(pubkey);
|
||||
```
|
||||
|
||||
## Timeline Utilities
|
||||
|
||||
### Building Feeds
|
||||
|
||||
```javascript
|
||||
import { Timeline } from 'applesauce-core';
|
||||
|
||||
// Create timeline
|
||||
const timeline = new Timeline(eventStore);
|
||||
|
||||
// Add filter
|
||||
timeline.setFilter({
|
||||
kinds: [1, 6],
|
||||
authors: followedPubkeys
|
||||
});
|
||||
|
||||
// Get events observable
|
||||
const events$ = timeline.events$;
|
||||
|
||||
// Load more (pagination)
|
||||
timeline.loadMore(50);
|
||||
|
||||
// Refresh (get latest)
|
||||
timeline.refresh();
|
||||
```
|
||||
|
||||
### Thread Building
|
||||
|
||||
```javascript
|
||||
import { ThreadBuilder } from 'applesauce-core';
|
||||
|
||||
// Build thread from root event
|
||||
const thread = new ThreadBuilder(eventStore, rootEventId);
|
||||
|
||||
// Get thread observable
|
||||
const thread$ = thread.thread$;
|
||||
|
||||
thread$.subscribe(threadData => {
|
||||
console.log('Root:', threadData.root);
|
||||
console.log('Replies:', threadData.replies);
|
||||
console.log('Reply count:', threadData.replyCount);
|
||||
});
|
||||
```
|
||||
|
||||
### Reactions and Zaps
|
||||
|
||||
```javascript
|
||||
import { ReactionStore, ZapStore } from 'applesauce-core';
|
||||
|
||||
// Reactions
|
||||
const reactionStore = new ReactionStore(eventStore);
|
||||
const reactions$ = reactionStore.getReactions(eventId);
|
||||
|
||||
reactions$.subscribe(reactions => {
|
||||
console.log('Likes:', reactions.likes);
|
||||
console.log('Custom:', reactions.custom);
|
||||
});
|
||||
|
||||
// Zaps
|
||||
const zapStore = new ZapStore(eventStore);
|
||||
const zaps$ = zapStore.getZaps(eventId);
|
||||
|
||||
zaps$.subscribe(zaps => {
|
||||
console.log('Total sats:', zaps.totalAmount);
|
||||
console.log('Zap count:', zaps.count);
|
||||
});
|
||||
```
|
||||
|
||||
## NIP Helpers
|
||||
|
||||
### NIP-05 Verification
|
||||
|
||||
```javascript
|
||||
import { verifyNip05 } from 'applesauce-core';
|
||||
|
||||
// Verify NIP-05
|
||||
const result = await verifyNip05('alice@example.com', expectedPubkey);
|
||||
|
||||
if (result.valid) {
|
||||
console.log('NIP-05 verified');
|
||||
} else {
|
||||
console.log('Verification failed:', result.error);
|
||||
}
|
||||
```
|
||||
|
||||
### NIP-10 Reply Parsing
|
||||
|
||||
```javascript
|
||||
import { parseReplyTags } from 'applesauce-core';
|
||||
|
||||
// Parse reply structure
|
||||
const parsed = parseReplyTags(event);
|
||||
|
||||
console.log('Root event:', parsed.root);
|
||||
console.log('Reply to:', parsed.reply);
|
||||
console.log('Mentions:', parsed.mentions);
|
||||
```
|
||||
|
||||
### NIP-65 Relay Lists
|
||||
|
||||
```javascript
|
||||
import { parseRelayList } from 'applesauce-core';
|
||||
|
||||
// Parse relay list event (kind 10002)
|
||||
const relays = parseRelayList(relayListEvent);
|
||||
|
||||
console.log('Read relays:', relays.read);
|
||||
console.log('Write relays:', relays.write);
|
||||
```
|
||||
|
||||
## Integration with nostr-tools
|
||||
|
||||
### Using with SimplePool
|
||||
|
||||
```javascript
|
||||
import { SimplePool } from 'nostr-tools';
|
||||
import { EventStore } from 'applesauce-core';
|
||||
|
||||
const pool = new SimplePool();
|
||||
const eventStore = new EventStore();
|
||||
|
||||
// Load events into store
|
||||
pool.subscribeMany(relays, [filter], {
|
||||
onevent(event) {
|
||||
eventStore.add(event);
|
||||
}
|
||||
});
|
||||
|
||||
// Query store reactively
|
||||
const timeline$ = createTimelineQuery(eventStore, filter);
|
||||
```
|
||||
|
||||
### Publishing Events
|
||||
|
||||
```javascript
|
||||
import { finalizeEvent } from 'nostr-tools';
|
||||
|
||||
// Create event
|
||||
const event = finalizeEvent({
|
||||
kind: 1,
|
||||
content: 'Hello!',
|
||||
created_at: Math.floor(Date.now() / 1000),
|
||||
tags: []
|
||||
}, secretKey);
|
||||
|
||||
// Add to local store immediately (optimistic update)
|
||||
eventStore.add(event);
|
||||
|
||||
// Publish to relays
|
||||
await pool.publish(relays, event);
|
||||
```
|
||||
|
||||
## Svelte Integration
|
||||
|
||||
### Using in Svelte Components
|
||||
|
||||
```svelte
|
||||
<script>
|
||||
import { onMount, onDestroy } from 'svelte';
|
||||
import { EventStore, TimelineQuery } from 'applesauce-core';
|
||||
|
||||
export let pubkey;
|
||||
|
||||
const eventStore = new EventStore();
|
||||
let events = [];
|
||||
let subscription;
|
||||
|
||||
onMount(() => {
|
||||
const timeline = new TimelineQuery(eventStore, {
|
||||
kinds: [1],
|
||||
authors: [pubkey]
|
||||
});
|
||||
|
||||
subscription = timeline.events$.subscribe(e => {
|
||||
events = e;
|
||||
});
|
||||
});
|
||||
|
||||
onDestroy(() => {
|
||||
subscription?.unsubscribe();
|
||||
});
|
||||
</script>
|
||||
|
||||
{#each events as event}
|
||||
<div class="event">
|
||||
{event.content}
|
||||
</div>
|
||||
{/each}
|
||||
```
|
||||
|
||||
### Svelte Store Adapter
|
||||
|
||||
```javascript
|
||||
import { readable } from 'svelte/store';
|
||||
|
||||
// Convert RxJS observable to Svelte store
|
||||
function fromObservable(observable, initialValue) {
|
||||
return readable(initialValue, set => {
|
||||
const subscription = observable.subscribe(set);
|
||||
return () => subscription.unsubscribe();
|
||||
});
|
||||
}
|
||||
|
||||
// Usage
|
||||
const events$ = timeline.events$;
|
||||
const eventsStore = fromObservable(events$, []);
|
||||
```
|
||||
|
||||
```svelte
|
||||
<script>
|
||||
import { eventsStore } from './stores.js';
|
||||
</script>
|
||||
|
||||
{#each $eventsStore as event}
|
||||
<div>{event.content}</div>
|
||||
{/each}
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Store Management
|
||||
|
||||
1. **Single store instance** - Use one EventStore per app
|
||||
2. **Clear stale data** - Implement cache limits
|
||||
3. **Handle replaceable events** - Let store manage deduplication
|
||||
4. **Unsubscribe** - Clean up subscriptions on component destroy
|
||||
|
||||
### Query Optimization
|
||||
|
||||
1. **Use specific filters** - Narrow queries perform better
|
||||
2. **Limit results** - Use limit for initial loads
|
||||
3. **Cache queries** - Reuse query instances
|
||||
4. **Debounce updates** - Throttle rapid changes
|
||||
|
||||
### Memory Management
|
||||
|
||||
1. **Limit store size** - Implement LRU or time-based eviction
|
||||
2. **Clean up observables** - Unsubscribe when done
|
||||
3. **Use weak references** - For profile caches
|
||||
4. **Paginate large feeds** - Don't load everything at once
|
||||
|
||||
### Reactive Patterns
|
||||
|
||||
1. **Prefer observables** - Over imperative queries
|
||||
2. **Use operators** - Transform data with RxJS
|
||||
3. **Combine streams** - For complex views
|
||||
4. **Handle loading states** - Show placeholders
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Event Deduplication
|
||||
|
||||
```javascript
|
||||
// EventStore handles deduplication automatically
|
||||
eventStore.add(event1);
|
||||
eventStore.add(event1); // No duplicate
|
||||
|
||||
// For manual deduplication
|
||||
const seen = new Set();
|
||||
events.filter(e => {
|
||||
if (seen.has(e.id)) return false;
|
||||
seen.add(e.id);
|
||||
return true;
|
||||
});
|
||||
```
|
||||
|
||||
### Optimistic Updates
|
||||
|
||||
```javascript
|
||||
async function publishNote(content) {
|
||||
// Create event
|
||||
const event = await createEvent(content);
|
||||
|
||||
// Add to store immediately (optimistic)
|
||||
eventStore.add(event);
|
||||
|
||||
try {
|
||||
// Publish to relays
|
||||
await pool.publish(relays, event);
|
||||
} catch (error) {
|
||||
// Remove on failure
|
||||
eventStore.remove(event.id);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Loading States
|
||||
|
||||
```javascript
|
||||
import { BehaviorSubject, combineLatest } from 'rxjs';
|
||||
|
||||
const loading$ = new BehaviorSubject(true);
|
||||
const events$ = timeline.events$;
|
||||
|
||||
const state$ = combineLatest([loading$, events$]).pipe(
|
||||
map(([loading, events]) => ({
|
||||
loading,
|
||||
events,
|
||||
empty: !loading && events.length === 0
|
||||
}))
|
||||
);
|
||||
|
||||
// Start loading
|
||||
loading$.next(true);
|
||||
await loadEvents();
|
||||
loading$.next(false);
|
||||
```
|
||||
|
||||
### Infinite Scroll
|
||||
|
||||
```javascript
|
||||
function createInfiniteScroll(timeline, pageSize = 50) {
|
||||
let loading = false;
|
||||
|
||||
async function loadMore() {
|
||||
if (loading) return;
|
||||
|
||||
loading = true;
|
||||
await timeline.loadMore(pageSize);
|
||||
loading = false;
|
||||
}
|
||||
|
||||
function onScroll(event) {
|
||||
const { scrollTop, scrollHeight, clientHeight } = event.target;
|
||||
if (scrollHeight - scrollTop <= clientHeight * 1.5) {
|
||||
loadMore();
|
||||
}
|
||||
}
|
||||
|
||||
return { loadMore, onScroll };
|
||||
}
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
**Events not updating:**
|
||||
- Check subscription is active
|
||||
- Verify events are being added to store
|
||||
- Ensure filter matches events
|
||||
|
||||
**Memory growing:**
|
||||
- Implement store size limits
|
||||
- Clean up subscriptions
|
||||
- Use weak references where appropriate
|
||||
|
||||
**Slow queries:**
|
||||
- Add indexes for common queries
|
||||
- Use more specific filters
|
||||
- Implement pagination
|
||||
|
||||
**Stale data:**
|
||||
- Implement refresh mechanisms
|
||||
- Set up real-time subscriptions
|
||||
- Handle replaceable event updates
|
||||
|
||||
## References
|
||||
|
||||
- **applesauce GitHub**: https://github.com/hzrd149/applesauce
|
||||
- **RxJS Documentation**: https://rxjs.dev
|
||||
- **nostr-tools**: https://github.com/nbd-wtf/nostr-tools
|
||||
- **Nostr Protocol**: https://github.com/nostr-protocol/nostr
|
||||
|
||||
## Related Skills
|
||||
|
||||
- **nostr-tools** - Lower-level Nostr operations
|
||||
- **applesauce-signers** - Event signing abstractions
|
||||
- **svelte** - Building reactive UIs
|
||||
- **nostr** - Nostr protocol fundamentals
|
||||
757
.claude/skills/applesauce-signers/SKILL.md
Normal file
757
.claude/skills/applesauce-signers/SKILL.md
Normal file
@@ -0,0 +1,757 @@
|
||||
---
|
||||
name: applesauce-signers
|
||||
description: This skill should be used when working with applesauce-signers library for Nostr event signing, including NIP-07 browser extensions, NIP-46 remote signing, and custom signer implementations. Provides comprehensive knowledge of signing patterns and signer abstractions.
|
||||
---
|
||||
|
||||
# applesauce-signers Skill
|
||||
|
||||
This skill provides comprehensive knowledge and patterns for working with applesauce-signers, a library that provides signing abstractions for Nostr applications.
|
||||
|
||||
## When to Use This Skill
|
||||
|
||||
Use this skill when:
|
||||
- Implementing event signing in Nostr applications
|
||||
- Integrating with NIP-07 browser extensions
|
||||
- Working with NIP-46 remote signers
|
||||
- Building custom signer implementations
|
||||
- Managing signing sessions
|
||||
- Handling signing requests and permissions
|
||||
- Implementing multi-signer support
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### applesauce-signers Overview
|
||||
|
||||
applesauce-signers provides:
|
||||
- **Signer abstraction** - Unified interface for different signers
|
||||
- **NIP-07 integration** - Browser extension support
|
||||
- **NIP-46 support** - Remote signing (Nostr Connect)
|
||||
- **Simple signers** - Direct key signing
|
||||
- **Permission handling** - Manage signing requests
|
||||
- **Observable patterns** - Reactive signing states
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
npm install applesauce-signers
|
||||
```
|
||||
|
||||
### Signer Interface
|
||||
|
||||
All signers implement a common interface:
|
||||
|
||||
```typescript
|
||||
interface Signer {
|
||||
// Get public key
|
||||
getPublicKey(): Promise<string>;
|
||||
|
||||
// Sign event
|
||||
signEvent(event: UnsignedEvent): Promise<SignedEvent>;
|
||||
|
||||
// Encrypt (NIP-04)
|
||||
nip04Encrypt?(pubkey: string, plaintext: string): Promise<string>;
|
||||
nip04Decrypt?(pubkey: string, ciphertext: string): Promise<string>;
|
||||
|
||||
// Encrypt (NIP-44)
|
||||
nip44Encrypt?(pubkey: string, plaintext: string): Promise<string>;
|
||||
nip44Decrypt?(pubkey: string, ciphertext: string): Promise<string>;
|
||||
}
|
||||
```
|
||||
|
||||
## Simple Signer
|
||||
|
||||
### Using Secret Key
|
||||
|
||||
```javascript
|
||||
import { SimpleSigner } from 'applesauce-signers';
|
||||
import { generateSecretKey } from 'nostr-tools';
|
||||
|
||||
// Create signer with existing key
|
||||
const signer = new SimpleSigner(secretKey);
|
||||
|
||||
// Or generate new key
|
||||
const newSecretKey = generateSecretKey();
|
||||
const newSigner = new SimpleSigner(newSecretKey);
|
||||
|
||||
// Get public key
|
||||
const pubkey = await signer.getPublicKey();
|
||||
|
||||
// Sign event
|
||||
const unsignedEvent = {
|
||||
kind: 1,
|
||||
content: 'Hello Nostr!',
|
||||
created_at: Math.floor(Date.now() / 1000),
|
||||
tags: []
|
||||
};
|
||||
|
||||
const signedEvent = await signer.signEvent(unsignedEvent);
|
||||
```
|
||||
|
||||
### NIP-04 Encryption
|
||||
|
||||
```javascript
|
||||
// Encrypt message
|
||||
const ciphertext = await signer.nip04Encrypt(
|
||||
recipientPubkey,
|
||||
'Secret message'
|
||||
);
|
||||
|
||||
// Decrypt message
|
||||
const plaintext = await signer.nip04Decrypt(
|
||||
senderPubkey,
|
||||
ciphertext
|
||||
);
|
||||
```
|
||||
|
||||
### NIP-44 Encryption
|
||||
|
||||
```javascript
|
||||
// Encrypt with NIP-44 (preferred)
|
||||
const ciphertext = await signer.nip44Encrypt(
|
||||
recipientPubkey,
|
||||
'Secret message'
|
||||
);
|
||||
|
||||
// Decrypt
|
||||
const plaintext = await signer.nip44Decrypt(
|
||||
senderPubkey,
|
||||
ciphertext
|
||||
);
|
||||
```
|
||||
|
||||
## NIP-07 Signer
|
||||
|
||||
### Browser Extension Integration
|
||||
|
||||
```javascript
|
||||
import { Nip07Signer } from 'applesauce-signers';
|
||||
|
||||
// Check if extension is available
|
||||
if (window.nostr) {
|
||||
const signer = new Nip07Signer();
|
||||
|
||||
// Get public key (may prompt user)
|
||||
const pubkey = await signer.getPublicKey();
|
||||
|
||||
// Sign event (prompts user)
|
||||
const signedEvent = await signer.signEvent(unsignedEvent);
|
||||
}
|
||||
```
|
||||
|
||||
### Handling Extension Availability
|
||||
|
||||
```javascript
|
||||
function getAvailableSigner() {
|
||||
if (typeof window !== 'undefined' && window.nostr) {
|
||||
return new Nip07Signer();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
// Wait for extension to load
|
||||
async function waitForExtension(timeout = 3000) {
|
||||
const start = Date.now();
|
||||
|
||||
while (Date.now() - start < timeout) {
|
||||
if (window.nostr) {
|
||||
return new Nip07Signer();
|
||||
}
|
||||
await new Promise(r => setTimeout(r, 100));
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
```
|
||||
|
||||
### Extension Permissions
|
||||
|
||||
```javascript
|
||||
// Some extensions support granular permissions
|
||||
const signer = new Nip07Signer();
|
||||
|
||||
// Request specific permissions
|
||||
try {
|
||||
// This varies by extension
|
||||
await window.nostr.enable();
|
||||
} catch (error) {
|
||||
console.log('User denied permission');
|
||||
}
|
||||
```
|
||||
|
||||
## NIP-46 Remote Signer
|
||||
|
||||
### Nostr Connect
|
||||
|
||||
```javascript
|
||||
import { Nip46Signer } from 'applesauce-signers';
|
||||
|
||||
// Create remote signer
|
||||
const signer = new Nip46Signer({
|
||||
// Remote signer's pubkey
|
||||
remotePubkey: signerPubkey,
|
||||
|
||||
// Relays for communication
|
||||
relays: ['wss://relay.example.com'],
|
||||
|
||||
// Local secret key for encryption
|
||||
localSecretKey: localSecretKey,
|
||||
|
||||
// Optional: custom client name
|
||||
clientName: 'My Nostr App'
|
||||
});
|
||||
|
||||
// Connect to remote signer
|
||||
await signer.connect();
|
||||
|
||||
// Get public key
|
||||
const pubkey = await signer.getPublicKey();
|
||||
|
||||
// Sign event
|
||||
const signedEvent = await signer.signEvent(unsignedEvent);
|
||||
|
||||
// Disconnect when done
|
||||
signer.disconnect();
|
||||
```
|
||||
|
||||
### Connection URL
|
||||
|
||||
```javascript
|
||||
// Parse nostrconnect:// URL
|
||||
function parseNostrConnectUrl(url) {
|
||||
const parsed = new URL(url);
|
||||
|
||||
return {
|
||||
pubkey: parsed.pathname.replace('//', ''),
|
||||
relay: parsed.searchParams.get('relay'),
|
||||
secret: parsed.searchParams.get('secret')
|
||||
};
|
||||
}
|
||||
|
||||
// Create signer from URL
|
||||
const { pubkey, relay, secret } = parseNostrConnectUrl(connectUrl);
|
||||
|
||||
const signer = new Nip46Signer({
|
||||
remotePubkey: pubkey,
|
||||
relays: [relay],
|
||||
localSecretKey: generateSecretKey(),
|
||||
secret: secret
|
||||
});
|
||||
```
|
||||
|
||||
### Bunker URL
|
||||
|
||||
```javascript
|
||||
// Parse bunker:// URL (NIP-46)
|
||||
function parseBunkerUrl(url) {
|
||||
const parsed = new URL(url);
|
||||
|
||||
return {
|
||||
pubkey: parsed.pathname.replace('//', ''),
|
||||
relays: parsed.searchParams.getAll('relay'),
|
||||
secret: parsed.searchParams.get('secret')
|
||||
};
|
||||
}
|
||||
|
||||
const { pubkey, relays, secret } = parseBunkerUrl(bunkerUrl);
|
||||
```
|
||||
|
||||
## Signer Management
|
||||
|
||||
### Signer Store
|
||||
|
||||
```javascript
|
||||
import { SignerStore } from 'applesauce-signers';
|
||||
|
||||
const signerStore = new SignerStore();
|
||||
|
||||
// Set active signer
|
||||
signerStore.setSigner(signer);
|
||||
|
||||
// Get active signer
|
||||
const activeSigner = signerStore.getSigner();
|
||||
|
||||
// Clear signer (logout)
|
||||
signerStore.clearSigner();
|
||||
|
||||
// Observable for signer changes
|
||||
signerStore.signer$.subscribe(signer => {
|
||||
if (signer) {
|
||||
console.log('Logged in');
|
||||
} else {
|
||||
console.log('Logged out');
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Multi-Account Support
|
||||
|
||||
```javascript
|
||||
class AccountManager {
|
||||
constructor() {
|
||||
this.accounts = new Map();
|
||||
this.activeAccount = null;
|
||||
}
|
||||
|
||||
addAccount(pubkey, signer) {
|
||||
this.accounts.set(pubkey, signer);
|
||||
}
|
||||
|
||||
removeAccount(pubkey) {
|
||||
this.accounts.delete(pubkey);
|
||||
if (this.activeAccount === pubkey) {
|
||||
this.activeAccount = null;
|
||||
}
|
||||
}
|
||||
|
||||
switchAccount(pubkey) {
|
||||
if (this.accounts.has(pubkey)) {
|
||||
this.activeAccount = pubkey;
|
||||
return this.accounts.get(pubkey);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
getActiveSigner() {
|
||||
return this.activeAccount
|
||||
? this.accounts.get(this.activeAccount)
|
||||
: null;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Custom Signers
|
||||
|
||||
### Implementing a Custom Signer
|
||||
|
||||
```javascript
|
||||
class CustomSigner {
|
||||
constructor(options) {
|
||||
this.options = options;
|
||||
}
|
||||
|
||||
async getPublicKey() {
|
||||
// Return public key
|
||||
return this.options.pubkey;
|
||||
}
|
||||
|
||||
async signEvent(event) {
|
||||
// Implement signing logic
|
||||
// Could call external API, hardware wallet, etc.
|
||||
|
||||
const signedEvent = await this.externalSign(event);
|
||||
return signedEvent;
|
||||
}
|
||||
|
||||
async nip04Encrypt(pubkey, plaintext) {
|
||||
// Implement NIP-04 encryption
|
||||
throw new Error('NIP-04 not supported');
|
||||
}
|
||||
|
||||
async nip04Decrypt(pubkey, ciphertext) {
|
||||
throw new Error('NIP-04 not supported');
|
||||
}
|
||||
|
||||
async nip44Encrypt(pubkey, plaintext) {
|
||||
// Implement NIP-44 encryption
|
||||
throw new Error('NIP-44 not supported');
|
||||
}
|
||||
|
||||
async nip44Decrypt(pubkey, ciphertext) {
|
||||
throw new Error('NIP-44 not supported');
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Hardware Wallet Signer
|
||||
|
||||
```javascript
|
||||
class HardwareWalletSigner {
|
||||
constructor(devicePath) {
|
||||
this.devicePath = devicePath;
|
||||
}
|
||||
|
||||
async connect() {
|
||||
// Connect to hardware device
|
||||
this.device = await connectToDevice(this.devicePath);
|
||||
}
|
||||
|
||||
async getPublicKey() {
|
||||
// Get public key from device
|
||||
return await this.device.getNostrPubkey();
|
||||
}
|
||||
|
||||
async signEvent(event) {
|
||||
// Sign on device (user confirms on device)
|
||||
const signature = await this.device.signNostrEvent(event);
|
||||
|
||||
return {
|
||||
...event,
|
||||
pubkey: await this.getPublicKey(),
|
||||
id: getEventHash(event),
|
||||
sig: signature
|
||||
};
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Read-Only Signer
|
||||
|
||||
```javascript
|
||||
class ReadOnlySigner {
|
||||
constructor(pubkey) {
|
||||
this.pubkey = pubkey;
|
||||
}
|
||||
|
||||
async getPublicKey() {
|
||||
return this.pubkey;
|
||||
}
|
||||
|
||||
async signEvent(event) {
|
||||
throw new Error('Read-only mode: cannot sign events');
|
||||
}
|
||||
|
||||
async nip04Encrypt(pubkey, plaintext) {
|
||||
throw new Error('Read-only mode: cannot encrypt');
|
||||
}
|
||||
|
||||
async nip04Decrypt(pubkey, ciphertext) {
|
||||
throw new Error('Read-only mode: cannot decrypt');
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Signing Utilities
|
||||
|
||||
### Event Creation Helper
|
||||
|
||||
```javascript
|
||||
async function createAndSignEvent(signer, template) {
|
||||
const pubkey = await signer.getPublicKey();
|
||||
|
||||
const event = {
|
||||
...template,
|
||||
pubkey,
|
||||
created_at: template.created_at || Math.floor(Date.now() / 1000)
|
||||
};
|
||||
|
||||
return await signer.signEvent(event);
|
||||
}
|
||||
|
||||
// Usage
|
||||
const signedNote = await createAndSignEvent(signer, {
|
||||
kind: 1,
|
||||
content: 'Hello!',
|
||||
tags: []
|
||||
});
|
||||
```
|
||||
|
||||
### Batch Signing
|
||||
|
||||
```javascript
|
||||
async function signEvents(signer, events) {
|
||||
const signed = [];
|
||||
|
||||
for (const event of events) {
|
||||
const signedEvent = await signer.signEvent(event);
|
||||
signed.push(signedEvent);
|
||||
}
|
||||
|
||||
return signed;
|
||||
}
|
||||
|
||||
// With parallelization (if signer supports)
|
||||
/**
 * Sign all events concurrently (only for signers that can handle parallel
 * requests). Rejects if any single signing fails.
 */
async function signEventsParallel(signer, events) {
  const pending = events.map((event) => signer.signEvent(event));
  return Promise.all(pending);
}
|
||||
```
|
||||
|
||||
## Svelte Integration
|
||||
|
||||
### Signer Context
|
||||
|
||||
```svelte
|
||||
<!-- SignerProvider.svelte -->
|
||||
<script>
|
||||
import { setContext } from 'svelte';
|
||||
import { writable } from 'svelte/store';
|
||||
|
||||
const signer = writable(null);
|
||||
|
||||
setContext('signer', {
|
||||
signer,
|
||||
setSigner: (s) => signer.set(s),
|
||||
clearSigner: () => signer.set(null)
|
||||
});
|
||||
</script>
|
||||
|
||||
<slot />
|
||||
```
|
||||
|
||||
```svelte
|
||||
<!-- Component using signer -->
|
||||
<script>
|
||||
import { getContext } from 'svelte';
|
||||
|
||||
const { signer } = getContext('signer');
|
||||
|
||||
async function publishNote(content) {
|
||||
if (!$signer) {
|
||||
alert('Please login first');
|
||||
return;
|
||||
}
|
||||
|
||||
const event = await $signer.signEvent({
|
||||
kind: 1,
|
||||
content,
|
||||
created_at: Math.floor(Date.now() / 1000),
|
||||
tags: []
|
||||
});
|
||||
|
||||
// Publish event...
|
||||
}
|
||||
</script>
|
||||
```
|
||||
|
||||
### Login Component
|
||||
|
||||
```svelte
|
||||
<script>
|
||||
import { getContext } from 'svelte';
|
||||
import { Nip07Signer, SimpleSigner } from 'applesauce-signers';
|
||||
|
||||
const { setSigner, clearSigner, signer } = getContext('signer');
|
||||
|
||||
let nsec = '';
|
||||
|
||||
async function loginWithExtension() {
|
||||
if (window.nostr) {
|
||||
setSigner(new Nip07Signer());
|
||||
} else {
|
||||
alert('No extension found');
|
||||
}
|
||||
}
|
||||
|
||||
function loginWithNsec() {
|
||||
try {
|
||||
const decoded = nip19.decode(nsec);
|
||||
if (decoded.type === 'nsec') {
|
||||
setSigner(new SimpleSigner(decoded.data));
|
||||
nsec = '';
|
||||
}
|
||||
} catch (e) {
|
||||
alert('Invalid nsec');
|
||||
}
|
||||
}
|
||||
|
||||
function logout() {
|
||||
clearSigner();
|
||||
}
|
||||
</script>
|
||||
|
||||
{#if $signer}
|
||||
<button on:click={logout}>Logout</button>
|
||||
{:else}
|
||||
<button on:click={loginWithExtension}>
|
||||
Login with Extension
|
||||
</button>
|
||||
|
||||
<div>
|
||||
<input
|
||||
type="password"
|
||||
bind:value={nsec}
|
||||
placeholder="nsec..."
|
||||
/>
|
||||
<button on:click={loginWithNsec}>
|
||||
Login with Key
|
||||
</button>
|
||||
</div>
|
||||
{/if}
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Security
|
||||
|
||||
1. **Never store secret keys in plain text** - Use secure storage
|
||||
2. **Prefer NIP-07** - Let extensions manage keys
|
||||
3. **Clear keys on logout** - Don't leave in memory
|
||||
4. **Validate before signing** - Check event content
|
||||
|
||||
### User Experience
|
||||
|
||||
1. **Show signing status** - Loading states
|
||||
2. **Handle rejections gracefully** - User may cancel
|
||||
3. **Provide fallbacks** - Multiple login options
|
||||
4. **Remember preferences** - Store signer type
|
||||
|
||||
### Error Handling
|
||||
|
||||
```javascript
|
||||
/**
 * Sign an event, converting the two expected user-facing failures
 * (rejection, timeout) into a logged `null`; any other error propagates.
 */
async function safeSign(signer, event) {
  // Checked in order: substring of error.message -> message to log.
  const benignFailures = [
    ['rejected', 'User rejected signing'],
    ['timeout', 'Signing timed out'],
  ];
  try {
    return await signer.signEvent(event);
  } catch (error) {
    for (const [needle, note] of benignFailures) {
      if (error.message.includes(needle)) {
        console.log(note);
        return null;
      }
    }
    throw error;
  }
}
|
||||
```
|
||||
|
||||
### Permission Checking
|
||||
|
||||
```javascript
|
||||
/** True when the signer exposes at least one encrypt method (NIP-04 or NIP-44). */
function hasEncryptionSupport(signer) {
  const supports = (name) => typeof signer[name] === 'function';
  return supports('nip04Encrypt') || supports('nip44Encrypt');
}
|
||||
|
||||
/**
 * Pick the best encryption scheme the signer supports: 'nip44' if present
 * (preferred), else 'nip04', else null.
 */
function getEncryptionMethod(signer) {
  for (const method of ['nip44', 'nip04']) {
    if (typeof signer[`${method}Encrypt`] === 'function') {
      return method;
    }
  }
  return null;
}
|
||||
```
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Signer Detection
|
||||
|
||||
```javascript
|
||||
/**
 * Enumerate the signer backends usable in the current environment.
 * Each entry is { type, name, create } where create() builds the signer.
 */
async function detectSigners() {
  const found = [];

  // A NIP-07 extension injects window.nostr into the page.
  const hasExtension = typeof window !== 'undefined' && Boolean(window.nostr);
  if (hasExtension) {
    found.push({
      type: 'nip07',
      name: 'Browser Extension',
      create: () => new Nip07Signer()
    });
  }

  // NOTE(review): this reads a plaintext nsec from localStorage, which
  // contradicts the "never store secret keys in plain text" guidance in
  // this document — confirm this trade-off is intended.
  const storedKey = localStorage.getItem('nsec');
  if (storedKey) {
    found.push({
      type: 'stored',
      name: 'Saved Key',
      create: () => new SimpleSigner(storedKey)
    });
  }

  return found;
}
|
||||
```
|
||||
|
||||
### Auto-Reconnect for NIP-46
|
||||
|
||||
```javascript
|
||||
/**
 * Wrapper around Nip46Signer that transparently re-establishes the remote
 * signer session when a request fails because the connection dropped.
 */
class ReconnectingNip46Signer {
  /** @param {object} options - forwarded verbatim to Nip46Signer */
  constructor(options) {
    this.options = options;
    this.signer = null;
  }

  /** (Re)create the underlying signer and open its connection. */
  async connect() {
    this.signer = new Nip46Signer(this.options);
    await this.signer.connect();
  }

  /**
   * Sign via the remote signer, reconnecting and retrying once if the
   * request fails with a "disconnected" error.
   */
  async signEvent(event) {
    // Lazily connect on first use so calling signEvent() before connect()
    // no longer crashes on a null this.signer.
    if (!this.signer) {
      await this.connect();
    }
    try {
      return await this.signer.signEvent(event);
    } catch (error) {
      if (error.message.includes('disconnected')) {
        // One reconnect attempt, then retry the request a single time.
        await this.connect();
        return await this.signer.signEvent(event);
      }
      throw error;
    }
  }
}
|
||||
```
|
||||
|
||||
### Signer Type Persistence
|
||||
|
||||
```javascript
|
||||
const SIGNER_KEY = 'nostr_signer_type';

/**
 * Persist the user's signer choice so it can be restored next session.
 * Only the signer *type* and non-secret connection data belong here.
 */
function saveSigner(type, data) {
  localStorage.setItem(SIGNER_KEY, JSON.stringify({ type, data }));
}

/**
 * Recreate the previously used signer, or return null when nothing was
 * saved or the saved backend is unavailable in this environment.
 */
async function restoreSigner() {
  const saved = localStorage.getItem(SIGNER_KEY);
  if (!saved) return null;

  const { type, data } = JSON.parse(saved);

  switch (type) {
    case 'nip07':
      if (window.nostr) {
        return new Nip07Signer();
      }
      break;
    case 'simple':
      // Don't store secret keys!
      break;
    case 'nip46': {
      // Braces give the `const` its own scope (avoids the lexical
      // declaration-in-case pitfall flagged by no-case-declarations).
      const signer = new Nip46Signer(data);
      await signer.connect();
      return signer;
    }
  }

  return null;
}
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
**Extension not detected:**
|
||||
- Wait for page load
|
||||
- Check window.nostr exists
|
||||
- Verify extension is enabled
|
||||
|
||||
**Signing rejected:**
|
||||
- User cancelled in extension
|
||||
- Handle gracefully with error message
|
||||
|
||||
**NIP-46 connection fails:**
|
||||
- Check relay is accessible
|
||||
- Verify remote signer is online
|
||||
- Check secret matches
|
||||
|
||||
**Encryption not supported:**
|
||||
- Check signer has encrypt methods
|
||||
- Fall back to alternative method
|
||||
- Show user appropriate error
|
||||
|
||||
## References
|
||||
|
||||
- **applesauce GitHub**: https://github.com/hzrd149/applesauce
|
||||
- **NIP-07 Specification**: https://github.com/nostr-protocol/nips/blob/master/07.md
|
||||
- **NIP-46 Specification**: https://github.com/nostr-protocol/nips/blob/master/46.md
|
||||
- **nostr-tools**: https://github.com/nbd-wtf/nostr-tools
|
||||
|
||||
## Related Skills
|
||||
|
||||
- **nostr-tools** - Event creation and signing utilities
|
||||
- **applesauce-core** - Event stores and queries
|
||||
- **nostr** - Nostr protocol fundamentals
|
||||
- **svelte** - Building Nostr UIs
|
||||
767
.claude/skills/nostr-tools/SKILL.md
Normal file
767
.claude/skills/nostr-tools/SKILL.md
Normal file
@@ -0,0 +1,767 @@
|
||||
---
|
||||
name: nostr-tools
|
||||
description: This skill should be used when working with nostr-tools library for Nostr protocol operations, including event creation, signing, filtering, relay communication, and NIP implementations. Provides comprehensive knowledge of nostr-tools APIs and patterns.
|
||||
---
|
||||
|
||||
# nostr-tools Skill
|
||||
|
||||
This skill provides comprehensive knowledge and patterns for working with nostr-tools, the most popular JavaScript/TypeScript library for Nostr protocol development.
|
||||
|
||||
## When to Use This Skill
|
||||
|
||||
Use this skill when:
|
||||
- Building Nostr clients or applications
|
||||
- Creating and signing Nostr events
|
||||
- Connecting to Nostr relays
|
||||
- Implementing NIP features
|
||||
- Working with Nostr keys and cryptography
|
||||
- Filtering and querying events
|
||||
- Building relay pools or connections
|
||||
- Implementing NIP-44/NIP-04 encryption
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### nostr-tools Overview
|
||||
|
||||
nostr-tools provides:
|
||||
- **Event handling** - Create, sign, verify events
|
||||
- **Key management** - Generate, convert, encode keys
|
||||
- **Relay communication** - Connect, subscribe, publish
|
||||
- **NIP implementations** - NIP-04, NIP-05, NIP-19, NIP-44, etc.
|
||||
- **Cryptographic operations** - Schnorr signatures, encryption
|
||||
- **Filter building** - Query events by various criteria
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
npm install nostr-tools
|
||||
```
|
||||
|
||||
### Basic Imports
|
||||
|
||||
```javascript
|
||||
// Core functionality
|
||||
import {
|
||||
SimplePool,
|
||||
generateSecretKey,
|
||||
getPublicKey,
|
||||
finalizeEvent,
|
||||
verifyEvent
|
||||
} from 'nostr-tools';
|
||||
|
||||
// NIP-specific imports
|
||||
import { nip04, nip05, nip19, nip44 } from 'nostr-tools';
|
||||
|
||||
// Relay operations
|
||||
import { Relay } from 'nostr-tools/relay';
|
||||
```
|
||||
|
||||
## Key Management
|
||||
|
||||
### Generating Keys
|
||||
|
||||
```javascript
|
||||
import { generateSecretKey, getPublicKey } from 'nostr-tools/pure';
|
||||
|
||||
// Generate new secret key (Uint8Array)
|
||||
const secretKey = generateSecretKey();
|
||||
|
||||
// Derive public key
|
||||
const publicKey = getPublicKey(secretKey);
|
||||
|
||||
console.log('Secret key:', bytesToHex(secretKey));
|
||||
console.log('Public key:', publicKey); // hex string
|
||||
```
|
||||
|
||||
### Key Encoding (NIP-19)
|
||||
|
||||
```javascript
|
||||
import { nip19 } from 'nostr-tools';
|
||||
|
||||
// Encode to bech32
|
||||
const nsec = nip19.nsecEncode(secretKey);
|
||||
const npub = nip19.npubEncode(publicKey);
|
||||
const note = nip19.noteEncode(eventId);
|
||||
|
||||
console.log(nsec); // nsec1...
|
||||
console.log(npub); // npub1...
|
||||
console.log(note); // note1...
|
||||
|
||||
// Decode from bech32
|
||||
const { type, data } = nip19.decode(npub);
|
||||
// type: 'npub', data: publicKey (hex)
|
||||
|
||||
// Encode profile reference (nprofile)
|
||||
const nprofile = nip19.nprofileEncode({
|
||||
pubkey: publicKey,
|
||||
relays: ['wss://relay.example.com']
|
||||
});
|
||||
|
||||
// Encode event reference (nevent)
|
||||
const nevent = nip19.neventEncode({
|
||||
id: eventId,
|
||||
relays: ['wss://relay.example.com'],
|
||||
author: publicKey,
|
||||
kind: 1
|
||||
});
|
||||
|
||||
// Encode address (naddr) for replaceable events
|
||||
const naddr = nip19.naddrEncode({
|
||||
identifier: 'my-article',
|
||||
pubkey: publicKey,
|
||||
kind: 30023,
|
||||
relays: ['wss://relay.example.com']
|
||||
});
|
||||
```
|
||||
|
||||
## Event Operations
|
||||
|
||||
### Event Structure
|
||||
|
||||
```javascript
|
||||
// Unsigned event template
|
||||
const eventTemplate = {
|
||||
kind: 1,
|
||||
created_at: Math.floor(Date.now() / 1000),
|
||||
tags: [],
|
||||
content: 'Hello Nostr!'
|
||||
};
|
||||
|
||||
// Signed event (after finalizeEvent)
|
||||
const signedEvent = {
|
||||
id: '...', // 32-byte sha256 hash as hex
|
||||
pubkey: '...', // 32-byte public key as hex
|
||||
created_at: 1234567890,
|
||||
kind: 1,
|
||||
tags: [],
|
||||
content: 'Hello Nostr!',
|
||||
sig: '...' // 64-byte Schnorr signature as hex
|
||||
};
|
||||
```
|
||||
|
||||
### Creating and Signing Events
|
||||
|
||||
```javascript
|
||||
import { finalizeEvent, verifyEvent } from 'nostr-tools/pure';
|
||||
|
||||
// Create event template
|
||||
const eventTemplate = {
|
||||
kind: 1,
|
||||
created_at: Math.floor(Date.now() / 1000),
|
||||
tags: [
|
||||
['p', publicKey], // Mention
|
||||
['e', eventId, '', 'reply'], // Reply
|
||||
['t', 'nostr'] // Hashtag
|
||||
],
|
||||
content: 'Hello Nostr!'
|
||||
};
|
||||
|
||||
// Sign event
|
||||
const signedEvent = finalizeEvent(eventTemplate, secretKey);
|
||||
|
||||
// Verify event
|
||||
const isValid = verifyEvent(signedEvent);
|
||||
console.log('Event valid:', isValid);
|
||||
```
|
||||
|
||||
### Event Kinds
|
||||
|
||||
```javascript
|
||||
// Common event kinds
|
||||
const KINDS = {
|
||||
Metadata: 0, // Profile metadata (NIP-01)
|
||||
Text: 1, // Short text note (NIP-01)
|
||||
RecommendRelay: 2, // Relay recommendation
|
||||
Contacts: 3, // Contact list (NIP-02)
|
||||
EncryptedDM: 4, // Encrypted DM (NIP-04)
|
||||
EventDeletion: 5, // Delete events (NIP-09)
|
||||
Repost: 6, // Repost (NIP-18)
|
||||
Reaction: 7, // Reaction (NIP-25)
|
||||
ChannelCreation: 40, // Channel (NIP-28)
|
||||
ChannelMessage: 42, // Channel message
|
||||
Zap: 9735, // Zap receipt (NIP-57)
|
||||
Report: 1984, // Report (NIP-56)
|
||||
RelayList: 10002, // Relay list (NIP-65)
|
||||
Article: 30023, // Long-form content (NIP-23)
|
||||
};
|
||||
```
|
||||
|
||||
### Creating Specific Events
|
||||
|
||||
```javascript
|
||||
// Profile metadata (kind 0)
|
||||
const profileEvent = finalizeEvent({
|
||||
kind: 0,
|
||||
created_at: Math.floor(Date.now() / 1000),
|
||||
tags: [],
|
||||
content: JSON.stringify({
|
||||
name: 'Alice',
|
||||
about: 'Nostr enthusiast',
|
||||
picture: 'https://example.com/avatar.jpg',
|
||||
nip05: 'alice@example.com',
|
||||
lud16: 'alice@getalby.com'
|
||||
})
|
||||
}, secretKey);
|
||||
|
||||
// Contact list (kind 3)
|
||||
const contactsEvent = finalizeEvent({
|
||||
kind: 3,
|
||||
created_at: Math.floor(Date.now() / 1000),
|
||||
tags: [
|
||||
['p', pubkey1, 'wss://relay1.com', 'alice'],
|
||||
['p', pubkey2, 'wss://relay2.com', 'bob'],
|
||||
['p', pubkey3, '', 'carol']
|
||||
],
|
||||
content: '' // Or JSON relay preferences
|
||||
}, secretKey);
|
||||
|
||||
// Reply to an event
|
||||
const replyEvent = finalizeEvent({
|
||||
kind: 1,
|
||||
created_at: Math.floor(Date.now() / 1000),
|
||||
tags: [
|
||||
['e', rootEventId, '', 'root'],
|
||||
['e', parentEventId, '', 'reply'],
|
||||
['p', parentEventPubkey]
|
||||
],
|
||||
content: 'This is a reply'
|
||||
}, secretKey);
|
||||
|
||||
// Reaction (kind 7)
|
||||
const reactionEvent = finalizeEvent({
|
||||
kind: 7,
|
||||
created_at: Math.floor(Date.now() / 1000),
|
||||
tags: [
|
||||
['e', eventId],
|
||||
['p', eventPubkey]
|
||||
],
|
||||
content: '+' // or '-' or emoji
|
||||
}, secretKey);
|
||||
|
||||
// Delete event (kind 5)
|
||||
const deleteEvent = finalizeEvent({
|
||||
kind: 5,
|
||||
created_at: Math.floor(Date.now() / 1000),
|
||||
tags: [
|
||||
['e', eventIdToDelete],
|
||||
['e', anotherEventIdToDelete]
|
||||
],
|
||||
content: 'Deletion reason'
|
||||
}, secretKey);
|
||||
```
|
||||
|
||||
## Relay Communication
|
||||
|
||||
### Using SimplePool
|
||||
|
||||
SimplePool is the recommended way to interact with multiple relays:
|
||||
|
||||
```javascript
|
||||
import { SimplePool } from 'nostr-tools/pool';
|
||||
|
||||
const pool = new SimplePool();
|
||||
const relays = [
|
||||
'wss://relay.damus.io',
|
||||
'wss://nos.lol',
|
||||
'wss://relay.nostr.band'
|
||||
];
|
||||
|
||||
// Subscribe to events
|
||||
const subscription = pool.subscribeMany(
|
||||
relays,
|
||||
[
|
||||
{
|
||||
kinds: [1],
|
||||
authors: [publicKey],
|
||||
limit: 10
|
||||
}
|
||||
],
|
||||
{
|
||||
onevent(event) {
|
||||
console.log('Received event:', event);
|
||||
},
|
||||
oneose() {
|
||||
console.log('End of stored events');
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
// Close subscription when done
|
||||
subscription.close();
|
||||
|
||||
// Publish event to all relays
|
||||
const results = await Promise.allSettled(
|
||||
pool.publish(relays, signedEvent)
|
||||
);
|
||||
|
||||
// Query events (returns Promise)
|
||||
const events = await pool.querySync(relays, {
|
||||
kinds: [0],
|
||||
authors: [publicKey]
|
||||
});
|
||||
|
||||
// Get single event
|
||||
const event = await pool.get(relays, {
|
||||
ids: [eventId]
|
||||
});
|
||||
|
||||
// Close pool when done
|
||||
pool.close(relays);
|
||||
```
|
||||
|
||||
### Direct Relay Connection
|
||||
|
||||
```javascript
|
||||
import { Relay } from 'nostr-tools/relay';
|
||||
|
||||
const relay = await Relay.connect('wss://relay.damus.io');
|
||||
|
||||
console.log(`Connected to ${relay.url}`);
|
||||
|
||||
// Subscribe
|
||||
const sub = relay.subscribe([
|
||||
{
|
||||
kinds: [1],
|
||||
limit: 100
|
||||
}
|
||||
], {
|
||||
onevent(event) {
|
||||
console.log('Event:', event);
|
||||
},
|
||||
oneose() {
|
||||
console.log('EOSE');
|
||||
sub.close();
|
||||
}
|
||||
});
|
||||
|
||||
// Publish
|
||||
await relay.publish(signedEvent);
|
||||
|
||||
// Close
|
||||
relay.close();
|
||||
```
|
||||
|
||||
### Handling Connection States
|
||||
|
||||
```javascript
|
||||
import { Relay } from 'nostr-tools/relay';
|
||||
|
||||
const relay = await Relay.connect('wss://relay.example.com');
|
||||
|
||||
// Listen for disconnect
|
||||
relay.onclose = () => {
|
||||
console.log('Relay disconnected');
|
||||
};
|
||||
|
||||
// Check connection status
|
||||
console.log('Connected:', relay.connected);
|
||||
```
|
||||
|
||||
## Filters
|
||||
|
||||
### Filter Structure
|
||||
|
||||
```javascript
|
||||
const filter = {
|
||||
// Event IDs
|
||||
ids: ['abc123...'],
|
||||
|
||||
// Authors (pubkeys)
|
||||
authors: ['pubkey1', 'pubkey2'],
|
||||
|
||||
// Event kinds
|
||||
kinds: [1, 6, 7],
|
||||
|
||||
// Tags (single-letter keys)
|
||||
'#e': ['eventId1', 'eventId2'],
|
||||
'#p': ['pubkey1'],
|
||||
'#t': ['nostr', 'bitcoin'],
|
||||
'#d': ['article-identifier'],
|
||||
|
||||
// Time range
|
||||
since: 1704067200, // Unix timestamp
|
||||
until: 1704153600,
|
||||
|
||||
// Limit results
|
||||
limit: 100,
|
||||
|
||||
// Search (NIP-50, if relay supports)
|
||||
search: 'nostr protocol'
|
||||
};
|
||||
```
|
||||
|
||||
### Common Filter Patterns
|
||||
|
||||
```javascript
|
||||
// User's recent posts
|
||||
const userPosts = {
|
||||
kinds: [1],
|
||||
authors: [userPubkey],
|
||||
limit: 50
|
||||
};
|
||||
|
||||
// User's profile
|
||||
const userProfile = {
|
||||
kinds: [0],
|
||||
authors: [userPubkey]
|
||||
};
|
||||
|
||||
// User's contacts
|
||||
const userContacts = {
|
||||
kinds: [3],
|
||||
authors: [userPubkey]
|
||||
};
|
||||
|
||||
// Replies to an event
|
||||
const replies = {
|
||||
kinds: [1],
|
||||
'#e': [eventId]
|
||||
};
|
||||
|
||||
// Reactions to an event
|
||||
const reactions = {
|
||||
kinds: [7],
|
||||
'#e': [eventId]
|
||||
};
|
||||
|
||||
// Feed from followed users
|
||||
const feed = {
|
||||
kinds: [1, 6],
|
||||
authors: followedPubkeys,
|
||||
limit: 100
|
||||
};
|
||||
|
||||
// Events mentioning user
|
||||
const mentions = {
|
||||
kinds: [1],
|
||||
'#p': [userPubkey],
|
||||
limit: 50
|
||||
};
|
||||
|
||||
// Hashtag search
|
||||
const hashtagEvents = {
|
||||
kinds: [1],
|
||||
'#t': ['bitcoin'],
|
||||
limit: 100
|
||||
};
|
||||
|
||||
// Replaceable event by d-tag
|
||||
const replaceableEvent = {
|
||||
kinds: [30023],
|
||||
authors: [authorPubkey],
|
||||
'#d': ['article-slug']
|
||||
};
|
||||
```
|
||||
|
||||
### Multiple Filters
|
||||
|
||||
```javascript
|
||||
// Subscribe with multiple filters (OR logic)
|
||||
const filters = [
|
||||
{ kinds: [1], authors: [userPubkey], limit: 20 },
|
||||
{ kinds: [1], '#p': [userPubkey], limit: 20 }
|
||||
];
|
||||
|
||||
pool.subscribeMany(relays, filters, {
|
||||
onevent(event) {
|
||||
// Receives events matching ANY filter
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
## Encryption
|
||||
|
||||
### NIP-04 (Legacy DMs)
|
||||
|
||||
```javascript
|
||||
import { nip04 } from 'nostr-tools';
|
||||
|
||||
// Encrypt message
|
||||
const ciphertext = await nip04.encrypt(
|
||||
secretKey,
|
||||
recipientPubkey,
|
||||
'Hello, this is secret!'
|
||||
);
|
||||
|
||||
// Create encrypted DM event
|
||||
const dmEvent = finalizeEvent({
|
||||
kind: 4,
|
||||
created_at: Math.floor(Date.now() / 1000),
|
||||
tags: [['p', recipientPubkey]],
|
||||
content: ciphertext
|
||||
}, secretKey);
|
||||
|
||||
// Decrypt message
|
||||
const plaintext = await nip04.decrypt(
|
||||
secretKey,
|
||||
senderPubkey,
|
||||
ciphertext
|
||||
);
|
||||
```
|
||||
|
||||
### NIP-44 (Modern Encryption)
|
||||
|
||||
```javascript
|
||||
import { nip44 } from 'nostr-tools';
|
||||
|
||||
// Get conversation key (cache this for multiple messages)
|
||||
const conversationKey = nip44.getConversationKey(
|
||||
secretKey,
|
||||
recipientPubkey
|
||||
);
|
||||
|
||||
// Encrypt
|
||||
const ciphertext = nip44.encrypt(
|
||||
'Hello with NIP-44!',
|
||||
conversationKey
|
||||
);
|
||||
|
||||
// Decrypt
|
||||
const plaintext = nip44.decrypt(
|
||||
ciphertext,
|
||||
conversationKey
|
||||
);
|
||||
```
|
||||
|
||||
## NIP Implementations
|
||||
|
||||
### NIP-05 (DNS Identifier)
|
||||
|
||||
```javascript
|
||||
import { nip05 } from 'nostr-tools';
|
||||
|
||||
// Query NIP-05 identifier
|
||||
const profile = await nip05.queryProfile('alice@example.com');
|
||||
|
||||
if (profile) {
|
||||
console.log('Pubkey:', profile.pubkey);
|
||||
console.log('Relays:', profile.relays);
|
||||
}
|
||||
|
||||
// Verify NIP-05 for a pubkey
|
||||
const isValid = await nip05.queryProfile('alice@example.com')
|
||||
.then(p => p?.pubkey === expectedPubkey);
|
||||
```
|
||||
|
||||
### NIP-10 (Reply Threading)
|
||||
|
||||
```javascript
|
||||
import { nip10 } from 'nostr-tools';
|
||||
|
||||
// Parse reply tags
|
||||
const parsed = nip10.parse(event);
|
||||
|
||||
console.log('Root:', parsed.root); // Original event
|
||||
console.log('Reply:', parsed.reply); // Direct parent
|
||||
console.log('Mentions:', parsed.mentions); // Other mentions
|
||||
console.log('Profiles:', parsed.profiles); // Mentioned pubkeys
|
||||
```
|
||||
|
||||
### NIP-21 (nostr: URIs)
|
||||
|
||||
```javascript
|
||||
// Parse nostr: URIs
|
||||
const uri = 'nostr:npub1...';
|
||||
const { type, data } = nip19.decode(uri.replace('nostr:', ''));
|
||||
```
|
||||
|
||||
### NIP-27 (Content References)
|
||||
|
||||
```javascript
|
||||
// Parse nostr:npub and nostr:note references in content
|
||||
const content = 'Check out nostr:npub1abc... and nostr:note1xyz...';
|
||||
|
||||
const references = content.match(/nostr:(n[a-z]+1[a-z0-9]+)/g);
|
||||
references?.forEach(ref => {
|
||||
const decoded = nip19.decode(ref.replace('nostr:', ''));
|
||||
console.log(decoded.type, decoded.data);
|
||||
});
|
||||
```
|
||||
|
||||
### NIP-57 (Zaps)
|
||||
|
||||
```javascript
|
||||
import { nip57 } from 'nostr-tools';
|
||||
|
||||
// Validate zap receipt
|
||||
const zapReceipt = await pool.get(relays, {
|
||||
kinds: [9735],
|
||||
'#e': [eventId]
|
||||
});
|
||||
|
||||
const validatedZap = await nip57.validateZapRequest(zapReceipt);
|
||||
```
|
||||
|
||||
## Utilities
|
||||
|
||||
### Hex and Bytes Conversion
|
||||
|
||||
```javascript
|
||||
import { bytesToHex, hexToBytes } from '@noble/hashes/utils';
|
||||
|
||||
// Convert secret key to hex
|
||||
const secretKeyHex = bytesToHex(secretKey);
|
||||
|
||||
// Convert hex back to bytes
|
||||
const secretKeyBytes = hexToBytes(secretKeyHex);
|
||||
```
|
||||
|
||||
### Event ID Calculation
|
||||
|
||||
```javascript
|
||||
import { getEventHash } from 'nostr-tools/pure';
|
||||
|
||||
// Calculate event ID without signing
|
||||
const eventId = getEventHash(unsignedEvent);
|
||||
```
|
||||
|
||||
### Signature Operations
|
||||
|
||||
```javascript
|
||||
import {
|
||||
getSignature,
|
||||
verifyEvent
|
||||
} from 'nostr-tools/pure';
|
||||
|
||||
// Sign event data
|
||||
const signature = getSignature(unsignedEvent, secretKey);
|
||||
|
||||
// Verify complete event
|
||||
const isValid = verifyEvent(signedEvent);
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Connection Management
|
||||
|
||||
1. **Use SimplePool** - Manages connections efficiently
|
||||
2. **Limit concurrent connections** - Don't connect to too many relays
|
||||
3. **Handle disconnections** - Implement reconnection logic
|
||||
4. **Close subscriptions** - Always close when done
|
||||
|
||||
### Event Handling
|
||||
|
||||
1. **Verify events** - Always verify signatures
|
||||
2. **Deduplicate** - Events may come from multiple relays
|
||||
3. **Handle replaceable events** - Latest by created_at wins
|
||||
4. **Validate content** - Don't trust event content blindly
|
||||
|
||||
### Key Security
|
||||
|
||||
1. **Never expose secret keys** - Keep in secure storage
|
||||
2. **Use NIP-07 in browsers** - Let extensions handle signing
|
||||
3. **Validate input** - Check key formats before use
|
||||
|
||||
### Performance
|
||||
|
||||
1. **Cache events** - Avoid re-fetching
|
||||
2. **Use filters wisely** - Be specific, use limits
|
||||
3. **Batch operations** - Combine related queries
|
||||
4. **Close idle connections** - Free up resources
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Building a Feed
|
||||
|
||||
```javascript
|
||||
const pool = new SimplePool();
|
||||
const relays = ['wss://relay.damus.io', 'wss://nos.lol'];
|
||||
|
||||
/**
 * Fetch the latest text notes (kind 1) and reposts (kind 6) from the
 * followed authors and return them newest-first.
 */
async function loadFeed(followedPubkeys) {
  const filter = {
    kinds: [1, 6],
    authors: followedPubkeys,
    limit: 100
  };
  const events = await pool.querySync(relays, filter);
  return events.sort((a, b) => b.created_at - a.created_at);
}
|
||||
```
|
||||
|
||||
### Real-time Updates
|
||||
|
||||
```javascript
|
||||
/**
 * Open a live subscription for notes and reposts from the followed authors.
 * `onEvent` receives each matching event; the returned handle's close()
 * ends the subscription.
 */
function subscribeToFeed(followedPubkeys, onEvent) {
  const filters = [{ kinds: [1, 6], authors: followedPubkeys }];
  const handlers = {
    onevent: onEvent,
    oneose() {
      console.log('Caught up with stored events');
    }
  };
  return pool.subscribeMany(relays, filters, handlers);
}
|
||||
```
|
||||
|
||||
### Profile Loading
|
||||
|
||||
```javascript
|
||||
/**
 * Fetch a user's kind-0 metadata event and return the parsed profile
 * object, or null when no metadata exists or its content is malformed.
 */
async function loadProfile(pubkey) {
  const [metadata] = await pool.querySync(relays, {
    kinds: [0],
    authors: [pubkey],
    limit: 1
  });

  if (!metadata) return null;

  try {
    return JSON.parse(metadata.content);
  } catch {
    // Relays can serve events with arbitrary content; treat unparseable
    // metadata like a missing profile instead of throwing.
    return null;
  }
}
|
||||
```
|
||||
|
||||
### Event Deduplication
|
||||
|
||||
```javascript
|
||||
// Ids already processed; relays routinely deliver the same event twice.
const seenEvents = new Set();

/** Process an event exactly once, ignoring repeats by id. */
function handleEvent(event) {
  if (seenEvents.has(event.id)) {
    return; // Skip duplicate
  }
  seenEvents.add(event.id);

  // Process event...
}
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
**Events not publishing:**
|
||||
- Check relay is writable
|
||||
- Verify event is properly signed
|
||||
- Check relay's accepted kinds
|
||||
|
||||
**Subscription not receiving events:**
|
||||
- Verify filter syntax
|
||||
- Check relay has matching events
|
||||
- Ensure subscription isn't closed
|
||||
|
||||
**Signature verification fails:**
|
||||
- Check event structure is correct
|
||||
- Verify keys are in correct format
|
||||
- Ensure event hasn't been modified
|
||||
|
||||
**NIP-05 lookup fails:**
|
||||
- Check CORS headers on server
|
||||
- Verify .well-known path is correct
|
||||
- Handle network timeouts
|
||||
|
||||
## References
|
||||
|
||||
- **nostr-tools GitHub**: https://github.com/nbd-wtf/nostr-tools
|
||||
- **Nostr Protocol**: https://github.com/nostr-protocol/nostr
|
||||
- **NIPs Repository**: https://github.com/nostr-protocol/nips
|
||||
- **NIP-01 (Basic Protocol)**: https://github.com/nostr-protocol/nips/blob/master/01.md
|
||||
|
||||
## Related Skills
|
||||
|
||||
- **nostr** - Nostr protocol fundamentals
|
||||
- **svelte** - Building Nostr UIs with Svelte
|
||||
- **applesauce-core** - Higher-level Nostr client utilities
|
||||
- **applesauce-signers** - Nostr signing abstractions
|
||||
899
.claude/skills/rollup/SKILL.md
Normal file
899
.claude/skills/rollup/SKILL.md
Normal file
@@ -0,0 +1,899 @@
|
||||
---
|
||||
name: rollup
|
||||
description: This skill should be used when working with Rollup module bundler, including configuration, plugins, code splitting, and build optimization. Provides comprehensive knowledge of Rollup patterns, plugin development, and bundling strategies.
|
||||
---
|
||||
|
||||
# Rollup Skill
|
||||
|
||||
This skill provides comprehensive knowledge and patterns for working with Rollup module bundler effectively.
|
||||
|
||||
## When to Use This Skill
|
||||
|
||||
Use this skill when:
|
||||
- Configuring Rollup for web applications
|
||||
- Setting up Rollup for library builds
|
||||
- Working with Rollup plugins
|
||||
- Implementing code splitting
|
||||
- Optimizing bundle size
|
||||
- Troubleshooting build issues
|
||||
- Integrating Rollup with Svelte or other frameworks
|
||||
- Developing custom Rollup plugins
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### Rollup Overview
|
||||
|
||||
Rollup is a module bundler that:
|
||||
- **Tree-shakes by default** - Removes unused code automatically
|
||||
- **ES module focused** - Native ESM output support
|
||||
- **Plugin-based** - Extensible architecture
|
||||
- **Multiple outputs** - Generate multiple formats from single input
|
||||
- **Code splitting** - Dynamic imports for lazy loading
|
||||
- **Scope hoisting** - Flattens modules for smaller bundles
|
||||
|
||||
### Basic Configuration
|
||||
|
||||
```javascript
|
||||
// rollup.config.js
|
||||
export default {
|
||||
input: 'src/main.js',
|
||||
output: {
|
||||
file: 'dist/bundle.js',
|
||||
format: 'esm'
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### Output Formats
|
||||
|
||||
Rollup supports multiple output formats:
|
||||
|
||||
| Format | Description | Use Case |
|
||||
|--------|-------------|----------|
|
||||
| `esm` | ES modules | Modern browsers, bundlers |
|
||||
| `cjs` | CommonJS | Node.js |
|
||||
| `iife` | Self-executing function | Script tags |
|
||||
| `umd` | Universal Module Definition | CDN, both environments |
|
||||
| `amd` | Asynchronous Module Definition | RequireJS |
|
||||
| `system` | SystemJS | SystemJS loader |
|
||||
|
||||
## Configuration
|
||||
|
||||
### Full Configuration Options
|
||||
|
||||
```javascript
|
||||
// rollup.config.js
|
||||
import resolve from '@rollup/plugin-node-resolve';
|
||||
import commonjs from '@rollup/plugin-commonjs';
|
||||
import terser from '@rollup/plugin-terser';
|
||||
|
||||
const production = !process.env.ROLLUP_WATCH;
|
||||
|
||||
export default {
|
||||
// Entry point(s)
|
||||
input: 'src/main.js',
|
||||
|
||||
// Output configuration
|
||||
output: {
|
||||
// Output file or directory
|
||||
file: 'dist/bundle.js',
|
||||
// Or for code splitting:
|
||||
// dir: 'dist',
|
||||
|
||||
// Output format
|
||||
format: 'esm',
|
||||
|
||||
// Name for IIFE/UMD builds
|
||||
name: 'MyBundle',
|
||||
|
||||
// Sourcemap generation
|
||||
sourcemap: true,
|
||||
|
||||
// Global variables for external imports (IIFE/UMD)
|
||||
globals: {
|
||||
jquery: '$'
|
||||
},
|
||||
|
||||
// Banner/footer comments
|
||||
banner: '/* My library v1.0.0 */',
|
||||
footer: '/* End of bundle */',
|
||||
|
||||
// Chunk naming for code splitting
|
||||
chunkFileNames: '[name]-[hash].js',
|
||||
entryFileNames: '[name].js',
|
||||
|
||||
// Manual chunks for code splitting
|
||||
manualChunks: {
|
||||
vendor: ['lodash', 'moment']
|
||||
},
|
||||
|
||||
// Interop mode for default exports
|
||||
interop: 'auto',
|
||||
|
||||
// Preserve modules structure
|
||||
preserveModules: false,
|
||||
|
||||
// Exports mode
|
||||
exports: 'auto' // 'default', 'named', 'none', 'auto'
|
||||
},
|
||||
|
||||
// External dependencies (not bundled)
|
||||
external: ['lodash', /^node:/],
|
||||
|
||||
// Plugin array
|
||||
plugins: [
|
||||
resolve({
|
||||
browser: true,
|
||||
dedupe: ['svelte']
|
||||
}),
|
||||
commonjs(),
|
||||
production && terser()
|
||||
],
|
||||
|
||||
// Watch mode options
|
||||
watch: {
|
||||
include: 'src/**',
|
||||
exclude: 'node_modules/**',
|
||||
clearScreen: false
|
||||
},
|
||||
|
||||
// Warning handling
|
||||
onwarn(warning, warn) {
|
||||
// Skip certain warnings
|
||||
if (warning.code === 'CIRCULAR_DEPENDENCY') return;
|
||||
warn(warning);
|
||||
},
|
||||
|
||||
// Preserve entry signatures for code splitting
|
||||
preserveEntrySignatures: 'strict',
|
||||
|
||||
// Treeshake options
|
||||
treeshake: {
|
||||
moduleSideEffects: false,
|
||||
propertyReadSideEffects: false
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### Multiple Outputs
|
||||
|
||||
```javascript
|
||||
export default {
|
||||
input: 'src/main.js',
|
||||
output: [
|
||||
{
|
||||
file: 'dist/bundle.esm.js',
|
||||
format: 'esm'
|
||||
},
|
||||
{
|
||||
file: 'dist/bundle.cjs.js',
|
||||
format: 'cjs'
|
||||
},
|
||||
{
|
||||
file: 'dist/bundle.umd.js',
|
||||
format: 'umd',
|
||||
name: 'MyLibrary'
|
||||
}
|
||||
]
|
||||
};
|
||||
```
|
||||
|
||||
### Multiple Entry Points
|
||||
|
||||
```javascript
|
||||
export default {
|
||||
input: {
|
||||
main: 'src/main.js',
|
||||
utils: 'src/utils.js'
|
||||
},
|
||||
output: {
|
||||
dir: 'dist',
|
||||
format: 'esm'
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### Array of Configurations
|
||||
|
||||
```javascript
|
||||
export default [
|
||||
{
|
||||
input: 'src/main.js',
|
||||
output: { file: 'dist/main.js', format: 'esm' }
|
||||
},
|
||||
{
|
||||
input: 'src/worker.js',
|
||||
output: { file: 'dist/worker.js', format: 'iife' }
|
||||
}
|
||||
];
|
||||
```
|
||||
|
||||
## Essential Plugins
|
||||
|
||||
### @rollup/plugin-node-resolve
|
||||
|
||||
Resolve node_modules imports:
|
||||
|
||||
```javascript
|
||||
import resolve from '@rollup/plugin-node-resolve';
|
||||
|
||||
export default {
|
||||
plugins: [
|
||||
resolve({
|
||||
// Resolve browser field in package.json
|
||||
browser: true,
|
||||
|
||||
// Prefer built-in modules
|
||||
preferBuiltins: true,
|
||||
|
||||
// Only resolve these extensions
|
||||
extensions: ['.mjs', '.js', '.json', '.node'],
|
||||
|
||||
// Dedupe packages (important for Svelte)
|
||||
dedupe: ['svelte'],
|
||||
|
||||
// Main fields to check in package.json
|
||||
mainFields: ['module', 'main', 'browser'],
|
||||
|
||||
// Export conditions
|
||||
exportConditions: ['svelte', 'browser', 'module', 'import']
|
||||
})
|
||||
]
|
||||
};
|
||||
```
|
||||
|
||||
### @rollup/plugin-commonjs
|
||||
|
||||
Convert CommonJS to ES modules:
|
||||
|
||||
```javascript
|
||||
import commonjs from '@rollup/plugin-commonjs';
|
||||
|
||||
export default {
|
||||
plugins: [
|
||||
commonjs({
|
||||
// Include specific modules
|
||||
include: /node_modules/,
|
||||
|
||||
// Exclude specific modules
|
||||
exclude: ['node_modules/lodash-es/**'],
|
||||
|
||||
// Ignore conditional requires
|
||||
ignoreDynamicRequires: false,
|
||||
|
||||
// Transform mixed ES/CJS modules
|
||||
transformMixedEsModules: true,
|
||||
|
||||
// Note: `namedExports` was removed in @rollup/plugin-commonjs v10+;
// named exports are now detected automatically. For interop tuning,
// use `requireReturnsDefault` and `esmExternals` instead.
|
||||
})
|
||||
]
|
||||
};
|
||||
```
|
||||
|
||||
### @rollup/plugin-terser
|
||||
|
||||
Minify output:
|
||||
|
||||
```javascript
|
||||
import terser from '@rollup/plugin-terser';
|
||||
|
||||
export default {
|
||||
plugins: [
|
||||
terser({
|
||||
compress: {
|
||||
drop_console: true,
|
||||
drop_debugger: true
|
||||
},
|
||||
mangle: true,
|
||||
format: {
|
||||
comments: false
|
||||
}
|
||||
})
|
||||
]
|
||||
};
|
||||
```
|
||||
|
||||
### rollup-plugin-svelte
|
||||
|
||||
Compile Svelte components:
|
||||
|
||||
```javascript
|
||||
import svelte from 'rollup-plugin-svelte';
|
||||
import css from 'rollup-plugin-css-only';
|
||||
|
||||
export default {
|
||||
plugins: [
|
||||
svelte({
|
||||
// Note: in rollup-plugin-svelte v7+ there is no top-level `dev`
// option; set it under `compilerOptions` (shown below) instead.
|
||||
|
||||
// Emit CSS as a separate file
|
||||
emitCss: true,
|
||||
|
||||
// Preprocess (SCSS, TypeScript, etc.) — requires
// `import sveltePreprocess from 'svelte-preprocess'` at the top
preprocess: sveltePreprocess(),
|
||||
|
||||
// Compiler options
|
||||
compilerOptions: {
|
||||
dev: !production
|
||||
},
|
||||
|
||||
// Custom element mode
|
||||
customElement: false
|
||||
}),
|
||||
|
||||
// Extract CSS to separate file
|
||||
css({ output: 'bundle.css' })
|
||||
]
|
||||
};
|
||||
```
|
||||
|
||||
### Other Common Plugins
|
||||
|
||||
```javascript
|
||||
import json from '@rollup/plugin-json';
|
||||
import replace from '@rollup/plugin-replace';
|
||||
import alias from '@rollup/plugin-alias';
|
||||
import image from '@rollup/plugin-image';
|
||||
import copy from 'rollup-plugin-copy';
|
||||
import livereload from 'rollup-plugin-livereload';
|
||||
|
||||
export default {
|
||||
plugins: [
|
||||
// Import JSON files
|
||||
json(),
|
||||
|
||||
// Replace strings in code
|
||||
replace({
|
||||
preventAssignment: true,
|
||||
'process.env.NODE_ENV': JSON.stringify('production'),
|
||||
'__VERSION__': JSON.stringify('1.0.0')
|
||||
}),
|
||||
|
||||
// Path aliases
|
||||
alias({
|
||||
entries: [
|
||||
{ find: '@', replacement: './src' },
|
||||
{ find: 'utils', replacement: './src/utils' }
|
||||
]
|
||||
}),
|
||||
|
||||
// Import images
|
||||
image(),
|
||||
|
||||
// Copy static files
|
||||
copy({
|
||||
targets: [
|
||||
{ src: 'public/*', dest: 'dist' }
|
||||
]
|
||||
}),
|
||||
|
||||
// Live reload in dev
|
||||
!production && livereload('dist')
|
||||
]
|
||||
};
|
||||
```
|
||||
|
||||
## Code Splitting
|
||||
|
||||
### Dynamic Imports
|
||||
|
||||
```javascript
|
||||
// Automatically creates chunks
|
||||
async function loadFeature() {
|
||||
const { feature } = await import('./feature.js');
|
||||
feature();
|
||||
}
|
||||
```
|
||||
|
||||
Configuration for code splitting:
|
||||
|
||||
```javascript
|
||||
export default {
|
||||
input: 'src/main.js',
|
||||
output: {
|
||||
dir: 'dist',
|
||||
format: 'esm',
|
||||
chunkFileNames: 'chunks/[name]-[hash].js'
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### Manual Chunks
|
||||
|
||||
```javascript
|
||||
export default {
|
||||
output: {
|
||||
manualChunks: {
|
||||
// Vendor chunk
|
||||
vendor: ['lodash', 'moment'],
|
||||
|
||||
// Or use a function for more control
|
||||
manualChunks(id) {
|
||||
if (id.includes('node_modules')) {
|
||||
return 'vendor';
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### Advanced Chunking Strategy
|
||||
|
||||
```javascript
|
||||
export default {
|
||||
output: {
|
||||
manualChunks(id, { getModuleInfo }) {
|
||||
// Separate chunks by feature
|
||||
if (id.includes('/features/auth/')) {
|
||||
return 'auth';
|
||||
}
|
||||
if (id.includes('/features/dashboard/')) {
|
||||
return 'dashboard';
|
||||
}
|
||||
|
||||
// Vendor chunks by package
|
||||
if (id.includes('node_modules')) {
|
||||
const match = id.match(/node_modules\/([^/]+)/);
|
||||
if (match) {
|
||||
const packageName = match[1];
|
||||
// Group small packages
|
||||
const smallPackages = ['lodash', 'date-fns'];
|
||||
if (smallPackages.includes(packageName)) {
|
||||
return 'vendor-utils';
|
||||
}
|
||||
return `vendor-${packageName}`;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
## Watch Mode
|
||||
|
||||
### Configuration
|
||||
|
||||
```javascript
|
||||
export default {
|
||||
watch: {
|
||||
// Files to watch
|
||||
include: 'src/**',
|
||||
|
||||
// Files to ignore
|
||||
exclude: 'node_modules/**',
|
||||
|
||||
// Don't clear screen on rebuild
|
||||
clearScreen: false,
|
||||
|
||||
// Rebuild delay
|
||||
buildDelay: 0,
|
||||
|
||||
// Watch chokidar options
|
||||
chokidar: {
|
||||
usePolling: true
|
||||
}
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### CLI Watch Mode
|
||||
|
||||
```bash
|
||||
# Watch mode
|
||||
rollup -c -w
|
||||
|
||||
# With environment variable
|
||||
ROLLUP_WATCH=true rollup -c
|
||||
```
|
||||
|
||||
## Plugin Development
|
||||
|
||||
### Plugin Structure
|
||||
|
||||
```javascript
|
||||
function myPlugin(options = {}) {
|
||||
return {
|
||||
// Plugin name (required)
|
||||
name: 'my-plugin',
|
||||
|
||||
// Build hooks
|
||||
options(inputOptions) {
|
||||
// Modify input options
|
||||
return inputOptions;
|
||||
},
|
||||
|
||||
buildStart(inputOptions) {
|
||||
// Called on build start
|
||||
},
|
||||
|
||||
resolveId(source, importer, options) {
|
||||
// Custom module resolution
|
||||
if (source === 'virtual-module') {
|
||||
return source;
|
||||
}
|
||||
return null; // Defer to other plugins
|
||||
},
|
||||
|
||||
load(id) {
|
||||
// Load module content
|
||||
if (id === 'virtual-module') {
|
||||
return 'export default "Hello"';
|
||||
}
|
||||
return null;
|
||||
},
|
||||
|
||||
transform(code, id) {
|
||||
// Transform module code
|
||||
if (id.endsWith('.txt')) {
|
||||
return {
|
||||
code: `export default ${JSON.stringify(code)}`,
|
||||
map: null
|
||||
};
|
||||
}
|
||||
},
|
||||
|
||||
buildEnd(error) {
|
||||
// Called when build ends
|
||||
if (error) {
|
||||
console.error('Build failed:', error);
|
||||
}
|
||||
},
|
||||
|
||||
// Output generation hooks
|
||||
renderStart(outputOptions, inputOptions) {
|
||||
// Called before output generation
|
||||
},
|
||||
|
||||
banner() {
|
||||
return '/* Custom banner */';
|
||||
},
|
||||
|
||||
footer() {
|
||||
return '/* Custom footer */';
|
||||
},
|
||||
|
||||
renderChunk(code, chunk, options) {
|
||||
// Transform output chunk
|
||||
return code;
|
||||
},
|
||||
|
||||
generateBundle(options, bundle) {
|
||||
// Modify output bundle
|
||||
for (const fileName in bundle) {
|
||||
const chunk = bundle[fileName];
|
||||
if (chunk.type === 'chunk') {
|
||||
// Modify chunk
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
writeBundle(options, bundle) {
|
||||
// After bundle is written
|
||||
},
|
||||
|
||||
closeBundle() {
|
||||
// Called when bundle is closed
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
export default myPlugin;
|
||||
```
|
||||
|
||||
### Plugin with Rollup Utils
|
||||
|
||||
```javascript
|
||||
import { createFilter } from '@rollup/pluginutils';
|
||||
|
||||
function myTransformPlugin(options = {}) {
|
||||
const filter = createFilter(options.include, options.exclude);
|
||||
|
||||
return {
|
||||
name: 'my-transform',
|
||||
|
||||
transform(code, id) {
|
||||
if (!filter(id)) return null;
|
||||
|
||||
// Transform code
|
||||
const transformed = code.replace(/foo/g, 'bar');
|
||||
|
||||
return {
|
||||
code: transformed,
|
||||
map: null // Or generate sourcemap
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
```
|
||||
|
||||
## Svelte Integration
|
||||
|
||||
### Complete Svelte Setup
|
||||
|
||||
```javascript
|
||||
// rollup.config.js
|
||||
import svelte from 'rollup-plugin-svelte';
|
||||
import commonjs from '@rollup/plugin-commonjs';
|
||||
import resolve from '@rollup/plugin-node-resolve';
|
||||
import terser from '@rollup/plugin-terser';
|
||||
import css from 'rollup-plugin-css-only';
|
||||
import livereload from 'rollup-plugin-livereload';
|
||||
|
||||
const production = !process.env.ROLLUP_WATCH;
|
||||
|
||||
function serve() {
|
||||
let server;
|
||||
|
||||
function toExit() {
|
||||
if (server) server.kill(0);
|
||||
}
|
||||
|
||||
return {
|
||||
writeBundle() {
|
||||
if (server) return;
|
||||
// NOTE: `require` is unavailable when the config file itself is an ES
// module; add `import { spawn } from 'child_process'` at the top and
// call `spawn(...)` here instead.
server = require('child_process').spawn(
|
||||
'npm',
|
||||
['run', 'start', '--', '--dev'],
|
||||
{
|
||||
stdio: ['ignore', 'inherit', 'inherit'],
|
||||
shell: true
|
||||
}
|
||||
);
|
||||
|
||||
process.on('SIGTERM', toExit);
|
||||
process.on('exit', toExit);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
export default {
|
||||
input: 'src/main.js',
|
||||
output: {
|
||||
sourcemap: true,
|
||||
format: 'iife',
|
||||
name: 'app',
|
||||
file: 'public/build/bundle.js'
|
||||
},
|
||||
plugins: [
|
||||
svelte({
|
||||
compilerOptions: {
|
||||
dev: !production
|
||||
}
|
||||
}),
|
||||
css({ output: 'bundle.css' }),
|
||||
|
||||
resolve({
|
||||
browser: true,
|
||||
dedupe: ['svelte']
|
||||
}),
|
||||
commonjs(),
|
||||
|
||||
// Dev server
|
||||
!production && serve(),
|
||||
!production && livereload('public'),
|
||||
|
||||
// Minify in production
|
||||
production && terser()
|
||||
],
|
||||
watch: {
|
||||
clearScreen: false
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Bundle Optimization
|
||||
|
||||
1. **Enable tree shaking** - Use ES modules
|
||||
2. **Mark side effects** - Set `sideEffects` in package.json
|
||||
3. **Use terser** - Minify production builds
|
||||
4. **Analyze bundles** - Use rollup-plugin-visualizer
|
||||
5. **Code split** - Lazy load routes and features
|
||||
|
||||
### External Dependencies
|
||||
|
||||
```javascript
|
||||
export default {
|
||||
// Don't bundle peer dependencies for libraries
|
||||
external: [
|
||||
'react',
|
||||
'react-dom',
|
||||
/^lodash\//
|
||||
],
|
||||
output: {
|
||||
globals: {
|
||||
react: 'React',
|
||||
'react-dom': 'ReactDOM'
|
||||
}
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
### Development vs Production
|
||||
|
||||
```javascript
|
||||
const production = !process.env.ROLLUP_WATCH;
|
||||
|
||||
export default {
|
||||
plugins: [
|
||||
replace({
|
||||
preventAssignment: true,
|
||||
'process.env.NODE_ENV': JSON.stringify(
|
||||
production ? 'production' : 'development'
|
||||
)
|
||||
}),
|
||||
production && terser()
|
||||
].filter(Boolean)
|
||||
};
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
```javascript
|
||||
export default {
|
||||
onwarn(warning, warn) {
|
||||
// Ignore circular dependency warnings
|
||||
if (warning.code === 'CIRCULAR_DEPENDENCY') {
|
||||
return;
|
||||
}
|
||||
|
||||
// Ignore unused external imports
|
||||
if (warning.code === 'UNUSED_EXTERNAL_IMPORT') {
|
||||
return;
|
||||
}
|
||||
|
||||
// Treat other warnings as errors
|
||||
if (warning.code === 'UNRESOLVED_IMPORT') {
|
||||
throw new Error(warning.message);
|
||||
}
|
||||
|
||||
// Use default warning handling
|
||||
warn(warning);
|
||||
}
|
||||
};
|
||||
```
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Library Build
|
||||
|
||||
```javascript
|
||||
// NOTE: in an ESM config run by modern Node, a plain JSON import fails;
// use `import pkg from './package.json' with { type: 'json' };` (or
// `createRequire`), or run rollup with `--bundleConfigAsCjs`.
import pkg from './package.json';
|
||||
|
||||
export default {
|
||||
input: 'src/index.js',
|
||||
external: Object.keys(pkg.peerDependencies || {}),
|
||||
output: [
|
||||
{
|
||||
file: pkg.main,
|
||||
format: 'cjs',
|
||||
sourcemap: true
|
||||
},
|
||||
{
|
||||
file: pkg.module,
|
||||
format: 'esm',
|
||||
sourcemap: true
|
||||
}
|
||||
]
|
||||
};
|
||||
```
|
||||
|
||||
### Application Build
|
||||
|
||||
```javascript
|
||||
export default {
|
||||
input: 'src/main.js',
|
||||
output: {
|
||||
dir: 'dist',
|
||||
format: 'esm',
|
||||
chunkFileNames: 'chunks/[name]-[hash].js',
|
||||
entryFileNames: '[name]-[hash].js',
|
||||
sourcemap: true
|
||||
},
|
||||
plugins: [
|
||||
// All dependencies bundled
|
||||
resolve({ browser: true }),
|
||||
commonjs(),
|
||||
terser()
|
||||
]
|
||||
};
|
||||
```
|
||||
|
||||
### Web Worker Build
|
||||
|
||||
```javascript
|
||||
export default [
|
||||
// Main application
|
||||
{
|
||||
input: 'src/main.js',
|
||||
output: {
|
||||
file: 'dist/main.js',
|
||||
format: 'esm'
|
||||
},
|
||||
plugins: [resolve(), commonjs()]
|
||||
},
|
||||
// Web worker (IIFE format)
|
||||
{
|
||||
input: 'src/worker.js',
|
||||
output: {
|
||||
file: 'dist/worker.js',
|
||||
format: 'iife'
|
||||
},
|
||||
plugins: [resolve(), commonjs()]
|
||||
}
|
||||
];
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
**Module not found:**
|
||||
- Check @rollup/plugin-node-resolve is configured
|
||||
- Verify package is installed
|
||||
- Check `external` array
|
||||
|
||||
**CommonJS module issues:**
|
||||
- Add @rollup/plugin-commonjs
|
||||
- Check `namedExports` configuration
|
||||
- Try `transformMixedEsModules: true`
|
||||
|
||||
**Circular dependencies:**
|
||||
- Use `onwarn` to suppress or fix
|
||||
- Refactor to break cycles
|
||||
- Check import order
|
||||
|
||||
**Sourcemaps not working:**
|
||||
- Set `sourcemap: true` in output
|
||||
- Ensure plugins pass through maps
|
||||
- Check browser devtools settings
|
||||
|
||||
**Large bundle size:**
|
||||
- Use rollup-plugin-visualizer
|
||||
- Check for duplicate dependencies
|
||||
- Verify tree shaking is working
|
||||
- Mark unused packages as external
|
||||
|
||||
## CLI Reference
|
||||
|
||||
```bash
|
||||
# Basic build
|
||||
rollup -c
|
||||
|
||||
# Watch mode
|
||||
rollup -c -w
|
||||
|
||||
# Custom config
|
||||
rollup -c rollup.custom.config.js
|
||||
|
||||
# Output format
|
||||
rollup src/main.js --format esm --file dist/bundle.js
|
||||
|
||||
# Environment variables
|
||||
NODE_ENV=production rollup -c
|
||||
|
||||
# Silent mode
|
||||
rollup -c --silent
|
||||
|
||||
# Generate bundle stats
|
||||
rollup -c --perf
|
||||
```
|
||||
|
||||
## References
|
||||
|
||||
- **Rollup Documentation**: https://rollupjs.org
|
||||
- **Plugin Directory**: https://github.com/rollup/plugins
|
||||
- **Awesome Rollup**: https://github.com/rollup/awesome
|
||||
- **GitHub**: https://github.com/rollup/rollup
|
||||
|
||||
## Related Skills
|
||||
|
||||
- **svelte** - Using Rollup with Svelte
|
||||
- **typescript** - TypeScript compilation with Rollup
|
||||
- **nostr-tools** - Bundling Nostr applications
|
||||
1004
.claude/skills/svelte/SKILL.md
Normal file
1004
.claude/skills/svelte/SKILL.md
Normal file
File diff suppressed because it is too large
Load Diff
1
.gitignore
vendored
1
.gitignore
vendored
@@ -79,6 +79,7 @@ cmd/benchmark/data
|
||||
!*.svelte
|
||||
!.github/**
|
||||
!.github/workflows/**
|
||||
!.claude/**
|
||||
!app/web/dist/**
|
||||
!app/web/dist/*.js
|
||||
!app/web/dist/*.js.map
|
||||
|
||||
71
CLAUDE.md
71
CLAUDE.md
@@ -8,11 +8,11 @@ ORLY is a high-performance Nostr relay written in Go, designed for personal rela
|
||||
|
||||
**Key Technologies:**
|
||||
- **Language**: Go 1.25.3+
|
||||
- **Database**: Badger v4 (embedded) or Neo4j (social graph)
|
||||
- **Database**: Badger v4 (embedded), Neo4j (social graph), or WasmDB (IndexedDB for WebAssembly)
|
||||
- **Cryptography**: Custom p8k library using purego for secp256k1 operations (no CGO)
|
||||
- **Web UI**: Svelte frontend embedded in the binary
|
||||
- **WebSocket**: gorilla/websocket for Nostr protocol
|
||||
- **Performance**: SIMD-accelerated SHA256 and hex encoding, query result caching with zstd compression
|
||||
- **Performance**: SIMD-accelerated SHA256 and hex encoding, optional query result caching with zstd compression
|
||||
- **Social Graph**: Neo4j backend with Web of Trust (WoT) extensions for trust metrics
|
||||
|
||||
## Build Commands
|
||||
@@ -123,6 +123,13 @@ export ORLY_PORT=3334
|
||||
./orly identity
|
||||
```
|
||||
|
||||
### Get Version
|
||||
```bash
|
||||
# Print version and exit
|
||||
./orly version
|
||||
# Also accepts: -v, --v, -version, --version
|
||||
```
|
||||
|
||||
### Common Configuration
|
||||
```bash
|
||||
# TLS with Let's Encrypt
|
||||
@@ -140,7 +147,7 @@ export ORLY_SPROCKET_ENABLED=true
|
||||
# Enable policy system
|
||||
export ORLY_POLICY_ENABLED=true
|
||||
|
||||
# Database backend selection (badger or neo4j)
|
||||
# Database backend selection (badger, neo4j, or wasmdb)
|
||||
export ORLY_DB_TYPE=badger
|
||||
|
||||
# Neo4j configuration (only when ORLY_DB_TYPE=neo4j)
|
||||
@@ -148,8 +155,9 @@ export ORLY_NEO4J_URI=bolt://localhost:7687
|
||||
export ORLY_NEO4J_USER=neo4j
|
||||
export ORLY_NEO4J_PASSWORD=password
|
||||
|
||||
# Query cache configuration (improves REQ response times)
|
||||
export ORLY_QUERY_CACHE_SIZE_MB=512 # Default: 512MB
|
||||
# Query cache configuration (disabled by default to reduce memory usage)
|
||||
export ORLY_QUERY_CACHE_DISABLED=false # Set to false to enable caching
|
||||
export ORLY_QUERY_CACHE_SIZE_MB=512 # Cache size when enabled
|
||||
export ORLY_QUERY_CACHE_MAX_AGE=5m # Cache expiry time
|
||||
|
||||
# Database cache tuning (for Badger backend)
|
||||
@@ -200,8 +208,9 @@ export ORLY_AUTH_TO_WRITE=false # Require auth only for writes
|
||||
|
||||
**`pkg/database/`** - Database abstraction layer with multiple backend support
|
||||
- `interface.go` - Database interface definition for pluggable backends
|
||||
- `factory.go` - Database backend selection (Badger or Neo4j)
|
||||
- `database.go` - Badger implementation with cache tuning and query cache
|
||||
- `factory.go` - Database backend selection (Badger, Neo4j, or WasmDB)
|
||||
- `factory_wasm.go` - WebAssembly-specific factory (build tag: `js && wasm`)
|
||||
- `database.go` - Badger implementation with cache tuning and optional query cache
|
||||
- `save-event.go` - Event storage with index updates
|
||||
- `query-events.go` - Main query execution engine with filter normalization
|
||||
- `query-for-*.go` - Specialized query builders for different filter patterns
|
||||
@@ -211,6 +220,14 @@ export ORLY_AUTH_TO_WRITE=false # Require auth only for writes
|
||||
- `identity.go` - Relay identity key management
|
||||
- `migrations.go` - Database schema migration runner
|
||||
|
||||
**`pkg/wasmdb/`** - WebAssembly IndexedDB database backend
|
||||
- `wasmdb.go` - Main WasmDB implementation using IndexedDB
|
||||
- Uses `aperturerobotics/go-indexeddb` for IndexedDB bindings
|
||||
- Replicates Badger's index schema for full query compatibility
|
||||
- Object stores map to index prefixes (evt, eid, kc-, pc-, etc.)
|
||||
- Range queries use IndexedDB cursors with KeyRange bounds
|
||||
- Build tag: `js && wasm`
|
||||
|
||||
**`pkg/neo4j/`** - Neo4j graph database backend with social graph support
|
||||
- `neo4j.go` - Main database implementation
|
||||
- `schema.go` - Graph schema and index definitions (includes WoT extensions)
|
||||
@@ -274,6 +291,8 @@ export ORLY_AUTH_TO_WRITE=false # Require auth only for writes
|
||||
- `write_allow` / `write_deny`: Pubkey whitelist/blacklist for writing (write-only)
|
||||
- `read_allow` / `read_deny`: Pubkey whitelist/blacklist for reading (read-only)
|
||||
- `privileged`: Party-involved access control (read-only)
|
||||
- `read_allow_permissive`: Override kind whitelist for READ access (global rule only)
|
||||
- `write_allow_permissive`: Override kind whitelist for WRITE access (global rule only)
|
||||
- See `docs/POLICY_USAGE_GUIDE.md` for configuration examples
|
||||
- See `pkg/policy/README.md` for quick reference
|
||||
|
||||
@@ -300,7 +319,8 @@ export ORLY_AUTH_TO_WRITE=false # Require auth only for writes
|
||||
**Web UI (`app/web/`):**
|
||||
- Svelte-based admin interface
|
||||
- Embedded in binary via `go:embed`
|
||||
- Features: event browser, sprocket management, policy management, user admin, settings
|
||||
- Features: event browser with advanced filtering, sprocket management, policy management, user admin, settings
|
||||
- **Event Browser:** Enhanced filter system with kind, author, tag, and time range filters (replaced simple search)
|
||||
- **Policy Management Tab:** JSON editor with validation, save publishes kind 12345 event
|
||||
|
||||
**Command-line Tools (`cmd/`):**
|
||||
@@ -328,6 +348,11 @@ export ORLY_AUTH_TO_WRITE=false # Require auth only for writes
|
||||
- NostrUser nodes with trust metrics (influence, PageRank)
|
||||
- FOLLOWS, MUTES, REPORTS relationships for WoT analysis
|
||||
- See `pkg/neo4j/WOT_SPEC.md` for full schema specification
|
||||
- **WasmDB**: IndexedDB backend for WebAssembly builds
|
||||
- Enables running ORLY in browser environments
|
||||
- Full query compatibility with Badger's index schema
|
||||
- Uses `aperturerobotics/go-indexeddb` for IndexedDB access
|
||||
- Build with `GOOS=js GOARCH=wasm go build`
|
||||
- Backend selected via factory pattern in `pkg/database/factory.go`
|
||||
- All backends implement the same `Database` interface defined in `pkg/database/interface.go`
|
||||
|
||||
@@ -599,7 +624,9 @@ sudo journalctl -u orly -f
|
||||
|
||||
## Key Dependencies
|
||||
|
||||
- `github.com/dgraph-io/badger/v4` - Embedded database
|
||||
- `github.com/dgraph-io/badger/v4` - Embedded database (Badger backend)
|
||||
- `github.com/neo4j/neo4j-go-driver/v5` - Neo4j driver (Neo4j backend)
|
||||
- `github.com/aperturerobotics/go-indexeddb` - IndexedDB bindings (WasmDB backend)
|
||||
- `github.com/gorilla/websocket` - WebSocket server
|
||||
- `github.com/minio/sha256-simd` - SIMD SHA256
|
||||
- `github.com/templexxx/xhex` - SIMD hex encoding
|
||||
@@ -686,8 +713,8 @@ Each level has these printer types:
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
- **Query Cache**: 512MB query result cache (configurable via `ORLY_QUERY_CACHE_SIZE_MB`) with zstd level 9 compression reduces database load for repeated queries
|
||||
- **Filter Normalization**: Filters are normalized before cache lookup, so identical queries with different field ordering produce cache hits
|
||||
- **Query Cache**: Optional 512MB query result cache (disabled by default via `ORLY_QUERY_CACHE_DISABLED=true`) with zstd level 9 compression reduces database load for repeated queries; enable with `ORLY_QUERY_CACHE_DISABLED=false`
|
||||
- **Filter Normalization**: When query cache is enabled, filters are normalized before cache lookup, so identical queries with different field ordering produce cache hits
|
||||
- **Database Caching**: Tune `ORLY_DB_BLOCK_CACHE_MB` and `ORLY_DB_INDEX_CACHE_MB` for workload (Badger backend only)
|
||||
- **Query Optimization**: Add indexes for common filter patterns; multiple specialized query builders optimize different filter combinations
|
||||
- **Batch Operations**: ID lookups and event fetching use batch operations via `GetSerialsByIds` and `FetchEventsBySerials`
|
||||
@@ -699,8 +726,9 @@ Each level has these printer types:
|
||||
|
||||
ORLY has received several significant performance improvements in recent updates:
|
||||
|
||||
### Query Cache System (Latest)
|
||||
- 512MB query result cache with zstd level 9 compression
|
||||
### Query Cache System
|
||||
- Optional 512MB query result cache with zstd level 9 compression (disabled by default to reduce memory usage)
|
||||
- Enable with `ORLY_QUERY_CACHE_DISABLED=false`
|
||||
- Filter normalization ensures cache hits regardless of filter field ordering
|
||||
- Configurable size (`ORLY_QUERY_CACHE_SIZE_MB`) and TTL (`ORLY_QUERY_CACHE_MAX_AGE`)
|
||||
- Dramatically reduces database load for repeated queries (common in Nostr clients)
|
||||
@@ -771,7 +799,7 @@ Files modified:
|
||||
3. GitHub Actions workflow builds binaries for multiple platforms
|
||||
4. Release created automatically with binaries and checksums
|
||||
|
||||
## Recent Features (v0.31.x)
|
||||
## Recent Features (v0.34.x)
|
||||
|
||||
### Directory Spider
|
||||
The directory spider (`pkg/spider/directory.go`) automatically discovers and syncs metadata from other relays:
|
||||
@@ -789,11 +817,21 @@ The Neo4j backend (`pkg/neo4j/`) includes Web of Trust (WoT) extensions:
|
||||
- **WoT Schema**: See `pkg/neo4j/WOT_SPEC.md` for full specification
|
||||
- **Schema Modifications**: See `pkg/neo4j/MODIFYING_SCHEMA.md` for how to update
|
||||
|
||||
### WasmDB IndexedDB Backend
|
||||
WebAssembly-compatible database backend (`pkg/wasmdb/`):
|
||||
- Enables running ORLY in browser environments
|
||||
- Uses IndexedDB as storage via `aperturerobotics/go-indexeddb`
|
||||
- Full query compatibility with Badger's index schema
|
||||
- Object stores map to index prefixes (evt, eid, kc-, pc-, etc.)
|
||||
- Range queries use IndexedDB cursors with KeyRange bounds
|
||||
- Build with `GOOS=js GOARCH=wasm go build`
|
||||
|
||||
### Policy System Enhancements
|
||||
- **Default-Permissive Model**: Read and write are allowed by default unless restrictions are configured
|
||||
- **Write-Only Validation**: Size, age, tag validations apply ONLY to writes
|
||||
- **Read-Only Filtering**: `read_allow`, `read_follows_whitelist`, `privileged` apply ONLY to reads
|
||||
- **Separate Follows Whitelists**: `read_follows_whitelist` and `write_follows_whitelist` for fine-grained control
|
||||
- **Permissive Mode Overrides**: `read_allow_permissive` and `write_allow_permissive` (global rule only) override kind whitelist for independent read/write control
|
||||
- **Scripts**: Policy scripts execute ONLY for write operations
|
||||
- **Reference Documentation**: `docs/POLICY_CONFIGURATION_REFERENCE.md` provides authoritative read vs write applicability
|
||||
- See also: `pkg/policy/README.md` for quick reference
|
||||
@@ -825,6 +863,8 @@ The Neo4j backend (`pkg/neo4j/`) includes Web of Trust (WoT) extensions:
|
||||
"read_deny": ["pubkey_hex"], // Pubkeys denied from reading
|
||||
"read_follows_whitelist": ["pubkey_hex"], // Pubkeys whose follows can read
|
||||
"write_follows_whitelist": ["pubkey_hex"], // Pubkeys whose follows can write
|
||||
"read_allow_permissive": false, // Override kind whitelist for reads
|
||||
"write_allow_permissive": false, // Override kind whitelist for writes
|
||||
"script": "/path/to/script.sh" // External validation script
|
||||
},
|
||||
"rules": {
|
||||
@@ -843,9 +883,11 @@ The Neo4j backend (`pkg/neo4j/`) includes Web of Trust (WoT) extensions:
|
||||
| `read_allow` | READ | Only listed pubkeys can read |
|
||||
| `read_deny` | READ | Listed pubkeys denied (if no read_allow) |
|
||||
| `read_follows_whitelist` | READ | Named pubkeys + their follows can read |
|
||||
| `read_allow_permissive` | READ | Overrides kind whitelist for reads (global only) |
|
||||
| `write_allow` | WRITE | Only listed pubkeys can write |
|
||||
| `write_deny` | WRITE | Listed pubkeys denied (if no write_allow) |
|
||||
| `write_follows_whitelist` | WRITE | Named pubkeys + their follows can write |
|
||||
| `write_allow_permissive` | WRITE | Overrides kind whitelist for writes (global only) |
|
||||
| `privileged` | READ | Only author + p-tag recipients can read |
|
||||
|
||||
**Nil Policy Error Handling:**
|
||||
@@ -877,4 +919,5 @@ Invite-based access control system:
|
||||
| `pkg/neo4j/WOT_SPEC.md` | Web of Trust schema specification |
|
||||
| `pkg/neo4j/MODIFYING_SCHEMA.md` | How to modify Neo4j schema |
|
||||
| `pkg/neo4j/TESTING.md` | Neo4j testing guide |
|
||||
| `.claude/skills/cypher/SKILL.md` | Cypher query language skill for Neo4j |
|
||||
| `readme.adoc` | Project README with feature overview |
|
||||
|
||||
481
pkg/neo4j/bugfix_test.go
Normal file
481
pkg/neo4j/bugfix_test.go
Normal file
@@ -0,0 +1,481 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
// Integration tests for Neo4j bug fixes.
|
||||
// These tests require a running Neo4j instance and are not run by default.
|
||||
//
|
||||
// To run these tests:
|
||||
// 1. Start Neo4j: docker compose -f pkg/neo4j/docker-compose.yaml up -d
|
||||
// 2. Run tests: go test -tags=integration ./pkg/neo4j/... -v
|
||||
// 3. Stop Neo4j: docker compose -f pkg/neo4j/docker-compose.yaml down
|
||||
//
|
||||
// Or use the helper script:
|
||||
// ./scripts/test-neo4j-integration.sh
|
||||
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/tag"
|
||||
)
|
||||
|
||||
// TestLargeContactListBatching tests that kind 3 events with many follows
|
||||
// don't cause OOM errors by verifying batched processing works correctly.
|
||||
// This tests the fix for: "java out of memory error broadcasting a kind 3 event"
|
||||
func TestLargeContactListBatching(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
// Generate a test pubkey for the author
|
||||
authorPubkey := generateTestPubkey()
|
||||
|
||||
// Create a kind 3 event with 2000 follows (enough to require multiple batches)
|
||||
// With contactListBatchSize = 1000, this will require 2 batches
|
||||
numFollows := 2000
|
||||
followPubkeys := make([]string, numFollows)
|
||||
tagsList := tag.NewS()
|
||||
|
||||
for i := 0; i < numFollows; i++ {
|
||||
followPubkeys[i] = generateTestPubkey()
|
||||
tagsList.Append(tag.NewFromAny("p", followPubkeys[i]))
|
||||
}
|
||||
|
||||
// Create the kind 3 event
|
||||
ev := createTestEvent(t, authorPubkey, 3, tagsList, "")
|
||||
|
||||
// Save the event - this should NOT cause OOM with batching
|
||||
exists, err := testDB.SaveEvent(ctx, ev)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save large contact list event: %v", err)
|
||||
}
|
||||
if exists {
|
||||
t.Fatal("Event unexpectedly already exists")
|
||||
}
|
||||
|
||||
// Verify the event was saved
|
||||
eventID := hex.EncodeToString(ev.ID[:])
|
||||
checkCypher := "MATCH (e:Event {id: $id}) RETURN e.id AS id"
|
||||
result, err := testDB.ExecuteRead(ctx, checkCypher, map[string]any{"id": eventID})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check event existence: %v", err)
|
||||
}
|
||||
if !result.Next(ctx) {
|
||||
t.Fatal("Event was not saved")
|
||||
}
|
||||
|
||||
// Verify FOLLOWS relationships were created
|
||||
followsCypher := `
|
||||
MATCH (author:NostrUser {pubkey: $pubkey})-[:FOLLOWS]->(followed:NostrUser)
|
||||
RETURN count(followed) AS count
|
||||
`
|
||||
result, err = testDB.ExecuteRead(ctx, followsCypher, map[string]any{"pubkey": authorPubkey})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count follows: %v", err)
|
||||
}
|
||||
|
||||
if result.Next(ctx) {
|
||||
count := result.Record().Values[0].(int64)
|
||||
if count != int64(numFollows) {
|
||||
t.Errorf("Expected %d follows, got %d", numFollows, count)
|
||||
}
|
||||
t.Logf("Successfully created %d FOLLOWS relationships in batches", count)
|
||||
} else {
|
||||
t.Fatal("No follow count returned")
|
||||
}
|
||||
|
||||
// Verify ProcessedSocialEvent was created with correct relationship_count
|
||||
psCypher := `
|
||||
MATCH (ps:ProcessedSocialEvent {pubkey: $pubkey, event_kind: 3})
|
||||
RETURN ps.relationship_count AS count
|
||||
`
|
||||
result, err = testDB.ExecuteRead(ctx, psCypher, map[string]any{"pubkey": authorPubkey})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check ProcessedSocialEvent: %v", err)
|
||||
}
|
||||
|
||||
if result.Next(ctx) {
|
||||
count := result.Record().Values[0].(int64)
|
||||
if count != int64(numFollows) {
|
||||
t.Errorf("ProcessedSocialEvent.relationship_count: expected %d, got %d", numFollows, count)
|
||||
}
|
||||
} else {
|
||||
t.Fatal("ProcessedSocialEvent not created")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMultipleETagsWithClause tests that events with multiple e-tags
|
||||
// generate valid Cypher (WITH between FOREACH and OPTIONAL MATCH).
|
||||
// This tests the fix for: "WITH is required between FOREACH and MATCH"
|
||||
func TestMultipleETagsWithClause(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
// First, create some events that will be referenced
|
||||
refEventIDs := make([]string, 5)
|
||||
for i := 0; i < 5; i++ {
|
||||
refPubkey := generateTestPubkey()
|
||||
refTags := tag.NewS()
|
||||
refEv := createTestEvent(t, refPubkey, 1, refTags, "referenced event")
|
||||
exists, err := testDB.SaveEvent(ctx, refEv)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save reference event %d: %v", i, err)
|
||||
}
|
||||
if exists {
|
||||
t.Fatalf("Reference event %d unexpectedly exists", i)
|
||||
}
|
||||
refEventIDs[i] = hex.EncodeToString(refEv.ID[:])
|
||||
}
|
||||
|
||||
// Create a kind 5 delete event that references multiple events (multiple e-tags)
|
||||
authorPubkey := generateTestPubkey()
|
||||
tagsList := tag.NewS()
|
||||
for _, refID := range refEventIDs {
|
||||
tagsList.Append(tag.NewFromAny("e", refID))
|
||||
}
|
||||
|
||||
// Create the kind 5 event with multiple e-tags
|
||||
ev := createTestEvent(t, authorPubkey, 5, tagsList, "")
|
||||
|
||||
// Save the event - this should NOT fail with Cypher syntax error
|
||||
exists, err := testDB.SaveEvent(ctx, ev)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save event with multiple e-tags: %v\n"+
|
||||
"This indicates the WITH clause fix is not working", err)
|
||||
}
|
||||
if exists {
|
||||
t.Fatal("Event unexpectedly already exists")
|
||||
}
|
||||
|
||||
// Verify the event was saved
|
||||
eventID := hex.EncodeToString(ev.ID[:])
|
||||
checkCypher := "MATCH (e:Event {id: $id}) RETURN e.id AS id"
|
||||
result, err := testDB.ExecuteRead(ctx, checkCypher, map[string]any{"id": eventID})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check event existence: %v", err)
|
||||
}
|
||||
if !result.Next(ctx) {
|
||||
t.Fatal("Event was not saved")
|
||||
}
|
||||
|
||||
// Verify REFERENCES relationships were created
|
||||
refCypher := `
|
||||
MATCH (e:Event {id: $id})-[:REFERENCES]->(ref:Event)
|
||||
RETURN count(ref) AS count
|
||||
`
|
||||
result, err = testDB.ExecuteRead(ctx, refCypher, map[string]any{"id": eventID})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count references: %v", err)
|
||||
}
|
||||
|
||||
if result.Next(ctx) {
|
||||
count := result.Record().Values[0].(int64)
|
||||
if count != int64(len(refEventIDs)) {
|
||||
t.Errorf("Expected %d REFERENCES relationships, got %d", len(refEventIDs), count)
|
||||
}
|
||||
t.Logf("Successfully created %d REFERENCES relationships", count)
|
||||
} else {
|
||||
t.Fatal("No reference count returned")
|
||||
}
|
||||
}
|
||||
|
||||
// TestLargeMuteListBatching tests that kind 10000 events with many mutes
|
||||
// don't cause OOM errors by verifying batched processing works correctly.
|
||||
func TestLargeMuteListBatching(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
// Generate a test pubkey for the author
|
||||
authorPubkey := generateTestPubkey()
|
||||
|
||||
// Create a kind 10000 event with 1500 mutes (enough to require 2 batches)
|
||||
numMutes := 1500
|
||||
tagsList := tag.NewS()
|
||||
|
||||
for i := 0; i < numMutes; i++ {
|
||||
mutePubkey := generateTestPubkey()
|
||||
tagsList.Append(tag.NewFromAny("p", mutePubkey))
|
||||
}
|
||||
|
||||
// Create the kind 10000 event
|
||||
ev := createTestEvent(t, authorPubkey, 10000, tagsList, "")
|
||||
|
||||
// Save the event - this should NOT cause OOM with batching
|
||||
exists, err := testDB.SaveEvent(ctx, ev)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save large mute list event: %v", err)
|
||||
}
|
||||
if exists {
|
||||
t.Fatal("Event unexpectedly already exists")
|
||||
}
|
||||
|
||||
// Verify MUTES relationships were created
|
||||
mutesCypher := `
|
||||
MATCH (author:NostrUser {pubkey: $pubkey})-[:MUTES]->(muted:NostrUser)
|
||||
RETURN count(muted) AS count
|
||||
`
|
||||
result, err := testDB.ExecuteRead(ctx, mutesCypher, map[string]any{"pubkey": authorPubkey})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count mutes: %v", err)
|
||||
}
|
||||
|
||||
if result.Next(ctx) {
|
||||
count := result.Record().Values[0].(int64)
|
||||
if count != int64(numMutes) {
|
||||
t.Errorf("Expected %d mutes, got %d", numMutes, count)
|
||||
}
|
||||
t.Logf("Successfully created %d MUTES relationships in batches", count)
|
||||
} else {
|
||||
t.Fatal("No mute count returned")
|
||||
}
|
||||
}
|
||||
|
||||
// TestContactListUpdate tests that updating a contact list (replacing one kind 3 with another)
|
||||
// correctly handles the diff and batching.
|
||||
func TestContactListUpdate(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
authorPubkey := generateTestPubkey()
|
||||
|
||||
// Create initial contact list with 500 follows
|
||||
initialFollows := make([]string, 500)
|
||||
tagsList1 := tag.NewS()
|
||||
for i := 0; i < 500; i++ {
|
||||
initialFollows[i] = generateTestPubkey()
|
||||
tagsList1.Append(tag.NewFromAny("p", initialFollows[i]))
|
||||
}
|
||||
|
||||
ev1 := createTestEventWithTimestamp(t, authorPubkey, 3, tagsList1, "", time.Now().Unix()-100)
|
||||
_, err := testDB.SaveEvent(ctx, ev1)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save initial contact list: %v", err)
|
||||
}
|
||||
|
||||
// Verify initial follows count
|
||||
countCypher := `
|
||||
MATCH (author:NostrUser {pubkey: $pubkey})-[:FOLLOWS]->(followed:NostrUser)
|
||||
RETURN count(followed) AS count
|
||||
`
|
||||
result, err := testDB.ExecuteRead(ctx, countCypher, map[string]any{"pubkey": authorPubkey})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count initial follows: %v", err)
|
||||
}
|
||||
if result.Next(ctx) {
|
||||
count := result.Record().Values[0].(int64)
|
||||
if count != 500 {
|
||||
t.Errorf("Initial follows: expected 500, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
// Create updated contact list: remove 100 old follows, add 200 new ones
|
||||
tagsList2 := tag.NewS()
|
||||
// Keep first 400 of the original follows
|
||||
for i := 0; i < 400; i++ {
|
||||
tagsList2.Append(tag.NewFromAny("p", initialFollows[i]))
|
||||
}
|
||||
// Add 200 new follows
|
||||
for i := 0; i < 200; i++ {
|
||||
tagsList2.Append(tag.NewFromAny("p", generateTestPubkey()))
|
||||
}
|
||||
|
||||
ev2 := createTestEventWithTimestamp(t, authorPubkey, 3, tagsList2, "", time.Now().Unix())
|
||||
_, err = testDB.SaveEvent(ctx, ev2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save updated contact list: %v", err)
|
||||
}
|
||||
|
||||
// Verify final follows count (should be 600)
|
||||
result, err = testDB.ExecuteRead(ctx, countCypher, map[string]any{"pubkey": authorPubkey})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count final follows: %v", err)
|
||||
}
|
||||
if result.Next(ctx) {
|
||||
count := result.Record().Values[0].(int64)
|
||||
if count != 600 {
|
||||
t.Errorf("Final follows: expected 600, got %d", count)
|
||||
}
|
||||
t.Logf("Contact list update successful: 500 -> 600 follows (removed 100, added 200)")
|
||||
}
|
||||
|
||||
// Verify old ProcessedSocialEvent is marked as superseded
|
||||
supersededCypher := `
|
||||
MATCH (ps:ProcessedSocialEvent {pubkey: $pubkey, event_kind: 3})
|
||||
WHERE ps.superseded_by IS NOT NULL
|
||||
RETURN count(ps) AS count
|
||||
`
|
||||
result, err = testDB.ExecuteRead(ctx, supersededCypher, map[string]any{"pubkey": authorPubkey})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check superseded events: %v", err)
|
||||
}
|
||||
if result.Next(ctx) {
|
||||
count := result.Record().Values[0].(int64)
|
||||
if count != 1 {
|
||||
t.Errorf("Expected 1 superseded ProcessedSocialEvent, got %d", count)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMixedTagsEvent tests that events with e-tags, p-tags, and other tags
|
||||
// all generate valid Cypher with proper WITH clauses.
|
||||
func TestMixedTagsEvent(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
// Create some referenced events
|
||||
refEventIDs := make([]string, 3)
|
||||
for i := 0; i < 3; i++ {
|
||||
refPubkey := generateTestPubkey()
|
||||
refTags := tag.NewS()
|
||||
refEv := createTestEvent(t, refPubkey, 1, refTags, "ref")
|
||||
testDB.SaveEvent(ctx, refEv)
|
||||
refEventIDs[i] = hex.EncodeToString(refEv.ID[:])
|
||||
}
|
||||
|
||||
// Create an event with mixed tags: e-tags, p-tags, and other tags
|
||||
authorPubkey := generateTestPubkey()
|
||||
tagsList := tag.NewS(
|
||||
// e-tags (event references)
|
||||
tag.NewFromAny("e", refEventIDs[0]),
|
||||
tag.NewFromAny("e", refEventIDs[1]),
|
||||
tag.NewFromAny("e", refEventIDs[2]),
|
||||
// p-tags (pubkey mentions)
|
||||
tag.NewFromAny("p", generateTestPubkey()),
|
||||
tag.NewFromAny("p", generateTestPubkey()),
|
||||
// other tags
|
||||
tag.NewFromAny("t", "nostr"),
|
||||
tag.NewFromAny("t", "test"),
|
||||
tag.NewFromAny("subject", "Test Subject"),
|
||||
)
|
||||
|
||||
ev := createTestEvent(t, authorPubkey, 1, tagsList, "Mixed tags test")
|
||||
|
||||
// Save the event - should not fail with Cypher syntax errors
|
||||
exists, err := testDB.SaveEvent(ctx, ev)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save event with mixed tags: %v", err)
|
||||
}
|
||||
if exists {
|
||||
t.Fatal("Event unexpectedly already exists")
|
||||
}
|
||||
|
||||
eventID := hex.EncodeToString(ev.ID[:])
|
||||
|
||||
// Verify REFERENCES relationships
|
||||
refCypher := `MATCH (e:Event {id: $id})-[:REFERENCES]->(ref:Event) RETURN count(ref) AS count`
|
||||
result, err := testDB.ExecuteRead(ctx, refCypher, map[string]any{"id": eventID})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count references: %v", err)
|
||||
}
|
||||
if result.Next(ctx) {
|
||||
count := result.Record().Values[0].(int64)
|
||||
if count != 3 {
|
||||
t.Errorf("Expected 3 REFERENCES, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify MENTIONS relationships
|
||||
mentionsCypher := `MATCH (e:Event {id: $id})-[:MENTIONS]->(u:NostrUser) RETURN count(u) AS count`
|
||||
result, err = testDB.ExecuteRead(ctx, mentionsCypher, map[string]any{"id": eventID})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count mentions: %v", err)
|
||||
}
|
||||
if result.Next(ctx) {
|
||||
count := result.Record().Values[0].(int64)
|
||||
if count != 2 {
|
||||
t.Errorf("Expected 2 MENTIONS, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify TAGGED_WITH relationships
|
||||
taggedCypher := `MATCH (e:Event {id: $id})-[:TAGGED_WITH]->(t:Tag) RETURN count(t) AS count`
|
||||
result, err = testDB.ExecuteRead(ctx, taggedCypher, map[string]any{"id": eventID})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count tags: %v", err)
|
||||
}
|
||||
if result.Next(ctx) {
|
||||
count := result.Record().Values[0].(int64)
|
||||
if count != 3 {
|
||||
t.Errorf("Expected 3 TAGGED_WITH, got %d", count)
|
||||
}
|
||||
}
|
||||
|
||||
t.Log("Mixed tags event saved successfully with all relationship types")
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
|
||||
func generateTestPubkey() string {
|
||||
b := make([]byte, 32)
|
||||
rand.Read(b)
|
||||
return hex.EncodeToString(b)
|
||||
}
|
||||
|
||||
func createTestEvent(t *testing.T, pubkey string, kind uint16, tagsList *tag.S, content string) *event.E {
|
||||
t.Helper()
|
||||
return createTestEventWithTimestamp(t, pubkey, kind, tagsList, content, time.Now().Unix())
|
||||
}
|
||||
|
||||
func createTestEventWithTimestamp(t *testing.T, pubkey string, kind uint16, tagsList *tag.S, content string, timestamp int64) *event.E {
|
||||
t.Helper()
|
||||
|
||||
// Decode pubkey
|
||||
pubkeyBytes, err := hex.DecodeString(pubkey)
|
||||
if err != nil {
|
||||
t.Fatalf("Invalid pubkey: %v", err)
|
||||
}
|
||||
|
||||
// Generate random ID and signature (for testing purposes)
|
||||
idBytes := make([]byte, 32)
|
||||
rand.Read(idBytes)
|
||||
sigBytes := make([]byte, 64)
|
||||
rand.Read(sigBytes)
|
||||
|
||||
// event.E uses []byte slices, not [32]byte arrays, so we need to assign directly
|
||||
ev := &event.E{
|
||||
Kind: kind,
|
||||
Tags: tagsList,
|
||||
Content: []byte(content),
|
||||
CreatedAt: timestamp,
|
||||
Pubkey: pubkeyBytes,
|
||||
ID: idBytes,
|
||||
Sig: sigBytes,
|
||||
}
|
||||
|
||||
return ev
|
||||
}
|
||||
@@ -84,7 +84,7 @@ LIMIT 1000`
|
||||
deleteParams := map[string]any{"id": idStr}
|
||||
|
||||
if _, err := n.ExecuteWrite(ctx, deleteCypher, deleteParams); err != nil {
|
||||
n.Logger.Warningf("failed to delete expired event %s: %v", idStr[:16], err)
|
||||
n.Logger.Warningf("failed to delete expired event %s: %v", safePrefix(idStr, 16), err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -117,7 +117,7 @@ func (n *N) ProcessDelete(ev *event.E, admins [][]byte) error {
|
||||
|
||||
// Check if author is an admin
|
||||
for _, adminPk := range admins {
|
||||
if string(ev.Pubkey[:]) == string(adminPk) {
|
||||
if string(ev.Pubkey) == string(adminPk) {
|
||||
isAdmin = true
|
||||
break
|
||||
}
|
||||
@@ -157,7 +157,7 @@ func (n *N) ProcessDelete(ev *event.E, admins [][]byte) error {
|
||||
}
|
||||
|
||||
// Check if deletion is allowed (same author or admin)
|
||||
canDelete := isAdmin || string(ev.Pubkey[:]) == string(pubkey)
|
||||
canDelete := isAdmin || string(ev.Pubkey) == string(pubkey)
|
||||
if canDelete {
|
||||
// Delete the event
|
||||
if err := n.DeleteEvent(ctx, eventID); err != nil {
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
@@ -14,27 +16,17 @@ import (
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
)
|
||||
|
||||
// All tests in this file use the shared testDB instance from testmain_test.go
|
||||
// to avoid Neo4j authentication rate limiting from too many connections.
|
||||
|
||||
func TestDeleteEvent(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -55,12 +47,12 @@ func TestDeleteEvent(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Verify event exists
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(ev.ID),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -71,12 +63,12 @@ func TestDeleteEvent(t *testing.T) {
|
||||
}
|
||||
|
||||
// Delete the event
|
||||
if err := db.DeleteEvent(ctx, ev.ID[:]); err != nil {
|
||||
if err := testDB.DeleteEvent(ctx, ev.ID[:]); err != nil {
|
||||
t.Fatalf("Failed to delete event: %v", err)
|
||||
}
|
||||
|
||||
// Verify event is deleted
|
||||
evs, err = db.QueryEvents(ctx, &filter.F{
|
||||
evs, err = testDB.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(ev.ID),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -90,26 +82,13 @@ func TestDeleteEvent(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDeleteEventBySerial(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -130,23 +109,23 @@ func TestDeleteEventBySerial(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Get serial
|
||||
serial, err := db.GetSerialById(ev.ID[:])
|
||||
serial, err := testDB.GetSerialById(ev.ID[:])
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serial: %v", err)
|
||||
}
|
||||
|
||||
// Delete by serial
|
||||
if err := db.DeleteEventBySerial(ctx, serial, ev); err != nil {
|
||||
if err := testDB.DeleteEventBySerial(ctx, serial, ev); err != nil {
|
||||
t.Fatalf("Failed to delete event by serial: %v", err)
|
||||
}
|
||||
|
||||
// Verify event is deleted
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(ev.ID),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -160,26 +139,13 @@ func TestDeleteEventBySerial(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessDelete_AuthorCanDeleteOwnEvent(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -200,7 +166,7 @@ func TestProcessDelete_AuthorCanDeleteOwnEvent(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, originalEvent); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, originalEvent); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
@@ -219,12 +185,12 @@ func TestProcessDelete_AuthorCanDeleteOwnEvent(t *testing.T) {
|
||||
}
|
||||
|
||||
// Process deletion (no admins)
|
||||
if err := db.ProcessDelete(deleteEvent, nil); err != nil {
|
||||
if err := testDB.ProcessDelete(deleteEvent, nil); err != nil {
|
||||
t.Fatalf("Failed to process delete: %v", err)
|
||||
}
|
||||
|
||||
// Verify original event is deleted
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(originalEvent.ID),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -238,26 +204,13 @@ func TestProcessDelete_AuthorCanDeleteOwnEvent(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessDelete_OtherUserCannotDelete(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
alice, _ := p8k.New()
|
||||
alice.Generate()
|
||||
@@ -276,7 +229,7 @@ func TestProcessDelete_OtherUserCannotDelete(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, aliceEvent); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, aliceEvent); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
@@ -294,10 +247,10 @@ func TestProcessDelete_OtherUserCannotDelete(t *testing.T) {
|
||||
}
|
||||
|
||||
// Process deletion (Bob is not an admin)
|
||||
_ = db.ProcessDelete(deleteEvent, nil)
|
||||
_ = testDB.ProcessDelete(deleteEvent, nil)
|
||||
|
||||
// Verify Alice's event still exists
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(aliceEvent.ID),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -311,26 +264,13 @@ func TestProcessDelete_OtherUserCannotDelete(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProcessDelete_AdminCanDeleteAnyEvent(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
alice, _ := p8k.New()
|
||||
alice.Generate()
|
||||
@@ -349,7 +289,7 @@ func TestProcessDelete_AdminCanDeleteAnyEvent(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, aliceEvent); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, aliceEvent); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
@@ -368,12 +308,12 @@ func TestProcessDelete_AdminCanDeleteAnyEvent(t *testing.T) {
|
||||
|
||||
// Process deletion with admin pubkey
|
||||
adminPubkeys := [][]byte{admin.Pub()}
|
||||
if err := db.ProcessDelete(deleteEvent, adminPubkeys); err != nil {
|
||||
if err := testDB.ProcessDelete(deleteEvent, adminPubkeys); err != nil {
|
||||
t.Fatalf("Failed to process delete: %v", err)
|
||||
}
|
||||
|
||||
// Verify Alice's event is deleted
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(aliceEvent.ID),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -387,26 +327,13 @@ func TestProcessDelete_AdminCanDeleteAnyEvent(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCheckForDeleted(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -427,12 +354,12 @@ func TestCheckForDeleted(t *testing.T) {
|
||||
t.Fatalf("Failed to sign target event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, targetEvent); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, targetEvent); err != nil {
|
||||
t.Fatalf("Failed to save target event: %v", err)
|
||||
}
|
||||
|
||||
// Check that event is not deleted (no deletion event exists)
|
||||
err = db.CheckForDeleted(targetEvent, nil)
|
||||
err = testDB.CheckForDeleted(targetEvent, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error for non-deleted event, got: %v", err)
|
||||
}
|
||||
@@ -450,12 +377,12 @@ func TestCheckForDeleted(t *testing.T) {
|
||||
t.Fatalf("Failed to sign delete event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, deleteEvent); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, deleteEvent); err != nil {
|
||||
t.Fatalf("Failed to save delete event: %v", err)
|
||||
}
|
||||
|
||||
// Now check should return error (event has been deleted)
|
||||
err = db.CheckForDeleted(targetEvent, nil)
|
||||
err = testDB.CheckForDeleted(targetEvent, nil)
|
||||
if err == nil {
|
||||
t.Fatal("Expected error for deleted event")
|
||||
}
|
||||
@@ -464,26 +391,13 @@ func TestCheckForDeleted(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReplaceableEventDeletion(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -504,12 +418,12 @@ func TestReplaceableEventDeletion(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, profileEvent); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, profileEvent); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Verify event exists
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(0)),
|
||||
Authors: tag.NewFromBytesSlice(signer.Pub()),
|
||||
})
|
||||
@@ -531,12 +445,12 @@ func TestReplaceableEventDeletion(t *testing.T) {
|
||||
t.Fatalf("Failed to sign newer event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, newerProfileEvent); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, newerProfileEvent); err != nil {
|
||||
t.Fatalf("Failed to save newer event: %v", err)
|
||||
}
|
||||
|
||||
// Query should return only the newer event
|
||||
evs, err = db.QueryEvents(ctx, &filter.F{
|
||||
evs, err = testDB.QueryEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(0)),
|
||||
Authors: tag.NewFromBytesSlice(signer.Pub()),
|
||||
})
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -17,27 +19,17 @@ import (
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
)
|
||||
|
||||
// All tests in this file use the shared testDB instance from testmain_test.go
|
||||
// to avoid Neo4j authentication rate limiting from too many connections.
|
||||
|
||||
func TestExpiration_SaveEventWithExpiration(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -61,12 +53,12 @@ func TestExpiration_SaveEventWithExpiration(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Query the event to verify it was saved
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(ev.ID),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -81,26 +73,13 @@ func TestExpiration_SaveEventWithExpiration(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestExpiration_DeleteExpiredEvents(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -124,7 +103,7 @@ func TestExpiration_DeleteExpiredEvents(t *testing.T) {
|
||||
t.Fatalf("Failed to sign expired event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, expiredEv); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, expiredEv); err != nil {
|
||||
t.Fatalf("Failed to save expired event: %v", err)
|
||||
}
|
||||
|
||||
@@ -142,7 +121,7 @@ func TestExpiration_DeleteExpiredEvents(t *testing.T) {
|
||||
t.Fatalf("Failed to sign valid event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, validEv); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, validEv); err != nil {
|
||||
t.Fatalf("Failed to save valid event: %v", err)
|
||||
}
|
||||
|
||||
@@ -157,12 +136,12 @@ func TestExpiration_DeleteExpiredEvents(t *testing.T) {
|
||||
t.Fatalf("Failed to sign permanent event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, permanentEv); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, permanentEv); err != nil {
|
||||
t.Fatalf("Failed to save permanent event: %v", err)
|
||||
}
|
||||
|
||||
// Verify all 3 events exist
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Authors: tag.NewFromBytesSlice(signer.Pub()),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -173,10 +152,10 @@ func TestExpiration_DeleteExpiredEvents(t *testing.T) {
|
||||
}
|
||||
|
||||
// Run DeleteExpired
|
||||
db.DeleteExpired()
|
||||
testDB.DeleteExpired()
|
||||
|
||||
// Verify only expired event was deleted
|
||||
evs, err = db.QueryEvents(ctx, &filter.F{
|
||||
evs, err = testDB.QueryEvents(ctx, &filter.F{
|
||||
Authors: tag.NewFromBytesSlice(signer.Pub()),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -210,26 +189,13 @@ func TestExpiration_DeleteExpiredEvents(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestExpiration_NoExpirationTag(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -250,15 +216,15 @@ func TestExpiration_NoExpirationTag(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Run DeleteExpired - event should not be deleted
|
||||
db.DeleteExpired()
|
||||
testDB.DeleteExpired()
|
||||
|
||||
// Verify event still exists
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(ev.ID),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -273,26 +239,13 @@ func TestExpiration_NoExpirationTag(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestExport_AllEvents(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -315,14 +268,14 @@ func TestExport_AllEvents(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Export all events
|
||||
var buf bytes.Buffer
|
||||
db.Export(ctx, &buf)
|
||||
testDB.Export(ctx, &buf)
|
||||
|
||||
// Parse the exported JSONL
|
||||
lines := bytes.Split(buf.Bytes(), []byte("\n"))
|
||||
@@ -346,26 +299,13 @@ func TestExport_AllEvents(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestExport_FilterByPubkey(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
// Create two signers
|
||||
alice, _ := p8k.New()
|
||||
@@ -388,7 +328,7 @@ func TestExport_FilterByPubkey(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -405,14 +345,14 @@ func TestExport_FilterByPubkey(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Export only Alice's events
|
||||
var buf bytes.Buffer
|
||||
db.Export(ctx, &buf, alice.Pub())
|
||||
testDB.Export(ctx, &buf, alice.Pub())
|
||||
|
||||
// Parse the exported JSONL
|
||||
lines := bytes.Split(buf.Bytes(), []byte("\n"))
|
||||
@@ -440,30 +380,17 @@ func TestExport_FilterByPubkey(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestExport_Empty(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
// Export from empty database
|
||||
var buf bytes.Buffer
|
||||
db.Export(ctx, &buf)
|
||||
testDB.Export(ctx, &buf)
|
||||
|
||||
// Should be empty or just whitespace
|
||||
content := bytes.TrimSpace(buf.Bytes())
|
||||
@@ -475,26 +402,13 @@ func TestExport_Empty(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestImportExport_RoundTrip(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, _ := p8k.New()
|
||||
signer.Generate()
|
||||
@@ -513,7 +427,7 @@ func TestImportExport_RoundTrip(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
originalEvents[i] = ev
|
||||
@@ -521,15 +435,15 @@ func TestImportExport_RoundTrip(t *testing.T) {
|
||||
|
||||
// Export events
|
||||
var buf bytes.Buffer
|
||||
db.Export(ctx, &buf)
|
||||
testDB.Export(ctx, &buf)
|
||||
|
||||
// Wipe database
|
||||
if err := db.Wipe(); err != nil {
|
||||
if err := testDB.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
|
||||
// Verify database is empty
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1)),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -540,10 +454,10 @@ func TestImportExport_RoundTrip(t *testing.T) {
|
||||
}
|
||||
|
||||
// Import events
|
||||
db.Import(bytes.NewReader(buf.Bytes()))
|
||||
testDB.Import(bytes.NewReader(buf.Bytes()))
|
||||
|
||||
// Verify events were restored
|
||||
evs, err = db.QueryEvents(ctx, &filter.F{
|
||||
evs, err = testDB.QueryEvents(ctx, &filter.F{
|
||||
Authors: tag.NewFromBytesSlice(signer.Pub()),
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
@@ -14,27 +16,17 @@ import (
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
)
|
||||
|
||||
// All tests in this file use the shared testDB instance from testmain_test.go
|
||||
// to avoid Neo4j authentication rate limiting from too many connections.
|
||||
|
||||
func TestFetchEventBySerial(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -55,18 +47,18 @@ func TestFetchEventBySerial(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Get the serial for this event
|
||||
serial, err := db.GetSerialById(ev.ID[:])
|
||||
serial, err := testDB.GetSerialById(ev.ID[:])
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serial by ID: %v", err)
|
||||
}
|
||||
|
||||
// Fetch event by serial
|
||||
fetchedEvent, err := db.FetchEventBySerial(serial)
|
||||
fetchedEvent, err := testDB.FetchEventBySerial(serial)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to fetch event by serial: %v", err)
|
||||
}
|
||||
@@ -98,28 +90,15 @@ func TestFetchEventBySerial(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFetchEventBySerial_NonExistent(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
// Try to fetch with non-existent serial
|
||||
nonExistentSerial := &types.Uint40{}
|
||||
nonExistentSerial.Set(0xFFFFFFFFFF) // Max value
|
||||
|
||||
_, err = db.FetchEventBySerial(nonExistentSerial)
|
||||
_, err := testDB.FetchEventBySerial(nonExistentSerial)
|
||||
if err == nil {
|
||||
t.Fatal("Expected error for non-existent serial")
|
||||
}
|
||||
@@ -128,26 +107,13 @@ func TestFetchEventBySerial_NonExistent(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestFetchEventsBySerials(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -172,11 +138,11 @@ func TestFetchEventsBySerials(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
serial, err := db.GetSerialById(ev.ID[:])
|
||||
serial, err := testDB.GetSerialById(ev.ID[:])
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serial: %v", err)
|
||||
}
|
||||
@@ -186,7 +152,7 @@ func TestFetchEventsBySerials(t *testing.T) {
|
||||
}
|
||||
|
||||
// Fetch all events by serials
|
||||
events, err := db.FetchEventsBySerials(serials)
|
||||
events, err := testDB.FetchEventsBySerials(serials)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to fetch events by serials: %v", err)
|
||||
}
|
||||
@@ -210,26 +176,13 @@ func TestFetchEventsBySerials(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetSerialById(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -250,12 +203,12 @@ func TestGetSerialById(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Get serial by ID
|
||||
serial, err := db.GetSerialById(ev.ID[:])
|
||||
serial, err := testDB.GetSerialById(ev.ID[:])
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serial by ID: %v", err)
|
||||
}
|
||||
@@ -272,27 +225,14 @@ func TestGetSerialById(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetSerialById_NonExistent(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
// Try to get serial for non-existent event
|
||||
fakeID, _ := hex.Dec("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")
|
||||
|
||||
_, err = db.GetSerialById(fakeID)
|
||||
_, err := testDB.GetSerialById(fakeID)
|
||||
if err == nil {
|
||||
t.Fatal("Expected error for non-existent event ID")
|
||||
}
|
||||
@@ -301,26 +241,13 @@ func TestGetSerialById_NonExistent(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetSerialsByIds(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -343,7 +270,7 @@ func TestGetSerialsByIds(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
@@ -352,7 +279,7 @@ func TestGetSerialsByIds(t *testing.T) {
|
||||
}
|
||||
|
||||
// Get serials by IDs
|
||||
serials, err := db.GetSerialsByIds(ids)
|
||||
serials, err := testDB.GetSerialsByIds(ids)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serials by IDs: %v", err)
|
||||
}
|
||||
@@ -365,26 +292,13 @@ func TestGetSerialsByIds(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetFullIdPubkeyBySerial(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -405,18 +319,18 @@ func TestGetFullIdPubkeyBySerial(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Get serial
|
||||
serial, err := db.GetSerialById(ev.ID[:])
|
||||
serial, err := testDB.GetSerialById(ev.ID[:])
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serial: %v", err)
|
||||
}
|
||||
|
||||
// Get full ID and pubkey
|
||||
idPkTs, err := db.GetFullIdPubkeyBySerial(serial)
|
||||
idPkTs, err := testDB.GetFullIdPubkeyBySerial(serial)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get full ID and pubkey: %v", err)
|
||||
}
|
||||
@@ -441,26 +355,13 @@ func TestGetFullIdPubkeyBySerial(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestQueryForSerials(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
cleanTestDatabase()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
ctx := context.Background()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -482,13 +383,13 @@ func TestQueryForSerials(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Query for serials
|
||||
serials, err := db.QueryForSerials(ctx, &filter.F{
|
||||
serials, err := testDB.QueryForSerials(ctx, &filter.F{
|
||||
Authors: tag.NewFromBytesSlice(signer.Pub()),
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -31,18 +31,25 @@ func IsBinaryEncoded(val []byte) bool {
|
||||
// NormalizePubkeyHex ensures a pubkey/event ID is in lowercase hex format.
|
||||
// It handles:
|
||||
// - Binary-encoded values (33 bytes with null terminator) -> converts to lowercase hex
|
||||
// - Raw binary values (32 bytes) -> converts to lowercase hex
|
||||
// - Uppercase hex strings -> converts to lowercase
|
||||
// - Already lowercase hex -> returns as-is
|
||||
//
|
||||
// This should be used for all pubkeys and event IDs before storing in Neo4j
|
||||
// to prevent duplicate nodes due to case differences.
|
||||
func NormalizePubkeyHex(val []byte) string {
|
||||
// Handle binary-encoded values from the nostr library
|
||||
// Handle binary-encoded values from the nostr library (33 bytes with null terminator)
|
||||
if IsBinaryEncoded(val) {
|
||||
// Convert binary to lowercase hex
|
||||
return hex.Enc(val[:HashLen])
|
||||
}
|
||||
|
||||
// Handle raw binary values (32 bytes) - common when passing ev.ID or ev.Pubkey directly
|
||||
if len(val) == HashLen {
|
||||
// Convert binary to lowercase hex
|
||||
return hex.Enc(val)
|
||||
}
|
||||
|
||||
// Handle hex strings (may be uppercase from external sources)
|
||||
if len(val) == HexEncodedLen {
|
||||
return strings.ToLower(string(val))
|
||||
|
||||
@@ -74,6 +74,11 @@ func TestNormalizePubkeyHex(t *testing.T) {
|
||||
input: binaryEncoded,
|
||||
expected: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
},
|
||||
{
|
||||
name: "Raw 32-byte binary to hex",
|
||||
input: testBytes,
|
||||
expected: "0000000000000000000000000000000000000000000000000000000000000001",
|
||||
},
|
||||
{
|
||||
name: "Lowercase hex passthrough",
|
||||
input: []byte("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/neo4j/neo4j-go-driver/v5/neo4j"
|
||||
"lol.mleku.dev"
|
||||
@@ -18,6 +20,16 @@ import (
|
||||
"next.orly.dev/pkg/utils/apputil"
|
||||
)
|
||||
|
||||
// maxConcurrentQueries limits the number of concurrent Neo4j queries to prevent
|
||||
// authentication rate limiting and connection exhaustion
|
||||
const maxConcurrentQueries = 10
|
||||
|
||||
// maxRetryAttempts is the maximum number of times to retry a query on rate limit
|
||||
const maxRetryAttempts = 3
|
||||
|
||||
// retryBaseDelay is the base delay for exponential backoff
|
||||
const retryBaseDelay = 500 * time.Millisecond
|
||||
|
||||
// N implements the database.Database interface using Neo4j as the storage backend
|
||||
type N struct {
|
||||
ctx context.Context
|
||||
@@ -34,6 +46,9 @@ type N struct {
|
||||
neo4jPassword string
|
||||
|
||||
ready chan struct{} // Closed when database is ready to serve requests
|
||||
|
||||
// querySem limits concurrent queries to prevent rate limiting
|
||||
querySem chan struct{}
|
||||
}
|
||||
|
||||
// Ensure N implements database.Database interface at compile time
|
||||
@@ -112,6 +127,7 @@ func NewWithConfig(
|
||||
neo4jUser: neo4jUser,
|
||||
neo4jPassword: neo4jPassword,
|
||||
ready: make(chan struct{}),
|
||||
querySem: make(chan struct{}, maxConcurrentQueries),
|
||||
}
|
||||
|
||||
// Ensure the data directory exists
|
||||
@@ -199,42 +215,139 @@ func (n *N) initNeo4jClient() error {
|
||||
}
|
||||
|
||||
|
||||
// ExecuteRead executes a read query against Neo4j
|
||||
// isRateLimitError checks if an error is due to authentication rate limiting
|
||||
func isRateLimitError(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
errStr := err.Error()
|
||||
return strings.Contains(errStr, "AuthenticationRateLimit") ||
|
||||
strings.Contains(errStr, "Too many failed authentication attempts")
|
||||
}
|
||||
|
||||
// acquireQuerySlot acquires a slot from the query semaphore
|
||||
func (n *N) acquireQuerySlot(ctx context.Context) error {
|
||||
select {
|
||||
case n.querySem <- struct{}{}:
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// releaseQuerySlot releases a slot back to the query semaphore
|
||||
func (n *N) releaseQuerySlot() {
|
||||
<-n.querySem
|
||||
}
|
||||
|
||||
// ExecuteRead executes a read query against Neo4j with rate limiting and retry
|
||||
// Returns a collected result that can be iterated after the session closes
|
||||
func (n *N) ExecuteRead(ctx context.Context, cypher string, params map[string]any) (*CollectedResult, error) {
|
||||
session := n.driver.NewSession(ctx, neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
|
||||
defer session.Close(ctx)
|
||||
// Acquire semaphore slot to limit concurrent queries
|
||||
if err := n.acquireQuerySlot(ctx); err != nil {
|
||||
return nil, fmt.Errorf("failed to acquire query slot: %w", err)
|
||||
}
|
||||
defer n.releaseQuerySlot()
|
||||
|
||||
result, err := session.Run(ctx, cypher, params)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("neo4j read query failed: %w", err)
|
||||
var lastErr error
|
||||
for attempt := 0; attempt < maxRetryAttempts; attempt++ {
|
||||
if attempt > 0 {
|
||||
// Exponential backoff
|
||||
delay := retryBaseDelay * time.Duration(1<<uint(attempt-1))
|
||||
n.Logger.Warningf("retrying read query after %v (attempt %d/%d)", delay, attempt+1, maxRetryAttempts)
|
||||
select {
|
||||
case <-time.After(delay):
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
session := n.driver.NewSession(ctx, neo4j.SessionConfig{AccessMode: neo4j.AccessModeRead})
|
||||
result, err := session.Run(ctx, cypher, params)
|
||||
if err != nil {
|
||||
session.Close(ctx)
|
||||
lastErr = err
|
||||
if isRateLimitError(err) {
|
||||
continue // Retry on rate limit
|
||||
}
|
||||
return nil, fmt.Errorf("neo4j read query failed: %w", err)
|
||||
}
|
||||
|
||||
// Collect all records before the session closes
|
||||
// (Neo4j results are lazy and need an open session for iteration)
|
||||
records, err := result.Collect(ctx)
|
||||
session.Close(ctx)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
if isRateLimitError(err) {
|
||||
continue // Retry on rate limit
|
||||
}
|
||||
return nil, fmt.Errorf("neo4j result collect failed: %w", err)
|
||||
}
|
||||
|
||||
return &CollectedResult{records: records, index: -1}, nil
|
||||
}
|
||||
|
||||
// Collect all records before the session closes
|
||||
// (Neo4j results are lazy and need an open session for iteration)
|
||||
records, err := result.Collect(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("neo4j result collect failed: %w", err)
|
||||
}
|
||||
|
||||
return &CollectedResult{records: records, index: -1}, nil
|
||||
return nil, fmt.Errorf("neo4j read query failed after %d attempts: %w", maxRetryAttempts, lastErr)
|
||||
}
|
||||
|
||||
// ExecuteWrite executes a write query against Neo4j
|
||||
// ExecuteWrite executes a write query against Neo4j with rate limiting and retry
|
||||
func (n *N) ExecuteWrite(ctx context.Context, cypher string, params map[string]any) (neo4j.ResultWithContext, error) {
|
||||
session := n.driver.NewSession(ctx, neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
|
||||
defer session.Close(ctx)
|
||||
// Acquire semaphore slot to limit concurrent queries
|
||||
if err := n.acquireQuerySlot(ctx); err != nil {
|
||||
return nil, fmt.Errorf("failed to acquire query slot: %w", err)
|
||||
}
|
||||
defer n.releaseQuerySlot()
|
||||
|
||||
result, err := session.Run(ctx, cypher, params)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("neo4j write query failed: %w", err)
|
||||
var lastErr error
|
||||
for attempt := 0; attempt < maxRetryAttempts; attempt++ {
|
||||
if attempt > 0 {
|
||||
// Exponential backoff
|
||||
delay := retryBaseDelay * time.Duration(1<<uint(attempt-1))
|
||||
n.Logger.Warningf("retrying write query after %v (attempt %d/%d)", delay, attempt+1, maxRetryAttempts)
|
||||
select {
|
||||
case <-time.After(delay):
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
session := n.driver.NewSession(ctx, neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
|
||||
result, err := session.Run(ctx, cypher, params)
|
||||
if err != nil {
|
||||
session.Close(ctx)
|
||||
lastErr = err
|
||||
if isRateLimitError(err) {
|
||||
continue // Retry on rate limit
|
||||
}
|
||||
return nil, fmt.Errorf("neo4j write query failed: %w", err)
|
||||
}
|
||||
|
||||
// Consume the result to ensure the query completes before closing session
|
||||
_, err = result.Consume(ctx)
|
||||
session.Close(ctx)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
if isRateLimitError(err) {
|
||||
continue // Retry on rate limit
|
||||
}
|
||||
return nil, fmt.Errorf("neo4j write consume failed: %w", err)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
return result, nil
|
||||
return nil, fmt.Errorf("neo4j write query failed after %d attempts: %w", maxRetryAttempts, lastErr)
|
||||
}
|
||||
|
||||
// ExecuteWriteTransaction executes a transactional write operation
|
||||
// ExecuteWriteTransaction executes a transactional write operation with rate limiting
|
||||
func (n *N) ExecuteWriteTransaction(ctx context.Context, work func(tx neo4j.ManagedTransaction) (any, error)) (any, error) {
|
||||
// Acquire semaphore slot to limit concurrent queries
|
||||
if err := n.acquireQuerySlot(ctx); err != nil {
|
||||
return nil, fmt.Errorf("failed to acquire query slot: %w", err)
|
||||
}
|
||||
defer n.releaseQuerySlot()
|
||||
|
||||
session := n.driver.NewSession(ctx, neo4j.SessionConfig{AccessMode: neo4j.AccessModeWrite})
|
||||
defer session.Close(ctx)
|
||||
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -10,27 +11,15 @@ import (
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
)
|
||||
|
||||
// All tests in this file use the shared testDB instance from testmain_test.go
|
||||
// to avoid Neo4j authentication rate limiting from too many connections.
|
||||
|
||||
func TestNIP43_AddAndRemoveMember(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
cleanTestDatabase()
|
||||
|
||||
signer, _ := p8k.New()
|
||||
signer.Generate()
|
||||
@@ -38,12 +27,12 @@ func TestNIP43_AddAndRemoveMember(t *testing.T) {
|
||||
|
||||
// Add member
|
||||
inviteCode := "test-invite-123"
|
||||
if err := db.AddNIP43Member(pubkey, inviteCode); err != nil {
|
||||
if err := testDB.AddNIP43Member(pubkey, inviteCode); err != nil {
|
||||
t.Fatalf("Failed to add NIP-43 member: %v", err)
|
||||
}
|
||||
|
||||
// Check membership
|
||||
isMember, err := db.IsNIP43Member(pubkey)
|
||||
isMember, err := testDB.IsNIP43Member(pubkey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check membership: %v", err)
|
||||
}
|
||||
@@ -52,7 +41,7 @@ func TestNIP43_AddAndRemoveMember(t *testing.T) {
|
||||
}
|
||||
|
||||
// Get membership details
|
||||
membership, err := db.GetNIP43Membership(pubkey)
|
||||
membership, err := testDB.GetNIP43Membership(pubkey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get membership: %v", err)
|
||||
}
|
||||
@@ -61,12 +50,12 @@ func TestNIP43_AddAndRemoveMember(t *testing.T) {
|
||||
}
|
||||
|
||||
// Remove member
|
||||
if err := db.RemoveNIP43Member(pubkey); err != nil {
|
||||
if err := testDB.RemoveNIP43Member(pubkey); err != nil {
|
||||
t.Fatalf("Failed to remove member: %v", err)
|
||||
}
|
||||
|
||||
// Verify no longer a member
|
||||
isMember, _ = db.IsNIP43Member(pubkey)
|
||||
isMember, _ = testDB.IsNIP43Member(pubkey)
|
||||
if isMember {
|
||||
t.Fatal("Expected pubkey to not be a member after removal")
|
||||
}
|
||||
@@ -75,26 +64,11 @@ func TestNIP43_AddAndRemoveMember(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNIP43_GetAllMembers(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
cleanTestDatabase()
|
||||
|
||||
// Add multiple members
|
||||
var pubkeys [][]byte
|
||||
@@ -104,13 +78,13 @@ func TestNIP43_GetAllMembers(t *testing.T) {
|
||||
pubkey := signer.Pub()
|
||||
pubkeys = append(pubkeys, pubkey)
|
||||
|
||||
if err := db.AddNIP43Member(pubkey, "invite"+string(rune('A'+i))); err != nil {
|
||||
if err := testDB.AddNIP43Member(pubkey, "invite"+string(rune('A'+i))); err != nil {
|
||||
t.Fatalf("Failed to add member %d: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Get all members
|
||||
members, err := db.GetAllNIP43Members()
|
||||
members, err := testDB.GetAllNIP43Members()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get all members: %v", err)
|
||||
}
|
||||
@@ -135,36 +109,21 @@ func TestNIP43_GetAllMembers(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNIP43_InviteCode(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
cleanTestDatabase()
|
||||
|
||||
// Store valid invite code (expires in 1 hour)
|
||||
validCode := "valid-code-123"
|
||||
expiresAt := time.Now().Add(1 * time.Hour)
|
||||
if err := db.StoreInviteCode(validCode, expiresAt); err != nil {
|
||||
if err := testDB.StoreInviteCode(validCode, expiresAt); err != nil {
|
||||
t.Fatalf("Failed to store invite code: %v", err)
|
||||
}
|
||||
|
||||
// Validate the code
|
||||
isValid, err := db.ValidateInviteCode(validCode)
|
||||
isValid, err := testDB.ValidateInviteCode(validCode)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to validate invite code: %v", err)
|
||||
}
|
||||
@@ -173,7 +132,7 @@ func TestNIP43_InviteCode(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test non-existent code
|
||||
isValid, err = db.ValidateInviteCode("non-existent-code")
|
||||
isValid, err = testDB.ValidateInviteCode("non-existent-code")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to validate non-existent code: %v", err)
|
||||
}
|
||||
@@ -182,12 +141,12 @@ func TestNIP43_InviteCode(t *testing.T) {
|
||||
}
|
||||
|
||||
// Delete the invite code
|
||||
if err := db.DeleteInviteCode(validCode); err != nil {
|
||||
if err := testDB.DeleteInviteCode(validCode); err != nil {
|
||||
t.Fatalf("Failed to delete invite code: %v", err)
|
||||
}
|
||||
|
||||
// Verify code is no longer valid
|
||||
isValid, _ = db.ValidateInviteCode(validCode)
|
||||
isValid, _ = testDB.ValidateInviteCode(validCode)
|
||||
if isValid {
|
||||
t.Fatal("Expected deleted code to be invalid")
|
||||
}
|
||||
@@ -196,36 +155,21 @@ func TestNIP43_InviteCode(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNIP43_ExpiredInviteCode(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
cleanTestDatabase()
|
||||
|
||||
// Store expired invite code (expired 1 hour ago)
|
||||
expiredCode := "expired-code-123"
|
||||
expiresAt := time.Now().Add(-1 * time.Hour)
|
||||
if err := db.StoreInviteCode(expiredCode, expiresAt); err != nil {
|
||||
if err := testDB.StoreInviteCode(expiredCode, expiresAt); err != nil {
|
||||
t.Fatalf("Failed to store expired invite code: %v", err)
|
||||
}
|
||||
|
||||
// Validate should return false for expired code
|
||||
isValid, err := db.ValidateInviteCode(expiredCode)
|
||||
isValid, err := testDB.ValidateInviteCode(expiredCode)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to validate expired code: %v", err)
|
||||
}
|
||||
@@ -237,49 +181,34 @@ func TestNIP43_ExpiredInviteCode(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNIP43_DuplicateMember(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
cleanTestDatabase()
|
||||
|
||||
signer, _ := p8k.New()
|
||||
signer.Generate()
|
||||
pubkey := signer.Pub()
|
||||
|
||||
// Add member first time
|
||||
if err := db.AddNIP43Member(pubkey, "invite1"); err != nil {
|
||||
if err := testDB.AddNIP43Member(pubkey, "invite1"); err != nil {
|
||||
t.Fatalf("Failed to add member: %v", err)
|
||||
}
|
||||
|
||||
// Add same member again (should not error, just update)
|
||||
if err := db.AddNIP43Member(pubkey, "invite2"); err != nil {
|
||||
if err := testDB.AddNIP43Member(pubkey, "invite2"); err != nil {
|
||||
t.Fatalf("Failed to re-add member: %v", err)
|
||||
}
|
||||
|
||||
// Check membership still exists
|
||||
isMember, _ := db.IsNIP43Member(pubkey)
|
||||
isMember, _ := testDB.IsNIP43Member(pubkey)
|
||||
if !isMember {
|
||||
t.Fatal("Expected pubkey to still be a member")
|
||||
}
|
||||
|
||||
// Get all members should have only 1 entry
|
||||
members, _ := db.GetAllNIP43Members()
|
||||
members, _ := testDB.GetAllNIP43Members()
|
||||
if len(members) != 1 {
|
||||
t.Fatalf("Expected 1 member, got %d", len(members))
|
||||
}
|
||||
@@ -288,26 +217,11 @@ func TestNIP43_DuplicateMember(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNIP43_MembershipPersistence(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
cleanTestDatabase()
|
||||
|
||||
signer, _ := p8k.New()
|
||||
signer.Generate()
|
||||
@@ -315,12 +229,12 @@ func TestNIP43_MembershipPersistence(t *testing.T) {
|
||||
|
||||
// Add member
|
||||
inviteCode := "persistence-test"
|
||||
if err := db.AddNIP43Member(pubkey, inviteCode); err != nil {
|
||||
if err := testDB.AddNIP43Member(pubkey, inviteCode); err != nil {
|
||||
t.Fatalf("Failed to add member: %v", err)
|
||||
}
|
||||
|
||||
// Get membership and verify all fields
|
||||
membership, err := db.GetNIP43Membership(pubkey)
|
||||
membership, err := testDB.GetNIP43Membership(pubkey)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get membership: %v", err)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/filter"
|
||||
@@ -192,6 +193,16 @@ func (n *N) buildCypherQuery(f *filter.F, includeDeleteEvents bool) (string, map
|
||||
whereClauses = append(whereClauses, "e.kind <> 5")
|
||||
}
|
||||
|
||||
// Filter out expired events (NIP-40) unless querying by explicit IDs
|
||||
// Events with expiration > 0 that have passed are hidden from results
|
||||
// EXCEPT when the query includes specific event IDs (allowing explicit lookup)
|
||||
hasExplicitIds := f.Ids != nil && len(f.Ids.T) > 0
|
||||
if !hasExplicitIds {
|
||||
params["now"] = time.Now().Unix()
|
||||
// Show events where either: no expiration (expiration = 0) OR expiration hasn't passed yet
|
||||
whereClauses = append(whereClauses, "(e.expiration = 0 OR e.expiration > $now)")
|
||||
}
|
||||
|
||||
// Build WHERE clause
|
||||
whereClause := ""
|
||||
if len(whereClauses) > 0 {
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
@@ -14,37 +16,11 @@ import (
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
)
|
||||
|
||||
// setupTestDatabase creates a fresh Neo4j database connection for testing
|
||||
func setupTestDatabase(t *testing.T) (*N, context.Context, context.CancelFunc) {
|
||||
t.Helper()
|
||||
// All tests in this file use the shared testDB instance from testmain_test.go
|
||||
// to avoid Neo4j authentication rate limiting from too many connections.
|
||||
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
cancel()
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
db.Close()
|
||||
cancel()
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
|
||||
return db, ctx, cancel
|
||||
}
|
||||
|
||||
// createTestSigner creates a new signer for test events
|
||||
func createTestSigner(t *testing.T) *p8k.Signer {
|
||||
// createTestSignerLocal creates a new signer for test events
|
||||
func createTestSignerLocal(t *testing.T) *p8k.Signer {
|
||||
t.Helper()
|
||||
|
||||
signer, err := p8k.New()
|
||||
@@ -57,8 +33,8 @@ func createTestSigner(t *testing.T) *p8k.Signer {
|
||||
return signer
|
||||
}
|
||||
|
||||
// createAndSaveEvent creates a signed event and saves it to the database
|
||||
func createAndSaveEvent(t *testing.T, ctx context.Context, db *N, signer *p8k.Signer, k uint16, content string, tags *tag.S, ts int64) *event.E {
|
||||
// createAndSaveEventLocal creates a signed event and saves it to the database
|
||||
func createAndSaveEventLocal(t *testing.T, ctx context.Context, signer *p8k.Signer, k uint16, content string, tags *tag.S, ts int64) *event.E {
|
||||
t.Helper()
|
||||
|
||||
ev := event.New()
|
||||
@@ -72,7 +48,7 @@ func createAndSaveEvent(t *testing.T, ctx context.Context, db *N, signer *p8k.Si
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
@@ -80,17 +56,20 @@ func createAndSaveEvent(t *testing.T, ctx context.Context, db *N, signer *p8k.Si
|
||||
}
|
||||
|
||||
func TestQueryEventsByID(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
signer := createTestSigner(t)
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
signer := createTestSignerLocal(t)
|
||||
|
||||
// Create and save a test event
|
||||
ev := createAndSaveEvent(t, ctx, db, signer, 1, "Test event for ID query", nil, timestamp.Now().V)
|
||||
ev := createAndSaveEventLocal(t, ctx, signer, 1, "Test event for ID query", nil, timestamp.Now().V)
|
||||
|
||||
// Query by ID
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Ids: tag.NewFromBytesSlice(ev.ID),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -110,21 +89,24 @@ func TestQueryEventsByID(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestQueryEventsByKind(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
signer := createTestSigner(t)
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
signer := createTestSignerLocal(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create events of different kinds
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Kind 1 event A", nil, baseTs)
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Kind 1 event B", nil, baseTs+1)
|
||||
createAndSaveEvent(t, ctx, db, signer, 7, "Kind 7 reaction", nil, baseTs+2)
|
||||
createAndSaveEvent(t, ctx, db, signer, 30023, "Kind 30023 article", nil, baseTs+3)
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Kind 1 event A", nil, baseTs)
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Kind 1 event B", nil, baseTs+1)
|
||||
createAndSaveEventLocal(t, ctx, signer, 7, "Kind 7 reaction", nil, baseTs+2)
|
||||
createAndSaveEventLocal(t, ctx, signer, 30023, "Kind 30023 article", nil, baseTs+3)
|
||||
|
||||
// Query for kind 1
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1)),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -145,21 +127,24 @@ func TestQueryEventsByKind(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestQueryEventsByAuthor(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
alice := createTestSigner(t)
|
||||
bob := createTestSigner(t)
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
alice := createTestSignerLocal(t)
|
||||
bob := createTestSignerLocal(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create events from different authors
|
||||
createAndSaveEvent(t, ctx, db, alice, 1, "Alice's event 1", nil, baseTs)
|
||||
createAndSaveEvent(t, ctx, db, alice, 1, "Alice's event 2", nil, baseTs+1)
|
||||
createAndSaveEvent(t, ctx, db, bob, 1, "Bob's event", nil, baseTs+2)
|
||||
createAndSaveEventLocal(t, ctx, alice, 1, "Alice's event 1", nil, baseTs)
|
||||
createAndSaveEventLocal(t, ctx, alice, 1, "Alice's event 2", nil, baseTs+1)
|
||||
createAndSaveEventLocal(t, ctx, bob, 1, "Bob's event", nil, baseTs+2)
|
||||
|
||||
// Query for Alice's events
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Authors: tag.NewFromBytesSlice(alice.Pub()),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -181,21 +166,24 @@ func TestQueryEventsByAuthor(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestQueryEventsByTimeRange(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
signer := createTestSigner(t)
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
signer := createTestSignerLocal(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create events at different times
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Old event", nil, baseTs-7200) // 2 hours ago
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Recent event", nil, baseTs-1800) // 30 min ago
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Current event", nil, baseTs)
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Old event", nil, baseTs-7200) // 2 hours ago
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Recent event", nil, baseTs-1800) // 30 min ago
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Current event", nil, baseTs)
|
||||
|
||||
// Query for events in the last hour
|
||||
since := ×tamp.T{V: baseTs - 3600}
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Since: since,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -216,23 +204,26 @@ func TestQueryEventsByTimeRange(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestQueryEventsByTag(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
signer := createTestSigner(t)
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
signer := createTestSignerLocal(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create events with tags
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Bitcoin post",
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Bitcoin post",
|
||||
tag.NewS(tag.NewFromAny("t", "bitcoin")), baseTs)
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Nostr post",
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Nostr post",
|
||||
tag.NewS(tag.NewFromAny("t", "nostr")), baseTs+1)
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Bitcoin and Nostr post",
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Bitcoin and Nostr post",
|
||||
tag.NewS(tag.NewFromAny("t", "bitcoin"), tag.NewFromAny("t", "nostr")), baseTs+2)
|
||||
|
||||
// Query for bitcoin tagged events
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Tags: tag.NewS(tag.NewFromAny("t", "bitcoin")),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -247,21 +238,24 @@ func TestQueryEventsByTag(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestQueryEventsByKindAndAuthor(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
alice := createTestSigner(t)
|
||||
bob := createTestSigner(t)
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
alice := createTestSignerLocal(t)
|
||||
bob := createTestSignerLocal(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create events
|
||||
createAndSaveEvent(t, ctx, db, alice, 1, "Alice note", nil, baseTs)
|
||||
createAndSaveEvent(t, ctx, db, alice, 7, "Alice reaction", nil, baseTs+1)
|
||||
createAndSaveEvent(t, ctx, db, bob, 1, "Bob note", nil, baseTs+2)
|
||||
createAndSaveEventLocal(t, ctx, alice, 1, "Alice note", nil, baseTs)
|
||||
createAndSaveEventLocal(t, ctx, alice, 7, "Alice reaction", nil, baseTs+1)
|
||||
createAndSaveEventLocal(t, ctx, bob, 1, "Bob note", nil, baseTs+2)
|
||||
|
||||
// Query for Alice's kind 1 events
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1)),
|
||||
Authors: tag.NewFromBytesSlice(alice.Pub()),
|
||||
})
|
||||
@@ -277,21 +271,24 @@ func TestQueryEventsByKindAndAuthor(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestQueryEventsWithLimit(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
signer := createTestSigner(t)
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
signer := createTestSignerLocal(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create many events
|
||||
for i := 0; i < 20; i++ {
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Event", nil, baseTs+int64(i))
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Event", nil, baseTs+int64(i))
|
||||
}
|
||||
|
||||
// Query with limit
|
||||
limit := uint(5)
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1)),
|
||||
Limit: &limit,
|
||||
})
|
||||
@@ -307,20 +304,23 @@ func TestQueryEventsWithLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestQueryEventsOrderByCreatedAt(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
signer := createTestSigner(t)
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
signer := createTestSignerLocal(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create events at different times
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "First", nil, baseTs)
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Second", nil, baseTs+100)
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Third", nil, baseTs+200)
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "First", nil, baseTs)
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Second", nil, baseTs+100)
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Third", nil, baseTs+200)
|
||||
|
||||
// Query and verify order (should be descending by created_at)
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1)),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -343,12 +343,16 @@ func TestQueryEventsOrderByCreatedAt(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestQueryEventsEmpty(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Query for non-existent kind
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(99999)),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -363,20 +367,23 @@ func TestQueryEventsEmpty(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestQueryEventsMultipleKinds(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
signer := createTestSigner(t)
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
signer := createTestSignerLocal(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create events of different kinds
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Note", nil, baseTs)
|
||||
createAndSaveEvent(t, ctx, db, signer, 7, "Reaction", nil, baseTs+1)
|
||||
createAndSaveEvent(t, ctx, db, signer, 30023, "Article", nil, baseTs+2)
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Note", nil, baseTs)
|
||||
createAndSaveEventLocal(t, ctx, signer, 7, "Reaction", nil, baseTs+1)
|
||||
createAndSaveEventLocal(t, ctx, signer, 30023, "Article", nil, baseTs+2)
|
||||
|
||||
// Query for multiple kinds
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1), kind.New(7)),
|
||||
})
|
||||
if err != nil {
|
||||
@@ -391,24 +398,27 @@ func TestQueryEventsMultipleKinds(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestQueryEventsMultipleAuthors(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
alice := createTestSigner(t)
|
||||
bob := createTestSigner(t)
|
||||
charlie := createTestSigner(t)
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
alice := createTestSignerLocal(t)
|
||||
bob := createTestSignerLocal(t)
|
||||
charlie := createTestSignerLocal(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create events from different authors
|
||||
createAndSaveEvent(t, ctx, db, alice, 1, "Alice", nil, baseTs)
|
||||
createAndSaveEvent(t, ctx, db, bob, 1, "Bob", nil, baseTs+1)
|
||||
createAndSaveEvent(t, ctx, db, charlie, 1, "Charlie", nil, baseTs+2)
|
||||
createAndSaveEventLocal(t, ctx, alice, 1, "Alice", nil, baseTs)
|
||||
createAndSaveEventLocal(t, ctx, bob, 1, "Bob", nil, baseTs+1)
|
||||
createAndSaveEventLocal(t, ctx, charlie, 1, "Charlie", nil, baseTs+2)
|
||||
|
||||
// Query for Alice and Bob's events
|
||||
authors := tag.NewFromBytesSlice(alice.Pub(), bob.Pub())
|
||||
|
||||
evs, err := db.QueryEvents(ctx, &filter.F{
|
||||
evs, err := testDB.QueryEvents(ctx, &filter.F{
|
||||
Authors: authors,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -423,20 +433,23 @@ func TestQueryEventsMultipleAuthors(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestCountEvents(t *testing.T) {
|
||||
db, ctx, cancel := setupTestDatabase(t)
|
||||
defer db.Close()
|
||||
defer cancel()
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
signer := createTestSigner(t)
|
||||
cleanTestDatabase()
|
||||
|
||||
ctx := context.Background()
|
||||
signer := createTestSignerLocal(t)
|
||||
baseTs := timestamp.Now().V
|
||||
|
||||
// Create events
|
||||
for i := 0; i < 5; i++ {
|
||||
createAndSaveEvent(t, ctx, db, signer, 1, "Event", nil, baseTs+int64(i))
|
||||
createAndSaveEventLocal(t, ctx, signer, 1, "Event", nil, baseTs+int64(i))
|
||||
}
|
||||
|
||||
// Count events
|
||||
count, _, err := db.CountEvents(ctx, &filter.F{
|
||||
count, _, err := testDB.CountEvents(ctx, &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1)),
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
// NOTE: This file requires updates to match the current nostr library types.
|
||||
// The filter/tag/kind types have changed since this test was written.
|
||||
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
@@ -81,10 +87,10 @@ func TestQueryEventsWithNilFilter(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
// Test 5: Filter with empty Ids slice
|
||||
// Test 5: Filter with empty Ids (using tag with empty slice)
|
||||
t.Run("EmptyIds", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Ids: &tag.S{T: [][]byte{}},
|
||||
Ids: &tag.T{T: [][]byte{}},
|
||||
}
|
||||
_, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
@@ -92,10 +98,10 @@ func TestQueryEventsWithNilFilter(t *testing.T) {
|
||||
}
|
||||
})
|
||||
|
||||
// Test 6: Filter with empty Authors slice
|
||||
// Test 6: Filter with empty Authors (using tag with empty slice)
|
||||
t.Run("EmptyAuthors", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Authors: &tag.S{T: [][]byte{}},
|
||||
Authors: &tag.T{T: [][]byte{}},
|
||||
}
|
||||
_, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
@@ -106,7 +112,7 @@ func TestQueryEventsWithNilFilter(t *testing.T) {
|
||||
// Test 7: Filter with empty Kinds slice
|
||||
t.Run("EmptyKinds", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Kinds: &kind.S{K: []*kind.T{}},
|
||||
Kinds: kind.NewS(),
|
||||
}
|
||||
_, err := testDB.QueryEvents(ctx, f)
|
||||
if err != nil {
|
||||
@@ -190,7 +196,7 @@ func TestQueryEventsWithValidFilters(t *testing.T) {
|
||||
|
||||
// Test 5: Filter with limit
|
||||
t.Run("FilterWithLimit", func(t *testing.T) {
|
||||
limit := 1
|
||||
limit := uint(1)
|
||||
f := &filter.F{
|
||||
Kinds: kind.NewS(kind.New(1)),
|
||||
Limit: &limit,
|
||||
@@ -234,9 +240,9 @@ func TestBuildCypherQueryWithNilFields(t *testing.T) {
|
||||
// Test with empty slices
|
||||
t.Run("EmptySlices", func(t *testing.T) {
|
||||
f := &filter.F{
|
||||
Ids: &tag.S{T: [][]byte{}},
|
||||
Authors: &tag.S{T: [][]byte{}},
|
||||
Kinds: &kind.S{K: []*kind.T{}},
|
||||
Ids: &tag.T{T: [][]byte{}},
|
||||
Authors: &tag.T{T: [][]byte{}},
|
||||
Kinds: kind.NewS(),
|
||||
}
|
||||
cypher, params := testDB.buildCypherQuery(f, false)
|
||||
if cypher == "" {
|
||||
@@ -252,8 +258,8 @@ func TestBuildCypherQueryWithNilFields(t *testing.T) {
|
||||
since := timestamp.Now()
|
||||
until := timestamp.Now()
|
||||
f := &filter.F{
|
||||
Since: &since,
|
||||
Until: &until,
|
||||
Since: since,
|
||||
Until: until,
|
||||
}
|
||||
cypher, params := testDB.buildCypherQuery(f, false)
|
||||
if _, ok := params["since"]; !ok {
|
||||
|
||||
@@ -16,12 +16,19 @@ func parseInt64(s string) (int64, error) {
|
||||
return strconv.ParseInt(s, 10, 64)
|
||||
}
|
||||
|
||||
// tagBatchSize is the maximum number of tags to process in a single transaction
|
||||
// This prevents Neo4j stack overflow errors with events that have thousands of tags
|
||||
const tagBatchSize = 500
|
||||
|
||||
// SaveEvent stores a Nostr event in the Neo4j database.
|
||||
// It creates event nodes and relationships for authors, tags, and references.
|
||||
// This method leverages Neo4j's graph capabilities to model Nostr's social graph naturally.
|
||||
//
|
||||
// For social graph events (kinds 0, 3, 1984, 10000), it additionally processes them
|
||||
// to maintain NostrUser nodes and FOLLOWS/MUTES/REPORTS relationships with event traceability.
|
||||
//
|
||||
// To prevent Neo4j stack overflow errors with events containing thousands of tags,
|
||||
// tags are processed in batches using UNWIND instead of generating inline Cypher.
|
||||
func (n *N) SaveEvent(c context.Context, ev *event.E) (exists bool, err error) {
|
||||
eventID := hex.Enc(ev.ID[:])
|
||||
|
||||
@@ -42,7 +49,7 @@ func (n *N) SaveEvent(c context.Context, ev *event.E) (exists bool, err error) {
|
||||
if ev.Kind == 0 || ev.Kind == 3 || ev.Kind == 1984 || ev.Kind == 10000 {
|
||||
processor := NewSocialEventProcessor(n)
|
||||
if err := processor.ProcessSocialEvent(c, ev); err != nil {
|
||||
n.Logger.Warningf("failed to reprocess social event %s: %v", eventID[:16], err)
|
||||
n.Logger.Warningf("failed to reprocess social event %s: %v", safePrefix(eventID, 16), err)
|
||||
// Don't fail the whole save, social processing is supplementary
|
||||
}
|
||||
}
|
||||
@@ -55,14 +62,20 @@ func (n *N) SaveEvent(c context.Context, ev *event.E) (exists bool, err error) {
|
||||
return false, fmt.Errorf("failed to get serial number: %w", err)
|
||||
}
|
||||
|
||||
// Build and execute Cypher query to create event with all relationships
|
||||
// This creates Event and Author nodes for NIP-01 query support
|
||||
cypher, params := n.buildEventCreationCypher(ev, serial)
|
||||
|
||||
// Step 1: Create base event with author (small, fixed-size query)
|
||||
cypher, params := n.buildBaseEventCypher(ev, serial)
|
||||
if _, err = n.ExecuteWrite(c, cypher, params); err != nil {
|
||||
return false, fmt.Errorf("failed to save event: %w", err)
|
||||
}
|
||||
|
||||
// Step 2: Process tags in batches to avoid stack overflow
|
||||
if ev.Tags != nil {
|
||||
if err := n.addTagsInBatches(c, eventID, ev); err != nil {
|
||||
// Log but don't fail - base event is saved, tags are supplementary for queries
|
||||
n.Logger.Errorf("failed to add tags for event %s: %v", safePrefix(eventID, 16), err)
|
||||
}
|
||||
}
|
||||
|
||||
// Process social graph events (kinds 0, 3, 1984, 10000)
|
||||
// This creates NostrUser nodes and social relationships (FOLLOWS, MUTES, REPORTS)
|
||||
// with event traceability for diff-based updates
|
||||
@@ -72,7 +85,7 @@ func (n *N) SaveEvent(c context.Context, ev *event.E) (exists bool, err error) {
|
||||
// Log error but don't fail the whole save
|
||||
// NIP-01 queries will still work even if social processing fails
|
||||
n.Logger.Errorf("failed to process social event kind %d, event %s: %v",
|
||||
ev.Kind, eventID[:16], err)
|
||||
ev.Kind, safePrefix(eventID, 16), err)
|
||||
// Consider: should we fail here or continue?
|
||||
// For now, continue - social graph is supplementary to base relay
|
||||
}
|
||||
@@ -81,13 +94,20 @@ func (n *N) SaveEvent(c context.Context, ev *event.E) (exists bool, err error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// buildEventCreationCypher constructs a Cypher query to create an event node with all relationships
|
||||
// This is a single atomic operation that creates:
|
||||
// safePrefix returns up to n characters from a string, handling short strings gracefully
|
||||
func safePrefix(s string, n int) string {
|
||||
if len(s) <= n {
|
||||
return s
|
||||
}
|
||||
return s[:n]
|
||||
}
|
||||
|
||||
// buildBaseEventCypher constructs a Cypher query to create just the base event node and author.
|
||||
// Tags are added separately in batches to prevent stack overflow with large tag sets.
|
||||
// This creates:
|
||||
// - Event node with all properties
|
||||
// - NostrUser node and AUTHORED_BY relationship (unified author + WoT node)
|
||||
// - Tag nodes and TAGGED_WITH relationships
|
||||
// - Reference relationships (REFERENCES for 'e' tags, MENTIONS for 'p' tags)
|
||||
func (n *N) buildEventCreationCypher(ev *event.E, serial uint64) (string, map[string]any) {
|
||||
func (n *N) buildBaseEventCypher(ev *event.E, serial uint64) (string, map[string]any) {
|
||||
params := make(map[string]any)
|
||||
|
||||
// Event properties
|
||||
@@ -123,7 +143,7 @@ func (n *N) buildEventCreationCypher(ev *event.E, serial uint64) (string, map[st
|
||||
}
|
||||
params["tags"] = string(tagsJSON)
|
||||
|
||||
// Start building the Cypher query
|
||||
// Build Cypher query - just event + author, no tags (tags added in batches)
|
||||
// Use MERGE to ensure idempotency for NostrUser nodes
|
||||
// NostrUser serves both NIP-01 author tracking and WoT social graph
|
||||
cypher := `
|
||||
@@ -146,143 +166,180 @@ CREATE (e:Event {
|
||||
|
||||
// Link event to author
|
||||
CREATE (e)-[:AUTHORED_BY]->(a)
|
||||
`
|
||||
|
||||
// Process tags to create relationships
|
||||
// Different tag types create different relationship patterns
|
||||
tagNodeIndex := 0
|
||||
eTagIndex := 0
|
||||
pTagIndex := 0
|
||||
|
||||
// Track if we need to add WITH clause before OPTIONAL MATCH
|
||||
// This is required because Cypher doesn't allow MATCH after CREATE without WITH
|
||||
needsWithClause := true
|
||||
|
||||
// Collect all e-tags, p-tags, and other tags first so we can generate proper Cypher
|
||||
// Neo4j requires WITH clauses between certain clause types (FOREACH -> MATCH/MERGE)
|
||||
type tagInfo struct {
|
||||
tagType string
|
||||
value string
|
||||
}
|
||||
var eTags, pTags, otherTags []tagInfo
|
||||
|
||||
// Only process tags if they exist
|
||||
if ev.Tags != nil {
|
||||
for _, tagItem := range *ev.Tags {
|
||||
if len(tagItem.T) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
tagType := string(tagItem.T[0])
|
||||
|
||||
switch tagType {
|
||||
case "e": // Event reference
|
||||
tagValue := ExtractETagValue(tagItem)
|
||||
if tagValue != "" {
|
||||
eTags = append(eTags, tagInfo{"e", tagValue})
|
||||
}
|
||||
case "p": // Pubkey mention
|
||||
tagValue := ExtractPTagValue(tagItem)
|
||||
if tagValue != "" {
|
||||
pTags = append(pTags, tagInfo{"p", tagValue})
|
||||
}
|
||||
default: // Other tags
|
||||
tagValue := string(tagItem.T[1])
|
||||
otherTags = append(otherTags, tagInfo{tagType, tagValue})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Generate Cypher for e-tags (OPTIONAL MATCH + FOREACH pattern)
|
||||
// These need WITH clause before first one, and WITH after all FOREACHes
|
||||
for i, tag := range eTags {
|
||||
paramName := fmt.Sprintf("eTag_%d", eTagIndex)
|
||||
params[paramName] = tag.value
|
||||
|
||||
// Add WITH clause before first OPTIONAL MATCH only
|
||||
if needsWithClause {
|
||||
cypher += `
|
||||
// Carry forward event and author nodes for tag processing
|
||||
WITH e, a
|
||||
`
|
||||
needsWithClause = false
|
||||
}
|
||||
|
||||
cypher += fmt.Sprintf(`
|
||||
// Reference to event (e-tag)
|
||||
OPTIONAL MATCH (ref%d:Event {id: $%s})
|
||||
FOREACH (ignoreMe IN CASE WHEN ref%d IS NOT NULL THEN [1] ELSE [] END |
|
||||
CREATE (e)-[:REFERENCES]->(ref%d)
|
||||
)
|
||||
`, eTagIndex, paramName, eTagIndex, eTagIndex)
|
||||
|
||||
eTagIndex++
|
||||
|
||||
// After the last e-tag FOREACH, add WITH clause if there are p-tags or other tags
|
||||
if i == len(eTags)-1 && (len(pTags) > 0 || len(otherTags) > 0) {
|
||||
cypher += `
|
||||
// Required WITH after FOREACH before MERGE/MATCH
|
||||
WITH e, a
|
||||
`
|
||||
}
|
||||
}
|
||||
|
||||
// Generate Cypher for p-tags (MERGE pattern)
|
||||
for _, tag := range pTags {
|
||||
paramName := fmt.Sprintf("pTag_%d", pTagIndex)
|
||||
params[paramName] = tag.value
|
||||
|
||||
// If no e-tags were processed, we still need the initial WITH
|
||||
if needsWithClause {
|
||||
cypher += `
|
||||
// Carry forward event and author nodes for tag processing
|
||||
WITH e, a
|
||||
`
|
||||
needsWithClause = false
|
||||
}
|
||||
|
||||
cypher += fmt.Sprintf(`
|
||||
// Mention of NostrUser (p-tag)
|
||||
MERGE (mentioned%d:NostrUser {pubkey: $%s})
|
||||
ON CREATE SET mentioned%d.created_at = timestamp()
|
||||
CREATE (e)-[:MENTIONS]->(mentioned%d)
|
||||
`, pTagIndex, paramName, pTagIndex, pTagIndex)
|
||||
|
||||
pTagIndex++
|
||||
}
|
||||
|
||||
// Generate Cypher for other tags (MERGE pattern)
|
||||
for _, tag := range otherTags {
|
||||
typeParam := fmt.Sprintf("tagType_%d", tagNodeIndex)
|
||||
valueParam := fmt.Sprintf("tagValue_%d", tagNodeIndex)
|
||||
params[typeParam] = tag.tagType
|
||||
params[valueParam] = tag.value
|
||||
|
||||
// If no e-tags or p-tags were processed, we still need the initial WITH
|
||||
if needsWithClause {
|
||||
cypher += `
|
||||
// Carry forward event and author nodes for tag processing
|
||||
WITH e, a
|
||||
`
|
||||
needsWithClause = false
|
||||
}
|
||||
|
||||
cypher += fmt.Sprintf(`
|
||||
// Generic tag relationship
|
||||
MERGE (tag%d:Tag {type: $%s, value: $%s})
|
||||
CREATE (e)-[:TAGGED_WITH]->(tag%d)
|
||||
`, tagNodeIndex, typeParam, valueParam, tagNodeIndex)
|
||||
|
||||
tagNodeIndex++
|
||||
}
|
||||
|
||||
// Return the created event
|
||||
cypher += `
|
||||
RETURN e.id AS id`
|
||||
|
||||
return cypher, params
|
||||
}
|
||||
|
||||
// tagTypeValue represents a generic tag with type and value for batch processing
|
||||
type tagTypeValue struct {
|
||||
Type string
|
||||
Value string
|
||||
}
|
||||
|
||||
// addTagsInBatches processes event tags in batches using UNWIND to prevent Neo4j stack overflow.
|
||||
// This handles e-tags (event references), p-tags (pubkey mentions), and other tags separately.
|
||||
func (n *N) addTagsInBatches(c context.Context, eventID string, ev *event.E) error {
|
||||
if ev.Tags == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Collect tags by type
|
||||
var eTags, pTags []string
|
||||
var otherTags []tagTypeValue
|
||||
|
||||
for _, tagItem := range *ev.Tags {
|
||||
if len(tagItem.T) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
tagType := string(tagItem.T[0])
|
||||
|
||||
switch tagType {
|
||||
case "e": // Event reference
|
||||
tagValue := ExtractETagValue(tagItem)
|
||||
if tagValue != "" {
|
||||
eTags = append(eTags, tagValue)
|
||||
}
|
||||
case "p": // Pubkey mention
|
||||
tagValue := ExtractPTagValue(tagItem)
|
||||
if tagValue != "" {
|
||||
pTags = append(pTags, tagValue)
|
||||
}
|
||||
default: // Other tags
|
||||
tagValue := string(tagItem.T[1])
|
||||
otherTags = append(otherTags, tagTypeValue{Type: tagType, Value: tagValue})
|
||||
}
|
||||
}
|
||||
|
||||
// Add p-tags in batches (creates MENTIONS relationships)
|
||||
if len(pTags) > 0 {
|
||||
if err := n.addPTagsInBatches(c, eventID, pTags); err != nil {
|
||||
return fmt.Errorf("failed to add p-tags: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Add e-tags in batches (creates REFERENCES relationships)
|
||||
if len(eTags) > 0 {
|
||||
if err := n.addETagsInBatches(c, eventID, eTags); err != nil {
|
||||
return fmt.Errorf("failed to add e-tags: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Add other tags in batches (creates TAGGED_WITH relationships)
|
||||
if len(otherTags) > 0 {
|
||||
if err := n.addOtherTagsInBatches(c, eventID, otherTags); err != nil {
|
||||
return fmt.Errorf("failed to add other tags: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// addPTagsInBatches adds p-tag (pubkey mention) relationships using UNWIND for efficiency.
|
||||
// Creates NostrUser nodes for mentioned pubkeys and MENTIONS relationships.
|
||||
func (n *N) addPTagsInBatches(c context.Context, eventID string, pTags []string) error {
|
||||
// Process in batches to avoid memory issues
|
||||
for i := 0; i < len(pTags); i += tagBatchSize {
|
||||
end := i + tagBatchSize
|
||||
if end > len(pTags) {
|
||||
end = len(pTags)
|
||||
}
|
||||
batch := pTags[i:end]
|
||||
|
||||
// Use UNWIND to process multiple p-tags in a single query
|
||||
cypher := `
|
||||
MATCH (e:Event {id: $eventId})
|
||||
UNWIND $pubkeys AS pubkey
|
||||
MERGE (u:NostrUser {pubkey: pubkey})
|
||||
ON CREATE SET u.created_at = timestamp()
|
||||
CREATE (e)-[:MENTIONS]->(u)`
|
||||
|
||||
params := map[string]any{
|
||||
"eventId": eventID,
|
||||
"pubkeys": batch,
|
||||
}
|
||||
|
||||
if _, err := n.ExecuteWrite(c, cypher, params); err != nil {
|
||||
return fmt.Errorf("batch %d-%d: %w", i, end, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// addETagsInBatches adds e-tag (event reference) relationships using UNWIND for efficiency.
|
||||
// Only creates REFERENCES relationships if the referenced event exists.
|
||||
func (n *N) addETagsInBatches(c context.Context, eventID string, eTags []string) error {
|
||||
// Process in batches to avoid memory issues
|
||||
for i := 0; i < len(eTags); i += tagBatchSize {
|
||||
end := i + tagBatchSize
|
||||
if end > len(eTags) {
|
||||
end = len(eTags)
|
||||
}
|
||||
batch := eTags[i:end]
|
||||
|
||||
// Use UNWIND to process multiple e-tags in a single query
|
||||
// OPTIONAL MATCH ensures we only create relationships if referenced event exists
|
||||
cypher := `
|
||||
MATCH (e:Event {id: $eventId})
|
||||
UNWIND $eventIds AS refId
|
||||
OPTIONAL MATCH (ref:Event {id: refId})
|
||||
WITH e, ref
|
||||
WHERE ref IS NOT NULL
|
||||
CREATE (e)-[:REFERENCES]->(ref)`
|
||||
|
||||
params := map[string]any{
|
||||
"eventId": eventID,
|
||||
"eventIds": batch,
|
||||
}
|
||||
|
||||
if _, err := n.ExecuteWrite(c, cypher, params); err != nil {
|
||||
return fmt.Errorf("batch %d-%d: %w", i, end, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// addOtherTagsInBatches adds generic tag relationships using UNWIND for efficiency.
|
||||
// Creates Tag nodes with type and value, and TAGGED_WITH relationships.
|
||||
func (n *N) addOtherTagsInBatches(c context.Context, eventID string, tags []tagTypeValue) error {
|
||||
// Process in batches to avoid memory issues
|
||||
for i := 0; i < len(tags); i += tagBatchSize {
|
||||
end := i + tagBatchSize
|
||||
if end > len(tags) {
|
||||
end = len(tags)
|
||||
}
|
||||
batch := tags[i:end]
|
||||
|
||||
// Convert to map slice for Neo4j parameter passing
|
||||
tagMaps := make([]map[string]string, len(batch))
|
||||
for j, t := range batch {
|
||||
tagMaps[j] = map[string]string{"type": t.Type, "value": t.Value}
|
||||
}
|
||||
|
||||
// Use UNWIND to process multiple tags in a single query
|
||||
cypher := `
|
||||
MATCH (e:Event {id: $eventId})
|
||||
UNWIND $tags AS tag
|
||||
MERGE (t:Tag {type: tag.type, value: tag.value})
|
||||
CREATE (e)-[:TAGGED_WITH]->(t)`
|
||||
|
||||
params := map[string]any{
|
||||
"eventId": eventID,
|
||||
"tags": tagMaps,
|
||||
}
|
||||
|
||||
if _, err := n.ExecuteWrite(c, cypher, params); err != nil {
|
||||
return fmt.Errorf("batch %d-%d: %w", i, end, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetSerialsFromFilter returns event serials matching a filter
|
||||
func (n *N) GetSerialsFromFilter(f *filter.F) (serials types.Uint40s, err error) {
|
||||
// Use QueryForSerials with background context
|
||||
|
||||
@@ -3,7 +3,6 @@ package neo4j
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
@@ -14,167 +13,9 @@ import (
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
)
|
||||
|
||||
// TestCypherQueryGeneration_WithClause is a unit test that validates the WITH clause fix
|
||||
// without requiring a Neo4j instance. This test verifies the generated Cypher string
|
||||
// has correct syntax for different tag combinations.
|
||||
func TestCypherQueryGeneration_WithClause(t *testing.T) {
|
||||
// Create a mock N struct - we only need it to call buildEventCreationCypher
|
||||
// No actual Neo4j connection is needed for this unit test
|
||||
n := &N{}
|
||||
|
||||
// Generate test keypair
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create signer: %v", err)
|
||||
}
|
||||
if err := signer.Generate(); err != nil {
|
||||
t.Fatalf("Failed to generate keypair: %v", err)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
tags *tag.S
|
||||
expectWithClause bool
|
||||
expectOptionalMatch bool
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "NoTags",
|
||||
tags: nil,
|
||||
expectWithClause: false,
|
||||
expectOptionalMatch: false,
|
||||
description: "Event without tags",
|
||||
},
|
||||
{
|
||||
name: "OnlyPTags_NoWithNeeded",
|
||||
tags: tag.NewS(
|
||||
tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
),
|
||||
expectWithClause: false,
|
||||
expectOptionalMatch: false,
|
||||
description: "p-tags use MERGE (not OPTIONAL MATCH), no WITH needed",
|
||||
},
|
||||
{
|
||||
name: "OnlyETags_WithRequired",
|
||||
tags: tag.NewS(
|
||||
tag.NewFromAny("e", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
|
||||
tag.NewFromAny("e", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
|
||||
),
|
||||
expectWithClause: true,
|
||||
expectOptionalMatch: true,
|
||||
description: "e-tags use OPTIONAL MATCH which requires WITH clause after CREATE",
|
||||
},
|
||||
{
|
||||
name: "ETagBeforePTag",
|
||||
tags: tag.NewS(
|
||||
tag.NewFromAny("e", "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
|
||||
tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000003"),
|
||||
),
|
||||
expectWithClause: true,
|
||||
expectOptionalMatch: true,
|
||||
description: "e-tag appearing first triggers WITH clause",
|
||||
},
|
||||
{
|
||||
name: "PTagBeforeETag",
|
||||
tags: tag.NewS(
|
||||
tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000004"),
|
||||
tag.NewFromAny("e", "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"),
|
||||
),
|
||||
expectWithClause: true,
|
||||
expectOptionalMatch: true,
|
||||
description: "WITH clause needed even when p-tag comes before e-tag",
|
||||
},
|
||||
{
|
||||
name: "GenericTagsBeforeETag",
|
||||
tags: tag.NewS(
|
||||
tag.NewFromAny("t", "nostr"),
|
||||
tag.NewFromAny("r", "https://example.com"),
|
||||
tag.NewFromAny("e", "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"),
|
||||
),
|
||||
expectWithClause: true,
|
||||
expectOptionalMatch: true,
|
||||
description: "WITH clause needed when e-tag follows generic tags",
|
||||
},
|
||||
{
|
||||
name: "OnlyGenericTags",
|
||||
tags: tag.NewS(
|
||||
tag.NewFromAny("t", "bitcoin"),
|
||||
tag.NewFromAny("d", "identifier"),
|
||||
tag.NewFromAny("r", "wss://relay.example.com"),
|
||||
),
|
||||
expectWithClause: false,
|
||||
expectOptionalMatch: false,
|
||||
description: "Generic tags use MERGE, no WITH needed",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Create test event
|
||||
ev := event.New()
|
||||
ev.Pubkey = signer.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
ev.Kind = 1
|
||||
ev.Content = []byte(fmt.Sprintf("Test content for %s", tt.name))
|
||||
ev.Tags = tt.tags
|
||||
|
||||
if err := ev.Sign(signer); err != nil {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
// Generate Cypher query
|
||||
cypher, params := n.buildEventCreationCypher(ev, 12345)
|
||||
|
||||
// Validate WITH clause presence
|
||||
hasWithClause := strings.Contains(cypher, "WITH e, a")
|
||||
if tt.expectWithClause && !hasWithClause {
|
||||
t.Errorf("%s: expected WITH clause but none found in Cypher:\n%s", tt.description, cypher)
|
||||
}
|
||||
if !tt.expectWithClause && hasWithClause {
|
||||
t.Errorf("%s: unexpected WITH clause in Cypher:\n%s", tt.description, cypher)
|
||||
}
|
||||
|
||||
// Validate OPTIONAL MATCH presence
|
||||
hasOptionalMatch := strings.Contains(cypher, "OPTIONAL MATCH")
|
||||
if tt.expectOptionalMatch && !hasOptionalMatch {
|
||||
t.Errorf("%s: expected OPTIONAL MATCH but none found", tt.description)
|
||||
}
|
||||
if !tt.expectOptionalMatch && hasOptionalMatch {
|
||||
t.Errorf("%s: unexpected OPTIONAL MATCH found", tt.description)
|
||||
}
|
||||
|
||||
// Validate WITH clause comes BEFORE first OPTIONAL MATCH (if both present)
|
||||
if hasWithClause && hasOptionalMatch {
|
||||
withIndex := strings.Index(cypher, "WITH e, a")
|
||||
optionalIndex := strings.Index(cypher, "OPTIONAL MATCH")
|
||||
if withIndex > optionalIndex {
|
||||
t.Errorf("%s: WITH clause must come BEFORE OPTIONAL MATCH.\nWITH at %d, OPTIONAL MATCH at %d\nCypher:\n%s",
|
||||
tt.description, withIndex, optionalIndex, cypher)
|
||||
}
|
||||
}
|
||||
|
||||
// Validate parameters are set
|
||||
if params == nil {
|
||||
t.Error("params should not be nil")
|
||||
}
|
||||
|
||||
// Validate basic required params exist
|
||||
if _, ok := params["eventId"]; !ok {
|
||||
t.Error("params should contain eventId")
|
||||
}
|
||||
if _, ok := params["serial"]; !ok {
|
||||
t.Error("params should contain serial")
|
||||
}
|
||||
|
||||
t.Logf("✓ %s: WITH=%v, OPTIONAL_MATCH=%v", tt.name, hasWithClause, hasOptionalMatch)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestCypherQueryGeneration_MultipleETags verifies WITH clause is added exactly once
|
||||
// even with multiple e-tags.
|
||||
func TestCypherQueryGeneration_MultipleETags(t *testing.T) {
|
||||
// TestBuildBaseEventCypher verifies the base event creation query generates correct Cypher.
|
||||
// The new architecture separates event creation from tag processing to avoid stack overflow.
|
||||
func TestBuildBaseEventCypher(t *testing.T) {
|
||||
n := &N{}
|
||||
|
||||
signer, err := p8k.New()
|
||||
@@ -185,216 +26,45 @@ func TestCypherQueryGeneration_MultipleETags(t *testing.T) {
|
||||
t.Fatalf("Failed to generate keypair: %v", err)
|
||||
}
|
||||
|
||||
// Create event with many e-tags
|
||||
manyETags := tag.NewS()
|
||||
for i := 0; i < 10; i++ {
|
||||
manyETags.Append(tag.NewFromAny("e", fmt.Sprintf("%064x", i)))
|
||||
}
|
||||
|
||||
ev := event.New()
|
||||
ev.Pubkey = signer.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
ev.Kind = 1
|
||||
ev.Content = []byte("Event with many e-tags")
|
||||
ev.Tags = manyETags
|
||||
|
||||
if err := ev.Sign(signer); err != nil {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
cypher, _ := n.buildEventCreationCypher(ev, 1)
|
||||
|
||||
// Count WITH clauses - should be exactly 1
|
||||
withCount := strings.Count(cypher, "WITH e, a")
|
||||
if withCount != 1 {
|
||||
t.Errorf("Expected exactly 1 WITH clause, found %d\nCypher:\n%s", withCount, cypher)
|
||||
}
|
||||
|
||||
// Count OPTIONAL MATCH - should match number of e-tags
|
||||
optionalMatchCount := strings.Count(cypher, "OPTIONAL MATCH")
|
||||
if optionalMatchCount != 10 {
|
||||
t.Errorf("Expected 10 OPTIONAL MATCH statements (one per e-tag), found %d", optionalMatchCount)
|
||||
}
|
||||
|
||||
// Count FOREACH (which wraps the conditional relationship creation)
|
||||
foreachCount := strings.Count(cypher, "FOREACH")
|
||||
if foreachCount != 10 {
|
||||
t.Errorf("Expected 10 FOREACH blocks, found %d", foreachCount)
|
||||
}
|
||||
|
||||
t.Logf("✓ WITH clause added once, followed by %d OPTIONAL MATCH + FOREACH pairs", optionalMatchCount)
|
||||
}
|
||||
|
||||
// TestCypherQueryGeneration_CriticalBugScenario reproduces the exact bug scenario
|
||||
// that was fixed: CREATE followed by OPTIONAL MATCH without WITH clause.
|
||||
func TestCypherQueryGeneration_CriticalBugScenario(t *testing.T) {
|
||||
n := &N{}
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create signer: %v", err)
|
||||
}
|
||||
if err := signer.Generate(); err != nil {
|
||||
t.Fatalf("Failed to generate keypair: %v", err)
|
||||
}
|
||||
|
||||
// This is the exact scenario that caused the bug:
|
||||
// An event with just one e-tag should have:
|
||||
// 1. CREATE clause for the event
|
||||
// 2. WITH clause to carry forward variables
|
||||
// 3. OPTIONAL MATCH for the referenced event
|
||||
ev := event.New()
|
||||
ev.Pubkey = signer.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
ev.Kind = 1
|
||||
ev.Content = []byte("Reply to an event")
|
||||
ev.Tags = tag.NewS(
|
||||
tag.NewFromAny("e", "1234567890123456789012345678901234567890123456789012345678901234"),
|
||||
)
|
||||
|
||||
if err := ev.Sign(signer); err != nil {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
cypher, _ := n.buildEventCreationCypher(ev, 1)
|
||||
|
||||
// The critical validation: WITH must appear between CREATE and OPTIONAL MATCH
|
||||
createIndex := strings.Index(cypher, "CREATE (e)-[:AUTHORED_BY]->(a)")
|
||||
withIndex := strings.Index(cypher, "WITH e, a")
|
||||
optionalMatchIndex := strings.Index(cypher, "OPTIONAL MATCH")
|
||||
|
||||
if createIndex == -1 {
|
||||
t.Fatal("CREATE clause not found in Cypher")
|
||||
}
|
||||
if withIndex == -1 {
|
||||
t.Fatal("WITH clause not found in Cypher - THIS IS THE BUG!")
|
||||
}
|
||||
if optionalMatchIndex == -1 {
|
||||
t.Fatal("OPTIONAL MATCH not found in Cypher")
|
||||
}
|
||||
|
||||
// Validate order: CREATE < WITH < OPTIONAL MATCH
|
||||
if !(createIndex < withIndex && withIndex < optionalMatchIndex) {
|
||||
t.Errorf("Invalid clause ordering. Expected: CREATE (%d) < WITH (%d) < OPTIONAL MATCH (%d)\nCypher:\n%s",
|
||||
createIndex, withIndex, optionalMatchIndex, cypher)
|
||||
}
|
||||
|
||||
t.Log("✓ Critical bug scenario validated: WITH clause correctly placed between CREATE and OPTIONAL MATCH")
|
||||
}
|
||||
|
||||
// TestBuildEventCreationCypher_WithClause validates the WITH clause fix for Cypher queries.
|
||||
// The bug was that OPTIONAL MATCH cannot directly follow CREATE in Cypher - a WITH clause
|
||||
// is required to carry forward bound variables (e, a) from the CREATE to the MATCH.
|
||||
func TestBuildEventCreationCypher_WithClause(t *testing.T) {
|
||||
// Skip if Neo4j is not available
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
}
|
||||
|
||||
// Create test database
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Wait for database to be ready
|
||||
<-db.Ready()
|
||||
|
||||
// Wipe database to ensure clean state
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
|
||||
// Generate test keypair
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create signer: %v", err)
|
||||
}
|
||||
if err := signer.Generate(); err != nil {
|
||||
t.Fatalf("Failed to generate keypair: %v", err)
|
||||
}
|
||||
|
||||
// Test cases for different tag combinations
|
||||
tests := []struct {
|
||||
name string
|
||||
tags *tag.S
|
||||
wantWithClause bool
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "NoTags",
|
||||
tags: nil,
|
||||
wantWithClause: false,
|
||||
description: "Event without tags should not have WITH clause",
|
||||
description: "Event without tags",
|
||||
},
|
||||
{
|
||||
name: "OnlyPTags",
|
||||
name: "WithPTags",
|
||||
tags: tag.NewS(
|
||||
tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000001"),
|
||||
tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000002"),
|
||||
),
|
||||
wantWithClause: false,
|
||||
description: "Event with only p-tags (MERGE) should not have WITH clause",
|
||||
description: "Event with p-tags (stored in tags JSON, relationships added separately)",
|
||||
},
|
||||
{
|
||||
name: "OnlyETags",
|
||||
name: "WithETags",
|
||||
tags: tag.NewS(
|
||||
tag.NewFromAny("e", "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"),
|
||||
tag.NewFromAny("e", "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"),
|
||||
),
|
||||
wantWithClause: true,
|
||||
description: "Event with e-tags (OPTIONAL MATCH) MUST have WITH clause",
|
||||
},
|
||||
{
|
||||
name: "ETagFirst",
|
||||
tags: tag.NewS(
|
||||
tag.NewFromAny("e", "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
|
||||
tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000003"),
|
||||
),
|
||||
wantWithClause: true,
|
||||
description: "Event with e-tag first MUST have WITH clause before OPTIONAL MATCH",
|
||||
},
|
||||
{
|
||||
name: "PTagFirst",
|
||||
tags: tag.NewS(
|
||||
tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000004"),
|
||||
tag.NewFromAny("e", "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"),
|
||||
),
|
||||
wantWithClause: true,
|
||||
description: "Event with p-tag first still needs WITH clause before e-tag's OPTIONAL MATCH",
|
||||
description: "Event with e-tags (stored in tags JSON, relationships added separately)",
|
||||
},
|
||||
{
|
||||
name: "MixedTags",
|
||||
tags: tag.NewS(
|
||||
tag.NewFromAny("t", "nostr"),
|
||||
tag.NewFromAny("e", "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"),
|
||||
tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000005"),
|
||||
tag.NewFromAny("r", "https://example.com"),
|
||||
tag.NewFromAny("e", "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"),
|
||||
tag.NewFromAny("p", "0000000000000000000000000000000000000000000000000000000000000003"),
|
||||
),
|
||||
wantWithClause: true,
|
||||
description: "Mixed tags with e-tag requires WITH clause",
|
||||
},
|
||||
{
|
||||
name: "OnlyGenericTags",
|
||||
tags: tag.NewS(
|
||||
tag.NewFromAny("t", "bitcoin"),
|
||||
tag.NewFromAny("r", "wss://relay.example.com"),
|
||||
tag.NewFromAny("d", "identifier"),
|
||||
),
|
||||
wantWithClause: false,
|
||||
description: "Generic tags (MERGE) don't require WITH clause",
|
||||
description: "Event with mixed tags",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Create event
|
||||
ev := event.New()
|
||||
ev.Pubkey = signer.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
@@ -406,24 +76,75 @@ func TestBuildEventCreationCypher_WithClause(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
// Build Cypher query
|
||||
cypher, params := db.buildEventCreationCypher(ev, 1)
|
||||
cypher, params := n.buildBaseEventCypher(ev, 12345)
|
||||
|
||||
// Check if WITH clause is present
|
||||
hasWithClause := strings.Contains(cypher, "WITH e, a")
|
||||
|
||||
if tt.wantWithClause && !hasWithClause {
|
||||
t.Errorf("%s: expected WITH clause but none found.\nCypher:\n%s", tt.description, cypher)
|
||||
// Base event Cypher should NOT contain tag relationship clauses
|
||||
// (tags are added separately via addTagsInBatches)
|
||||
if strings.Contains(cypher, "OPTIONAL MATCH") {
|
||||
t.Errorf("%s: buildBaseEventCypher should NOT contain OPTIONAL MATCH", tt.description)
|
||||
}
|
||||
if !tt.wantWithClause && hasWithClause {
|
||||
t.Errorf("%s: unexpected WITH clause found.\nCypher:\n%s", tt.description, cypher)
|
||||
if strings.Contains(cypher, "UNWIND") {
|
||||
t.Errorf("%s: buildBaseEventCypher should NOT contain UNWIND", tt.description)
|
||||
}
|
||||
if strings.Contains(cypher, ":REFERENCES") {
|
||||
t.Errorf("%s: buildBaseEventCypher should NOT contain :REFERENCES", tt.description)
|
||||
}
|
||||
if strings.Contains(cypher, ":MENTIONS") {
|
||||
t.Errorf("%s: buildBaseEventCypher should NOT contain :MENTIONS", tt.description)
|
||||
}
|
||||
if strings.Contains(cypher, ":TAGGED_WITH") {
|
||||
t.Errorf("%s: buildBaseEventCypher should NOT contain :TAGGED_WITH", tt.description)
|
||||
}
|
||||
|
||||
// Verify Cypher syntax by executing it against Neo4j
|
||||
// This is the key test - invalid Cypher will fail here
|
||||
_, err := db.ExecuteWrite(ctx, cypher, params)
|
||||
if err != nil {
|
||||
t.Errorf("%s: Cypher query failed (invalid syntax): %v\nCypher:\n%s", tt.description, err, cypher)
|
||||
// Should contain basic event creation elements
|
||||
if !strings.Contains(cypher, "CREATE (e:Event") {
|
||||
t.Errorf("%s: should CREATE Event node", tt.description)
|
||||
}
|
||||
if !strings.Contains(cypher, "MERGE (a:NostrUser") {
|
||||
t.Errorf("%s: should MERGE NostrUser node", tt.description)
|
||||
}
|
||||
if !strings.Contains(cypher, ":AUTHORED_BY") {
|
||||
t.Errorf("%s: should create AUTHORED_BY relationship", tt.description)
|
||||
}
|
||||
|
||||
// Should have tags serialized in params
|
||||
if _, ok := params["tags"]; !ok {
|
||||
t.Errorf("%s: params should contain serialized tags", tt.description)
|
||||
}
|
||||
|
||||
// Validate params have required fields
|
||||
requiredParams := []string{"eventId", "serial", "kind", "createdAt", "content", "sig", "pubkey", "tags", "expiration"}
|
||||
for _, p := range requiredParams {
|
||||
if _, ok := params[p]; !ok {
|
||||
t.Errorf("%s: missing required param: %s", tt.description, p)
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("✓ %s: base event Cypher is clean (no tag relationships)", tt.name)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestSafePrefix validates the safePrefix helper function
|
||||
func TestSafePrefix(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
n int
|
||||
expected string
|
||||
}{
|
||||
{"hello world", 5, "hello"},
|
||||
{"hi", 5, "hi"},
|
||||
{"", 5, ""},
|
||||
{"1234567890", 10, "1234567890"},
|
||||
{"1234567890", 11, "1234567890"},
|
||||
{"0123456789abcdef", 8, "01234567"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(fmt.Sprintf("%q[:%d]", tt.input, tt.n), func(t *testing.T) {
|
||||
result := safePrefix(tt.input, tt.n)
|
||||
if result != tt.expected {
|
||||
t.Errorf("safePrefix(%q, %d) = %q; want %q", tt.input, tt.n, result, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -431,27 +152,16 @@ func TestBuildEventCreationCypher_WithClause(t *testing.T) {
|
||||
|
||||
// TestSaveEvent_ETagReference tests that events with e-tags are saved correctly
|
||||
// and the REFERENCES relationships are created when the referenced event exists.
|
||||
// Uses shared testDB from testmain_test.go to avoid auth rate limiting.
|
||||
func TestSaveEvent_ETagReference(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
ctx := context.Background()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
// Generate keypairs
|
||||
alice, err := p8k.New()
|
||||
@@ -482,7 +192,7 @@ func TestSaveEvent_ETagReference(t *testing.T) {
|
||||
}
|
||||
|
||||
// Save root event
|
||||
exists, err := db.SaveEvent(ctx, rootEvent)
|
||||
exists, err := testDB.SaveEvent(ctx, rootEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save root event: %v", err)
|
||||
}
|
||||
@@ -507,8 +217,8 @@ func TestSaveEvent_ETagReference(t *testing.T) {
|
||||
t.Fatalf("Failed to sign reply event: %v", err)
|
||||
}
|
||||
|
||||
// Save reply event - this exercises the WITH clause fix
|
||||
exists, err = db.SaveEvent(ctx, replyEvent)
|
||||
// Save reply event - this exercises the batched tag creation
|
||||
exists, err = testDB.SaveEvent(ctx, replyEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save reply event: %v", err)
|
||||
}
|
||||
@@ -526,7 +236,7 @@ func TestSaveEvent_ETagReference(t *testing.T) {
|
||||
"rootId": rootEventID,
|
||||
}
|
||||
|
||||
result, err := db.ExecuteRead(ctx, cypher, params)
|
||||
result, err := testDB.ExecuteRead(ctx, cypher, params)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query REFERENCES relationship: %v", err)
|
||||
}
|
||||
@@ -550,7 +260,7 @@ func TestSaveEvent_ETagReference(t *testing.T) {
|
||||
"authorPubkey": hex.Enc(alice.Pub()),
|
||||
}
|
||||
|
||||
mentionsResult, err := db.ExecuteRead(ctx, mentionsCypher, mentionsParams)
|
||||
mentionsResult, err := testDB.ExecuteRead(ctx, mentionsCypher, mentionsParams)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query MENTIONS relationship: %v", err)
|
||||
}
|
||||
@@ -563,28 +273,17 @@ func TestSaveEvent_ETagReference(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestSaveEvent_ETagMissingReference tests that e-tags to non-existent events
|
||||
// don't create broken relationships (OPTIONAL MATCH handles this gracefully).
|
||||
// don't create broken relationships (batched processing handles this gracefully).
|
||||
// Uses shared testDB from testmain_test.go to avoid auth rate limiting.
|
||||
func TestSaveEvent_ETagMissingReference(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
ctx := context.Background()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -610,8 +309,8 @@ func TestSaveEvent_ETagMissingReference(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
// Save should succeed (OPTIONAL MATCH handles missing reference)
|
||||
exists, err := db.SaveEvent(ctx, ev)
|
||||
// Save should succeed (batched e-tag processing handles missing reference)
|
||||
exists, err := testDB.SaveEvent(ctx, ev)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save event with missing reference: %v", err)
|
||||
}
|
||||
@@ -623,7 +322,7 @@ func TestSaveEvent_ETagMissingReference(t *testing.T) {
|
||||
checkCypher := "MATCH (e:Event {id: $id}) RETURN e.id AS id"
|
||||
checkParams := map[string]any{"id": hex.Enc(ev.ID[:])}
|
||||
|
||||
result, err := db.ExecuteRead(ctx, checkCypher, checkParams)
|
||||
result, err := testDB.ExecuteRead(ctx, checkCypher, checkParams)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check event: %v", err)
|
||||
}
|
||||
@@ -639,7 +338,7 @@ func TestSaveEvent_ETagMissingReference(t *testing.T) {
|
||||
`
|
||||
refParams := map[string]any{"eventId": hex.Enc(ev.ID[:])}
|
||||
|
||||
refResult, err := db.ExecuteRead(ctx, refCypher, refParams)
|
||||
refResult, err := testDB.ExecuteRead(ctx, refCypher, refParams)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to check references: %v", err)
|
||||
}
|
||||
@@ -655,27 +354,16 @@ func TestSaveEvent_ETagMissingReference(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestSaveEvent_MultipleETags tests events with multiple e-tags.
|
||||
// Uses shared testDB from testmain_test.go to avoid auth rate limiting.
|
||||
func TestSaveEvent_MultipleETags(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
ctx := context.Background()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -698,7 +386,7 @@ func TestSaveEvent_MultipleETags(t *testing.T) {
|
||||
t.Fatalf("Failed to sign event %d: %v", i, err)
|
||||
}
|
||||
|
||||
if _, err := db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, err := testDB.SaveEvent(ctx, ev); err != nil {
|
||||
t.Fatalf("Failed to save event %d: %v", i, err)
|
||||
}
|
||||
|
||||
@@ -721,8 +409,8 @@ func TestSaveEvent_MultipleETags(t *testing.T) {
|
||||
t.Fatalf("Failed to sign reply event: %v", err)
|
||||
}
|
||||
|
||||
// Save reply event - tests multiple OPTIONAL MATCH statements after WITH
|
||||
exists, err := db.SaveEvent(ctx, replyEvent)
|
||||
// Save reply event - tests batched e-tag creation
|
||||
exists, err := testDB.SaveEvent(ctx, replyEvent)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save multi-reference event: %v", err)
|
||||
}
|
||||
@@ -737,7 +425,7 @@ func TestSaveEvent_MultipleETags(t *testing.T) {
|
||||
`
|
||||
params := map[string]any{"replyId": hex.Enc(replyEvent.ID[:])}
|
||||
|
||||
result, err := db.ExecuteRead(ctx, cypher, params)
|
||||
result, err := testDB.ExecuteRead(ctx, cypher, params)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to query REFERENCES relationships: %v", err)
|
||||
}
|
||||
@@ -761,25 +449,18 @@ func TestSaveEvent_MultipleETags(t *testing.T) {
|
||||
t.Logf("✓ All %d REFERENCES relationships created successfully", len(referencedIDs))
|
||||
}
|
||||
|
||||
// TestBuildEventCreationCypher_CypherSyntaxValidation validates the generated Cypher
|
||||
// is syntactically correct for all edge cases.
|
||||
func TestBuildEventCreationCypher_CypherSyntaxValidation(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
// TestSaveEvent_LargePTagBatch tests that events with many p-tags are saved correctly
|
||||
// using batched processing to avoid Neo4j stack overflow.
|
||||
// Uses shared testDB from testmain_test.go to avoid auth rate limiting.
|
||||
func TestSaveEvent_LargePTagBatch(t *testing.T) {
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
ctx := context.Background()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
// Clean up before test
|
||||
cleanTestDatabase()
|
||||
|
||||
signer, err := p8k.New()
|
||||
if err != nil {
|
||||
@@ -789,36 +470,52 @@ func TestBuildEventCreationCypher_CypherSyntaxValidation(t *testing.T) {
|
||||
t.Fatalf("Failed to generate keypair: %v", err)
|
||||
}
|
||||
|
||||
// Test many e-tags to ensure WITH clause is added only once
|
||||
manyETags := tag.NewS()
|
||||
for i := 0; i < 10; i++ {
|
||||
manyETags.Append(tag.NewFromAny("e", fmt.Sprintf("%064x", i)))
|
||||
// Create event with many p-tags (enough to require multiple batches)
|
||||
// With tagBatchSize = 500, this will require 2 batches
|
||||
numTags := 600
|
||||
manyPTags := tag.NewS()
|
||||
for i := 0; i < numTags; i++ {
|
||||
manyPTags.Append(tag.NewFromAny("p", fmt.Sprintf("%064x", i)))
|
||||
}
|
||||
|
||||
ev := event.New()
|
||||
ev.Pubkey = signer.Pub()
|
||||
ev.CreatedAt = timestamp.Now().V
|
||||
ev.Kind = 1
|
||||
ev.Content = []byte("Event with many e-tags")
|
||||
ev.Tags = manyETags
|
||||
ev.Kind = 3 // Contact list
|
||||
ev.Content = []byte("")
|
||||
ev.Tags = manyPTags
|
||||
|
||||
if err := ev.Sign(signer); err != nil {
|
||||
t.Fatalf("Failed to sign event: %v", err)
|
||||
}
|
||||
|
||||
cypher, _ := db.buildEventCreationCypher(ev, 1)
|
||||
|
||||
// Count occurrences of WITH clause - should be exactly 1
|
||||
withCount := strings.Count(cypher, "WITH e, a")
|
||||
if withCount != 1 {
|
||||
t.Errorf("Expected exactly 1 WITH clause, found %d\nCypher:\n%s", withCount, cypher)
|
||||
// This should succeed with batched processing
|
||||
exists, err := testDB.SaveEvent(ctx, ev)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to save event with %d p-tags: %v", numTags, err)
|
||||
}
|
||||
if exists {
|
||||
t.Fatal("Event should not exist yet")
|
||||
}
|
||||
|
||||
// Count OPTIONAL MATCH statements - should equal number of e-tags
|
||||
optionalMatchCount := strings.Count(cypher, "OPTIONAL MATCH")
|
||||
if optionalMatchCount != 10 {
|
||||
t.Errorf("Expected 10 OPTIONAL MATCH statements, found %d", optionalMatchCount)
|
||||
// Verify all MENTIONS relationships were created
|
||||
countCypher := `
|
||||
MATCH (e:Event {id: $eventId})-[:MENTIONS]->(u:NostrUser)
|
||||
RETURN count(u) AS mentionCount
|
||||
`
|
||||
countParams := map[string]any{"eventId": hex.Enc(ev.ID[:])}
|
||||
|
||||
result, err := testDB.ExecuteRead(ctx, countCypher, countParams)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to count MENTIONS: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("✓ WITH clause correctly added once, followed by %d OPTIONAL MATCH statements", optionalMatchCount)
|
||||
}
|
||||
if result.Next(ctx) {
|
||||
count := result.Record().Values[0].(int64)
|
||||
if count != int64(numTags) {
|
||||
t.Errorf("Expected %d MENTIONS relationships, got %d", numTags, count)
|
||||
} else {
|
||||
t.Logf("✓ All %d MENTIONS relationships created via batched processing", count)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -95,7 +95,7 @@ func (p *SocialEventProcessor) processProfileMetadata(ctx context.Context, ev *e
|
||||
return fmt.Errorf("failed to update profile: %w", err)
|
||||
}
|
||||
|
||||
p.db.Logger.Infof("updated profile for user %s", pubkey[:16])
|
||||
p.db.Logger.Infof("updated profile for user %s", safePrefix(pubkey, 16))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -113,7 +113,7 @@ func (p *SocialEventProcessor) processContactList(ctx context.Context, ev *event
|
||||
// 2. Reject if this event is older than existing
|
||||
if existingEvent != nil && existingEvent.CreatedAt >= ev.CreatedAt {
|
||||
p.db.Logger.Infof("rejecting older contact list event %s (existing: %s)",
|
||||
eventID[:16], existingEvent.EventID[:16])
|
||||
safePrefix(eventID, 16), safePrefix(existingEvent.EventID, 16))
|
||||
return nil // Not an error, just skip
|
||||
}
|
||||
|
||||
@@ -150,7 +150,7 @@ func (p *SocialEventProcessor) processContactList(ctx context.Context, ev *event
|
||||
}
|
||||
|
||||
p.db.Logger.Infof("processed contact list: author=%s, event=%s, added=%d, removed=%d, total=%d",
|
||||
authorPubkey[:16], eventID[:16], len(added), len(removed), len(newFollows))
|
||||
safePrefix(authorPubkey, 16), safePrefix(eventID, 16), len(added), len(removed), len(newFollows))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -168,7 +168,7 @@ func (p *SocialEventProcessor) processMuteList(ctx context.Context, ev *event.E)
|
||||
|
||||
// Reject if older
|
||||
if existingEvent != nil && existingEvent.CreatedAt >= ev.CreatedAt {
|
||||
p.db.Logger.Infof("rejecting older mute list event %s", eventID[:16])
|
||||
p.db.Logger.Infof("rejecting older mute list event %s", safePrefix(eventID, 16))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -205,7 +205,7 @@ func (p *SocialEventProcessor) processMuteList(ctx context.Context, ev *event.E)
|
||||
}
|
||||
|
||||
p.db.Logger.Infof("processed mute list: author=%s, event=%s, added=%d, removed=%d",
|
||||
authorPubkey[:16], eventID[:16], len(added), len(removed))
|
||||
safePrefix(authorPubkey, 16), safePrefix(eventID, 16), len(added), len(removed))
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -232,7 +232,7 @@ func (p *SocialEventProcessor) processReport(ctx context.Context, ev *event.E) e
|
||||
}
|
||||
|
||||
if reportedPubkey == "" {
|
||||
p.db.Logger.Warningf("report event %s has no p-tag, skipping", eventID[:16])
|
||||
p.db.Logger.Warningf("report event %s has no p-tag, skipping", safePrefix(eventID, 16))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -280,7 +280,7 @@ func (p *SocialEventProcessor) processReport(ctx context.Context, ev *event.E) e
|
||||
}
|
||||
|
||||
p.db.Logger.Infof("processed report: reporter=%s, reported=%s, type=%s",
|
||||
reporterPubkey[:16], reportedPubkey[:16], reportType)
|
||||
safePrefix(reporterPubkey, 16), safePrefix(reportedPubkey, 16), reportType)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -298,15 +298,17 @@ type UpdateContactListParams struct {
|
||||
|
||||
// updateContactListGraph performs atomic graph update for contact list changes
|
||||
func (p *SocialEventProcessor) updateContactListGraph(ctx context.Context, params UpdateContactListParams) error {
|
||||
// Note: WITH is required between CREATE and MERGE in Cypher
|
||||
cypher := `
|
||||
// Mark old event as superseded (if exists)
|
||||
OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
|
||||
SET old.superseded_by = $new_event_id
|
||||
// We need to break this into separate operations because Neo4j's UNWIND
|
||||
// produces zero rows for empty arrays, which stops query execution.
|
||||
// Also, complex query chains with OPTIONAL MATCH can have issues.
|
||||
|
||||
// Create new event tracking node
|
||||
// WITH required after OPTIONAL MATCH + SET before CREATE
|
||||
WITH old
|
||||
// Step 1: Create the ProcessedSocialEvent and NostrUser nodes
|
||||
createCypher := `
|
||||
// Get or create author node first
|
||||
MERGE (author:NostrUser {pubkey: $author_pubkey})
|
||||
ON CREATE SET author.created_at = timestamp()
|
||||
|
||||
// Create new ProcessedSocialEvent tracking node
|
||||
CREATE (new:ProcessedSocialEvent {
|
||||
event_id: $new_event_id,
|
||||
event_kind: 3,
|
||||
@@ -317,54 +319,107 @@ func (p *SocialEventProcessor) updateContactListGraph(ctx context.Context, param
|
||||
superseded_by: null
|
||||
})
|
||||
|
||||
// WITH required to transition from CREATE to MERGE
|
||||
WITH new
|
||||
|
||||
// Get or create author node
|
||||
MERGE (author:NostrUser {pubkey: $author_pubkey})
|
||||
|
||||
// Update unchanged FOLLOWS relationships to point to new event
|
||||
// (so they remain visible when filtering by non-superseded events)
|
||||
WITH author
|
||||
OPTIONAL MATCH (author)-[unchanged:FOLLOWS]->(followed:NostrUser)
|
||||
WHERE unchanged.created_by_event = $old_event_id
|
||||
AND NOT followed.pubkey IN $removed_follows
|
||||
SET unchanged.created_by_event = $new_event_id,
|
||||
unchanged.created_at = $created_at
|
||||
|
||||
// Remove old FOLLOWS relationships for removed follows
|
||||
WITH author
|
||||
OPTIONAL MATCH (author)-[old_follows:FOLLOWS]->(followed:NostrUser)
|
||||
WHERE old_follows.created_by_event = $old_event_id
|
||||
AND followed.pubkey IN $removed_follows
|
||||
DELETE old_follows
|
||||
|
||||
// Create new FOLLOWS relationships for added follows
|
||||
WITH author
|
||||
UNWIND $added_follows AS followed_pubkey
|
||||
MERGE (followed:NostrUser {pubkey: followed_pubkey})
|
||||
MERGE (author)-[new_follows:FOLLOWS]->(followed)
|
||||
ON CREATE SET
|
||||
new_follows.created_by_event = $new_event_id,
|
||||
new_follows.created_at = $created_at,
|
||||
new_follows.relay_received_at = timestamp()
|
||||
ON MATCH SET
|
||||
new_follows.created_by_event = $new_event_id,
|
||||
new_follows.created_at = $created_at
|
||||
RETURN author.pubkey AS author_pubkey
|
||||
`
|
||||
|
||||
cypherParams := map[string]any{
|
||||
"author_pubkey": params.AuthorPubkey,
|
||||
"new_event_id": params.NewEventID,
|
||||
"old_event_id": params.OldEventID,
|
||||
"created_at": params.CreatedAt,
|
||||
"total_follows": params.TotalFollows,
|
||||
"added_follows": params.AddedFollows,
|
||||
"removed_follows": params.RemovedFollows,
|
||||
createParams := map[string]any{
|
||||
"author_pubkey": params.AuthorPubkey,
|
||||
"new_event_id": params.NewEventID,
|
||||
"created_at": params.CreatedAt,
|
||||
"total_follows": params.TotalFollows,
|
||||
}
|
||||
|
||||
_, err := p.db.ExecuteWrite(ctx, cypher, cypherParams)
|
||||
return err
|
||||
_, err := p.db.ExecuteWrite(ctx, createCypher, createParams)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create ProcessedSocialEvent: %w", err)
|
||||
}
|
||||
|
||||
// Step 2: Mark old event as superseded (if it exists)
|
||||
if params.OldEventID != "" {
|
||||
supersedeCypher := `
|
||||
MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
|
||||
SET old.superseded_by = $new_event_id
|
||||
`
|
||||
supersedeParams := map[string]any{
|
||||
"old_event_id": params.OldEventID,
|
||||
"new_event_id": params.NewEventID,
|
||||
}
|
||||
// Ignore errors - old event may not exist
|
||||
p.db.ExecuteWrite(ctx, supersedeCypher, supersedeParams)
|
||||
|
||||
// Step 3: Update unchanged FOLLOWS to point to new event
|
||||
// Always update relationships that aren't being removed
|
||||
updateCypher := `
|
||||
MATCH (author:NostrUser {pubkey: $author_pubkey})-[f:FOLLOWS]->(followed:NostrUser)
|
||||
WHERE f.created_by_event = $old_event_id
|
||||
AND NOT followed.pubkey IN $removed_follows
|
||||
SET f.created_by_event = $new_event_id,
|
||||
f.created_at = $created_at
|
||||
`
|
||||
updateParams := map[string]any{
|
||||
"author_pubkey": params.AuthorPubkey,
|
||||
"old_event_id": params.OldEventID,
|
||||
"new_event_id": params.NewEventID,
|
||||
"created_at": params.CreatedAt,
|
||||
"removed_follows": params.RemovedFollows,
|
||||
}
|
||||
p.db.ExecuteWrite(ctx, updateCypher, updateParams)
|
||||
|
||||
// Step 4: Remove FOLLOWS for removed follows
|
||||
if len(params.RemovedFollows) > 0 {
|
||||
removeCypher := `
|
||||
MATCH (author:NostrUser {pubkey: $author_pubkey})-[f:FOLLOWS]->(followed:NostrUser)
|
||||
WHERE f.created_by_event = $old_event_id
|
||||
AND followed.pubkey IN $removed_follows
|
||||
DELETE f
|
||||
`
|
||||
removeParams := map[string]any{
|
||||
"author_pubkey": params.AuthorPubkey,
|
||||
"old_event_id": params.OldEventID,
|
||||
"removed_follows": params.RemovedFollows,
|
||||
}
|
||||
p.db.ExecuteWrite(ctx, removeCypher, removeParams)
|
||||
}
|
||||
}
|
||||
|
||||
// Step 5: Create new FOLLOWS relationships for added follows
|
||||
// Process in batches to avoid memory issues
|
||||
const batchSize = 500
|
||||
for i := 0; i < len(params.AddedFollows); i += batchSize {
|
||||
end := i + batchSize
|
||||
if end > len(params.AddedFollows) {
|
||||
end = len(params.AddedFollows)
|
||||
}
|
||||
batch := params.AddedFollows[i:end]
|
||||
|
||||
followsCypher := `
|
||||
MATCH (author:NostrUser {pubkey: $author_pubkey})
|
||||
UNWIND $added_follows AS followed_pubkey
|
||||
MERGE (followed:NostrUser {pubkey: followed_pubkey})
|
||||
ON CREATE SET followed.created_at = timestamp()
|
||||
MERGE (author)-[f:FOLLOWS]->(followed)
|
||||
ON CREATE SET
|
||||
f.created_by_event = $new_event_id,
|
||||
f.created_at = $created_at,
|
||||
f.relay_received_at = timestamp()
|
||||
ON MATCH SET
|
||||
f.created_by_event = $new_event_id,
|
||||
f.created_at = $created_at
|
||||
`
|
||||
|
||||
followsParams := map[string]any{
|
||||
"author_pubkey": params.AuthorPubkey,
|
||||
"new_event_id": params.NewEventID,
|
||||
"created_at": params.CreatedAt,
|
||||
"added_follows": batch,
|
||||
}
|
||||
|
||||
if _, err := p.db.ExecuteWrite(ctx, followsCypher, followsParams); err != nil {
|
||||
return fmt.Errorf("failed to create FOLLOWS batch %d-%d: %w", i, end, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateMuteListParams holds parameters for mute list graph update
|
||||
@@ -380,15 +435,16 @@ type UpdateMuteListParams struct {
|
||||
|
||||
// updateMuteListGraph performs atomic graph update for mute list changes
|
||||
func (p *SocialEventProcessor) updateMuteListGraph(ctx context.Context, params UpdateMuteListParams) error {
|
||||
// Note: WITH is required between CREATE and MERGE in Cypher
|
||||
cypher := `
|
||||
// Mark old event as superseded (if exists)
|
||||
OPTIONAL MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
|
||||
SET old.superseded_by = $new_event_id
|
||||
// We need to break this into separate operations because Neo4j's UNWIND
|
||||
// produces zero rows for empty arrays, which stops query execution.
|
||||
|
||||
// Create new event tracking node
|
||||
// WITH required after OPTIONAL MATCH + SET before CREATE
|
||||
WITH old
|
||||
// Step 1: Create the ProcessedSocialEvent and NostrUser nodes
|
||||
createCypher := `
|
||||
// Get or create author node first
|
||||
MERGE (author:NostrUser {pubkey: $author_pubkey})
|
||||
ON CREATE SET author.created_at = timestamp()
|
||||
|
||||
// Create new ProcessedSocialEvent tracking node
|
||||
CREATE (new:ProcessedSocialEvent {
|
||||
event_id: $new_event_id,
|
||||
event_kind: 10000,
|
||||
@@ -399,53 +455,106 @@ func (p *SocialEventProcessor) updateMuteListGraph(ctx context.Context, params U
|
||||
superseded_by: null
|
||||
})
|
||||
|
||||
// WITH required to transition from CREATE to MERGE
|
||||
WITH new
|
||||
|
||||
// Get or create author node
|
||||
MERGE (author:NostrUser {pubkey: $author_pubkey})
|
||||
|
||||
// Update unchanged MUTES relationships to point to new event
|
||||
WITH author
|
||||
OPTIONAL MATCH (author)-[unchanged:MUTES]->(muted:NostrUser)
|
||||
WHERE unchanged.created_by_event = $old_event_id
|
||||
AND NOT muted.pubkey IN $removed_mutes
|
||||
SET unchanged.created_by_event = $new_event_id,
|
||||
unchanged.created_at = $created_at
|
||||
|
||||
// Remove old MUTES relationships
|
||||
WITH author
|
||||
OPTIONAL MATCH (author)-[old_mutes:MUTES]->(muted:NostrUser)
|
||||
WHERE old_mutes.created_by_event = $old_event_id
|
||||
AND muted.pubkey IN $removed_mutes
|
||||
DELETE old_mutes
|
||||
|
||||
// Create new MUTES relationships
|
||||
WITH author
|
||||
UNWIND $added_mutes AS muted_pubkey
|
||||
MERGE (muted:NostrUser {pubkey: muted_pubkey})
|
||||
MERGE (author)-[new_mutes:MUTES]->(muted)
|
||||
ON CREATE SET
|
||||
new_mutes.created_by_event = $new_event_id,
|
||||
new_mutes.created_at = $created_at,
|
||||
new_mutes.relay_received_at = timestamp()
|
||||
ON MATCH SET
|
||||
new_mutes.created_by_event = $new_event_id,
|
||||
new_mutes.created_at = $created_at
|
||||
RETURN author.pubkey AS author_pubkey
|
||||
`
|
||||
|
||||
cypherParams := map[string]any{
|
||||
"author_pubkey": params.AuthorPubkey,
|
||||
"new_event_id": params.NewEventID,
|
||||
"old_event_id": params.OldEventID,
|
||||
"created_at": params.CreatedAt,
|
||||
"total_mutes": params.TotalMutes,
|
||||
"added_mutes": params.AddedMutes,
|
||||
"removed_mutes": params.RemovedMutes,
|
||||
createParams := map[string]any{
|
||||
"author_pubkey": params.AuthorPubkey,
|
||||
"new_event_id": params.NewEventID,
|
||||
"created_at": params.CreatedAt,
|
||||
"total_mutes": params.TotalMutes,
|
||||
}
|
||||
|
||||
_, err := p.db.ExecuteWrite(ctx, cypher, cypherParams)
|
||||
return err
|
||||
_, err := p.db.ExecuteWrite(ctx, createCypher, createParams)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create ProcessedSocialEvent: %w", err)
|
||||
}
|
||||
|
||||
// Step 2: Mark old event as superseded (if it exists)
|
||||
if params.OldEventID != "" {
|
||||
supersedeCypher := `
|
||||
MATCH (old:ProcessedSocialEvent {event_id: $old_event_id})
|
||||
SET old.superseded_by = $new_event_id
|
||||
`
|
||||
supersedeParams := map[string]any{
|
||||
"old_event_id": params.OldEventID,
|
||||
"new_event_id": params.NewEventID,
|
||||
}
|
||||
p.db.ExecuteWrite(ctx, supersedeCypher, supersedeParams)
|
||||
|
||||
// Step 3: Update unchanged MUTES to point to new event
|
||||
// Always update relationships that aren't being removed
|
||||
updateCypher := `
|
||||
MATCH (author:NostrUser {pubkey: $author_pubkey})-[m:MUTES]->(muted:NostrUser)
|
||||
WHERE m.created_by_event = $old_event_id
|
||||
AND NOT muted.pubkey IN $removed_mutes
|
||||
SET m.created_by_event = $new_event_id,
|
||||
m.created_at = $created_at
|
||||
`
|
||||
updateParams := map[string]any{
|
||||
"author_pubkey": params.AuthorPubkey,
|
||||
"old_event_id": params.OldEventID,
|
||||
"new_event_id": params.NewEventID,
|
||||
"created_at": params.CreatedAt,
|
||||
"removed_mutes": params.RemovedMutes,
|
||||
}
|
||||
p.db.ExecuteWrite(ctx, updateCypher, updateParams)
|
||||
|
||||
// Step 4: Remove MUTES for removed mutes
|
||||
if len(params.RemovedMutes) > 0 {
|
||||
removeCypher := `
|
||||
MATCH (author:NostrUser {pubkey: $author_pubkey})-[m:MUTES]->(muted:NostrUser)
|
||||
WHERE m.created_by_event = $old_event_id
|
||||
AND muted.pubkey IN $removed_mutes
|
||||
DELETE m
|
||||
`
|
||||
removeParams := map[string]any{
|
||||
"author_pubkey": params.AuthorPubkey,
|
||||
"old_event_id": params.OldEventID,
|
||||
"removed_mutes": params.RemovedMutes,
|
||||
}
|
||||
p.db.ExecuteWrite(ctx, removeCypher, removeParams)
|
||||
}
|
||||
}
|
||||
|
||||
// Step 5: Create new MUTES relationships for added mutes
|
||||
// Process in batches to avoid memory issues
|
||||
const batchSize = 500
|
||||
for i := 0; i < len(params.AddedMutes); i += batchSize {
|
||||
end := i + batchSize
|
||||
if end > len(params.AddedMutes) {
|
||||
end = len(params.AddedMutes)
|
||||
}
|
||||
batch := params.AddedMutes[i:end]
|
||||
|
||||
mutesCypher := `
|
||||
MATCH (author:NostrUser {pubkey: $author_pubkey})
|
||||
UNWIND $added_mutes AS muted_pubkey
|
||||
MERGE (muted:NostrUser {pubkey: muted_pubkey})
|
||||
ON CREATE SET muted.created_at = timestamp()
|
||||
MERGE (author)-[m:MUTES]->(muted)
|
||||
ON CREATE SET
|
||||
m.created_by_event = $new_event_id,
|
||||
m.created_at = $created_at,
|
||||
m.relay_received_at = timestamp()
|
||||
ON MATCH SET
|
||||
m.created_by_event = $new_event_id,
|
||||
m.created_at = $created_at
|
||||
`
|
||||
|
||||
mutesParams := map[string]any{
|
||||
"author_pubkey": params.AuthorPubkey,
|
||||
"new_event_id": params.NewEventID,
|
||||
"created_at": params.CreatedAt,
|
||||
"added_mutes": batch,
|
||||
}
|
||||
|
||||
if _, err := p.db.ExecuteWrite(ctx, mutesCypher, mutesParams); err != nil {
|
||||
return fmt.Errorf("failed to create MUTES batch %d-%d: %w", i, end, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getLatestSocialEvent retrieves the most recent non-superseded event of a given kind for a pubkey
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
@@ -14,31 +16,16 @@ import (
|
||||
)
|
||||
|
||||
// TestSocialEventProcessor tests the social event processor with kinds 0, 3, 1984, 10000
|
||||
// Uses the shared testDB instance from testmain_test.go to avoid auth rate limiting
|
||||
func TestSocialEventProcessor(t *testing.T) {
|
||||
// Skip if Neo4j is not available
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Create test database
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
ctx := context.Background()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Wait for database to be ready
|
||||
<-db.Ready()
|
||||
|
||||
// Wipe database to ensure clean state for tests
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
// Clean database for this test
|
||||
cleanTestDatabase()
|
||||
|
||||
// Generate test keypairs
|
||||
alice := generateTestKeypair(t, "alice")
|
||||
@@ -52,36 +39,36 @@ func TestSocialEventProcessor(t *testing.T) {
|
||||
baseTimestamp := timestamp.Now().V
|
||||
|
||||
t.Run("Kind0_ProfileMetadata", func(t *testing.T) {
|
||||
testProfileMetadata(t, ctx, db, alice, baseTimestamp)
|
||||
testProfileMetadata(t, ctx, testDB, alice, baseTimestamp)
|
||||
})
|
||||
|
||||
t.Run("Kind3_ContactList_Initial", func(t *testing.T) {
|
||||
testContactListInitial(t, ctx, db, alice, bob, charlie, baseTimestamp+1)
|
||||
testContactListInitial(t, ctx, testDB, alice, bob, charlie, baseTimestamp+1)
|
||||
})
|
||||
|
||||
t.Run("Kind3_ContactList_Update_AddFollow", func(t *testing.T) {
|
||||
testContactListUpdate(t, ctx, db, alice, bob, charlie, dave, baseTimestamp+2)
|
||||
testContactListUpdate(t, ctx, testDB, alice, bob, charlie, dave, baseTimestamp+2)
|
||||
})
|
||||
|
||||
t.Run("Kind3_ContactList_Update_RemoveFollow", func(t *testing.T) {
|
||||
testContactListRemove(t, ctx, db, alice, bob, charlie, dave, baseTimestamp+3)
|
||||
testContactListRemove(t, ctx, testDB, alice, bob, charlie, dave, baseTimestamp+3)
|
||||
})
|
||||
|
||||
t.Run("Kind3_ContactList_OlderEventRejected", func(t *testing.T) {
|
||||
// Use timestamp BEFORE the initial contact list to test rejection
|
||||
testContactListOlderRejected(t, ctx, db, alice, bob, baseTimestamp)
|
||||
testContactListOlderRejected(t, ctx, testDB, alice, bob, baseTimestamp)
|
||||
})
|
||||
|
||||
t.Run("Kind10000_MuteList", func(t *testing.T) {
|
||||
testMuteList(t, ctx, db, alice, eve)
|
||||
testMuteList(t, ctx, testDB, alice, eve)
|
||||
})
|
||||
|
||||
t.Run("Kind1984_Reports", func(t *testing.T) {
|
||||
testReports(t, ctx, db, alice, bob, eve)
|
||||
testReports(t, ctx, testDB, alice, bob, eve)
|
||||
})
|
||||
|
||||
t.Run("VerifyGraphState", func(t *testing.T) {
|
||||
verifyFinalGraphState(t, ctx, db, alice, bob, charlie, dave, eve)
|
||||
verifyFinalGraphState(t, ctx, testDB, alice, bob, charlie, dave, eve)
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
//go:build integration
|
||||
// +build integration
|
||||
|
||||
package neo4j
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"git.mleku.dev/mleku/nostr/interfaces/signer/p8k"
|
||||
@@ -12,37 +13,25 @@ import (
|
||||
// RemoveSubscription, ClearSubscriptions) is handled at the app layer, not the
|
||||
// database layer. Tests for those methods have been removed.
|
||||
|
||||
// All tests in this file use the shared testDB instance from testmain_test.go
|
||||
// to avoid Neo4j authentication rate limiting from too many connections.
|
||||
|
||||
func TestMarkers_SetGetDelete(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
cleanTestDatabase()
|
||||
|
||||
// Set a marker
|
||||
key := "test-marker"
|
||||
value := []byte("test-value-123")
|
||||
if err := db.SetMarker(key, value); err != nil {
|
||||
if err := testDB.SetMarker(key, value); err != nil {
|
||||
t.Fatalf("Failed to set marker: %v", err)
|
||||
}
|
||||
|
||||
// Get the marker
|
||||
retrieved, err := db.GetMarker(key)
|
||||
retrieved, err := testDB.GetMarker(key)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get marker: %v", err)
|
||||
}
|
||||
@@ -52,11 +41,11 @@ func TestMarkers_SetGetDelete(t *testing.T) {
|
||||
|
||||
// Update the marker
|
||||
newValue := []byte("updated-value")
|
||||
if err := db.SetMarker(key, newValue); err != nil {
|
||||
if err := testDB.SetMarker(key, newValue); err != nil {
|
||||
t.Fatalf("Failed to update marker: %v", err)
|
||||
}
|
||||
|
||||
retrieved, err = db.GetMarker(key)
|
||||
retrieved, err = testDB.GetMarker(key)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get updated marker: %v", err)
|
||||
}
|
||||
@@ -65,12 +54,12 @@ func TestMarkers_SetGetDelete(t *testing.T) {
|
||||
}
|
||||
|
||||
// Delete the marker
|
||||
if err := db.DeleteMarker(key); err != nil {
|
||||
if err := testDB.DeleteMarker(key); err != nil {
|
||||
t.Fatalf("Failed to delete marker: %v", err)
|
||||
}
|
||||
|
||||
// Verify marker is deleted
|
||||
_, err = db.GetMarker(key)
|
||||
_, err = testDB.GetMarker(key)
|
||||
if err == nil {
|
||||
t.Fatal("Expected error when getting deleted marker")
|
||||
}
|
||||
@@ -79,25 +68,12 @@ func TestMarkers_SetGetDelete(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMarkers_GetNonExistent(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
// Try to get non-existent marker
|
||||
_, err = db.GetMarker("non-existent-marker")
|
||||
// Try to get non-existent marker (don't wipe - just test non-existent key)
|
||||
_, err := testDB.GetMarker("non-existent-marker-unique-12345")
|
||||
if err == nil {
|
||||
t.Fatal("Expected error when getting non-existent marker")
|
||||
}
|
||||
@@ -106,35 +82,18 @@ func TestMarkers_GetNonExistent(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSerial_GetNextSerial(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
// Get first serial
|
||||
serial1, err := db.getNextSerial()
|
||||
serial1, err := testDB.getNextSerial()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get first serial: %v", err)
|
||||
}
|
||||
|
||||
// Get second serial
|
||||
serial2, err := db.getNextSerial()
|
||||
serial2, err := testDB.getNextSerial()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get second serial: %v", err)
|
||||
}
|
||||
@@ -147,7 +106,7 @@ func TestSerial_GetNextSerial(t *testing.T) {
|
||||
// Get multiple more serials and verify they're all unique and increasing
|
||||
var serials []uint64
|
||||
for i := 0; i < 10; i++ {
|
||||
s, err := db.getNextSerial()
|
||||
s, err := testDB.getNextSerial()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serial %d: %v", i, err)
|
||||
}
|
||||
@@ -164,53 +123,28 @@ func TestSerial_GetNextSerial(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestDatabaseReady(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
// Database should already be ready (testDB is initialized in TestMain)
|
||||
select {
|
||||
case <-testDB.Ready():
|
||||
t.Logf("✓ Database ready signal works correctly")
|
||||
default:
|
||||
t.Fatal("Expected database to be ready")
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Wait for ready
|
||||
<-db.Ready()
|
||||
|
||||
// Database should be ready now
|
||||
t.Logf("✓ Database ready signal works correctly")
|
||||
}
|
||||
|
||||
func TestIdentity(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
// Wipe to ensure clean state
|
||||
if err := db.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
cleanTestDatabase()
|
||||
|
||||
// Get identity (creates if not exists)
|
||||
secret1, err := db.GetOrCreateRelayIdentitySecret()
|
||||
secret1, err := testDB.GetOrCreateRelayIdentitySecret()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get identity: %v", err)
|
||||
}
|
||||
@@ -219,7 +153,7 @@ func TestIdentity(t *testing.T) {
|
||||
}
|
||||
|
||||
// Get identity again (should return same one)
|
||||
secret2, err := db.GetOrCreateRelayIdentitySecret()
|
||||
secret2, err := testDB.GetOrCreateRelayIdentitySecret()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get identity second time: %v", err)
|
||||
}
|
||||
@@ -241,38 +175,25 @@ func TestIdentity(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestWipe(t *testing.T) {
|
||||
neo4jURI := os.Getenv("ORLY_NEO4J_URI")
|
||||
if neo4jURI == "" {
|
||||
t.Skip("Skipping Neo4j test: ORLY_NEO4J_URI not set")
|
||||
if testDB == nil {
|
||||
t.Skip("Neo4j not available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
tempDir := t.TempDir()
|
||||
db, err := New(ctx, cancel, tempDir, "debug")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
<-db.Ready()
|
||||
|
||||
signer, _ := p8k.New()
|
||||
signer.Generate()
|
||||
|
||||
// Add some data
|
||||
if err := db.AddNIP43Member(signer.Pub(), "test"); err != nil {
|
||||
if err := testDB.AddNIP43Member(signer.Pub(), "test"); err != nil {
|
||||
t.Fatalf("Failed to add member: %v", err)
|
||||
}
|
||||
|
||||
// Wipe the database
|
||||
if err := db.Wipe(); err != nil {
|
||||
if err := testDB.Wipe(); err != nil {
|
||||
t.Fatalf("Failed to wipe database: %v", err)
|
||||
}
|
||||
|
||||
// Verify data is gone
|
||||
isMember, _ := db.IsNIP43Member(signer.Pub())
|
||||
isMember, _ := testDB.IsNIP43Member(signer.Pub())
|
||||
if isMember {
|
||||
t.Fatal("Expected data to be wiped")
|
||||
}
|
||||
|
||||
@@ -69,13 +69,15 @@ func TestMain(m *testing.M) {
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
// cleanTestDatabase removes all nodes and relationships
|
||||
// cleanTestDatabase removes all nodes and relationships, then re-initializes
|
||||
func cleanTestDatabase() {
|
||||
ctx := context.Background()
|
||||
// Delete all nodes and relationships
|
||||
_, _ = testDB.ExecuteWrite(ctx, "MATCH (n) DETACH DELETE n", nil)
|
||||
// Clear migration markers so migrations can run fresh
|
||||
_, _ = testDB.ExecuteWrite(ctx, "MATCH (m:Migration) DELETE m", nil)
|
||||
// Re-apply schema (constraints and indexes)
|
||||
_ = testDB.applySchema(ctx)
|
||||
// Re-initialize serial counter
|
||||
_ = testDB.initSerialCounter()
|
||||
}
|
||||
|
||||
// setupTestEvent creates a test event directly in Neo4j for testing queries
|
||||
|
||||
@@ -1 +1 @@
|
||||
v0.34.5
|
||||
v0.34.6
|
||||
240
scripts/test-neo4j-integration.sh
Executable file
240
scripts/test-neo4j-integration.sh
Executable file
@@ -0,0 +1,240 @@
|
||||
#!/bin/bash
|
||||
# Neo4j Integration Test Runner
|
||||
#
|
||||
# This script runs the Neo4j integration tests by:
|
||||
# 1. Checking if Docker/Docker Compose are available
|
||||
# 2. Starting a Neo4j container
|
||||
# 3. Running the integration tests
|
||||
# 4. Stopping the container
|
||||
#
|
||||
# Usage:
|
||||
# ./scripts/test-neo4j-integration.sh
|
||||
#
|
||||
# Environment variables:
|
||||
# SKIP_DOCKER_INSTALL=1 - Skip Docker installation check
|
||||
# KEEP_CONTAINER=1 - Don't stop container after tests
|
||||
# NEO4J_TEST_REQUIRED=1 - Fail if Docker/Neo4j not available (for local testing)
|
||||
#
|
||||
# Exit codes:
|
||||
# 0 - Tests passed OR Docker/Neo4j not available (soft fail for CI)
|
||||
# 1 - Tests failed (only when Neo4j is available)
|
||||
# 2 - Tests required but Docker/Neo4j not available (when NEO4J_TEST_REQUIRED=1)
|
||||
|
||||
set -e
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
|
||||
COMPOSE_FILE="$PROJECT_ROOT/pkg/neo4j/docker-compose.yaml"
|
||||
CONTAINER_NAME="neo4j-test"
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
log_info() {
|
||||
echo -e "${GREEN}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
log_warn() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||
}
|
||||
|
||||
log_error() {
|
||||
echo -e "${RED}[ERROR]${NC} $1"
|
||||
}
|
||||
|
||||
log_skip() {
|
||||
echo -e "${BLUE}[SKIP]${NC} $1"
|
||||
}
|
||||
|
||||
# Soft fail - exit 0 for CI compatibility unless NEO4J_TEST_REQUIRED is set
|
||||
soft_fail() {
|
||||
local message="$1"
|
||||
if [ "$NEO4J_TEST_REQUIRED" = "1" ]; then
|
||||
log_error "$message"
|
||||
log_error "NEO4J_TEST_REQUIRED=1 is set, failing"
|
||||
exit 2
|
||||
else
|
||||
log_skip "$message"
|
||||
log_skip "Neo4j integration tests skipped (set NEO4J_TEST_REQUIRED=1 to require)"
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
# Check if Docker is installed and running
|
||||
check_docker() {
|
||||
if ! command -v docker &> /dev/null; then
|
||||
soft_fail "Docker is not installed"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if ! docker info &> /dev/null 2>&1; then
|
||||
soft_fail "Docker daemon is not running or permission denied"
|
||||
return 1
|
||||
fi
|
||||
|
||||
log_info "Docker is available"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Check if Docker Compose is installed
|
||||
check_docker_compose() {
|
||||
# Try docker compose (v2) first, then docker-compose (v1)
|
||||
if docker compose version &> /dev/null 2>&1; then
|
||||
COMPOSE_CMD="docker compose"
|
||||
log_info "Using Docker Compose v2"
|
||||
return 0
|
||||
elif command -v docker-compose &> /dev/null; then
|
||||
COMPOSE_CMD="docker-compose"
|
||||
log_info "Using Docker Compose v1"
|
||||
return 0
|
||||
else
|
||||
soft_fail "Docker Compose is not installed"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Start Neo4j container
|
||||
start_neo4j() {
|
||||
log_info "Starting Neo4j container..."
|
||||
|
||||
cd "$PROJECT_ROOT"
|
||||
|
||||
# Try to start container, soft fail if it doesn't work
|
||||
if ! $COMPOSE_CMD -f "$COMPOSE_FILE" up -d 2>&1; then
|
||||
soft_fail "Failed to start Neo4j container"
|
||||
return 1
|
||||
fi
|
||||
|
||||
log_info "Waiting for Neo4j to become healthy..."
|
||||
|
||||
# Wait for container to be healthy (up to 2 minutes)
|
||||
local timeout=120
|
||||
local elapsed=0
|
||||
|
||||
while [ $elapsed -lt $timeout ]; do
|
||||
local health=$(docker inspect --format='{{.State.Health.Status}}' "$CONTAINER_NAME" 2>/dev/null || echo "not_found")
|
||||
|
||||
if [ "$health" = "healthy" ]; then
|
||||
log_info "Neo4j is healthy and ready"
|
||||
return 0
|
||||
elif [ "$health" = "not_found" ]; then
|
||||
log_warn "Container $CONTAINER_NAME not found, retrying..."
|
||||
fi
|
||||
|
||||
echo -n "."
|
||||
sleep 2
|
||||
elapsed=$((elapsed + 2))
|
||||
done
|
||||
|
||||
echo ""
|
||||
log_warn "Neo4j failed to become healthy within $timeout seconds"
|
||||
log_info "Container logs:"
|
||||
docker logs "$CONTAINER_NAME" --tail 20 2>/dev/null || true
|
||||
|
||||
# Clean up failed container
|
||||
$COMPOSE_CMD -f "$COMPOSE_FILE" down -v 2>/dev/null || true
|
||||
|
||||
soft_fail "Neo4j container failed to start properly"
|
||||
return 1
|
||||
}
|
||||
|
||||
# Stop Neo4j container
|
||||
stop_neo4j() {
|
||||
if [ "$KEEP_CONTAINER" = "1" ]; then
|
||||
log_info "KEEP_CONTAINER=1, leaving Neo4j running"
|
||||
return 0
|
||||
fi
|
||||
|
||||
log_info "Stopping Neo4j container..."
|
||||
cd "$PROJECT_ROOT"
|
||||
$COMPOSE_CMD -f "$COMPOSE_FILE" down -v 2>/dev/null || true
|
||||
}
|
||||
|
||||
# Run integration tests
|
||||
run_tests() {
|
||||
log_info "Running Neo4j integration tests..."
|
||||
|
||||
cd "$PROJECT_ROOT"
|
||||
|
||||
# Set environment variables for tests
|
||||
# Note: Tests use ORLY_NEO4J_* prefix (consistent with app config)
|
||||
export ORLY_NEO4J_URI="bolt://localhost:7687"
|
||||
export ORLY_NEO4J_USER="neo4j"
|
||||
export ORLY_NEO4J_PASSWORD="testpassword"
|
||||
# Also set NEO4J_TEST_URI for testmain_test.go compatibility
|
||||
export NEO4J_TEST_URI="bolt://localhost:7687"
|
||||
|
||||
# Run tests with integration tag
|
||||
if go test -tags=integration ./pkg/neo4j/... -v -timeout 5m; then
|
||||
log_info "All integration tests passed!"
|
||||
return 0
|
||||
else
|
||||
log_error "Some integration tests failed"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
log_info "Neo4j Integration Test Runner"
|
||||
log_info "=============================="
|
||||
|
||||
if [ "$NEO4J_TEST_REQUIRED" = "1" ]; then
|
||||
log_info "NEO4J_TEST_REQUIRED=1 - tests will fail if Neo4j unavailable"
|
||||
else
|
||||
log_info "NEO4J_TEST_REQUIRED not set - tests will skip if Neo4j unavailable"
|
||||
fi
|
||||
|
||||
# Check prerequisites (these will soft_fail if not available)
|
||||
check_docker || exit $?
|
||||
check_docker_compose || exit $?
|
||||
|
||||
# Check if compose file exists
|
||||
if [ ! -f "$COMPOSE_FILE" ]; then
|
||||
soft_fail "Docker Compose file not found: $COMPOSE_FILE"
|
||||
fi
|
||||
|
||||
# Track if we need to stop the container
|
||||
local need_cleanup=0
|
||||
|
||||
# Check if container is already running
|
||||
if docker ps --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER_NAME}$"; then
|
||||
log_info "Neo4j container is already running"
|
||||
else
|
||||
start_neo4j || exit $?
|
||||
need_cleanup=1
|
||||
fi
|
||||
|
||||
# Run tests
|
||||
local test_result=0
|
||||
run_tests || test_result=1
|
||||
|
||||
# Cleanup
|
||||
if [ $need_cleanup -eq 1 ]; then
|
||||
stop_neo4j
|
||||
fi
|
||||
|
||||
if [ $test_result -eq 0 ]; then
|
||||
log_info "Integration tests completed successfully"
|
||||
else
|
||||
log_error "Integration tests failed"
|
||||
fi
|
||||
|
||||
exit $test_result
|
||||
}
|
||||
|
||||
# Handle cleanup on script exit
|
||||
cleanup() {
|
||||
if [ "$KEEP_CONTAINER" != "1" ] && docker ps --format '{{.Names}}' 2>/dev/null | grep -q "^${CONTAINER_NAME}$"; then
|
||||
log_warn "Cleaning up after interrupt..."
|
||||
stop_neo4j
|
||||
fi
|
||||
}
|
||||
|
||||
trap cleanup EXIT INT TERM
|
||||
|
||||
main "$@"
|
||||
Reference in New Issue
Block a user