Compare commits

..

2 Commits

Author SHA1 Message Date
woikos
489b9f4593 Improve release command VPS deployment docs (v0.48.14)
Some checks failed
Go / build-and-release (push) Has been cancelled
- Clarify ARM64 build-on-remote approach for relay.orly.dev
- Remove unnecessary git stash from deployment command
- Add note about setcap needing reapplication after binary rebuild
- Use explicit GOPATH and go binary path for clarity

Files modified:
- .claude/commands/release.md: Improved deployment step documentation
- pkg/version/version: v0.48.13 -> v0.48.14

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-11 11:14:20 +01:00
woikos
604d759a6a Fix web UI not showing cached events and add Blossom toggle (v0.48.13)
Some checks failed
Go / build-and-release (push) Has been cancelled
- Fix fetchEvents() discarding IndexedDB cached events instead of merging with relay results
- Add mergeAndDeduplicateEvents() helper to combine and dedupe events by ID
- Add ORLY_BLOSSOM_ENABLED config option to disable Blossom server
- Make fetch-kinds.js fall back to existing eventKinds.js when network unavailable

Files modified:
- app/web/src/nostr.js: Fix event caching, add merge helper
- app/web/scripts/fetch-kinds.js: Add fallback for network failures
- app/config/config.go: Add BlossomEnabled config field
- app/main.go: Check BlossomEnabled before initializing Blossom server
- pkg/version/version: Bump to v0.48.13

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-01-11 04:55:55 +01:00
8 changed files with 86 additions and 39 deletions

View File

@@ -49,10 +49,12 @@ If no argument provided, default to `patch`.
GIT_SSH_COMMAND="ssh -i ~/.ssh/gitmlekudev" git push ssh://mleku@git.mleku.dev:2222/mleku/next.orly.dev.git main --tags
```
11. **Deploy to VPS** by running:
```
ssh relay.orly.dev 'cd ~/src/next.orly.dev && git stash && git pull origin main && export PATH=$PATH:~/go/bin && CGO_ENABLED=0 go build -o ~/.local/bin/next.orly.dev && sudo /usr/sbin/setcap cap_net_bind_service=+ep ~/.local/bin/next.orly.dev && sudo systemctl restart orly && ~/.local/bin/next.orly.dev version'
11. **Deploy to relay.orly.dev** (ARM64):
Build on remote (faster than uploading cross-compiled binary due to slow local bandwidth):
```bash
ssh relay.orly.dev 'cd ~/src/next.orly.dev && git pull origin main && GOPATH=$HOME CGO_ENABLED=0 ~/go/bin/go build -o ~/.local/bin/next.orly.dev && sudo /usr/sbin/setcap cap_net_bind_service=+ep ~/.local/bin/next.orly.dev && sudo systemctl restart orly && ~/.local/bin/next.orly.dev version'
```
Note: setcap must be re-applied after each binary rebuild to allow binding to ports 80/443.
12. **Report completion** with the new version and commit hash

View File

@@ -72,7 +72,8 @@ type C struct {
FollowsThrottlePerEvent time.Duration `env:"ORLY_FOLLOWS_THROTTLE_INCREMENT" default:"200ms" usage:"delay added per event for non-followed users"`
FollowsThrottleMaxDelay time.Duration `env:"ORLY_FOLLOWS_THROTTLE_MAX" default:"60s" usage:"maximum throttle delay cap"`
// Blossom blob storage service level settings
// Blossom blob storage service settings
BlossomEnabled bool `env:"ORLY_BLOSSOM_ENABLED" default:"true" usage:"enable Blossom blob storage server (only works with Badger backend)"`
BlossomServiceLevels string `env:"ORLY_BLOSSOM_SERVICE_LEVELS" usage:"comma-separated list of service levels in format: name:storage_mb_per_sat_per_month (e.g., basic:1,premium:10)"`
// Web UI and dev mode settings

View File

@@ -435,7 +435,7 @@ func Run(
// Initialize Blossom blob storage server (only for Badger backend)
// MUST be done before UserInterface() which registers routes
if badgerDB, ok := db.(*database.D); ok {
if badgerDB, ok := db.(*database.D); ok && cfg.BlossomEnabled {
log.I.F("Badger backend detected, initializing Blossom server...")
if l.blossomServer, err = initializeBlossomServer(ctx, cfg, badgerDB); err != nil {
log.E.F("failed to initialize blossom server: %v", err)
@@ -445,6 +445,8 @@ func Run(
} else {
log.W.F("blossom server initialization returned nil without error")
}
} else if !cfg.BlossomEnabled {
log.I.F("Blossom server disabled via ORLY_BLOSSOM_ENABLED=false")
} else {
log.I.F("Non-Badger backend detected (type: %T), Blossom server not available", db)
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -6,25 +6,35 @@
import { fileURLToPath } from 'url';
import { dirname, join } from 'path';
import { writeFileSync } from 'fs';
import { writeFileSync, existsSync } from 'fs';
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const KINDS_URL = 'https://git.mleku.dev/mleku/nostr/raw/branch/main/encoders/kind/kinds.json';
const OUTPUT_PATH = join(__dirname, '..', 'src', 'eventKinds.js');
async function fetchKinds() {
console.log(`Fetching kinds from ${KINDS_URL}...`);
const response = await fetch(KINDS_URL);
if (!response.ok) {
throw new Error(`Failed to fetch kinds.json: ${response.status} ${response.statusText}`);
try {
const response = await fetch(KINDS_URL, { timeout: 10000 });
if (!response.ok) {
throw new Error(`HTTP ${response.status} ${response.statusText}`);
}
const data = await response.json();
console.log(`Fetched ${Object.keys(data.kinds).length} kinds (version: ${data.version})`);
return data;
} catch (error) {
// Check if we have an existing eventKinds.js we can use
if (existsSync(OUTPUT_PATH)) {
console.warn(`Warning: Could not fetch kinds.json (${error.message})`);
console.log(`Using existing ${OUTPUT_PATH}`);
return null; // Signal to skip generation
}
throw new Error(`Failed to fetch kinds.json and no existing file: ${error.message}`);
}
const data = await response.json();
console.log(`Fetched ${Object.keys(data.kinds).length} kinds (version: ${data.version})`);
return data;
}
function generateEventKinds(data) {
@@ -202,14 +212,18 @@ export const kindCategories = [
async function main() {
try {
const data = await fetchKinds();
// If fetchKinds returned null, we're using the existing file
if (data === null) {
console.log('Skipping generation, using existing eventKinds.js');
return;
}
const kinds = generateEventKinds(data);
const js = generateJS(kinds, data);
// Write to src/eventKinds.js
const outPath = join(__dirname, '..', 'src', 'eventKinds.js');
writeFileSync(outPath, js);
console.log(`Generated ${outPath} with ${kinds.length} kinds`);
writeFileSync(OUTPUT_PATH, js);
console.log(`Generated ${OUTPUT_PATH} with ${kinds.length} kinds`);
} catch (error) {
console.error('Error:', error.message);
process.exit(1);

View File

@@ -179,6 +179,28 @@ export class Nip07Signer {
}
}
// Merge two event arrays, deduplicating by event id.
// For a duplicate id, the newer event (by created_at) wins; on a
// created_at tie the relay copy takes precedence over the cached one.
// Result is ordered newest-first.
function mergeAndDeduplicateEvents(cached, relay) {
  // Seed the map with cached events; duplicate ids within `cached`
  // resolve the same way as sequential set() calls (last one wins).
  const byId = new Map(cached.map((evt) => [evt.id, evt]));
  // Overlay relay events, keeping whichever copy is newer (>= lets
  // the relay copy replace an equally-old cached copy).
  for (const evt of relay) {
    const prior = byId.get(evt.id);
    if (prior === undefined || evt.created_at >= prior.created_at) {
      byId.set(evt.id, evt);
    }
  }
  // Newest first: descending created_at.
  return [...byId.values()].sort(
    (first, second) => second.created_at - first.created_at
  );
}
// IndexedDB helpers for unified event storage
// This provides a local cache that all components can access
const DB_NAME = "nostrCache";
@@ -573,9 +595,10 @@ export async function fetchEvents(filters, options = {}) {
} = options;
// Try to get cached events first if requested
let cachedEvents = [];
if (useCache) {
try {
const cachedEvents = await queryEventsFromDB(filters);
cachedEvents = await queryEventsFromDB(filters);
if (cachedEvents.length > 0) {
console.log(`Found ${cachedEvents.length} cached events in IndexedDB`);
}
@@ -585,17 +608,19 @@ export async function fetchEvents(filters, options = {}) {
}
return new Promise((resolve, reject) => {
const events = [];
const relayEvents = [];
const timeoutId = setTimeout(() => {
console.log(`Timeout reached after ${timeout}ms, returning ${events.length} events`);
console.log(`Timeout reached after ${timeout}ms, returning ${relayEvents.length} relay events`);
sub.close();
// Store all received events in IndexedDB before resolving
if (events.length > 0) {
putEvents(events).catch(e => console.warn("Failed to cache events", e));
if (relayEvents.length > 0) {
putEvents(relayEvents).catch(e => console.warn("Failed to cache events", e));
}
resolve(events);
// Merge cached events with relay events, deduplicate by id
const mergedEvents = mergeAndDeduplicateEvents(cachedEvents, relayEvents);
resolve(mergedEvents);
}, timeout);
try {
@@ -615,22 +640,25 @@ export async function fetchEvents(filters, options = {}) {
created_at: event.created_at,
content_preview: event.content?.substring(0, 50)
});
events.push(event);
relayEvents.push(event);
// Store event immediately in IndexedDB
putEvent(event).catch(e => console.warn("Failed to cache event", e));
},
oneose() {
console.log(`✅ EOSE received for REQ [${subId}], got ${events.length} events`);
console.log(`✅ EOSE received for REQ [${subId}], got ${relayEvents.length} relay events`);
clearTimeout(timeoutId);
sub.close();
// Store all events in IndexedDB before resolving
if (events.length > 0) {
putEvents(events).catch(e => console.warn("Failed to cache events", e));
if (relayEvents.length > 0) {
putEvents(relayEvents).catch(e => console.warn("Failed to cache events", e));
}
resolve(events);
// Merge cached events with relay events, deduplicate by id
const mergedEvents = mergeAndDeduplicateEvents(cachedEvents, relayEvents);
console.log(`Merged ${cachedEvents.length} cached + ${relayEvents.length} relay = ${mergedEvents.length} total events`);
resolve(mergedEvents);
}
}
);

View File

@@ -1 +1 @@
v0.48.12
v0.48.14