interim-docs-update #3
@@ -57,7 +57,32 @@
|
||||
"Bash(go clean:*)",
|
||||
"Bash(go mod tidy:*)",
|
||||
"Bash(./scripts/test-neo4j-integration.sh:*)",
|
||||
"Bash(docker compose:*)"
|
||||
"Bash(docker compose:*)",
|
||||
"Bash(sudo update-grub:*)",
|
||||
"Bash(lspci:*)",
|
||||
"Bash(lsmod:*)",
|
||||
"Bash(modinfo:*)",
|
||||
"Bash(apt-cache policy:*)",
|
||||
"WebFetch(domain:git.kernel.org)",
|
||||
"Bash(ip link:*)",
|
||||
"WebFetch(domain:www.laptopcentar.rs)",
|
||||
"WebFetch(domain:www.kupujemprodajem.com)",
|
||||
"WebFetch(domain:www.bcgroup-online.com)",
|
||||
"WebFetch(domain:www.monitor.rs)",
|
||||
"WebFetch(domain:www.protis.hr)",
|
||||
"Bash(apt-cache search:*)",
|
||||
"Bash(dkms status:*)",
|
||||
"Bash(sudo dkms build:*)",
|
||||
"Bash(sudo apt install:*)",
|
||||
"Bash(wget:*)",
|
||||
"Bash(ls:*)",
|
||||
"Bash(git clone:*)",
|
||||
"Bash(sudo make:*)",
|
||||
"Bash(sudo modprobe:*)",
|
||||
"Bash(update-desktop-database:*)",
|
||||
"Bash(CGO_ENABLED=0 go build:*)",
|
||||
"Bash(CGO_ENABLED=0 go test:*)",
|
||||
"Bash(git submodule:*)"
|
||||
],
|
||||
"deny": [],
|
||||
"ask": []
|
||||
|
||||
257
pkg/database/IMPORT_MEMORY_OPTIMIZATION_PLAN.md
Normal file
257
pkg/database/IMPORT_MEMORY_OPTIMIZATION_PLAN.md
Normal file
@@ -0,0 +1,257 @@
|
||||
# Import Memory Optimization Plan
|
||||
|
||||
## Goal
|
||||
|
||||
Constrain import memory utilization to ≤1.5GB to ensure system disk cache flushing completes adequately before continuing.
|
||||
|
||||
## Test Results (Baseline)
|
||||
|
||||
- **File**: `wot_reference.jsonl` (2.7 GB, ~2.16 million events)
|
||||
- **System**: 15 GB RAM, Linux
|
||||
- **Events Saved**: 2,130,545
|
||||
- **Total Time**: 48 minutes 16 seconds
|
||||
- **Average Rate**: 736 events/sec
|
||||
- **Peak Memory**: ~6.4 GB (42% of system RAM)
|
||||
|
||||
### Memory Timeline (Baseline)
|
||||
|
||||
| Time | Memory (RSS) | Events | Notes |
|
||||
|------|--------------|--------|-------|
|
||||
| Start | 95 MB | 0 | Initial state |
|
||||
| +10 min | 2.7 GB | 283k | Warming up |
|
||||
| +20 min | 4.1 GB | 475k | Memory growing |
|
||||
| +30 min | 5.2 GB | 720k | Peak approaching |
|
||||
| +35 min | 5.9 GB | 485k | Near peak |
|
||||
| +40 min | 5.6 GB | 1.3M | GC recovered memory |
|
||||
| +48 min | 6.4 GB | 2.1M | Final (42% of RAM) |
|
||||
|
||||
## Root Causes of Memory Growth
|
||||
|
||||
### 1. Badger Internal Caches (configured in `database.go`)
|
||||
|
||||
- Block cache: 1024 MB default
|
||||
- Index cache: 512 MB default
|
||||
- Memtables: 8 × 16 MB = 128 MB
|
||||
- Total baseline: ~1.6 GB just for configured caches
|
||||
|
||||
### 2. Badger Write Buffers
|
||||
|
||||
- L0 tables buffer (8 tables × 16 MB)
|
||||
- Value log writes accumulate until compaction
|
||||
|
||||
### 3. No Backpressure in Import Loop
|
||||
|
||||
- Events are written continuously without waiting for compaction
|
||||
- `debug.FreeOSMemory()` only runs every 5 seconds
|
||||
- Badger buffers writes faster than disk can flush
|
||||
|
||||
### 4. Transaction Overhead
|
||||
|
||||
- Each `SaveEvent` creates a transaction
|
||||
- Transactions have overhead that accumulates
|
||||
|
||||
## Proposed Mitigations
|
||||
|
||||
### Phase 1: Reduce Badger Cache Configuration for Import
|
||||
|
||||
Add import-specific configuration options in `app/config/config.go`:
|
||||
|
||||
```go
|
||||
ImportBlockCacheMB int `env:"ORLY_IMPORT_BLOCK_CACHE_MB" default:"256"`
|
||||
ImportIndexCacheMB int `env:"ORLY_IMPORT_INDEX_CACHE_MB" default:"128"`
|
||||
ImportMemTableSize int `env:"ORLY_IMPORT_MEMTABLE_SIZE_MB" default:"8"`
|
||||
```
|
||||
|
||||
For a 1.5GB target:
|
||||
|
||||
| Component | Size | Notes |
|
||||
|-----------|------|-------|
|
||||
| Block cache | 256 MB | Reduced from 1024 MB |
|
||||
| Index cache | 128 MB | Reduced from 512 MB |
|
||||
| Memtables | 4 × 8 MB = 32 MB | Reduced from 8 × 16 MB |
|
||||
| Serial cache | ~20 MB | Unchanged |
|
||||
| Working memory | ~200 MB | Buffer for processing |
|
||||
| **Total** | **~636 MB** | Leaves headroom for 1.5GB target |
|
||||
|
||||
### Phase 2: Add Batching with Sync to Import Loop
|
||||
|
||||
Modify `import_utils.go` to batch writes and force sync:
|
||||
|
||||
```go
|
||||
const (
|
||||
importBatchSize = 500 // Events per batch
|
||||
importSyncInterval = 2000 // Events before forcing sync
|
||||
importMemCheckEvents = 1000 // Events between memory checks
|
||||
importMaxMemoryMB = 1400 // Target max memory (MB)
|
||||
)
|
||||
|
||||
// In processJSONLEventsWithPolicy:
|
||||
var batchCount int
|
||||
for scan.Scan() {
|
||||
// ... existing event processing ...
|
||||
|
||||
batchCount++
|
||||
count++
|
||||
|
||||
// Force sync periodically to flush writes to disk
|
||||
if batchCount >= importSyncInterval {
|
||||
d.DB.Sync() // Force write to disk
|
||||
batchCount = 0
|
||||
}
|
||||
|
||||
// Memory pressure check
|
||||
if count % importMemCheckEvents == 0 {
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
heapMB := m.HeapAlloc / 1024 / 1024
|
||||
|
||||
if heapMB > importMaxMemoryMB {
|
||||
// Apply backpressure
|
||||
d.DB.Sync()
|
||||
runtime.GC()
|
||||
debug.FreeOSMemory()
|
||||
|
||||
// Wait for compaction to catch up
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Phase 3: Use Batch Transactions
|
||||
|
||||
Instead of one transaction per event, batch multiple events:
|
||||
|
||||
```go
|
||||
// Accumulate events for batch write
|
||||
const txnBatchSize = 100
|
||||
|
||||
type pendingWrite struct {
|
||||
idxs [][]byte
|
||||
compactKey []byte
|
||||
compactVal []byte
|
||||
graphKeys [][]byte
|
||||
}
|
||||
|
||||
var pendingWrites []pendingWrite
|
||||
|
||||
// In the event processing loop
|
||||
pendingWrites = append(pendingWrites, pw)
|
||||
|
||||
if len(pendingWrites) >= txnBatchSize {
|
||||
err = d.Update(func(txn *badger.Txn) error {
|
||||
for _, pw := range pendingWrites {
|
||||
for _, key := range pw.idxs {
|
||||
txn.Set(key, nil)
|
||||
}
|
||||
txn.Set(pw.compactKey, pw.compactVal)
|
||||
for _, gk := range pw.graphKeys {
|
||||
txn.Set(gk, nil)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
pendingWrites = pendingWrites[:0]
|
||||
}
|
||||
```
|
||||
|
||||
### Phase 4: Implement Adaptive Rate Limiting
|
||||
|
||||
```go
|
||||
type importRateLimiter struct {
|
||||
targetMemMB uint64
|
||||
checkInterval int
|
||||
baseDelay time.Duration
|
||||
maxDelay time.Duration
|
||||
}
|
||||
|
||||
func (r *importRateLimiter) maybeThrottle(eventCount int) {
|
||||
if eventCount % r.checkInterval != 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
heapMB := m.HeapAlloc / 1024 / 1024
|
||||
|
||||
if heapMB > r.targetMemMB {
|
||||
// Calculate delay proportional to overage
|
||||
overage := float64(heapMB - r.targetMemMB) / float64(r.targetMemMB)
|
||||
delay := time.Duration(float64(r.baseDelay) * (1 + overage*10))
|
||||
if delay > r.maxDelay {
|
||||
delay = r.maxDelay
|
||||
}
|
||||
|
||||
// Force GC and wait
|
||||
runtime.GC()
|
||||
debug.FreeOSMemory()
|
||||
time.Sleep(delay)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Implementation Order
|
||||
|
||||
1. **Quick Win**: Add `d.DB.Sync()` call every N events in import loop
|
||||
2. **Configuration**: Add environment variables for import-specific cache sizes
|
||||
3. **Batching**: Implement batch transactions to reduce overhead
|
||||
4. **Adaptive**: Add memory-aware rate limiting
|
||||
|
||||
## Expected Results
|
||||
|
||||
| Approach | Memory Target | Throughput Impact |
|
||||
|----------|---------------|-------------------|
|
||||
| Current | ~6 GB peak | 736 events/sec |
|
||||
| Phase 1 (cache reduction) | ~2 GB | ~700 events/sec |
|
||||
| Phase 2 (sync + GC) | ~1.5 GB | ~500 events/sec |
|
||||
| Phase 3 (batching) | ~1.5 GB | ~600 events/sec |
|
||||
| Phase 4 (adaptive) | ~1.4 GB | Variable |
|
||||
|
||||
## Files to Modify
|
||||
|
||||
1. `app/config/config.go` - Add import-specific config options
|
||||
2. `pkg/database/database.go` - Add import mode with reduced caches
|
||||
3. `pkg/database/import_utils.go` - Add batching, sync, and rate limiting
|
||||
4. `pkg/database/save-event.go` - Add batch save method (optional, for Phase 3)
|
||||
|
||||
## Environment Variables (Proposed)
|
||||
|
||||
```bash
|
||||
# Import-specific cache settings (only apply during import operations)
|
||||
ORLY_IMPORT_BLOCK_CACHE_MB=256 # Block cache size during import
|
||||
ORLY_IMPORT_INDEX_CACHE_MB=128 # Index cache size during import
|
||||
ORLY_IMPORT_MEMTABLE_SIZE_MB=8 # Memtable size during import
|
||||
|
||||
# Import rate limiting
|
||||
ORLY_IMPORT_SYNC_INTERVAL=2000 # Events between forced syncs
|
||||
ORLY_IMPORT_MAX_MEMORY_MB=1400 # Target max memory during import
|
||||
ORLY_IMPORT_BATCH_SIZE=100 # Events per transaction batch
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- The adaptive rate limiting (Phase 4) is the most robust solution but adds complexity
|
||||
- Phase 2 alone should achieve the 1.5GB target with acceptable throughput
|
||||
- Batch transactions (Phase 3) can improve throughput but require refactoring `SaveEvent`
|
||||
- Consider making these settings configurable so users can tune for their hardware
|
||||
|
||||
## Test Command
|
||||
|
||||
To re-run the import test with memory monitoring:
|
||||
|
||||
```bash
|
||||
# Start relay with import-optimized settings
|
||||
export ORLY_DATA_DIR=/tmp/orly-import-test
|
||||
export ORLY_ACL_MODE=none
|
||||
export ORLY_PORT=10548
|
||||
export ORLY_LOG_LEVEL=info
|
||||
./orly &
|
||||
|
||||
# Upload test file
|
||||
curl -X POST \
|
||||
-F "file=@/path/to/wot_reference.jsonl" \
|
||||
http://localhost:10548/api/import
|
||||
|
||||
# Monitor memory
|
||||
watch -n 5 'ps -p $(pgrep orly) -o pid,rss,pmem --no-headers'
|
||||
```
|
||||
@@ -6,9 +6,11 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/database/indexes"
|
||||
"next.orly.dev/pkg/database/indexes/types"
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
@@ -22,6 +24,14 @@ func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
|
||||
evB := make([]byte, 0, units.Mb)
|
||||
evBuf := bytes.NewBuffer(evB)
|
||||
|
||||
// Performance tracking
|
||||
startTime := time.Now()
|
||||
var eventCount, bytesWritten int64
|
||||
lastLogTime := startTime
|
||||
const logInterval = 5 * time.Second
|
||||
|
||||
log.I.F("export: starting export operation")
|
||||
|
||||
// Create resolver for compact event decoding
|
||||
resolver := NewDatabaseSerialResolver(d, d.serialCache)
|
||||
|
||||
@@ -86,7 +96,8 @@ func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
|
||||
}
|
||||
|
||||
// Serialize the event to JSON and write it to the output
|
||||
if _, err = w.Write(ev.Serialize()); chk.E(err) {
|
||||
data := ev.Serialize()
|
||||
if _, err = w.Write(data); chk.E(err) {
|
||||
ev.Free()
|
||||
return
|
||||
}
|
||||
@@ -94,7 +105,19 @@ func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
|
||||
ev.Free()
|
||||
return
|
||||
}
|
||||
bytesWritten += int64(len(data) + 1)
|
||||
eventCount++
|
||||
ev.Free()
|
||||
|
||||
// Progress logging every logInterval
|
||||
if time.Since(lastLogTime) >= logInterval {
|
||||
elapsed := time.Since(startTime)
|
||||
eventsPerSec := float64(eventCount) / elapsed.Seconds()
|
||||
mbPerSec := float64(bytesWritten) / elapsed.Seconds() / 1024 / 1024
|
||||
log.I.F("export: progress %d events, %.2f MB written, %.0f events/sec, %.2f MB/sec",
|
||||
eventCount, float64(bytesWritten)/1024/1024, eventsPerSec, mbPerSec)
|
||||
lastLogTime = time.Now()
|
||||
}
|
||||
}
|
||||
it.Close()
|
||||
|
||||
@@ -133,7 +156,8 @@ func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
|
||||
}
|
||||
|
||||
// Serialize the event to JSON and write it to the output
|
||||
if _, err = w.Write(ev.Serialize()); chk.E(err) {
|
||||
data := ev.Serialize()
|
||||
if _, err = w.Write(data); chk.E(err) {
|
||||
ev.Free()
|
||||
return
|
||||
}
|
||||
@@ -141,7 +165,19 @@ func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
|
||||
ev.Free()
|
||||
return
|
||||
}
|
||||
bytesWritten += int64(len(data) + 1)
|
||||
eventCount++
|
||||
ev.Free()
|
||||
|
||||
// Progress logging every logInterval
|
||||
if time.Since(lastLogTime) >= logInterval {
|
||||
elapsed := time.Since(startTime)
|
||||
eventsPerSec := float64(eventCount) / elapsed.Seconds()
|
||||
mbPerSec := float64(bytesWritten) / elapsed.Seconds() / 1024 / 1024
|
||||
log.I.F("export: progress %d events, %.2f MB written, %.0f events/sec, %.2f MB/sec",
|
||||
eventCount, float64(bytesWritten)/1024/1024, eventsPerSec, mbPerSec)
|
||||
lastLogTime = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
@@ -149,8 +185,16 @@ func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
|
||||
); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Final export summary
|
||||
elapsed := time.Since(startTime)
|
||||
eventsPerSec := float64(eventCount) / elapsed.Seconds()
|
||||
mbPerSec := float64(bytesWritten) / elapsed.Seconds() / 1024 / 1024
|
||||
log.I.F("export: completed - %d events, %.2f MB in %v (%.0f events/sec, %.2f MB/sec)",
|
||||
eventCount, float64(bytesWritten)/1024/1024, elapsed.Round(time.Millisecond), eventsPerSec, mbPerSec)
|
||||
} else {
|
||||
// Export events for specific pubkeys
|
||||
log.I.F("export: exporting events for %d pubkeys", len(pubkeys))
|
||||
for _, pubkey := range pubkeys {
|
||||
if err = d.View(
|
||||
func(txn *badger.Txn) (err error) {
|
||||
@@ -187,7 +231,8 @@ func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
|
||||
}
|
||||
|
||||
// Serialize the event to JSON and write it to the output
|
||||
if _, err = w.Write(ev.Serialize()); chk.E(err) {
|
||||
data := ev.Serialize()
|
||||
if _, err = w.Write(data); chk.E(err) {
|
||||
ev.Free()
|
||||
continue
|
||||
}
|
||||
@@ -195,7 +240,19 @@ func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
|
||||
ev.Free()
|
||||
continue
|
||||
}
|
||||
bytesWritten += int64(len(data) + 1)
|
||||
eventCount++
|
||||
ev.Free()
|
||||
|
||||
// Progress logging every logInterval
|
||||
if time.Since(lastLogTime) >= logInterval {
|
||||
elapsed := time.Since(startTime)
|
||||
eventsPerSec := float64(eventCount) / elapsed.Seconds()
|
||||
mbPerSec := float64(bytesWritten) / elapsed.Seconds() / 1024 / 1024
|
||||
log.I.F("export: progress %d events, %.2f MB written, %.0f events/sec, %.2f MB/sec",
|
||||
eventCount, float64(bytesWritten)/1024/1024, eventsPerSec, mbPerSec)
|
||||
lastLogTime = time.Now()
|
||||
}
|
||||
}
|
||||
return
|
||||
},
|
||||
@@ -203,5 +260,12 @@ func (d *D) Export(c context.Context, w io.Writer, pubkeys ...[]byte) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Final export summary for pubkey export
|
||||
elapsed := time.Since(startTime)
|
||||
eventsPerSec := float64(eventCount) / elapsed.Seconds()
|
||||
mbPerSec := float64(bytesWritten) / elapsed.Seconds() / 1024 / 1024
|
||||
log.I.F("export: completed - %d events, %.2f MB in %v (%.0f events/sec, %.2f MB/sec)",
|
||||
eventCount, float64(bytesWritten)/1024/1024, elapsed.Round(time.Millisecond), eventsPerSec, mbPerSec)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"os"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/log"
|
||||
@@ -20,6 +21,9 @@ const maxLen = 500000000
|
||||
|
||||
// ImportEventsFromReader imports events from an io.Reader containing JSONL data
|
||||
func (d *D) ImportEventsFromReader(ctx context.Context, rr io.Reader) error {
|
||||
startTime := time.Now()
|
||||
log.I.F("import: starting import operation")
|
||||
|
||||
// store to disk so we can return fast
|
||||
tmpPath := os.TempDir() + string(os.PathSeparator) + "orly"
|
||||
os.MkdirAll(tmpPath, 0700)
|
||||
@@ -29,15 +33,27 @@ func (d *D) ImportEventsFromReader(ctx context.Context, rr io.Reader) error {
|
||||
}
|
||||
defer os.Remove(tmp.Name()) // Clean up temp file when done
|
||||
|
||||
log.I.F("buffering upload to %s", tmp.Name())
|
||||
if _, err = io.Copy(tmp, rr); chk.E(err) {
|
||||
log.I.F("import: buffering upload to %s", tmp.Name())
|
||||
bufferStart := time.Now()
|
||||
bytesBuffered, err := io.Copy(tmp, rr)
|
||||
if chk.E(err) {
|
||||
return err
|
||||
}
|
||||
bufferElapsed := time.Since(bufferStart)
|
||||
log.I.F("import: buffered %.2f MB in %v (%.2f MB/sec)",
|
||||
float64(bytesBuffered)/1024/1024, bufferElapsed.Round(time.Millisecond),
|
||||
float64(bytesBuffered)/bufferElapsed.Seconds()/1024/1024)
|
||||
|
||||
if _, err = tmp.Seek(0, 0); chk.E(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
return d.processJSONLEvents(ctx, tmp)
|
||||
processErr := d.processJSONLEvents(ctx, tmp)
|
||||
|
||||
totalElapsed := time.Since(startTime)
|
||||
log.I.F("import: total operation time: %v", totalElapsed.Round(time.Millisecond))
|
||||
|
||||
return processErr
|
||||
}
|
||||
|
||||
// ImportEventsFromStrings imports events from a slice of JSON strings with policy filtering
|
||||
@@ -59,11 +75,16 @@ func (d *D) processJSONLEventsWithPolicy(ctx context.Context, rr io.Reader, poli
|
||||
scanBuf := make([]byte, maxLen)
|
||||
scan.Buffer(scanBuf, maxLen)
|
||||
|
||||
var count, total int
|
||||
// Performance tracking
|
||||
startTime := time.Now()
|
||||
lastLogTime := startTime
|
||||
const logInterval = 5 * time.Second
|
||||
|
||||
var count, total, skipped, policyRejected, unmarshalErrors, saveErrors int
|
||||
for scan.Scan() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
log.I.F("context closed")
|
||||
log.I.F("import: context closed after %d events", count)
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
@@ -71,6 +92,7 @@ func (d *D) processJSONLEventsWithPolicy(ctx context.Context, rr io.Reader, poli
|
||||
b := scan.Bytes()
|
||||
total += len(b) + 1
|
||||
if len(b) < 1 {
|
||||
skipped++
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -78,6 +100,7 @@ func (d *D) processJSONLEventsWithPolicy(ctx context.Context, rr io.Reader, poli
|
||||
if _, err := ev.Unmarshal(b); err != nil {
|
||||
// return the pooled buffer on error
|
||||
ev.Free()
|
||||
unmarshalErrors++
|
||||
log.W.F("failed to unmarshal event: %v", err)
|
||||
continue
|
||||
}
|
||||
@@ -90,11 +113,13 @@ func (d *D) processJSONLEventsWithPolicy(ctx context.Context, rr io.Reader, poli
|
||||
if policyErr != nil {
|
||||
log.W.F("policy check failed for event %x: %v", ev.ID, policyErr)
|
||||
ev.Free()
|
||||
policyRejected++
|
||||
continue
|
||||
}
|
||||
if !allowed {
|
||||
log.D.F("policy rejected event %x during sync import", ev.ID)
|
||||
ev.Free()
|
||||
policyRejected++
|
||||
continue
|
||||
}
|
||||
log.D.F("policy allowed event %x during sync import", ev.ID)
|
||||
@@ -103,6 +128,7 @@ func (d *D) processJSONLEventsWithPolicy(ctx context.Context, rr io.Reader, poli
|
||||
if _, err := d.SaveEvent(ctx, ev); err != nil {
|
||||
// return the pooled buffer on error paths too
|
||||
ev.Free()
|
||||
saveErrors++
|
||||
log.W.F("failed to save event: %v", err)
|
||||
continue
|
||||
}
|
||||
@@ -111,13 +137,30 @@ func (d *D) processJSONLEventsWithPolicy(ctx context.Context, rr io.Reader, poli
|
||||
ev.Free()
|
||||
b = nil
|
||||
count++
|
||||
if count%100 == 0 {
|
||||
log.I.F("processed %d events", count)
|
||||
|
||||
// Progress logging every logInterval
|
||||
if time.Since(lastLogTime) >= logInterval {
|
||||
elapsed := time.Since(startTime)
|
||||
eventsPerSec := float64(count) / elapsed.Seconds()
|
||||
mbPerSec := float64(total) / elapsed.Seconds() / 1024 / 1024
|
||||
log.I.F("import: progress %d events saved, %.2f MB read, %.0f events/sec, %.2f MB/sec",
|
||||
count, float64(total)/1024/1024, eventsPerSec, mbPerSec)
|
||||
lastLogTime = time.Now()
|
||||
debug.FreeOSMemory()
|
||||
}
|
||||
}
|
||||
|
||||
log.I.F("read %d bytes and saved %d events", total, count)
|
||||
// Final summary
|
||||
elapsed := time.Since(startTime)
|
||||
eventsPerSec := float64(count) / elapsed.Seconds()
|
||||
mbPerSec := float64(total) / elapsed.Seconds() / 1024 / 1024
|
||||
log.I.F("import: completed - %d events saved, %.2f MB in %v (%.0f events/sec, %.2f MB/sec)",
|
||||
count, float64(total)/1024/1024, elapsed.Round(time.Millisecond), eventsPerSec, mbPerSec)
|
||||
if unmarshalErrors > 0 || saveErrors > 0 || policyRejected > 0 || skipped > 0 {
|
||||
log.I.F("import: stats - %d unmarshal errors, %d save errors, %d policy rejected, %d skipped empty lines",
|
||||
unmarshalErrors, saveErrors, policyRejected, skipped)
|
||||
}
|
||||
|
||||
if err := scan.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
Submodule pkg/protocol/blossom/blossom deleted from e8d0a1ec44
24
pkg/protocol/blossom/blossom/LICENSE.txt
Normal file
24
pkg/protocol/blossom/blossom/LICENSE.txt
Normal file
@@ -0,0 +1,24 @@
|
||||
This is free and unencumbered software released into the public domain.
|
||||
|
||||
Anyone is free to copy, modify, publish, use, compile, sell, or
|
||||
distribute this software, either in source code form or as a compiled
|
||||
binary, for any purpose, commercial or non-commercial, and by any
|
||||
means.
|
||||
|
||||
In jurisdictions that recognize copyright laws, the author or authors
|
||||
of this software dedicate any and all copyright interest in the
|
||||
software to the public domain. We make this dedication for the benefit
|
||||
of the public at large and to the detriment of our heirs and
|
||||
successors. We intend this dedication to be an overt act of
|
||||
relinquishment in perpetuity of all present and future rights to this
|
||||
software under copyright law.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
For more information, please refer to <https://unlicense.org>
|
||||
61
pkg/protocol/blossom/blossom/README.md
Normal file
61
pkg/protocol/blossom/blossom/README.md
Normal file
@@ -0,0 +1,61 @@
|
||||
# 🌸 Blossom - Blobs stored simply on mediaservers
|
||||
|
||||
Blossom uses [nostr](https://github.com/nostr-protocol/nostr) public / private keys for identities. Users are expected to sign authorization events to prove their identity when interacting with servers
|
||||
|
||||
## What is it?
|
||||
|
||||
Blossom is a specification for a set of HTTP endpoints that allow users to store blobs of data on publicly accessible servers
|
||||
|
||||
## What are blobs
|
||||
|
||||
Blobs are packs of binary data addressed by their sha256 hash
|
||||
|
||||
## Protocol specification (BUDs)
|
||||
|
||||
BUDs or **Blossom Upgrade Documents** are short documents that outline an additional feature that a blossom server may implement.
|
||||
|
||||
## BUDs
|
||||
|
||||
- [BUD-00: Blossom Upgrade Documents](./buds/00.md)
|
||||
- [BUD-01: Server requirements and blob retrieval](./buds/01.md)
|
||||
- [BUD-02: Blob upload and management](./buds/02.md)
|
||||
- [BUD-03: User Server List](./buds/03.md)
|
||||
- [BUD-04: Mirroring blobs](./buds/04.md)
|
||||
- [BUD-05: Media optimization](./buds/05.md)
|
||||
- [BUD-06: Upload requirements](./buds/06.md)
|
||||
- [BUD-07: Payment required](./buds/07.md)
|
||||
- [BUD-08: Nostr File Metadata Tags](./buds/08.md)
|
||||
- [BUD-09: Blob Report](./buds/09.md)
|
||||
|
||||
## Endpoints
|
||||
|
||||
Blossom Servers expose a few endpoints for managing blobs
|
||||
|
||||
- `GET /<sha256>` (optional file `.ext`) [BUD-01](./buds/01.md#get-sha256---get-blob)
|
||||
- `HEAD /<sha256>` (optional file `.ext`) [BUD-01](./buds/01.md#head-sha256---has-blob)
|
||||
- `PUT /upload` [BUD-02](./buds/02.md#put-upload---upload-blob)
|
||||
- `Authentication`: Signed [nostr event](./buds/02.md#upload-authorization-required)
|
||||
- Return a blob descriptor
|
||||
- `HEAD /upload` [BUD-06](./buds/06.md#head-upload---upload-requirements)
|
||||
- `GET /list/<pubkey>` [BUD-02](./buds/02.md#get-listpubkey---list-blobs)
|
||||
- Returns an array of blob descriptors
|
||||
- `Authentication` _(optional)_: Signed [nostr event](./buds/02.md#list-authorization-optional)
|
||||
- `DELETE /<sha256>` [BUD-02](./buds/02.md#delete-sha256---delete-blob)
|
||||
- `Authentication`: Signed [nostr event](./buds/02.md#delete-authorization-required)
|
||||
- `PUT /mirror` [BUD-04](./buds/04.md#put-mirror---mirror-blob)
|
||||
- `Authentication`: Signed [nostr event](./buds/02.md#upload-authorization-required)
|
||||
- `HEAD /media` [BUD-05](./buds/05.md#head-media)
|
||||
- `PUT /media` [BUD-05](./buds/05.md#put-media)
|
||||
- `Authentication`: Signed [nostr event](./buds/05.md#upload-authorization)
|
||||
- `PUT /report` [BUD-09](./buds/09.md)
|
||||
|
||||
## Event kinds
|
||||
|
||||
| kind | description | BUD |
|
||||
| ------- | ------------------- | ------------------ |
|
||||
| `24242` | Authorization event | [01](./buds/01.md) |
|
||||
| `10063` | User Server List | [03](./buds/03.md) |
|
||||
|
||||
## License
|
||||
|
||||
Public domain.
|
||||
19
pkg/protocol/blossom/blossom/buds/00.md
Normal file
19
pkg/protocol/blossom/blossom/buds/00.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# BUD-00
|
||||
|
||||
## Blossom Upgrade Documents
|
||||
|
||||
`draft` `mandatory`
|
||||
|
||||
This document details the common language for all following BUDs
|
||||
|
||||
## Language
|
||||
|
||||
All occurrences of "MUST", "MUST NOT", "SHOULD", "SHOULD NOT" MUST be interpreted as per [RFC 2119](https://www.rfc-editor.org/rfc/rfc2119)
|
||||
|
||||
## BUDs
|
||||
|
||||
BUDs or "Blossom Upgrade Documents" are short documents that outline an additional requirement or feature that a blossom server MUST or MAY implement.
|
||||
|
||||
## Blobs
|
||||
|
||||
Blobs are raw binary data addressed by the sha256 hash of the data.
|
||||
162
pkg/protocol/blossom/blossom/buds/01.md
Normal file
162
pkg/protocol/blossom/blossom/buds/01.md
Normal file
@@ -0,0 +1,162 @@
|
||||
# BUD-01
|
||||
|
||||
## Server requirements and blob retrieval
|
||||
|
||||
`draft` `mandatory`
|
||||
|
||||
_All pubkeys MUST be in hex format_
|
||||
|
||||
## Cross origin headers
|
||||
|
||||
Servers MUST set the `Access-Control-Allow-Origin: *` header on all responses to ensure compatibility with applications hosted on other domains.
|
||||
|
||||
For [preflight](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS#preflighted_requests) (`OPTIONS`) requests,
|
||||
servers MUST also set, at minimum, the `Access-Control-Allow-Headers: Authorization, *` and `Access-Control-Allow-Methods: GET, HEAD, PUT,
|
||||
DELETE` headers.
|
||||
|
||||
The header `Access-Control-Max-Age: 86400` MAY be set to cache the results of a preflight request for 24 hours.
|
||||
|
||||
## Error responses
|
||||
|
||||
Every time a server sends an error response (HTTP status codes >=400), it may include a human-readable header `X-Reason` that can be displayed to the user.
|
||||
|
||||
## Authorization events
|
||||
|
||||
Authorization events are used to identify the users to the server
|
||||
|
||||
Authorization events must be generic and must NOT be scoped to specific servers. This allows pubkeys to sign a single event and interact the same way with multiple servers.
|
||||
|
||||
Events MUST be kind `24242` and have a `t` tag with a verb of `get`, `upload`, `list`, or `delete`
|
||||
|
||||
Events MUST have the `content` set to a human readable string explaining to the user what the event's intended use is. For example `Upload Blob`, `Delete dog-picture.png`, `List Images`, etc
|
||||
|
||||
All events MUST have a [NIP-40](https://github.com/nostr-protocol/nips/blob/master/40.md) `expiration` tag set to a unix timestamp at which the event should be considered expired.
|
||||
|
||||
Authorization events MAY have multiple `x` tags for endpoints that require a sha256 hash.
|
||||
|
||||
Example event:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"id": "bb653c815da18c089f3124b41c4b5ec072a40b87ca0f50bbbc6ecde9aca442eb",
|
||||
"pubkey": "b53185b9f27962ebdf76b8a9b0a84cd8b27f9f3d4abd59f715788a3bf9e7f75e",
|
||||
"kind": 24242,
|
||||
"content": "Upload bitcoin.pdf",
|
||||
"created_at": 1708773959,
|
||||
"tags": [
|
||||
["t", "upload"],
|
||||
// Authorization events MAY have multiple "x" tags.
|
||||
["x", "b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553"],
|
||||
["expiration", "1708858680"]
|
||||
],
|
||||
"sig": "d0d58c92afb3f4f1925120b99c39cffe77d93e82f488c5f8f482e8f97df75c5357175b5098c338661c37d1074b0a18ab5e75a9df08967bfb200930ec6a76562f"
|
||||
}
|
||||
```
|
||||
|
||||
Servers must perform the following checks in order to validate the event
|
||||
|
||||
1. The `kind` must be `24242`
|
||||
2. `created_at` must be in the past
|
||||
3. The `expiration` tag must be set to a Unix timestamp in the future
|
||||
4. The `t` tag must have a verb matching the intended action of the endpoint
|
||||
5. Additional checks for specific endpoints. `/upload`, `/delete`, etc
|
||||
|
||||
Using the `Authorization` HTTP header, the kind `24242` event MUST be base64 encoded and use the Authorization scheme Nostr
|
||||
|
||||
Example HTTP Authorization header:
|
||||
|
||||
```
|
||||
Authorization: Nostr eyJpZCI6IjhlY2JkY2RkNTMyOTIwMDEwNTUyNGExNDI4NzkxMzg4MWIzOWQxNDA5ZDhiOTBjY2RiNGI0M2Y4ZjBmYzlkMGMiLCJwdWJrZXkiOiI5ZjBjYzE3MDIzYjJjZjUwOWUwZjFkMzA1NzkzZDIwZTdjNzIyNzY5MjhmZDliZjg1NTM2ODg3YWM1NzBhMjgwIiwiY3JlYXRlZF9hdCI6MTcwODc3MTIyNywia2luZCI6MjQyNDIsInRhZ3MiOltbInQiLCJnZXQiXSxbImV4cGlyYXRpb24iLCIxNzA4ODU3NTQwIl1dLCJjb250ZW50IjoiR2V0IEJsb2JzIiwic2lnIjoiMDJmMGQyYWIyM2IwNDQ0NjI4NGIwNzFhOTVjOThjNjE2YjVlOGM3NWFmMDY2N2Y5NmNlMmIzMWM1M2UwN2I0MjFmOGVmYWRhYzZkOTBiYTc1NTFlMzA4NWJhN2M0ZjU2NzRmZWJkMTVlYjQ4NTFjZTM5MGI4MzI4MjJiNDcwZDIifQ==
|
||||
```
|
||||
|
||||
## Endpoints
|
||||
|
||||
All endpoints MUST be served from the root of the domain (eg. the `/upload` endpoint MUST be accessible from `https://cdn.example.com/upload`, etc). This allows clients to talk to servers interchangeably when uploading or retrieving blobs
|
||||
|
||||
## GET /sha256 - Get Blob
|
||||
|
||||
The `GET /<sha256>` endpoint MUST return the contents of the blob in the response body. The `Content-Type` header SHOULD be set to the appropriate MIME type
|
||||
|
||||
The endpoint MUST accept an optional file extension in the URL. ie. `.pdf`, `.png`, etc
|
||||
|
||||
Regardless of the file extension, the server MUST return the MIME type of the blob in the `Content-Type` header. If the
|
||||
server does not know the MIME type of the blob, it MUST default to `application/octet-stream`
|
||||
|
||||
### Proxying and Redirection (Optional)
|
||||
|
||||
If the endpoint returns a redirection 3xx status code such as 307 or 308 ([RFC 9110 section
|
||||
15.4](https://datatracker.ietf.org/doc/html/rfc9110#name-redirection-3xx)), it MUST redirect to a URL containing the
|
||||
same sha256 hash as the requested blob. This ensures that if a user copies or reuses the redirect URL, it will
|
||||
contain the original sha256 hash.
|
||||
|
||||
While the final blob may not be served from a Blossom server (e.g. CDN, IPFS, object storage, etc.), the destination
|
||||
server MUST set the `Access-Control-Allow-Origin: *` header on the response to allow cross-origin requests, as well as
|
||||
the `Content-Type` and `Content-Length` headers to ensure the blob can be correctly displayed by clients. Two ways to
|
||||
guarantee this are:
|
||||
|
||||
1. Proxying the blob through the Blossom server, allowing it to override headers such as `Content-Type`.
|
||||
2. Manipulating the redirect URL to include a file extension that matches the blob type, such as `.pdf`, `.png`, etc. If
|
||||
the server is unable to determine the MIME type of the blob, it MUST default to `application/octet-stream` and MAY
|
||||
include a file extension in the URL that reflects the blob type (e.g. `.bin`, `.dat`, etc.).
|
||||
|
||||
### Get Authorization (optional)
|
||||
|
||||
The server may optionally require authorization when retrieving blobs from the `GET /<sha256>` endpoint
|
||||
|
||||
In this case, the server MUST perform additional checks on the authorization event
|
||||
|
||||
1. A `t` tag MUST be present and set to `get`
|
||||
2. The event MUST contain either a `server` tag containing the full URL to the server or MUST contain at least one `x` tag matching the sha256 hash of the blob being retrieved
|
||||
|
||||
If the client did not send an `Authorization` header the server must respond with the appropriate HTTP status code `401` (Unauthorized)
|
||||
|
||||
Example event for retrieving a single blob:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "06d4842b9d7f8bf72440471704de4efa9ef8f0348e366d097405573994f66294",
|
||||
"pubkey": "ec0d11351457798907a3900fe465bfdc3b081be6efeb3d68c4d67774c0bc1f9a",
|
||||
"kind": 24242,
|
||||
"content": "Get bitcoin.pdf",
|
||||
"created_at": 1708771927,
|
||||
"tags": [
|
||||
["t", "get"],
|
||||
["expiration", "1708857340"],
|
||||
["x", "b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553"]
|
||||
],
|
||||
"sig": "22ecb5116ba143e4c3d6dc4b53d549aed6970ec455f6d25d145e0ad1fd7c0e26c465b2e92d5fdf699c7050fa43e6a41f087ef167208d4f06425f61548168fd7f"
|
||||
}
|
||||
```
|
||||
|
||||
Example event for retrieving multiple blobs from single server:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "d9484f18533d5e36f000f902a45b15a7eecf5fbfcb046789756d57ea87115dc5",
|
||||
"pubkey": "b5f07faa8d3529f03bd898a23dfb3257bab8d8f5490777c46076ff9647e205dc",
|
||||
"kind": 24242,
|
||||
"content": "Get blobs from example.com",
|
||||
"created_at": 1708771927,
|
||||
"tags": [
|
||||
["t", "get"],
|
||||
["expiration", "1708857340"],
|
||||
["server", "https://cdn.example.com/"]
|
||||
],
|
||||
"sig": "e402ade78e1714d40cd6bd3091bc5f4ada8e904e90301b5a2b9b5f0b6e95ce908d4f22b15e9fb86f8268a2131f8adbb3d1f0e7e7afd1ab0f4f08acb15822a999"
|
||||
}
|
||||
```
|
||||
|
||||
## HEAD /sha256 - Has Blob
|
||||
|
||||
The `HEAD /<sha256>` endpoint SHOULD be identical to the `GET /<sha256>` endpoint except that it MUST NOT return the
|
||||
blob in the response body per [RFC 7231](https://www.rfc-editor.org/rfc/rfc7231#section-4.3.2)
|
||||
|
||||
The endpoint MUST respond with the same `Content-Type` and `Content-Length` headers as the `GET /<sha256>` endpoint.
|
||||
|
||||
The endpoint MUST accept an optional file extension in the URL similar to the `GET /<sha256>` endpoint. ie. `.pdf`, `.png`, etc
|
||||
|
||||
## Range requests
|
||||
|
||||
To better support mobile devices, video files, or low bandwidth connections, servers should support range requests ([RFC 7233 section 3](https://www.rfc-editor.org/rfc/rfc7233#section-3)) on the `GET /<sha256>` endpoint and signal support using the `accept-ranges: bytes` and `content-length` headers on the `HEAD /<sha256>` endpoint
|
||||
|
||||
See [MDN docs](https://developer.mozilla.org/en-US/docs/Web/HTTP/Range_requests) for more details
|
||||
148
pkg/protocol/blossom/blossom/buds/02.md
Normal file
148
pkg/protocol/blossom/blossom/buds/02.md
Normal file
@@ -0,0 +1,148 @@
|
||||
# BUD-02
|
||||
|
||||
## Blob upload and management
|
||||
|
||||
`draft` `optional`
|
||||
|
||||
_All pubkeys MUST be in hex format_
|
||||
|
||||
Defines the `/upload`, `/list` and `DELETE /<sha256>` endpoints
|
||||
|
||||
## Blob Descriptor
|
||||
|
||||
A blob descriptor is a JSON object containing `url`, `sha256`, `size`, `type`, and `uploaded` fields
|
||||
|
||||
- `url` A publicly accessible URL to the [BUD-01](./01.md#get-sha256---get-blob) `GET /<sha256>` endpoint with a file extension
|
||||
- `sha256` The sha256 hash of the blob
|
||||
- `size` The size of the blob in bytes
|
||||
- `type` The MIME type of the blob (falling back to `application/octet-stream` if unknown)
|
||||
- `uploaded` The unix timestamp of when the blob was uploaded to the server
|
||||
|
||||
Servers MUST include a file extension in the URL in the `url` field to allow clients to easily embed the URL in social posts or other content
|
||||
|
||||
Servers MAY include additional fields in the descriptor like `magnet`, `infohash`, or `ipfs` depending on other protocols they support
|
||||
|
||||
Example:
|
||||
|
||||
```json
|
||||
{
|
||||
"url": "https://cdn.example.com/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553.pdf",
|
||||
"sha256": "b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553",
|
||||
"size": 184292,
|
||||
"type": "application/pdf",
|
||||
"uploaded": 1725105921
|
||||
}
|
||||
```
|
||||
|
||||
## PUT /upload - Upload Blob
|
||||
|
||||
The `PUT /upload` endpoint MUST accept binary data in the body of the request and MAY use the `Content-Type` and `Content-Length` headers to get the MIME type and size of the data
|
||||
|
||||
The endpoint MUST NOT modify the blob in any way and should return the exact same sha256 that was uploaded. This is critical to allow users to re-upload their blobs to new servers
|
||||
|
||||
The endpoint MUST return a [Blob Descriptor](#blob-descriptor) if the upload was successful or an error object if it was not
|
||||
|
||||
Servers MAY reject an upload for any reason and should respond with the appropriate HTTP `4xx` status code and an error
|
||||
message explaining the reason for the rejection
|
||||
|
||||
### File extension normalization (Optional)
|
||||
|
||||
When storing blobs, servers MAY normalise the file extension to a standard format (e.g. `.pdf`, `.png`, etc.) based on
|
||||
the MIME type of the blob. This can be especially useful when the `GET /<sha256>` endpoint is redirected to an external
|
||||
URL (see the [proxying and redirection section from BUD-01](./01.md#proxying-and-redirection-optional)), as external
|
||||
servers may rely on the file extension to serve the blob correctly.
|
||||
|
||||
### Upload Authorization (Optional)
|
||||
|
||||
Servers MAY accept an authorization event when uploading blobs and should perform additional checks
|
||||
|
||||
1. The `t` tag MUST be set to `upload`
|
||||
2. MUST contain at least one `x` tag matching the sha256 hash of the body of the request
|
||||
|
||||
Example Authorization event:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "bb653c815da18c089f3124b41c4b5ec072a40b87ca0f50bbbc6ecde9aca442eb",
|
||||
"pubkey": "b53185b9f27962ebdf76b8a9b0a84cd8b27f9f3d4abd59f715788a3bf9e7f75e",
|
||||
"kind": 24242,
|
||||
"content": "Upload bitcoin.pdf",
|
||||
"created_at": 1708773959,
|
||||
"tags": [
|
||||
["t", "upload"],
|
||||
["x", "b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553"],
|
||||
["expiration", "1708858680"]
|
||||
],
|
||||
"sig": "d0d58c92afb3f4f1925120b99c39cffe77d93e82f488c5f8f482e8f97df75c5357175b5098c338661c37d1074b0a18ab5e75a9df08967bfb200930ec6a76562f"
|
||||
}
|
||||
```
|
||||
|
||||
## GET /list/pubkey - List Blobs (Optional)
|
||||
|
||||
The `/list/<pubkey>` endpoint MUST return a JSON array of [Blob Descriptor](#blob-descriptor) that were uploaded by the specified pubkey
|
||||
|
||||
The endpoint MUST support a `since` and `until` query parameter to limit the returned blobs by their `uploaded` date
|
||||
|
||||
Servers may reject a list for any reason and MUST respond with the appropriate HTTP `4xx` status code and an error message explaining the reason for the rejection
|
||||
|
||||
### List Authorization (optional)
|
||||
|
||||
The server may optionally require Authorization when listing blobs uploaded by the pubkey
|
||||
|
||||
In this case the server must perform additional checks on the authorization event
|
||||
|
||||
1. The `t` tag must be set to `list`
|
||||
|
||||
Example Authorization event:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "cbb1cab9566355bfdf04e1f1fc1e655fe903ecc193e8a750092ee53beec2a0e8",
|
||||
"pubkey": "a5fc3654296e6de3cda6ba3e8eba7224fac8b150fd035d66b4c3c1dc2888b8fc",
|
||||
"kind": 24242,
|
||||
"content": "List Blobs",
|
||||
"created_at": 1708772350,
|
||||
"tags": [
|
||||
["t", "list"],
|
||||
["expiration", "1708858680"]
|
||||
],
|
||||
"sig": "ff9c716f8de0f633738036472be553ce4b58dc71d423a0ef403f95f64ef28582ef82129b41d4d0ef64d2338eb4aeeb66dbc03f8b3a3ed405054ea8ecb14fa36c"
|
||||
}
|
||||
```
|
||||
|
||||
## DELETE /sha256 - Delete Blob
|
||||
|
||||
Servers MUST accept `DELETE` requests to the `/<sha256>` endpoint
|
||||
|
||||
Servers may reject a delete request for any reason and should respond with the appropriate HTTP `4xx` status code and an error message explaining the reason for the rejection
|
||||
|
||||
### Delete Authorization (required)
|
||||
|
||||
Servers MUST accept an authorization event when deleting blobs
|
||||
|
||||
Servers should perform additional checks on the authorization event
|
||||
|
||||
1. The `t` tag must be set to `delete`
|
||||
2. MUST contain at least one `x` tag matching the sha256 hash of the blob being deleted
|
||||
|
||||
When multiple `x` tags are present on the authorization event the server MUST only delete the blob listed in the URL.
|
||||
|
||||
**Multiple `x` tags MUST NOT be interpreted as the user requesting a bulk delete.**
|
||||
|
||||
Example Authorization event:
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "a92868bd8ea740706d931f5d205308eaa0e6698e5f8026a990e78ee34ce47fe8",
|
||||
"pubkey": "ae0063dd2c81ec469f2291ac029a19f39268bfc40aea7ab4136d7a858c3a06de",
|
||||
"kind": 24242,
|
||||
"content": "Delete bitcoin.pdf",
|
||||
"created_at": 1708774469,
|
||||
"tags": [
|
||||
["t", "delete"],
|
||||
["x", "b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553"],
|
||||
["expiration", "1708858680"]
|
||||
],
|
||||
"sig": "2ba9af680505583e3eb289a1624a08661a2f6fa2e5566a5ee0036333d517f965e0ffba7f5f7a57c2de37e00a2e85fd7999076468e52bdbcfad8abb76b37a94b0"
|
||||
}
|
||||
```
|
||||
76
pkg/protocol/blossom/blossom/buds/03.md
Normal file
76
pkg/protocol/blossom/blossom/buds/03.md
Normal file
@@ -0,0 +1,76 @@
|
||||
# BUD-03
|
||||
|
||||
## User Server List
|
||||
|
||||
`draft` `optional`
|
||||
|
||||
Defines a replaceable event using `kind:10063` to advertise the blossom servers a user uses to host their blobs.
|
||||
|
||||
The event MUST include at least one `server` tag containing the full server URL including the `http://` or `https://`.
|
||||
|
||||
The order of these tags is important and should be arranged with the user's most "reliable" or "trusted" servers being first.
|
||||
|
||||
The `.content` field is not used.
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "e4bee088334cb5d38cff1616e964369c37b6081be997962ab289d6c671975d71",
|
||||
"pubkey": "781208004e09102d7da3b7345e64fd193cd1bc3fce8fdae6008d77f9cabcd036",
|
||||
"content": "",
|
||||
"kind": 10063,
|
||||
"created_at": 1708774162,
|
||||
"tags": [
|
||||
["server", "https://cdn.self.hosted"],
|
||||
["server", "https://cdn.satellite.earth"]
|
||||
],
|
||||
"sig": "cc5efa74f59e80622c77cacf4dd62076bcb7581b45e9acff471e7963a1f4d8b3406adab5ee1ac9673487480e57d20e523428e60ffcc7e7a904ac882cfccfc653"
|
||||
}
|
||||
```
|
||||
|
||||
## Client Upload Implementation
|
||||
|
||||
When uploading blobs clients MUST attempt to upload the blob to at least the first `server` listed in the user's server list.
|
||||
|
||||
Optionally clients MAY upload the blob to all the servers or mirror the blob to the other servers if they support [BUD-04](./04.md)
|
||||
|
||||
This ensures that the blob is available in multiple locations in the case one of the servers goes offline.
|
||||
|
||||
## Client Retrieval Implementation
|
||||
|
||||
When extracting the SHA256 hash from the URL clients MUST use the last occurrence of a 64 char hex string. This allows clients to extract hashes from blossom URLs and SOME non-blossom URLs.
|
||||
|
||||
In all the following examples, the hash `b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553` should be selected
|
||||
|
||||
- Blossom URLs
|
||||
- `https://blossom.example.com/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553.pdf`
|
||||
- `https://cdn.example.com/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553`
|
||||
- Non Blossom URLs
|
||||
- `https://cdn.example.com/user/ec4425ff5e9446080d2f70440188e3ca5d6da8713db7bdeef73d0ed54d9093f0/media/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553.pdf`
|
||||
- `https://cdn.example.com/media/user-name/documents/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553.pdf`
|
||||
- `http://download.example.com/downloads/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553`
|
||||
- `http://media.example.com/documents/b1/67/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553.pdf`
|
||||
|
||||
In the context of nostr events, clients SHOULD use the author's server list when looking for blobs that are no longer available at the original URL.
|
||||
|
||||
Take the following event as an example
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "834185269f4ab72539193105060dbb1c8b2efd702d14481cea345c47beefe6eb",
|
||||
"pubkey": "ec4425ff5e9446080d2f70440188e3ca5d6da8713db7bdeef73d0ed54d9093f0",
|
||||
"content": "I've developed a new open source P2P e-cash system called Bitcoin. check it out\nhttps://cdn.broken-domain.com/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553.pdf",
|
||||
"kind": 1,
|
||||
"created_at": 1297484820,
|
||||
"tags": [],
|
||||
"sig": "bd4bb200bdd5f7ffe5dbc3e539052e27b05d6f9f528e255b1bc4261cc16b8f2ad85c89eef990c5f2eee756ef71b4c571ecf6a88ad12f7338e321dd60c6a903b5"
|
||||
}
|
||||
```
|
||||
|
||||
Once the client discovers that the URL `https://cdn.broken-domain.com/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553.pdf` is no longer available. It can perform the following steps to find the blob:
|
||||
|
||||
1. Get the SHA256 hash from the URL
|
||||
2. Look for the authors server list `kind:10063`
|
||||
3. If found, attempt to retrieve the blob from each `server` listed, starting with the first
|
||||
4. If not found, the client MAY fallback to using a well-known popular blossom server to retrieve the blob
|
||||
|
||||
This ensures clients can quickly find missing blobs using the user's list of trusted servers.
|
||||
46
pkg/protocol/blossom/blossom/buds/04.md
Normal file
46
pkg/protocol/blossom/blossom/buds/04.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# BUD-04
|
||||
|
||||
## Mirroring blobs
|
||||
|
||||
`draft` `optional`
|
||||
|
||||
Defines the `/mirror` endpoint
|
||||
|
||||
## PUT /mirror - Mirror Blob
|
||||
|
||||
A server MAY expose a `PUT /mirror` endpoint to allow users to copy a blob from a URL instead of uploading it
|
||||
|
||||
Clients MUST pass the URL of the remote blob as a stringified JSON object in the request body
|
||||
|
||||
```jsonc
|
||||
// request body...
|
||||
{
|
||||
"url": "https://cdn.satellite.earth/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553.pdf"
|
||||
}
|
||||
```
|
||||
|
||||
Clients MAY set the `Authorization` header to an upload authorization event defined in [BUD-02](./02.md#upload-authorization-optional). When using authorization, the event MUST be of type "upload".
|
||||
|
||||
The `/mirror` endpoint MUST download the blob from the specified URL and verify that there is at least one `x` tag in the authorization event matching the sha256 hash of the downloaded blob
|
||||
|
||||
**Multiple `x` tags in the authorization event MUST NOT be interpreted as the user requesting to mirror multiple blobs.**
|
||||
|
||||
The endpoint MUST return a [Blob Descriptor](./02.md#blob-descriptor) and a `2xx` status code if the mirroring was successful
|
||||
or a `4xx` status code and error message if it was not.
|
||||
|
||||
The destination server SHOULD use the `Content-Type` header returned from the origin server to infer the mime type of
|
||||
the blob. If the `Content-Type` header is not present the destination server SHOULD attempt to detect the `Content-Type`
|
||||
from the blob contents and file extension, falling back to `application/octet-stream` if it cannot determine the type.
|
||||
|
||||
Servers MAY use the `Content-Length` header to determine the size of the blob.
|
||||
|
||||
Servers MAY reject a mirror request for any reason and MUST respond with the appropriate HTTP `4xx` status code and an error message explaining the reason for the rejection.
|
||||
|
||||
## Example Flow
|
||||
|
||||
1. Client signs an `upload` authorization event and uploads blob to Server A
|
||||
1. Server A returns a [Blob Descriptor](./02.md#blob-descriptor) with the `url`
|
||||
1. Client sends the `url` to Server B `/mirror` using the original `upload` authorization event
|
||||
1. Server B downloads the blob from Server A using the `url`
|
||||
1. Server B verifies the downloaded blob hash matches the `x` tag in the authorization event
|
||||
1. Server B returns a [Blob Descriptor](./02.md#blob-descriptor)
|
||||
48
pkg/protocol/blossom/blossom/buds/05.md
Normal file
48
pkg/protocol/blossom/blossom/buds/05.md
Normal file
@@ -0,0 +1,48 @@
|
||||
# BUD-05
|
||||
|
||||
## Media optimization endpoints
|
||||
|
||||
`draft` `optional`
|
||||
|
||||
Defines the `PUT /media` endpoint for processing and optimizing media
|
||||
|
||||
## PUT /media
|
||||
|
||||
The `PUT /media` endpoint MUST accept binary data in the body of the request and MAY use the `Content-Type` and `Content-Length` headers to get the MIME type and size of the media
|
||||
|
||||
The server should perform any optimizations or conversions it deems necessary in order to make the media more suitable for distribution
|
||||
|
||||
The endpoint MUST respond with a `2xx` status and a [blob descriptor](./02.md#blob-descriptor) of the new processed blob
|
||||
|
||||
Servers MAY reject media uploads for any reason and should respond with the appropriate HTTP `4xx` status code and an error message explaining the reason for the rejection
|
||||
|
||||
### Upload Authorization
|
||||
|
||||
Servers MAY require a `media` [authorization event](./02.md#upload-authorization-optional) to identify the uploader
|
||||
|
||||
If a server requires a `media` authorization event it MUST perform the following checks
|
||||
|
||||
1. The `t` tag MUST be set to `media`
|
||||
2. MUST contain at least one `x` tag matching the sha256 hash of the body of the request
|
||||
|
||||
## HEAD /media
|
||||
|
||||
Servers MUST respond to `HEAD` requests on the `/media` endpoint in a similar way to the `HEAD /upload` endpoint defined in [BUD-06](./06.md)
|
||||
|
||||
## Limitations
|
||||
|
||||
This endpoint is intentionally limited to optimizing a single blob with the goal of making it easier to distribute
|
||||
|
||||
How the blob is optimized is the sole responsibility of the server and the client should have no say in what optimization process is used
|
||||
|
||||
The goal of this endpoint is to provide a simple "trusted" optimization endpoint clients can use to optimize media for distribution
|
||||
|
||||
If a longer optimization or transformation process is needed, or if the client needs to specify how a blob should be transformed, there are other tools and protocols that should be used.
|
||||
|
||||
## Client Implementation
|
||||
|
||||
Clients MAY let a user select a "trusted processing" server for uploading images or short videos
|
||||
|
||||
Once a server has been selected, the client uploads the original media to the `/media` endpoint of the trusted server and get the optimized blob back
|
||||
|
||||
Then the client can ask the user to sign another `upload` authorization event for the new optimized blob and call the `/mirror` endpoint on other servers to distribute the blob
|
||||
73
pkg/protocol/blossom/blossom/buds/06.md
Normal file
73
pkg/protocol/blossom/blossom/buds/06.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# BUD-06
|
||||
|
||||
## Upload requirements
|
||||
|
||||
`draft` `optional`
|
||||
|
||||
Defines how clients can verify if the upload can be completed before sending the blob to the server. This mechanism helps prevent unnecessary traffic to other endpoints by rejecting files based on their hash, size, MIME type or other server-specific requirements.
|
||||
|
||||
## HEAD /upload - Upload requirements
|
||||
|
||||
The `HEAD /upload` endpoint MUST use the `X-SHA-256`, `X-Content-Type` and `X-Content-Length` headers sent by the client to get the SHA-256 hash, MIME type and size of the blob that will be uploaded, returning an HTTP status code and a custom header `X-Reason` to indicate some human readable message about the upload requirements.
|
||||
|
||||
### Headers
|
||||
|
||||
- `X-SHA-256`: A string that represents the blob's SHA-256 hash.
|
||||
- `X-Content-Length`: An integer that represents the blob size in bytes.
|
||||
- `X-Content-Type`: A string that specifies the blob's MIME type, like `application/pdf` or `image/png`.
|
||||
|
||||
### Upload Authorization
|
||||
|
||||
The `HEAD /upload` endpoint MAY accept an `upload` authorization event using the `Authorization` header similar to what is used in the [`PUT /upload`](./02.md#upload-authorization-optional) endpoint
|
||||
|
||||
If the server requires authorization to upload it may respond with the `401` status code, or if authorization was provided and is invalid or not permitted it may respond with `403` status code
|
||||
|
||||
### Examples
|
||||
|
||||
Example request from the client:
|
||||
|
||||
```http
|
||||
X-Content-Type: application/pdf
|
||||
X-Content-Length: 184292
|
||||
X-SHA-256: 88a74d0b866c8ba79251a11fe5ac807839226870e77355f02eaf68b156522576
|
||||
```
|
||||
|
||||
Example response from the server if the upload can be done:
|
||||
|
||||
```http
|
||||
HTTP/1.1 200 OK
|
||||
```
|
||||
|
||||
If the upload cannot proceed, the server MUST return an appropriate `4xx` HTTP status code and a custom header `X-Reason` with a human readable error message.
|
||||
|
||||
Some examples of error messages:
|
||||
|
||||
```http
|
||||
HTTP/1.1 400 Bad Request
|
||||
X-Reason: Invalid X-SHA-256 header format. Expected a string.
|
||||
```
|
||||
|
||||
```http
|
||||
HTTP/1.1 401 Unauthorized
|
||||
X-Reason: Authorization required for uploading video files.
|
||||
```
|
||||
|
||||
```http
|
||||
HTTP/1.1 403 Forbidden
|
||||
X-Reason: SHA-256 hash banned.
|
||||
```
|
||||
|
||||
```http
|
||||
HTTP/1.1 411 Length Required
|
||||
X-Reason: Missing X-Content-Length header.
|
||||
```
|
||||
|
||||
```http
|
||||
HTTP/1.1 413 Content Too Large
|
||||
X-Reason: File too large. Max allowed size is 100MB.
|
||||
```
|
||||
|
||||
```http
|
||||
HTTP/1.1 415 Unsupported Media Type
|
||||
X-Reason: Unsupported file type.
|
||||
```
|
||||
105
pkg/protocol/blossom/blossom/buds/07.md
Normal file
105
pkg/protocol/blossom/blossom/buds/07.md
Normal file
@@ -0,0 +1,105 @@
|
||||
BUD-07
|
||||
======
|
||||
|
||||
Paid upload and download
|
||||
---------------
|
||||
|
||||
`draft` `optional`
|
||||
|
||||
Payment requirements for blob storage.
|
||||
|
||||
## Payment Required
|
||||
|
||||
Some servers MAY require payment for uploads, downloads, or any other endpoint. In such cases, these endpoints MUST return a **402 Payment Required** status code.
|
||||
|
||||
Some endpoints a server may require payment for:
|
||||
|
||||
- [`HEAD /upload`](./06.md) to signal that payment is required for the `PUT` request ( if [BUD-06](./06.md) is supported )
|
||||
- [`PUT /upload`](./02.md#put-upload---upload-blob) to require payment for uploads
|
||||
- [`HEAD /<sha256>`](./01.md#head-sha256---has-blob) to signal that payment is required for the `GET` request
|
||||
- [`GET /<sha256>`](./01.md#get-sha256---get-blob) to require payment for downloads ( maybe charge by MB downloaded? )
|
||||
- [`HEAD /media`](./05.md) and [`PUT /media`](./05.md) to require payment for media optimizations ( if [BUD-05](./05.md) is supported )
|
||||
|
||||
When payment is required, the server MUST include one or more `X-{payment_method}` header(s), each corresponding to a supported payment method.
|
||||
|
||||
## Server headers
|
||||
|
||||
The 402 status code and `X-{payment_method}` header is used by the server to inform the client that a payment is required for the requested operation. The server MUST provide specific headers for each supported payment method.
|
||||
|
||||
Supported payment methods:
|
||||
|
||||
- `X-Cashu`: Payment details for the cashu payment method, adhering to the [NUT-24](https://github.com/cashubtc/nuts/blob/main/24.md) standard.
|
||||
- `X-Lightning`: Payment details for the lightning payment method, adhering to the [BOLT-11](https://github.com/lightning/bolts/blob/master/11-payment-encoding.md) standard.
|
||||
|
||||
If a server supports multiple payment methods, it MAY send multiple `X-{payment_method}` headers in the same response.
|
||||
|
||||
Schema:
|
||||
|
||||
```http
|
||||
HTTP/1.1 402 Payment Required
|
||||
X-{payment_method}: "<encoded_payload_according_to_{payment_method}_spec>"
|
||||
```
|
||||
|
||||
### `X-Cashu` Header
|
||||
|
||||
When using the X-Cashu header, the server MUST adhere to the [NUT-24](https://github.com/cashubtc/nuts/blob/main/24.md) standard.
|
||||
|
||||
Example for cashu:
|
||||
|
||||
```http
|
||||
HTTP/1.1 402 Payment Required
|
||||
X-Cashu: creqApWF0gaNhdGVub3N0cmFheKlucHJvZmlsZTFxeTI4d3VtbjhnaGo3dW45ZDNzaGp0bnl2OWtoMnVld2Q5aHN6OW1od2RlbjV0ZTB3ZmprY2N0ZTljdXJ4dmVuOWVlaHFjdHJ2NWhzenJ0aHdkZW41dGUwZGVoaHh0bnZkYWtxcWd5ZGFxeTdjdXJrNDM5eWtwdGt5c3Y3dWRoZGh1NjhzdWNtMjk1YWtxZWZkZWhrZjBkNDk1Y3d1bmw1YWeBgmFuYjE3YWloYjdhOTAxNzZhYQphdWNzYXRhbYF4Imh0dHBzOi8vbm9mZWVzLnRlc3RudXQuY2FzaHUuc3BhY2U
|
||||
```
|
||||
|
||||
### `X-Lightning` Header
|
||||
|
||||
When using the X-Lightning header, the server MUST adhere to the [BOLT-11](https://github.com/lightning/bolts/blob/master/11-payment-encoding.md) standard.
|
||||
Example for lightning:
|
||||
|
||||
```http
|
||||
HTTP/1.1 402 Payment Required
|
||||
X-Lightning: lnbc30n1pnnmw3lpp57727jjq8zxctahfavqacymellq56l70f7lwfkmhxfjva6dgul2zqhp5w48l28v60yvythn6qvnpq0lez54422a042yaw4kq8arvd68a6n7qcqzzsxqyz5vqsp5sqezejdfaxx5hge83tf59a50h6gagwah59fjn9mw2d5mn278jkys9qxpqysgqt2q2lhjl9kgfaqz864mhlsspftzdyr642lf3zdt6ljqj6wmathdhtgcn0e6f4ym34jl0qkt6gwnllygvzkhdlpq64c6yv3rta2hyzlqp8k28pz
|
||||
```
|
||||
|
||||
### Client implementation
|
||||
|
||||
Clients MUST parse and validate the `X-{payment_method}` header received from the server. The client SHOULD provide a way for the user to complete the payment and retry the request using the same `X-{payment_method}` header.
|
||||
|
||||
The client MUST provide the payment proof when re-trying the request using the same `X-{payment_method}` header that was chosen. The payment proof MUST align with the payment method specification:
|
||||
|
||||
- For cashu the payment proof should be a serialized `cashuB` token in the `X-Cashu` header according to [NUT-24](https://github.com/cashubtc/nuts/blob/main/24.md#client-payment).
|
||||
- For lightning the payment proof should be the preimage of the payment request according to [BOLT-11](https://github.com/lightning/bolts/blob/master/11-payment-encoding.md).
|
||||
|
||||
Schema:
|
||||
|
||||
```http
|
||||
X-{payment_method}: "<encoded_payment_proof_according_to_{payment_method}_spec>"
|
||||
```
|
||||
|
||||
Example for Cashu:
|
||||
|
||||
```http
|
||||
X-Cashu: cashuBo2F0gqJhaUgA_9SLj17PgGFwgaNhYQFhc3hAYWNjMTI0MzVlN2I4NDg0YzNjZjE4NTAxNDkyMThhZjkwZjcxNmE1MmJmNGE1ZWQzNDdlNDhlY2MxM2Y3NzM4OGFjWCECRFODGd5IXVW
|
||||
```
|
||||
|
||||
Example for Lightning:
|
||||
|
||||
```http
|
||||
X-Lightning: 966fcb8f153339372f9a187f725384ff4ceae0047c25b9ce607488d7c7e93bba
|
||||
```
|
||||
|
||||
**Special Note on HEAD Requests**
|
||||
|
||||
The HEAD endpoints are only used to retrieve blob or server information. They MUST NOT be retried with payment proof. Instead, clients should complete the payment and proceed with the `PUT` or `GET` request.
|
||||
|
||||
### Error handling
|
||||
|
||||
If the client fails to provide the payment proof (expired invoice, invalid token, etc.) the server MUST respond with **400 Bad request** status code and include a `X-Reason` header with a human-readable message. The client SHOULD inform the user about the error and provide a way to retry the request.
|
||||
|
||||
### Extending with Future Payment Methods
|
||||
|
||||
To support future payment methods (e.g., other Layer 2 solutions), the specification allows the addition of new X-{payment_method} headers. Each new method MUST adhere to the following:
|
||||
|
||||
New methods MUST use a unique `X-{payment_method}` header containing the specific payment details.
|
||||
|
||||
New methods MUST adhere to their own specification, which MUST be publicly available and linked in the header.
|
||||
35
pkg/protocol/blossom/blossom/buds/08.md
Normal file
35
pkg/protocol/blossom/blossom/buds/08.md
Normal file
@@ -0,0 +1,35 @@
|
||||
# BUD-08
|
||||
|
||||
## Nostr File Metadata Tags
|
||||
|
||||
`draft` `optional`
|
||||
|
||||
Describes how a server could return nostr [NIP-94 File Metadata](https://github.com/nostr-protocol/nips/blob/master/94.md) tags from the `/upload` and `/mirror` endpoints
|
||||
|
||||
### Returning tags
|
||||
|
||||
As described in [BUD-02](./02.md#blob-descriptor) servers MAY add any additional fields to a blob descriptor
|
||||
|
||||
Servers MAY return an additional `nip94` field in the [blob descriptor](./02.md#blob-descriptor) from the `/upload` or `/mirror` endpoints
|
||||
|
||||
The `nip94` field should contain a JSON array with KV pairs as defined in [NIP-94](https://github.com/nostr-protocol/nips/blob/master/94.md)
|
||||
|
||||
An example response would look like:
|
||||
|
||||
```json
|
||||
{
|
||||
"url": "https://cdn.example.com/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553.pdf",
|
||||
"sha256": "b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553",
|
||||
"size": 184292,
|
||||
"type": "application/pdf",
|
||||
"uploaded": 1725909682,
|
||||
"nip94": [
|
||||
["url", "https://cdn.example.com/b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553.pdf"],
|
||||
["m", "application/pdf"],
|
||||
["x", "b1674191a88ec5cdd733e4240a81803105dc412d6c6708d53ab94fc248f4f553"],
|
||||
["size", "184292"],
|
||||
["magnet", "magnet:?xt=urn:btih:9804c5286a3fb07b2244c968b39bc3cc814313bc&dn=bitcoin.pdf"],
|
||||
["i", "9804c5286a3fb07b2244c968b39bc3cc814313bc"]
|
||||
]
|
||||
}
|
||||
```
|
||||
40
pkg/protocol/blossom/blossom/buds/09.md
Normal file
40
pkg/protocol/blossom/blossom/buds/09.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# BUD-09
|
||||
|
||||
## Blob Report
|
||||
|
||||
`draft` `optional`
|
||||
|
||||
This BUD defines a new endpoint for clients and users to report blobs to servers.
|
||||
|
||||
### PUT /report - reporting a blob
|
||||
|
||||
The request body MUST be a signed [NIP-56](https://github.com/nostr-protocol/nips/blob/master/56.md) report event with one or more `x` tags containing the hashes of the blobs being reported.
|
||||
|
||||
Example:
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"kind": 1984,
|
||||
"tags": [
|
||||
["x", "<blob-sha256>", "<type-based-on-nip-56>"],
|
||||
["x", "<another-blob-sha256>", "<type-based-on-nip-56>"]
|
||||
],
|
||||
"content": "<human readable report details>",
|
||||
// other fields...
|
||||
}
|
||||
```
|
||||
|
||||
The clients can include `e` or `p` tags to point to the event or the profile that contains this media if they want to make this report event useful for relays as well.
|
||||
|
||||
Server MUST respond to a report request with a success code or a code in the 4xx/5xx range if there was any error.
|
||||
|
||||
### Client behavior
|
||||
|
||||
Clients can show a blob report button on posts or in blob details. It is also RECOMMENDED to merge this with the normal nostr report flow and send the report to both relays and the blossom server. Other clients can then receive it from relays and hide or blur blobs reported by trusted friends.
|
||||
|
||||
### Server behavior
|
||||
|
||||
Servers MAY keep the reports somewhere for operators to check and take action on them. They MAY use a list of trusted people or moderators to directly take action on a blob without an operator request.
|
||||
|
||||
Servers MAY consider the sha256 hashes of removed blobs as blocked to prevent re-upload.
|
||||
Servers SHOULD advertise a route or landing page to provide their rules and terms of service which affects the report process.
|
||||
Reference in New Issue
Block a user