Compare commits

5 commits:

- feae79af1a
- ebef8605eb
- c5db0abf73
- 016e97925a
- 042b47a4d9

CLAUDE.md (116 lines changed)
@@ -8,11 +8,12 @@ ORLY is a high-performance Nostr relay written in Go, designed for personal rela

**Key Technologies:**
- **Language**: Go 1.25.3+
- **Database**: Badger v4 (embedded key-value store) or DGraph (distributed graph database)
- **Database**: Badger v4 (embedded), DGraph (distributed graph), or Neo4j (social graph)
- **Cryptography**: Custom p8k library using purego for secp256k1 operations (no CGO)
- **Web UI**: Svelte frontend embedded in the binary
- **WebSocket**: gorilla/websocket for Nostr protocol
- **Performance**: SIMD-accelerated SHA256 and hex encoding, query result caching with zstd compression
- **Social Graph**: Neo4j backend with Web of Trust (WoT) extensions for trust metrics

## Build Commands

@@ -139,9 +140,16 @@ export ORLY_SPROCKET_ENABLED=true
# Enable policy system
export ORLY_POLICY_ENABLED=true

# Database backend selection (badger or dgraph)
# Database backend selection (badger, dgraph, or neo4j)
export ORLY_DB_TYPE=badger
export ORLY_DGRAPH_URL=localhost:9080 # Only for dgraph backend

# DGraph configuration (only when ORLY_DB_TYPE=dgraph)
export ORLY_DGRAPH_URL=localhost:9080

# Neo4j configuration (only when ORLY_DB_TYPE=neo4j)
export ORLY_NEO4J_URI=bolt://localhost:7687
export ORLY_NEO4J_USER=neo4j
export ORLY_NEO4J_PASSWORD=password

# Query cache configuration (improves REQ response times)
export ORLY_QUERY_CACHE_SIZE_MB=512 # Default: 512MB
@@ -150,6 +158,20 @@ export ORLY_QUERY_CACHE_MAX_AGE=5m # Cache expiry time
# Database cache tuning (for Badger backend)
export ORLY_DB_BLOCK_CACHE_MB=512 # Block cache size
export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
export ORLY_INLINE_EVENT_THRESHOLD=1024 # Inline storage threshold (bytes)

# Directory Spider (metadata sync from other relays)
export ORLY_DIRECTORY_SPIDER=true # Enable directory spider
export ORLY_DIRECTORY_SPIDER_INTERVAL=24h # How often to run
export ORLY_DIRECTORY_SPIDER_HOPS=3 # Max hops for relay discovery

# NIP-43 Relay Access Metadata
export ORLY_NIP43_ENABLED=true # Enable invite system
export ORLY_NIP43_INVITE_EXPIRY=24h # Invite code validity

# Authentication modes
export ORLY_AUTH_REQUIRED=false # Require auth for all requests
export ORLY_AUTH_TO_WRITE=false # Require auth only for writes
```

## Code Architecture

@@ -177,7 +199,7 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size

**`pkg/database/`** - Database abstraction layer with multiple backend support
- `interface.go` - Database interface definition for pluggable backends
- `factory.go` - Database backend selection (Badger or DGraph)
- `factory.go` - Database backend selection (Badger, DGraph, or Neo4j)
- `database.go` - Badger implementation with cache tuning and query cache
- `save-event.go` - Event storage with index updates
- `query-events.go` - Main query execution engine with filter normalization
@@ -188,6 +210,15 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
- `identity.go` - Relay identity key management
- `migrations.go` - Database schema migration runner

**`pkg/neo4j/`** - Neo4j graph database backend with social graph support
- `neo4j.go` - Main database implementation
- `schema.go` - Graph schema and index definitions (includes WoT extensions)
- `query-events.go` - REQ filter to Cypher translation
- `save-event.go` - Event storage with relationship creation
- `social-event-processor.go` - Processes kinds 0, 3, 1984, 10000 for social graph
- `WOT_SPEC.md` - Web of Trust data model specification (NostrUser nodes, trust metrics)
- `MODIFYING_SCHEMA.md` - Guide for schema modifications

**`pkg/protocol/`** - Nostr protocol implementation
- `ws/` - WebSocket message framing and parsing
- `auth/` - NIP-42 authentication challenge/response
@@ -223,6 +254,9 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
**`pkg/policy/`** - Event filtering and validation policies
- Policy configuration loaded from `~/.config/ORLY/policy.json`
- Per-kind size limits, age restrictions, custom scripts
- **Write-Only Validation**: Size, age, tag, and expiry validations apply ONLY to write operations
- **Read-Only Filtering**: `read_allow`, `read_deny`, `privileged` apply ONLY to read operations
- See `docs/POLICY_CONFIGURATION_REFERENCE.md` for authoritative read vs write applicability
- **Dynamic Policy Hot Reload via Kind 12345 Events:**
  - Policy admins can update policy configuration without relay restart
  - Kind 12345 events contain JSON policy in content field
@@ -231,12 +265,16 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
  - Policy admin follow lists (kind 3) trigger immediate cache refresh
  - `WriteAllowFollows` rule grants both read+write access to admin follows
- Tag validation supports regex patterns per tag type
- **New Policy Rule Fields:**
- **Policy Rule Fields:**
  - `max_expiry_duration`: ISO-8601 duration format (e.g., "P7D", "PT1H30M") for event expiry limits
  - `protected_required`: Requires NIP-70 protected events (must have "-" tag)
  - `identifier_regex`: Regex pattern for validating "d" tag identifiers
  - `follows_whitelist_admins`: Per-rule admin pubkeys whose follows are whitelisted
  - `write_allow` / `write_deny`: Pubkey whitelist/blacklist for writing (write-only)
  - `read_allow` / `read_deny`: Pubkey whitelist/blacklist for reading (read-only)
  - `privileged`: Party-involved access control (read-only)
- See `docs/POLICY_USAGE_GUIDE.md` for configuration examples
- See `pkg/policy/README.md` for quick reference

**`pkg/sync/`** - Distributed synchronization
- `cluster_manager.go` - Active replication between relay peers
@@ -246,6 +284,12 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
**`pkg/spider/`** - Event syncing from other relays
- `spider.go` - Spider manager for "follows" mode
- Fetches events from admin relays for followed pubkeys
- **Directory Spider** (`directory.go`):
  - Discovers relays by crawling kind 10002 (relay list) events
  - Expands outward from seed pubkeys (whitelisted users) via hop distance
  - Fetches metadata events (kinds 0, 3, 10000, 10002) from discovered relays
  - Self-detection prevents querying own relay
  - Configurable interval and max hops via `ORLY_DIRECTORY_SPIDER_*` env vars

**`pkg/utils/`** - Shared utilities
- `atomic/` - Extended atomic operations
@@ -279,6 +323,11 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
- Supports multiple backends via `ORLY_DB_TYPE` environment variable
- **Badger** (default): Embedded key-value store with custom indexing, ideal for single-instance deployments
- **DGraph**: Distributed graph database for larger, multi-node deployments
- **Neo4j**: Graph database with social graph and Web of Trust (WoT) extensions
  - Processes kinds 0 (profile), 3 (contacts), 1984 (reports), 10000 (mute list) for social graph
  - NostrUser nodes with trust metrics (influence, PageRank)
  - FOLLOWS, MUTES, REPORTS relationships for WoT analysis
  - See `pkg/neo4j/WOT_SPEC.md` for full schema specification
- Backend selected via factory pattern in `pkg/database/factory.go`
- All backends implement the same `Database` interface defined in `pkg/database/interface.go`
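For orientation, a minimal sketch of how a caller selects a backend through that factory. The `NewDatabaseWithConfig` signature and the blank-import package paths come from the `pkg/database/factory.go` hunk later in this change set; the `pkg/database` import path and the `openDB` helper are assumptions for illustration only:

```go
package main // sketch only

import (
    "context"

    "next.orly.dev/pkg/database" // import path assumed from the factory error strings
    _ "next.orly.dev/pkg/dgraph" // registers the DGraph factory via init()
    _ "next.orly.dev/pkg/neo4j"  // registers the Neo4j factory via init()
)

// openDB is a hypothetical helper: dbType comes from ORLY_DB_TYPE
// ("badger" by default, or "dgraph"/"neo4j" once their packages are imported).
func openDB(ctx context.Context, cancel context.CancelFunc, dbType string, cfg *database.DatabaseConfig) (database.Database, error) {
    return database.NewDatabaseWithConfig(ctx, cancel, dbType, cfg)
}
```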
@@ -299,9 +348,15 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size

**Configuration System:**
- Uses `go-simpler.org/env` for struct tags
- All config in `app/config/config.go` with `ORLY_` prefix
- **IMPORTANT: ALL environment variables MUST be defined in `app/config/config.go`**
  - Never use `os.Getenv()` directly in packages - always pass config via structs
  - This ensures all config options appear in `./orly help` output
  - Database backends receive config via `database.DatabaseConfig` struct
  - Use `GetDatabaseConfigValues()` helper to extract DB config from app config
- All config fields use `ORLY_` prefix with struct tags defining defaults and usage
- Supports XDG directories via `github.com/adrg/xdg`
- Default data directory: `~/.local/share/ORLY`
- Database-specific config (Neo4j, DGraph, Badger) is passed via `DatabaseConfig` struct in `pkg/database/factory.go`
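As a concrete illustration of the bullets above, a hedged sketch of what declaring a new option in `app/config/config.go` looks like. The field name and environment variable here are hypothetical; the tag format mirrors the existing fields shown in the `config.go` hunk below:

```go
// Sketch only: ExampleOption and ORLY_EXAMPLE_OPTION are hypothetical.
// Declaring the field here (instead of calling os.Getenv elsewhere) is what
// makes the option show up in `./orly help`.
type C struct {
    // ... existing fields ...
    ExampleOption int `env:"ORLY_EXAMPLE_OPTION" default:"16" usage:"describe the option here"`
}
```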
**Event Publishing:**
- `pkg/protocol/publish/` manages publisher registry
@@ -524,3 +579,52 @@ Files modified:
```
3. GitHub Actions workflow builds binaries for multiple platforms
4. Release created automatically with binaries and checksums

## Recent Features (v0.31.x)

### Directory Spider
The directory spider (`pkg/spider/directory.go`) automatically discovers and syncs metadata from other relays:
- Crawls kind 10002 (relay list) events to discover relays
- Expands outward from seed pubkeys (whitelisted users) via configurable hop distance
- Fetches essential metadata events (kinds 0, 3, 10000, 10002)
- Self-detection prevents querying own relay
- Enable with `ORLY_DIRECTORY_SPIDER=true`

### Neo4j Social Graph Backend
The Neo4j backend (`pkg/neo4j/`) includes Web of Trust (WoT) extensions:
- **Social Event Processor**: Handles kinds 0, 3, 1984, 10000 for social graph management
- **NostrUser nodes**: Store profile data and trust metrics (influence, PageRank)
- **Relationships**: FOLLOWS, MUTES, REPORTS for social graph analysis
- **WoT Schema**: See `pkg/neo4j/WOT_SPEC.md` for full specification
- **Schema Modifications**: See `pkg/neo4j/MODIFYING_SCHEMA.md` for how to update

### Policy System Enhancements
- **Write-Only Validation**: Size, age, tag validations apply ONLY to writes
- **Read-Only Filtering**: `read_allow`, `read_deny`, `privileged` apply ONLY to reads
- **Scripts**: Policy scripts execute ONLY for write operations
- **Reference Documentation**: `docs/POLICY_CONFIGURATION_REFERENCE.md` provides authoritative read vs write applicability
- See also: `pkg/policy/README.md` for quick reference

### Authentication Modes
- `ORLY_AUTH_REQUIRED=true`: Require authentication for ALL requests
- `ORLY_AUTH_TO_WRITE=true`: Require authentication only for writes (allow anonymous reads)

### NIP-43 Relay Access Metadata
Invite-based access control system:
- `ORLY_NIP43_ENABLED=true`: Enable invite system
- Publishes kind 8000/8001 events for member changes
- Publishes kind 13534 membership list events
- Configurable invite expiry via `ORLY_NIP43_INVITE_EXPIRY`

## Documentation Index

| Document | Purpose |
|----------|---------|
| `docs/POLICY_CONFIGURATION_REFERENCE.md` | Authoritative policy config reference with read/write applicability |
| `docs/POLICY_USAGE_GUIDE.md` | Comprehensive policy system user guide |
| `pkg/policy/README.md` | Policy system quick reference |
| `pkg/neo4j/README.md` | Neo4j backend overview |
| `pkg/neo4j/WOT_SPEC.md` | Web of Trust schema specification |
| `pkg/neo4j/MODIFYING_SCHEMA.md` | How to modify Neo4j schema |
| `pkg/neo4j/TESTING.md` | Neo4j testing guide |
| `readme.adoc` | Project README with feature overview |

@@ -1,5 +1,13 @@
// Package config provides a go-simpler.org/env configuration table and helpers
// for working with the list of key/value lists stored in .env files.
//
// IMPORTANT: This file is the SINGLE SOURCE OF TRUTH for all environment variables.
// All configuration options MUST be defined here with proper `env` struct tags.
// Never use os.Getenv() directly in other packages - pass configuration via structs.
// This ensures all options appear in `./orly help` output and are documented.
//
// For database backends, use GetDatabaseConfigValues() to extract database-specific
// settings, then construct a database.DatabaseConfig in the caller (e.g., main.go).
package config

import (
@@ -82,11 +90,19 @@ type C struct {
    NIP43InviteExpiry time.Duration `env:"ORLY_NIP43_INVITE_EXPIRY" default:"24h" usage:"how long invite codes remain valid"`

    // Database configuration
    DBType string `env:"ORLY_DB_TYPE" default:"badger" usage:"database backend to use: badger or dgraph"`
    DBType string `env:"ORLY_DB_TYPE" default:"badger" usage:"database backend to use: badger, dgraph, or neo4j"`
    DgraphURL string `env:"ORLY_DGRAPH_URL" default:"localhost:9080" usage:"dgraph gRPC endpoint address (only used when ORLY_DB_TYPE=dgraph)"`
    QueryCacheSizeMB int `env:"ORLY_QUERY_CACHE_SIZE_MB" default:"512" usage:"query cache size in MB (caches database query results for faster REQ responses)"`
    QueryCacheMaxAge string `env:"ORLY_QUERY_CACHE_MAX_AGE" default:"5m" usage:"maximum age for cached query results (e.g., 5m, 10m, 1h)"`

    // Neo4j configuration (only used when ORLY_DB_TYPE=neo4j)
    Neo4jURI string `env:"ORLY_NEO4J_URI" default:"bolt://localhost:7687" usage:"Neo4j bolt URI (only used when ORLY_DB_TYPE=neo4j)"`
    Neo4jUser string `env:"ORLY_NEO4J_USER" default:"neo4j" usage:"Neo4j authentication username (only used when ORLY_DB_TYPE=neo4j)"`
    Neo4jPassword string `env:"ORLY_NEO4J_PASSWORD" default:"password" usage:"Neo4j authentication password (only used when ORLY_DB_TYPE=neo4j)"`

    // Advanced database tuning
    InlineEventThreshold int `env:"ORLY_INLINE_EVENT_THRESHOLD" default:"1024" usage:"size threshold in bytes for inline event storage in Badger (0 to disable, typical values: 384-1024)"`

    // TLS configuration
    TLSDomains []string `env:"ORLY_TLS_DOMAINS" usage:"comma-separated list of domains to respond to for TLS"`
    Certs []string `env:"ORLY_CERTS" usage:"comma-separated list of paths to certificate root names (e.g., /path/to/cert will load /path/to/cert.pem and /path/to/cert.key)"`
@@ -369,3 +385,28 @@ func PrintHelp(cfg *C, printer io.Writer) {
    PrintEnv(cfg, printer)
    fmt.Fprintln(printer)
}

// GetDatabaseConfigValues returns the database configuration values as individual fields.
// This avoids circular imports with pkg/database while allowing main.go to construct
// a database.DatabaseConfig with the correct type.
func (cfg *C) GetDatabaseConfigValues() (
    dataDir, logLevel string,
    blockCacheMB, indexCacheMB, queryCacheSizeMB int,
    queryCacheMaxAge time.Duration,
    inlineEventThreshold int,
    dgraphURL, neo4jURI, neo4jUser, neo4jPassword string,
) {
    // Parse query cache max age from string to duration
    queryCacheMaxAge = 5 * time.Minute // Default
    if cfg.QueryCacheMaxAge != "" {
        if duration, err := time.ParseDuration(cfg.QueryCacheMaxAge); err == nil {
            queryCacheMaxAge = duration
        }
    }

    return cfg.DataDir, cfg.DBLogLevel,
        cfg.DBBlockCacheMB, cfg.DBIndexCacheMB, cfg.QueryCacheSizeMB,
        queryCacheMaxAge,
        cfg.InlineEventThreshold,
        cfg.DgraphURL, cfg.Neo4jURI, cfg.Neo4jUser, cfg.Neo4jPassword
}

@@ -177,6 +177,10 @@ LIMIT $limit

## Configuration

All configuration is centralized in `app/config/config.go` and visible via `./orly help`.

> **Important:** All environment variables must be defined in `app/config/config.go`. Do not use `os.Getenv()` directly in package code. Database backends receive configuration via the `database.DatabaseConfig` struct.

### Environment Variables

```bash
docs/POLICY_CONFIGURATION_REFERENCE.md (new file, 615 lines)
@@ -0,0 +1,615 @@
# ORLY Policy Configuration Reference

This document provides a definitive reference for all policy configuration options and when each rule applies. Use this as the authoritative source for understanding policy behavior.

## Quick Reference: Read vs Write Applicability

| Rule Field | Write (EVENT) | Read (REQ) | Notes |
|------------|:-------------:|:----------:|-------|
| `size_limit` | ✅ | ❌ | Validates incoming events only |
| `content_limit` | ✅ | ❌ | Validates incoming events only |
| `max_age_of_event` | ✅ | ❌ | Prevents replay attacks |
| `max_age_event_in_future` | ✅ | ❌ | Prevents future-dated events |
| `max_expiry_duration` | ✅ | ❌ | Requires expiration tag |
| `must_have_tags` | ✅ | ❌ | Validates required tags |
| `protected_required` | ✅ | ❌ | Requires NIP-70 "-" tag |
| `identifier_regex` | ✅ | ❌ | Validates "d" tag format |
| `tag_validation` | ✅ | ❌ | Validates tag values with regex |
| `write_allow` | ✅ | ❌ | Pubkey whitelist for writing |
| `write_deny` | ✅ | ❌ | Pubkey blacklist for writing |
| `read_allow` | ❌ | ✅ | Pubkey whitelist for reading |
| `read_deny` | ❌ | ✅ | Pubkey blacklist for reading |
| `privileged` | ❌ | ✅ | Party-involved access control |
| `write_allow_follows` | ✅ | ✅ | Grants **both** read AND write |
| `follows_whitelist_admins` | ✅ | ✅ | Grants **both** read AND write |
| `script` | ✅ | ❌ | Scripts only run for writes |

---

## Core Principle: Validation vs Filtering

The policy system has two distinct modes of operation:

### Write Operations (EVENT messages)
- **Purpose**: Validate and accept/reject incoming events
- **All rules apply** except `read_allow`, `read_deny`, and `privileged`
- Events are checked **before storage**
- Rejected events are never stored

### Read Operations (REQ messages)
- **Purpose**: Filter which stored events a user can retrieve
- **Only access control rules apply**: `read_allow`, `read_deny`, `privileged`, `write_allow_follows`, `follows_whitelist_admins`
- Validation rules (size, age, tags) do NOT apply
- Scripts are NOT executed for reads
- Filtering happens **after database query**
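In code, the split shows up as two calls that differ only in the `access` argument. A minimal sketch using the `CheckPolicy` signature exercised by the tests later in this change set (the surrounding package is assumed to already have the policy and event types in scope, and the parameter types are assumptions for illustration):

```go
// Sketch only: p is the loaded policy, ev a decoded event, pubkey the
// authenticated client's key, ip the remote address.
func storeIfAllowed(p *policy.P, ev *event.E, pubkey []byte, ip string) (bool, error) {
    // Write path: validation fields, write access control, and scripts all
    // run before the event is stored.
    return p.CheckPolicy("write", ev, pubkey, ip)
}

func visibleToReader(p *policy.P, ev *event.E, pubkey []byte, ip string) (bool, error) {
    // Read path: only read_allow / read_deny / privileged / follows rules
    // apply; validation fields and scripts are skipped.
    return p.CheckPolicy("read", ev, pubkey, ip)
}
```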
---

## Configuration Structure

```json
{
  "default_policy": "allow|deny",
  "kind": {
    "whitelist": [1, 3, 7],
    "blacklist": [4, 42]
  },
  "owners": ["hex_pubkey_64_chars"],
  "policy_admins": ["hex_pubkey_64_chars"],
  "policy_follow_whitelist_enabled": true,
  "global": { /* Rule object */ },
  "rules": {
    "1": { /* Rule object for kind 1 */ },
    "30023": { /* Rule object for kind 30023 */ }
  }
}
```

---

## Top-Level Configuration Fields

### `default_policy`
**Type**: `string`
**Values**: `"allow"` (default) or `"deny"`
**Applies to**: Both read and write

The fallback behavior when no specific rule makes a decision.

```json
{
  "default_policy": "deny"
}
```

### `kind.whitelist` and `kind.blacklist`
**Type**: `[]int`
**Applies to**: Both read and write

Controls which event kinds are processed at all.

- **Whitelist** takes precedence: If present, ONLY whitelisted kinds are allowed
- **Blacklist**: If no whitelist, these kinds are denied
- **Neither**: Behavior depends on `default_policy` and whether rules exist

```json
{
  "kind": {
    "whitelist": [0, 1, 3, 7, 30023]
  }
}
```

### `owners`
**Type**: `[]string` (64-character hex pubkeys)
**Applies to**: Policy administration

Relay owners with full control. Merged with `ORLY_OWNERS` environment variable.

```json
{
  "owners": ["4a93c5ac0c6f49d2c7e7a5b8d9e0f1a2b3c4d5e6f7a8b9c0d1e2f3a4b5c6d7e8"]
}
```

### `policy_admins`
**Type**: `[]string` (64-character hex pubkeys)
**Applies to**: Policy administration

Pubkeys that can update policy via kind 12345 events (with restrictions).

### `policy_follow_whitelist_enabled`
**Type**: `boolean`
**Default**: `false`
**Applies to**: Both read and write (when `write_allow_follows` is true)

When enabled, allows `write_allow_follows` rules to grant access to policy admin follows.

---

## Rule Object Fields

Rules can be defined in `global` (applies to all events) or `rules[kind]` (applies to specific kind).

### Access Control Fields

#### `write_allow`
**Type**: `[]string` (hex pubkeys)
**Applies to**: Write only
**Behavior**: Exclusive whitelist

When present with entries, ONLY these pubkeys can write events of this kind. All others are denied.

```json
{
  "rules": {
    "1": {
      "write_allow": ["pubkey1_hex", "pubkey2_hex"]
    }
  }
}
```

**Special case**: Empty array `[]` explicitly allows all writers.

#### `write_deny`
**Type**: `[]string` (hex pubkeys)
**Applies to**: Write only
**Behavior**: Blacklist (highest priority)

These pubkeys cannot write events of this kind. **Checked before allow lists.**

```json
{
  "rules": {
    "1": {
      "write_deny": ["banned_pubkey_hex"]
    }
  }
}
```

#### `read_allow`
**Type**: `[]string` (hex pubkeys)
**Applies to**: Read only
**Behavior**: Exclusive whitelist (with OR logic for privileged)

When present with entries:
- If `privileged: false`: ONLY these pubkeys can read
- If `privileged: true`: These pubkeys OR parties involved can read

```json
{
  "rules": {
    "4": {
      "read_allow": ["trusted_pubkey_hex"],
      "privileged": true
    }
  }
}
```

#### `read_deny`
**Type**: `[]string` (hex pubkeys)
**Applies to**: Read only
**Behavior**: Blacklist (highest priority)

These pubkeys cannot read events of this kind. **Checked before allow lists.**

#### `privileged`
**Type**: `boolean`
**Default**: `false`
**Applies to**: Read only

When `true`, events are only readable by "parties involved":
- The event author (`event.pubkey`)
- Users mentioned in `p` tags

**Interaction with `read_allow`**:
- `read_allow` present + `privileged: true` = OR logic (in list OR party involved)
- `read_allow` empty + `privileged: true` = Only parties involved
- `privileged: true` alone = Only parties involved

```json
{
  "rules": {
    "4": {
      "description": "DMs - only sender and recipient can read",
      "privileged": true
    }
  }
}
```
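For intuition, a hedged Go sketch of the read-side decision described by the interaction rules above (function and parameter names are illustrative, not the relay's actual internals):

```go
// canRead sketches the privileged / read_allow interaction for one rule.
func canRead(readAllow map[string]bool, privileged bool, reader, author string, pTags []string) bool {
    // "Party involved" means the event author or anyone mentioned in a p tag.
    partyInvolved := reader == author
    for _, p := range pTags {
        if p == reader {
            partyInvolved = true
            break
        }
    }
    switch {
    case len(readAllow) > 0 && privileged:
        return readAllow[reader] || partyInvolved // OR logic
    case len(readAllow) > 0:
        return readAllow[reader] // exclusive whitelist
    case privileged:
        return partyInvolved // only parties involved
    default:
        return true // defer to other rules / default_policy
    }
}
```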
#### `write_allow_follows`
**Type**: `boolean`
**Default**: `false`
**Applies to**: Both read AND write
**Requires**: `policy_follow_whitelist_enabled: true` at top level

Grants **both read and write access** to pubkeys followed by policy admins.

> **Important**: Despite the name, this grants BOTH read and write access.

```json
{
  "policy_follow_whitelist_enabled": true,
  "rules": {
    "1": {
      "write_allow_follows": true
    }
  }
}
```

#### `follows_whitelist_admins`
**Type**: `[]string` (hex pubkeys)
**Applies to**: Both read AND write

Alternative to `write_allow_follows` that specifies which admin pubkeys' follows are whitelisted for this specific rule.

```json
{
  "rules": {
    "30023": {
      "follows_whitelist_admins": ["curator_pubkey_hex"]
    }
  }
}
```

---

### Validation Fields (Write-Only)

These fields validate incoming events and are **completely ignored for read operations**.

#### `size_limit`
**Type**: `int64` (bytes)
**Applies to**: Write only

Maximum total serialized event size.

```json
{
  "global": {
    "size_limit": 100000
  }
}
```

#### `content_limit`
**Type**: `int64` (bytes)
**Applies to**: Write only

Maximum size of the `content` field.

```json
{
  "rules": {
    "1": {
      "content_limit": 10000
    }
  }
}
```

#### `max_age_of_event`
**Type**: `int64` (seconds)
**Applies to**: Write only

Maximum age of events. Events with `created_at` older than `now - max_age_of_event` are rejected.

```json
{
  "global": {
    "max_age_of_event": 86400
  }
}
```

#### `max_age_event_in_future`
**Type**: `int64` (seconds)
**Applies to**: Write only

Maximum time events can be dated in the future. Events with `created_at` later than `now + max_age_event_in_future` are rejected.

```json
{
  "global": {
    "max_age_event_in_future": 300
  }
}
```

#### `max_expiry_duration`
**Type**: `string` (ISO-8601 duration)
**Applies to**: Write only

Maximum allowed expiry time from event creation. Events **must** have an `expiration` tag when this is set.

**Format**: `P[n]Y[n]M[n]W[n]DT[n]H[n]M[n]S`

**Examples**:
- `P7D` = 7 days
- `PT1H` = 1 hour
- `P1DT12H` = 1 day 12 hours
- `PT30M` = 30 minutes

```json
{
  "rules": {
    "20": {
      "description": "Ephemeral events must expire within 24 hours",
      "max_expiry_duration": "P1D"
    }
  }
}
```
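A minimal sketch of the write-side check this field implies, mirroring the boundary used in the `pkg/policy/policy.go` hunk later in this change set (an expiry exactly at the limit is rejected, and the `expiration` tag becomes mandatory once a limit is configured; names here are illustrative):

```go
// expiryAllowed sketches the max_expiry_duration check after the ISO-8601
// duration has already been resolved to seconds.
func expiryAllowed(createdAt, expiryTs, maxExpirySeconds int64, hasExpiryTag bool) bool {
    if maxExpirySeconds <= 0 {
        return true // no limit configured
    }
    if !hasExpiryTag {
        return false // expiration tag is required when a limit is set
    }
    maxAllowedExpiry := createdAt + maxExpirySeconds
    return expiryTs < maxAllowedExpiry // >= the limit is rejected
}
```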
#### `must_have_tags`
**Type**: `[]string` (tag names)
**Applies to**: Write only

Required tags that must be present on the event.

```json
{
  "rules": {
    "1": {
      "must_have_tags": ["p", "e"]
    }
  }
}
```

#### `protected_required`
**Type**: `boolean`
**Default**: `false`
**Applies to**: Write only

Requires events to have a `-` tag (NIP-70 protected events).

```json
{
  "rules": {
    "4": {
      "protected_required": true
    }
  }
}
```

#### `identifier_regex`
**Type**: `string` (regex pattern)
**Applies to**: Write only

Regex pattern that `d` tag values must match. Events **must** have a `d` tag when this is set.

```json
{
  "rules": {
    "30023": {
      "identifier_regex": "^[a-z0-9-]{1,64}$"
    }
  }
}
```

#### `tag_validation`
**Type**: `map[string]string` (tag name → regex pattern)
**Applies to**: Write only

Regex patterns for validating specific tag values. Only validates tags that are **present** on the event.

> **Note**: To require a tag to exist, use `must_have_tags`. `tag_validation` only validates format.

```json
{
  "rules": {
    "30023": {
      "tag_validation": {
        "t": "^[a-z0-9-]{1,32}$",
        "d": "^[a-z0-9-]+$"
      }
    }
  }
}
```
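For intuition, a small Go sketch of how such a map could be applied on write (illustrative only; a real implementation would precompile the patterns once rather than per event, the way the policy code caches its identifier regex):

```go
package main // sketch only

import "regexp"

// tagsValid checks present tags against tag_validation patterns; absent tags
// are skipped (use must_have_tags to require presence).
func tagsValid(tagValidation map[string]string, tags [][]string) bool {
    for _, tag := range tags {
        if len(tag) < 2 {
            continue // malformed or value-less tag
        }
        pattern, ok := tagValidation[tag[0]]
        if !ok {
            continue // no rule for this tag name
        }
        if !regexp.MustCompile(pattern).MatchString(tag[1]) {
            return false // value does not match the configured pattern
        }
    }
    return true
}
```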
---

### Script Configuration

#### `script`
**Type**: `string` (file path)
**Applies to**: Write only

Path to a custom validation script. **Scripts are NOT executed for read operations.**

```json
{
  "rules": {
    "1": {
      "script": "/etc/orly/scripts/spam-filter.py"
    }
  }
}
```

---

## Policy Evaluation Order

### For Write Operations

```
1. Global Rule Check (all fields apply)
   ├─ Universal constraints (size, tags, age, etc.)
   ├─ write_deny check
   ├─ write_allow_follows / follows_whitelist_admins check
   └─ write_allow check

2. Kind Filtering (whitelist/blacklist)

3. Kind-Specific Rule Check (same as global)
   ├─ Universal constraints
   ├─ write_deny check
   ├─ write_allow_follows / follows_whitelist_admins check
   ├─ write_allow check
   └─ Script execution (if configured)

4. Default Policy (if no rules matched)
```

### For Read Operations

```
1. Global Rule Check (access control only)
   ├─ read_deny check
   ├─ write_allow_follows / follows_whitelist_admins check
   ├─ read_allow check
   └─ privileged check (party involved)

2. Kind Filtering (whitelist/blacklist)

3. Kind-Specific Rule Check (access control only)
   ├─ read_deny check
   ├─ write_allow_follows / follows_whitelist_admins check
   ├─ read_allow + privileged (OR logic)
   └─ privileged-only check

4. Default Policy (if no rules matched)

NOTE: Scripts are NOT executed for read operations
```

---

## Common Configuration Patterns

### Private Relay (Whitelist Only)

```json
{
  "default_policy": "deny",
  "global": {
    "write_allow": ["trusted_pubkey_1", "trusted_pubkey_2"],
    "read_allow": ["trusted_pubkey_1", "trusted_pubkey_2"]
  }
}
```

### Open Relay with Spam Protection

```json
{
  "default_policy": "allow",
  "global": {
    "size_limit": 100000,
    "max_age_of_event": 86400,
    "max_age_event_in_future": 300
  },
  "rules": {
    "1": {
      "script": "/etc/orly/scripts/spam-filter.sh"
    }
  }
}
```

### Community Relay (Follows-Based)

```json
{
  "default_policy": "deny",
  "policy_admins": ["community_admin_pubkey"],
  "policy_follow_whitelist_enabled": true,
  "global": {
    "write_allow_follows": true
  }
}
```

### Encrypted DMs (Privileged Access)

```json
{
  "rules": {
    "4": {
      "description": "Encrypted DMs - only sender/recipient",
      "privileged": true,
      "protected_required": true
    }
  }
}
```

### Long-Form Content with Validation

```json
{
  "rules": {
    "30023": {
      "description": "Long-form articles",
      "size_limit": 100000,
      "content_limit": 50000,
      "max_expiry_duration": "P30D",
      "identifier_regex": "^[a-z0-9-]{1,64}$",
      "tag_validation": {
        "t": "^[a-z0-9-]{1,32}$"
      }
    }
  }
}
```

---

## Important Behaviors

### Whitelist vs Blacklist Precedence

1. **Deny lists** (`write_deny`, `read_deny`) are checked **first** and have highest priority
2. **Allow lists** are exclusive when populated - ONLY listed pubkeys are allowed
3. **Deny-only configuration**: If only deny list exists (no allow list), all non-denied pubkeys are allowed

### Empty Arrays vs Null

- `[]` (empty array explicitly set) = Allow all
- `null` or field omitted = No list configured, use other rules
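This distinction maps directly onto nil versus empty slices after JSON decoding in Go. A small, self-contained sketch (the struct here is illustrative; only the `read_allow` field name is borrowed from the rule schema):

```go
package main // sketch only

import (
    "encoding/json"
    "fmt"
)

type rule struct {
    ReadAllow []string `json:"read_allow"`
}

func main() {
    var omitted, empty rule
    _ = json.Unmarshal([]byte(`{}`), &omitted)               // field absent
    _ = json.Unmarshal([]byte(`{"read_allow": []}`), &empty) // explicit []

    fmt.Println(omitted.ReadAllow == nil) // true  -> no list configured, other rules decide
    fmt.Println(empty.ReadAllow == nil)   // false -> explicitly set, treated as "allow all"
}
```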
### Global Rules Are Additive

Global rules are always evaluated **in addition to** kind-specific rules. They cannot be overridden at the kind level.

### Implicit Kind Whitelist

When rules are defined but no explicit `kind.whitelist`:
- If `default_policy: "allow"`: All kinds allowed
- If `default_policy: "deny"` or unset: Only kinds with rules allowed

---

## Debugging Policy Issues

Enable debug logging to see policy decisions:

```bash
export ORLY_LOG_LEVEL=debug
```

Log messages include:
- Policy evaluation steps
- Rule matching
- Access decisions with reasons

---

## Source Code Reference

- Rule struct definition: `pkg/policy/policy.go:75-144` (Rule struct)
- Policy struct definition: `pkg/policy/policy.go:380-412` (P struct)
- Check evaluation: `pkg/policy/policy.go:1260-1595` (checkRulePolicy)
- Write handler: `app/handle-event.go:114-138`
- Read handler: `app/handle-req.go:420-438`

main.go (33 lines changed)
@@ -42,8 +42,8 @@ func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    var db database.Database
    if db, err = database.NewDatabase(
        ctx, cancel, cfg.DBType, cfg.DataDir, cfg.DBLogLevel,
    if db, err = database.NewDatabaseWithConfig(
        ctx, cancel, cfg.DBType, makeDatabaseConfig(cfg),
    ); chk.E(err) {
        os.Exit(1)
    }
@@ -318,8 +318,8 @@ func main() {
    ctx, cancel := context.WithCancel(context.Background())
    var db database.Database
    log.I.F("initializing %s database at %s", cfg.DBType, cfg.DataDir)
    if db, err = database.NewDatabase(
        ctx, cancel, cfg.DBType, cfg.DataDir, cfg.DBLogLevel,
    if db, err = database.NewDatabaseWithConfig(
        ctx, cancel, cfg.DBType, makeDatabaseConfig(cfg),
    ); chk.E(err) {
        os.Exit(1)
    }
@@ -430,3 +430,28 @@ func main() {
    }
    // log.I.F("exiting")
}

// makeDatabaseConfig creates a database.DatabaseConfig from the app config.
// This helper function extracts all database-specific configuration values
// and constructs the appropriate struct for the database package.
func makeDatabaseConfig(cfg *config.C) *database.DatabaseConfig {
    dataDir, logLevel,
        blockCacheMB, indexCacheMB, queryCacheSizeMB,
        queryCacheMaxAge,
        inlineEventThreshold,
        dgraphURL, neo4jURI, neo4jUser, neo4jPassword := cfg.GetDatabaseConfigValues()

    return &database.DatabaseConfig{
        DataDir: dataDir,
        LogLevel: logLevel,
        BlockCacheMB: blockCacheMB,
        IndexCacheMB: indexCacheMB,
        QueryCacheSizeMB: queryCacheSizeMB,
        QueryCacheMaxAge: queryCacheMaxAge,
        InlineEventThreshold: inlineEventThreshold,
        DgraphURL: dgraphURL,
        Neo4jURI: neo4jURI,
        Neo4jUser: neo4jUser,
        Neo4jPassword: neo4jPassword,
    }
}

@@ -5,7 +5,6 @@ import (
    "errors"
    "os"
    "path/filepath"
    "strconv"
    "time"

    "github.com/dgraph-io/badger/v4"
@@ -21,10 +20,11 @@ import (

// D implements the Database interface using Badger as the storage backend
type D struct {
    ctx context.Context
    cancel context.CancelFunc
    dataDir string
    Logger *logger
    ctx                  context.Context
    cancel               context.CancelFunc
    dataDir              string
    Logger               *logger
    inlineEventThreshold int // Configurable threshold for inline event storage
    *badger.DB
    seq *badger.Sequence
    pubkeySeq *badger.Sequence // Sequence for pubkey serials
@@ -35,63 +35,85 @@ type D struct {
// Ensure D implements Database interface at compile time
var _ Database = (*D)(nil)

// New creates a new Badger database instance with default configuration.
// This is provided for backward compatibility with existing callers.
// For full configuration control, use NewWithConfig instead.
func New(
    ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
) (
    d *D, err error,
) {
    // Initialize query cache with configurable size (default 512MB)
    queryCacheSize := int64(512 * 1024 * 1024) // 512 MB
    if v := os.Getenv("ORLY_QUERY_CACHE_SIZE_MB"); v != "" {
        if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
            queryCacheSize = int64(n * 1024 * 1024)
        }
    // Create a default config for backward compatibility
    cfg := &DatabaseConfig{
        DataDir: dataDir,
        LogLevel: logLevel,
        BlockCacheMB: 1024, // Default 1024 MB
        IndexCacheMB: 512, // Default 512 MB
        QueryCacheSizeMB: 512, // Default 512 MB
        QueryCacheMaxAge: 5 * time.Minute, // Default 5 minutes
        InlineEventThreshold: 1024, // Default 1024 bytes
    }
    queryCacheMaxAge := 5 * time.Minute // Default 5 minutes
    if v := os.Getenv("ORLY_QUERY_CACHE_MAX_AGE"); v != "" {
        if duration, perr := time.ParseDuration(v); perr == nil {
            queryCacheMaxAge = duration
        }
    return NewWithConfig(ctx, cancel, cfg)
}

// NewWithConfig creates a new Badger database instance with full configuration.
// This is the preferred method when you have access to DatabaseConfig.
func NewWithConfig(
    ctx context.Context, cancel context.CancelFunc, cfg *DatabaseConfig,
) (
    d *D, err error,
) {
    // Apply defaults for zero values (backward compatibility)
    blockCacheMB := cfg.BlockCacheMB
    if blockCacheMB == 0 {
        blockCacheMB = 1024 // Default 1024 MB
    }
    indexCacheMB := cfg.IndexCacheMB
    if indexCacheMB == 0 {
        indexCacheMB = 512 // Default 512 MB
    }
    queryCacheSizeMB := cfg.QueryCacheSizeMB
    if queryCacheSizeMB == 0 {
        queryCacheSizeMB = 512 // Default 512 MB
    }
    queryCacheMaxAge := cfg.QueryCacheMaxAge
    if queryCacheMaxAge == 0 {
        queryCacheMaxAge = 5 * time.Minute // Default 5 minutes
    }
    inlineEventThreshold := cfg.InlineEventThreshold
    if inlineEventThreshold == 0 {
        inlineEventThreshold = 1024 // Default 1024 bytes
    }

    queryCacheSize := int64(queryCacheSizeMB * 1024 * 1024)

    d = &D{
        ctx: ctx,
        cancel: cancel,
        dataDir: dataDir,
        Logger: NewLogger(lol.GetLogLevel(logLevel), dataDir),
        DB: nil,
        seq: nil,
        ready: make(chan struct{}),
        queryCache: querycache.NewEventCache(queryCacheSize, queryCacheMaxAge),
        ctx: ctx,
        cancel: cancel,
        dataDir: cfg.DataDir,
        Logger: NewLogger(lol.GetLogLevel(cfg.LogLevel), cfg.DataDir),
        inlineEventThreshold: inlineEventThreshold,
        DB: nil,
        seq: nil,
        ready: make(chan struct{}),
        queryCache: querycache.NewEventCache(queryCacheSize, queryCacheMaxAge),
    }

    // Ensure the data directory exists
    if err = os.MkdirAll(dataDir, 0755); chk.E(err) {
    if err = os.MkdirAll(cfg.DataDir, 0755); chk.E(err) {
        return
    }

    // Also ensure the directory exists using apputil.EnsureDir for any
    // potential subdirectories
    dummyFile := filepath.Join(dataDir, "dummy.sst")
    dummyFile := filepath.Join(cfg.DataDir, "dummy.sst")
    if err = apputil.EnsureDir(dummyFile); chk.E(err) {
        return
    }

    opts := badger.DefaultOptions(d.dataDir)
    // Configure caches based on environment to better match workload.
    // Configure caches based on config to better match workload.
    // Defaults aim for higher hit ratios under read-heavy workloads while remaining safe.
    var blockCacheMB = 1024 // default 512 MB
    var indexCacheMB = 512 // default 256 MB
    if v := os.Getenv("ORLY_DB_BLOCK_CACHE_MB"); v != "" {
        if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
            blockCacheMB = n
        }
    }
    if v := os.Getenv("ORLY_DB_INDEX_CACHE_MB"); v != "" {
        if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
            indexCacheMB = n
        }
    }
    opts.BlockCacheSize = int64(blockCacheMB * units.Mb)
    opts.IndexCacheSize = int64(indexCacheMB * units.Mb)
    opts.BlockSize = 4 * units.Kb // 4 KB block size

@@ -4,8 +4,33 @@ import (
    "context"
    "fmt"
    "strings"
    "time"
)

// DatabaseConfig holds all database configuration options that can be passed
// to any database backend. Each backend uses the relevant fields for its type.
// This centralizes configuration instead of having each backend read env vars directly.
type DatabaseConfig struct {
    // Common settings for all backends
    DataDir string
    LogLevel string

    // Badger-specific settings
    BlockCacheMB int // ORLY_DB_BLOCK_CACHE_MB
    IndexCacheMB int // ORLY_DB_INDEX_CACHE_MB
    QueryCacheSizeMB int // ORLY_QUERY_CACHE_SIZE_MB
    QueryCacheMaxAge time.Duration // ORLY_QUERY_CACHE_MAX_AGE
    InlineEventThreshold int // ORLY_INLINE_EVENT_THRESHOLD

    // DGraph-specific settings
    DgraphURL string // ORLY_DGRAPH_URL

    // Neo4j-specific settings
    Neo4jURI string // ORLY_NEO4J_URI
    Neo4jUser string // ORLY_NEO4J_USER
    Neo4jPassword string // ORLY_NEO4J_PASSWORD
}

// NewDatabase creates a database instance based on the specified type.
// Supported types: "badger", "dgraph", "neo4j"
func NewDatabase(
@@ -14,19 +39,39 @@ func NewDatabase(
    dbType string,
    dataDir string,
    logLevel string,
) (Database, error) {
    // Create a default config for backward compatibility with existing callers
    cfg := &DatabaseConfig{
        DataDir: dataDir,
        LogLevel: logLevel,
    }
    return NewDatabaseWithConfig(ctx, cancel, dbType, cfg)
}

// NewDatabaseWithConfig creates a database instance with full configuration.
// This is the preferred method when you have access to the app config.
func NewDatabaseWithConfig(
    ctx context.Context,
    cancel context.CancelFunc,
    dbType string,
    cfg *DatabaseConfig,
) (Database, error) {
    switch strings.ToLower(dbType) {
    case "badger", "":
        // Use the existing badger implementation
        return New(ctx, cancel, dataDir, logLevel)
        return NewWithConfig(ctx, cancel, cfg)
    case "dgraph":
        // Use the new dgraph implementation
        // Import dynamically to avoid import cycles
        return newDgraphDatabase(ctx, cancel, dataDir, logLevel)
        // Use the dgraph implementation
        if newDgraphDatabase == nil {
            return nil, fmt.Errorf("dgraph database backend not available (import _ \"next.orly.dev/pkg/dgraph\")")
        }
        return newDgraphDatabase(ctx, cancel, cfg)
    case "neo4j":
        // Use the new neo4j implementation
        // Import dynamically to avoid import cycles
        return newNeo4jDatabase(ctx, cancel, dataDir, logLevel)
        // Use the neo4j implementation
        if newNeo4jDatabase == nil {
            return nil, fmt.Errorf("neo4j database backend not available (import _ \"next.orly.dev/pkg/neo4j\")")
        }
        return newNeo4jDatabase(ctx, cancel, cfg)
    default:
        return nil, fmt.Errorf("unsupported database type: %s (supported: badger, dgraph, neo4j)", dbType)
    }
@@ -34,20 +79,20 @@ func NewDatabase(

// newDgraphDatabase creates a dgraph database instance
// This is defined here to avoid import cycles
var newDgraphDatabase func(context.Context, context.CancelFunc, string, string) (Database, error)
var newDgraphDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)

// RegisterDgraphFactory registers the dgraph database factory
// This is called from the dgraph package's init() function
func RegisterDgraphFactory(factory func(context.Context, context.CancelFunc, string, string) (Database, error)) {
func RegisterDgraphFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
    newDgraphDatabase = factory
}

// newNeo4jDatabase creates a neo4j database instance
// This is defined here to avoid import cycles
var newNeo4jDatabase func(context.Context, context.CancelFunc, string, string) (Database, error)
var newNeo4jDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)

// RegisterNeo4jFactory registers the neo4j database factory
// This is called from the neo4j package's init() function
func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, string, string) (Database, error)) {
func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
    newNeo4jDatabase = factory
}

@@ -5,8 +5,6 @@ import (
    "context"
    "errors"
    "fmt"
    "os"
    "strconv"
    "strings"

    "github.com/dgraph-io/badger/v4"
@@ -270,14 +268,9 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
    eventData := eventDataBuf.Bytes()

    // Determine storage strategy (Reiser4 optimizations)
    // Get threshold from environment, default to 0 (disabled)
    // When enabled, typical values: 384 (conservative), 512 (recommended), 1024 (aggressive)
    smallEventThreshold := 1024
    if v := os.Getenv("ORLY_INLINE_EVENT_THRESHOLD"); v != "" {
        if n, perr := strconv.Atoi(v); perr == nil && n >= 0 {
            smallEventThreshold = n
        }
    }
    // Use the threshold from database configuration
    // Typical values: 384 (conservative), 512 (recommended), 1024 (aggressive)
    smallEventThreshold := d.inlineEventThreshold
    isSmallEvent := smallEventThreshold > 0 && len(eventData) <= smallEventThreshold
    isReplaceableEvent := kind.IsReplaceable(ev.Kind)
    isAddressableEvent := kind.IsParameterizedReplaceable(ev.Kind)
@@ -48,30 +48,21 @@ func init() {
    database.RegisterDgraphFactory(func(
        ctx context.Context,
        cancel context.CancelFunc,
        dataDir string,
        logLevel string,
        cfg *database.DatabaseConfig,
    ) (database.Database, error) {
        return New(ctx, cancel, dataDir, logLevel)
        return NewWithConfig(ctx, cancel, cfg)
    })
}

// Config holds configuration options for the Dgraph database
type Config struct {
    DataDir string
    LogLevel string
    DgraphURL string // Dgraph gRPC endpoint (e.g., "localhost:9080")
    EnableGraphQL bool
    EnableIntrospection bool
}

// New creates a new Dgraph-based database instance
func New(
    ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
// NewWithConfig creates a new Dgraph-based database instance with full configuration.
// Configuration is passed from the centralized app config via DatabaseConfig.
func NewWithConfig(
    ctx context.Context, cancel context.CancelFunc, cfg *database.DatabaseConfig,
) (
    d *D, err error,
) {
    // Get dgraph URL from environment, default to localhost
    dgraphURL := os.Getenv("ORLY_DGRAPH_URL")
    // Apply defaults for empty values
    dgraphURL := cfg.DgraphURL
    if dgraphURL == "" {
        dgraphURL = "localhost:9080"
    }
@@ -79,8 +70,8 @@ func New(
    d = &D{
        ctx: ctx,
        cancel: cancel,
        dataDir: dataDir,
        Logger: NewLogger(lol.GetLogLevel(logLevel), dataDir),
        dataDir: cfg.DataDir,
        Logger: NewLogger(lol.GetLogLevel(cfg.LogLevel), cfg.DataDir),
        dgraphURL: dgraphURL,
        enableGraphQL: false,
        enableIntrospection: false,
@@ -88,12 +79,12 @@ func New(
    }

    // Ensure the data directory exists
    if err = os.MkdirAll(dataDir, 0755); chk.E(err) {
    if err = os.MkdirAll(cfg.DataDir, 0755); chk.E(err) {
        return
    }

    // Ensure directory structure
    dummyFile := filepath.Join(dataDir, "dummy.sst")
    dummyFile := filepath.Join(cfg.DataDir, "dummy.sst")
    if err = apputil.EnsureDir(dummyFile); chk.E(err) {
        return
    }
@@ -128,6 +119,21 @@ func New(
    return
}

// New creates a new Dgraph-based database instance with default configuration.
// This is provided for backward compatibility with existing callers (tests, etc.).
// For full configuration control, use NewWithConfig instead.
func New(
    ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
) (
    d *D, err error,
) {
    cfg := &database.DatabaseConfig{
        DataDir: dataDir,
        LogLevel: logLevel,
    }
    return NewWithConfig(ctx, cancel, cfg)
}

// initDgraphClient establishes connection to dgraph server
func (d *D) initDgraphClient() error {
    d.Logger.Infof("connecting to dgraph at %s", d.dgraphURL)

@@ -15,6 +15,8 @@ docker run -d --name neo4j \

### 2. Configure Environment

All Neo4j configuration is defined in `app/config/config.go` and visible via `./orly help`:

```bash
export ORLY_DB_TYPE=neo4j
export ORLY_NEO4J_URI=bolt://localhost:7687
@@ -22,6 +24,8 @@ export ORLY_NEO4J_USER=neo4j
export ORLY_NEO4J_PASSWORD=password
```

> **Note:** Configuration is centralized in `app/config/config.go`. Do not use `os.Getenv()` directly in package code - all environment variables should be passed via the `database.DatabaseConfig` struct.

### 3. Run ORLY

```bash
@@ -70,38 +70,29 @@ func init() {
    database.RegisterNeo4jFactory(func(
        ctx context.Context,
        cancel context.CancelFunc,
        dataDir string,
        logLevel string,
        cfg *database.DatabaseConfig,
    ) (database.Database, error) {
        return New(ctx, cancel, dataDir, logLevel)
        return NewWithConfig(ctx, cancel, cfg)
    })
}

// Config holds configuration options for the Neo4j database
type Config struct {
    DataDir string
    LogLevel string
    Neo4jURI string // Neo4j bolt URI (e.g., "bolt://localhost:7687")
    Neo4jUser string // Authentication username
    Neo4jPassword string // Authentication password
}

// New creates a new Neo4j-based database instance
func New(
    ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
// NewWithConfig creates a new Neo4j-based database instance with full configuration.
// Configuration is passed from the centralized app config via DatabaseConfig.
func NewWithConfig(
    ctx context.Context, cancel context.CancelFunc, cfg *database.DatabaseConfig,
) (
    n *N, err error,
) {
    // Get Neo4j connection details from environment
    neo4jURI := os.Getenv("ORLY_NEO4J_URI")
    // Apply defaults for empty values
    neo4jURI := cfg.Neo4jURI
    if neo4jURI == "" {
        neo4jURI = "bolt://localhost:7687"
    }
    neo4jUser := os.Getenv("ORLY_NEO4J_USER")
    neo4jUser := cfg.Neo4jUser
    if neo4jUser == "" {
        neo4jUser = "neo4j"
    }
    neo4jPassword := os.Getenv("ORLY_NEO4J_PASSWORD")
    neo4jPassword := cfg.Neo4jPassword
    if neo4jPassword == "" {
        neo4jPassword = "password"
    }
@@ -109,8 +100,8 @@ func New(
    n = &N{
        ctx: ctx,
        cancel: cancel,
        dataDir: dataDir,
        Logger: NewLogger(lol.GetLogLevel(logLevel), dataDir),
        dataDir: cfg.DataDir,
        Logger: NewLogger(lol.GetLogLevel(cfg.LogLevel), cfg.DataDir),
        neo4jURI: neo4jURI,
        neo4jUser: neo4jUser,
        neo4jPassword: neo4jPassword,
@@ -118,12 +109,12 @@ func New(
    }

    // Ensure the data directory exists
    if err = os.MkdirAll(dataDir, 0755); chk.E(err) {
    if err = os.MkdirAll(cfg.DataDir, 0755); chk.E(err) {
        return
    }

    // Ensure directory structure
    dummyFile := filepath.Join(dataDir, "dummy.sst")
    dummyFile := filepath.Join(cfg.DataDir, "dummy.sst")
    if err = apputil.EnsureDir(dummyFile); chk.E(err) {
        return
    }
@@ -158,6 +149,21 @@ func New(
    return
}

// New creates a new Neo4j-based database instance with default configuration.
// This is provided for backward compatibility with existing callers (tests, etc.).
// For full configuration control, use NewWithConfig instead.
func New(
    ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
) (
    n *N, err error,
) {
    cfg := &database.DatabaseConfig{
        DataDir: dataDir,
        LogLevel: logLevel,
    }
    return NewWithConfig(ctx, cancel, cfg)
}

// initNeo4jClient establishes connection to Neo4j server
func (n *N) initNeo4jClient() error {
    n.Logger.Infof("connecting to neo4j at %s", n.neo4jURI)

@@ -134,6 +134,10 @@ CREATE (e)-[:AUTHORED_BY]->(a)
    eTagIndex := 0
    pTagIndex := 0

    // Track if we need to add WITH clause before OPTIONAL MATCH
    // This is required because Cypher doesn't allow MATCH after CREATE without WITH
    needsWithClause := true

    // Only process tags if they exist
    if ev.Tags != nil {
        for _, tagItem := range *ev.Tags {
@@ -150,6 +154,15 @@ CREATE (e)-[:AUTHORED_BY]->(a)
            paramName := fmt.Sprintf("eTag_%d", eTagIndex)
            params[paramName] = tagValue

            // Add WITH clause before first OPTIONAL MATCH to transition from CREATE to MATCH
            if needsWithClause {
                cypher += `
// Carry forward event and author nodes for tag processing
WITH e, a
`
                needsWithClause = false
            }

            cypher += fmt.Sprintf(`
// Reference to event (e-tag)
OPTIONAL MATCH (ref%d:Event {id: $%s})

@@ -113,11 +113,11 @@ func TestMaxExpiryDuration(t *testing.T) {
            expectAllow: true,
        },
        {
            name: "valid expiry at exact limit",
            name: "expiry at exact limit rejected",
            maxExpiryDuration: "PT1H",
            eventExpiry: 3600, // exactly 1 hour
            eventExpiry: 3600, // exactly 1 hour - >= means this is rejected
            hasExpiryTag: true,
            expectAllow: true,
            expectAllow: false,
        },
        {
            name: "expiry exceeds limit",
@@ -235,6 +235,79 @@ func TestMaxExpiryDurationPrecedence(t *testing.T) {
    }
}

// Test that max_expiry_duration only applies to writes, not reads
func TestMaxExpiryDurationWriteOnly(t *testing.T) {
    signer, pubkey := generateTestKeypair(t)

    // Policy with strict max_expiry_duration
    policyJSON := []byte(`{
        "default_policy": "allow",
        "rules": {
            "4": {
                "description": "DM events with expiry",
                "max_expiry_duration": "PT10M",
                "privileged": true
            }
        }
    }`)

    policy, err := New(policyJSON)
    if err != nil {
        t.Fatalf("Failed to create policy: %v", err)
    }

    // Create event WITHOUT an expiry tag - this would fail write validation
    // but should still be readable
    ev := createTestEventForNewFields(t, signer, "test DM", 4)
    if err := ev.Sign(signer); chk.E(err) {
        t.Fatalf("Failed to sign: %v", err)
    }

    // Write should fail (no expiry tag when max_expiry_duration is set)
    allowed, err := policy.CheckPolicy("write", ev, pubkey, "127.0.0.1")
    if err != nil {
        t.Fatalf("CheckPolicy write error: %v", err)
    }
    if allowed {
        t.Error("Write should be denied for event without expiry tag when max_expiry_duration is set")
    }

    // Read should succeed (validation constraints don't apply to reads)
    allowed, err = policy.CheckPolicy("read", ev, pubkey, "127.0.0.1")
    if err != nil {
        t.Fatalf("CheckPolicy read error: %v", err)
    }
    if !allowed {
        t.Error("Read should be allowed - max_expiry_duration is write-only validation")
    }

    // Also test with an event that has expiry exceeding the limit
    ev2 := createTestEventForNewFields(t, signer, "test DM 2", 4)
    expiryTs := ev2.CreatedAt + 7200 // 2 hours - exceeds 10 minute limit
    addTagString(ev2, "expiration", int64ToString(expiryTs))
    if err := ev2.Sign(signer); chk.E(err) {
        t.Fatalf("Failed to sign: %v", err)
    }

    // Write should fail (expiry exceeds limit)
    allowed, err = policy.CheckPolicy("write", ev2, pubkey, "127.0.0.1")
    if err != nil {
        t.Fatalf("CheckPolicy write error: %v", err)
    }
    if allowed {
        t.Error("Write should be denied for event with expiry exceeding max_expiry_duration")
    }

    // Read should still succeed
    allowed, err = policy.CheckPolicy("read", ev2, pubkey, "127.0.0.1")
    if err != nil {
        t.Fatalf("CheckPolicy read error: %v", err)
    }
    if !allowed {
        t.Error("Read should be allowed - max_expiry_duration is write-only validation")
    }
}

// =============================================================================
// ProtectedRequired Tests
// =============================================================================

@@ -1280,7 +1280,8 @@ func (p *P) checkRulePolicy(
    }

    // Check required tags
    if len(rule.MustHaveTags) > 0 {
    // Only apply for write access - we validate what goes in, not what comes out
    if access == "write" && len(rule.MustHaveTags) > 0 {
        for _, requiredTag := range rule.MustHaveTags {
            if ev.Tags.GetFirst([]byte(requiredTag)) == nil {
                return false, nil
@@ -1289,7 +1290,8 @@ func (p *P) checkRulePolicy(
    }

    // Check expiry time (uses maxExpirySeconds which is parsed from MaxExpiryDuration or MaxExpiry)
    if rule.maxExpirySeconds != nil && *rule.maxExpirySeconds > 0 {
    // Only apply for write access - we validate what goes in, not what comes out
    if access == "write" && rule.maxExpirySeconds != nil && *rule.maxExpirySeconds > 0 {
        expiryTag := ev.Tags.GetFirst([]byte("expiration"))
        if expiryTag == nil {
            return false, nil // Must have expiry if max_expiry is set
@@ -1302,7 +1304,7 @@ func (p *P) checkRulePolicy(
            return false, nil // Invalid expiry format
        }
        maxAllowedExpiry := ev.CreatedAt + *rule.maxExpirySeconds
        if expiryTs > maxAllowedExpiry {
        if expiryTs >= maxAllowedExpiry {
            log.D.F("expiration %d exceeds max allowed %d (created_at %d + max_expiry %d)",
                expiryTs, maxAllowedExpiry, ev.CreatedAt, *rule.maxExpirySeconds)
            return false, nil // Expiry too far in the future
@@ -1310,7 +1312,8 @@ func (p *P) checkRulePolicy(
    }

    // Check ProtectedRequired (NIP-70: events must have "-" tag)
    if rule.ProtectedRequired {
    // Only apply for write access - we validate what goes in, not what comes out
    if access == "write" && rule.ProtectedRequired {
        protectedTag := ev.Tags.GetFirst([]byte("-"))
        if protectedTag == nil {
            log.D.F("protected_required: event missing '-' tag (NIP-70)")
@@ -1319,7 +1322,8 @@ func (p *P) checkRulePolicy(
    }

    // Check IdentifierRegex (validates "d" tag values)
    if rule.identifierRegexCache != nil {
    // Only apply for write access - we validate what goes in, not what comes out
    if access == "write" && rule.identifierRegexCache != nil {
        dTags := ev.Tags.GetAll([]byte("d"))
        if len(dTags) == 0 {
            log.D.F("identifier_regex: event missing 'd' tag")
@@ -1336,7 +1340,8 @@ func (p *P) checkRulePolicy(
    }

    // Check MaxAgeOfEvent (maximum age of event in seconds)
    if rule.MaxAgeOfEvent != nil && *rule.MaxAgeOfEvent > 0 {
    // Only apply for write access - we validate what goes in, not what comes out
    if access == "write" && rule.MaxAgeOfEvent != nil && *rule.MaxAgeOfEvent > 0 {
        currentTime := time.Now().Unix()
        maxAllowedTime := currentTime - *rule.MaxAgeOfEvent
        if ev.CreatedAt < maxAllowedTime {
@@ -1345,7 +1350,8 @@ func (p *P) checkRulePolicy(
    }

    // Check MaxAgeEventInFuture (maximum time event can be in the future in seconds)
    if rule.MaxAgeEventInFuture != nil && *rule.MaxAgeEventInFuture > 0 {
    // Only apply for write access - we validate what goes in, not what comes out
    if access == "write" && rule.MaxAgeEventInFuture != nil && *rule.MaxAgeEventInFuture > 0 {
        currentTime := time.Now().Unix()
        maxFutureTime := currentTime + *rule.MaxAgeEventInFuture
        if ev.CreatedAt > maxFutureTime {

@@ -1 +1 @@
v0.31.6
v0.31.9