Compare commits
5 Commits
| SHA1 |
|------|
| 016e97925a |
| 042b47a4d9 |
| 952ce0285b |
| 45856f39b4 |
| 70944d45df |
CLAUDE.md (20 changes)
@@ -139,9 +139,16 @@ export ORLY_SPROCKET_ENABLED=true
 # Enable policy system
 export ORLY_POLICY_ENABLED=true
 
-# Database backend selection (badger or dgraph)
+# Database backend selection (badger, dgraph, or neo4j)
 export ORLY_DB_TYPE=badger
-export ORLY_DGRAPH_URL=localhost:9080 # Only for dgraph backend
+
+# DGraph configuration (only when ORLY_DB_TYPE=dgraph)
+export ORLY_DGRAPH_URL=localhost:9080
+
+# Neo4j configuration (only when ORLY_DB_TYPE=neo4j)
+export ORLY_NEO4J_URI=bolt://localhost:7687
+export ORLY_NEO4J_USER=neo4j
+export ORLY_NEO4J_PASSWORD=password
 
 # Query cache configuration (improves REQ response times)
 export ORLY_QUERY_CACHE_SIZE_MB=512 # Default: 512MB
@@ -150,6 +157,7 @@ export ORLY_QUERY_CACHE_MAX_AGE=5m # Cache expiry time
 # Database cache tuning (for Badger backend)
 export ORLY_DB_BLOCK_CACHE_MB=512 # Block cache size
 export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
+export ORLY_INLINE_EVENT_THRESHOLD=1024 # Inline storage threshold (bytes)
 ```
 
 ## Code Architecture
@@ -299,9 +307,15 @@ export ORLY_DB_INDEX_CACHE_MB=256 # Index cache size
 
 **Configuration System:**
 - Uses `go-simpler.org/env` for struct tags
 - All config in `app/config/config.go` with `ORLY_` prefix
+- **IMPORTANT: ALL environment variables MUST be defined in `app/config/config.go`**
+- Never use `os.Getenv()` directly in packages - always pass config via structs
+- This ensures all config options appear in `./orly help` output
+- Database backends receive config via `database.DatabaseConfig` struct
+- Use `GetDatabaseConfigValues()` helper to extract DB config from app config
 - All config fields use `ORLY_` prefix with struct tags defining defaults and usage
 - Supports XDG directories via `github.com/adrg/xdg`
 - Default data directory: `~/.local/share/ORLY`
+- Database-specific config (Neo4j, DGraph, Badger) is passed via `DatabaseConfig` struct in `pkg/database/factory.go`
 
 **Event Publishing:**
 - `pkg/protocol/publish/` manages publisher registry
 
@@ -1,5 +1,13 @@
 // Package config provides a go-simpler.org/env configuration table and helpers
 // for working with the list of key/value lists stored in .env files.
+//
+// IMPORTANT: This file is the SINGLE SOURCE OF TRUTH for all environment variables.
+// All configuration options MUST be defined here with proper `env` struct tags.
+// Never use os.Getenv() directly in other packages - pass configuration via structs.
+// This ensures all options appear in `./orly help` output and are documented.
+//
+// For database backends, use GetDatabaseConfigValues() to extract database-specific
+// settings, then construct a database.DatabaseConfig in the caller (e.g., main.go).
 package config
 
 import (
@@ -82,11 +90,19 @@ type C struct {
 	NIP43InviteExpiry time.Duration `env:"ORLY_NIP43_INVITE_EXPIRY" default:"24h" usage:"how long invite codes remain valid"`
 
 	// Database configuration
-	DBType string `env:"ORLY_DB_TYPE" default:"badger" usage:"database backend to use: badger or dgraph"`
+	DBType string `env:"ORLY_DB_TYPE" default:"badger" usage:"database backend to use: badger, dgraph, or neo4j"`
 	DgraphURL string `env:"ORLY_DGRAPH_URL" default:"localhost:9080" usage:"dgraph gRPC endpoint address (only used when ORLY_DB_TYPE=dgraph)"`
 	QueryCacheSizeMB int `env:"ORLY_QUERY_CACHE_SIZE_MB" default:"512" usage:"query cache size in MB (caches database query results for faster REQ responses)"`
 	QueryCacheMaxAge string `env:"ORLY_QUERY_CACHE_MAX_AGE" default:"5m" usage:"maximum age for cached query results (e.g., 5m, 10m, 1h)"`
+
+	// Neo4j configuration (only used when ORLY_DB_TYPE=neo4j)
+	Neo4jURI string `env:"ORLY_NEO4J_URI" default:"bolt://localhost:7687" usage:"Neo4j bolt URI (only used when ORLY_DB_TYPE=neo4j)"`
+	Neo4jUser string `env:"ORLY_NEO4J_USER" default:"neo4j" usage:"Neo4j authentication username (only used when ORLY_DB_TYPE=neo4j)"`
+	Neo4jPassword string `env:"ORLY_NEO4J_PASSWORD" default:"password" usage:"Neo4j authentication password (only used when ORLY_DB_TYPE=neo4j)"`
+
+	// Advanced database tuning
+	InlineEventThreshold int `env:"ORLY_INLINE_EVENT_THRESHOLD" default:"1024" usage:"size threshold in bytes for inline event storage in Badger (0 to disable, typical values: 384-1024)"`
 
 	// TLS configuration
 	TLSDomains []string `env:"ORLY_TLS_DOMAINS" usage:"comma-separated list of domains to respond to for TLS"`
 	Certs []string `env:"ORLY_CERTS" usage:"comma-separated list of paths to certificate root names (e.g., /path/to/cert will load /path/to/cert.pem and /path/to/cert.key)"`
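Every field above follows the same contract: the `env` tag names the variable, `default` supplies the fallback, and `usage` feeds `./orly help`. A minimal sketch of what adding a further option under this convention would look like (the `MaxSubscriptions` field and its consumer are hypothetical, purely illustrative):

```go
// Hypothetical field following the convention above; not part of this change.
type C struct {
	MaxSubscriptions int `env:"ORLY_MAX_SUBSCRIPTIONS" default:"64" usage:"maximum concurrent REQ subscriptions per connection"`
}

// Consumers never call os.Getenv; the value arrives as a parameter or struct field.
func newSubscriptionLimiter(maxSubs int) func() bool {
	count := 0
	return func() bool {
		count++
		return count <= maxSubs // false once the limit is exceeded
	}
}
```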
@@ -369,3 +385,28 @@ func PrintHelp(cfg *C, printer io.Writer) {
 	PrintEnv(cfg, printer)
 	fmt.Fprintln(printer)
 }
+
+// GetDatabaseConfigValues returns the database configuration values as individual fields.
+// This avoids circular imports with pkg/database while allowing main.go to construct
+// a database.DatabaseConfig with the correct type.
+func (cfg *C) GetDatabaseConfigValues() (
+	dataDir, logLevel string,
+	blockCacheMB, indexCacheMB, queryCacheSizeMB int,
+	queryCacheMaxAge time.Duration,
+	inlineEventThreshold int,
+	dgraphURL, neo4jURI, neo4jUser, neo4jPassword string,
+) {
+	// Parse query cache max age from string to duration
+	queryCacheMaxAge = 5 * time.Minute // Default
+	if cfg.QueryCacheMaxAge != "" {
+		if duration, err := time.ParseDuration(cfg.QueryCacheMaxAge); err == nil {
+			queryCacheMaxAge = duration
+		}
+	}
+
+	return cfg.DataDir, cfg.DBLogLevel,
+		cfg.DBBlockCacheMB, cfg.DBIndexCacheMB, cfg.QueryCacheSizeMB,
+		queryCacheMaxAge,
+		cfg.InlineEventThreshold,
+		cfg.DgraphURL, cfg.Neo4jURI, cfg.Neo4jUser, cfg.Neo4jPassword
+}
 
@@ -6,7 +6,6 @@ import (
 	"path/filepath"
 
 	"github.com/adrg/xdg"
-	"lol.mleku.dev/chk"
 	"lol.mleku.dev/log"
 	"git.mleku.dev/mleku/nostr/encoders/event"
 	"git.mleku.dev/mleku/nostr/encoders/filter"
@@ -16,11 +15,20 @@ import (
 )
 
 // HandlePolicyConfigUpdate processes kind 12345 policy configuration events.
-// Only policy admins can update policy configuration.
+// Owners and policy admins can update policy configuration, with different permissions:
+//
+// OWNERS can:
+// - Modify all fields including owners and policy_admins
+// - But owners list must remain non-empty (to prevent lockout)
+//
+// POLICY ADMINS can:
+// - Extend rules (add to allow lists, add new kinds, add blacklists)
+// - CANNOT modify owners or policy_admins (protected fields)
+// - CANNOT reduce owner-granted permissions
+//
 // Process flow:
-// 1. Verify sender is policy admin (from current policy.policy_admins list)
-// 2. Parse and validate JSON FIRST (before making any changes)
+// 1. Check if sender is owner or policy admin
+// 2. Validate JSON with appropriate rules for the sender type
 // 3. Pause ALL message processing (lock mutex)
 // 4. Reload policy (pause policy engine, update, save, resume)
 // 5. Resume message processing (unlock mutex)
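A skeletal view of how steps 3-5 wrap the reload, assuming a mutex guards message processing (the `listenerSketch` type and `processingMu` field are illustrative, not taken from this diff):

```go
package policy

import "sync"

// listenerSketch stands in for the real Listener; only the locking
// pattern of steps 3-5 is the point here.
type listenerSketch struct {
	processingMu sync.Mutex                 // hypothetical: guards all message processing
	reload       func([]byte, string) error // stands in for policyManager.ReloadAsOwner/AsPolicyAdmin
}

func (l *listenerSketch) applyPolicyUpdate(policyJSON []byte, configPath string) error {
	l.processingMu.Lock()                   // 3. pause ALL message processing
	defer l.processingMu.Unlock()           // 5. resume via defer, even on error
	return l.reload(policyJSON, configPath) // 4. validate, swap, save
}
```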
@@ -30,24 +38,40 @@ import (
 func (l *Listener) HandlePolicyConfigUpdate(ev *event.E) error {
 	log.I.F("received policy config update from pubkey: %s", hex.Enc(ev.Pubkey))
 
-	// 1. Verify sender is policy admin (from current policy.policy_admins list)
+	// 1. Verify sender is owner or policy admin
 	if l.policyManager == nil {
 		return fmt.Errorf("policy system is not enabled")
 	}
 
+	isOwner := l.policyManager.IsOwner(ev.Pubkey)
 	isAdmin := l.policyManager.IsPolicyAdmin(ev.Pubkey)
-	if !isAdmin {
-		log.W.F("policy config update rejected: pubkey %s is not a policy admin", hex.Enc(ev.Pubkey))
-		return fmt.Errorf("only policy administrators can update policy configuration")
+
+	if !isOwner && !isAdmin {
+		log.W.F("policy config update rejected: pubkey %s is not an owner or policy admin", hex.Enc(ev.Pubkey))
+		return fmt.Errorf("only owners and policy administrators can update policy configuration")
 	}
 
-	log.I.F("policy admin verified: %s", hex.Enc(ev.Pubkey))
+	if isOwner {
+		log.I.F("owner verified: %s", hex.Enc(ev.Pubkey))
+	} else {
+		log.I.F("policy admin verified: %s", hex.Enc(ev.Pubkey))
+	}
 
-	// 2. Parse and validate JSON FIRST (before making any changes)
+	// 2. Parse and validate JSON with appropriate validation rules
 	policyJSON := []byte(ev.Content)
-	if err := l.policyManager.ValidateJSON(policyJSON); chk.E(err) {
-		log.E.F("policy config update validation failed: %v", err)
-		return fmt.Errorf("invalid policy configuration: %v", err)
+	var validationErr error
+
+	if isOwner {
+		// Owners can modify all fields, but owners list must be non-empty
+		validationErr = l.policyManager.ValidateOwnerPolicyUpdate(policyJSON)
+	} else {
+		// Policy admins have restrictions: can't modify protected fields, can't reduce permissions
+		validationErr = l.policyManager.ValidatePolicyAdminUpdate(policyJSON, ev.Pubkey)
 	}
+
+	if validationErr != nil {
+		log.E.F("policy config update validation failed: %v", validationErr)
+		return fmt.Errorf("invalid policy configuration: %v", validationErr)
+	}
 
 	log.I.F("policy config validation passed")
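`ValidateOwnerPolicyUpdate` and `ValidatePolicyAdminUpdate` themselves are not part of this diff; a plausible sketch of the protected-field and no-reduction checks the admin path performs (all types and helpers below are assumed, and the error strings follow the documentation later in this change):

```go
package policy

import (
	"fmt"
	"slices"
)

// Hypothetical shapes standing in for the real policy types.
type kindRules struct{ Whitelist []int }
type policySketch struct {
	Owners, PolicyAdmins []string
	Kind                 kindRules
}

func validateAdminUpdate(current, proposed *policySketch) error {
	// Policy admins may extend rules but never touch protected fields.
	if !equalSets(current.Owners, proposed.Owners) {
		return fmt.Errorf("cannot modify the 'owners' field")
	}
	if !equalSets(current.PolicyAdmins, proposed.PolicyAdmins) {
		return fmt.Errorf("cannot modify the 'policy_admins' field")
	}
	// Reductions are rejected: every previously whitelisted kind must survive.
	for _, k := range current.Kind.Whitelist {
		if !slices.Contains(proposed.Kind.Whitelist, k) {
			return fmt.Errorf("cannot remove kind %d from whitelist", k)
		}
	}
	return nil
}

func equalSets(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	seen := make(map[string]bool, len(a))
	for _, s := range a {
		seen[s] = true
	}
	for _, s := range b {
		if !seen[s] {
			return false
		}
	}
	return true
}
```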
@@ -65,12 +89,23 @@ func (l *Listener) HandlePolicyConfigUpdate(ev *event.E) error {
 
 	// 4. Reload policy (this will pause policy engine, update, save, and resume)
 	log.I.F("applying policy configuration update")
-	if err := l.policyManager.Reload(policyJSON, configPath); chk.E(err) {
-		log.E.F("policy config update failed: %v", err)
-		return fmt.Errorf("failed to apply policy configuration: %v", err)
+	var reloadErr error
+	if isOwner {
+		reloadErr = l.policyManager.ReloadAsOwner(policyJSON, configPath)
+	} else {
+		reloadErr = l.policyManager.ReloadAsPolicyAdmin(policyJSON, configPath, ev.Pubkey)
 	}
 
-	log.I.F("policy configuration updated successfully by admin: %s", hex.Enc(ev.Pubkey))
+	if reloadErr != nil {
+		log.E.F("policy config update failed: %v", reloadErr)
+		return fmt.Errorf("failed to apply policy configuration: %v", reloadErr)
+	}
+
+	if isOwner {
+		log.I.F("policy configuration updated successfully by owner: %s", hex.Enc(ev.Pubkey))
+	} else {
+		log.I.F("policy configuration updated successfully by policy admin: %s", hex.Enc(ev.Pubkey))
+	}
 
 	// 5. Message processing mutex will be unlocked by defer
 	return nil
 
@@ -139,6 +139,7 @@ func createPolicyConfigEvent(t *testing.T, signer *p8k.Signer, policyJSON string
 }
 
 // TestHandlePolicyConfigUpdate_ValidAdmin tests policy update from valid admin
+// Policy admins can extend rules but cannot modify protected fields (owners, policy_admins)
 func TestHandlePolicyConfigUpdate_ValidAdmin(t *testing.T) {
 	// Create admin signer
 	adminSigner := p8k.MustNew()
@@ -150,9 +151,10 @@ func TestHandlePolicyConfigUpdate_ValidAdmin(t *testing.T) {
 	listener, _, cleanup := setupPolicyTestListener(t, adminHex)
 	defer cleanup()
 
-	// Create valid policy update event
+	// Create valid policy update event that ONLY extends, doesn't modify protected fields
+	// Note: policy_admins must stay the same (policy admins cannot change this field)
 	newPolicyJSON := `{
-		"default_policy": "deny",
+		"default_policy": "allow",
 		"policy_admins": ["` + adminHex + `"],
+		"kind": {"whitelist": [1, 3, 7]}
 	}`
@@ -165,9 +167,10 @@ func TestHandlePolicyConfigUpdate_ValidAdmin(t *testing.T) {
 		t.Errorf("Expected success but got error: %v", err)
 	}
 
-	// Verify policy was updated
-	if listener.policyManager.DefaultPolicy != "deny" {
-		t.Errorf("Policy was not updated, default_policy = %q, expected 'deny'",
+	// Verify policy was updated (kind whitelist was extended)
+	// Note: default_policy should still be "allow" from original
+	if listener.policyManager.DefaultPolicy != "allow" {
+		t.Errorf("Policy was not updated correctly, default_policy = %q, expected 'allow'",
 			listener.policyManager.DefaultPolicy)
 	}
 }
@@ -260,8 +263,9 @@ func TestHandlePolicyConfigUpdate_InvalidPubkey(t *testing.T) {
 	}
 }
 
-// TestHandlePolicyConfigUpdate_AdminCannotRemoveSelf tests that admin can update policy
-func TestHandlePolicyConfigUpdate_AdminCanUpdateAdminList(t *testing.T) {
+// TestHandlePolicyConfigUpdate_PolicyAdminCannotModifyProtectedFields tests that policy admins
+// cannot modify the owners or policy_admins fields (these are protected, owner-only fields)
+func TestHandlePolicyConfigUpdate_PolicyAdminCannotModifyProtectedFields(t *testing.T) {
 	adminSigner := p8k.MustNew()
 	if err := adminSigner.Generate(); err != nil {
 		t.Fatalf("Failed to generate admin keypair: %v", err)
@@ -274,22 +278,23 @@ func TestHandlePolicyConfigUpdate_AdminCanUpdateAdminList(t *testing.T) {
 	listener, _, cleanup := setupPolicyTestListener(t, adminHex)
 	defer cleanup()
 
-	// Update policy to add second admin
+	// Try to add second admin (policy_admins is a protected field)
 	newPolicyJSON := `{
 		"default_policy": "allow",
 		"policy_admins": ["` + adminHex + `", "` + admin2Hex + `"]
 	}`
 	ev := createPolicyConfigEvent(t, adminSigner, newPolicyJSON)
 
+	// This should FAIL because policy admins cannot modify the policy_admins field
 	err := listener.HandlePolicyConfigUpdate(ev)
-	if err != nil {
-		t.Errorf("Expected success but got error: %v", err)
+	if err == nil {
+		t.Error("Expected error when policy admin tries to modify policy_admins (protected field)")
 	}
 
-	// Verify both admins are now in the list
+	// Second admin should NOT be in the list since update was rejected
 	admin2Bin, _ := hex.Dec(admin2Hex)
-	if !listener.policyManager.IsPolicyAdmin(admin2Bin) {
-		t.Error("Second admin should have been added to admin list")
+	if listener.policyManager.IsPolicyAdmin(admin2Bin) {
+		t.Error("Second admin should NOT have been added - policy_admins is protected")
 	}
 }
 
@@ -446,10 +451,11 @@ func TestMessageProcessingPauseDuringPolicyUpdate(t *testing.T) {
 
 	// We can't easily mock the mutex, but we can verify the policy update succeeds
 	// which implies the pause/resume cycle completed
 
+	// Note: policy_admins must stay the same (protected field)
 	newPolicyJSON := `{
-		"default_policy": "deny",
-		"policy_admins": ["` + adminHex + `"]
+		"default_policy": "allow",
+		"policy_admins": ["` + adminHex + `"],
+		"kind": {"whitelist": [1, 3, 5, 7]}
 	}`
 	ev := createPolicyConfigEvent(t, adminSigner, newPolicyJSON)
 
@@ -462,8 +468,8 @@ func TestMessageProcessingPauseDuringPolicyUpdate(t *testing.T) {
 	_ = pauseCalled
 	_ = resumeCalled
 
-	// Verify policy was actually updated
-	if listener.policyManager.DefaultPolicy != "deny" {
+	// Verify policy was actually updated (kind whitelist was extended)
+	if listener.policyManager.DefaultPolicy != "allow" {
 		t.Error("Policy should have been updated")
 	}
 }
 
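Taken together these tests pin down the permission matrix; the same expectations condense into a table-driven sketch like the following (illustrative only - `adminPubHex` is a hypothetical helper standing in for however `adminHex` is derived in this file, while the other helpers are the real ones used above):

```go
// Illustrative only: a table-driven condensation of the permission matrix
// exercised by the tests above. adminPubHex is a hypothetical helper for
// deriving the hex pubkey.
func TestPolicyUpdatePermissionMatrix(t *testing.T) {
	adminSigner := p8k.MustNew()
	if err := adminSigner.Generate(); err != nil {
		t.Fatalf("Failed to generate admin keypair: %v", err)
	}
	adminHex := adminPubHex(adminSigner)

	cases := []struct {
		name    string
		policy  string
		wantErr bool
	}{
		{
			name:    "admin extends kind whitelist",
			policy:  `{"default_policy": "allow", "policy_admins": ["` + adminHex + `"], "kind": {"whitelist": [1, 3, 7]}}`,
			wantErr: false,
		},
		{
			name:    "admin touches protected policy_admins",
			policy:  `{"default_policy": "allow", "policy_admins": []}`,
			wantErr: true,
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			listener, _, cleanup := setupPolicyTestListener(t, adminHex)
			defer cleanup()
			ev := createPolicyConfigEvent(t, adminSigner, tc.policy)
			err := listener.HandlePolicyConfigUpdate(ev)
			if (err != nil) != tc.wantErr {
				t.Errorf("got err = %v, wantErr = %v", err, tc.wantErr)
			}
		})
	}
}
```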
@@ -177,6 +177,10 @@ LIMIT $limit
 
 ## Configuration
 
+All configuration is centralized in `app/config/config.go` and visible via `./orly help`.
+
+> **Important:** All environment variables must be defined in `app/config/config.go`. Do not use `os.Getenv()` directly in package code. Database backends receive configuration via the `database.DatabaseConfig` struct.
+
 ### Environment Variables
 
 ```bash
 
@@ -1111,20 +1111,62 @@ Check logs for policy decisions and errors.
 
 ## Dynamic Policy Configuration via Kind 12345
 
-Policy administrators can update the relay policy dynamically by publishing kind 12345 events. This enables runtime policy changes without relay restarts.
+Both **owners** and **policy admins** can update the relay policy dynamically by publishing kind 12345 events. This enables runtime policy changes without relay restarts, with different permission levels for each role.
 
+### Role Hierarchy and Permissions
+
+ORLY uses a layered permission model for policy updates:
+
+| Role | Source | Can Modify | Restrictions |
+|------|--------|------------|--------------|
+| **Owner** | `ORLY_OWNERS` env or `owners` in policy.json | All fields | Owners list must remain non-empty |
+| **Policy Admin** | `policy_admins` in policy.json | Extend rules, add blacklists | Cannot modify `owners` or `policy_admins`, cannot reduce permissions |
+
+### Composition Rules
+
+Policy updates from owners and policy admins compose as follows:
+
+1. **Owner policy is the base** - Defines minimum permissions and protected fields
+2. **Policy admins can extend** - Add to allow lists, add new kinds, add blacklists
+3. **Blacklists override whitelists** - Policy admins can ban users that owners allowed
+4. **Protected fields are immutable** - Only owners can modify `owners` and `policy_admins`
+
+#### What Policy Admins CAN Do:
+
+- ✅ Add pubkeys to `write_allow` and `read_allow` lists
+- ✅ Add entries to `write_deny` and `read_deny` lists to blacklist malicious users
+- ✅ Blacklist any non-admin user, even if whitelisted by owners or other admins
+- ✅ Add kinds to `kind.whitelist` and `kind.blacklist`
+- ✅ Increase size limits (`size_limit`, `content_limit`, etc.)
+- ✅ Add rules for new kinds not defined by owners
+- ✅ Enable `write_allow_follows` for additional rules
+
+#### What Policy Admins CANNOT Do:
+
+- ❌ Modify the `owners` field
+- ❌ Modify the `policy_admins` field
+- ❌ Blacklist owners or other policy admins (protected users)
+- ❌ Remove pubkeys from allow lists
+- ❌ Remove kinds from whitelist
+- ❌ Reduce size limits
+- ❌ Remove rules defined by owners
+- ❌ Add new required tags (restrictions)
 
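A compact sketch of how these composition rules might evaluate a write (the types and helpers are illustrative; the relay's actual policy engine is not shown in this diff):

```go
package policy

import "slices"

// policyView is an illustrative shape, not the engine's real type.
type policyView struct {
	DefaultPolicy         string
	WriteAllow, WriteDeny []string
	Owners, PolicyAdmins  []string
	KindBlacklist         []int
}

func canWrite(p *policyView, pubkey string, kind int) bool {
	// Blacklists override whitelists - but owners and policy admins are
	// protected users and cannot be blacklisted by a policy admin.
	if slices.Contains(p.WriteDeny, pubkey) && !isProtected(p, pubkey) {
		return false
	}
	if slices.Contains(p.KindBlacklist, kind) {
		return false // a kind-level blacklist entry also blocks the write
	}
	if slices.Contains(p.WriteAllow, pubkey) {
		return true // owner- or admin-granted allowance
	}
	return p.DefaultPolicy == "allow" // fall back to the base policy
}

func isProtected(p *policyView, pubkey string) bool {
	return slices.Contains(p.Owners, pubkey) || slices.Contains(p.PolicyAdmins, pubkey)
}
```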
 ### Enabling Dynamic Policy Updates
 
-1. Add yourself as a policy admin in the initial policy.json:
+1. Set yourself as both owner and policy admin in the initial policy.json:
 
 ```json
 {
   "default_policy": "allow",
-  "policy_admins": ["YOUR_HEX_PUBKEY_HERE"],
+  "owners": ["YOUR_HEX_PUBKEY_HERE"],
+  "policy_admins": ["ADMIN_HEX_PUBKEY_HERE"],
   "policy_follow_whitelist_enabled": false
 }
 ```
 
+**Important:** The `owners` list must contain at least one pubkey to prevent lockout.
+
 2. Ensure policy is enabled:
 
 ```bash
@@ -1135,15 +1177,28 @@ export ORLY_POLICY_ENABLED=true
 
 Send a kind 12345 event with the new policy configuration as JSON content:
 
+**As Owner (full control):**
 ```json
 {
   "kind": 12345,
-  "content": "{\"default_policy\": \"deny\", \"kind\": {\"whitelist\": [1,3,7]}, \"policy_admins\": [\"YOUR_HEX_PUBKEY\"]}",
+  "content": "{\"default_policy\": \"deny\", \"owners\": [\"OWNER_HEX\"], \"policy_admins\": [\"ADMIN_HEX\"], \"kind\": {\"whitelist\": [1,3,7]}}",
   "tags": [],
   "created_at": 1234567890
 }
 ```
+
+**As Policy Admin (extensions only):**
+```json
+{
+  "kind": 12345,
+  "content": "{\"default_policy\": \"deny\", \"owners\": [\"OWNER_HEX\"], \"policy_admins\": [\"ADMIN_HEX\"], \"kind\": {\"whitelist\": [1,3,7,30023], \"blacklist\": [4]}, \"rules\": {\"1\": {\"write_deny\": [\"BAD_ACTOR_HEX\"]}}}",
+  "tags": [],
+  "created_at": 1234567890
+}
+```
+
+Note: Policy admins must include the original `owners` and `policy_admins` values unchanged.
 
 ### Policy Admin Follow List Whitelisting
 
 When `policy_follow_whitelist_enabled` is `true`, the relay automatically grants access to all pubkeys followed by policy admins.
@@ -1161,10 +1216,27 @@ When `policy_follow_whitelist_enabled` is `true`, the relay automatically grants
 
 ### Security Considerations
 
-- Only pubkeys listed in `policy_admins` can update the policy
-- Policy updates are validated before applying (invalid JSON or pubkeys are rejected)
-- Failed updates preserve the existing policy (no corruption)
-- All policy updates are logged for audit purposes
+- **Privilege separation**: Only owners can add/remove owners and policy admins
+- **Non-empty owners**: At least one owner must always exist to prevent lockout
+- **Protected fields**: Policy admins cannot escalate their privileges by modifying `owners`
+- **Blacklist override**: Policy admins can block bad actors even if owners allowed them
+- **Validation first**: Policy updates are validated before applying (invalid updates are rejected)
+- **Atomic updates**: Failed updates preserve the existing policy (no corruption)
+- **Audit logging**: All policy updates are logged with the submitter's pubkey
 
+### Error Messages
+
+Common validation errors:
+
+| Error | Cause |
+|-------|-------|
+| `owners list cannot be empty` | Owner tried to remove all owners |
+| `cannot modify the 'owners' field` | Policy admin tried to change owners |
+| `cannot modify the 'policy_admins' field` | Policy admin tried to change admins |
+| `cannot remove kind X from whitelist` | Policy admin tried to reduce permissions |
+| `cannot reduce size_limit for kind X` | Policy admin tried to make limits stricter |
+| `cannot blacklist owner X` | Policy admin tried to blacklist an owner |
+| `cannot blacklist policy admin X` | Policy admin tried to blacklist another admin |
+
 ## Testing the Policy System
 
docs/WASM_MOBILE_BUILD_PLAN.md (new file, 417 lines)
@@ -0,0 +1,417 @@
# Plan: Enable js/wasm, iOS, and Android Builds

This document outlines the work required to enable ORLY and the nostr library to build successfully for WebAssembly (js/wasm), iOS (ios/arm64), and Android (android/arm64).

## Current Build Status

| Platform | Status | Notes |
|----------|--------|-------|
| linux/amd64 | ✅ Works | Uses libsecp256k1 via purego |
| darwin/arm64 | ✅ Works | Uses pure Go p256k1 |
| darwin/amd64 | ✅ Works | Uses pure Go p256k1 |
| windows/amd64 | ✅ Works | Uses pure Go p256k1 |
| android/arm64 | ✅ Works | Uses pure Go p256k1 |
| js/wasm | ❌ Fails | Missing platform stubs (planned for hackpadfs work) |
| ios/arm64 | ⚠️ Requires gomobile | See iOS section below |

---

## Issue 1: js/wasm Build Failures

### Problem

Two packages fail to compile for js/wasm due to missing platform-specific implementations:

1. **`next.orly.dev/pkg/utils/interrupt`** - Missing `Restart()` function
2. **`git.mleku.dev/mleku/nostr/ws`** - Missing `getConnectionOptions()` function

### Root Cause Analysis

#### 1.1 interrupt package

The `Restart()` function is defined with build tags:

- `restart.go` → `//go:build linux`
- `restart_darwin.go` → `//go:build darwin`
- `restart_windows.go` → `//go:build windows`

But `main.go` calls `Restart()` unconditionally on line 66, causing undefined symbol on js/wasm.

#### 1.2 ws package

The `getConnectionOptions()` function is defined in `connection_options.go` with:

```go
//go:build !js
```

This correctly excludes js/wasm, but no alternative implementation exists for js/wasm, so `connection.go` line 28 fails.

### Solution

#### 1.1 Fix interrupt package (ORLY)

Create a new file `restart_other.go`:

```go
//go:build !linux && !darwin && !windows

package interrupt

import (
	"lol.mleku.dev/log"
	"os"
)

// Restart is not supported on this platform - just exit
func Restart() {
	log.W.Ln("restart not supported on this platform, exiting")
	os.Exit(0)
}
```

#### 1.2 Fix ws package (nostr library)

Create a new file `connection_options_js.go`:

```go
//go:build js

package ws

import (
	"crypto/tls"
	"net/http"

	"github.com/gorilla/websocket" // needed for the *websocket.Dialer return type
)

// getConnectionOptions returns nil on js/wasm as we use browser WebSocket API
func getConnectionOptions(
	requestHeader http.Header, tlsConfig *tls.Config,
) *websocket.Dialer {
	// On js/wasm, gorilla/websocket doesn't work - need to use browser APIs
	// This is a stub that allows compilation; actual WebSocket usage would
	// need a js/wasm-compatible implementation
	return nil
}
```

**However**, this alone won't make WebSocket work - the entire `ws` package uses `gorilla/websocket` which doesn't support js/wasm. A proper fix requires:

Option A: Use conditional compilation to swap in a js/wasm WebSocket implementation (e.g., `nhooyr.io/websocket` which supports js/wasm)

Option B: Make the `ws` package optional with build tags so js/wasm builds exclude it entirely

**Recommended**: Option B - exclude the ws client package on js/wasm since ORLY is a server, not a client.

---

## Issue 2: iOS Build Failure

### Problem

```
ios/arm64 requires external (cgo) linking, but cgo is not enabled
```

### Root Cause

iOS requires CGO for all executables due to Apple's linking requirements. This is a fundamental Go limitation - you cannot build iOS binaries with `CGO_ENABLED=0`.

### Solution

#### Option A: Accept CGO requirement for iOS

Build with CGO enabled and provide a cross-compilation toolchain:

```bash
CGO_ENABLED=1 CC=clang GOOS=ios GOARCH=arm64 go build
```

This requires:
1. Xcode with iOS SDK installed
2. Cross-compilation from macOS (or complex cross-toolchain setup)

#### Option B: Create a library instead of executable

Instead of building a standalone binary, build ORLY as a Go library that can be called from Swift/Objective-C:

```bash
CGO_ENABLED=1 GOOS=ios GOARCH=arm64 go build -buildmode=c-archive -o liborly.a
```

This creates a static library usable in iOS apps.

#### Option C: Use gomobile

Use the `gomobile` tool which handles iOS cross-compilation:

```bash
gomobile bind -target=ios ./pkg/...
```

**Recommendation**: Option A or B depending on use case. For a relay server, iOS support may not be practical anyway (iOS backgrounding restrictions, network limitations).

---

## Issue 3: Android Build Failure (RESOLVED)

### Problem

```
# github.com/ebitengine/purego
dlfcn_android.go:21:13: undefined: cgo.Dlopen
```

### Root Cause

Android uses the Linux kernel, so Go's `GOOS=android` still matches the `linux` build tag. This meant our `*_linux.go` files (which import purego) were being compiled for Android.

### Solution (Implemented)

Updated all build tags in `crypto/p8k/` to explicitly exclude Android:

**Linux files** (`*_linux.go`):
```go
//go:build linux && !android && !purego
```

**Other platform files** (`*_other.go`):
```go
//go:build !linux || android || purego
```

This ensures Android uses the pure Go `p256k1.mleku.dev` implementation instead of trying to load libsecp256k1 via purego.

### Verification

```bash
GOOS=android GOARCH=arm64 CGO_ENABLED=0 go build -o orly-android-arm64
# Successfully produces 33MB ARM64 ELF binary
```

---

## Implementation Plan

### Phase 1: js/wasm Support (Low effort)

| Task | Repository | Effort |
|------|------------|--------|
| Create `restart_other.go` stub | ORLY | 5 min |
| Create `connection_options_js.go` stub OR exclude ws package | nostr | 15 min |
| Test js/wasm build compiles | Both | 5 min |

**Note**: This enables *compilation* but not *functionality*. Running ORLY in a browser would require significant additional work (no filesystem, no listening sockets, etc.).

### Phase 2: Android Support (Medium effort)

| Task | Repository | Effort |
|------|------------|--------|
| Audit purego imports - ensure Linux-only | nostr | 30 min |
| Add build tags to any files importing purego | nostr | 15 min |
| Test android/arm64 build | Both | 5 min |

### Phase 3: iOS Support (High effort, questionable value)

| Task | Repository | Effort |
|------|------------|--------|
| Set up iOS cross-compilation environment | - | 2-4 hours |
| Modify build scripts for CGO_ENABLED=1 | ORLY | 30 min |
| Create c-archive or gomobile bindings | ORLY | 2-4 hours |
| Test on iOS simulator/device | - | 1-2 hours |

**Recommendation**: iOS support should be deprioritized unless there's a specific use case. A Nostr relay is a server, and iOS imposes severe restrictions on background network services.

---

## Quick Wins (Do First)

### 1. Create `restart_other.go` in ORLY

```go
//go:build !linux && !darwin && !windows

package interrupt

import (
	"lol.mleku.dev/log"
	"os"
)

func Restart() {
	log.W.Ln("restart not supported on this platform, exiting")
	os.Exit(0)
}
```

### 2. Exclude ws package from js/wasm in nostr library

Modify `connection.go` to have a build tag:
```go
//go:build !js

package ws
// ... rest of file
```

Create `connection_js.go`:
```go
//go:build js

package ws

// Stub package for js/wasm - WebSocket client not supported
// Use browser's native WebSocket API instead
```

### 3. Audit purego usage

Ensure all files that import `github.com/ebitengine/purego` have:
```go
//go:build linux && !purego
```

---

## Estimated Total Effort

| Platform | Compilation | Full Functionality |
|----------|-------------|-------------------|
| js/wasm | 1 hour | Not practical (server) |
| android/arm64 | 1-2 hours | Possible with NDK |
| ios/arm64 | 4-8 hours | Limited (iOS restrictions) |

---

## iOS with gomobile

Since iOS requires CGO and you cannot use Xcode without an Apple ID, the `gomobile` approach is the best option. This creates a framework that can be integrated into iOS apps.

### Prerequisites

1. **Install gomobile**:
```bash
go install golang.org/x/mobile/cmd/gomobile@latest
gomobile init
```

2. **Create a bindable package**:
gomobile can only bind packages that export types and functions suitable for mobile. You'll need to create a simplified API layer.

### Creating a Bindable API

Create a new package (e.g., `pkg/mobile/`) with a simplified interface:

```go
// pkg/mobile/relay.go
package mobile

import (
	"context"
	// ... minimal imports
)

// Relay represents an embedded Nostr relay
type Relay struct {
	// internal fields
}

// NewRelay creates a new relay instance
func NewRelay(dataDir string, port int) (*Relay, error) {
	// Initialize relay with mobile-friendly defaults
}

// Start begins accepting connections
func (r *Relay) Start() error {
	// Start the relay server
}

// Stop gracefully shuts down the relay
func (r *Relay) Stop() error {
	// Shutdown
}

// GetPublicKey returns the relay's public key
func (r *Relay) GetPublicKey() string {
	// Return npub
}
```

### Building the iOS Framework

```bash
# Build iOS framework (requires macOS)
gomobile bind -target=ios -o ORLY.xcframework ./pkg/mobile

# This produces ORLY.xcframework which can be added to Xcode projects
```

### Limitations of gomobile

1. **Only certain types are bindable**:
   - Basic types (int, float, string, bool, []byte)
   - Structs with exported fields of bindable types
   - Interfaces with methods using bindable types
   - Error return values

2. **No channels or goroutines in API**:
   The public API must be synchronous or use callbacks

3. **Callbacks require interfaces**:
```go
// Define callback interface
type EventHandler interface {
	OnEvent(eventJSON string)
}

// Accept callback in API
func (r *Relay) SetEventHandler(h EventHandler) {
	// Store and use callback
}
```

### Alternative: Building a Static Library

If you want more control, build as a C archive:

```bash
# From macOS with Xcode command line tools
CGO_ENABLED=1 GOOS=ios GOARCH=arm64 \
	go build -buildmode=c-archive -o liborly.a ./pkg/mobile

# This produces:
# - liborly.a (static library)
# - liborly.h (C header file)
```

This can be linked into any iOS project using the C header.

### Recommended Next Steps for iOS

1. Create `pkg/mobile/` with a minimal, mobile-friendly API
2. Test gomobile binding on Linux first: `gomobile bind -target=android ./pkg/mobile`
3. Once Android binding works, the iOS binding will use the same API
4. Find someone with macOS to run `gomobile bind -target=ios`

---

## Appendix: File Changes Summary

### nostr Repository (`git.mleku.dev/mleku/nostr`) - COMPLETED

| File | Change |
|------|--------|
| `crypto/p8k/secp_linux.go` | Build tag: `linux && !android && !purego` |
| `crypto/p8k/schnorr_linux.go` | Build tag: `linux && !android && !purego` |
| `crypto/p8k/ecdh_linux.go` | Build tag: `linux && !android && !purego` |
| `crypto/p8k/recovery_linux.go` | Build tag: `linux && !android && !purego` |
| `crypto/p8k/utils_linux.go` | Build tag: `linux && !android && !purego` |
| `crypto/p8k/secp_other.go` | Build tag: `!linux \|\| android \|\| purego` |
| `crypto/p8k/schnorr_other.go` | Build tag: `!linux \|\| android \|\| purego` |
| `crypto/p8k/ecdh_other.go` | Build tag: `!linux \|\| android \|\| purego` |
| `crypto/p8k/recovery_other.go` | Build tag: `!linux \|\| android \|\| purego` |
| `crypto/p8k/utils_other.go` | Build tag: `!linux \|\| android \|\| purego` |
| `crypto/p8k/constants.go` | NEW - shared constants (no build tags) |

### ORLY Repository (`next.orly.dev`)

| File | Change |
|------|--------|
| `go.mod` | Added `replace` directive for local nostr library |

### Future Work (js/wasm)

| File | Action Needed |
|------|---------------|
| `pkg/utils/interrupt/restart_other.go` | CREATE - stub `Restart()` for unsupported platforms |
| `nostr/ws/connection.go` | MODIFY - add `//go:build !js` or exclude package |
| `nostr/ws/connection_js.go` | CREATE - stub for js/wasm |
go.mod (3 changes)
@@ -3,7 +3,7 @@ module next.orly.dev
 go 1.25.3
 
 require (
-	git.mleku.dev/mleku/nostr v1.0.4
+	git.mleku.dev/mleku/nostr v1.0.7
 	github.com/adrg/xdg v0.5.3
 	github.com/dgraph-io/badger/v4 v4.8.0
 	github.com/dgraph-io/dgo/v230 v230.0.1
@@ -82,6 +82,7 @@ require (
 	google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect
 	google.golang.org/protobuf v1.36.10 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
+	p256k1.mleku.dev v1.0.3 // indirect
 )
 
 retract v1.0.3
 
go.sum (6 changes)
@@ -1,6 +1,6 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-git.mleku.dev/mleku/nostr v1.0.4 h1:QKJlqUubLPeMpYpxHODSvfSlL+F6UhjBiBuze9FGRKo=
-git.mleku.dev/mleku/nostr v1.0.4/go.mod h1:swI7bWLc7yU1jd7PLCCIrIcUR3Ug5O+GPvpub/w6eTY=
+git.mleku.dev/mleku/nostr v1.0.7 h1:BXWsAAiGu56JXR4rIn0kaVOE+RtMmA9MPvAs8y/BjnI=
+git.mleku.dev/mleku/nostr v1.0.7/go.mod h1:iYTlg2WKJXJ0kcsM6QBGOJ0UDiJidMgL/i64cHyPjZc=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
 github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
@@ -302,3 +302,5 @@ lol.mleku.dev v1.0.5/go.mod h1:JlsqP0CZDLKRyd85XGcy79+ydSRqmFkrPzYFMYxQ+zs=
 lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
 lukechampine.com/frand v1.5.1/go.mod h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q=
 nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
+p256k1.mleku.dev v1.0.3 h1:2SBEH9XhNAotO1Ik8ejODjChTqc06Z/6ncQhrYkAdRA=
+p256k1.mleku.dev v1.0.3/go.mod h1:cWkZlx6Tu7CTmIxonFbdjhdNfkY3VbjjY5TFEILiTnY=
main.go (33 changes)
@@ -42,8 +42,8 @@ func main() {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	var db database.Database
-	if db, err = database.NewDatabase(
-		ctx, cancel, cfg.DBType, cfg.DataDir, cfg.DBLogLevel,
+	if db, err = database.NewDatabaseWithConfig(
+		ctx, cancel, cfg.DBType, makeDatabaseConfig(cfg),
 	); chk.E(err) {
 		os.Exit(1)
 	}
@@ -318,8 +318,8 @@ func main() {
 	ctx, cancel := context.WithCancel(context.Background())
 	var db database.Database
 	log.I.F("initializing %s database at %s", cfg.DBType, cfg.DataDir)
-	if db, err = database.NewDatabase(
-		ctx, cancel, cfg.DBType, cfg.DataDir, cfg.DBLogLevel,
+	if db, err = database.NewDatabaseWithConfig(
+		ctx, cancel, cfg.DBType, makeDatabaseConfig(cfg),
 	); chk.E(err) {
 		os.Exit(1)
 	}
@@ -430,3 +430,28 @@ func main() {
 	}
 	// log.I.F("exiting")
 }
+
+// makeDatabaseConfig creates a database.DatabaseConfig from the app config.
+// This helper function extracts all database-specific configuration values
+// and constructs the appropriate struct for the database package.
+func makeDatabaseConfig(cfg *config.C) *database.DatabaseConfig {
+	dataDir, logLevel,
+		blockCacheMB, indexCacheMB, queryCacheSizeMB,
+		queryCacheMaxAge,
+		inlineEventThreshold,
+		dgraphURL, neo4jURI, neo4jUser, neo4jPassword := cfg.GetDatabaseConfigValues()
+
+	return &database.DatabaseConfig{
+		DataDir:              dataDir,
+		LogLevel:             logLevel,
+		BlockCacheMB:         blockCacheMB,
+		IndexCacheMB:         indexCacheMB,
+		QueryCacheSizeMB:     queryCacheSizeMB,
+		QueryCacheMaxAge:     queryCacheMaxAge,
+		InlineEventThreshold: inlineEventThreshold,
+		DgraphURL:            dgraphURL,
+		Neo4jURI:             neo4jURI,
+		Neo4jUser:            neo4jUser,
+		Neo4jPassword:        neo4jPassword,
+	}
+}
 
@@ -37,7 +37,7 @@ func TestKind3TagRoundTrip(t *testing.T) {
 	// Verify all tags have key "p"
 	pTagCount := 0
 	for _, tg := range *ev1.Tags {
-		if tag != nil && tag.Len() >= 2 {
+		if tg != nil && tg.Len() >= 2 {
 			key := tg.Key()
 			if len(key) == 1 && key[0] == 'p' {
 				pTagCount++
@@ -63,7 +63,7 @@ func TestKind3TagRoundTrip(t *testing.T) {
 	// Verify all tags still have key "p"
 	pTagCount2 := 0
 	for _, tg := range *ev2.Tags {
-		if tag != nil && tag.Len() >= 2 {
+		if tg != nil && tg.Len() >= 2 {
 			key := tg.Key()
 			if len(key) == 1 && key[0] == 'p' {
 				pTagCount2++
 
@@ -5,7 +5,6 @@ import (
 	"errors"
 	"os"
 	"path/filepath"
-	"strconv"
 	"time"
 
 	"github.com/dgraph-io/badger/v4"
@@ -21,10 +20,11 @@ import (
 
 // D implements the Database interface using Badger as the storage backend
 type D struct {
-	ctx     context.Context
-	cancel  context.CancelFunc
-	dataDir string
-	Logger  *logger
+	ctx                  context.Context
+	cancel               context.CancelFunc
+	dataDir              string
+	Logger               *logger
+	inlineEventThreshold int // Configurable threshold for inline event storage
 	*badger.DB
 	seq       *badger.Sequence
 	pubkeySeq *badger.Sequence // Sequence for pubkey serials
@@ -35,63 +35,85 @@ type D struct {
 // Ensure D implements Database interface at compile time
 var _ Database = (*D)(nil)
 
+// New creates a new Badger database instance with default configuration.
+// This is provided for backward compatibility with existing callers.
+// For full configuration control, use NewWithConfig instead.
 func New(
 	ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
 ) (
 	d *D, err error,
 ) {
-	// Initialize query cache with configurable size (default 512MB)
-	queryCacheSize := int64(512 * 1024 * 1024) // 512 MB
-	if v := os.Getenv("ORLY_QUERY_CACHE_SIZE_MB"); v != "" {
-		if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
-			queryCacheSize = int64(n * 1024 * 1024)
-		}
+	// Create a default config for backward compatibility
+	cfg := &DatabaseConfig{
+		DataDir:              dataDir,
+		LogLevel:             logLevel,
+		BlockCacheMB:         1024,            // Default 1024 MB
+		IndexCacheMB:         512,             // Default 512 MB
+		QueryCacheSizeMB:     512,             // Default 512 MB
+		QueryCacheMaxAge:     5 * time.Minute, // Default 5 minutes
+		InlineEventThreshold: 1024,            // Default 1024 bytes
 	}
-	queryCacheMaxAge := 5 * time.Minute // Default 5 minutes
-	if v := os.Getenv("ORLY_QUERY_CACHE_MAX_AGE"); v != "" {
-		if duration, perr := time.ParseDuration(v); perr == nil {
-			queryCacheMaxAge = duration
-		}
+	return NewWithConfig(ctx, cancel, cfg)
 }
 
+// NewWithConfig creates a new Badger database instance with full configuration.
+// This is the preferred method when you have access to DatabaseConfig.
+func NewWithConfig(
+	ctx context.Context, cancel context.CancelFunc, cfg *DatabaseConfig,
+) (
+	d *D, err error,
+) {
+	// Apply defaults for zero values (backward compatibility)
+	blockCacheMB := cfg.BlockCacheMB
+	if blockCacheMB == 0 {
+		blockCacheMB = 1024 // Default 1024 MB
+	}
+	indexCacheMB := cfg.IndexCacheMB
+	if indexCacheMB == 0 {
+		indexCacheMB = 512 // Default 512 MB
+	}
+	queryCacheSizeMB := cfg.QueryCacheSizeMB
+	if queryCacheSizeMB == 0 {
+		queryCacheSizeMB = 512 // Default 512 MB
+	}
+	queryCacheMaxAge := cfg.QueryCacheMaxAge
+	if queryCacheMaxAge == 0 {
+		queryCacheMaxAge = 5 * time.Minute // Default 5 minutes
+	}
+	inlineEventThreshold := cfg.InlineEventThreshold
+	if inlineEventThreshold == 0 {
+		inlineEventThreshold = 1024 // Default 1024 bytes
+	}
+
+	queryCacheSize := int64(queryCacheSizeMB * 1024 * 1024)
+
 	d = &D{
-		ctx:        ctx,
-		cancel:     cancel,
-		dataDir:    dataDir,
-		Logger:     NewLogger(lol.GetLogLevel(logLevel), dataDir),
-		DB:         nil,
-		seq:        nil,
-		ready:      make(chan struct{}),
-		queryCache: querycache.NewEventCache(queryCacheSize, queryCacheMaxAge),
+		ctx:                  ctx,
+		cancel:               cancel,
+		dataDir:              cfg.DataDir,
+		Logger:               NewLogger(lol.GetLogLevel(cfg.LogLevel), cfg.DataDir),
+		inlineEventThreshold: inlineEventThreshold,
+		DB:                   nil,
+		seq:                  nil,
+		ready:                make(chan struct{}),
+		queryCache:           querycache.NewEventCache(queryCacheSize, queryCacheMaxAge),
 	}
 
 	// Ensure the data directory exists
-	if err = os.MkdirAll(dataDir, 0755); chk.E(err) {
+	if err = os.MkdirAll(cfg.DataDir, 0755); chk.E(err) {
 		return
 	}
 
 	// Also ensure the directory exists using apputil.EnsureDir for any
 	// potential subdirectories
-	dummyFile := filepath.Join(dataDir, "dummy.sst")
+	dummyFile := filepath.Join(cfg.DataDir, "dummy.sst")
 	if err = apputil.EnsureDir(dummyFile); chk.E(err) {
 		return
 	}
 
 	opts := badger.DefaultOptions(d.dataDir)
-	// Configure caches based on environment to better match workload.
+	// Configure caches based on config to better match workload.
 	// Defaults aim for higher hit ratios under read-heavy workloads while remaining safe.
-	var blockCacheMB = 1024 // default 512 MB
-	var indexCacheMB = 512  // default 256 MB
-	if v := os.Getenv("ORLY_DB_BLOCK_CACHE_MB"); v != "" {
-		if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
-			blockCacheMB = n
-		}
-	}
-	if v := os.Getenv("ORLY_DB_INDEX_CACHE_MB"); v != "" {
-		if n, perr := strconv.Atoi(v); perr == nil && n > 0 {
-			indexCacheMB = n
-		}
-	}
 	opts.BlockCacheSize = int64(blockCacheMB * units.Mb)
 	opts.IndexCacheSize = int64(indexCacheMB * units.Mb)
 	opts.BlockSize = 4 * units.Kb // 4 KB block size
 
@@ -4,8 +4,33 @@ import (
 	"context"
 	"fmt"
 	"strings"
+	"time"
 )
 
+// DatabaseConfig holds all database configuration options that can be passed
+// to any database backend. Each backend uses the relevant fields for its type.
+// This centralizes configuration instead of having each backend read env vars directly.
+type DatabaseConfig struct {
+	// Common settings for all backends
+	DataDir  string
+	LogLevel string
+
+	// Badger-specific settings
+	BlockCacheMB         int           // ORLY_DB_BLOCK_CACHE_MB
+	IndexCacheMB         int           // ORLY_DB_INDEX_CACHE_MB
+	QueryCacheSizeMB     int           // ORLY_QUERY_CACHE_SIZE_MB
+	QueryCacheMaxAge     time.Duration // ORLY_QUERY_CACHE_MAX_AGE
+	InlineEventThreshold int           // ORLY_INLINE_EVENT_THRESHOLD
+
+	// DGraph-specific settings
+	DgraphURL string // ORLY_DGRAPH_URL
+
+	// Neo4j-specific settings
+	Neo4jURI      string // ORLY_NEO4J_URI
+	Neo4jUser     string // ORLY_NEO4J_USER
+	Neo4jPassword string // ORLY_NEO4J_PASSWORD
+}
+
 // NewDatabase creates a database instance based on the specified type.
 // Supported types: "badger", "dgraph", "neo4j"
 func NewDatabase(
@@ -14,19 +39,39 @@ func NewDatabase(
 	dbType string,
 	dataDir string,
 	logLevel string,
 ) (Database, error) {
+	// Create a default config for backward compatibility with existing callers
+	cfg := &DatabaseConfig{
+		DataDir:  dataDir,
+		LogLevel: logLevel,
+	}
+	return NewDatabaseWithConfig(ctx, cancel, dbType, cfg)
+}
+
+// NewDatabaseWithConfig creates a database instance with full configuration.
+// This is the preferred method when you have access to the app config.
+func NewDatabaseWithConfig(
+	ctx context.Context,
+	cancel context.CancelFunc,
+	dbType string,
+	cfg *DatabaseConfig,
+) (Database, error) {
 	switch strings.ToLower(dbType) {
 	case "badger", "":
 		// Use the existing badger implementation
-		return New(ctx, cancel, dataDir, logLevel)
+		return NewWithConfig(ctx, cancel, cfg)
 	case "dgraph":
-		// Use the new dgraph implementation
-		// Import dynamically to avoid import cycles
-		return newDgraphDatabase(ctx, cancel, dataDir, logLevel)
+		// Use the dgraph implementation
+		if newDgraphDatabase == nil {
+			return nil, fmt.Errorf("dgraph database backend not available (import _ \"next.orly.dev/pkg/dgraph\")")
+		}
+		return newDgraphDatabase(ctx, cancel, cfg)
 	case "neo4j":
-		// Use the new neo4j implementation
-		// Import dynamically to avoid import cycles
-		return newNeo4jDatabase(ctx, cancel, dataDir, logLevel)
+		// Use the neo4j implementation
+		if newNeo4jDatabase == nil {
+			return nil, fmt.Errorf("neo4j database backend not available (import _ \"next.orly.dev/pkg/neo4j\")")
+		}
+		return newNeo4jDatabase(ctx, cancel, cfg)
 	default:
 		return nil, fmt.Errorf("unsupported database type: %s (supported: badger, dgraph, neo4j)", dbType)
 	}
@@ -34,20 +79,20 @@ func NewDatabase(
 
 // newDgraphDatabase creates a dgraph database instance
 // This is defined here to avoid import cycles
-var newDgraphDatabase func(context.Context, context.CancelFunc, string, string) (Database, error)
+var newDgraphDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)
 
 // RegisterDgraphFactory registers the dgraph database factory
 // This is called from the dgraph package's init() function
-func RegisterDgraphFactory(factory func(context.Context, context.CancelFunc, string, string) (Database, error)) {
+func RegisterDgraphFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
 	newDgraphDatabase = factory
 }
 
 // newNeo4jDatabase creates a neo4j database instance
 // This is defined here to avoid import cycles
-var newNeo4jDatabase func(context.Context, context.CancelFunc, string, string) (Database, error)
+var newNeo4jDatabase func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)
 
 // RegisterNeo4jFactory registers the neo4j database factory
 // This is called from the neo4j package's init() function
-func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, string, string) (Database, error)) {
+func RegisterNeo4jFactory(factory func(context.Context, context.CancelFunc, *DatabaseConfig) (Database, error)) {
 	newNeo4jDatabase = factory
 }
 
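The function-variable indirection above exists so `pkg/database` never imports the backend packages; a backend becomes selectable only when something links it in with a blank import, which runs its `init()` and calls the matching `Register*Factory`. A minimal sketch of the caller side, with the import path taken from the error messages above (the data directory value is illustrative):

```go
package main

import (
	"context"

	"next.orly.dev/pkg/database"

	// Blank import for side effects only: the package's init() calls
	// database.RegisterDgraphFactory, making "dgraph" selectable below.
	_ "next.orly.dev/pkg/dgraph"
)

func openDgraph() (database.Database, error) {
	ctx, cancel := context.WithCancel(context.Background())
	cfg := &database.DatabaseConfig{
		DataDir:   "/var/lib/orly", // illustrative path
		DgraphURL: "localhost:9080",
	}
	return database.NewDatabaseWithConfig(ctx, cancel, "dgraph", cfg)
}
```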
@@ -5,8 +5,6 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"os"
-	"strconv"
 	"strings"
 
 	"github.com/dgraph-io/badger/v4"
@@ -270,14 +268,9 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (
 	eventData := eventDataBuf.Bytes()
 
 	// Determine storage strategy (Reiser4 optimizations)
-	// Get threshold from environment, default to 0 (disabled)
-	// When enabled, typical values: 384 (conservative), 512 (recommended), 1024 (aggressive)
-	smallEventThreshold := 1024
-	if v := os.Getenv("ORLY_INLINE_EVENT_THRESHOLD"); v != "" {
-		if n, perr := strconv.Atoi(v); perr == nil && n >= 0 {
-			smallEventThreshold = n
-		}
-	}
+	// Use the threshold from database configuration
+	// Typical values: 384 (conservative), 512 (recommended), 1024 (aggressive)
+	smallEventThreshold := d.inlineEventThreshold
 	isSmallEvent := smallEventThreshold > 0 && len(eventData) <= smallEventThreshold
 	isReplaceableEvent := kind.IsReplaceable(ev.Kind)
 	isAddressableEvent := kind.IsParameterizedReplaceable(ev.Kind)
 
@@ -48,30 +48,21 @@ func init() {
 	database.RegisterDgraphFactory(func(
 		ctx context.Context,
 		cancel context.CancelFunc,
-		dataDir string,
-		logLevel string,
+		cfg *database.DatabaseConfig,
 	) (database.Database, error) {
-		return New(ctx, cancel, dataDir, logLevel)
+		return NewWithConfig(ctx, cancel, cfg)
 	})
 }
 
-// Config holds configuration options for the Dgraph database
-type Config struct {
-	DataDir             string
-	LogLevel            string
-	DgraphURL           string // Dgraph gRPC endpoint (e.g., "localhost:9080")
-	EnableGraphQL       bool
-	EnableIntrospection bool
-}
-
-// New creates a new Dgraph-based database instance
-func New(
-	ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
+// NewWithConfig creates a new Dgraph-based database instance with full configuration.
+// Configuration is passed from the centralized app config via DatabaseConfig.
+func NewWithConfig(
+	ctx context.Context, cancel context.CancelFunc, cfg *database.DatabaseConfig,
 ) (
 	d *D, err error,
 ) {
-	// Get dgraph URL from environment, default to localhost
-	dgraphURL := os.Getenv("ORLY_DGRAPH_URL")
+	// Apply defaults for empty values
+	dgraphURL := cfg.DgraphURL
 	if dgraphURL == "" {
 		dgraphURL = "localhost:9080"
 	}
@@ -79,8 +70,8 @@ func New(
 	d = &D{
 		ctx:    ctx,
 		cancel: cancel,
-		dataDir: dataDir,
-		Logger:  NewLogger(lol.GetLogLevel(logLevel), dataDir),
+		dataDir: cfg.DataDir,
+		Logger:  NewLogger(lol.GetLogLevel(cfg.LogLevel), cfg.DataDir),
 		dgraphURL:           dgraphURL,
 		enableGraphQL:       false,
 		enableIntrospection: false,
@@ -88,12 +79,12 @@ func New(
 	}
 
 	// Ensure the data directory exists
-	if err = os.MkdirAll(dataDir, 0755); chk.E(err) {
+	if err = os.MkdirAll(cfg.DataDir, 0755); chk.E(err) {
 		return
 	}
 
 	// Ensure directory structure
-	dummyFile := filepath.Join(dataDir, "dummy.sst")
+	dummyFile := filepath.Join(cfg.DataDir, "dummy.sst")
 	if err = apputil.EnsureDir(dummyFile); chk.E(err) {
 		return
 	}
@@ -128,6 +119,21 @@ func New(
 	return
 }
 
+// New creates a new Dgraph-based database instance with default configuration.
+// This is provided for backward compatibility with existing callers (tests, etc.).
+// For full configuration control, use NewWithConfig instead.
+func New(
+	ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
+) (
+	d *D, err error,
+) {
+	cfg := &database.DatabaseConfig{
+		DataDir:  dataDir,
+		LogLevel: logLevel,
+	}
+	return NewWithConfig(ctx, cancel, cfg)
+}
+
 // initDgraphClient establishes connection to dgraph server
 func (d *D) initDgraphClient() error {
 	d.Logger.Infof("connecting to dgraph at %s", d.dgraphURL)
 
@@ -15,6 +15,8 @@ docker run -d --name neo4j \
 
 ### 2. Configure Environment
 
+All Neo4j configuration is defined in `app/config/config.go` and visible via `./orly help`:
+
 ```bash
 export ORLY_DB_TYPE=neo4j
 export ORLY_NEO4J_URI=bolt://localhost:7687
@@ -22,6 +24,8 @@ export ORLY_NEO4J_USER=neo4j
|
||||
export ORLY_NEO4J_PASSWORD=password
|
||||
```
|
||||
|
||||
> **Note:** Configuration is centralized in `app/config/config.go`. Do not use `os.Getenv()` directly in package code - all environment variables should be passed via the `database.DatabaseConfig` struct.
|
||||
|
||||
### 3. Run ORLY
|
||||
|
||||
```bash
|
||||
|
||||
@@ -70,38 +70,29 @@ func init() {
	database.RegisterNeo4jFactory(func(
		ctx context.Context,
		cancel context.CancelFunc,
		dataDir string,
		logLevel string,
		cfg *database.DatabaseConfig,
	) (database.Database, error) {
		return New(ctx, cancel, dataDir, logLevel)
		return NewWithConfig(ctx, cancel, cfg)
	})
}

// Config holds configuration options for the Neo4j database
type Config struct {
	DataDir       string
	LogLevel      string
	Neo4jURI      string // Neo4j bolt URI (e.g., "bolt://localhost:7687")
	Neo4jUser     string // Authentication username
	Neo4jPassword string // Authentication password
}

// New creates a new Neo4j-based database instance
func New(
	ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
// NewWithConfig creates a new Neo4j-based database instance with full configuration.
// Configuration is passed from the centralized app config via DatabaseConfig.
func NewWithConfig(
	ctx context.Context, cancel context.CancelFunc, cfg *database.DatabaseConfig,
) (
	n *N, err error,
) {
	// Get Neo4j connection details from environment
	neo4jURI := os.Getenv("ORLY_NEO4J_URI")
	// Apply defaults for empty values
	neo4jURI := cfg.Neo4jURI
	if neo4jURI == "" {
		neo4jURI = "bolt://localhost:7687"
	}
	neo4jUser := os.Getenv("ORLY_NEO4J_USER")
	neo4jUser := cfg.Neo4jUser
	if neo4jUser == "" {
		neo4jUser = "neo4j"
	}
	neo4jPassword := os.Getenv("ORLY_NEO4J_PASSWORD")
	neo4jPassword := cfg.Neo4jPassword
	if neo4jPassword == "" {
		neo4jPassword = "password"
	}
@@ -109,8 +100,8 @@ func New(
	n = &N{
		ctx:    ctx,
		cancel: cancel,
		dataDir: dataDir,
		Logger:  NewLogger(lol.GetLogLevel(logLevel), dataDir),
		dataDir: cfg.DataDir,
		Logger:  NewLogger(lol.GetLogLevel(cfg.LogLevel), cfg.DataDir),
		neo4jURI:      neo4jURI,
		neo4jUser:     neo4jUser,
		neo4jPassword: neo4jPassword,
@@ -118,12 +109,12 @@ func New(
	}

	// Ensure the data directory exists
	if err = os.MkdirAll(dataDir, 0755); chk.E(err) {
	if err = os.MkdirAll(cfg.DataDir, 0755); chk.E(err) {
		return
	}

	// Ensure directory structure
	dummyFile := filepath.Join(dataDir, "dummy.sst")
	dummyFile := filepath.Join(cfg.DataDir, "dummy.sst")
	if err = apputil.EnsureDir(dummyFile); chk.E(err) {
		return
	}
@@ -158,6 +149,21 @@ func New(
	return
}

// New creates a new Neo4j-based database instance with default configuration.
// This is provided for backward compatibility with existing callers (tests, etc.).
// For full configuration control, use NewWithConfig instead.
func New(
	ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
) (
	n *N, err error,
) {
	cfg := &database.DatabaseConfig{
		DataDir:  dataDir,
		LogLevel: logLevel,
	}
	return NewWithConfig(ctx, cancel, cfg)
}

// initNeo4jClient establishes connection to Neo4j server
func (n *N) initNeo4jClient() error {
	n.Logger.Infof("connecting to neo4j at %s", n.neo4jURI)

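For reference, `initNeo4jClient` most plausibly wraps the official Go driver; a hedged sketch of such a connection follows (the driver import and calls are assumptions about the implementation, not code from this changeset):

```go
// Sketch only: connecting with github.com/neo4j/neo4j-go-driver/v5, assuming
// that is what initNeo4jClient uses internally.
driver, err := neo4j.NewDriverWithContext(
	n.neo4jURI,
	neo4j.BasicAuth(n.neo4jUser, n.neo4jPassword, ""),
)
if err != nil {
	return err
}
// Fail fast if the server is unreachable or the credentials are wrong.
if err = driver.VerifyConnectivity(ctx); err != nil {
	return err
}
```
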
525
pkg/policy/composition.go
Normal file
@@ -0,0 +1,525 @@
package policy

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"sync"

	"git.mleku.dev/mleku/nostr/encoders/hex"
	"lol.mleku.dev/log"
	"next.orly.dev/pkg/utils"
)

// =============================================================================
// Policy Composition Types
// =============================================================================

// PolicyAdminContribution represents extensions/additions from a policy admin.
// Policy admins can extend the base owner policy but cannot modify protected fields
// (owners, policy_admins) or reduce owner-granted permissions.
type PolicyAdminContribution struct {
	// AdminPubkey is the hex-encoded pubkey of the policy admin who made this contribution
	AdminPubkey string `json:"admin_pubkey"`
	// CreatedAt is the Unix timestamp when this contribution was created
	CreatedAt int64 `json:"created_at"`
	// EventID is the Nostr event ID that created this contribution (for audit trail)
	EventID string `json:"event_id,omitempty"`

	// KindWhitelistAdd adds kinds to the whitelist (OR with owner's whitelist)
	KindWhitelistAdd []int `json:"kind_whitelist_add,omitempty"`
	// KindBlacklistAdd adds kinds to the blacklist (overrides whitelist)
	KindBlacklistAdd []int `json:"kind_blacklist_add,omitempty"`

	// RulesExtend extends existing rules defined by the owner
	RulesExtend map[int]RuleExtension `json:"rules_extend,omitempty"`
	// RulesAdd adds new rules for kinds not defined by the owner
	RulesAdd map[int]Rule `json:"rules_add,omitempty"`

	// GlobalExtend extends the global rule
	GlobalExtend *RuleExtension `json:"global_extend,omitempty"`
}

// RuleExtension defines how a policy admin can extend an existing owner rule.
// All fields are additive - they extend, not replace, the owner's configuration.
type RuleExtension struct {
	// WriteAllowAdd adds pubkeys to the write allow list
	WriteAllowAdd []string `json:"write_allow_add,omitempty"`
	// WriteDenyAdd adds pubkeys to the write deny list (overrides allow)
	WriteDenyAdd []string `json:"write_deny_add,omitempty"`
	// ReadAllowAdd adds pubkeys to the read allow list
	ReadAllowAdd []string `json:"read_allow_add,omitempty"`
	// ReadDenyAdd adds pubkeys to the read deny list (overrides allow)
	ReadDenyAdd []string `json:"read_deny_add,omitempty"`

	// SizeLimitOverride can only make the limit MORE permissive (larger)
	SizeLimitOverride *int64 `json:"size_limit_override,omitempty"`
	// ContentLimitOverride can only make the limit MORE permissive (larger)
	ContentLimitOverride *int64 `json:"content_limit_override,omitempty"`
	// MaxAgeOfEventOverride can only make the limit MORE permissive (older allowed)
	MaxAgeOfEventOverride *int64 `json:"max_age_of_event_override,omitempty"`
	// MaxAgeEventInFutureOverride can only make the limit MORE permissive (further future allowed)
	MaxAgeEventInFutureOverride *int64 `json:"max_age_event_in_future_override,omitempty"`

	// WriteAllowFollows extends the follow whitelist feature
	WriteAllowFollows *bool `json:"write_allow_follows,omitempty"`
	// FollowsWhitelistAdminsAdd adds admin pubkeys whose follows are whitelisted
	FollowsWhitelistAdminsAdd []string `json:"follows_whitelist_admins_add,omitempty"`
}

// ComposedPolicy manages the base owner policy and policy admin contributions.
// It computes an effective merged policy at runtime.
type ComposedPolicy struct {
	// OwnerPolicy is the base policy set by owners
	OwnerPolicy *P
	// Contributions is a map of event ID -> contribution for deduplication
	Contributions map[string]*PolicyAdminContribution
	// contributionsMx protects the contributions map
	contributionsMx sync.RWMutex
	// configDir is the directory where policy files are stored
	configDir string
}

// =============================================================================
// Protected Field Validation
// =============================================================================

// ProtectedFields are fields that only owners can modify
var ProtectedFields = []string{"owners", "policy_admins"}

// ValidateOwnerPolicy validates a policy update from an owner.
// Ensures owners list is non-empty.
func ValidateOwnerPolicy(policy *P) error {
	if len(policy.Owners) == 0 {
		return fmt.Errorf("owners list cannot be empty: at least one owner must be defined")
	}

	// Validate all owner pubkeys are valid hex
	for _, owner := range policy.Owners {
		if len(owner) != 64 {
			return fmt.Errorf("invalid owner pubkey length: %q (expected 64 hex characters)", owner)
		}
		if _, err := hex.Dec(owner); err != nil {
			return fmt.Errorf("invalid owner pubkey format: %q: %v", owner, err)
		}
	}

	// Validate all policy admin pubkeys are valid hex
	for _, admin := range policy.PolicyAdmins {
		if len(admin) != 64 {
			return fmt.Errorf("invalid policy_admin pubkey length: %q (expected 64 hex characters)", admin)
		}
		if _, err := hex.Dec(admin); err != nil {
			return fmt.Errorf("invalid policy_admin pubkey format: %q: %v", admin, err)
		}
	}

	return nil
}

// ValidatePolicyAdminContribution validates a contribution from a policy admin.
// Ensures no protected fields are modified and extensions are valid.
func ValidatePolicyAdminContribution(
	ownerPolicy *P,
	contribution *PolicyAdminContribution,
	existingContributions map[string]*PolicyAdminContribution,
) error {
	// Validate the admin pubkey is valid
	if len(contribution.AdminPubkey) != 64 {
		return fmt.Errorf("invalid admin pubkey length")
	}

	// Validate kind additions don't conflict with owner blacklist
	// (though PA can add to blacklist to override whitelist)

	// Validate rule extensions
	for kind, ext := range contribution.RulesExtend {
		ownerRule, exists := ownerPolicy.rules[kind]
		if !exists {
			return fmt.Errorf("cannot extend rule for kind %d: not defined in owner policy (use rules_add instead)", kind)
		}

		// Validate size limit overrides are more permissive
		if ext.SizeLimitOverride != nil && ownerRule.SizeLimit != nil {
			if *ext.SizeLimitOverride < *ownerRule.SizeLimit {
				return fmt.Errorf("size_limit_override for kind %d must be >= owner's limit (%d)", kind, *ownerRule.SizeLimit)
			}
		}

		if ext.ContentLimitOverride != nil && ownerRule.ContentLimit != nil {
			if *ext.ContentLimitOverride < *ownerRule.ContentLimit {
				return fmt.Errorf("content_limit_override for kind %d must be >= owner's limit (%d)", kind, *ownerRule.ContentLimit)
			}
		}

		if ext.MaxAgeOfEventOverride != nil && ownerRule.MaxAgeOfEvent != nil {
			if *ext.MaxAgeOfEventOverride < *ownerRule.MaxAgeOfEvent {
				return fmt.Errorf("max_age_of_event_override for kind %d must be >= owner's limit (%d)", kind, *ownerRule.MaxAgeOfEvent)
			}
		}

		// Validate pubkey formats in allow/deny lists
		for _, pk := range ext.WriteAllowAdd {
			if len(pk) != 64 {
				return fmt.Errorf("invalid pubkey in write_allow_add for kind %d: %q", kind, pk)
			}
		}
		for _, pk := range ext.WriteDenyAdd {
			if len(pk) != 64 {
				return fmt.Errorf("invalid pubkey in write_deny_add for kind %d: %q", kind, pk)
			}
		}
		for _, pk := range ext.ReadAllowAdd {
			if len(pk) != 64 {
				return fmt.Errorf("invalid pubkey in read_allow_add for kind %d: %q", kind, pk)
			}
		}
		for _, pk := range ext.ReadDenyAdd {
			if len(pk) != 64 {
				return fmt.Errorf("invalid pubkey in read_deny_add for kind %d: %q", kind, pk)
			}
		}
	}

	// Validate rules_add are for kinds not already defined by owner
	for kind := range contribution.RulesAdd {
		if _, exists := ownerPolicy.rules[kind]; exists {
			return fmt.Errorf("cannot add rule for kind %d: already defined in owner policy (use rules_extend instead)", kind)
		}
	}

	return nil
}

// =============================================================================
// Policy Composition Logic
// =============================================================================

// NewComposedPolicy creates a new composed policy from an owner policy.
func NewComposedPolicy(ownerPolicy *P, configDir string) *ComposedPolicy {
	return &ComposedPolicy{
		OwnerPolicy:   ownerPolicy,
		Contributions: make(map[string]*PolicyAdminContribution),
		configDir:     configDir,
	}
}

// AddContribution adds a policy admin contribution.
// Returns error if validation fails.
func (cp *ComposedPolicy) AddContribution(contribution *PolicyAdminContribution) error {
	cp.contributionsMx.Lock()
	defer cp.contributionsMx.Unlock()

	// Validate the contribution
	if err := ValidatePolicyAdminContribution(cp.OwnerPolicy, contribution, cp.Contributions); err != nil {
		return err
	}

	// Store the contribution
	cp.Contributions[contribution.EventID] = contribution

	// Persist to disk
	if err := cp.saveContribution(contribution); err != nil {
		log.W.F("failed to persist contribution: %v", err)
	}

	return nil
}

// RemoveContribution removes a policy admin contribution by event ID.
func (cp *ComposedPolicy) RemoveContribution(eventID string) {
	cp.contributionsMx.Lock()
	defer cp.contributionsMx.Unlock()

	delete(cp.Contributions, eventID)

	// Remove from disk
	if cp.configDir != "" {
		contribPath := filepath.Join(cp.configDir, "policy-contributions", eventID+".json")
		os.Remove(contribPath)
	}
}

// GetEffectivePolicy computes the merged effective policy.
// Composition rules:
//   - Whitelists are unioned (OR)
//   - Blacklists are unioned and override whitelists
//   - Limits use the most permissive value
//   - Conflicts between PAs: oldest created_at wins (except deny always wins)
func (cp *ComposedPolicy) GetEffectivePolicy() *P {
	cp.contributionsMx.RLock()
	defer cp.contributionsMx.RUnlock()

	// Clone the owner policy as base
	effective := cp.cloneOwnerPolicy()

	// Sort contributions by created_at (oldest first for conflict resolution)
	sorted := cp.getSortedContributions()

	// Apply each contribution
	for _, contrib := range sorted {
		cp.applyContribution(effective, contrib)
	}

	// Repopulate binary caches
	effective.Global.populateBinaryCache()
	for kind := range effective.rules {
		rule := effective.rules[kind]
		rule.populateBinaryCache()
		effective.rules[kind] = rule
	}

	return effective
}
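
// Worked example (illustrative; values invented): if the owner policy
// whitelists kinds [1, 3] and caps the kind-1 size_limit at 10000, a
// contribution with KindWhitelistAdd [30023] and a kind-1 SizeLimitOverride
// of 20000 yields an effective whitelist of [1, 3, 30023] and a kind-1
// size_limit of 20000. A later contribution proposing 15000 leaves 20000 in
// place, because applyRuleExtension keeps the most permissive value.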

// cloneOwnerPolicy creates a deep copy of the owner policy.
func (cp *ComposedPolicy) cloneOwnerPolicy() *P {
	// Marshal and unmarshal to create a deep copy
	data, _ := json.Marshal(cp.OwnerPolicy)
	var cloned P
	json.Unmarshal(data, &cloned)

	// Copy the manager reference (not cloned)
	cloned.manager = cp.OwnerPolicy.manager

	return &cloned
}

// getSortedContributions returns contributions sorted by created_at.
func (cp *ComposedPolicy) getSortedContributions() []*PolicyAdminContribution {
	sorted := make([]*PolicyAdminContribution, 0, len(cp.Contributions))
	for _, contrib := range cp.Contributions {
		sorted = append(sorted, contrib)
	}
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i].CreatedAt < sorted[j].CreatedAt
	})
	return sorted
}

// applyContribution applies a single contribution to the effective policy.
func (cp *ComposedPolicy) applyContribution(effective *P, contrib *PolicyAdminContribution) {
	// Apply kind whitelist additions (OR)
	for _, kind := range contrib.KindWhitelistAdd {
		if !containsInt(effective.Kind.Whitelist, kind) {
			effective.Kind.Whitelist = append(effective.Kind.Whitelist, kind)
		}
	}

	// Apply kind blacklist additions (OR, overrides whitelist)
	for _, kind := range contrib.KindBlacklistAdd {
		if !containsInt(effective.Kind.Blacklist, kind) {
			effective.Kind.Blacklist = append(effective.Kind.Blacklist, kind)
		}
	}

	// Apply rule extensions
	for kind, ext := range contrib.RulesExtend {
		if rule, exists := effective.rules[kind]; exists {
			cp.applyRuleExtension(&rule, &ext, contrib.CreatedAt)
			effective.rules[kind] = rule
		}
	}

	// Apply new rules
	for kind, rule := range contrib.RulesAdd {
		if _, exists := effective.rules[kind]; !exists {
			if effective.rules == nil {
				effective.rules = make(map[int]Rule)
			}
			effective.rules[kind] = rule
		}
	}

	// Apply global rule extension
	if contrib.GlobalExtend != nil {
		cp.applyRuleExtension(&effective.Global, contrib.GlobalExtend, contrib.CreatedAt)
	}
}

// applyRuleExtension applies a rule extension to an existing rule.
func (cp *ComposedPolicy) applyRuleExtension(rule *Rule, ext *RuleExtension, _ int64) {
	// Add to allow lists (OR)
	for _, pk := range ext.WriteAllowAdd {
		if !containsString(rule.WriteAllow, pk) {
			rule.WriteAllow = append(rule.WriteAllow, pk)
		}
	}
	for _, pk := range ext.ReadAllowAdd {
		if !containsString(rule.ReadAllow, pk) {
			rule.ReadAllow = append(rule.ReadAllow, pk)
		}
	}

	// Add to deny lists (OR, overrides allow) - deny always wins
	for _, pk := range ext.WriteDenyAdd {
		if !containsString(rule.WriteDeny, pk) {
			rule.WriteDeny = append(rule.WriteDeny, pk)
		}
	}
	for _, pk := range ext.ReadDenyAdd {
		if !containsString(rule.ReadDeny, pk) {
			rule.ReadDeny = append(rule.ReadDeny, pk)
		}
	}

	// Apply limit overrides (most permissive wins)
	if ext.SizeLimitOverride != nil {
		if rule.SizeLimit == nil || *ext.SizeLimitOverride > *rule.SizeLimit {
			rule.SizeLimit = ext.SizeLimitOverride
		}
	}
	if ext.ContentLimitOverride != nil {
		if rule.ContentLimit == nil || *ext.ContentLimitOverride > *rule.ContentLimit {
			rule.ContentLimit = ext.ContentLimitOverride
		}
	}
	if ext.MaxAgeOfEventOverride != nil {
		if rule.MaxAgeOfEvent == nil || *ext.MaxAgeOfEventOverride > *rule.MaxAgeOfEvent {
			rule.MaxAgeOfEvent = ext.MaxAgeOfEventOverride
		}
	}
	if ext.MaxAgeEventInFutureOverride != nil {
		if rule.MaxAgeEventInFuture == nil || *ext.MaxAgeEventInFutureOverride > *rule.MaxAgeEventInFuture {
			rule.MaxAgeEventInFuture = ext.MaxAgeEventInFutureOverride
		}
	}

	// Enable WriteAllowFollows if requested (OR logic)
	if ext.WriteAllowFollows != nil && *ext.WriteAllowFollows {
		rule.WriteAllowFollows = true
	}

	// Add to follows whitelist admins
	for _, pk := range ext.FollowsWhitelistAdminsAdd {
		if !containsString(rule.FollowsWhitelistAdmins, pk) {
			rule.FollowsWhitelistAdmins = append(rule.FollowsWhitelistAdmins, pk)
		}
	}
}

// =============================================================================
// Persistence
// =============================================================================

// saveContribution persists a contribution to disk.
func (cp *ComposedPolicy) saveContribution(contrib *PolicyAdminContribution) error {
	if cp.configDir == "" {
		return nil
	}

	contribDir := filepath.Join(cp.configDir, "policy-contributions")
	if err := os.MkdirAll(contribDir, 0755); err != nil {
		return err
	}

	contribPath := filepath.Join(contribDir, contrib.EventID+".json")
	data, err := json.MarshalIndent(contrib, "", "  ")
	if err != nil {
		return err
	}

	return os.WriteFile(contribPath, data, 0644)
}

// LoadContributions loads all contributions from disk.
func (cp *ComposedPolicy) LoadContributions() error {
	if cp.configDir == "" {
		return nil
	}

	contribDir := filepath.Join(cp.configDir, "policy-contributions")
	if _, err := os.Stat(contribDir); os.IsNotExist(err) {
		return nil // No contributions yet
	}

	entries, err := os.ReadDir(contribDir)
	if err != nil {
		return err
	}

	cp.contributionsMx.Lock()
	defer cp.contributionsMx.Unlock()

	for _, entry := range entries {
		if entry.IsDir() || filepath.Ext(entry.Name()) != ".json" {
			continue
		}

		contribPath := filepath.Join(contribDir, entry.Name())
		data, err := os.ReadFile(contribPath)
		if err != nil {
			log.W.F("failed to read contribution %s: %v", entry.Name(), err)
			continue
		}

		var contrib PolicyAdminContribution
		if err := json.Unmarshal(data, &contrib); err != nil {
			log.W.F("failed to parse contribution %s: %v", entry.Name(), err)
			continue
		}

		// Validate against current owner policy
		if err := ValidatePolicyAdminContribution(cp.OwnerPolicy, &contrib, cp.Contributions); err != nil {
			log.W.F("contribution %s is no longer valid: %v (skipping)", entry.Name(), err)
			continue
		}

		cp.Contributions[contrib.EventID] = &contrib
	}

	log.I.F("loaded %d policy admin contributions", len(cp.Contributions))
	return nil
}

// =============================================================================
// Owner Detection
// =============================================================================

// IsOwner checks if the given pubkey is an owner.
// The pubkey parameter should be binary ([]byte), not hex-encoded.
func (p *P) IsOwner(pubkey []byte) bool {
	if len(pubkey) == 0 {
		return false
	}

	p.policyFollowsMx.RLock()
	defer p.policyFollowsMx.RUnlock()

	for _, owner := range p.ownersBin {
		if utils.FastEqual(owner, pubkey) {
			return true
		}
	}
	return false
}

// IsOwnerOrPolicyAdmin checks if the given pubkey is an owner or policy admin.
// The pubkey parameter should be binary ([]byte), not hex-encoded.
func (p *P) IsOwnerOrPolicyAdmin(pubkey []byte) bool {
	return p.IsOwner(pubkey) || p.IsPolicyAdmin(pubkey)
}

// =============================================================================
// Helper Functions
// =============================================================================

func containsInt(slice []int, val int) bool {
	for _, v := range slice {
		if v == val {
			return true
		}
	}
	return false
}

func containsString(slice []string, val string) bool {
	for _, v := range slice {
		if v == val {
			return true
		}
	}
	return false
}
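Taken together, a plausible startup sequence for a relay embedding this package (a sketch under assumptions: how the owner policy JSON and the config directory are obtained is outside this file):

```go
// Sketch only: compose the owner policy with persisted admin contributions.
ownerPolicy, err := policy.New(ownerPolicyJSON) // JSON loaded by the caller
if err != nil {
	return err
}
cp := policy.NewComposedPolicy(ownerPolicy, configDir)
if err = cp.LoadContributions(); err != nil { // re-validates each stored contribution
	return err
}
effective := cp.GetEffectivePolicy()
_ = effective // handed to the request path for read/write checks
```
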
690
pkg/policy/composition_test.go
Normal file
@@ -0,0 +1,690 @@
package policy

import (
	"encoding/json"
	"testing"
)

// TestValidateOwnerPolicyUpdate tests owner-specific validation
func TestValidateOwnerPolicyUpdate(t *testing.T) {
	// Create a base policy
	basePolicy := &P{
		DefaultPolicy: "allow",
		Owners:        []string{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
		PolicyAdmins:  []string{"bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"},
	}

	tests := []struct {
		name        string
		newPolicy   string
		expectError bool
		errorMsg    string
	}{
		{
			name: "valid owner update with non-empty owners",
			newPolicy: `{
				"default_policy": "deny",
				"owners": ["cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"],
				"policy_admins": ["dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"]
			}`,
			expectError: false,
		},
		{
			name: "invalid - empty owners list",
			newPolicy: `{
				"default_policy": "deny",
				"owners": [],
				"policy_admins": ["dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"]
			}`,
			expectError: true,
			errorMsg:    "owners list cannot be empty",
		},
		{
			name: "invalid - missing owners field",
			newPolicy: `{
				"default_policy": "deny",
				"policy_admins": ["dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"]
			}`,
			expectError: true,
			errorMsg:    "owners list cannot be empty",
		},
		{
			name: "invalid - bad owner pubkey format",
			newPolicy: `{
				"default_policy": "deny",
				"owners": ["not-a-valid-pubkey"]
			}`,
			expectError: true,
			errorMsg:    "invalid owner pubkey",
		},
		{
			name: "valid - owner can add multiple owners",
			newPolicy: `{
				"default_policy": "deny",
				"owners": [
					"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
					"cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
				]
			}`,
			expectError: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := basePolicy.ValidateOwnerPolicyUpdate([]byte(tt.newPolicy))
			if tt.expectError {
				if err == nil {
					t.Errorf("expected error containing %q, got nil", tt.errorMsg)
				} else if tt.errorMsg != "" && !containsSubstring(err.Error(), tt.errorMsg) {
					t.Errorf("expected error containing %q, got %q", tt.errorMsg, err.Error())
				}
			} else {
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
			}
		})
	}
}

// TestValidatePolicyAdminUpdate tests policy admin validation
func TestValidatePolicyAdminUpdate(t *testing.T) {
	// Create a base policy with known owners and admins
	ownerPubkey := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
	adminPubkey := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"
	allowedPubkey := "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"

	baseJSON := `{
		"default_policy": "allow",
		"owners": ["` + ownerPubkey + `"],
		"policy_admins": ["` + adminPubkey + `"],
		"kind": {
			"whitelist": [1, 3, 7]
		},
		"rules": {
			"1": {
				"description": "Text notes",
				"write_allow": ["` + allowedPubkey + `"],
				"size_limit": 10000
			}
		}
	}`

	basePolicy := &P{}
	if err := json.Unmarshal([]byte(baseJSON), basePolicy); err != nil {
		t.Fatalf("failed to create base policy: %v", err)
	}

	adminPubkeyBin := make([]byte, 32)
	for i := range adminPubkeyBin {
		adminPubkeyBin[i] = 0xbb
	}

	tests := []struct {
		name        string
		newPolicy   string
		expectError bool
		errorMsg    string
	}{
		{
			name: "valid - policy admin can extend write_allow",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["` + ownerPubkey + `"],
				"policy_admins": ["` + adminPubkey + `"],
				"kind": {
					"whitelist": [1, 3, 7]
				},
				"rules": {
					"1": {
						"description": "Text notes",
						"write_allow": ["` + allowedPubkey + `", "dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"],
						"size_limit": 10000
					}
				}
			}`,
			expectError: false,
		},
		{
			name: "valid - policy admin can add to kind whitelist",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["` + ownerPubkey + `"],
				"policy_admins": ["` + adminPubkey + `"],
				"kind": {
					"whitelist": [1, 3, 7, 30023]
				},
				"rules": {
					"1": {
						"description": "Text notes",
						"write_allow": ["` + allowedPubkey + `"],
						"size_limit": 10000
					}
				}
			}`,
			expectError: false,
		},
		{
			name: "valid - policy admin can increase size limit",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["` + ownerPubkey + `"],
				"policy_admins": ["` + adminPubkey + `"],
				"kind": {
					"whitelist": [1, 3, 7]
				},
				"rules": {
					"1": {
						"description": "Text notes",
						"write_allow": ["` + allowedPubkey + `"],
						"size_limit": 20000
					}
				}
			}`,
			expectError: false,
		},
		{
			name: "invalid - policy admin cannot modify owners",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"],
				"policy_admins": ["` + adminPubkey + `"],
				"kind": {
					"whitelist": [1, 3, 7]
				},
				"rules": {
					"1": {
						"description": "Text notes",
						"write_allow": ["` + allowedPubkey + `"],
						"size_limit": 10000
					}
				}
			}`,
			expectError: true,
			errorMsg:    "cannot modify the 'owners' field",
		},
		{
			name: "invalid - policy admin cannot modify policy_admins",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["` + ownerPubkey + `"],
				"policy_admins": ["dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd"],
				"kind": {
					"whitelist": [1, 3, 7]
				},
				"rules": {
					"1": {
						"description": "Text notes",
						"write_allow": ["` + allowedPubkey + `"],
						"size_limit": 10000
					}
				}
			}`,
			expectError: true,
			errorMsg:    "cannot modify the 'policy_admins' field",
		},
		{
			name: "invalid - policy admin cannot remove from kind whitelist",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["` + ownerPubkey + `"],
				"policy_admins": ["` + adminPubkey + `"],
				"kind": {
					"whitelist": [1, 3]
				},
				"rules": {
					"1": {
						"description": "Text notes",
						"write_allow": ["` + allowedPubkey + `"],
						"size_limit": 10000
					}
				}
			}`,
			expectError: true,
			errorMsg:    "cannot remove kind 7 from whitelist",
		},
		{
			name: "invalid - policy admin cannot remove from write_allow",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["` + ownerPubkey + `"],
				"policy_admins": ["` + adminPubkey + `"],
				"kind": {
					"whitelist": [1, 3, 7]
				},
				"rules": {
					"1": {
						"description": "Text notes",
						"write_allow": [],
						"size_limit": 10000
					}
				}
			}`,
			expectError: true,
			errorMsg:    "cannot remove pubkey",
		},
		{
			name: "invalid - policy admin cannot reduce size limit",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["` + ownerPubkey + `"],
				"policy_admins": ["` + adminPubkey + `"],
				"kind": {
					"whitelist": [1, 3, 7]
				},
				"rules": {
					"1": {
						"description": "Text notes",
						"write_allow": ["` + allowedPubkey + `"],
						"size_limit": 5000
					}
				}
			}`,
			expectError: true,
			errorMsg:    "cannot reduce size_limit",
		},
		{
			name: "invalid - policy admin cannot remove rule",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["` + ownerPubkey + `"],
				"policy_admins": ["` + adminPubkey + `"],
				"kind": {
					"whitelist": [1, 3, 7]
				},
				"rules": {}
			}`,
			expectError: true,
			errorMsg:    "cannot remove rule for kind 1",
		},
		{
			name: "valid - policy admin can add blacklist entries for non-admin users",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["` + ownerPubkey + `"],
				"policy_admins": ["` + adminPubkey + `"],
				"kind": {
					"whitelist": [1, 3, 7],
					"blacklist": [4]
				},
				"rules": {
					"1": {
						"description": "Text notes",
						"write_allow": ["` + allowedPubkey + `"],
						"write_deny": ["eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"],
						"size_limit": 10000
					}
				}
			}`,
			expectError: false,
		},
		{
			name: "invalid - policy admin cannot blacklist owner in write_deny",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["` + ownerPubkey + `"],
				"policy_admins": ["` + adminPubkey + `"],
				"kind": {
					"whitelist": [1, 3, 7]
				},
				"rules": {
					"1": {
						"description": "Text notes",
						"write_allow": ["` + allowedPubkey + `"],
						"write_deny": ["` + ownerPubkey + `"],
						"size_limit": 10000
					}
				}
			}`,
			expectError: true,
			errorMsg:    "cannot blacklist owner",
		},
		{
			name: "invalid - policy admin cannot blacklist other policy admin",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["` + ownerPubkey + `"],
				"policy_admins": ["` + adminPubkey + `"],
				"kind": {
					"whitelist": [1, 3, 7]
				},
				"rules": {
					"1": {
						"description": "Text notes",
						"write_allow": ["` + allowedPubkey + `"],
						"write_deny": ["` + adminPubkey + `"],
						"size_limit": 10000
					}
				}
			}`,
			expectError: true,
			errorMsg:    "cannot blacklist policy admin",
		},
		{
			name: "valid - policy admin can blacklist whitelisted non-admin user",
			newPolicy: `{
				"default_policy": "allow",
				"owners": ["` + ownerPubkey + `"],
				"policy_admins": ["` + adminPubkey + `"],
				"kind": {
					"whitelist": [1, 3, 7]
				},
				"rules": {
					"1": {
						"description": "Text notes",
						"write_allow": ["` + allowedPubkey + `"],
						"write_deny": ["` + allowedPubkey + `"],
						"size_limit": 10000
					}
				}
			}`,
			expectError: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := basePolicy.ValidatePolicyAdminUpdate([]byte(tt.newPolicy), adminPubkeyBin)
			if tt.expectError {
				if err == nil {
					t.Errorf("expected error containing %q, got nil", tt.errorMsg)
				} else if tt.errorMsg != "" && !containsSubstring(err.Error(), tt.errorMsg) {
					t.Errorf("expected error containing %q, got %q", tt.errorMsg, err.Error())
				}
			} else {
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
			}
		})
	}
}

// TestIsOwner tests the IsOwner method
func TestIsOwner(t *testing.T) {
	ownerPubkey := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
	nonOwnerPubkey := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"

	_ = nonOwnerPubkey // Silence unused variable warning

	policyJSON := `{
		"default_policy": "allow",
		"owners": ["` + ownerPubkey + `"]
	}`

	policy, err := New([]byte(policyJSON))
	if err != nil {
		t.Fatalf("failed to create policy: %v", err)
	}

	// Create binary pubkeys
	ownerBin := make([]byte, 32)
	for i := range ownerBin {
		ownerBin[i] = 0xaa
	}

	nonOwnerBin := make([]byte, 32)
	for i := range nonOwnerBin {
		nonOwnerBin[i] = 0xbb
	}

	tests := []struct {
		name     string
		pubkey   []byte
		expected bool
	}{
		{
			name:     "owner is recognized",
			pubkey:   ownerBin,
			expected: true,
		},
		{
			name:     "non-owner is not recognized",
			pubkey:   nonOwnerBin,
			expected: false,
		},
		{
			name:     "nil pubkey returns false",
			pubkey:   nil,
			expected: false,
		},
		{
			name:     "empty pubkey returns false",
			pubkey:   []byte{},
			expected: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := policy.IsOwner(tt.pubkey)
			if result != tt.expected {
				t.Errorf("expected %v, got %v", tt.expected, result)
			}
		})
	}
}

// TestStringSliceEqual tests the helper function
func TestStringSliceEqual(t *testing.T) {
	tests := []struct {
		name     string
		a        []string
		b        []string
		expected bool
	}{
		{
			name:     "equal slices same order",
			a:        []string{"a", "b", "c"},
			b:        []string{"a", "b", "c"},
			expected: true,
		},
		{
			name:     "equal slices different order",
			a:        []string{"a", "b", "c"},
			b:        []string{"c", "a", "b"},
			expected: true,
		},
		{
			name:     "different lengths",
			a:        []string{"a", "b"},
			b:        []string{"a", "b", "c"},
			expected: false,
		},
		{
			name:     "different contents",
			a:        []string{"a", "b", "c"},
			b:        []string{"a", "b", "d"},
			expected: false,
		},
		{
			name:     "empty slices",
			a:        []string{},
			b:        []string{},
			expected: true,
		},
		{
			name:     "nil slices",
			a:        nil,
			b:        nil,
			expected: true,
		},
		{
			name:     "nil vs empty",
			a:        nil,
			b:        []string{},
			expected: true,
		},
		{
			name:     "duplicates in both",
			a:        []string{"a", "a", "b"},
			b:        []string{"a", "b", "a"},
			expected: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := stringSliceEqual(tt.a, tt.b)
			if result != tt.expected {
				t.Errorf("expected %v, got %v", tt.expected, result)
			}
		})
	}
}

// TestPolicyAdminContributionValidation tests the contribution validation
func TestPolicyAdminContributionValidation(t *testing.T) {
	ownerPubkey := "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
	adminPubkey := "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"

	ownerPolicy := &P{
		DefaultPolicy: "allow",
		Owners:        []string{ownerPubkey},
		PolicyAdmins:  []string{adminPubkey},
		Kind: Kinds{
			Whitelist: []int{1, 3, 7},
		},
		rules: map[int]Rule{
			1: {
				Description: "Text notes",
				SizeLimit:   ptr(int64(10000)),
			},
		},
	}

	tests := []struct {
		name         string
		contribution *PolicyAdminContribution
		expectError  bool
		errorMsg     string
	}{
		{
			name: "valid - add kinds to whitelist",
			contribution: &PolicyAdminContribution{
				AdminPubkey:      adminPubkey,
				CreatedAt:        1234567890,
				EventID:          "event123",
				KindWhitelistAdd: []int{30023},
			},
			expectError: false,
		},
		{
			name: "valid - add to blacklist",
			contribution: &PolicyAdminContribution{
				AdminPubkey:      adminPubkey,
				CreatedAt:        1234567890,
				EventID:          "event123",
				KindBlacklistAdd: []int{4},
			},
			expectError: false,
		},
		{
			name: "valid - extend existing rule with larger limit",
			contribution: &PolicyAdminContribution{
				AdminPubkey: adminPubkey,
				CreatedAt:   1234567890,
				EventID:     "event123",
				RulesExtend: map[int]RuleExtension{
					1: {
						SizeLimitOverride: ptr(int64(20000)),
					},
				},
			},
			expectError: false,
		},
		{
			name: "invalid - extend non-existent rule",
			contribution: &PolicyAdminContribution{
				AdminPubkey: adminPubkey,
				CreatedAt:   1234567890,
				EventID:     "event123",
				RulesExtend: map[int]RuleExtension{
					999: {
						SizeLimitOverride: ptr(int64(20000)),
					},
				},
			},
			expectError: true,
			errorMsg:    "cannot extend rule for kind 999",
		},
		{
			name: "invalid - size limit override smaller than owner's",
			contribution: &PolicyAdminContribution{
				AdminPubkey: adminPubkey,
				CreatedAt:   1234567890,
				EventID:     "event123",
				RulesExtend: map[int]RuleExtension{
					1: {
						SizeLimitOverride: ptr(int64(5000)),
					},
				},
			},
			expectError: true,
			errorMsg:    "size_limit_override for kind 1 must be >=",
		},
		{
			name: "valid - add new rule for undefined kind",
			contribution: &PolicyAdminContribution{
				AdminPubkey: adminPubkey,
				CreatedAt:   1234567890,
				EventID:     "event123",
				RulesAdd: map[int]Rule{
					30023: {
						Description: "Long-form content",
						SizeLimit:   ptr(int64(100000)),
					},
				},
			},
			expectError: false,
		},
		{
			name: "invalid - add rule for already-defined kind",
			contribution: &PolicyAdminContribution{
				AdminPubkey: adminPubkey,
				CreatedAt:   1234567890,
				EventID:     "event123",
				RulesAdd: map[int]Rule{
					1: {
						Description: "Trying to override",
					},
				},
			},
			expectError: true,
			errorMsg:    "cannot add rule for kind 1: already defined",
		},
		{
			name: "invalid - bad pubkey length in extension",
			contribution: &PolicyAdminContribution{
				AdminPubkey: "short",
				CreatedAt:   1234567890,
				EventID:     "event123",
			},
			expectError: true,
			errorMsg:    "invalid admin pubkey length",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			err := ValidatePolicyAdminContribution(ownerPolicy, tt.contribution, nil)
			if tt.expectError {
				if err == nil {
					t.Errorf("expected error containing %q, got nil", tt.errorMsg)
				} else if tt.errorMsg != "" && !containsSubstring(err.Error(), tt.errorMsg) {
					t.Errorf("expected error containing %q, got %q", tt.errorMsg, err.Error())
				}
			} else {
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
			}
		})
	}
}

// Helper function for generic pointer
func ptr[T any](v T) *T {
	return &v
}
@@ -113,11 +113,11 @@ func TestMaxExpiryDuration(t *testing.T) {
			expectAllow: true,
		},
		{
			name:              "valid expiry at exact limit",
			name:              "expiry at exact limit rejected",
			maxExpiryDuration: "PT1H",
			eventExpiry:       3600, // exactly 1 hour
			eventExpiry:       3600, // exactly 1 hour - >= means this is rejected
			hasExpiryTag:      true,
			expectAllow:       true,
			expectAllow:       false,
		},
		{
			name: "expiry exceeds limit",
@@ -235,6 +235,79 @@ func TestMaxExpiryDurationPrecedence(t *testing.T) {
	}
}

// Test that max_expiry_duration only applies to writes, not reads
func TestMaxExpiryDurationWriteOnly(t *testing.T) {
	signer, pubkey := generateTestKeypair(t)

	// Policy with strict max_expiry_duration
	policyJSON := []byte(`{
		"default_policy": "allow",
		"rules": {
			"4": {
				"description": "DM events with expiry",
				"max_expiry_duration": "PT10M",
				"privileged": true
			}
		}
	}`)

	policy, err := New(policyJSON)
	if err != nil {
		t.Fatalf("Failed to create policy: %v", err)
	}

	// Create event WITHOUT an expiry tag - this would fail write validation
	// but should still be readable
	ev := createTestEventForNewFields(t, signer, "test DM", 4)
	if err := ev.Sign(signer); chk.E(err) {
		t.Fatalf("Failed to sign: %v", err)
	}

	// Write should fail (no expiry tag when max_expiry_duration is set)
	allowed, err := policy.CheckPolicy("write", ev, pubkey, "127.0.0.1")
	if err != nil {
		t.Fatalf("CheckPolicy write error: %v", err)
	}
	if allowed {
		t.Error("Write should be denied for event without expiry tag when max_expiry_duration is set")
	}

	// Read should succeed (validation constraints don't apply to reads)
	allowed, err = policy.CheckPolicy("read", ev, pubkey, "127.0.0.1")
	if err != nil {
		t.Fatalf("CheckPolicy read error: %v", err)
	}
	if !allowed {
		t.Error("Read should be allowed - max_expiry_duration is write-only validation")
	}

	// Also test with an event that has expiry exceeding the limit
	ev2 := createTestEventForNewFields(t, signer, "test DM 2", 4)
	expiryTs := ev2.CreatedAt + 7200 // 2 hours - exceeds 10 minute limit
	addTagString(ev2, "expiration", int64ToString(expiryTs))
	if err := ev2.Sign(signer); chk.E(err) {
		t.Fatalf("Failed to sign: %v", err)
	}

	// Write should fail (expiry exceeds limit)
	allowed, err = policy.CheckPolicy("write", ev2, pubkey, "127.0.0.1")
	if err != nil {
		t.Fatalf("CheckPolicy write error: %v", err)
	}
	if allowed {
		t.Error("Write should be denied for event with expiry exceeding max_expiry_duration")
	}

	// Read should still succeed
	allowed, err = policy.CheckPolicy("read", ev2, pubkey, "127.0.0.1")
	if err != nil {
		t.Fatalf("CheckPolicy read error: %v", err)
	}
	if !allowed {
		t.Error("Read should be allowed - max_expiry_duration is write-only validation")
	}
}

// =============================================================================
// ProtectedRequired Tests
// =============================================================================
@@ -1071,6 +1144,94 @@ func TestNewFieldsInGlobalRule(t *testing.T) {
	}
}

// =============================================================================
// New() Validation Tests - Ensures invalid configs fail at load time
// =============================================================================

// TestNewRejectsInvalidMaxExpiryDuration verifies that New() fails fast when
// given an invalid max_expiry_duration format like "T10M" instead of "PT10M".
// This prevents silent failures where constraints are ignored.
func TestNewRejectsInvalidMaxExpiryDuration(t *testing.T) {
	tests := []struct {
		name        string
		json        string
		expectError bool
		errorMatch  string
	}{
		{
			name: "valid PT10M format accepted",
			json: `{
				"rules": {
					"4": {"max_expiry_duration": "PT10M"}
				}
			}`,
			expectError: false,
		},
		{
			name: "invalid T10M format (missing P prefix) rejected",
			json: `{
				"rules": {
					"4": {"max_expiry_duration": "T10M"}
				}
			}`,
			expectError: true,
			errorMatch:  "max_expiry_duration",
		},
		{
			name: "invalid 10M format (missing PT prefix) rejected",
			json: `{
				"rules": {
					"4": {"max_expiry_duration": "10M"}
				}
			}`,
			expectError: true,
			errorMatch:  "max_expiry_duration",
		},
		{
			name: "valid P7D format accepted",
			json: `{
				"rules": {
					"1": {"max_expiry_duration": "P7D"}
				}
			}`,
			expectError: false,
		},
		{
			name: "invalid 7D format (missing P prefix) rejected",
			json: `{
				"rules": {
					"1": {"max_expiry_duration": "7D"}
				}
			}`,
			expectError: true,
			errorMatch:  "max_expiry_duration",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			policy, err := New([]byte(tt.json))

			if tt.expectError {
				if err == nil {
					t.Errorf("New() should have rejected invalid config, but returned policy: %+v", policy)
					return
				}
				if tt.errorMatch != "" && !contains(err.Error(), tt.errorMatch) {
					t.Errorf("Error %q should contain %q", err.Error(), tt.errorMatch)
				}
			} else {
				if err != nil {
					t.Errorf("New() unexpected error for valid config: %v", err)
				}
				if policy == nil {
					t.Error("New() returned nil policy for valid config")
				}
			}
		})
	}
}

// =============================================================================
// ValidateJSON Tests for New Fields
// =============================================================================

@@ -468,11 +468,19 @@ func (p *P) UnmarshalJSON(data []byte) error {
// New creates a new policy from JSON configuration.
// If policyJSON is empty, returns a policy with default settings.
// The default_policy field defaults to "allow" if not specified.
// Returns an error if the policy JSON contains invalid values (e.g., invalid
// ISO-8601 duration format for max_expiry_duration, invalid regex patterns, etc.).
func New(policyJSON []byte) (p *P, err error) {
	p = &P{
		DefaultPolicy: "allow", // Set default value
	}
	if len(policyJSON) > 0 {
		// Validate JSON before loading to fail fast on invalid configurations.
		// This prevents silent failures where invalid values (like "T10M" instead
		// of "PT10M" for max_expiry_duration) are ignored and constraints don't apply.
		if err = p.ValidateJSON(policyJSON); err != nil {
			return nil, fmt.Errorf("policy validation failed: %v", err)
		}
		if err = json.Unmarshal(policyJSON, p); chk.E(err) {
			return nil, fmt.Errorf("failed to unmarshal policy JSON: %v", err)
		}
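
The practical effect of the fail-fast validation above, as a small usage sketch from a package-external caller; the literal mirrors the rejection cases exercised in the tests:

```go
// Sketch only: a malformed ISO-8601 duration is now rejected at load time
// rather than being silently ignored.
if _, err := policy.New([]byte(`{"rules":{"4":{"max_expiry_duration":"T10M"}}}`)); err == nil {
	panic(`expected policy.New to reject the invalid duration "T10M"`)
}
```
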
@@ -1272,7 +1280,8 @@ func (p *P) checkRulePolicy(
|
||||
}
|
||||
|
||||
// Check required tags
|
||||
if len(rule.MustHaveTags) > 0 {
|
||||
// Only apply for write access - we validate what goes in, not what comes out
|
||||
if access == "write" && len(rule.MustHaveTags) > 0 {
|
||||
for _, requiredTag := range rule.MustHaveTags {
|
||||
if ev.Tags.GetFirst([]byte(requiredTag)) == nil {
|
||||
return false, nil
|
||||
@@ -1281,7 +1290,8 @@ func (p *P) checkRulePolicy(
|
||||
}
|
||||
|
||||
// Check expiry time (uses maxExpirySeconds which is parsed from MaxExpiryDuration or MaxExpiry)
|
||||
if rule.maxExpirySeconds != nil && *rule.maxExpirySeconds > 0 {
|
||||
// Only apply for write access - we validate what goes in, not what comes out
|
||||
if access == "write" && rule.maxExpirySeconds != nil && *rule.maxExpirySeconds > 0 {
|
||||
expiryTag := ev.Tags.GetFirst([]byte("expiration"))
|
||||
if expiryTag == nil {
|
||||
return false, nil // Must have expiry if max_expiry is set
|
||||
@@ -1294,7 +1304,7 @@ func (p *P) checkRulePolicy(
|
||||
return false, nil // Invalid expiry format
|
||||
}
|
||||
maxAllowedExpiry := ev.CreatedAt + *rule.maxExpirySeconds
|
||||
if expiryTs > maxAllowedExpiry {
|
||||
if expiryTs >= maxAllowedExpiry {
|
||||
log.D.F("expiration %d exceeds max allowed %d (created_at %d + max_expiry %d)",
|
||||
expiryTs, maxAllowedExpiry, ev.CreatedAt, *rule.maxExpirySeconds)
|
||||
return false, nil // Expiry too far in the future
|
||||
@@ -1302,7 +1312,8 @@ func (p *P) checkRulePolicy(
|
||||
}
|
||||
|
||||
// Check ProtectedRequired (NIP-70: events must have "-" tag)
|
||||
if rule.ProtectedRequired {
|
||||
// Only apply for write access - we validate what goes in, not what comes out
|
||||
if access == "write" && rule.ProtectedRequired {
|
||||
protectedTag := ev.Tags.GetFirst([]byte("-"))
|
||||
if protectedTag == nil {
|
||||
log.D.F("protected_required: event missing '-' tag (NIP-70)")
|
||||
@@ -1311,7 +1322,8 @@ func (p *P) checkRulePolicy(
|
||||
}
|
||||
|
||||
// Check IdentifierRegex (validates "d" tag values)
|
||||
if rule.identifierRegexCache != nil {
|
||||
// Only apply for write access - we validate what goes in, not what comes out
|
||||
if access == "write" && rule.identifierRegexCache != nil {
|
||||
dTags := ev.Tags.GetAll([]byte("d"))
|
||||
if len(dTags) == 0 {
|
||||
log.D.F("identifier_regex: event missing 'd' tag")
|
||||
@@ -1328,7 +1340,8 @@ func (p *P) checkRulePolicy(
|
||||
}
|
||||
|
||||
// Check MaxAgeOfEvent (maximum age of event in seconds)
|
||||
if rule.MaxAgeOfEvent != nil && *rule.MaxAgeOfEvent > 0 {
|
||||
// Only apply for write access - we validate what goes in, not what comes out
|
||||
if access == "write" && rule.MaxAgeOfEvent != nil && *rule.MaxAgeOfEvent > 0 {
|
||||
currentTime := time.Now().Unix()
|
||||
maxAllowedTime := currentTime - *rule.MaxAgeOfEvent
|
||||
if ev.CreatedAt < maxAllowedTime {
|
||||
@@ -1337,7 +1350,8 @@ func (p *P) checkRulePolicy(
|
||||
}
|
||||
|
||||
// Check MaxAgeEventInFuture (maximum time event can be in the future in seconds)
|
||||
if rule.MaxAgeEventInFuture != nil && *rule.MaxAgeEventInFuture > 0 {
|
||||
// Only apply for write access - we validate what goes in, not what comes out
|
||||
if access == "write" && rule.MaxAgeEventInFuture != nil && *rule.MaxAgeEventInFuture > 0 {
|
||||
currentTime := time.Now().Unix()
|
||||
maxFutureTime := currentTime + *rule.MaxAgeEventInFuture
|
||||
if ev.CreatedAt > maxFutureTime {
|
||||
@@ -1766,6 +1780,8 @@ func (p *P) ValidateJSON(policyJSON []byte) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Note: Owner-specific validation (non-empty owners) is done in ValidateOwnerPolicyUpdate
|
||||
|
||||
// Validate regex patterns in tag_validation rules and new fields
|
||||
for kind, rule := range tempPolicy.rules {
|
||||
for tagName, pattern := range rule.TagValidation {
|
||||
@@ -1782,7 +1798,7 @@ func (p *P) ValidateJSON(policyJSON []byte) error {
|
||||
// Validate MaxExpiryDuration format
|
||||
if rule.MaxExpiryDuration != "" {
|
||||
if _, err := parseDuration(rule.MaxExpiryDuration); err != nil {
|
||||
return fmt.Errorf("invalid max_expiry_duration %q in kind %d: %v", rule.MaxExpiryDuration, kind, err)
|
||||
return fmt.Errorf("invalid max_expiry_duration %q in kind %d: %v (format must be ISO-8601 duration, e.g. \"PT10M\" for 10 minutes, \"P7D\" for 7 days, \"P1DT12H\" for 1 day 12 hours)", rule.MaxExpiryDuration, kind, err)
|
||||
}
|
||||
}
|
||||
// Validate FollowsWhitelistAdmins pubkeys
|
||||
@@ -1813,7 +1829,7 @@ func (p *P) ValidateJSON(policyJSON []byte) error {
|
||||
// Validate global rule MaxExpiryDuration format
|
||||
if tempPolicy.Global.MaxExpiryDuration != "" {
|
||||
if _, err := parseDuration(tempPolicy.Global.MaxExpiryDuration); err != nil {
|
||||
return fmt.Errorf("invalid max_expiry_duration %q in global rule: %v", tempPolicy.Global.MaxExpiryDuration, err)
|
||||
return fmt.Errorf("invalid max_expiry_duration %q in global rule: %v (format must be ISO-8601 duration, e.g. \"PT10M\" for 10 minutes, \"P7D\" for 7 days, \"P1DT12H\" for 1 day 12 hours)", tempPolicy.Global.MaxExpiryDuration, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2177,3 +2193,254 @@ func (p *P) GetRulesKinds() []int {
    }
    return kinds
}
+
+// =============================================================================
+// Owner vs Policy Admin Update Validation
+// =============================================================================
+
+// ValidateOwnerPolicyUpdate validates a full policy update from an owner.
+// Owners can modify all fields but the owners list must be non-empty.
+func (p *P) ValidateOwnerPolicyUpdate(policyJSON []byte) error {
+    // First run standard validation
+    if err := p.ValidateJSON(policyJSON); err != nil {
+        return err
+    }
+
+    // Parse the new policy
+    tempPolicy := &P{}
+    if err := json.Unmarshal(policyJSON, tempPolicy); err != nil {
+        return fmt.Errorf("failed to parse policy JSON: %v", err)
+    }
+
+    // Owner-specific validation: owners list cannot be empty
+    if len(tempPolicy.Owners) == 0 {
+        return fmt.Errorf("owners list cannot be empty: at least one owner must be defined to prevent lockout")
+    }
+
+    return nil
+}
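For context, a minimal sketch of the lockout guard in action, assuming the top-level policy JSON uses an `owners` array as the error string suggests:

```go
// Hypothetical caller: an owner update that empties the owners list
// should fail ValidateOwnerPolicyUpdate rather than lock everyone out.
bad := []byte(`{"owners": []}`)
if err := p.ValidateOwnerPolicyUpdate(bad); err != nil {
    fmt.Println(err) // "owners list cannot be empty: ..."
}
```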
+
+// ValidatePolicyAdminUpdate validates a policy update from a policy admin.
+// Policy admins CANNOT modify: owners, policy_admins
+// Policy admins CAN: extend rules, add blacklists, add new kind rules
+func (p *P) ValidatePolicyAdminUpdate(policyJSON []byte, adminPubkey []byte) error {
+    // First run standard validation
+    if err := p.ValidateJSON(policyJSON); err != nil {
+        return err
+    }
+
+    // Parse the new policy
+    tempPolicy := &P{}
+    if err := json.Unmarshal(policyJSON, tempPolicy); err != nil {
+        return fmt.Errorf("failed to parse policy JSON: %v", err)
+    }
+
+    // Protected field check: owners must match current
+    if !stringSliceEqual(tempPolicy.Owners, p.Owners) {
+        return fmt.Errorf("policy admins cannot modify the 'owners' field: this is a protected field that only owners can change")
+    }
+
+    // Protected field check: policy_admins must match current
+    if !stringSliceEqual(tempPolicy.PolicyAdmins, p.PolicyAdmins) {
+        return fmt.Errorf("policy admins cannot modify the 'policy_admins' field: this is a protected field that only owners can change")
+    }
+
+    // Validate that the admin is not reducing owner-granted permissions.
+    // This check ensures policy admins can only extend, not restrict.
+    if err := p.validateNoPermissionReduction(tempPolicy); err != nil {
+        return fmt.Errorf("policy admins cannot reduce owner-granted permissions: %v", err)
+    }
+
+    return nil
+}
+
+// validateNoPermissionReduction checks that the new policy doesn't reduce
+// permissions that were granted in the current (owner) policy.
+//
+// Policy admins CAN:
+// - ADD to allow lists (write_allow, read_allow)
+// - ADD to deny lists (write_deny, read_deny) to blacklist non-admin users
+// - INCREASE limits (size_limit, content_limit, max_age_of_event)
+// - ADD new kinds to whitelist or blacklist
+// - ADD new rules for kinds not defined by owner
+//
+// Policy admins CANNOT:
+// - REMOVE from allow lists
+// - DECREASE limits
+// - REMOVE kinds from whitelist
+// - REMOVE rules defined by owner
+// - ADD new required tags (restrictions)
+// - BLACKLIST owners or other policy admins
+func (p *P) validateNoPermissionReduction(newPolicy *P) error {
+    // Check kind whitelist - new policy must include all current whitelisted kinds
+    for _, kind := range p.Kind.Whitelist {
+        found := false
+        for _, newKind := range newPolicy.Kind.Whitelist {
+            if kind == newKind {
+                found = true
+                break
+            }
+        }
+        if !found {
+            return fmt.Errorf("cannot remove kind %d from whitelist", kind)
+        }
+    }
+
+    // Check each rule in the current policy
+    for kind, currentRule := range p.rules {
+        newRule, exists := newPolicy.rules[kind]
+        if !exists {
+            return fmt.Errorf("cannot remove rule for kind %d", kind)
+        }
+
+        // Check write_allow - new rule must include all current pubkeys
+        for _, pk := range currentRule.WriteAllow {
+            if !containsString(newRule.WriteAllow, pk) {
+                return fmt.Errorf("cannot remove pubkey %s from write_allow for kind %d", pk, kind)
+            }
+        }
+
+        // Check read_allow - new rule must include all current pubkeys
+        for _, pk := range currentRule.ReadAllow {
+            if !containsString(newRule.ReadAllow, pk) {
+                return fmt.Errorf("cannot remove pubkey %s from read_allow for kind %d", pk, kind)
+            }
+        }
+
+        // Check write_deny - cannot blacklist owners or policy admins
+        for _, pk := range newRule.WriteDeny {
+            if containsString(p.Owners, pk) {
+                return fmt.Errorf("cannot blacklist owner %s in write_deny for kind %d", pk, kind)
+            }
+            if containsString(p.PolicyAdmins, pk) {
+                return fmt.Errorf("cannot blacklist policy admin %s in write_deny for kind %d", pk, kind)
+            }
+        }
+
+        // Check read_deny - cannot blacklist owners or policy admins
+        for _, pk := range newRule.ReadDeny {
+            if containsString(p.Owners, pk) {
+                return fmt.Errorf("cannot blacklist owner %s in read_deny for kind %d", pk, kind)
+            }
+            if containsString(p.PolicyAdmins, pk) {
+                return fmt.Errorf("cannot blacklist policy admin %s in read_deny for kind %d", pk, kind)
+            }
+        }
+
+        // Check size limits - new limit cannot be smaller
+        if currentRule.SizeLimit != nil && newRule.SizeLimit != nil {
+            if *newRule.SizeLimit < *currentRule.SizeLimit {
+                return fmt.Errorf("cannot reduce size_limit for kind %d from %d to %d", kind, *currentRule.SizeLimit, *newRule.SizeLimit)
+            }
+        }
+
+        // Check content limits - new limit cannot be smaller
+        if currentRule.ContentLimit != nil && newRule.ContentLimit != nil {
+            if *newRule.ContentLimit < *currentRule.ContentLimit {
+                return fmt.Errorf("cannot reduce content_limit for kind %d from %d to %d", kind, *currentRule.ContentLimit, *newRule.ContentLimit)
+            }
+        }
+
+        // Check max_age_of_event - new limit cannot be smaller (smaller = more restrictive)
+        if currentRule.MaxAgeOfEvent != nil && newRule.MaxAgeOfEvent != nil {
+            if *newRule.MaxAgeOfEvent < *currentRule.MaxAgeOfEvent {
+                return fmt.Errorf("cannot reduce max_age_of_event for kind %d from %d to %d", kind, *currentRule.MaxAgeOfEvent, *newRule.MaxAgeOfEvent)
+            }
+        }
+
+        // Check must_have_tags - cannot add new required tags (more restrictive)
+        for _, tag := range newRule.MustHaveTags {
+            found := false
+            for _, currentTag := range currentRule.MustHaveTags {
+                if tag == currentTag {
+                    found = true
+                    break
+                }
+            }
+            if !found {
+                return fmt.Errorf("cannot add required tag %q for kind %d (only owners can add restrictions)", tag, kind)
+            }
+        }
+    }
+
+    // Check global rule write_deny - cannot blacklist owners or policy admins
+    for _, pk := range newPolicy.Global.WriteDeny {
+        if containsString(p.Owners, pk) {
+            return fmt.Errorf("cannot blacklist owner %s in global write_deny", pk)
+        }
+        if containsString(p.PolicyAdmins, pk) {
+            return fmt.Errorf("cannot blacklist policy admin %s in global write_deny", pk)
+        }
+    }
+
+    // Check global rule read_deny - cannot blacklist owners or policy admins
+    for _, pk := range newPolicy.Global.ReadDeny {
+        if containsString(p.Owners, pk) {
+            return fmt.Errorf("cannot blacklist owner %s in global read_deny", pk)
+        }
+        if containsString(p.PolicyAdmins, pk) {
+            return fmt.Errorf("cannot blacklist policy admin %s in global read_deny", pk)
+        }
+    }
+
+    // Check global rule size limits
+    if p.Global.SizeLimit != nil && newPolicy.Global.SizeLimit != nil {
+        if *newPolicy.Global.SizeLimit < *p.Global.SizeLimit {
+            return fmt.Errorf("cannot reduce global size_limit from %d to %d", *p.Global.SizeLimit, *newPolicy.Global.SizeLimit)
+        }
+    }
+
+    return nil
+}
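A hedged sketch of the "extend, don't restrict" rule from the caller's side. The JSON shape (`rules` keyed by kind, `size_limit` field) is inferred from the error strings above, and `adminPubkey` is an assumed input:

```go
// Assume p currently holds an owner policy where kind 1 has size_limit 4096,
// owner "<owner-pk>" and policy admin "<admin-pk>". The admin must carry the
// protected fields over unchanged; lowering the limit is then what fails.
update := []byte(`{"owners":["<owner-pk>"],"policy_admins":["<admin-pk>"],"rules":{"1":{"size_limit":1024}}}`)
if err := p.ValidatePolicyAdminUpdate(update, adminPubkey); err != nil {
    fmt.Println(err)
    // policy admins cannot reduce owner-granted permissions:
    // cannot reduce size_limit for kind 1 from 4096 to 1024
}
```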
+
+// ReloadAsOwner reloads the policy from an owner's kind 12345 event.
+// Owners can modify all fields but the owners list must be non-empty.
+func (p *P) ReloadAsOwner(policyJSON []byte, configPath string) error {
+    // Validate as owner update
+    if err := p.ValidateOwnerPolicyUpdate(policyJSON); err != nil {
+        return fmt.Errorf("owner policy validation failed: %v", err)
+    }
+
+    // Use existing Reload logic
+    return p.Reload(policyJSON, configPath)
+}
+
+// ReloadAsPolicyAdmin reloads the policy from a policy admin's kind 12345 event.
+// Policy admins cannot modify protected fields (owners, policy_admins) and
+// cannot reduce owner-granted permissions.
+func (p *P) ReloadAsPolicyAdmin(policyJSON []byte, configPath string, adminPubkey []byte) error {
+    // Validate as policy admin update
+    if err := p.ValidatePolicyAdminUpdate(policyJSON, adminPubkey); err != nil {
+        return fmt.Errorf("policy admin validation failed: %v", err)
+    }
+
+    // Use existing Reload logic
+    return p.Reload(policyJSON, configPath)
+}
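Since the two entry points differ only in validation, a caller receiving a kind 12345 policy event would presumably branch on the signer's role. Sketch only: `pubkeyHex`, `pubkeyBytes`, `evJSON`, and `configPath` are assumed inputs, while `containsString` and the `Reload*` methods come from this file:

```go
// Hypothetical dispatch for an incoming kind 12345 policy update.
var err error
switch {
case containsString(p.Owners, pubkeyHex):
    err = p.ReloadAsOwner(evJSON, configPath)
case containsString(p.PolicyAdmins, pubkeyHex):
    err = p.ReloadAsPolicyAdmin(evJSON, configPath, pubkeyBytes)
default:
    err = fmt.Errorf("pubkey %s is not allowed to update the policy", pubkeyHex)
}
```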
+
+// stringSliceEqual checks if two string slices are equal as multisets
+// (order-independent; duplicate counts must match).
+func stringSliceEqual(a, b []string) bool {
+    if len(a) != len(b) {
+        return false
+    }
+
+    // Create maps for comparison
+    aMap := make(map[string]int)
+    for _, v := range a {
+        aMap[v]++
+    }
+
+    bMap := make(map[string]int)
+    for _, v := range b {
+        bMap[v]++
+    }
+
+    // Compare maps
+    for k, v := range aMap {
+        if bMap[k] != v {
+            return false
+        }
+    }
+
+    return true
+}
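Note that the helper counts duplicates, so it implements multiset equality rather than set equality:

```go
stringSliceEqual([]string{"a", "b"}, []string{"b", "a"})           // true: order ignored
stringSliceEqual([]string{"a", "b"}, []string{"a", "b", "b"})      // false: lengths differ
stringSliceEqual([]string{"a", "a", "b"}, []string{"a", "b", "b"}) // false: counts differ
```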

@@ -1 +1 @@
-v0.31.3
+v0.31.8