Compare commits
8 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
516ce9c42c
|
|||
|
ed95947971
|
|||
|
b58b91cd14
|
|||
|
20293046d3
|
|||
|
a6d969d7e9
|
|||
|
a5dc827e15
|
|||
|
be81b3320e
|
|||
|
f16ab3077f
|
50
.claude/commands/release.md
Normal file
50
.claude/commands/release.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# Release Command
|
||||
|
||||
Review all changes in the repository and create a release with proper commit message, version tag, and push to remotes.
|
||||
|
||||
## Argument: $ARGUMENTS
|
||||
|
||||
The argument should be one of:
|
||||
- `patch` - Bump the patch version (e.g., v0.35.3 -> v0.35.4)
|
||||
- `minor` - Bump the minor version and reset patch to 0 (e.g., v0.35.3 -> v0.36.0)
|
||||
|
||||
If no argument provided, default to `patch`.
|
||||
|
||||
## Steps to perform:
|
||||
|
||||
1. **Read the current version** from `pkg/version/version`
|
||||
|
||||
2. **Calculate the new version** based on the argument:
|
||||
- Parse the current version (format: vMAJOR.MINOR.PATCH)
|
||||
- If `patch`: increment PATCH by 1
|
||||
- If `minor`: increment MINOR by 1, set PATCH to 0
|
||||
|
||||
3. **Update the version file** (`pkg/version/version`) with the new version
|
||||
|
||||
4. **Review changes** using `git status` and `git diff --stat HEAD`
|
||||
|
||||
5. **Compose a commit message** following this format:
|
||||
- First line: 72 chars max, imperative mood summary
|
||||
- Blank line
|
||||
- Bullet points describing each significant change
|
||||
- "Files modified:" section listing affected files
|
||||
- Footer with Claude Code attribution
|
||||
|
||||
6. **Stage all changes** with `git add -A`
|
||||
|
||||
7. **Create the commit** with the composed message
|
||||
|
||||
8. **Create a git tag** with the new version (e.g., `v0.36.0`)
|
||||
|
||||
9. **Push to remotes** (origin and gitea) with tags:
|
||||
```
|
||||
git push origin main --tags
|
||||
git push gitea main --tags
|
||||
```
|
||||
|
||||
10. **Report completion** with the new version and commit hash
|
||||
|
||||
## Important:
|
||||
- Do NOT push to github remote (only origin and gitea)
|
||||
- Always verify the build compiles before committing: `CGO_ENABLED=0 go build -o /dev/null ./...`
|
||||
- If build fails, fix issues before proceeding
|
||||
@@ -1,5 +1,4 @@
|
||||
{
|
||||
"MAX_THINKING_TOKENS": "8000",
|
||||
"permissions": {
|
||||
"allow": [
|
||||
"Bash:*",
|
||||
@@ -85,10 +84,47 @@
|
||||
"Bash(CGO_ENABLED=0 go test:*)",
|
||||
"Bash(git submodule:*)",
|
||||
"WebFetch(domain:neo4j.com)",
|
||||
"Bash(git reset:*)"
|
||||
"Bash(git reset:*)",
|
||||
"Bash(go get:*)",
|
||||
"Bash(export ORLY_DATA_DIR=/tmp/orly-badger-test )",
|
||||
"Bash(ORLY_PORT=10547:*)",
|
||||
"Bash(ORLY_ACL_MODE=none:*)",
|
||||
"Bash(ORLY_LOG_LEVEL=info:*)",
|
||||
"Bash(ORLY_HEALTH_PORT=8080:*)",
|
||||
"Bash(ORLY_ENABLE_SHUTDOWN=true:*)",
|
||||
"Bash(timeout 5 ./orly:*)",
|
||||
"Bash(# Test with a small subset first echo \"\"Testing with first 10000 lines...\"\" head -10000 ~/src/git.nostrdev.com/wot_reference.jsonl ls -lh /tmp/test_subset.jsonl curl -s -X POST -F \"\"file=@/tmp/test_subset.jsonl\"\" http://localhost:10547/api/import echo \"\"\"\" echo \"\"Test completed\"\" # Check relay logs sleep 5 tail -50 /tmp/claude/tasks/bd99a21.output)",
|
||||
"Bash(# Check if import is still running curl -s http://localhost:8080/healthz && echo \"\" - relay is healthy\"\" # Check relay memory echo \"\"Relay memory:\"\" ps -p 20580 -o rss=,vsz=,pmem=)",
|
||||
"Skill(cypher)",
|
||||
"Bash(git tag:*)",
|
||||
"Bash(git push:*)",
|
||||
"Bash(kill:*)",
|
||||
"Bash(pkill:*)",
|
||||
"Bash(pkill -f \"curl.*import\")",
|
||||
"Bash(CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build:*)",
|
||||
"Bash(CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build:*)",
|
||||
"Bash(CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build:*)",
|
||||
"Bash(__NEW_LINE__ echo \"\")",
|
||||
"Bash(# Check if Neo4j is running echo \"\"Checking Neo4j status...\"\" docker compose ps)",
|
||||
"Bash(pgrep:*)",
|
||||
"Bash(docker stats:*)",
|
||||
"Bash(fi)",
|
||||
"Bash(xargs:*)",
|
||||
"Bash(for i in 1 2 3 4 5)",
|
||||
"Bash(do)",
|
||||
"WebFetch(domain:vermaden.wordpress.com)",
|
||||
"WebFetch(domain:eylenburg.github.io)",
|
||||
"Bash(go run -exec '' -c 'package main; import \"\"git.mleku.dev/mleku/nostr/utils/normalize\"\"; import \"\"fmt\"\"; func main() { fmt.Println(string(normalize.URL([]byte(\"\"relay.example.com:3334\"\")))); fmt.Println(string(normalize.URL([]byte(\"\"relay.example.com:443\"\")))); fmt.Println(string(normalize.URL([]byte(\"\"ws://relay.example.com:3334\"\")))); fmt.Println(string(normalize.URL([]byte(\"\"wss://relay.example.com:3334\"\")))) }')",
|
||||
"Bash(go run:*)",
|
||||
"Bash(git commit -m \"$(cat <<''EOF''\nFix NIP-11 fetch URL scheme conversion for non-proxied relays\n\n- Convert wss:// to https:// and ws:// to http:// before fetching NIP-11\n documents, fixing failures for users not using HTTPS upgrade proxies\n- The fetchNIP11 function was using WebSocket URLs directly for HTTP\n requests, causing scheme mismatch errors\n\n🤖 Generated with [Claude Code](https://claude.com/claude-code)\n\nCo-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>\nEOF\n)\")",
|
||||
"Bash(/tmp/orly help:*)",
|
||||
"Bash(git commit -m \"$(cat <<''EOF''\nAdd ORLY_POLICY_PATH for custom policy file location\n\n- Add ORLY_POLICY_PATH environment variable to configure custom policy\n file path, overriding the default ~/.config/ORLY/policy.json location\n- Enforce ABSOLUTE paths only - relay panics on startup if relative path\n is provided, preventing common misconfiguration errors\n- Update PolicyManager to store and expose configPath for hot-reload saves\n- Add ConfigPath() method to P struct delegating to internal PolicyManager\n- Update NewWithManager() signature to accept optional custom path parameter\n- Add BUG_REPORTS_AND_FEATURE_REQUEST_PROTOCOL.md with issue submission\n guidelines requiring environment details, reproduction steps, and logs\n- Update README.md with system requirements (500MB minimum memory) and\n link to bug report protocol\n- Update CLAUDE.md and README.md documentation for new ORLY_POLICY_PATH\n\nFiles modified:\n- app/config/config.go: Add PolicyPath config field\n- pkg/policy/policy.go: Add configPath storage and validation\n- app/handle-policy-config.go: Use policyManager.ConfigPath()\n- app/main.go: Pass cfg.PolicyPath to NewWithManager\n- pkg/policy/*_test.go: Update test calls with new parameter\n- BUG_REPORTS_AND_FEATURE_REQUEST_PROTOCOL.md: New file\n- README.md, CLAUDE.md: Documentation updates\n\n🤖 Generated with [Claude Code](https://claude.com/claude-code)\n\nCo-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>\nEOF\n)\")",
|
||||
"Bash(mkdir:*)",
|
||||
"Bash(ssh:*)"
|
||||
],
|
||||
"deny": [],
|
||||
"ask": []
|
||||
},
|
||||
"outputStyle": "Default"
|
||||
"outputStyle": "Default",
|
||||
"MAX_THINKING_TOKENS": "8000"
|
||||
}
|
||||
|
||||
118
.gitea/issue_template/bug_report.yaml
Normal file
118
.gitea/issue_template/bug_report.yaml
Normal file
@@ -0,0 +1,118 @@
|
||||
name: Bug Report
|
||||
about: Report a bug or unexpected behavior in ORLY relay
|
||||
title: "[BUG] "
|
||||
labels:
|
||||
- bug
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
## Bug Report Guidelines
|
||||
|
||||
Thank you for taking the time to report a bug. Please fill out the form below to help us understand and reproduce the issue.
|
||||
|
||||
**Before submitting:**
|
||||
- Search [existing issues](https://git.mleku.dev/mleku/next.orly.dev/issues) to avoid duplicates
|
||||
- Check the [documentation](https://git.mleku.dev/mleku/next.orly.dev) for configuration guidance
|
||||
- Ensure you're running a recent version of ORLY
|
||||
|
||||
- type: input
|
||||
id: version
|
||||
attributes:
|
||||
label: ORLY Version
|
||||
description: Run `./orly version` to get the version
|
||||
placeholder: "v0.35.4"
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
id: database
|
||||
attributes:
|
||||
label: Database Backend
|
||||
description: Which database backend are you using?
|
||||
options:
|
||||
- Badger (default)
|
||||
- Neo4j
|
||||
- WasmDB
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: description
|
||||
attributes:
|
||||
label: Bug Description
|
||||
description: A clear and concise description of the bug
|
||||
placeholder: Describe what happened and what you expected to happen
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: reproduction
|
||||
attributes:
|
||||
label: Steps to Reproduce
|
||||
description: Detailed steps to reproduce the behavior
|
||||
placeholder: |
|
||||
1. Start relay with `./orly`
|
||||
2. Connect with client X
|
||||
3. Perform action Y
|
||||
4. Observe error Z
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: expected
|
||||
attributes:
|
||||
label: Expected Behavior
|
||||
description: What did you expect to happen?
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: logs
|
||||
attributes:
|
||||
label: Relevant Logs
|
||||
description: |
|
||||
Include relevant log output. Set `ORLY_LOG_LEVEL=debug` or `trace` for more detail.
|
||||
This will be automatically formatted as code.
|
||||
render: shell
|
||||
|
||||
- type: textarea
|
||||
id: config
|
||||
attributes:
|
||||
label: Configuration
|
||||
description: |
|
||||
Relevant environment variables or configuration (redact sensitive values).
|
||||
This will be automatically formatted as code.
|
||||
render: shell
|
||||
placeholder: |
|
||||
ORLY_ACL_MODE=follows
|
||||
ORLY_POLICY_ENABLED=true
|
||||
ORLY_DB_TYPE=badger
|
||||
|
||||
- type: textarea
|
||||
id: environment
|
||||
attributes:
|
||||
label: Environment
|
||||
description: Operating system, Go version, etc.
|
||||
placeholder: |
|
||||
OS: Linux 6.8.0
|
||||
Go: 1.25.3
|
||||
Architecture: amd64
|
||||
|
||||
- type: textarea
|
||||
id: additional
|
||||
attributes:
|
||||
label: Additional Context
|
||||
description: Any other context, screenshots, or information that might help
|
||||
|
||||
- type: checkboxes
|
||||
id: checklist
|
||||
attributes:
|
||||
label: Checklist
|
||||
options:
|
||||
- label: I have searched existing issues and this is not a duplicate
|
||||
required: true
|
||||
- label: I have included version information
|
||||
required: true
|
||||
- label: I have included steps to reproduce the issue
|
||||
required: true
|
||||
8
.gitea/issue_template/config.yaml
Normal file
8
.gitea/issue_template/config.yaml
Normal file
@@ -0,0 +1,8 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: Documentation
|
||||
url: https://git.mleku.dev/mleku/next.orly.dev
|
||||
about: Check the repository documentation before opening an issue
|
||||
- name: Nostr Protocol (NIPs)
|
||||
url: https://github.com/nostr-protocol/nips
|
||||
about: For questions about Nostr protocol specifications
|
||||
118
.gitea/issue_template/feature_request.yaml
Normal file
118
.gitea/issue_template/feature_request.yaml
Normal file
@@ -0,0 +1,118 @@
|
||||
name: Feature Request
|
||||
about: Suggest a new feature or enhancement for ORLY relay
|
||||
title: "[FEATURE] "
|
||||
labels:
|
||||
- enhancement
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
## Feature Request Guidelines
|
||||
|
||||
Thank you for suggesting a feature. Please provide as much detail as possible to help us understand your proposal.
|
||||
|
||||
**Before submitting:**
|
||||
- Search [existing issues](https://git.mleku.dev/mleku/next.orly.dev/issues) to avoid duplicates
|
||||
- Check if this is covered by an existing [NIP](https://github.com/nostr-protocol/nips)
|
||||
- Review the [documentation](https://git.mleku.dev/mleku/next.orly.dev) for current capabilities
|
||||
|
||||
- type: dropdown
|
||||
id: category
|
||||
attributes:
|
||||
label: Feature Category
|
||||
description: What area of ORLY does this feature relate to?
|
||||
options:
|
||||
- Protocol (NIP implementation)
|
||||
- Database / Storage
|
||||
- Performance / Optimization
|
||||
- Policy / Access Control
|
||||
- Web UI / Admin Interface
|
||||
- Deployment / Operations
|
||||
- API / Integration
|
||||
- Documentation
|
||||
- Other
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: problem
|
||||
attributes:
|
||||
label: Problem Statement
|
||||
description: |
|
||||
What problem does this feature solve? Is this related to a frustration you have?
|
||||
A clear problem statement helps us understand the motivation.
|
||||
placeholder: "I'm always frustrated when..."
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: solution
|
||||
attributes:
|
||||
label: Proposed Solution
|
||||
description: |
|
||||
Describe the solution you'd like. Be specific about expected behavior.
|
||||
placeholder: "I would like ORLY to..."
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: alternatives
|
||||
attributes:
|
||||
label: Alternatives Considered
|
||||
description: |
|
||||
Describe any alternative solutions or workarounds you've considered.
|
||||
placeholder: "I've tried X but it doesn't work because..."
|
||||
|
||||
- type: input
|
||||
id: nip
|
||||
attributes:
|
||||
label: Related NIP
|
||||
description: If this relates to a Nostr Implementation Possibility, provide the NIP number
|
||||
placeholder: "NIP-XX"
|
||||
|
||||
- type: dropdown
|
||||
id: impact
|
||||
attributes:
|
||||
label: Scope of Impact
|
||||
description: How significant is this feature?
|
||||
options:
|
||||
- Minor enhancement (small quality-of-life improvement)
|
||||
- Moderate feature (adds useful capability)
|
||||
- Major feature (significant new functionality)
|
||||
- Breaking change (requires migration or config changes)
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: dropdown
|
||||
id: contribution
|
||||
attributes:
|
||||
label: Willingness to Contribute
|
||||
description: Would you be willing to help implement this feature?
|
||||
options:
|
||||
- "Yes, I can submit a PR"
|
||||
- "Yes, I can help with testing"
|
||||
- "No, but I can provide more details"
|
||||
- "No"
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: additional
|
||||
attributes:
|
||||
label: Additional Context
|
||||
description: |
|
||||
Any other context, mockups, examples, or references that help explain the feature.
|
||||
|
||||
For protocol features, include example event structures or message flows if applicable.
|
||||
|
||||
- type: checkboxes
|
||||
id: checklist
|
||||
attributes:
|
||||
label: Checklist
|
||||
options:
|
||||
- label: I have searched existing issues and this is not a duplicate
|
||||
required: true
|
||||
- label: I have described the problem this feature solves
|
||||
required: true
|
||||
- label: I have checked if this relates to an existing NIP
|
||||
required: false
|
||||
53
.github/workflows/ci.yaml
vendored
Normal file
53
.github/workflows/ci.yaml
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, develop]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.23'
|
||||
|
||||
- name: Download libsecp256k1
|
||||
run: |
|
||||
wget -q https://git.mleku.dev/mleku/nostr/raw/branch/main/crypto/p8k/libsecp256k1.so -O libsecp256k1.so
|
||||
chmod +x libsecp256k1.so
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
export LD_LIBRARY_PATH="${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$(pwd)"
|
||||
CGO_ENABLED=0 go test ./...
|
||||
|
||||
- name: Build binary
|
||||
run: |
|
||||
CGO_ENABLED=0 go build -o orly .
|
||||
./orly version
|
||||
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.23'
|
||||
|
||||
- name: Check go mod tidy
|
||||
run: |
|
||||
go mod tidy
|
||||
git diff --exit-code go.mod go.sum
|
||||
|
||||
- name: Run go vet
|
||||
run: CGO_ENABLED=0 go vet ./...
|
||||
154
.github/workflows/release.yaml
vendored
Normal file
154
.github/workflows/release.yaml
vendored
Normal file
@@ -0,0 +1,154 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- goos: linux
|
||||
goarch: amd64
|
||||
platform: linux-amd64
|
||||
ext: ""
|
||||
lib: libsecp256k1.so
|
||||
- goos: linux
|
||||
goarch: arm64
|
||||
platform: linux-arm64
|
||||
ext: ""
|
||||
lib: libsecp256k1.so
|
||||
- goos: darwin
|
||||
goarch: amd64
|
||||
platform: darwin-amd64
|
||||
ext: ""
|
||||
lib: libsecp256k1.dylib
|
||||
- goos: darwin
|
||||
goarch: arm64
|
||||
platform: darwin-arm64
|
||||
ext: ""
|
||||
lib: libsecp256k1.dylib
|
||||
- goos: windows
|
||||
goarch: amd64
|
||||
platform: windows-amd64
|
||||
ext: ".exe"
|
||||
lib: libsecp256k1.dll
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v5
|
||||
with:
|
||||
go-version: '1.23'
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '20'
|
||||
|
||||
- name: Install bun
|
||||
run: |
|
||||
curl -fsSL https://bun.sh/install | bash
|
||||
echo "$HOME/.bun/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Build Web UI
|
||||
run: |
|
||||
cd app/web
|
||||
$HOME/.bun/bin/bun install
|
||||
$HOME/.bun/bin/bun run build
|
||||
|
||||
- name: Get version
|
||||
id: version
|
||||
run: echo "version=$(cat pkg/version/version)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build binary
|
||||
env:
|
||||
CGO_ENABLED: 0
|
||||
GOOS: ${{ matrix.goos }}
|
||||
GOARCH: ${{ matrix.goarch }}
|
||||
run: |
|
||||
VERSION=${{ steps.version.outputs.version }}
|
||||
OUTPUT="orly-${VERSION}-${{ matrix.platform }}${{ matrix.ext }}"
|
||||
go build -ldflags "-s -w -X main.version=${VERSION}" -o ${OUTPUT} .
|
||||
sha256sum ${OUTPUT} > ${OUTPUT}.sha256
|
||||
|
||||
- name: Download runtime library
|
||||
run: |
|
||||
VERSION=${{ steps.version.outputs.version }}
|
||||
LIB="${{ matrix.lib }}"
|
||||
wget -q "https://git.mleku.dev/mleku/nostr/raw/branch/main/crypto/p8k/${LIB}" -O "${LIB}" || true
|
||||
if [ -f "${LIB}" ]; then
|
||||
sha256sum "${LIB}" > "${LIB}.sha256"
|
||||
fi
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: orly-${{ matrix.platform }}
|
||||
path: |
|
||||
orly-*
|
||||
libsecp256k1*
|
||||
|
||||
release:
|
||||
needs: build
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Get version
|
||||
id: version
|
||||
run: echo "version=$(cat pkg/version/version)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Download all artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: artifacts
|
||||
merge-multiple: true
|
||||
|
||||
- name: Create combined checksums
|
||||
run: |
|
||||
cd artifacts
|
||||
cat *.sha256 | sort -k2 > SHA256SUMS.txt
|
||||
rm -f *.sha256
|
||||
|
||||
- name: List release files
|
||||
run: ls -la artifacts/
|
||||
|
||||
- name: Create Release
|
||||
uses: softprops/action-gh-release@v1
|
||||
with:
|
||||
name: ORLY ${{ steps.version.outputs.version }}
|
||||
body: |
|
||||
## ORLY ${{ steps.version.outputs.version }}
|
||||
|
||||
### Downloads
|
||||
|
||||
Download the appropriate binary for your platform. The `libsecp256k1` library is optional but recommended for better cryptographic performance.
|
||||
|
||||
### Installation
|
||||
|
||||
1. Download the binary for your platform
|
||||
2. (Optional) Download the corresponding `libsecp256k1` library
|
||||
3. Place both files in the same directory
|
||||
4. Make the binary executable: `chmod +x orly-*`
|
||||
5. Run: `./orly-*-linux-amd64` (or your platform's binary)
|
||||
|
||||
### Verify Downloads
|
||||
|
||||
```bash
|
||||
sha256sum -c SHA256SUMS.txt
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
See the [repository documentation](https://git.mleku.dev/mleku/next.orly.dev) for configuration options.
|
||||
files: |
|
||||
artifacts/*
|
||||
draft: false
|
||||
prerelease: false
|
||||
254
BUG_REPORTS_AND_FEATURE_REQUEST_PROTOCOL.md
Normal file
254
BUG_REPORTS_AND_FEATURE_REQUEST_PROTOCOL.md
Normal file
@@ -0,0 +1,254 @@
|
||||
# Feature Request and Bug Report Protocol
|
||||
|
||||
This document describes how to submit effective bug reports and feature requests for ORLY relay. Following these guidelines helps maintainers understand and resolve issues quickly.
|
||||
|
||||
## Before Submitting
|
||||
|
||||
1. **Search existing issues** - Your issue may already be reported or discussed
|
||||
2. **Check documentation** - Review `CLAUDE.md`, `docs/`, and `pkg/*/README.md` files
|
||||
3. **Verify with latest version** - Ensure the issue exists in the current release
|
||||
4. **Test with default configuration** - Rule out configuration-specific problems
|
||||
|
||||
## Bug Reports
|
||||
|
||||
### Required Information
|
||||
|
||||
**Title**: Concise summary of the problem
|
||||
- Good: "Kind 3 events with 8000+ follows truncated on save"
|
||||
- Bad: "Events not saving" or "Bug in database"
|
||||
|
||||
**Environment**:
|
||||
```
|
||||
ORLY version: (output of ./orly version)
|
||||
OS: (e.g., Ubuntu 24.04, macOS 14.2)
|
||||
Go version: (output of go version)
|
||||
Database backend: (badger/neo4j/wasmdb)
|
||||
```
|
||||
|
||||
**Configuration** (relevant settings only):
|
||||
```bash
|
||||
ORLY_DB_TYPE=badger
|
||||
ORLY_POLICY_ENABLED=true
|
||||
# Include any non-default settings
|
||||
```
|
||||
|
||||
**Steps to Reproduce**:
|
||||
1. Start relay with configuration X
|
||||
2. Connect client and send event Y
|
||||
3. Query for event with filter Z
|
||||
4. Observe error/unexpected behavior
|
||||
|
||||
**Expected Behavior**: What should happen
|
||||
|
||||
**Actual Behavior**: What actually happens
|
||||
|
||||
**Logs**: Include relevant log output with `ORLY_LOG_LEVEL=debug` or `trace`
|
||||
|
||||
### Minimal Reproduction
|
||||
|
||||
The most effective bug reports include a minimal reproduction case:
|
||||
|
||||
```bash
|
||||
# Example: Script that demonstrates the issue
|
||||
export ORLY_LOG_LEVEL=debug
|
||||
./orly &
|
||||
sleep 2
|
||||
|
||||
# Send problematic event
|
||||
echo '["EVENT", {...}]' | websocat ws://localhost:3334
|
||||
|
||||
# Show the failure
|
||||
echo '["REQ", "test", {"kinds": [1]}]' | websocat ws://localhost:3334
|
||||
```
|
||||
|
||||
Or provide a failing test case:
|
||||
|
||||
```go
|
||||
func TestReproduceBug(t *testing.T) {
|
||||
// Setup
|
||||
db := setupTestDB(t)
|
||||
|
||||
// This should work but fails
|
||||
event := createTestEvent(kind, content)
|
||||
err := db.SaveEvent(ctx, event)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Query returns unexpected result
|
||||
results, err := db.QueryEvents(ctx, filter)
|
||||
assert.Len(t, results, 1) // Fails: got 0
|
||||
}
|
||||
```
|
||||
|
||||
## Feature Requests
|
||||
|
||||
### Required Information
|
||||
|
||||
**Title**: Clear description of the feature
|
||||
- Good: "Add WebSocket compression support (permessage-deflate)"
|
||||
- Bad: "Make it faster" or "New feature idea"
|
||||
|
||||
**Problem Statement**: What problem does this solve?
|
||||
```
|
||||
Currently, clients with high-latency connections experience slow sync times
|
||||
because event data is transmitted uncompressed. A typical session transfers
|
||||
50MB of JSON that could be reduced to ~10MB with compression.
|
||||
```
|
||||
|
||||
**Proposed Solution**: How should it work?
|
||||
```
|
||||
Add optional permessage-deflate WebSocket extension support:
|
||||
- New config: ORLY_WS_COMPRESSION=true
|
||||
- Negotiate compression during WebSocket handshake
|
||||
- Apply to messages over configurable threshold (default 1KB)
|
||||
```
|
||||
|
||||
**Use Case**: Who benefits and how?
|
||||
```
|
||||
- Mobile clients on cellular connections
|
||||
- Users syncing large follow lists
|
||||
- Relays with bandwidth constraints
|
||||
```
|
||||
|
||||
**Alternatives Considered** (optional):
|
||||
```
|
||||
- Application-level compression: Rejected because it requires client changes
|
||||
- HTTP/2: Not applicable for WebSocket connections
|
||||
```
|
||||
|
||||
### Implementation Notes (optional)
|
||||
|
||||
If you have implementation ideas:
|
||||
|
||||
```
|
||||
Suggested approach:
|
||||
1. Add compression config to app/config/config.go
|
||||
2. Modify gorilla/websocket upgrader in app/handle-websocket.go
|
||||
3. Add compression threshold check before WriteMessage()
|
||||
|
||||
Reference: gorilla/websocket has built-in permessage-deflate support
|
||||
```
|
||||
|
||||
## What Makes Reports Effective
|
||||
|
||||
**Do**:
|
||||
- Be specific and factual
|
||||
- Include version numbers and exact error messages
|
||||
- Provide reproducible steps
|
||||
- Attach relevant logs (redact sensitive data)
|
||||
- Link to related issues or discussions
|
||||
- Respond to follow-up questions promptly
|
||||
|
||||
**Avoid**:
|
||||
- Vague descriptions ("it doesn't work")
|
||||
- Multiple unrelated issues in one report
|
||||
- Assuming the cause without evidence
|
||||
- Demanding immediate fixes
|
||||
- Duplicating existing issues
|
||||
|
||||
## Issue Labels
|
||||
|
||||
When applicable, suggest appropriate labels:
|
||||
|
||||
| Label | Use When |
|
||||
|-------|----------|
|
||||
| `bug` | Something isn't working as documented |
|
||||
| `enhancement` | New feature or improvement |
|
||||
| `performance` | Speed or resource usage issue |
|
||||
| `documentation` | Docs are missing or incorrect |
|
||||
| `question` | Clarification needed (not a bug) |
|
||||
| `good first issue` | Suitable for new contributors |
|
||||
|
||||
## Response Expectations
|
||||
|
||||
- **Acknowledgment**: Within a few days
|
||||
- **Triage**: Issue labeled and prioritized
|
||||
- **Resolution**: Depends on complexity and priority
|
||||
|
||||
Complex features may require discussion before implementation. Bug fixes for critical issues are prioritized.
|
||||
|
||||
## Following Up
|
||||
|
||||
If your issue hasn't received attention:
|
||||
|
||||
1. **Check issue status** - It may be labeled or assigned
|
||||
2. **Add new information** - If you've discovered more details
|
||||
3. **Politely bump** - A single follow-up comment after 2 weeks is appropriate
|
||||
4. **Consider contributing** - PRs that fix bugs or implement features are welcome
|
||||
|
||||
## Contributing Fixes
|
||||
|
||||
If you want to fix a bug or implement a feature yourself:
|
||||
|
||||
1. Comment on the issue to avoid duplicate work
|
||||
2. Follow the coding patterns in `CLAUDE.md`
|
||||
3. Include tests for your changes
|
||||
4. Keep PRs focused on a single issue
|
||||
5. Reference the issue number in your PR
|
||||
|
||||
## Security Issues
|
||||
|
||||
**Do not report security vulnerabilities in public issues.**
|
||||
|
||||
For security-sensitive bugs:
|
||||
- Contact maintainers directly
|
||||
- Provide detailed reproduction steps privately
|
||||
- Allow reasonable time for a fix before disclosure
|
||||
|
||||
## Examples
|
||||
|
||||
### Good Bug Report
|
||||
|
||||
```markdown
|
||||
## WebSocket disconnects after 60 seconds of inactivity
|
||||
|
||||
**Environment**:
|
||||
- ORLY v0.34.5
|
||||
- Ubuntu 22.04
|
||||
- Go 1.25.3
|
||||
- Badger backend
|
||||
|
||||
**Steps to Reproduce**:
|
||||
1. Connect to relay: `websocat ws://localhost:3334`
|
||||
2. Send subscription: `["REQ", "test", {"kinds": [1], "limit": 1}]`
|
||||
3. Wait 60 seconds without sending messages
|
||||
4. Observe connection closed
|
||||
|
||||
**Expected**: Connection remains open (Nostr relays should maintain persistent connections)
|
||||
|
||||
**Actual**: Connection closed with code 1000 after exactly 60 seconds
|
||||
|
||||
**Logs** (ORLY_LOG_LEVEL=debug):
|
||||
```
|
||||
1764783029014485🔎 client timeout, closing connection /app/handle-websocket.go:142
|
||||
```
|
||||
|
||||
**Possible Cause**: May be related to read deadline not being extended on subscription activity
|
||||
```
|
||||
|
||||
### Good Feature Request
|
||||
|
||||
```markdown
|
||||
## Add rate limiting per pubkey
|
||||
|
||||
**Problem**:
|
||||
A single pubkey can flood the relay with events, consuming storage and
|
||||
bandwidth. Currently there's no way to limit per-author submission rate.
|
||||
|
||||
**Proposed Solution**:
|
||||
Add configurable rate limiting:
|
||||
```bash
|
||||
ORLY_RATE_LIMIT_EVENTS_PER_MINUTE=60
|
||||
ORLY_RATE_LIMIT_BURST=10
|
||||
```
|
||||
|
||||
When exceeded, return OK false with "rate-limited" message per NIP-20.
|
||||
|
||||
**Use Case**:
|
||||
- Public relays protecting against spam
|
||||
- Community relays with fair-use policies
|
||||
- Paid relays enforcing subscription tiers
|
||||
|
||||
**Alternatives Considered**:
|
||||
- IP-based limiting: Ineffective because users share IPs and use VPNs
|
||||
- Global limiting: Punishes all users for one bad actor
|
||||
```
|
||||
@@ -147,6 +147,10 @@ export ORLY_SPROCKET_ENABLED=true
|
||||
# Enable policy system
|
||||
export ORLY_POLICY_ENABLED=true
|
||||
|
||||
# Custom policy file path (MUST be ABSOLUTE path starting with /)
|
||||
# Default: ~/.config/ORLY/policy.json (or ~/.config/{ORLY_APP_NAME}/policy.json)
|
||||
# export ORLY_POLICY_PATH=/etc/orly/policy.json
|
||||
|
||||
# Database backend selection (badger, neo4j, or wasmdb)
|
||||
export ORLY_DB_TYPE=badger
|
||||
|
||||
@@ -270,7 +274,8 @@ export ORLY_AUTH_TO_WRITE=false # Require auth only for writes
|
||||
- `none.go` - Open relay (no restrictions)
|
||||
|
||||
**`pkg/policy/`** - Event filtering and validation policies
|
||||
- Policy configuration loaded from `~/.config/ORLY/policy.json`
|
||||
- Policy configuration loaded from `~/.config/ORLY/policy.json` by default
|
||||
- Custom path via `ORLY_POLICY_PATH` (MUST be absolute path starting with `/`)
|
||||
- Per-kind size limits, age restrictions, custom scripts
|
||||
- **Write-Only Validation**: Size, age, tag, and expiry validations apply ONLY to write operations
|
||||
- **Read-Only Filtering**: `read_allow`, `read_deny`, `privileged` apply ONLY to read operations
|
||||
|
||||
101
CONTRIBUTING.md
Normal file
101
CONTRIBUTING.md
Normal file
@@ -0,0 +1,101 @@
|
||||
# Contributing to ORLY
|
||||
|
||||
Thank you for your interest in contributing to ORLY! This document outlines the process for reporting bugs, requesting features, and submitting contributions.
|
||||
|
||||
**Canonical Repository:** https://git.mleku.dev/mleku/next.orly.dev
|
||||
|
||||
## Issue Reporting Policy
|
||||
|
||||
### Before Opening an Issue
|
||||
|
||||
1. **Search existing issues** to avoid duplicates
|
||||
2. **Check the documentation** in the repository
|
||||
3. **Verify your version** - run `./orly version` and ensure you're on a recent release
|
||||
4. **Review the CLAUDE.md** file for configuration guidance
|
||||
|
||||
### Bug Reports
|
||||
|
||||
Use the **Bug Report** template when reporting unexpected behavior. A good bug report includes:
|
||||
|
||||
- **Version information** - exact ORLY version from `./orly version`
|
||||
- **Database backend** - Badger, Neo4j, or WasmDB
|
||||
- **Clear description** - what happened vs. what you expected
|
||||
- **Reproduction steps** - detailed steps to trigger the bug
|
||||
- **Logs** - relevant log output (use `ORLY_LOG_LEVEL=debug` or `trace`)
|
||||
- **Configuration** - relevant environment variables (redact secrets)
|
||||
|
||||
#### Log Levels for Debugging
|
||||
|
||||
```bash
|
||||
export ORLY_LOG_LEVEL=trace # Most verbose
|
||||
export ORLY_LOG_LEVEL=debug # Development debugging
|
||||
export ORLY_LOG_LEVEL=info # Default
|
||||
```
|
||||
|
||||
### Feature Requests
|
||||
|
||||
Use the **Feature Request** template when suggesting new functionality. A good feature request includes:
|
||||
|
||||
- **Problem statement** - what problem does this solve?
|
||||
- **Proposed solution** - specific description of desired behavior
|
||||
- **Alternatives considered** - workarounds you've tried
|
||||
- **Related NIP** - if this implements a Nostr protocol specification
|
||||
- **Impact assessment** - is this a minor tweak or major change?
|
||||
|
||||
#### Feature Categories
|
||||
|
||||
- **Protocol** - NIP implementations and Nostr protocol features
|
||||
- **Database** - Storage backends, indexing, query optimization
|
||||
- **Performance** - Caching, SIMD operations, memory optimization
|
||||
- **Policy** - Access control, event filtering, validation
|
||||
- **Web UI** - Admin interface improvements
|
||||
- **Operations** - Deployment, monitoring, systemd integration
|
||||
|
||||
## Code Contributions
|
||||
|
||||
### Development Setup
|
||||
|
||||
```bash
|
||||
# Clone the repository
|
||||
git clone https://git.mleku.dev/mleku/next.orly.dev.git
|
||||
cd next.orly.dev
|
||||
|
||||
# Build
|
||||
CGO_ENABLED=0 go build -o orly
|
||||
|
||||
# Run tests
|
||||
./scripts/test.sh
|
||||
|
||||
# Build with web UI
|
||||
./scripts/update-embedded-web.sh
|
||||
```
|
||||
|
||||
### Pull Request Guidelines
|
||||
|
||||
1. **One feature/fix per PR** - keep changes focused
|
||||
2. **Write tests** - for new functionality and bug fixes
|
||||
3. **Follow existing patterns** - match the code style of surrounding code
|
||||
4. **Update documentation** - if your change affects configuration or behavior
|
||||
5. **Test your changes** - run `./scripts/test.sh` before submitting
|
||||
|
||||
### Commit Message Format
|
||||
|
||||
```
|
||||
Short summary (72 chars max, imperative mood)
|
||||
|
||||
- Bullet point describing change 1
|
||||
- Bullet point describing change 2
|
||||
|
||||
Files modified:
|
||||
- path/to/file1.go: Description of change
|
||||
- path/to/file2.go: Description of change
|
||||
```
|
||||
|
||||
## Communication
|
||||
|
||||
- **Issues:** https://git.mleku.dev/mleku/next.orly.dev/issues
|
||||
- **Documentation:** https://git.mleku.dev/mleku/next.orly.dev
|
||||
|
||||
## License
|
||||
|
||||
By contributing to ORLY, you agree that your contributions will be licensed under the same license as the project.
|
||||
41
README.md
41
README.md
@@ -1,15 +1,43 @@
|
||||
# next.orly.dev
|
||||
|
||||
---
|
||||
|
||||

|
||||
|
||||

|
||||
[](https://pkg.go.dev/next.orly.dev)
|
||||
[](https://geyser.fund/project/orly)
|
||||
|
||||
zap me: <20>mlekudev@getalby.com
|
||||
zap me: <20>mlekudev@getalby.com
|
||||
|
||||
follow me on [nostr](https://jumble.social/users/npub1fjqqy4a93z5zsjwsfxqhc2764kvykfdyttvldkkkdera8dr78vhsmmleku)
|
||||
|
||||
## ⚠️ Bug Reports & Feature Requests
|
||||
|
||||
**Bug reports and feature requests that do not follow the protocol will not be accepted.**
|
||||
|
||||
Before submitting any issue, you must read and follow [BUG_REPORTS_AND_FEATURE_REQUEST_PROTOCOL.md](./BUG_REPORTS_AND_FEATURE_REQUEST_PROTOCOL.md).
|
||||
|
||||
Requirements:
|
||||
- **Bug reports**: Include environment details, reproduction steps, expected/actual behavior, and logs
|
||||
- **Feature requests**: Include problem statement, proposed solution, and use cases
|
||||
- **Both**: Search existing issues first, verify with latest version, provide minimal reproduction
|
||||
|
||||
Issues missing required information will be closed without review.
|
||||
|
||||
## ⚠️ System Requirements
|
||||
|
||||
> **IMPORTANT: ORLY requires a minimum of 500MB of free memory to operate.**
|
||||
>
|
||||
> The relay uses adaptive PID-controlled rate limiting to manage memory pressure. By default, it will:
|
||||
> - Auto-detect available system memory at startup
|
||||
> - Target 66% of available memory, capped at 1.5GB for optimal performance
|
||||
> - **Fail to start** if less than 500MB is available
|
||||
>
|
||||
> You can override the memory target with `ORLY_RATE_LIMIT_TARGET_MB` (e.g., `ORLY_RATE_LIMIT_TARGET_MB=2000` for 2GB).
|
||||
>
|
||||
> To disable rate limiting (not recommended): `ORLY_RATE_LIMIT_ENABLED=false`
|
||||
|
||||
## About
|
||||
|
||||
ORLY is a nostr relay written from the ground up to be performant, low latency, and built with a number of features designed to make it well suited for:
|
||||
@@ -152,8 +180,8 @@ The relay will:
|
||||
If you're running behind a reverse proxy or tunnel (e.g., Caddy, nginx, Cloudflare Tunnel), the setup is the same. The relay listens locally and your reverse proxy forwards traffic to it:
|
||||
|
||||
```
|
||||
Browser <20> Reverse Proxy <20> ORLY (port 3334) <20> Dev Server (port 8080)
|
||||
<20>
|
||||
Browser <20> Reverse Proxy <20> ORLY (port 3334) <20> Dev Server (port 8080)
|
||||
<20>
|
||||
WebSocket/API
|
||||
```
|
||||
|
||||
@@ -204,7 +232,12 @@ ORLY includes a comprehensive policy system for fine-grained control over event
|
||||
|
||||
```bash
|
||||
export ORLY_POLICY_ENABLED=true
|
||||
# Create policy file at ~/.config/ORLY/policy.json
|
||||
# Default policy file: ~/.config/ORLY/policy.json
|
||||
|
||||
# OPTIONAL: Use a custom policy file location
|
||||
# WARNING: ORLY_POLICY_PATH MUST be an ABSOLUTE path (starting with /)
|
||||
# Relative paths will be REJECTED and the relay will fail to start
|
||||
export ORLY_POLICY_PATH=/etc/orly/policy.json
|
||||
```
|
||||
|
||||
For detailed configuration and examples, see the [Policy Usage Guide](docs/POLICY_USAGE_GUIDE.md).
|
||||
|
||||
@@ -82,7 +82,8 @@ type C struct {
|
||||
DirectorySpiderInterval time.Duration `env:"ORLY_DIRECTORY_SPIDER_INTERVAL" default:"24h" usage:"how often to run directory spider"`
|
||||
DirectorySpiderMaxHops int `env:"ORLY_DIRECTORY_SPIDER_HOPS" default:"3" usage:"maximum hops for relay discovery from seed users"`
|
||||
|
||||
PolicyEnabled bool `env:"ORLY_POLICY_ENABLED" default:"false" usage:"enable policy-based event processing (configuration found in $HOME/.config/ORLY/policy.json)"`
|
||||
PolicyEnabled bool `env:"ORLY_POLICY_ENABLED" default:"false" usage:"enable policy-based event processing (default config: $HOME/.config/ORLY/policy.json)"`
|
||||
PolicyPath string `env:"ORLY_POLICY_PATH" usage:"ABSOLUTE path to policy configuration file (MUST start with /); overrides default location; relative paths are rejected"`
|
||||
|
||||
// NIP-43 Relay Access Metadata and Requests
|
||||
NIP43Enabled bool `env:"ORLY_NIP43_ENABLED" default:"false" usage:"enable NIP-43 relay access metadata and invite system"`
|
||||
@@ -106,18 +107,21 @@ type C struct {
|
||||
SerialCacheEventIds int `env:"ORLY_SERIAL_CACHE_EVENT_IDS" default:"500000" usage:"max event IDs to cache for compact event storage (default: 500000, ~16MB memory)"`
|
||||
|
||||
// Adaptive rate limiting (PID-controlled)
|
||||
RateLimitEnabled bool `env:"ORLY_RATE_LIMIT_ENABLED" default:"false" usage:"enable adaptive PID-controlled rate limiting for database operations"`
|
||||
RateLimitTargetMB int `env:"ORLY_RATE_LIMIT_TARGET_MB" default:"1500" usage:"target memory limit in MB for rate limiting (default: 1500 = 1.5GB)"`
|
||||
RateLimitWriteKp float64 `env:"ORLY_RATE_LIMIT_WRITE_KP" default:"0.5" usage:"PID proportional gain for write operations"`
|
||||
RateLimitWriteKi float64 `env:"ORLY_RATE_LIMIT_WRITE_KI" default:"0.1" usage:"PID integral gain for write operations"`
|
||||
RateLimitWriteKd float64 `env:"ORLY_RATE_LIMIT_WRITE_KD" default:"0.05" usage:"PID derivative gain for write operations (filtered)"`
|
||||
RateLimitReadKp float64 `env:"ORLY_RATE_LIMIT_READ_KP" default:"0.3" usage:"PID proportional gain for read operations"`
|
||||
RateLimitReadKi float64 `env:"ORLY_RATE_LIMIT_READ_KI" default:"0.05" usage:"PID integral gain for read operations"`
|
||||
RateLimitReadKd float64 `env:"ORLY_RATE_LIMIT_READ_KD" default:"0.02" usage:"PID derivative gain for read operations (filtered)"`
|
||||
RateLimitMaxWriteMs int `env:"ORLY_RATE_LIMIT_MAX_WRITE_MS" default:"1000" usage:"maximum delay for write operations in milliseconds"`
|
||||
RateLimitMaxReadMs int `env:"ORLY_RATE_LIMIT_MAX_READ_MS" default:"500" usage:"maximum delay for read operations in milliseconds"`
|
||||
RateLimitWriteTarget float64 `env:"ORLY_RATE_LIMIT_WRITE_TARGET" default:"0.85" usage:"PID setpoint for writes (throttle when load exceeds this, 0.0-1.0)"`
|
||||
RateLimitReadTarget float64 `env:"ORLY_RATE_LIMIT_READ_TARGET" default:"0.90" usage:"PID setpoint for reads (throttle when load exceeds this, 0.0-1.0)"`
|
||||
RateLimitEnabled bool `env:"ORLY_RATE_LIMIT_ENABLED" default:"true" usage:"enable adaptive PID-controlled rate limiting for database operations"`
|
||||
RateLimitTargetMB int `env:"ORLY_RATE_LIMIT_TARGET_MB" default:"0" usage:"target memory limit in MB (0=auto-detect: 66% of available, min 500MB)"`
|
||||
RateLimitWriteKp float64 `env:"ORLY_RATE_LIMIT_WRITE_KP" default:"0.5" usage:"PID proportional gain for write operations"`
|
||||
RateLimitWriteKi float64 `env:"ORLY_RATE_LIMIT_WRITE_KI" default:"0.1" usage:"PID integral gain for write operations"`
|
||||
RateLimitWriteKd float64 `env:"ORLY_RATE_LIMIT_WRITE_KD" default:"0.05" usage:"PID derivative gain for write operations (filtered)"`
|
||||
RateLimitReadKp float64 `env:"ORLY_RATE_LIMIT_READ_KP" default:"0.3" usage:"PID proportional gain for read operations"`
|
||||
RateLimitReadKi float64 `env:"ORLY_RATE_LIMIT_READ_KI" default:"0.05" usage:"PID integral gain for read operations"`
|
||||
RateLimitReadKd float64 `env:"ORLY_RATE_LIMIT_READ_KD" default:"0.02" usage:"PID derivative gain for read operations (filtered)"`
|
||||
RateLimitMaxWriteMs int `env:"ORLY_RATE_LIMIT_MAX_WRITE_MS" default:"1000" usage:"maximum delay for write operations in milliseconds"`
|
||||
RateLimitMaxReadMs int `env:"ORLY_RATE_LIMIT_MAX_READ_MS" default:"500" usage:"maximum delay for read operations in milliseconds"`
|
||||
RateLimitWriteTarget float64 `env:"ORLY_RATE_LIMIT_WRITE_TARGET" default:"0.85" usage:"PID setpoint for writes (throttle when load exceeds this, 0.0-1.0)"`
|
||||
RateLimitReadTarget float64 `env:"ORLY_RATE_LIMIT_READ_TARGET" default:"0.90" usage:"PID setpoint for reads (throttle when load exceeds this, 0.0-1.0)"`
|
||||
RateLimitEmergencyThreshold float64 `env:"ORLY_RATE_LIMIT_EMERGENCY_THRESHOLD" default:"1.167" usage:"memory pressure ratio (target+1/6) to trigger emergency mode with aggressive throttling"`
|
||||
RateLimitRecoveryThreshold float64 `env:"ORLY_RATE_LIMIT_RECOVERY_THRESHOLD" default:"0.833" usage:"memory pressure ratio (target-1/6) below which emergency mode exits (hysteresis)"`
|
||||
RateLimitEmergencyMaxMs int `env:"ORLY_RATE_LIMIT_EMERGENCY_MAX_MS" default:"5000" usage:"maximum delay for writes in emergency mode (milliseconds)"`
|
||||
|
||||
// TLS configuration
|
||||
TLSDomains []string `env:"ORLY_TLS_DOMAINS" usage:"comma-separated list of domains to respond to for TLS"`
|
||||
@@ -457,11 +461,15 @@ func (cfg *C) GetRateLimitConfigValues() (
|
||||
readKp, readKi, readKd float64,
|
||||
maxWriteMs, maxReadMs int,
|
||||
writeTarget, readTarget float64,
|
||||
emergencyThreshold, recoveryThreshold float64,
|
||||
emergencyMaxMs int,
|
||||
) {
|
||||
return cfg.RateLimitEnabled,
|
||||
cfg.RateLimitTargetMB,
|
||||
cfg.RateLimitWriteKp, cfg.RateLimitWriteKi, cfg.RateLimitWriteKd,
|
||||
cfg.RateLimitReadKp, cfg.RateLimitReadKi, cfg.RateLimitReadKd,
|
||||
cfg.RateLimitMaxWriteMs, cfg.RateLimitMaxReadMs,
|
||||
cfg.RateLimitWriteTarget, cfg.RateLimitReadTarget
|
||||
cfg.RateLimitWriteTarget, cfg.RateLimitReadTarget,
|
||||
cfg.RateLimitEmergencyThreshold, cfg.RateLimitRecoveryThreshold,
|
||||
cfg.RateLimitEmergencyMaxMs
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"git.mleku.dev/mleku/nostr/encoders/kind"
|
||||
"git.mleku.dev/mleku/nostr/encoders/reason"
|
||||
"next.orly.dev/pkg/protocol/nip43"
|
||||
"next.orly.dev/pkg/ratelimit"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
@@ -608,6 +609,10 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
|
||||
env.E.Pubkey,
|
||||
)
|
||||
log.I.F("delete event pubkey hex: %s", hex.Enc(env.E.Pubkey))
|
||||
// Apply rate limiting before write operation
|
||||
if l.rateLimiter != nil && l.rateLimiter.IsEnabled() {
|
||||
l.rateLimiter.Wait(saveCtx, int(ratelimit.Write))
|
||||
}
|
||||
if _, err = l.DB.SaveEvent(saveCtx, env.E); err != nil {
|
||||
log.E.F("failed to save delete event %0x: %v", env.E.ID, err)
|
||||
if strings.HasPrefix(err.Error(), "blocked:") {
|
||||
@@ -675,6 +680,10 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
|
||||
// store the event - use a separate context to prevent cancellation issues
|
||||
saveCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
// Apply rate limiting before write operation
|
||||
if l.rateLimiter != nil && l.rateLimiter.IsEnabled() {
|
||||
l.rateLimiter.Wait(saveCtx, int(ratelimit.Write))
|
||||
}
|
||||
// log.I.F("saving event %0x, %s", env.E.ID, env.E.Serialize())
|
||||
if _, err = l.DB.SaveEvent(saveCtx, env.E); err != nil {
|
||||
if strings.HasPrefix(err.Error(), "blocked:") {
|
||||
|
||||
@@ -3,9 +3,7 @@ package app
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/adrg/xdg"
|
||||
"lol.mleku.dev/log"
|
||||
"git.mleku.dev/mleku/nostr/encoders/event"
|
||||
"git.mleku.dev/mleku/nostr/encoders/filter"
|
||||
@@ -76,8 +74,8 @@ func (l *Listener) HandlePolicyConfigUpdate(ev *event.E) error {
|
||||
|
||||
log.I.F("policy config validation passed")
|
||||
|
||||
// Get config path for saving
|
||||
configPath := filepath.Join(xdg.ConfigHome, l.Config.AppName, "policy.json")
|
||||
// Get config path for saving (uses custom path if set, otherwise default)
|
||||
configPath := l.policyManager.ConfigPath()
|
||||
|
||||
// 3. Pause ALL message processing (lock mutex)
|
||||
// Note: We need to release the RLock first (which caller holds), then acquire exclusive Lock
|
||||
|
||||
@@ -74,7 +74,7 @@ func setupPolicyTestListener(t *testing.T, policyAdminHex string) (*Listener, *d
|
||||
}
|
||||
|
||||
// Create policy manager - now config file exists at XDG path
|
||||
policyManager := policy.NewWithManager(ctx, cfg.AppName, cfg.PolicyEnabled)
|
||||
policyManager := policy.NewWithManager(ctx, cfg.AppName, cfg.PolicyEnabled, "")
|
||||
|
||||
server := &Server{
|
||||
Ctx: ctx,
|
||||
|
||||
@@ -87,7 +87,7 @@ func Run(
|
||||
l.sprocketManager = NewSprocketManager(ctx, cfg.AppName, cfg.SprocketEnabled)
|
||||
|
||||
// Initialize policy manager
|
||||
l.policyManager = policy.NewWithManager(ctx, cfg.AppName, cfg.PolicyEnabled)
|
||||
l.policyManager = policy.NewWithManager(ctx, cfg.AppName, cfg.PolicyEnabled, cfg.PolicyPath)
|
||||
|
||||
// Merge policy-defined owners with environment-defined owners
|
||||
// This allows cloud deployments to add owners via policy.json when env vars cannot be modified
|
||||
|
||||
27
docker-compose.yml
Normal file
27
docker-compose.yml
Normal file
@@ -0,0 +1,27 @@
|
||||
version: '3.8'
|
||||
|
||||
services:
|
||||
neo4j:
|
||||
image: neo4j:5-community
|
||||
container_name: orly-neo4j
|
||||
ports:
|
||||
- "7474:7474" # HTTP
|
||||
- "7687:7687" # Bolt
|
||||
environment:
|
||||
- NEO4J_AUTH=neo4j/password
|
||||
- NEO4J_PLUGINS=["apoc"]
|
||||
- NEO4J_dbms_memory_heap_initial__size=512m
|
||||
- NEO4J_dbms_memory_heap_max__size=1G
|
||||
- NEO4J_dbms_memory_pagecache_size=512m
|
||||
volumes:
|
||||
- neo4j-data:/data
|
||||
- neo4j-logs:/logs
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:7474"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
volumes:
|
||||
neo4j-data:
|
||||
neo4j-logs:
|
||||
129
docs/RATE_LIMITING_TEST_REPORT_BADGER.md
Normal file
129
docs/RATE_LIMITING_TEST_REPORT_BADGER.md
Normal file
@@ -0,0 +1,129 @@
|
||||
# Rate Limiting Test Report: Badger Backend
|
||||
|
||||
**Test Date:** December 12, 2025
|
||||
**Test Duration:** 16 minutes (1,018 seconds)
|
||||
**Import File:** `wot_reference.jsonl` (2.7 GB, 2,158,366 events)
|
||||
|
||||
## Configuration
|
||||
|
||||
| Parameter | Value |
|
||||
|-----------|-------|
|
||||
| Database Backend | Badger |
|
||||
| Target Memory | 1,500 MB |
|
||||
| Emergency Threshold | 1,750 MB (target + 1/6) |
|
||||
| Recovery Threshold | 1,250 MB (target - 1/6) |
|
||||
| Max Write Delay | 1,000 ms (normal), 5,000 ms (emergency) |
|
||||
| Data Directory | `/tmp/orly-badger-test` |
|
||||
|
||||
## Results Summary
|
||||
|
||||
### Memory Management
|
||||
|
||||
| Metric | Value |
|
||||
|--------|-------|
|
||||
| Peak RSS (VmHWM) | 2,892 MB |
|
||||
| Final RSS | 1,353 MB |
|
||||
| Target | 1,500 MB |
|
||||
| **Memory Controlled** | **Yes** (90% of target) |
|
||||
|
||||
The rate limiter successfully controlled memory usage. While peak memory reached 2,892 MB before rate limiting engaged, the system was brought down to and stabilized at ~1,350 MB, well under the 1,500 MB target.
|
||||
|
||||
### Rate Limiting Events
|
||||
|
||||
| Event Type | Count |
|
||||
|------------|-------|
|
||||
| Emergency Mode Entries | 9 |
|
||||
| Emergency Mode Exits | 8 |
|
||||
| Compactions Triggered | 3 |
|
||||
| Compactions Completed | 3 |
|
||||
|
||||
### Compaction Performance
|
||||
|
||||
| Compaction | Duration |
|
||||
|------------|----------|
|
||||
| #1 | 8.16 seconds |
|
||||
| #2 | 8.75 seconds |
|
||||
| #3 | 8.76 seconds |
|
||||
| **Average** | **8.56 seconds** |
|
||||
|
||||
### Import Throughput
|
||||
|
||||
| Phase | Events/sec | MB/sec |
|
||||
|-------|------------|--------|
|
||||
| Initial (no throttling) | 93 | 1.77 |
|
||||
| After throttling | 31 | 0.26 |
|
||||
| **Throttle Factor** | **3x reduction** | |
|
||||
|
||||
The rate limiter reduced import throughput by approximately 3x to maintain memory within target limits.
|
||||
|
||||
### Import Progress
|
||||
|
||||
- **Events Saved:** 30,978 (partial - test stopped for report)
|
||||
- **Data Read:** 258.70 MB
|
||||
- **Database Size:** 369 MB
|
||||
|
||||
## Timeline
|
||||
|
||||
```
|
||||
[00:00] Import started at 93 events/sec
|
||||
[00:20] Memory pressure triggered emergency mode (116.9% > 116.7% threshold)
|
||||
[00:20] Compaction #1 triggered
|
||||
[00:28] Compaction #1 completed (8.16s)
|
||||
[00:30] Emergency mode exited, memory recovered
|
||||
[01:00] Multiple emergency mode cycles as memory fluctuates
|
||||
[05:00] Throughput stabilized at ~50 events/sec
|
||||
[10:00] Throughput further reduced to ~35 events/sec
|
||||
[16:00] Test stopped at 31 events/sec, memory stable at 1,353 MB
|
||||
```
|
||||
|
||||
## Import Rate Over Time
|
||||
|
||||
```
|
||||
Time Events/sec Memory Status
|
||||
------ ---------- -------------
|
||||
00:05 93 Rising
|
||||
00:20 82 Emergency mode entered
|
||||
01:00 72 Recovering
|
||||
03:00 60 Stabilizing
|
||||
06:00 46 Controlled
|
||||
10:00 35 Controlled
|
||||
16:00 31 Stable at ~1,350 MB
|
||||
```
|
||||
|
||||
## Key Observations
|
||||
|
||||
### What Worked Well
|
||||
|
||||
1. **Memory Control:** The PID-based rate limiter successfully prevented memory from exceeding the target for extended periods.
|
||||
|
||||
2. **Emergency Mode:** The hysteresis-based emergency mode (enter at +16.7%, exit at -16.7%) prevented rapid oscillation between modes.
|
||||
|
||||
3. **Automatic Compaction:** When emergency mode triggered, Badger compaction was automatically initiated, helping reclaim memory.
|
||||
|
||||
4. **Progressive Throttling:** Write delays increased progressively with memory pressure, allowing smooth throughput reduction.
|
||||
|
||||
### Areas for Potential Improvement
|
||||
|
||||
1. **Initial Spike:** Memory peaked at 2,892 MB before rate limiting could respond. Consider more aggressive initial throttling or pre-warming.
|
||||
|
||||
2. **Throughput Trade-off:** Import rate dropped from 93 to 31 events/sec (3x reduction). This is the expected cost of memory control.
|
||||
|
||||
3. **Sustained Emergency Mode:** The test showed 9 entries but only 8 exits, indicating the system was in emergency mode at test end. This is acceptable behavior when load is continuous.
|
||||
|
||||
## Conclusion
|
||||
|
||||
The adaptive rate limiting system with emergency mode and automatic compaction **successfully controlled memory usage** for the Badger backend. The system:
|
||||
|
||||
- Prevented sustained memory overflow beyond the target
|
||||
- Automatically triggered compaction during high memory pressure
|
||||
- Smoothly reduced throughput to maintain stability
|
||||
- Demonstrated effective hysteresis to prevent mode oscillation
|
||||
|
||||
**Recommendation:** The rate limiting implementation is ready for production use with Badger backend. For high-throughput imports, users should expect approximately 3x reduction in import speed when memory limits are active.
|
||||
|
||||
## Test Environment
|
||||
|
||||
- **OS:** Linux 6.8.0-87-generic
|
||||
- **Architecture:** x86_64
|
||||
- **Go Version:** 1.25.3
|
||||
- **Badger Version:** v4
|
||||
142
docs/RATE_LIMITING_TEST_REPORT_NEO4J.md
Normal file
142
docs/RATE_LIMITING_TEST_REPORT_NEO4J.md
Normal file
@@ -0,0 +1,142 @@
|
||||
# Rate Limiting Test Report: Neo4j Backend
|
||||
|
||||
**Test Date:** December 12, 2025
|
||||
**Test Duration:** 73 minutes (4,409 seconds)
|
||||
**Import File:** `wot_reference.jsonl` (2.7 GB, 2,158,366 events)
|
||||
|
||||
## Configuration
|
||||
|
||||
| Parameter | Value |
|
||||
|-----------|-------|
|
||||
| Database Backend | Neo4j 5-community (Docker) |
|
||||
| Target Memory | 1,500 MB (relay process) |
|
||||
| Emergency Threshold | 1,167 (target + 1/6) |
|
||||
| Recovery Threshold | 833 (target - 1/6) |
|
||||
| Max Write Delay | 1,000 ms (normal), 5,000 ms (emergency) |
|
||||
| Neo4j Memory Limits | Heap: 512MB-1GB, Page Cache: 512MB |
|
||||
|
||||
## Results Summary
|
||||
|
||||
### Memory Management
|
||||
|
||||
| Component | Metric | Value |
|
||||
|-----------|--------|-------|
|
||||
| **Relay Process** | Peak RSS (VmHWM) | 148 MB |
|
||||
| **Relay Process** | Final RSS | 35 MB |
|
||||
| **Neo4j Container** | Memory Usage | 1.614 GB |
|
||||
| **Neo4j Container** | Memory % | 10.83% of 14.91GB |
|
||||
| **Rate Limiting** | Events Triggered | **0** |
|
||||
|
||||
### Key Finding: Architecture Difference
|
||||
|
||||
Unlike Badger (embedded database), Neo4j runs as a **separate process** in a Docker container. This means:
|
||||
|
||||
1. **Relay process memory stays low** (~35MB) because it's just a client
|
||||
2. **Neo4j manages its own memory** within the container (1.6GB used)
|
||||
3. **Rate limiter monitors relay RSS**, which doesn't reflect Neo4j's actual load
|
||||
4. **No rate limiting triggered** because relay memory never approached the 1.5GB target
|
||||
|
||||
This is architecturally correct - the relay doesn't need memory-based rate limiting for Neo4j because it's not holding the data in process.
|
||||
|
||||
### Event Processing
|
||||
|
||||
| Event Type | Count | Rate |
|
||||
|------------|-------|------|
|
||||
| Contact Lists (kind 3) | 174,836 | 40 events/sec |
|
||||
| Mute Lists (kind 10000) | 4,027 | 0.9 events/sec |
|
||||
| **Total Social Events** | **178,863** | **41 events/sec** |
|
||||
|
||||
### Neo4j Performance
|
||||
|
||||
| Metric | Value |
|
||||
|--------|-------|
|
||||
| CPU Usage | 40-45% |
|
||||
| Memory | Stable at 1.6GB |
|
||||
| Disk Writes | 12.7 GB |
|
||||
| Network In | 1.8 GB |
|
||||
| Network Out | 583 MB |
|
||||
| Process Count | 77-82 |
|
||||
|
||||
### Import Throughput Over Time
|
||||
|
||||
```
|
||||
Time Contact Lists Delta/min Neo4j Memory
|
||||
------ ------------- --------- ------------
|
||||
08:28 0 - 1.57 GB
|
||||
08:47 31,257 ~2,100 1.61 GB
|
||||
08:52 42,403 ~2,200 1.61 GB
|
||||
09:02 67,581 ~2,500 1.61 GB
|
||||
09:12 97,316 ~3,000 1.60 GB
|
||||
09:22 112,681 ~3,100 1.61 GB
|
||||
09:27 163,252 ~10,000* 1.61 GB
|
||||
09:41 174,836 ~2,400 1.61 GB
|
||||
```
|
||||
*Spike may be due to batch processing of cached events
|
||||
|
||||
### Memory Stability
|
||||
|
||||
Neo4j's memory usage remained remarkably stable throughout the test:
|
||||
|
||||
```
|
||||
Sample Memory Delta
|
||||
-------- -------- -----
|
||||
08:47 1.605 GB -
|
||||
09:02 1.611 GB +6 MB
|
||||
09:12 1.603 GB -8 MB
|
||||
09:27 1.607 GB +4 MB
|
||||
09:41 1.614 GB +7 MB
|
||||
```
|
||||
|
||||
**Variance:** < 15 MB over 73 minutes - excellent stability.
|
||||
|
||||
## Architecture Comparison: Badger vs Neo4j
|
||||
|
||||
| Aspect | Badger | Neo4j |
|
||||
|--------|--------|-------|
|
||||
| Database Type | Embedded | External (Docker) |
|
||||
| Memory Consumer | Relay process | Container process |
|
||||
| Rate Limiter Target | Relay RSS | Relay RSS |
|
||||
| Rate Limiting Effectiveness | High | Low* |
|
||||
| Compaction Triggering | Yes | N/A |
|
||||
| Emergency Mode | Yes | Not triggered |
|
||||
|
||||
*The current rate limiter design targets relay process memory, which doesn't reflect Neo4j's actual resource usage.
|
||||
|
||||
## Recommendations for Neo4j Rate Limiting
|
||||
|
||||
The current implementation monitors **relay process memory**, but for Neo4j this should be enhanced to monitor:
|
||||
|
||||
### 1. Query Latency-Based Throttling (Currently Implemented)
|
||||
The Neo4j monitor already tracks query latency via `RecordQueryLatency()` and `RecordWriteLatency()`, using EMA smoothing. Latency > 500ms increases reported load.
|
||||
|
||||
### 2. Connection Pool Saturation (Currently Implemented)
|
||||
The `querySem` semaphore limits concurrent queries (default 10). When full, the load metric increases.
|
||||
|
||||
### 3. Future Enhancement: Container Metrics
|
||||
Consider monitoring Neo4j container metrics via:
|
||||
- Docker stats API for memory/CPU
|
||||
- Neo4j metrics endpoint for transaction counts, cache hit rates
|
||||
- JMX metrics for heap usage and GC pressure
|
||||
|
||||
## Conclusion
|
||||
|
||||
The Neo4j import test demonstrated:
|
||||
|
||||
1. **Stable Memory Usage**: Neo4j maintained consistent 1.6GB memory throughout
|
||||
2. **Consistent Throughput**: ~40 social events/second with no degradation
|
||||
3. **Architectural Isolation**: Relay stays lightweight while Neo4j handles data
|
||||
4. **Rate Limiter Design**: Current RSS-based limiting is appropriate for Badger but less relevant for Neo4j
|
||||
|
||||
**Recommendation:** The Neo4j rate limiter is correctly implemented but relies on latency and concurrency metrics rather than memory pressure. For production deployments with Neo4j, configure appropriate Neo4j memory limits in the container (heap_initial, heap_max, pagecache) rather than relying on relay-side rate limiting.
|
||||
|
||||
## Test Environment
|
||||
|
||||
- **OS:** Linux 6.8.0-87-generic
|
||||
- **Architecture:** x86_64
|
||||
- **Go Version:** 1.25.3
|
||||
- **Neo4j Version:** 5.26.18 (community)
|
||||
- **Container:** Docker with 14.91GB limit
|
||||
- **Neo4j Settings:**
|
||||
- Heap Initial: 512MB
|
||||
- Heap Max: 1GB
|
||||
- Page Cache: 512MB
|
||||
554
docs/applesauce-reference.md
Normal file
554
docs/applesauce-reference.md
Normal file
@@ -0,0 +1,554 @@
|
||||
# Applesauce Library Reference
|
||||
|
||||
A collection of TypeScript libraries for building Nostr web clients. Powers the noStrudel client.
|
||||
|
||||
**Repository:** https://github.com/hzrd149/applesauce
|
||||
**Documentation:** https://hzrd149.github.io/applesauce/
|
||||
|
||||
---
|
||||
|
||||
## Packages Overview
|
||||
|
||||
| Package | Description |
|
||||
|---------|-------------|
|
||||
| `applesauce-core` | Event utilities, key management, protocols, event storage |
|
||||
| `applesauce-relay` | Relay connection management with auto-reconnect |
|
||||
| `applesauce-signers` | Signing interfaces for multiple providers |
|
||||
| `applesauce-loaders` | High-level data loading for common Nostr patterns |
|
||||
| `applesauce-factory` | Event creation and manipulation utilities |
|
||||
| `applesauce-react` | React hooks and providers |
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
# Core package
|
||||
npm install applesauce-core
|
||||
|
||||
# With React support
|
||||
npm install applesauce-core applesauce-react
|
||||
|
||||
# Full stack
|
||||
npm install applesauce-core applesauce-relay applesauce-signers applesauce-loaders applesauce-factory
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Core Concepts
|
||||
|
||||
### Philosophy
|
||||
- **Reactive Architecture**: Built on RxJS observables for event-driven programming
|
||||
- **No Vendor Lock-in**: Generic interfaces compatible with other Nostr libraries
|
||||
- **Modularity**: Tree-shakeable packages - include only what you need
|
||||
|
||||
---
|
||||
|
||||
## EventStore
|
||||
|
||||
The foundational class for managing Nostr event state.
|
||||
|
||||
### Creation
|
||||
|
||||
```typescript
|
||||
import { EventStore } from "applesauce-core";
|
||||
|
||||
// Memory-only store
|
||||
const eventStore = new EventStore();
|
||||
|
||||
// With persistent database
|
||||
import { BetterSqlite3EventDatabase } from "applesauce-core/database";
|
||||
const database = new BetterSqlite3EventDatabase("./events.db");
|
||||
const eventStore = new EventStore(database);
|
||||
```
|
||||
|
||||
### Event Management Methods
|
||||
|
||||
```typescript
|
||||
// Add event (returns existing if duplicate, null if rejected)
|
||||
eventStore.add(event, relay?);
|
||||
|
||||
// Remove events
|
||||
eventStore.remove(id);
|
||||
eventStore.remove(event);
|
||||
eventStore.removeByFilters(filters);
|
||||
|
||||
// Update event (notify store of modifications)
|
||||
eventStore.update(event);
|
||||
```
|
||||
|
||||
### Query Methods
|
||||
|
||||
```typescript
|
||||
// Check existence
|
||||
eventStore.hasEvent(id);
|
||||
|
||||
// Get single event
|
||||
eventStore.getEvent(id);
|
||||
|
||||
// Get by filters
|
||||
eventStore.getByFilters(filters);
|
||||
|
||||
// Get sorted timeline (newest first)
|
||||
eventStore.getTimeline(filters);
|
||||
|
||||
// Replaceable events
|
||||
eventStore.hasReplaceable(kind, pubkey);
|
||||
eventStore.getReplaceable(kind, pubkey, identifier?);
|
||||
eventStore.getReplaceableHistory(kind, pubkey, identifier?); // requires keepOldVersions: true
|
||||
```
|
||||
|
||||
### Observable Subscriptions
|
||||
|
||||
```typescript
|
||||
// Single event updates
|
||||
eventStore.event(id).subscribe(event => { ... });
|
||||
|
||||
// All matching events
|
||||
eventStore.filters(filters, onlyNew?).subscribe(events => { ... });
|
||||
|
||||
// Sorted event arrays
|
||||
eventStore.timeline(filters, onlyNew?).subscribe(events => { ... });
|
||||
|
||||
// Replaceable events
|
||||
eventStore.replaceable(kind, pubkey).subscribe(event => { ... });
|
||||
|
||||
// Addressable events
|
||||
eventStore.addressable(kind, pubkey, identifier).subscribe(event => { ... });
|
||||
```
|
||||
|
||||
### Helper Subscriptions
|
||||
|
||||
```typescript
|
||||
// Profile (kind 0)
|
||||
eventStore.profile(pubkey).subscribe(profile => { ... });
|
||||
|
||||
// Contacts (kind 3)
|
||||
eventStore.contacts(pubkey).subscribe(contacts => { ... });
|
||||
|
||||
// Mutes (kind 10000)
|
||||
eventStore.mutes(pubkey).subscribe(mutes => { ... });
|
||||
|
||||
// Mailboxes/NIP-65 relays (kind 10002)
|
||||
eventStore.mailboxes(pubkey).subscribe(mailboxes => { ... });
|
||||
|
||||
// Blossom servers (kind 10063)
|
||||
eventStore.blossomServers(pubkey).subscribe(servers => { ... });
|
||||
|
||||
// Reactions (kind 7)
|
||||
eventStore.reactions(event).subscribe(reactions => { ... });
|
||||
|
||||
// Thread replies
|
||||
eventStore.thread(eventId).subscribe(thread => { ... });
|
||||
|
||||
// Comments
|
||||
eventStore.comments(event).subscribe(comments => { ... });
|
||||
```
|
||||
|
||||
### NIP-91 AND Operators
|
||||
|
||||
```typescript
|
||||
// Use & prefix for tags requiring ALL values
|
||||
eventStore.filters({
|
||||
kinds: [1],
|
||||
"&t": ["meme", "cat"], // Must have BOTH tags
|
||||
"#t": ["black", "white"] // Must have black OR white
|
||||
});
|
||||
```
|
||||
|
||||
### Fallback Loaders
|
||||
|
||||
```typescript
|
||||
// Custom async loaders for missing events
|
||||
eventStore.eventLoader = async (pointer) => {
|
||||
// Fetch from relay and return event
|
||||
};
|
||||
|
||||
eventStore.replaceableLoader = async (pointer) => { ... };
|
||||
eventStore.addressableLoader = async (pointer) => { ... };
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
```typescript
|
||||
const eventStore = new EventStore();
|
||||
|
||||
// Keep all versions of replaceable events
|
||||
eventStore.keepOldVersions = true;
|
||||
|
||||
// Keep expired events (default: removes them)
|
||||
eventStore.keepExpired = true;
|
||||
|
||||
// Custom verification
|
||||
eventStore.verifyEvent = (event) => verifySignature(event);
|
||||
|
||||
// Model memory duration (default: 60000ms)
|
||||
eventStore.modelKeepWarm = 60000;
|
||||
```
|
||||
|
||||
### Memory Management
|
||||
|
||||
```typescript
|
||||
// Mark event as in-use
|
||||
eventStore.claim(event, claimId);
|
||||
|
||||
// Check if claimed
|
||||
eventStore.isClaimed(event);
|
||||
|
||||
// Remove claims
|
||||
eventStore.removeClaim(event, claimId);
|
||||
eventStore.clearClaim(event);
|
||||
|
||||
// Prune unclaimed events
|
||||
eventStore.prune(count?);
|
||||
|
||||
// Iterate unclaimed (LRU ordered)
|
||||
for (const event of eventStore.unclaimed()) { ... }
|
||||
```
|
||||
|
||||
### Observable Streams
|
||||
|
||||
```typescript
|
||||
// New events added
|
||||
eventStore.insert$.subscribe(event => { ... });
|
||||
|
||||
// Events modified
|
||||
eventStore.update$.subscribe(event => { ... });
|
||||
|
||||
// Events deleted
|
||||
eventStore.remove$.subscribe(event => { ... });
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## EventFactory
|
||||
|
||||
Primary interface for creating, building, and modifying Nostr events.
|
||||
|
||||
### Initialization
|
||||
|
||||
```typescript
|
||||
import { EventFactory } from "applesauce-factory";
|
||||
|
||||
// Basic
|
||||
const factory = new EventFactory();
|
||||
|
||||
// With signer
|
||||
const factory = new EventFactory({ signer: mySigner });
|
||||
|
||||
// Full configuration
|
||||
const factory = new EventFactory({
|
||||
signer: { getPublicKey, signEvent, nip04?, nip44? },
|
||||
client: { name: "MyApp", address: "31990:..." },
|
||||
getEventRelayHint: (eventId) => "wss://relay.example.com",
|
||||
getPubkeyRelayHint: (pubkey) => "wss://relay.example.com",
|
||||
emojis: emojiArray
|
||||
});
|
||||
```
|
||||
|
||||
### Blueprint-Based Creation
|
||||
|
||||
```typescript
|
||||
import { NoteBlueprint, ReactionBlueprint } from "applesauce-factory/blueprints";
|
||||
|
||||
// Pattern 1: Constructor + arguments
|
||||
const note = await factory.create(NoteBlueprint, "Hello Nostr!");
|
||||
const reaction = await factory.create(ReactionBlueprint, event, "+");
|
||||
|
||||
// Pattern 2: Direct blueprint call
|
||||
const note = await factory.create(NoteBlueprint("Hello Nostr!"));
|
||||
```
|
||||
|
||||
### Custom Event Building
|
||||
|
||||
```typescript
|
||||
import { setContent, includeNameValueTag, includeSingletonTag } from "applesauce-factory/operations";
|
||||
|
||||
const event = await factory.build(
|
||||
{ kind: 30023 },
|
||||
setContent("Article content..."),
|
||||
includeNameValueTag(["title", "My Title"]),
|
||||
includeSingletonTag(["d", "article-id"])
|
||||
);
|
||||
```
|
||||
|
||||
### Event Modification
|
||||
|
||||
```typescript
|
||||
import { addPubkeyTag } from "applesauce-factory/operations";
|
||||
|
||||
// Full modification
|
||||
const modified = await factory.modify(existingEvent, operations);
|
||||
|
||||
// Tags only
|
||||
const updated = await factory.modifyTags(existingEvent, addPubkeyTag("pubkey"));
|
||||
```
|
||||
|
||||
### Helper Methods
|
||||
|
||||
```typescript
|
||||
// Short text note (kind 1)
|
||||
await factory.note("Hello world!", options?);
|
||||
|
||||
// Reply to note
|
||||
await factory.noteReply(parentEvent, "My reply");
|
||||
|
||||
// Reaction (kind 7)
|
||||
await factory.reaction(event, "🔥");
|
||||
|
||||
// Event deletion
|
||||
await factory.delete(events, reason?);
|
||||
|
||||
// Repost/share
|
||||
await factory.share(event);
|
||||
|
||||
// NIP-22 comment
|
||||
await factory.comment(article, "Great article!");
|
||||
```
|
||||
|
||||
### Available Blueprints
|
||||
|
||||
| Blueprint | Description |
|
||||
|-----------|-------------|
|
||||
| `NoteBlueprint(content, options?)` | Standard text notes (kind 1) |
|
||||
| `CommentBlueprint(parent, content, options?)` | Comments on events |
|
||||
| `NoteReplyBlueprint(parent, content, options?)` | Replies to notes |
|
||||
| `ReactionBlueprint(event, emoji?)` | Emoji reactions (kind 7) |
|
||||
| `ShareBlueprint(event, options?)` | Event shares/reposts |
|
||||
| `PicturePostBlueprint(pictures, content, options?)` | Image posts |
|
||||
| `FileMetadataBlueprint(file, options?)` | File metadata |
|
||||
| `DeleteBlueprint(events)` | Event deletion |
|
||||
| `LiveStreamBlueprint(title, options?)` | Live streams |
|
||||
|
||||
---
|
||||
|
||||
## Models
|
||||
|
||||
Pre-built reactive models for common data patterns.
|
||||
|
||||
### Built-in Models
|
||||
|
||||
```typescript
|
||||
import { ProfileModel, TimelineModel, RepliesModel } from "applesauce-core/models";
|
||||
|
||||
// Profile subscription (kind 0)
|
||||
const profile$ = eventStore.model(ProfileModel, pubkey);
|
||||
|
||||
// Timeline subscription
|
||||
const timeline$ = eventStore.model(TimelineModel, { kinds: [1] });
|
||||
|
||||
// Replies subscription (NIP-10 and NIP-22)
|
||||
const replies$ = eventStore.model(RepliesModel, event);
|
||||
```
|
||||
|
||||
### Custom Models
|
||||
|
||||
```typescript
|
||||
import { Model } from "applesauce-core";
|
||||
|
||||
const AppSettingsModel: Model<AppSettings, [string]> = (appId) => {
|
||||
return (store) => {
|
||||
return store.addressable(30078, store.pubkey, appId).pipe(
|
||||
map(event => event ? JSON.parse(event.content) : null)
|
||||
);
|
||||
};
|
||||
};
|
||||
|
||||
// Usage
|
||||
const settings$ = eventStore.model(AppSettingsModel, "my-app");
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Helper Functions
|
||||
|
||||
### Event Utilities
|
||||
|
||||
```typescript
|
||||
import {
|
||||
isEvent,
|
||||
markFromCache,
|
||||
isFromCache,
|
||||
getTagValue,
|
||||
getIndexableTags
|
||||
} from "applesauce-core/helpers";
|
||||
```
|
||||
|
||||
### Profile Management
|
||||
|
||||
```typescript
|
||||
import { getProfileContent, isValidProfile } from "applesauce-core/helpers";
|
||||
|
||||
const profile = getProfileContent(kind0Event);
|
||||
const valid = isValidProfile(profile);
|
||||
```
|
||||
|
||||
### Relay Configuration
|
||||
|
||||
```typescript
|
||||
import { getInboxes, getOutboxes } from "applesauce-core/helpers";
|
||||
|
||||
const inboxRelays = getInboxes(kind10002Event);
|
||||
const outboxRelays = getOutboxes(kind10002Event);
|
||||
```
|
||||
|
||||
### Zap Processing
|
||||
|
||||
```typescript
|
||||
import {
|
||||
isValidZap,
|
||||
getZapSender,
|
||||
getZapRecipient,
|
||||
getZapPayment
|
||||
} from "applesauce-core/helpers";
|
||||
|
||||
if (isValidZap(zapEvent)) {
|
||||
const sender = getZapSender(zapEvent);
|
||||
const recipient = getZapRecipient(zapEvent);
|
||||
const payment = getZapPayment(zapEvent);
|
||||
}
|
||||
```
|
||||
|
||||
### Lightning Parsing
|
||||
|
||||
```typescript
|
||||
import { parseBolt11, parseLNURLOrAddress } from "applesauce-core/helpers";
|
||||
|
||||
const invoice = parseBolt11(bolt11String);
|
||||
const lnurl = parseLNURLOrAddress(addressOrUrl);
|
||||
```
|
||||
|
||||
### Pointer Creation
|
||||
|
||||
```typescript
|
||||
import {
|
||||
getEventPointerFromETag,
|
||||
getAddressPointerFromATag,
|
||||
getProfilePointerFromPTag,
|
||||
getAddressPointerForEvent
|
||||
} from "applesauce-core/helpers";
|
||||
```
|
||||
|
||||
### Tag Validation
|
||||
|
||||
```typescript
|
||||
import { isETag, isATag, isPTag, isDTag, isRTag, isTTag } from "applesauce-core/helpers";
|
||||
```
|
||||
|
||||
### Media Detection
|
||||
|
||||
```typescript
|
||||
import { isAudioURL, isVideoURL, isImageURL, isStreamURL } from "applesauce-core/helpers";
|
||||
|
||||
if (isImageURL(url)) {
|
||||
// Handle image
|
||||
}
|
||||
```
|
||||
|
||||
### Hidden Tags (NIP-51/60)
|
||||
|
||||
```typescript
|
||||
import {
|
||||
canHaveHiddenTags,
|
||||
hasHiddenTags,
|
||||
getHiddenTags,
|
||||
unlockHiddenTags,
|
||||
modifyEventTags
|
||||
} from "applesauce-core/helpers";
|
||||
```
|
||||
|
||||
### Comment Operations
|
||||
|
||||
```typescript
|
||||
import { getCommentRootPointer, getCommentReplyPointer } from "applesauce-core/helpers";
|
||||
```
|
||||
|
||||
### Deletion Handling
|
||||
|
||||
```typescript
|
||||
import { getDeleteIds, getDeleteCoordinates } from "applesauce-core/helpers";
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Common Patterns
|
||||
|
||||
### Basic Nostr Client Setup
|
||||
|
||||
```typescript
|
||||
import { EventStore } from "applesauce-core";
|
||||
import { EventFactory } from "applesauce-factory";
|
||||
import { NoteBlueprint } from "applesauce-factory/blueprints";
|
||||
|
||||
// Initialize stores
|
||||
const eventStore = new EventStore();
|
||||
const factory = new EventFactory({ signer: mySigner });
|
||||
|
||||
// Subscribe to timeline
|
||||
eventStore.timeline({ kinds: [1], limit: 50 }).subscribe(notes => {
|
||||
renderNotes(notes);
|
||||
});
|
||||
|
||||
// Create a new note
|
||||
const note = await factory.create(NoteBlueprint, "Hello Nostr!");
|
||||
|
||||
// Add to store
|
||||
eventStore.add(note);
|
||||
```
|
||||
|
||||
### Profile Display
|
||||
|
||||
```typescript
|
||||
// Subscribe to profile updates
|
||||
eventStore.profile(pubkey).subscribe(event => {
|
||||
if (event) {
|
||||
const profile = getProfileContent(event);
|
||||
displayProfile(profile);
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
### Reactive Reactions
|
||||
|
||||
```typescript
|
||||
// Subscribe to reactions on an event
|
||||
eventStore.reactions(targetEvent).subscribe(reactions => {
|
||||
const likeCount = reactions.filter(r => r.content === "+").length;
|
||||
updateLikeButton(likeCount);
|
||||
});
|
||||
|
||||
// Add a reaction
|
||||
const reaction = await factory.reaction(targetEvent, "🔥");
|
||||
eventStore.add(reaction);
|
||||
```
|
||||
|
||||
### Thread Loading
|
||||
|
||||
```typescript
|
||||
eventStore.thread(rootEventId).subscribe(thread => {
|
||||
renderThread(thread);
|
||||
});
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Nostr Event Kinds Reference
|
||||
|
||||
| Kind | Description |
|
||||
|------|-------------|
|
||||
| 0 | Profile metadata |
|
||||
| 1 | Short text note |
|
||||
| 3 | Contact list |
|
||||
| 7 | Reaction |
|
||||
| 10000 | Mute list |
|
||||
| 10002 | Relay list (NIP-65) |
|
||||
| 10063 | Blossom servers |
|
||||
| 30023 | Long-form content |
|
||||
| 30078 | App-specific data (NIP-78) |
|
||||
|
||||
---
|
||||
|
||||
## Resources
|
||||
|
||||
- **Documentation:** https://hzrd149.github.io/applesauce/
|
||||
- **GitHub:** https://github.com/hzrd149/applesauce
|
||||
- **TypeDoc API:** Check the repository for full API documentation
|
||||
- **Example App:** noStrudel client demonstrates real-world usage
|
||||
325
docs/plans/DECENTRALIZE_NOSTR.md
Normal file
325
docs/plans/DECENTRALIZE_NOSTR.md
Normal file
@@ -0,0 +1,325 @@
|
||||
# ORLY Expansion Plan: Documentation, Installer, Tray, and WireGuard
|
||||
|
||||
## Overview
|
||||
|
||||
Expand ORLY from a relay binary into a complete ecosystem for personal Nostr relay deployment, with:
|
||||
1. **Textbook-style README** - Progressive documentation from novice to expert
|
||||
2. **GUI Installer** - Wails-based setup wizard (Linux + macOS)
|
||||
3. **System Tray** - Service monitoring and control
|
||||
4. **WireGuard Client** - Embedded tunnel for NAT traversal
|
||||
5. **Proxy Server** - Self-hostable AND managed service option
|
||||
|
||||
---
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
USER SYSTEMS
|
||||
┌─────────────────────────────────────────────────────────────────────┐
|
||||
│ │
|
||||
│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │
|
||||
│ │ orly-setup │ │ orly │ │ orly --tray │ │
|
||||
│ │ (Installer) │ │ (Relay) │ │ (Systray) │ │
|
||||
│ └──────┬───────┘ └──────┬───────┘ └──────┬───────┘ │
|
||||
│ │ │ │ │
|
||||
│ │ generates │ serves │ monitors │
|
||||
│ ▼ ▼ ▼ │
|
||||
│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │
|
||||
│ │ ~/.config/ │ │ :3334 WS/HTTP│ │ /api/admin/* │ │
|
||||
│ │ systemd svc │ │ + WG tunnel │ │ status/ctrl │ │
|
||||
│ └──────────────┘ └──────┬───────┘ └──────────────┘ │
|
||||
│ │ │
|
||||
│ ┌───────┴───────┐ │
|
||||
│ │ pkg/tunnel/ │ │
|
||||
│ │ WireGuard │ │
|
||||
│ └───────┬───────┘ │
|
||||
└─────────────────────────────┼───────────────────────────────────────┘
|
||||
│ WG Tunnel (UDP :51820)
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────────┐
|
||||
│ PROXY SERVER │
|
||||
│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │
|
||||
│ │ WG Server │───▶│ Nostr Auth │───▶│ Public Proxy │ │
|
||||
│ │ :51820 │ │ (npub-based) │ │ Egress │ │
|
||||
│ └──────────────┘ └──────────────┘ └──────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Package Structure
|
||||
|
||||
```
|
||||
next.orly.dev/
|
||||
├── cmd/
|
||||
│ ├── orly-setup/ # NEW: Wails installer
|
||||
│ │ ├── main.go
|
||||
│ │ ├── app.go # Backend logic
|
||||
│ │ ├── frontend/ # Svelte wizard UI
|
||||
│ │ │ └── src/steps/ # Welcome, Config, Install, Complete
|
||||
│ │ └── install/
|
||||
│ │ ├── preflight.go # Dependency checks
|
||||
│ │ ├── systemd.go # Service creation
|
||||
│ │ └── verify.go # Post-install checks
|
||||
│ │
|
||||
│ └── proxy-server/ # NEW: WireGuard proxy
|
||||
│ ├── main.go
|
||||
│ ├── server.go # WG server
|
||||
│ ├── auth.go # Nostr auth
|
||||
│ └── registry.go # User management
|
||||
│
|
||||
├── pkg/
|
||||
│ ├── tunnel/ # NEW: Embedded WG client
|
||||
│ │ ├── tunnel.go # Main interface
|
||||
│ │ ├── client.go # wireguard-go wrapper
|
||||
│ │ ├── reconnect.go # Auto-reconnect
|
||||
│ │ └── health.go # Connection health
|
||||
│ │
|
||||
│ ├── tray/ # NEW: System tray
|
||||
│ │ ├── tray.go # Platform abstraction
|
||||
│ │ ├── tray_linux.go # Linux implementation
|
||||
│ │ ├── tray_darwin.go # macOS implementation
|
||||
│ │ └── menu.go # Menu construction
|
||||
│ │
|
||||
│ ├── admin/ # NEW: Admin HTTP API
|
||||
│ │ ├── api.go # Router
|
||||
│ │ ├── status.go # GET /api/admin/status
|
||||
│ │ ├── control.go # POST /api/admin/start|stop|restart
|
||||
│ │ └── logs.go # GET /api/admin/logs (SSE)
|
||||
│ │
|
||||
│ └── interfaces/
|
||||
│ ├── tunnel/tunnel.go # Tunnel interface
|
||||
│ ├── tray/tray.go # Tray interface
|
||||
│ └── admin/admin.go # Admin API interface
|
||||
│
|
||||
└── docs/
|
||||
└── README.adoc # NEW: Textbook-style docs
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1: Documentation Foundation
|
||||
**Files to create/modify:**
|
||||
- `README.adoc` - New textbook-style documentation
|
||||
- `docs/` - Reorganize scattered docs
|
||||
|
||||
**README Structure (Textbook Style):**
|
||||
```
|
||||
Chapter 1: Quick Start (5-minute setup)
|
||||
Chapter 2: Installation (platform-specific)
|
||||
Chapter 3: Configuration (all env vars)
|
||||
Chapter 4: Operations (systemd, monitoring)
|
||||
Chapter 5: Security (TLS, ACLs, policy)
|
||||
Chapter 6: Advanced (Neo4j, clustering, WoT)
|
||||
Chapter 7: Architecture (internals)
|
||||
Appendices: Reference tables, troubleshooting
|
||||
```
|
||||
|
||||
### Phase 2: Admin API
|
||||
**Files to create:**
|
||||
- `pkg/admin/api.go` - Router and middleware
|
||||
- `pkg/admin/status.go` - Status endpoint
|
||||
- `pkg/admin/control.go` - Start/stop/restart
|
||||
- `pkg/admin/logs.go` - Log streaming via SSE
|
||||
- `pkg/interfaces/admin/admin.go` - Interface definition
|
||||
|
||||
**Files to modify:**
|
||||
- `app/server.go` - Register `/api/admin/*` routes
|
||||
- `app/config/config.go` - Add admin API config
|
||||
|
||||
**Endpoints:**
|
||||
```
|
||||
GET /api/admin/status - Relay status, uptime, connections
|
||||
POST /api/admin/start - Start relay (when in tray mode)
|
||||
POST /api/admin/stop - Graceful shutdown
|
||||
POST /api/admin/restart - Graceful restart
|
||||
GET /api/admin/logs - SSE log stream
|
||||
```
|
||||
|
||||
### Phase 3: System Tray
|
||||
**Files to create:**
|
||||
- `pkg/tray/tray.go` - Platform abstraction
|
||||
- `pkg/tray/tray_linux.go` - Linux (dbus/appindicator)
|
||||
- `pkg/tray/tray_darwin.go` - macOS (NSStatusBar)
|
||||
- `pkg/tray/menu.go` - Menu construction
|
||||
- `pkg/interfaces/tray/tray.go` - Interface
|
||||
|
||||
**Files to modify:**
|
||||
- `main.go` - Add `--tray` flag handling
|
||||
- `app/config/config.go` - Add tray config
|
||||
|
||||
**Features:**
|
||||
- Status icon (green/yellow/red)
|
||||
- Start/Stop/Restart menu items
|
||||
- Open Web UI (launches browser)
|
||||
- View Logs submenu
|
||||
- Auto-start on login toggle
|
||||
|
||||
### Phase 4: Installer GUI (Wails)
|
||||
**Files to create:**
|
||||
- `cmd/orly-setup/main.go` - Wails entry point
|
||||
- `cmd/orly-setup/app.go` - Backend methods
|
||||
- `cmd/orly-setup/frontend/` - Svelte wizard
|
||||
- `cmd/orly-setup/install/preflight.go` - Dependency checks
|
||||
- `cmd/orly-setup/install/systemd.go` - Service creation
|
||||
- `cmd/orly-setup/install/config.go` - Config generation
|
||||
- `cmd/orly-setup/install/verify.go` - Post-install checks
|
||||
- `scripts/build-installer.sh` - Build script
|
||||
|
||||
**Wizard Steps:**
|
||||
1. Welcome - Introduction, license
|
||||
2. Preflight - Check Go, disk, ports
|
||||
3. Configuration - Port, data dir, TLS domains
|
||||
4. Admin Setup - Generate or import admin keys
|
||||
5. Database - Choose Badger or Neo4j
|
||||
6. WireGuard (optional) - Tunnel config
|
||||
7. Installation - Create service, start relay
|
||||
8. Complete - Verify and show status
|
||||
|
||||
### Phase 5: WireGuard Client
|
||||
**Files to create:**
|
||||
- `pkg/tunnel/tunnel.go` - Main interface
|
||||
- `pkg/tunnel/client.go` - wireguard-go wrapper
|
||||
- `pkg/tunnel/config.go` - WG configuration
|
||||
- `pkg/tunnel/reconnect.go` - Auto-reconnect logic
|
||||
- `pkg/tunnel/health.go` - Health monitoring
|
||||
- `pkg/tunnel/handoff.go` - Graceful restart
|
||||
- `pkg/interfaces/tunnel/tunnel.go` - Interface
|
||||
|
||||
**Files to modify:**
|
||||
- `app/config/config.go` - Add WG config fields
|
||||
- `app/main.go` - Initialize tunnel on startup
|
||||
- `main.go` - Tunnel lifecycle management
|
||||
|
||||
**Config additions:**
|
||||
```go
|
||||
WGEnabled bool `env:"ORLY_WG_ENABLED" default:"false"`
|
||||
WGServer string `env:"ORLY_WG_SERVER"`
|
||||
WGPrivateKey string `env:"ORLY_WG_PRIVATE_KEY"`
|
||||
WGServerPubKey string `env:"ORLY_WG_PUBLIC_KEY"`
|
||||
WGKeepalive int `env:"ORLY_WG_KEEPALIVE" default:"25"`
|
||||
WGMTU int `env:"ORLY_WG_MTU" default:"1280"`
|
||||
WGReconnect bool `env:"ORLY_WG_RECONNECT" default:"true"`
|
||||
```
|
||||
|
||||
### Phase 6: Proxy Server
|
||||
**Files to create:**
|
||||
- `cmd/proxy-server/main.go` - Entry point
|
||||
- `cmd/proxy-server/server.go` - WG server management
|
||||
- `cmd/proxy-server/auth.go` - Nostr-based auth
|
||||
- `cmd/proxy-server/registry.go` - User/relay registry
|
||||
- `cmd/proxy-server/bandwidth.go` - Traffic monitoring
|
||||
- `cmd/proxy-server/config.go` - Server configuration
|
||||
|
||||
**Features:**
|
||||
- WireGuard server (wireguard-go)
|
||||
- Nostr event-based authentication (NIP-98 style)
|
||||
- User registration via signed events
|
||||
- Relay discovery and assignment
|
||||
- Bandwidth monitoring and quotas
|
||||
- Multi-tenant isolation
|
||||
|
||||
---
|
||||
|
||||
## Key Interfaces
|
||||
|
||||
### Tunnel Interface
|
||||
```go
|
||||
type Tunnel interface {
|
||||
Connect(ctx context.Context) error
|
||||
Disconnect() error
|
||||
Status() TunnelStatus
|
||||
Handoff() (*HandoffState, error)
|
||||
Resume(state *HandoffState) error
|
||||
}
|
||||
```
|
||||
|
||||
### Admin API Interface
|
||||
```go
|
||||
type AdminAPI interface {
|
||||
Status() (*RelayStatus, error)
|
||||
Start() error
|
||||
Stop() error
|
||||
Restart() error
|
||||
Logs(ctx context.Context, lines int) (<-chan LogEntry, error)
|
||||
}
|
||||
```
|
||||
|
||||
### Tray Interface
|
||||
```go
|
||||
type TrayApp interface {
|
||||
Run() error
|
||||
Quit()
|
||||
UpdateStatus(status StatusLevel, tooltip string)
|
||||
ShowNotification(title, message string)
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Dependencies to Add
|
||||
|
||||
```go
|
||||
// go.mod additions
|
||||
require (
|
||||
github.com/wailsapp/wails/v2 v2.x.x // Installer GUI
|
||||
golang.zx2c4.com/wireguard v0.x.x // WireGuard client
|
||||
github.com/getlantern/systray v1.x.x // System tray (or fyne.io/systray)
|
||||
)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Build Commands
|
||||
|
||||
```bash
|
||||
# Standard relay build (unchanged)
|
||||
CGO_ENABLED=0 go build -o orly
|
||||
|
||||
# Relay with tray support
|
||||
CGO_ENABLED=0 go build -tags tray -o orly
|
||||
|
||||
# Installer GUI
|
||||
cd cmd/orly-setup && wails build -platform linux/amd64,darwin/amd64
|
||||
|
||||
# Proxy server
|
||||
CGO_ENABLED=0 go build -o orly-proxy ./cmd/proxy-server
|
||||
|
||||
# All platforms
|
||||
./scripts/build-all.sh
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Critical Files Reference
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `app/config/config.go` | Add WG, tray, admin API config |
|
||||
| `app/server.go` | Register admin API routes |
|
||||
| `main.go` | Add --tray flag, WG initialization |
|
||||
| `scripts/deploy.sh` | Pattern for installer service creation |
|
||||
| `app/web/src/App.svelte` | Pattern for installer UI |
|
||||
|
||||
---
|
||||
|
||||
## Backward Compatibility
|
||||
|
||||
- Main `orly` binary behavior unchanged without flags
|
||||
- All new features opt-in via environment variables
|
||||
- WireGuard gracefully degrades if connection fails
|
||||
- Tray mode only activates with `--tray` flag
|
||||
- Admin API can be disabled via `ORLY_ADMIN_API_ENABLED=false`
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
1. New user can install via GUI wizard in < 5 minutes
|
||||
2. README guides user from zero to running relay
|
||||
3. System tray provides one-click relay management
|
||||
4. WireGuard tunnel auto-connects and reconnects
|
||||
5. Proxy server enables home relay exposure without port forwarding
|
||||
6. All existing functionality preserved
|
||||
3
go.mod
3
go.mod
@@ -3,7 +3,7 @@ module next.orly.dev
|
||||
go 1.25.3
|
||||
|
||||
require (
|
||||
git.mleku.dev/mleku/nostr v1.0.8
|
||||
git.mleku.dev/mleku/nostr v1.0.9
|
||||
github.com/adrg/xdg v0.5.3
|
||||
github.com/aperturerobotics/go-indexeddb v0.2.3
|
||||
github.com/dgraph-io/badger/v4 v4.8.0
|
||||
@@ -14,6 +14,7 @@ require (
|
||||
github.com/minio/sha256-simd v1.0.1
|
||||
github.com/nbd-wtf/go-nostr v0.52.0
|
||||
github.com/neo4j/neo4j-go-driver/v5 v5.28.4
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
|
||||
github.com/pkg/profile v1.7.0
|
||||
github.com/sosodev/duration v1.3.1
|
||||
github.com/stretchr/testify v1.11.1
|
||||
|
||||
6
go.sum
6
go.sum
@@ -1,5 +1,5 @@
|
||||
git.mleku.dev/mleku/nostr v1.0.8 h1:YYREdIxobEqYkzxQ7/5ALACPzLkiHW+CTira+VvSQZk=
|
||||
git.mleku.dev/mleku/nostr v1.0.8/go.mod h1:iYTlg2WKJXJ0kcsM6QBGOJ0UDiJidMgL/i64cHyPjZc=
|
||||
git.mleku.dev/mleku/nostr v1.0.9 h1:aiN0ihnXzEpboXjW4u8qr5XokLQqg4P0XSZ1Y273qM0=
|
||||
git.mleku.dev/mleku/nostr v1.0.9/go.mod h1:iYTlg2WKJXJ0kcsM6QBGOJ0UDiJidMgL/i64cHyPjZc=
|
||||
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/ImVexed/fasturl v0.0.0-20230304231329-4e41488060f3 h1:ClzzXMDDuUbWfNNZqGeYq4PnYOlwlOVIvSyNaIy0ykg=
|
||||
@@ -111,6 +111,8 @@ github.com/nbd-wtf/go-nostr v0.52.0/go.mod h1:4avYoc9mDGZ9wHsvCOhHH9vPzKucCfuYBt
|
||||
github.com/neo4j/neo4j-go-driver/v5 v5.28.4 h1:7toxehVcYkZbyxV4W3Ib9VcnyRBQPucF+VwNNmtSXi4=
|
||||
github.com/neo4j/neo4j-go-driver/v5 v5.28.4/go.mod h1:Vff8OwT7QpLm7L2yYr85XNWe9Rbqlbeb9asNXJTHO4k=
|
||||
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
|
||||
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
||||
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
|
||||
56
main.go
56
main.go
@@ -21,7 +21,7 @@ import (
|
||||
"next.orly.dev/pkg/acl"
|
||||
"git.mleku.dev/mleku/nostr/crypto/keys"
|
||||
"next.orly.dev/pkg/database"
|
||||
_ "next.orly.dev/pkg/neo4j" // Import to register neo4j factory
|
||||
neo4jdb "next.orly.dev/pkg/neo4j" // Import for neo4j factory and type
|
||||
"git.mleku.dev/mleku/nostr/encoders/hex"
|
||||
"next.orly.dev/pkg/ratelimit"
|
||||
"next.orly.dev/pkg/utils/interrupt"
|
||||
@@ -343,26 +343,72 @@ func main() {
|
||||
writeKp, writeKi, writeKd,
|
||||
readKp, readKi, readKd,
|
||||
maxWriteMs, maxReadMs,
|
||||
writeTarget, readTarget := cfg.GetRateLimitConfigValues()
|
||||
writeTarget, readTarget,
|
||||
emergencyThreshold, recoveryThreshold,
|
||||
emergencyMaxMs := cfg.GetRateLimitConfigValues()
|
||||
|
||||
if rateLimitEnabled {
|
||||
// Auto-detect memory target if set to 0 (default)
|
||||
if targetMB == 0 {
|
||||
var memErr error
|
||||
targetMB, memErr = ratelimit.CalculateTargetMemoryMB(targetMB)
|
||||
if memErr != nil {
|
||||
log.F.F("FATAL: %v", memErr)
|
||||
log.F.F("There is not enough memory to run this relay in this environment.")
|
||||
log.F.F("Available: %dMB, Required minimum: %dMB",
|
||||
ratelimit.DetectAvailableMemoryMB(), ratelimit.MinimumMemoryMB)
|
||||
os.Exit(1)
|
||||
}
|
||||
stats := ratelimit.GetMemoryStats(targetMB)
|
||||
// Calculate what 66% would be to determine if we hit the cap
|
||||
calculated66 := int(float64(stats.AvailableMB) * ratelimit.AutoDetectMemoryFraction)
|
||||
if calculated66 > ratelimit.DefaultMaxMemoryMB {
|
||||
log.I.F("memory auto-detected: total=%dMB, available=%dMB, target=%dMB (capped at default max, 66%% would be %dMB)",
|
||||
stats.TotalMB, stats.AvailableMB, targetMB, calculated66)
|
||||
} else {
|
||||
log.I.F("memory auto-detected: total=%dMB, available=%dMB, target=%dMB (66%% of available)",
|
||||
stats.TotalMB, stats.AvailableMB, targetMB)
|
||||
}
|
||||
} else {
|
||||
// Validate explicitly configured target
|
||||
_, memErr := ratelimit.CalculateTargetMemoryMB(targetMB)
|
||||
if memErr != nil {
|
||||
log.F.F("FATAL: %v", memErr)
|
||||
log.F.F("Configured target memory %dMB is below minimum required %dMB.",
|
||||
targetMB, ratelimit.MinimumMemoryMB)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
rlConfig := ratelimit.NewConfigFromValues(
|
||||
rateLimitEnabled, targetMB,
|
||||
writeKp, writeKi, writeKd,
|
||||
readKp, readKi, readKd,
|
||||
maxWriteMs, maxReadMs,
|
||||
writeTarget, readTarget,
|
||||
emergencyThreshold, recoveryThreshold,
|
||||
emergencyMaxMs,
|
||||
)
|
||||
|
||||
// Create appropriate monitor based on database type
|
||||
if badgerDB, ok := db.(*database.D); ok {
|
||||
limiter = ratelimit.NewBadgerLimiter(rlConfig, badgerDB.DB)
|
||||
// Set the rate limiter on the database for import operations
|
||||
badgerDB.SetRateLimiter(limiter)
|
||||
log.I.F("rate limiter configured for Badger backend (target: %dMB)", targetMB)
|
||||
} else if n4jDB, ok := db.(*neo4jdb.N); ok {
|
||||
// Create Neo4j rate limiter with access to driver and querySem
|
||||
limiter = ratelimit.NewNeo4jLimiter(
|
||||
rlConfig,
|
||||
n4jDB.Driver(),
|
||||
n4jDB.QuerySem(),
|
||||
n4jDB.MaxConcurrentQueries(),
|
||||
)
|
||||
log.I.F("rate limiter configured for Neo4j backend (target: %dMB)", targetMB)
|
||||
} else {
|
||||
// For Neo4j or other backends, create a disabled limiter for now
|
||||
// Neo4j monitor requires access to the querySem which is internal
|
||||
// For other backends, create a disabled limiter
|
||||
limiter = ratelimit.NewDisabledLimiter()
|
||||
log.I.F("rate limiter disabled for non-Badger backend")
|
||||
log.I.F("rate limiter disabled for unknown backend")
|
||||
}
|
||||
} else {
|
||||
limiter = ratelimit.NewDisabledLimiter()
|
||||
|
||||
@@ -20,6 +20,15 @@ import (
|
||||
"git.mleku.dev/mleku/nostr/utils/units"
|
||||
)
|
||||
|
||||
// RateLimiterInterface defines the minimal interface for rate limiting during import
|
||||
type RateLimiterInterface interface {
|
||||
IsEnabled() bool
|
||||
Wait(ctx context.Context, opType int) time.Duration
|
||||
}
|
||||
|
||||
// WriteOpType is the operation type constant for write operations
|
||||
const WriteOpType = 1
|
||||
|
||||
// D implements the Database interface using Badger as the storage backend
|
||||
type D struct {
|
||||
ctx context.Context
|
||||
@@ -35,6 +44,14 @@ type D struct {
|
||||
// Serial cache for compact event storage
|
||||
// Caches pubkey and event ID serial mappings for fast compact event decoding
|
||||
serialCache *SerialCache
|
||||
|
||||
// Rate limiter for controlling memory pressure during bulk operations
|
||||
rateLimiter RateLimiterInterface
|
||||
}
|
||||
|
||||
// SetRateLimiter sets the rate limiter for controlling memory during import/export
|
||||
func (d *D) SetRateLimiter(limiter RateLimiterInterface) {
|
||||
d.rateLimiter = limiter
|
||||
}
|
||||
|
||||
// Ensure D implements Database interface at compile time
|
||||
|
||||
@@ -125,6 +125,11 @@ func (d *D) processJSONLEventsWithPolicy(ctx context.Context, rr io.Reader, poli
|
||||
log.D.F("policy allowed event %x during sync import", ev.ID)
|
||||
}
|
||||
|
||||
// Apply rate limiting before write operation if limiter is configured
|
||||
if d.rateLimiter != nil && d.rateLimiter.IsEnabled() {
|
||||
d.rateLimiter.Wait(ctx, WriteOpType)
|
||||
}
|
||||
|
||||
if _, err := d.SaveEvent(ctx, ev); err != nil {
|
||||
// return the pooled buffer on error paths too
|
||||
ev.Free()
|
||||
|
||||
@@ -30,6 +30,17 @@ type Metrics struct {
|
||||
|
||||
// Timestamp is when these metrics were collected.
|
||||
Timestamp time.Time
|
||||
|
||||
// InEmergencyMode indicates that memory pressure is critical
|
||||
// and aggressive throttling should be applied.
|
||||
InEmergencyMode bool
|
||||
|
||||
// CompactionPending indicates that the database needs compaction
|
||||
// and writes should be throttled to allow it to catch up.
|
||||
CompactionPending bool
|
||||
|
||||
// PhysicalMemoryMB is the actual physical memory (RSS - shared) in MB
|
||||
PhysicalMemoryMB uint64
|
||||
}
|
||||
|
||||
// Monitor defines the interface for database load monitoring.
|
||||
@@ -56,3 +67,33 @@ type Monitor interface {
|
||||
// Stop halts background metric collection.
|
||||
Stop()
|
||||
}
|
||||
|
||||
// CompactableMonitor extends Monitor with compaction-triggering capability.
|
||||
// Implemented by database backends that support manual compaction (e.g., Badger).
|
||||
type CompactableMonitor interface {
|
||||
Monitor
|
||||
|
||||
// TriggerCompaction initiates a database compaction operation.
|
||||
// This may take significant time; callers should run this in a goroutine.
|
||||
// Returns an error if compaction fails or is not supported.
|
||||
TriggerCompaction() error
|
||||
|
||||
// IsCompacting returns true if a compaction is currently in progress.
|
||||
IsCompacting() bool
|
||||
}
|
||||
|
||||
// EmergencyModeMonitor extends Monitor with emergency mode detection.
|
||||
// Implemented by monitors that can detect critical memory pressure.
|
||||
type EmergencyModeMonitor interface {
|
||||
Monitor
|
||||
|
||||
// SetEmergencyThreshold sets the memory threshold (as a fraction, e.g., 1.5 = 150% of target)
|
||||
// above which emergency mode is triggered.
|
||||
SetEmergencyThreshold(threshold float64)
|
||||
|
||||
// GetEmergencyThreshold returns the current emergency threshold.
|
||||
GetEmergencyThreshold() float64
|
||||
|
||||
// ForceEmergencyMode manually triggers emergency mode for a duration.
|
||||
ForceEmergencyMode(duration time.Duration)
|
||||
}
|
||||
|
||||
@@ -447,3 +447,18 @@ func (n *N) CacheEvents(f *filter.F, events event.S) {}
|
||||
|
||||
// InvalidateQueryCache invalidates the query cache (not implemented for Neo4j)
|
||||
func (n *N) InvalidateQueryCache() {}
|
||||
|
||||
// Driver returns the Neo4j driver for use in rate limiting.
|
||||
func (n *N) Driver() neo4j.DriverWithContext {
|
||||
return n.driver
|
||||
}
|
||||
|
||||
// QuerySem returns the query semaphore for use in rate limiting.
|
||||
func (n *N) QuerySem() chan struct{} {
|
||||
return n.querySem
|
||||
}
|
||||
|
||||
// MaxConcurrentQueries returns the maximum concurrent query limit.
|
||||
func (n *N) MaxConcurrentQueries() int {
|
||||
return cap(n.querySem)
|
||||
}
|
||||
|
||||
@@ -96,7 +96,7 @@ func TestBugReproduction_WithPolicyManager(t *testing.T) {
|
||||
|
||||
// Create policy with manager (enabled)
|
||||
ctx := context.Background()
|
||||
policy := NewWithManager(ctx, "ORLY", true)
|
||||
policy := NewWithManager(ctx, "ORLY", true, "")
|
||||
|
||||
// Load policy from file
|
||||
if err := policy.LoadFromFile(policyPath); err != nil {
|
||||
|
||||
@@ -31,7 +31,7 @@ func setupTestPolicy(t *testing.T, appName string) (*P, func()) {
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
|
||||
policy := NewWithManager(ctx, appName, true)
|
||||
policy := NewWithManager(ctx, appName, true, "")
|
||||
if policy == nil {
|
||||
cancel()
|
||||
os.RemoveAll(configDir)
|
||||
|
||||
@@ -29,7 +29,7 @@ func setupHotreloadTestPolicy(t *testing.T, appName string) (*P, func()) {
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
|
||||
policy := NewWithManager(ctx, appName, true)
|
||||
policy := NewWithManager(ctx, appName, true, "")
|
||||
if policy == nil {
|
||||
cancel()
|
||||
os.RemoveAll(configDir)
|
||||
|
||||
@@ -514,12 +514,19 @@ type PolicyManager struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
configDir string
|
||||
configPath string // Path to policy.json file
|
||||
scriptPath string // Default script path for backward compatibility
|
||||
enabled bool
|
||||
mutex sync.RWMutex
|
||||
runners map[string]*ScriptRunner // Map of script path -> runner
|
||||
}
|
||||
|
||||
// ConfigPath returns the path to the policy configuration file.
|
||||
// This is used by hot-reload handlers to know where to save updated policy.
|
||||
func (pm *PolicyManager) ConfigPath() string {
|
||||
return pm.configPath
|
||||
}
|
||||
|
||||
// P represents a complete policy configuration for a Nostr relay.
|
||||
// It defines access control rules, kind filtering, and default behavior.
|
||||
// Policies are evaluated in order: global rules, kind filtering, specific rules, then default policy.
|
||||
@@ -695,6 +702,15 @@ func (p *P) IsEnabled() bool {
|
||||
return p != nil && p.manager != nil && p.manager.IsEnabled()
|
||||
}
|
||||
|
||||
// ConfigPath returns the path to the policy configuration file.
|
||||
// Delegates to the internal PolicyManager.
|
||||
func (p *P) ConfigPath() string {
|
||||
if p == nil || p.manager == nil {
|
||||
return ""
|
||||
}
|
||||
return p.manager.ConfigPath()
|
||||
}
|
||||
|
||||
// getDefaultPolicyAction returns true if the default policy is "allow", false if "deny"
|
||||
func (p *P) getDefaultPolicyAction() (allowed bool) {
|
||||
switch p.DefaultPolicy {
|
||||
@@ -711,10 +727,29 @@ func (p *P) getDefaultPolicyAction() (allowed bool) {
|
||||
// NewWithManager creates a new policy with a policy manager for script execution.
|
||||
// It initializes the policy manager, loads configuration from files, and starts
|
||||
// background processes for script management and periodic health checks.
|
||||
func NewWithManager(ctx context.Context, appName string, enabled bool) *P {
|
||||
//
|
||||
// The customPolicyPath parameter allows overriding the default policy file location.
|
||||
// If empty, uses the default path: $HOME/.config/{appName}/policy.json
|
||||
// If provided, it MUST be an absolute path (starting with /) or the function will panic.
|
||||
func NewWithManager(ctx context.Context, appName string, enabled bool, customPolicyPath string) *P {
|
||||
configDir := filepath.Join(xdg.ConfigHome, appName)
|
||||
scriptPath := filepath.Join(configDir, "policy.sh")
|
||||
configPath := filepath.Join(configDir, "policy.json")
|
||||
|
||||
// Determine the policy config path
|
||||
var configPath string
|
||||
if customPolicyPath != "" {
|
||||
// Validate that custom path is absolute
|
||||
if !filepath.IsAbs(customPolicyPath) {
|
||||
panic(fmt.Sprintf("FATAL: ORLY_POLICY_PATH must be an ABSOLUTE path (starting with /), got: %q", customPolicyPath))
|
||||
}
|
||||
configPath = customPolicyPath
|
||||
// Update configDir to match the custom path's directory for script resolution
|
||||
configDir = filepath.Dir(customPolicyPath)
|
||||
scriptPath = filepath.Join(configDir, "policy.sh")
|
||||
log.I.F("using custom policy path: %s", configPath)
|
||||
} else {
|
||||
configPath = filepath.Join(configDir, "policy.json")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
@@ -722,6 +757,7 @@ func NewWithManager(ctx context.Context, appName string, enabled bool) *P {
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
configDir: configDir,
|
||||
configPath: configPath,
|
||||
scriptPath: scriptPath,
|
||||
enabled: enabled,
|
||||
runners: make(map[string]*ScriptRunner),
|
||||
|
||||
@@ -825,7 +825,7 @@ func TestNewWithManager(t *testing.T) {
|
||||
// Test with disabled policy (doesn't require policy.json file)
|
||||
t.Run("disabled policy", func(t *testing.T) {
|
||||
enabled := false
|
||||
policy := NewWithManager(ctx, appName, enabled)
|
||||
policy := NewWithManager(ctx, appName, enabled, "")
|
||||
|
||||
if policy == nil {
|
||||
t.Fatal("Expected policy but got nil")
|
||||
|
||||
@@ -31,7 +31,7 @@ func setupTagValidationTestPolicy(t *testing.T, appName string) (*P, func()) {
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
|
||||
policy := NewWithManager(ctx, appName, true)
|
||||
policy := NewWithManager(ctx, appName, true, "")
|
||||
if policy == nil {
|
||||
cancel()
|
||||
os.RemoveAll(configDir)
|
||||
|
||||
@@ -3,23 +3,32 @@
|
||||
package ratelimit
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/interfaces/loadmonitor"
|
||||
)
|
||||
|
||||
// BadgerMonitor implements loadmonitor.Monitor for the Badger database.
|
||||
// It collects metrics from Badger's LSM tree, caches, and Go runtime.
|
||||
// It collects metrics from Badger's LSM tree, caches, and actual process memory.
|
||||
// It also implements CompactableMonitor and EmergencyModeMonitor interfaces.
|
||||
type BadgerMonitor struct {
|
||||
db *badger.DB
|
||||
|
||||
// Target memory for pressure calculation
|
||||
targetMemoryBytes atomic.Uint64
|
||||
|
||||
// Emergency mode configuration
|
||||
emergencyThreshold atomic.Uint64 // stored as threshold * 1000 (e.g., 1500 = 1.5)
|
||||
emergencyModeUntil atomic.Int64 // Unix nano when forced emergency mode ends
|
||||
inEmergencyMode atomic.Bool
|
||||
|
||||
// Compaction state
|
||||
isCompacting atomic.Bool
|
||||
|
||||
// Latency tracking with exponential moving average
|
||||
queryLatencyNs atomic.Int64
|
||||
writeLatencyNs atomic.Int64
|
||||
@@ -37,8 +46,10 @@ type BadgerMonitor struct {
|
||||
interval time.Duration
|
||||
}
|
||||
|
||||
// Compile-time check that BadgerMonitor implements loadmonitor.Monitor
|
||||
// Compile-time checks for interface implementation
|
||||
var _ loadmonitor.Monitor = (*BadgerMonitor)(nil)
|
||||
var _ loadmonitor.CompactableMonitor = (*BadgerMonitor)(nil)
|
||||
var _ loadmonitor.EmergencyModeMonitor = (*BadgerMonitor)(nil)
|
||||
|
||||
// NewBadgerMonitor creates a new Badger load monitor.
|
||||
// The updateInterval controls how often metrics are collected (default 100ms).
|
||||
@@ -58,9 +69,73 @@ func NewBadgerMonitor(db *badger.DB, updateInterval time.Duration) *BadgerMonito
|
||||
// Set a default target (1.5GB)
|
||||
m.targetMemoryBytes.Store(1500 * 1024 * 1024)
|
||||
|
||||
// Default emergency threshold: 150% of target
|
||||
m.emergencyThreshold.Store(1500)
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// SetEmergencyThreshold sets the memory threshold above which emergency mode is triggered.
|
||||
// threshold is a fraction, e.g., 1.5 = 150% of target memory.
|
||||
func (m *BadgerMonitor) SetEmergencyThreshold(threshold float64) {
|
||||
m.emergencyThreshold.Store(uint64(threshold * 1000))
|
||||
}
|
||||
|
||||
// GetEmergencyThreshold returns the current emergency threshold as a fraction.
|
||||
func (m *BadgerMonitor) GetEmergencyThreshold() float64 {
|
||||
return float64(m.emergencyThreshold.Load()) / 1000.0
|
||||
}
|
||||
|
||||
// ForceEmergencyMode manually triggers emergency mode for a duration.
|
||||
func (m *BadgerMonitor) ForceEmergencyMode(duration time.Duration) {
|
||||
m.emergencyModeUntil.Store(time.Now().Add(duration).UnixNano())
|
||||
m.inEmergencyMode.Store(true)
|
||||
log.W.F("⚠️ emergency mode forced for %v", duration)
|
||||
}
|
||||
|
||||
// TriggerCompaction initiates a Badger Flatten operation to compact all levels.
|
||||
// This should be called when memory pressure is high and the database needs to
|
||||
// reclaim space. It runs synchronously and may take significant time.
|
||||
func (m *BadgerMonitor) TriggerCompaction() error {
|
||||
if m.db == nil || m.db.IsClosed() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if m.isCompacting.Load() {
|
||||
log.D.Ln("compaction already in progress, skipping")
|
||||
return nil
|
||||
}
|
||||
|
||||
m.isCompacting.Store(true)
|
||||
defer m.isCompacting.Store(false)
|
||||
|
||||
log.I.Ln("🗜️ triggering Badger compaction (Flatten)")
|
||||
start := time.Now()
|
||||
|
||||
// Flatten with 4 workers (matches NumCompactors default)
|
||||
err := m.db.Flatten(4)
|
||||
if err != nil {
|
||||
log.E.F("compaction failed: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Also run value log GC to reclaim space
|
||||
for {
|
||||
err := m.db.RunValueLogGC(0.5)
|
||||
if err != nil {
|
||||
break // No more GC needed
|
||||
}
|
||||
}
|
||||
|
||||
log.I.F("🗜️ compaction completed in %v", time.Since(start))
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsCompacting returns true if a compaction is currently in progress.
|
||||
func (m *BadgerMonitor) IsCompacting() bool {
|
||||
return m.isCompacting.Load()
|
||||
}
|
||||
|
||||
// GetMetrics returns the current load metrics.
|
||||
func (m *BadgerMonitor) GetMetrics() loadmonitor.Metrics {
|
||||
m.metricsLock.RLock()
|
||||
@@ -140,7 +215,7 @@ func (m *BadgerMonitor) collectLoop() {
|
||||
}
|
||||
}
|
||||
|
||||
// updateMetrics collects current metrics from Badger and runtime.
|
||||
// updateMetrics collects current metrics from Badger and actual process memory.
|
||||
func (m *BadgerMonitor) updateMetrics() {
|
||||
if m.db == nil || m.db.IsClosed() {
|
||||
return
|
||||
@@ -150,17 +225,40 @@ func (m *BadgerMonitor) updateMetrics() {
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
|
||||
// Calculate memory pressure from Go runtime
|
||||
var memStats runtime.MemStats
|
||||
runtime.ReadMemStats(&memStats)
|
||||
// Use RSS-based memory pressure (actual physical memory, not Go runtime)
|
||||
procMem := ReadProcessMemoryStats()
|
||||
physicalMemBytes := procMem.PhysicalMemoryBytes()
|
||||
metrics.PhysicalMemoryMB = physicalMemBytes / (1024 * 1024)
|
||||
|
||||
targetBytes := m.targetMemoryBytes.Load()
|
||||
if targetBytes > 0 {
|
||||
// Use HeapAlloc as primary memory metric
|
||||
// This represents the actual live heap objects
|
||||
metrics.MemoryPressure = float64(memStats.HeapAlloc) / float64(targetBytes)
|
||||
// Use actual physical memory (RSS - shared) for pressure calculation
|
||||
metrics.MemoryPressure = float64(physicalMemBytes) / float64(targetBytes)
|
||||
}
|
||||
|
||||
// Check emergency mode
|
||||
emergencyThreshold := float64(m.emergencyThreshold.Load()) / 1000.0
|
||||
forcedUntil := m.emergencyModeUntil.Load()
|
||||
now := time.Now().UnixNano()
|
||||
|
||||
if forcedUntil > now {
|
||||
// Still in forced emergency mode
|
||||
metrics.InEmergencyMode = true
|
||||
} else if metrics.MemoryPressure >= emergencyThreshold {
|
||||
// Memory pressure exceeds emergency threshold
|
||||
metrics.InEmergencyMode = true
|
||||
if !m.inEmergencyMode.Load() {
|
||||
log.W.F("⚠️ entering emergency mode: memory pressure %.1f%% >= threshold %.1f%%",
|
||||
metrics.MemoryPressure*100, emergencyThreshold*100)
|
||||
}
|
||||
} else {
|
||||
if m.inEmergencyMode.Load() {
|
||||
log.I.F("✅ exiting emergency mode: memory pressure %.1f%% < threshold %.1f%%",
|
||||
metrics.MemoryPressure*100, emergencyThreshold*100)
|
||||
}
|
||||
}
|
||||
m.inEmergencyMode.Store(metrics.InEmergencyMode)
|
||||
|
||||
// Get Badger LSM tree information for write load
|
||||
levels := m.db.Levels()
|
||||
var l0Tables int
|
||||
@@ -191,6 +289,9 @@ func (m *BadgerMonitor) updateMetrics() {
|
||||
compactionLoad = 1.0
|
||||
}
|
||||
|
||||
// Mark compaction as pending if score is high
|
||||
metrics.CompactionPending = maxScore > 1.5 || l0Tables > 10
|
||||
|
||||
// Blend: 60% L0 (immediate backpressure), 40% compaction score
|
||||
metrics.WriteLoad = 0.6*l0Load + 0.4*compactionLoad
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/interfaces/loadmonitor"
|
||||
pidif "next.orly.dev/pkg/interfaces/pid"
|
||||
"next.orly.dev/pkg/pid"
|
||||
@@ -74,25 +75,47 @@ type Config struct {
|
||||
// The remaining weight is given to the load metric.
|
||||
// Default: 0.7 (70% memory, 30% load)
|
||||
MemoryWeight float64
|
||||
|
||||
// EmergencyThreshold is the memory pressure level (fraction of target) that triggers emergency mode.
|
||||
// Default: 1.167 (116.7% = target + 1/6th)
|
||||
// When exceeded, writes are aggressively throttled until memory drops below RecoveryThreshold.
|
||||
EmergencyThreshold float64
|
||||
|
||||
// RecoveryThreshold is the memory pressure level below which we exit emergency mode.
|
||||
// Default: 0.833 (83.3% = target - 1/6th)
|
||||
// Hysteresis prevents rapid oscillation between normal and emergency modes.
|
||||
RecoveryThreshold float64
|
||||
|
||||
// EmergencyMaxDelayMs is the maximum delay for writes during emergency mode.
|
||||
// Default: 5000 (5 seconds) - much longer than normal MaxWriteDelayMs
|
||||
EmergencyMaxDelayMs int
|
||||
|
||||
// CompactionCheckInterval controls how often to check if compaction should be triggered.
|
||||
// Default: 10 seconds
|
||||
CompactionCheckInterval time.Duration
|
||||
}
|
||||
|
||||
// DefaultConfig returns a default configuration for the rate limiter.
|
||||
func DefaultConfig() Config {
|
||||
return Config{
|
||||
Enabled: true,
|
||||
TargetMemoryMB: 1500, // 1.5GB target
|
||||
WriteSetpoint: 0.85,
|
||||
ReadSetpoint: 0.90,
|
||||
WriteKp: 0.5,
|
||||
WriteKi: 0.1,
|
||||
WriteKd: 0.05,
|
||||
ReadKp: 0.3,
|
||||
ReadKi: 0.05,
|
||||
ReadKd: 0.02,
|
||||
MaxWriteDelayMs: 1000, // 1 second max
|
||||
MaxReadDelayMs: 500, // 500ms max
|
||||
MetricUpdateInterval: 100 * time.Millisecond,
|
||||
MemoryWeight: 0.7,
|
||||
Enabled: true,
|
||||
TargetMemoryMB: 1500, // 1.5GB target
|
||||
WriteSetpoint: 0.85,
|
||||
ReadSetpoint: 0.90,
|
||||
WriteKp: 0.5,
|
||||
WriteKi: 0.1,
|
||||
WriteKd: 0.05,
|
||||
ReadKp: 0.3,
|
||||
ReadKi: 0.05,
|
||||
ReadKd: 0.02,
|
||||
MaxWriteDelayMs: 1000, // 1 second max
|
||||
MaxReadDelayMs: 500, // 500ms max
|
||||
MetricUpdateInterval: 100 * time.Millisecond,
|
||||
MemoryWeight: 0.7,
|
||||
EmergencyThreshold: 1.167, // Target + 1/6th (~1.75GB for 1.5GB target)
|
||||
RecoveryThreshold: 0.833, // Target - 1/6th (~1.25GB for 1.5GB target)
|
||||
EmergencyMaxDelayMs: 5000, // 5 seconds max in emergency mode
|
||||
CompactionCheckInterval: 10 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -105,22 +128,39 @@ func NewConfigFromValues(
|
||||
readKp, readKi, readKd float64,
|
||||
maxWriteMs, maxReadMs int,
|
||||
writeTarget, readTarget float64,
|
||||
emergencyThreshold, recoveryThreshold float64,
|
||||
emergencyMaxMs int,
|
||||
) Config {
|
||||
// Apply defaults for zero values
|
||||
if emergencyThreshold == 0 {
|
||||
emergencyThreshold = 1.167 // Target + 1/6th
|
||||
}
|
||||
if recoveryThreshold == 0 {
|
||||
recoveryThreshold = 0.833 // Target - 1/6th
|
||||
}
|
||||
if emergencyMaxMs == 0 {
|
||||
emergencyMaxMs = 5000 // 5 seconds
|
||||
}
|
||||
|
||||
return Config{
|
||||
Enabled: enabled,
|
||||
TargetMemoryMB: targetMB,
|
||||
WriteSetpoint: writeTarget,
|
||||
ReadSetpoint: readTarget,
|
||||
WriteKp: writeKp,
|
||||
WriteKi: writeKi,
|
||||
WriteKd: writeKd,
|
||||
ReadKp: readKp,
|
||||
ReadKi: readKi,
|
||||
ReadKd: readKd,
|
||||
MaxWriteDelayMs: maxWriteMs,
|
||||
MaxReadDelayMs: maxReadMs,
|
||||
MetricUpdateInterval: 100 * time.Millisecond,
|
||||
MemoryWeight: 0.7,
|
||||
Enabled: enabled,
|
||||
TargetMemoryMB: targetMB,
|
||||
WriteSetpoint: writeTarget,
|
||||
ReadSetpoint: readTarget,
|
||||
WriteKp: writeKp,
|
||||
WriteKi: writeKi,
|
||||
WriteKd: writeKd,
|
||||
ReadKp: readKp,
|
||||
ReadKi: readKi,
|
||||
ReadKd: readKd,
|
||||
MaxWriteDelayMs: maxWriteMs,
|
||||
MaxReadDelayMs: maxReadMs,
|
||||
MetricUpdateInterval: 100 * time.Millisecond,
|
||||
MemoryWeight: 0.7,
|
||||
EmergencyThreshold: emergencyThreshold,
|
||||
RecoveryThreshold: recoveryThreshold,
|
||||
EmergencyMaxDelayMs: emergencyMaxMs,
|
||||
CompactionCheckInterval: 10 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -139,11 +179,17 @@ type Limiter struct {
|
||||
metricsLock sync.RWMutex
|
||||
currentMetrics loadmonitor.Metrics
|
||||
|
||||
// Emergency mode tracking with hysteresis
|
||||
inEmergencyMode atomic.Bool
|
||||
lastEmergencyCheck atomic.Int64 // Unix nano timestamp
|
||||
compactionTriggered atomic.Bool
|
||||
|
||||
// Statistics
|
||||
totalWriteDelayMs atomic.Int64
|
||||
totalReadDelayMs atomic.Int64
|
||||
writeThrottles atomic.Int64
|
||||
readThrottles atomic.Int64
|
||||
emergencyEvents atomic.Int64
|
||||
|
||||
// Lifecycle
|
||||
ctx context.Context
|
||||
@@ -158,6 +204,20 @@ type Limiter struct {
|
||||
func NewLimiter(config Config, monitor loadmonitor.Monitor) *Limiter {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
// Apply defaults for zero values
|
||||
if config.EmergencyThreshold == 0 {
|
||||
config.EmergencyThreshold = 1.167 // Target + 1/6th
|
||||
}
|
||||
if config.RecoveryThreshold == 0 {
|
||||
config.RecoveryThreshold = 0.833 // Target - 1/6th
|
||||
}
|
||||
if config.EmergencyMaxDelayMs == 0 {
|
||||
config.EmergencyMaxDelayMs = 5000 // 5 seconds
|
||||
}
|
||||
if config.CompactionCheckInterval == 0 {
|
||||
config.CompactionCheckInterval = 10 * time.Second
|
||||
}
|
||||
|
||||
l := &Limiter{
|
||||
config: config,
|
||||
monitor: monitor,
|
||||
@@ -196,6 +256,11 @@ func NewLimiter(config Config, monitor loadmonitor.Monitor) *Limiter {
|
||||
monitor.SetMemoryTarget(uint64(config.TargetMemoryMB) * 1024 * 1024)
|
||||
}
|
||||
|
||||
// Configure emergency threshold if monitor supports it
|
||||
if emMon, ok := monitor.(loadmonitor.EmergencyModeMonitor); ok {
|
||||
emMon.SetEmergencyThreshold(config.EmergencyThreshold)
|
||||
}
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
@@ -255,12 +320,13 @@ func (l *Limiter) Stopped() <-chan struct{} {
|
||||
// Wait blocks until the rate limiter permits the operation to proceed.
|
||||
// It returns the delay that was applied, or 0 if no delay was needed.
|
||||
// If the context is cancelled, it returns immediately.
|
||||
func (l *Limiter) Wait(ctx context.Context, opType OperationType) time.Duration {
|
||||
// opType accepts int for interface compatibility (0=Read, 1=Write)
|
||||
func (l *Limiter) Wait(ctx context.Context, opType int) time.Duration {
|
||||
if !l.config.Enabled || l.monitor == nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
delay := l.ComputeDelay(opType)
|
||||
delay := l.ComputeDelay(OperationType(opType))
|
||||
if delay <= 0 {
|
||||
return 0
|
||||
}
|
||||
@@ -286,6 +352,9 @@ func (l *Limiter) ComputeDelay(opType OperationType) time.Duration {
|
||||
metrics := l.currentMetrics
|
||||
l.metricsLock.RUnlock()
|
||||
|
||||
// Check emergency mode with hysteresis
|
||||
inEmergency := l.checkEmergencyMode(metrics.MemoryPressure)
|
||||
|
||||
// Compute process variable as weighted combination of memory and load
|
||||
var loadMetric float64
|
||||
switch opType {
|
||||
@@ -305,6 +374,34 @@ func (l *Limiter) ComputeDelay(opType OperationType) time.Duration {
|
||||
case Write:
|
||||
out := l.writePID.UpdateValue(pv)
|
||||
delaySec = out.Value()
|
||||
|
||||
// In emergency mode, apply progressive throttling for writes
|
||||
if inEmergency {
|
||||
// Calculate how far above recovery threshold we are
|
||||
// At emergency threshold, add 1x normal delay
|
||||
// For every additional 10% above emergency, double the delay
|
||||
excessPressure := metrics.MemoryPressure - l.config.RecoveryThreshold
|
||||
if excessPressure > 0 {
|
||||
// Progressive multiplier: starts at 2x, doubles every 10% excess
|
||||
multiplier := 2.0
|
||||
for excess := excessPressure; excess > 0.1; excess -= 0.1 {
|
||||
multiplier *= 2
|
||||
}
|
||||
|
||||
emergencyDelaySec := delaySec * multiplier
|
||||
maxEmergencySec := float64(l.config.EmergencyMaxDelayMs) / 1000.0
|
||||
|
||||
if emergencyDelaySec > maxEmergencySec {
|
||||
emergencyDelaySec = maxEmergencySec
|
||||
}
|
||||
// Minimum emergency delay of 100ms to allow other operations
|
||||
if emergencyDelaySec < 0.1 {
|
||||
emergencyDelaySec = 0.1
|
||||
}
|
||||
delaySec = emergencyDelaySec
|
||||
}
|
||||
}
|
||||
|
||||
if delaySec > 0 {
|
||||
l.writeThrottles.Add(1)
|
||||
l.totalWriteDelayMs.Add(int64(delaySec * 1000))
|
||||
@@ -325,6 +422,68 @@ func (l *Limiter) ComputeDelay(opType OperationType) time.Duration {
|
||||
return time.Duration(delaySec * float64(time.Second))
|
||||
}
|
||||
|
||||
// checkEmergencyMode implements hysteresis-based emergency mode detection.
|
||||
// Enters emergency mode when memory pressure >= EmergencyThreshold.
|
||||
// Exits emergency mode when memory pressure <= RecoveryThreshold.
|
||||
func (l *Limiter) checkEmergencyMode(memoryPressure float64) bool {
|
||||
wasInEmergency := l.inEmergencyMode.Load()
|
||||
|
||||
if wasInEmergency {
|
||||
// To exit, must drop below recovery threshold
|
||||
if memoryPressure <= l.config.RecoveryThreshold {
|
||||
l.inEmergencyMode.Store(false)
|
||||
log.I.F("✅ exiting emergency mode: memory %.1f%% <= recovery threshold %.1f%%",
|
||||
memoryPressure*100, l.config.RecoveryThreshold*100)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// To enter, must exceed emergency threshold
|
||||
if memoryPressure >= l.config.EmergencyThreshold {
|
||||
l.inEmergencyMode.Store(true)
|
||||
l.emergencyEvents.Add(1)
|
||||
log.W.F("⚠️ entering emergency mode: memory %.1f%% >= threshold %.1f%%",
|
||||
memoryPressure*100, l.config.EmergencyThreshold*100)
|
||||
|
||||
// Trigger compaction if supported
|
||||
l.triggerCompactionIfNeeded()
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// triggerCompactionIfNeeded triggers database compaction if the monitor supports it
|
||||
// and compaction isn't already in progress.
|
||||
func (l *Limiter) triggerCompactionIfNeeded() {
|
||||
if l.compactionTriggered.Load() {
|
||||
return // Already triggered
|
||||
}
|
||||
|
||||
compactMon, ok := l.monitor.(loadmonitor.CompactableMonitor)
|
||||
if !ok {
|
||||
return // Monitor doesn't support compaction
|
||||
}
|
||||
|
||||
if compactMon.IsCompacting() {
|
||||
return // Already compacting
|
||||
}
|
||||
|
||||
l.compactionTriggered.Store(true)
|
||||
go func() {
|
||||
defer l.compactionTriggered.Store(false)
|
||||
if err := compactMon.TriggerCompaction(); err != nil {
|
||||
log.E.F("compaction failed: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// InEmergencyMode returns true if the limiter is currently in emergency mode.
|
||||
func (l *Limiter) InEmergencyMode() bool {
|
||||
return l.inEmergencyMode.Load()
|
||||
}
|
||||
|
||||
// RecordLatency records an operation latency for the monitor.
|
||||
func (l *Limiter) RecordLatency(opType OperationType, latency time.Duration) {
|
||||
if l.monitor == nil {
|
||||
@@ -345,6 +504,8 @@ type Stats struct {
|
||||
ReadThrottles int64
|
||||
TotalWriteDelayMs int64
|
||||
TotalReadDelayMs int64
|
||||
EmergencyEvents int64
|
||||
InEmergencyMode bool
|
||||
CurrentMetrics loadmonitor.Metrics
|
||||
WritePIDState PIDState
|
||||
ReadPIDState PIDState
|
||||
@@ -368,6 +529,8 @@ func (l *Limiter) GetStats() Stats {
|
||||
ReadThrottles: l.readThrottles.Load(),
|
||||
TotalWriteDelayMs: l.totalWriteDelayMs.Load(),
|
||||
TotalReadDelayMs: l.totalReadDelayMs.Load(),
|
||||
EmergencyEvents: l.emergencyEvents.Load(),
|
||||
InEmergencyMode: l.inEmergencyMode.Load(),
|
||||
CurrentMetrics: metrics,
|
||||
}
|
||||
|
||||
|
||||
149
pkg/ratelimit/memory.go
Normal file
149
pkg/ratelimit/memory.go
Normal file
@@ -0,0 +1,149 @@
|
||||
//go:build !(js && wasm)
|
||||
|
||||
package ratelimit
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"runtime"
|
||||
|
||||
"github.com/pbnjay/memory"
|
||||
)
|
||||
|
||||
// MinimumMemoryMB is the minimum memory required to run the relay with rate limiting.
|
||||
const MinimumMemoryMB = 500
|
||||
|
||||
// AutoDetectMemoryFraction is the fraction of available memory to use when auto-detecting.
|
||||
const AutoDetectMemoryFraction = 0.66
|
||||
|
||||
// DefaultMaxMemoryMB is the default maximum memory target when auto-detecting.
|
||||
// This caps the auto-detected value to ensure optimal performance.
|
||||
const DefaultMaxMemoryMB = 1500
|
||||
|
||||
// ErrInsufficientMemory is returned when there isn't enough memory to run the relay.
|
||||
var ErrInsufficientMemory = errors.New("insufficient memory: relay requires at least 500MB of available memory")
|
||||
|
||||
// ProcessMemoryStats contains memory statistics for the current process.
|
||||
// On Linux, these are read from /proc/self/status for accurate RSS values.
|
||||
// On other platforms, these are approximated from Go runtime stats.
|
||||
type ProcessMemoryStats struct {
|
||||
// VmRSS is the resident set size (total physical memory in use) in bytes
|
||||
VmRSS uint64
|
||||
// RssShmem is the shared memory portion of RSS in bytes
|
||||
RssShmem uint64
|
||||
// RssAnon is the anonymous (non-shared) memory in bytes
|
||||
RssAnon uint64
|
||||
// VmHWM is the peak RSS (high water mark) in bytes
|
||||
VmHWM uint64
|
||||
}
|
||||
|
||||
// PhysicalMemoryBytes returns the actual physical memory usage (RSS - shared)
|
||||
func (p ProcessMemoryStats) PhysicalMemoryBytes() uint64 {
|
||||
if p.VmRSS > p.RssShmem {
|
||||
return p.VmRSS - p.RssShmem
|
||||
}
|
||||
return p.VmRSS
|
||||
}
|
||||
|
||||
// PhysicalMemoryMB returns the actual physical memory usage in MB
|
||||
func (p ProcessMemoryStats) PhysicalMemoryMB() uint64 {
|
||||
return p.PhysicalMemoryBytes() / (1024 * 1024)
|
||||
}
|
||||
|
||||
// DetectAvailableMemoryMB returns the available system memory in megabytes.
|
||||
// On Linux, this returns the actual available memory (free + cached).
|
||||
// On other systems, it returns total memory minus the Go runtime's current usage.
|
||||
func DetectAvailableMemoryMB() uint64 {
|
||||
// Use pbnjay/memory for cross-platform memory detection
|
||||
available := memory.FreeMemory()
|
||||
if available == 0 {
|
||||
// Fallback: use total memory
|
||||
available = memory.TotalMemory()
|
||||
}
|
||||
return available / (1024 * 1024)
|
||||
}
|
||||
|
||||
// DetectTotalMemoryMB returns the total system memory in megabytes.
|
||||
func DetectTotalMemoryMB() uint64 {
|
||||
return memory.TotalMemory() / (1024 * 1024)
|
||||
}
|
||||
|
||||
// CalculateTargetMemoryMB calculates the target memory limit based on configuration.
|
||||
// If configuredMB is 0, it auto-detects based on available memory (66% of available, capped at 1.5GB).
|
||||
// If configuredMB is non-zero, it validates that it's achievable.
|
||||
// Returns an error if there isn't enough memory.
|
||||
func CalculateTargetMemoryMB(configuredMB int) (int, error) {
|
||||
availableMB := int(DetectAvailableMemoryMB())
|
||||
|
||||
// If configured to auto-detect (0), calculate target
|
||||
if configuredMB == 0 {
|
||||
// First check if we have minimum available memory
|
||||
if availableMB < MinimumMemoryMB {
|
||||
return 0, ErrInsufficientMemory
|
||||
}
|
||||
|
||||
// Calculate 66% of available
|
||||
targetMB := int(float64(availableMB) * AutoDetectMemoryFraction)
|
||||
|
||||
// If 66% is less than minimum, use minimum (we've already verified we have enough)
|
||||
if targetMB < MinimumMemoryMB {
|
||||
targetMB = MinimumMemoryMB
|
||||
}
|
||||
|
||||
// Cap at default maximum for optimal performance
|
||||
if targetMB > DefaultMaxMemoryMB {
|
||||
targetMB = DefaultMaxMemoryMB
|
||||
}
|
||||
|
||||
return targetMB, nil
|
||||
}
|
||||
|
||||
// If explicitly configured, validate it's achievable
|
||||
if configuredMB < MinimumMemoryMB {
|
||||
return 0, ErrInsufficientMemory
|
||||
}
|
||||
|
||||
// Warn but allow if configured target exceeds available
|
||||
// (the PID controller will throttle as needed)
|
||||
return configuredMB, nil
|
||||
}
|
||||
|
||||
// GetMemoryStats returns current memory statistics for logging.
|
||||
type MemoryStats struct {
|
||||
TotalMB uint64
|
||||
AvailableMB uint64
|
||||
TargetMB int
|
||||
GoAllocatedMB uint64
|
||||
GoSysMB uint64
|
||||
}
|
||||
|
||||
// GetMemoryStats returns current memory statistics.
|
||||
func GetMemoryStats(targetMB int) MemoryStats {
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
|
||||
return MemoryStats{
|
||||
TotalMB: DetectTotalMemoryMB(),
|
||||
AvailableMB: DetectAvailableMemoryMB(),
|
||||
TargetMB: targetMB,
|
||||
GoAllocatedMB: m.Alloc / (1024 * 1024),
|
||||
GoSysMB: m.Sys / (1024 * 1024),
|
||||
}
|
||||
}
|
||||
|
||||
// readProcessMemoryStatsFallback returns memory stats using Go runtime.
|
||||
// This is used on non-Linux platforms or when /proc is unavailable.
|
||||
// The values are approximations and may not accurately reflect OS-level metrics.
|
||||
func readProcessMemoryStatsFallback() ProcessMemoryStats {
|
||||
var m runtime.MemStats
|
||||
runtime.ReadMemStats(&m)
|
||||
|
||||
// Use Sys as an approximation of RSS (includes all memory from OS)
|
||||
// HeapAlloc approximates anonymous memory (live heap objects)
|
||||
// We cannot determine shared memory from Go runtime, so leave it at 0
|
||||
return ProcessMemoryStats{
|
||||
VmRSS: m.Sys,
|
||||
RssAnon: m.HeapAlloc,
|
||||
RssShmem: 0, // Cannot determine shared memory from Go runtime
|
||||
VmHWM: 0, // Not available from Go runtime
|
||||
}
|
||||
}
|
||||
62
pkg/ratelimit/memory_linux.go
Normal file
62
pkg/ratelimit/memory_linux.go
Normal file
@@ -0,0 +1,62 @@
|
||||
//go:build linux && !(js && wasm)
|
||||
|
||||
package ratelimit
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ReadProcessMemoryStats reads memory statistics from /proc/self/status.
|
||||
// This provides accurate RSS (Resident Set Size) information on Linux,
|
||||
// including the breakdown between shared and anonymous memory.
|
||||
func ReadProcessMemoryStats() ProcessMemoryStats {
|
||||
stats := ProcessMemoryStats{}
|
||||
|
||||
file, err := os.Open("/proc/self/status")
|
||||
if err != nil {
|
||||
// Fallback to runtime stats if /proc is not available
|
||||
return readProcessMemoryStatsFallback()
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
key := strings.TrimSuffix(fields[0], ":")
|
||||
valueStr := fields[1]
|
||||
|
||||
value, err := strconv.ParseUint(valueStr, 10, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Values in /proc/self/status are in kB
|
||||
valueBytes := value * 1024
|
||||
|
||||
switch key {
|
||||
case "VmRSS":
|
||||
stats.VmRSS = valueBytes
|
||||
case "RssShmem":
|
||||
stats.RssShmem = valueBytes
|
||||
case "RssAnon":
|
||||
stats.RssAnon = valueBytes
|
||||
case "VmHWM":
|
||||
stats.VmHWM = valueBytes
|
||||
}
|
||||
}
|
||||
|
||||
// If we didn't get VmRSS, fall back to runtime stats
|
||||
if stats.VmRSS == 0 {
|
||||
return readProcessMemoryStatsFallback()
|
||||
}
|
||||
|
||||
return stats
|
||||
}
|
||||
15
pkg/ratelimit/memory_other.go
Normal file
15
pkg/ratelimit/memory_other.go
Normal file
@@ -0,0 +1,15 @@
|
||||
//go:build !linux && !(js && wasm)
|
||||
|
||||
package ratelimit
|
||||
|
||||
// ReadProcessMemoryStats returns memory statistics using Go runtime stats.
|
||||
// On non-Linux platforms, we cannot read /proc/self/status, so we approximate
|
||||
// using the Go runtime's memory statistics.
|
||||
//
|
||||
// Note: This is less accurate than the Linux implementation because:
|
||||
// - runtime.MemStats.Sys includes memory reserved but not necessarily resident
|
||||
// - We cannot distinguish shared vs anonymous memory
|
||||
// - The values may not match what the OS reports for the process
|
||||
func ReadProcessMemoryStats() ProcessMemoryStats {
|
||||
return readProcessMemoryStatsFallback()
|
||||
}
|
||||
@@ -2,20 +2,25 @@ package ratelimit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/neo4j/neo4j-go-driver/v5/neo4j"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/interfaces/loadmonitor"
|
||||
)
|
||||
|
||||
// Neo4jMonitor implements loadmonitor.Monitor for Neo4j database.
|
||||
// Since Neo4j driver doesn't expose detailed metrics, we track:
|
||||
// - Memory pressure via Go runtime
|
||||
// - Memory pressure via actual RSS (not Go runtime)
|
||||
// - Query concurrency via the semaphore
|
||||
// - Latency via recording
|
||||
//
|
||||
// This monitor implements aggressive memory-based limiting:
|
||||
// When memory exceeds the target, it applies 50% more aggressive throttling.
|
||||
// It rechecks every 10 seconds and doubles the throttling multiplier until
|
||||
// memory returns under target.
|
||||
type Neo4jMonitor struct {
|
||||
driver neo4j.DriverWithContext
|
||||
querySem chan struct{} // Reference to the query semaphore
|
||||
@@ -23,14 +28,24 @@ type Neo4jMonitor struct {
|
||||
// Target memory for pressure calculation
|
||||
targetMemoryBytes atomic.Uint64
|
||||
|
||||
// Emergency mode configuration
|
||||
emergencyThreshold atomic.Uint64 // stored as threshold * 1000 (e.g., 1500 = 1.5)
|
||||
emergencyModeUntil atomic.Int64 // Unix nano when forced emergency mode ends
|
||||
inEmergencyMode atomic.Bool
|
||||
|
||||
// Aggressive throttling multiplier for Neo4j
|
||||
// Starts at 1.5 (50% more aggressive), doubles every 10 seconds while over limit
|
||||
throttleMultiplier atomic.Uint64 // stored as multiplier * 100 (e.g., 150 = 1.5x)
|
||||
lastThrottleCheck atomic.Int64 // Unix nano timestamp
|
||||
|
||||
// Latency tracking with exponential moving average
|
||||
queryLatencyNs atomic.Int64
|
||||
writeLatencyNs atomic.Int64
|
||||
latencyAlpha float64 // EMA coefficient (default 0.1)
|
||||
|
||||
// Concurrency tracking
|
||||
activeReads atomic.Int32
|
||||
activeWrites atomic.Int32
|
||||
activeReads atomic.Int32
|
||||
activeWrites atomic.Int32
|
||||
maxConcurrency int
|
||||
|
||||
// Cached metrics (updated by background goroutine)
|
||||
@@ -43,8 +58,12 @@ type Neo4jMonitor struct {
|
||||
interval time.Duration
|
||||
}
|
||||
|
||||
// Compile-time check that Neo4jMonitor implements loadmonitor.Monitor
|
||||
// Compile-time checks for interface implementation
|
||||
var _ loadmonitor.Monitor = (*Neo4jMonitor)(nil)
|
||||
var _ loadmonitor.EmergencyModeMonitor = (*Neo4jMonitor)(nil)
|
||||
|
||||
// ThrottleCheckInterval is how often to recheck memory and adjust throttling
|
||||
const ThrottleCheckInterval = 10 * time.Second
|
||||
|
||||
// NewNeo4jMonitor creates a new Neo4j load monitor.
|
||||
// The querySem should be the same semaphore used for limiting concurrent queries.
|
||||
@@ -75,9 +94,40 @@ func NewNeo4jMonitor(
|
||||
// Set a default target (1.5GB)
|
||||
m.targetMemoryBytes.Store(1500 * 1024 * 1024)
|
||||
|
||||
// Default emergency threshold: 100% of target (same as target for Neo4j)
|
||||
m.emergencyThreshold.Store(1000)
|
||||
|
||||
// Start with 1.0x multiplier (no throttling)
|
||||
m.throttleMultiplier.Store(100)
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
// SetEmergencyThreshold sets the memory threshold above which emergency mode is triggered.
|
||||
// threshold is a fraction, e.g., 1.0 = 100% of target memory.
|
||||
func (m *Neo4jMonitor) SetEmergencyThreshold(threshold float64) {
|
||||
m.emergencyThreshold.Store(uint64(threshold * 1000))
|
||||
}
|
||||
|
||||
// GetEmergencyThreshold returns the current emergency threshold as a fraction.
|
||||
func (m *Neo4jMonitor) GetEmergencyThreshold() float64 {
|
||||
return float64(m.emergencyThreshold.Load()) / 1000.0
|
||||
}
|
||||
|
||||
// ForceEmergencyMode manually triggers emergency mode for a duration.
|
||||
func (m *Neo4jMonitor) ForceEmergencyMode(duration time.Duration) {
|
||||
m.emergencyModeUntil.Store(time.Now().Add(duration).UnixNano())
|
||||
m.inEmergencyMode.Store(true)
|
||||
m.throttleMultiplier.Store(150) // Start at 1.5x
|
||||
log.W.F("⚠️ Neo4j emergency mode forced for %v", duration)
|
||||
}
|
||||
|
||||
// GetThrottleMultiplier returns the current throttle multiplier.
|
||||
// Returns a value >= 1.0, where 1.0 = no extra throttling, 1.5 = 50% more aggressive, etc.
|
||||
func (m *Neo4jMonitor) GetThrottleMultiplier() float64 {
|
||||
return float64(m.throttleMultiplier.Load()) / 100.0
|
||||
}
|
||||
|
||||
// GetMetrics returns the current load metrics.
|
||||
func (m *Neo4jMonitor) GetMetrics() loadmonitor.Metrics {
|
||||
m.metricsLock.RLock()
|
||||
@@ -157,22 +207,27 @@ func (m *Neo4jMonitor) collectLoop() {
|
||||
}
|
||||
}
|
||||
|
||||
// updateMetrics collects current metrics.
|
||||
// updateMetrics collects current metrics and manages aggressive throttling.
|
||||
func (m *Neo4jMonitor) updateMetrics() {
|
||||
metrics := loadmonitor.Metrics{
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
|
||||
// Calculate memory pressure from Go runtime
|
||||
var memStats runtime.MemStats
|
||||
runtime.ReadMemStats(&memStats)
|
||||
// Use RSS-based memory pressure (actual physical memory, not Go runtime)
|
||||
procMem := ReadProcessMemoryStats()
|
||||
physicalMemBytes := procMem.PhysicalMemoryBytes()
|
||||
metrics.PhysicalMemoryMB = physicalMemBytes / (1024 * 1024)
|
||||
|
||||
targetBytes := m.targetMemoryBytes.Load()
|
||||
if targetBytes > 0 {
|
||||
// Use HeapAlloc as primary memory metric
|
||||
metrics.MemoryPressure = float64(memStats.HeapAlloc) / float64(targetBytes)
|
||||
// Use actual physical memory (RSS - shared) for pressure calculation
|
||||
metrics.MemoryPressure = float64(physicalMemBytes) / float64(targetBytes)
|
||||
}
|
||||
|
||||
// Check and update emergency mode with aggressive throttling
|
||||
m.updateEmergencyMode(metrics.MemoryPressure)
|
||||
metrics.InEmergencyMode = m.inEmergencyMode.Load()
|
||||
|
||||
// Calculate load from semaphore usage
|
||||
// querySem is a buffered channel - count how many slots are taken
|
||||
if m.querySem != nil {
|
||||
@@ -186,6 +241,20 @@ func (m *Neo4jMonitor) updateMetrics() {
|
||||
metrics.ReadLoad = concurrencyLoad
|
||||
}
|
||||
|
||||
// Apply throttle multiplier to loads when in emergency mode
|
||||
// This makes the PID controller think load is higher, causing more throttling
|
||||
if metrics.InEmergencyMode {
|
||||
multiplier := m.GetThrottleMultiplier()
|
||||
metrics.WriteLoad = metrics.WriteLoad * multiplier
|
||||
if metrics.WriteLoad > 1.0 {
|
||||
metrics.WriteLoad = 1.0
|
||||
}
|
||||
metrics.ReadLoad = metrics.ReadLoad * multiplier
|
||||
if metrics.ReadLoad > 1.0 {
|
||||
metrics.ReadLoad = 1.0
|
||||
}
|
||||
}
|
||||
|
||||
// Add latency-based load adjustment
|
||||
// High latency indicates the database is struggling
|
||||
queryLatencyNs := m.queryLatencyNs.Load()
|
||||
@@ -221,6 +290,60 @@ func (m *Neo4jMonitor) updateMetrics() {
|
||||
m.metricsLock.Unlock()
|
||||
}
|
||||
|
||||
// updateEmergencyMode manages the emergency mode state and throttle multiplier.
|
||||
// When memory exceeds the target:
|
||||
// - Enters emergency mode with 1.5x throttle multiplier (50% more aggressive)
|
||||
// - Every 10 seconds while still over limit, doubles the multiplier
|
||||
// - When memory returns under target, resets to normal
|
||||
func (m *Neo4jMonitor) updateEmergencyMode(memoryPressure float64) {
|
||||
threshold := float64(m.emergencyThreshold.Load()) / 1000.0
|
||||
forcedUntil := m.emergencyModeUntil.Load()
|
||||
now := time.Now().UnixNano()
|
||||
|
||||
// Check if in forced emergency mode
|
||||
if forcedUntil > now {
|
||||
return // Stay in forced mode
|
||||
}
|
||||
|
||||
// Check if memory exceeds threshold
|
||||
if memoryPressure >= threshold {
|
||||
if !m.inEmergencyMode.Load() {
|
||||
// Entering emergency mode - start at 1.5x (50% more aggressive)
|
||||
m.inEmergencyMode.Store(true)
|
||||
m.throttleMultiplier.Store(150)
|
||||
m.lastThrottleCheck.Store(now)
|
||||
log.W.F("⚠️ Neo4j entering emergency mode: memory %.1f%% >= threshold %.1f%%, throttle 1.5x",
|
||||
memoryPressure*100, threshold*100)
|
||||
return
|
||||
}
|
||||
|
||||
// Already in emergency mode - check if it's time to double throttling
|
||||
lastCheck := m.lastThrottleCheck.Load()
|
||||
elapsed := time.Duration(now - lastCheck)
|
||||
|
||||
if elapsed >= ThrottleCheckInterval {
|
||||
// Double the throttle multiplier
|
||||
currentMult := m.throttleMultiplier.Load()
|
||||
newMult := currentMult * 2
|
||||
if newMult > 1600 { // Cap at 16x to prevent overflow
|
||||
newMult = 1600
|
||||
}
|
||||
m.throttleMultiplier.Store(newMult)
|
||||
m.lastThrottleCheck.Store(now)
|
||||
log.W.F("⚠️ Neo4j still over memory limit: %.1f%%, doubling throttle to %.1fx",
|
||||
memoryPressure*100, float64(newMult)/100.0)
|
||||
}
|
||||
} else {
|
||||
// Memory is under threshold
|
||||
if m.inEmergencyMode.Load() {
|
||||
m.inEmergencyMode.Store(false)
|
||||
m.throttleMultiplier.Store(100) // Reset to 1.0x
|
||||
log.I.F("✅ Neo4j exiting emergency mode: memory %.1f%% < threshold %.1f%%",
|
||||
memoryPressure*100, threshold*100)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IncrementActiveReads tracks an active read operation.
|
||||
// Call this when starting a read, and call the returned function when done.
|
||||
func (m *Neo4jMonitor) IncrementActiveReads() func() {
|
||||
|
||||
@@ -69,8 +69,11 @@ func (c *NIP11Cache) Get(ctx context.Context, relayURL string) (*relayinfo.T, er
|
||||
|
||||
// fetchNIP11 fetches relay information document from a given URL
|
||||
func (c *NIP11Cache) fetchNIP11(ctx context.Context, relayURL string) (*relayinfo.T, error) {
|
||||
// Construct NIP-11 URL
|
||||
// Convert WebSocket URL to HTTP URL for NIP-11 fetch
|
||||
// wss:// -> https://, ws:// -> http://
|
||||
nip11URL := relayURL
|
||||
nip11URL = strings.Replace(nip11URL, "wss://", "https://", 1)
|
||||
nip11URL = strings.Replace(nip11URL, "ws://", "http://", 1)
|
||||
if !strings.HasSuffix(nip11URL, "/") {
|
||||
nip11URL += "/"
|
||||
}
|
||||
|
||||
@@ -1 +1 @@
|
||||
v0.35.0
|
||||
v0.35.5
|
||||
|
||||
Reference in New Issue
Block a user