Compare commits
143 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
dd80cc767d
|
|||
|
423270402b
|
|||
|
e929c09476
|
|||
|
429c8acaef
|
|||
|
f3f933675e
|
|||
|
b761a04422
|
|||
|
8d61b8e44c
|
|||
|
19e265bf39
|
|||
|
c41bcb2652
|
|||
|
a4dd177eb5
|
|||
|
9020bb8164
|
|||
| 3fe4537cd9 | |||
|
7ec8698b62
|
|||
|
2514f875e6
|
|||
|
a6350c8e80
|
|||
|
6c3d22cb38
|
|||
|
8adb129fbe
|
|||
|
fd698af1ca
|
|||
|
ac4fd506e5
|
|||
|
8898b20d4b
|
|||
|
b351d0fb78
|
|||
|
9c8ff2976d
|
|||
|
a7dd958585
|
|||
|
8eb5b839b0
|
|||
|
e57169eeae
|
|||
|
109326dfa3
|
|||
| 52911354a7 | |||
|
b74f4757e7
|
|||
| 2d0ebfe032 | |||
| fff61ceca1 | |||
| b7b7dc7353 | |||
| 996fb3aeb7 | |||
| b9a713d81d | |||
|
1e6ce84e26
|
|||
|
0361f3843a
|
|||
|
4317e8ba4a
|
|||
|
9094f36d6e
|
|||
|
9314467f55
|
|||
|
19e6520587
|
|||
|
9e59a6c315
|
|||
|
9449435c65
|
|||
|
df8e66d9a7
|
|||
|
96eab2270d
|
|||
|
c0bd7d8da3
|
|||
|
1ffb7afb01
|
|||
|
ffa9d85ba5
|
|||
|
1223b1b20e
|
|||
|
deb56664e2
|
|||
|
1641d18993
|
|||
|
eab5d236db
|
|||
|
f3e7188816
|
|||
|
39957c2ebf
|
|||
|
4528d44fc7
|
|||
|
7b19db5806
|
|||
|
14d4417aec
|
|||
|
bdda37732c
|
|||
|
0024611179
|
|||
|
699ba0554e
|
|||
|
c62d685fa4
|
|||
|
6935575654
|
|||
|
80043b46b3
|
|||
|
c68654dccc
|
|||
|
72c6d16739
|
|||
|
366d35ec28
|
|||
|
c36cec44c4
|
|||
|
c91a283520
|
|||
|
bb0693f455
|
|||
|
0d7943be89
|
|||
|
978d9b88cd
|
|||
|
bbfb9b7300
|
|||
|
5b06906673
|
|||
|
f5c3da9bc3
|
|||
|
c608e1075b
|
|||
|
5237fb1a1f
|
|||
|
6901950059
|
|||
|
251fc17933
|
|||
|
fdb9e18b03
|
|||
|
67552edf04
|
|||
|
f25b760d84
|
|||
|
bfa38822e0
|
|||
|
eac5e05e77
|
|||
|
b72f2dd51e
|
|||
|
cc32703be0
|
|||
|
994d26bb09
|
|||
|
ea2d833e66
|
|||
|
af04f89df8
|
|||
|
fab2f104ff
|
|||
|
06940efcec
|
|||
|
0ba36a3f67
|
|||
|
d4bee83992
|
|||
|
aabb536d13
|
|||
|
498073460c
|
|||
|
11d378bfc3
|
|||
|
9b7e8d28de
|
|||
|
c16ee76638
|
|||
|
132fdc9f36
|
|||
|
4f1d48c247
|
|||
|
651791aec1
|
|||
|
53d649c64e
|
|||
|
4dafab3fd6
|
|||
|
f2475c48b7
|
|||
|
b5448f4153
|
|||
|
11d318d4e3
|
|||
|
53e8e160dd
|
|||
|
90c9198ebe
|
|||
|
4bbbbb1bb6
|
|||
|
56ab6eaa81
|
|||
|
e3c931fcf9
|
|||
|
0544159d4b
|
|||
|
65e1dd6183
|
|||
|
0e83a56025
|
|||
|
93d6871488
|
|||
|
681cdb3a64
|
|||
|
901b4ff16a
|
|||
|
6d34664cf8
|
|||
|
dac6a30625
|
|||
|
5959d5dc7e
|
|||
|
1d07875652
|
|||
|
8ec0c49ecd
|
|||
| 525df97679 | |||
| e2ad580c65 | |||
| ba287ee644 | |||
| c391b9db46 | |||
| 59f246d304 | |||
| eaed2294bc | |||
| ae0d4f5b68 | |||
|
4ee09ada17
|
|||
|
e91c591a6f
|
|||
|
2323545d4b
|
|||
|
1d18425677
|
|||
|
fc68bcf3cb
|
|||
|
affd6c1ebc
|
|||
|
6e103c454d
|
|||
|
db3f98b8cb
|
|||
|
43404d6a07
|
|||
|
49bdf3f5d7
|
|||
|
a2449e24ae
|
|||
|
5c129e078e
|
|||
|
b28acc0c29
|
|||
|
71b699c5c5
|
|||
|
8164330f29
|
|||
|
1226b1f534
|
|||
|
3aa56ebe66
|
94
.github/workflows/go.yml
vendored
Normal file
94
.github/workflows/go.yml
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
# This workflow will build a golang project
|
||||
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
|
||||
#
|
||||
# Release Process:
|
||||
# 1. Update the version in pkg/version/version file (e.g., v1.2.3)
|
||||
# 2. Create and push a tag matching the version:
|
||||
# git tag v1.2.3
|
||||
# git push origin v1.2.3
|
||||
# 3. The workflow will automatically:
|
||||
# - Build binaries for multiple platforms (Linux, macOS, Windows)
|
||||
# - Create a GitHub release with the binaries
|
||||
# - Generate release notes
|
||||
|
||||
name: Go
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
tags:
|
||||
- 'v[0-9]+.[0-9]+.[0-9]+'
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
|
||||
jobs:
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: '1.24'
|
||||
|
||||
- name: Install libsecp256k1
|
||||
run: ./scripts/ubuntu_install_libsecp256k1.sh
|
||||
|
||||
- name: Build with cgo
|
||||
run: go build -v ./...
|
||||
|
||||
- name: Test with cgo
|
||||
run: go test -v ./...
|
||||
|
||||
- name: Set CGO off
|
||||
run: echo "CGO_ENABLED=0" >> $GITHUB_ENV
|
||||
|
||||
- name: Build
|
||||
run: go build -v ./...
|
||||
|
||||
- name: Test
|
||||
run: go test -v ./...
|
||||
|
||||
- name: Build Release Binaries
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
run: |
|
||||
# Extract version from tag (e.g., v1.2.3 -> 1.2.3)
|
||||
VERSION=${GITHUB_REF#refs/tags/v}
|
||||
echo "Building release binaries for version $VERSION"
|
||||
|
||||
# Create directory for binaries
|
||||
mkdir -p release-binaries
|
||||
|
||||
# Build for different platforms
|
||||
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build --ldflags '-extldflags "-static"' -o release-binaries/orly-${VERSION}-linux-amd64 .
|
||||
GOOS=linux GOARCH=arm64 CGO_ENABLED=0 go build --ldflags '-extldflags "-static"' -o release-binaries/orly-${VERSION}-linux-arm64 .
|
||||
GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-darwin-amd64 .
|
||||
GOOS=darwin GOARCH=arm64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-darwin-arm64 .
|
||||
GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-windows-amd64.exe .
|
||||
|
||||
# Build cmd executables
|
||||
for cmd in lerproxy nauth nurl vainstr walletcli; do
|
||||
echo "Building $cmd"
|
||||
GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o --ldflags '-extldflags "-static"' release-binaries/${cmd}-${VERSION}-linux-amd64 ./cmd/${cmd}
|
||||
GOOS=linux GOARCH=arm64 CGO_ENABLED=0 go build -o --ldflags '-extldflags "-static"' release-binaries/${cmd}-${VERSION}-linux-arm64 ./cmd/${cmd}
|
||||
GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build -o release-binaries/${cmd}-${VERSION}-darwin-amd64 ./cmd/${cmd}
|
||||
GOOS=darwin GOARCH=arm64 CGO_ENABLED=0 go build -o release-binaries/${cmd}-${VERSION}-darwin-arm64 ./cmd/${cmd}
|
||||
GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o release-binaries/${cmd}-${VERSION}-windows-amd64.exe ./cmd/${cmd}
|
||||
done
|
||||
|
||||
# Create checksums
|
||||
cd release-binaries
|
||||
sha256sum * > SHA256SUMS.txt
|
||||
cd ..
|
||||
|
||||
- name: Create GitHub Release
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
uses: softprops/action-gh-release@v1
|
||||
with:
|
||||
files: release-binaries/*
|
||||
draft: false
|
||||
prerelease: false
|
||||
generate_release_notes: true
|
||||
|
||||
13
.gitignore
vendored
13
.gitignore
vendored
@@ -64,7 +64,7 @@ node_modules/**
|
||||
!.gitmodules
|
||||
!*.txt
|
||||
!*.sum
|
||||
!version
|
||||
!pkg/version
|
||||
!*.service
|
||||
!*.benc
|
||||
!*.png
|
||||
@@ -84,13 +84,14 @@ node_modules/**
|
||||
!*.xml
|
||||
!.name
|
||||
!.gitignore
|
||||
|
||||
!version
|
||||
!out.jsonl
|
||||
# ...even if they are in subdirectories
|
||||
!*/
|
||||
/blocklist.json
|
||||
/gui/gui/main.wasm
|
||||
/gui/gui/index.html
|
||||
database/testrealy
|
||||
pkg/database/testrealy
|
||||
/.idea/workspace.xml
|
||||
/.idea/dictionaries/project.xml
|
||||
/.idea/shelf/Add_tombstone_handling__enhance_event_ID_logic__update_imports.xml
|
||||
@@ -99,3 +100,9 @@ database/testrealy
|
||||
/.idea/modules.xml
|
||||
/.idea/orly.dev.iml
|
||||
/.idea/vcs.xml
|
||||
/.idea/codeStyles/codeStyleConfig.xml
|
||||
/.idea/material_theme_project_new.xml
|
||||
/.idea/orly.iml
|
||||
/.idea/go.imports.xml
|
||||
/.idea/inspectionProfiles/Project_Default.xml
|
||||
/.idea/.name
|
||||
|
||||
@@ -1,199 +0,0 @@
|
||||
// Package config provides a go-simpler.org/env configuration table and helpers
|
||||
// for working with the list of key/value lists stored in .env files.
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"orly.dev/utils/chk"
|
||||
env2 "orly.dev/utils/env"
|
||||
"orly.dev/utils/log"
|
||||
"orly.dev/utils/lol"
|
||||
"orly.dev/version"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/adrg/xdg"
|
||||
"go-simpler.org/env"
|
||||
|
||||
"orly.dev/utils/apputil"
|
||||
)
|
||||
|
||||
// C is the configuration for realy relay. These are read from the environment
|
||||
// if present, or if a .env file is found in ~/.config/realy/ that is read
|
||||
// instead and overrides anything else.
|
||||
type C struct {
|
||||
AppName string `env:"ORLY_APP_NAME" default:"orly"`
|
||||
Config string `env:"ORLY_CONFIG_DIR" usage:"location for configuration file, which has the name '.env' to make it harder to delete, and is a standard environment KEY=value<newline>... style"`
|
||||
State string `env:"ORLY_STATE_DATA_DIR" usage:"storage location for state data affected by dynamic interactive interfaces"`
|
||||
DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the ratel event store"`
|
||||
Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
|
||||
DNS string `env:"ORLY_DNS" usage:"external DNS name that points at the relay"`
|
||||
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
|
||||
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
|
||||
DbLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
|
||||
Pprof bool `env:"ORLY_PPROF" default:"false" usage:"enable pprof on 127.0.0.1:6060"`
|
||||
}
|
||||
|
||||
// New creates a new config.C.
|
||||
func New() (cfg *C, err error) {
|
||||
cfg = &C{}
|
||||
if err = env.Load(cfg, &env.Options{SliceSep: ","}); chk.T(err) {
|
||||
return
|
||||
}
|
||||
if cfg.Config == "" {
|
||||
cfg.Config = filepath.Join(xdg.ConfigHome, cfg.AppName)
|
||||
}
|
||||
if cfg.DataDir == "" {
|
||||
cfg.DataDir = filepath.Join(xdg.DataHome, cfg.AppName)
|
||||
}
|
||||
envPath := filepath.Join(cfg.Config, ".env")
|
||||
if apputil.FileExists(envPath) {
|
||||
var e env2.Env
|
||||
if e, err = env2.GetEnv(envPath); chk.T(err) {
|
||||
return
|
||||
}
|
||||
if err = env.Load(
|
||||
cfg, &env.Options{SliceSep: ",", Source: e},
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
lol.SetLogLevel(cfg.LogLevel)
|
||||
log.I.F("loaded configuration from %s", envPath)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// HelpRequested returns true if any of the common types of help invocation are
|
||||
// found as the first command line parameter/flag.
|
||||
func HelpRequested() (help bool) {
|
||||
if len(os.Args) > 1 {
|
||||
switch strings.ToLower(os.Args[1]) {
|
||||
case "help", "-h", "--h", "-help", "--help", "?":
|
||||
help = true
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetEnv processes os.Args to detect a request for printing the current
|
||||
// settings as a list of environment variable key/values.
|
||||
func GetEnv() (requested bool) {
|
||||
if len(os.Args) > 1 {
|
||||
switch strings.ToLower(os.Args[1]) {
|
||||
case "env":
|
||||
requested = true
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// KV is a key/value pair.
|
||||
type KV struct{ Key, Value string }
|
||||
|
||||
// KVSlice is a collection of key/value pairs.
|
||||
type KVSlice []KV
|
||||
|
||||
func (kv KVSlice) Len() int { return len(kv) }
|
||||
func (kv KVSlice) Less(i, j int) bool { return kv[i].Key < kv[j].Key }
|
||||
func (kv KVSlice) Swap(i, j int) { kv[i], kv[j] = kv[j], kv[i] }
|
||||
|
||||
// Compose merges two KVSlice together, replacing the values of earlier keys
|
||||
// with same named KV items later in the slice (enabling compositing two
|
||||
// together as a .env, as well as them being composed as structs.
|
||||
func (kv KVSlice) Compose(kv2 KVSlice) (out KVSlice) {
|
||||
// duplicate the initial KVSlice
|
||||
for _, p := range kv {
|
||||
out = append(out, p)
|
||||
}
|
||||
out:
|
||||
for i, p := range kv2 {
|
||||
for j, q := range out {
|
||||
// if the key is repeated, replace the value
|
||||
if p.Key == q.Key {
|
||||
out[j].Value = kv2[i].Value
|
||||
continue out
|
||||
}
|
||||
}
|
||||
out = append(out, p)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// EnvKV turns a struct with `env` keys (used with go-simpler/env) into a
|
||||
// standard formatted environment variable key/value pair list, one per line.
|
||||
// Note you must dereference a pointer type to use this. This allows the
|
||||
// composition of the config in this file with an extended form with a
|
||||
// customized variant of realy to produce correct environment variables both
|
||||
// read and write.
|
||||
func EnvKV(cfg any) (m KVSlice) {
|
||||
t := reflect.TypeOf(cfg)
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
k := t.Field(i).Tag.Get("env")
|
||||
v := reflect.ValueOf(cfg).Field(i).Interface()
|
||||
var val string
|
||||
switch v.(type) {
|
||||
case string:
|
||||
val = v.(string)
|
||||
case int, bool, time.Duration:
|
||||
val = fmt.Sprint(v)
|
||||
case []string:
|
||||
arr := v.([]string)
|
||||
if len(arr) > 0 {
|
||||
val = strings.Join(arr, ",")
|
||||
}
|
||||
}
|
||||
// this can happen with embedded structs
|
||||
if k == "" {
|
||||
continue
|
||||
}
|
||||
m = append(m, KV{k, val})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// PrintEnv renders the key/values of a config.C to a provided io.Writer.
|
||||
func PrintEnv(cfg *C, printer io.Writer) {
|
||||
kvs := EnvKV(*cfg)
|
||||
sort.Sort(kvs)
|
||||
for _, v := range kvs {
|
||||
_, _ = fmt.Fprintf(printer, "%s=%s\n", v.Key, v.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// PrintHelp outputs a help text listing the configuration options and default
|
||||
// values to a provided io.Writer (usually os.Stderr or os.Stdout).
|
||||
func PrintHelp(cfg *C, printer io.Writer) {
|
||||
_, _ = fmt.Fprintf(
|
||||
printer,
|
||||
"%s %s\n\n", cfg.AppName, version.V,
|
||||
)
|
||||
|
||||
_, _ = fmt.Fprintf(
|
||||
printer,
|
||||
"Environment variables that configure %s:\n\n", cfg.AppName,
|
||||
)
|
||||
|
||||
env.Usage(cfg, printer, &env.Options{SliceSep: ","})
|
||||
_, _ = fmt.Fprintf(
|
||||
printer,
|
||||
"\nCLI parameter 'help' also prints this information\n"+
|
||||
"\n.env file found at the path %s will be automatically "+
|
||||
"loaded for configuration.\nset these two variables for a custom load path,"+
|
||||
" this file will be created on first startup.\nenvironment overrides it and "+
|
||||
"you can also edit the file to set configuration options\n\n"+
|
||||
"use the parameter 'env' to print out the current configuration to the terminal\n\n"+
|
||||
"set the environment using\n\n\t%s env > %s/.env\n", os.Args[0],
|
||||
cfg.Config,
|
||||
cfg.Config,
|
||||
)
|
||||
|
||||
fmt.Fprintf(printer, "\ncurrent configuration:\n\n")
|
||||
PrintEnv(cfg, printer)
|
||||
fmt.Fprintln(printer)
|
||||
return
|
||||
}
|
||||
81
app/main.go
81
app/main.go
@@ -1,81 +0,0 @@
|
||||
// Package app implements the realy nostr relay with a simple follow/mute list authentication scheme and the new HTTP REST based protocol.
|
||||
package app
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"orly.dev/app/config"
|
||||
"orly.dev/encoders/event"
|
||||
"orly.dev/encoders/filter"
|
||||
"orly.dev/encoders/filters"
|
||||
"orly.dev/interfaces/store"
|
||||
"orly.dev/utils/context"
|
||||
)
|
||||
|
||||
type List map[string]struct{}
|
||||
|
||||
type Relay struct {
|
||||
sync.Mutex
|
||||
*config.C
|
||||
Store store.I
|
||||
}
|
||||
|
||||
func (r *Relay) Name() string { return r.C.AppName }
|
||||
|
||||
func (r *Relay) Storage() store.I { return r.Store }
|
||||
|
||||
func (r *Relay) Init() (err error) {
|
||||
// for _, src := range r.C.Owners {
|
||||
// if len(src) < 1 {
|
||||
// continue
|
||||
// }
|
||||
// dst := make([]byte, len(src)/2)
|
||||
// if _, err = hex.DecBytes(dst, []byte(src)); chk.E(err) {
|
||||
// if dst, err = bech32encoding.NpubToBytes([]byte(src)); chk.E(err) {
|
||||
// continue
|
||||
// }
|
||||
// }
|
||||
// r.owners = append(r.owners, dst)
|
||||
// }
|
||||
// if len(r.owners) > 0 {
|
||||
// log.F.C(func() string {
|
||||
// ownerIds := make([]string, len(r.owners))
|
||||
// for i, npub := range r.owners {
|
||||
// ownerIds[i] = hex.Enc(npub)
|
||||
// }
|
||||
// owners := strings.Join(ownerIds, ",")
|
||||
// return fmt.Sprintf("owners %s", owners)
|
||||
// })
|
||||
// r.ZeroLists()
|
||||
// r.CheckOwnerLists(context.Bg())
|
||||
// }
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Relay) AcceptEvent(
|
||||
c context.T, evt *event.E, hr *http.Request,
|
||||
origin string, authedPubkey []byte,
|
||||
) (accept bool, notice string, afterSave func()) {
|
||||
accept = true
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Relay) AcceptFilter(
|
||||
c context.T, hr *http.Request, f *filter.S,
|
||||
authedPubkey []byte,
|
||||
) (allowed *filter.S, ok bool, modified bool) {
|
||||
allowed = f
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Relay) AcceptReq(
|
||||
c context.T, hr *http.Request, id []byte,
|
||||
ff *filters.T, authedPubkey []byte,
|
||||
) (allowed *filters.T, ok bool, modified bool) {
|
||||
|
||||
allowed = ff
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
@@ -1,70 +0,0 @@
|
||||
package realy
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"orly.dev/interfaces/relay"
|
||||
"orly.dev/utils/normalize"
|
||||
"strings"
|
||||
|
||||
"orly.dev/encoders/event"
|
||||
"orly.dev/interfaces/store"
|
||||
"orly.dev/protocol/socketapi"
|
||||
"orly.dev/utils/context"
|
||||
)
|
||||
|
||||
func (s *Server) addEvent(
|
||||
c context.T, rl relay.I, ev *event.E,
|
||||
hr *http.Request, origin string,
|
||||
authedPubkey []byte,
|
||||
) (accepted bool, message []byte) {
|
||||
|
||||
if ev == nil {
|
||||
return false, normalize.Invalid.F("empty event")
|
||||
}
|
||||
// sto := rl.Storage()
|
||||
// advancedSaver, _ := sto.(relay.AdvancedSaver)
|
||||
// don't allow storing event with protected marker as per nip-70 with auth enabled.
|
||||
// if (s.authRequired || !s.publicReadable) && ev.Tags.ContainsProtectedMarker() {
|
||||
// if len(authedPubkey) == 0 || !bytes.Equal(ev.Pubkey, authedPubkey) {
|
||||
// return false,
|
||||
// []byte(fmt.Sprintf("event with relay marker tag '-' (nip-70 protected event) "+
|
||||
// "may only be published by matching npub: %0x is not %0x",
|
||||
// authedPubkey, ev.Pubkey))
|
||||
// }
|
||||
// }
|
||||
if ev.Kind.IsEphemeral() {
|
||||
} else {
|
||||
// if advancedSaver != nil {
|
||||
// advancedSaver.BeforeSave(c, ev)
|
||||
// }
|
||||
if saveErr := s.Publish(c, ev); saveErr != nil {
|
||||
if errors.Is(saveErr, store.ErrDupEvent) {
|
||||
return false, []byte(saveErr.Error())
|
||||
}
|
||||
errmsg := saveErr.Error()
|
||||
if socketapi.NIP20prefixmatcher.MatchString(errmsg) {
|
||||
if strings.Contains(errmsg, "tombstone") {
|
||||
return false, normalize.Error.F("event was deleted, not storing it again")
|
||||
}
|
||||
if strings.HasPrefix(errmsg, string(normalize.Blocked)) {
|
||||
return false, []byte(errmsg)
|
||||
}
|
||||
return false, []byte(errmsg)
|
||||
} else {
|
||||
return false, []byte(errmsg)
|
||||
}
|
||||
}
|
||||
// if advancedSaver != nil {
|
||||
// advancedSaver.AfterSave(ev)
|
||||
// }
|
||||
}
|
||||
// var authRequired bool
|
||||
// if ar, ok := rl.(relay.Authenticator); ok {
|
||||
// authRequired = ar.AuthRequired()
|
||||
// }
|
||||
// notify subscribers
|
||||
s.listeners.Deliver(ev)
|
||||
accepted = true
|
||||
return
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package realy
|
||||
|
||||
import (
|
||||
"orly.dev/utils/log"
|
||||
)
|
||||
|
||||
func (s *Server) disconnect() {
|
||||
for client := range s.clients {
|
||||
log.I.F("closing client %s", client.RemoteAddr())
|
||||
client.Close()
|
||||
}
|
||||
}
|
||||
@@ -1,49 +0,0 @@
|
||||
package realy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"orly.dev/interfaces/relay"
|
||||
"orly.dev/utils/chk"
|
||||
"orly.dev/utils/log"
|
||||
"orly.dev/version"
|
||||
"sort"
|
||||
|
||||
"orly.dev/protocol/relayinfo"
|
||||
)
|
||||
|
||||
func (s *Server) handleRelayInfo(w http.ResponseWriter, r *http.Request) {
|
||||
r.Header.Set("Content-Type", "application/json")
|
||||
log.I.Ln("handling relay information document")
|
||||
var info *relayinfo.T
|
||||
if informationer, ok := s.relay.(relay.Informationer); ok {
|
||||
info = informationer.GetNIP11InformationDocument()
|
||||
} else {
|
||||
supportedNIPs := relayinfo.GetList(
|
||||
relayinfo.BasicProtocol,
|
||||
relayinfo.EncryptedDirectMessage,
|
||||
relayinfo.EventDeletion,
|
||||
relayinfo.RelayInformationDocument,
|
||||
relayinfo.GenericTagQueries,
|
||||
relayinfo.NostrMarketplace,
|
||||
relayinfo.EventTreatment,
|
||||
relayinfo.CommandResults,
|
||||
relayinfo.ParameterizedReplaceableEvents,
|
||||
relayinfo.ExpirationTimestamp,
|
||||
relayinfo.ProtectedEvents,
|
||||
relayinfo.RelayListMetadata,
|
||||
)
|
||||
sort.Sort(supportedNIPs)
|
||||
log.T.Ln("supported NIPs", supportedNIPs)
|
||||
info = &relayinfo.T{
|
||||
Name: s.relay.Name(),
|
||||
Description: version.Description,
|
||||
Nips: supportedNIPs, Software: version.URL,
|
||||
Version: version.V,
|
||||
Limitation: relayinfo.Limits{},
|
||||
Icon: "https://cdn.satellite.earth/ac9778868fbf23b63c47c769a74e163377e6ea94d3f0f31711931663d035c4f6.png",
|
||||
}
|
||||
}
|
||||
if err := json.NewEncoder(w).Encode(info); chk.E(err) {
|
||||
}
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func GenerateDescription(text string, scopes []string) string {
|
||||
if len(scopes) == 0 {
|
||||
return text
|
||||
}
|
||||
result := make([]string, 0)
|
||||
for _, value := range scopes {
|
||||
result = append(result, "`"+value+"`")
|
||||
}
|
||||
return text + "<br/><br/>**Scopes**<br/>" + strings.Join(result, ", ")
|
||||
}
|
||||
|
||||
func GetRemoteFromReq(r *http.Request) (rr string) {
|
||||
// reverse proxy should populate this field so we see the remote not the proxy
|
||||
rem := r.Header.Get("X-Forwarded-For")
|
||||
if rem == "" {
|
||||
rr = r.RemoteAddr
|
||||
} else {
|
||||
splitted := strings.Split(rem, " ")
|
||||
if len(splitted) == 1 {
|
||||
rr = splitted[0]
|
||||
}
|
||||
if len(splitted) == 2 {
|
||||
rr = splitted[1]
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
package interfaces
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"orly.dev/app/realy/publish"
|
||||
"orly.dev/encoders/event"
|
||||
"orly.dev/interfaces/relay"
|
||||
"orly.dev/interfaces/store"
|
||||
"orly.dev/utils/context"
|
||||
)
|
||||
|
||||
type Server interface {
|
||||
AddEvent(
|
||||
c context.T, rl relay.I, ev *event.E, hr *http.Request,
|
||||
origin string, authedPubkey []byte,
|
||||
) (
|
||||
accepted bool,
|
||||
message []byte,
|
||||
)
|
||||
Context() context.T
|
||||
Disconnect()
|
||||
Publisher() *publish.S
|
||||
Publish(c context.T, evt *event.E) (err error)
|
||||
Relay() relay.I
|
||||
Shutdown()
|
||||
Storage() store.I
|
||||
// Options() *options.T
|
||||
// AcceptEvent(
|
||||
// c context.T, ev *event.E, hr *http.Request, origin string,
|
||||
// authedPubkey []byte) (accept bool, notice string, afterSave func())
|
||||
// AdminAuth(r *http.Request,
|
||||
// tolerance ...time.Duration) (authed bool, pubkey []byte)
|
||||
// AuthRequired() bool
|
||||
// Configuration() store.Configuration
|
||||
// Owners() [][]byte
|
||||
// PublicReadable() bool
|
||||
// SetConfiguration(*store.Configuration)
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package publisher
|
||||
|
||||
import (
|
||||
"orly.dev/encoders/event"
|
||||
)
|
||||
|
||||
type Message interface {
|
||||
Type() string
|
||||
}
|
||||
|
||||
type I interface {
|
||||
Message
|
||||
Deliver(ev *event.E)
|
||||
Receive(msg Message)
|
||||
}
|
||||
|
||||
type Publishers []I
|
||||
@@ -1,31 +0,0 @@
|
||||
package realy
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"orly.dev/app/realy/interfaces"
|
||||
"orly.dev/app/realy/publish"
|
||||
"orly.dev/encoders/event"
|
||||
"orly.dev/interfaces/relay"
|
||||
"orly.dev/interfaces/store"
|
||||
"orly.dev/utils/context"
|
||||
)
|
||||
|
||||
func (s *Server) Storage() store.I { return s.relay.Storage() }
|
||||
|
||||
func (s *Server) Relay() relay.I { return s.relay }
|
||||
|
||||
func (s *Server) Disconnect() { s.disconnect() }
|
||||
|
||||
func (s *Server) AddEvent(
|
||||
c context.T, rl relay.I, ev *event.E, hr *http.Request, origin string,
|
||||
authedPubkey []byte,
|
||||
) (accepted bool, message []byte) {
|
||||
|
||||
return s.addEvent(c, rl, ev, hr, origin, authedPubkey)
|
||||
}
|
||||
|
||||
func (s *Server) Publisher() *publish.S { return s.listeners }
|
||||
|
||||
func (s *Server) Context() context.T { return s.Ctx }
|
||||
|
||||
var _ interfaces.Server = &Server{}
|
||||
@@ -1,154 +0,0 @@
|
||||
package realy
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"orly.dev/app/realy/helpers"
|
||||
"orly.dev/app/realy/options"
|
||||
"orly.dev/app/realy/publish"
|
||||
"orly.dev/interfaces/relay"
|
||||
"orly.dev/utils/chk"
|
||||
"orly.dev/utils/log"
|
||||
realy_lol "orly.dev/version"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/danielgtaylor/huma/v2"
|
||||
"github.com/fasthttp/websocket"
|
||||
"github.com/rs/cors"
|
||||
|
||||
"orly.dev/interfaces/signer"
|
||||
"orly.dev/protocol/openapi"
|
||||
"orly.dev/protocol/socketapi"
|
||||
"orly.dev/utils/context"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
Ctx context.T
|
||||
Cancel context.F
|
||||
options *options.T
|
||||
relay relay.I
|
||||
clientsMu sync.Mutex
|
||||
clients map[*websocket.Conn]struct{}
|
||||
Addr string
|
||||
mux *openapi.ServeMux
|
||||
httpServer *http.Server
|
||||
// authRequired bool
|
||||
// publicReadable bool
|
||||
// maxLimit int
|
||||
// admins []signer.I
|
||||
// owners [][]byte
|
||||
listeners *publish.S
|
||||
huma.API
|
||||
// ConfigurationMx sync.Mutex
|
||||
// configuration *store.Configuration
|
||||
}
|
||||
|
||||
type ServerParams struct {
|
||||
Ctx context.T
|
||||
Cancel context.F
|
||||
Rl relay.I
|
||||
DbPath string
|
||||
MaxLimit int
|
||||
Admins []signer.I
|
||||
Owners [][]byte
|
||||
PublicReadable bool
|
||||
}
|
||||
|
||||
func NewServer(sp *ServerParams, opts ...options.O) (s *Server, err error) {
|
||||
op := options.Default()
|
||||
for _, opt := range opts {
|
||||
opt(op)
|
||||
}
|
||||
if storage := sp.Rl.Storage(); storage != nil {
|
||||
if err = storage.Init(sp.DbPath); chk.T(err) {
|
||||
return nil, fmt.Errorf("storage init: %w", err)
|
||||
}
|
||||
}
|
||||
serveMux := openapi.NewServeMux()
|
||||
s = &Server{
|
||||
Ctx: sp.Ctx,
|
||||
Cancel: sp.Cancel,
|
||||
relay: sp.Rl,
|
||||
clients: make(map[*websocket.Conn]struct{}),
|
||||
mux: serveMux,
|
||||
options: op,
|
||||
listeners: publish.New(socketapi.New(), openapi.New()),
|
||||
API: openapi.NewHuma(
|
||||
serveMux, sp.Rl.Name(), realy_lol.V,
|
||||
realy_lol.Description,
|
||||
),
|
||||
}
|
||||
// register the http API operations
|
||||
huma.AutoRegister(s.API, openapi.NewOperations(s))
|
||||
go func() {
|
||||
if err := s.relay.Init(); chk.E(err) {
|
||||
s.Shutdown()
|
||||
}
|
||||
}()
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// ServeHTTP implements the relay's http handler.
|
||||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// standard nostr protocol only governs the "root" path of the relay and
|
||||
// websockets
|
||||
if r.URL.Path == "/" && r.Header.Get("Accept") == "application/nostr+json" {
|
||||
s.handleRelayInfo(w, r)
|
||||
return
|
||||
}
|
||||
if r.URL.Path == "/" && r.Header.Get("Upgrade") == "websocket" {
|
||||
s.handleWebsocket(w, r)
|
||||
return
|
||||
}
|
||||
log.I.F(
|
||||
"http request: %s from %s", r.URL.String(), helpers.GetRemoteFromReq(r),
|
||||
)
|
||||
s.mux.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
// Start up the relay.
|
||||
func (s *Server) Start(host string, port int, started ...chan bool) error {
|
||||
addr := net.JoinHostPort(host, strconv.Itoa(port))
|
||||
log.I.F("starting relay listener at %s", addr)
|
||||
ln, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.httpServer = &http.Server{
|
||||
Handler: cors.Default().Handler(s),
|
||||
Addr: addr,
|
||||
ReadHeaderTimeout: 7 * time.Second,
|
||||
IdleTimeout: 28 * time.Second,
|
||||
}
|
||||
for _, startedC := range started {
|
||||
close(startedC)
|
||||
}
|
||||
if err = s.httpServer.Serve(ln); errors.Is(err, http.ErrServerClosed) {
|
||||
} else if err != nil {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown the relay.
|
||||
func (s *Server) Shutdown() {
|
||||
log.I.Ln("shutting down relay")
|
||||
s.Cancel()
|
||||
log.W.Ln("closing event store")
|
||||
chk.E(s.relay.Storage().Close())
|
||||
log.W.Ln("shutting down relay listener")
|
||||
chk.E(s.httpServer.Shutdown(s.Ctx))
|
||||
if f, ok := s.relay.(relay.ShutdownAware); ok {
|
||||
f.OnShutdown(s.Ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// Router returns the servemux that handles paths on the HTTP server of the
|
||||
// relay.
|
||||
func (s *Server) Router() *http.ServeMux {
|
||||
return s.mux.ServeMux
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"orly.dev/utils/log"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"orly.dev/utils/context"
|
||||
)
|
||||
|
||||
func MonitorResources(c context.T) {
|
||||
tick := time.NewTicker(time.Minute * 15)
|
||||
log.I.Ln("running process", os.Args[0], os.Getpid())
|
||||
// memStats := &runtime.MemStats{}
|
||||
for {
|
||||
select {
|
||||
case <-c.Done():
|
||||
log.D.Ln("shutting down resource monitor")
|
||||
return
|
||||
case <-tick.C:
|
||||
// runtime.ReadMemStats(memStats)
|
||||
log.D.Ln(
|
||||
"# goroutines", runtime.NumGoroutine(), "# cgo calls",
|
||||
runtime.NumCgoCall(),
|
||||
)
|
||||
// log.D.S(memStats)
|
||||
}
|
||||
}
|
||||
}
|
||||
173
cmd/benchmark/BENCHMARK_RESULTS.md
Normal file
173
cmd/benchmark/BENCHMARK_RESULTS.md
Normal file
@@ -0,0 +1,173 @@
|
||||
# Orly Relay Benchmark Results
|
||||
|
||||
## Test Environment
|
||||
|
||||
- **Date**: August 5, 2025
|
||||
- **Relay**: Orly v0.4.14
|
||||
- **Port**: 3334 (WebSocket)
|
||||
- **System**: Linux 5.15.0-151-generic
|
||||
- **Storage**: BadgerDB v4
|
||||
|
||||
## Benchmark Test Results
|
||||
|
||||
### Test 1: Basic Performance (1,000 events, 1KB each)
|
||||
|
||||
**Parameters:**
|
||||
- Events: 1,000
|
||||
- Event size: 1,024 bytes
|
||||
- Concurrent publishers: 5
|
||||
- Queries: 50
|
||||
|
||||
**Results:**
|
||||
```
|
||||
Publish Performance:
|
||||
Events Published: 1,000
|
||||
Total Data: 4.01 MB
|
||||
Duration: 1.769s
|
||||
Rate: 565.42 events/second
|
||||
Bandwidth: 2.26 MB/second
|
||||
|
||||
Query Performance:
|
||||
Queries Executed: 50
|
||||
Events Returned: 2,000
|
||||
Duration: 3.058s
|
||||
Rate: 16.35 queries/second
|
||||
Avg Events/Query: 40.00
|
||||
```
|
||||
|
||||
### Test 2: Medium Load (10,000 events, 2KB each)
|
||||
|
||||
**Parameters:**
|
||||
- Events: 10,000
|
||||
- Event size: 2,048 bytes
|
||||
- Concurrent publishers: 10
|
||||
- Queries: 100
|
||||
|
||||
**Results:**
|
||||
```
|
||||
Publish Performance:
|
||||
Events Published: 10,000
|
||||
Total Data: 76.81 MB
|
||||
Duration: 598.301ms
|
||||
Rate: 16,714.00 events/second
|
||||
Bandwidth: 128.38 MB/second
|
||||
|
||||
Query Performance:
|
||||
Queries Executed: 100
|
||||
Events Returned: 4,000
|
||||
Duration: 8.923s
|
||||
Rate: 11.21 queries/second
|
||||
Avg Events/Query: 40.00
|
||||
```
|
||||
|
||||
### Test 3: High Concurrency (50,000 events, 512 bytes each)
|
||||
|
||||
**Parameters:**
|
||||
- Events: 50,000
|
||||
- Event size: 512 bytes
|
||||
- Concurrent publishers: 50
|
||||
- Queries: 200
|
||||
|
||||
**Results:**
|
||||
```
|
||||
Publish Performance:
|
||||
Events Published: 50,000
|
||||
Total Data: 108.63 MB
|
||||
Duration: 2.368s
|
||||
Rate: 21,118.66 events/second
|
||||
Bandwidth: 45.88 MB/second
|
||||
|
||||
Query Performance:
|
||||
Queries Executed: 200
|
||||
Events Returned: 8,000
|
||||
Duration: 36.146s
|
||||
Rate: 5.53 queries/second
|
||||
Avg Events/Query: 40.00
|
||||
```
|
||||
|
||||
### Test 4: Large Events (5,000 events, 10KB each)
|
||||
|
||||
**Parameters:**
|
||||
- Events: 5,000
|
||||
- Event size: 10,240 bytes
|
||||
- Concurrent publishers: 10
|
||||
- Queries: 50
|
||||
|
||||
**Results:**
|
||||
```
|
||||
Publish Performance:
|
||||
Events Published: 5,000
|
||||
Total Data: 185.26 MB
|
||||
Duration: 934.328ms
|
||||
Rate: 5,351.44 events/second
|
||||
Bandwidth: 198.28 MB/second
|
||||
|
||||
Query Performance:
|
||||
Queries Executed: 50
|
||||
Events Returned: 2,000
|
||||
Duration: 9.982s
|
||||
Rate: 5.01 queries/second
|
||||
Avg Events/Query: 40.00
|
||||
```
|
||||
|
||||
### Test 5: Query-Only Performance (500 queries)
|
||||
|
||||
**Parameters:**
|
||||
- Skip publishing phase
|
||||
- Queries: 500
|
||||
- Query limit: 100
|
||||
|
||||
**Results:**
|
||||
```
|
||||
Query Performance:
|
||||
Queries Executed: 500
|
||||
Events Returned: 20,000
|
||||
Duration: 1m14.384s
|
||||
Rate: 6.72 queries/second
|
||||
Avg Events/Query: 40.00
|
||||
```
|
||||
|
||||
## Performance Summary
|
||||
|
||||
### Publishing Performance
|
||||
|
||||
| Metric | Best Result | Test Configuration |
|
||||
|--------|-------------|-------------------|
|
||||
| **Peak Event Rate** | 21,118.66 events/sec | 50 concurrent publishers, 512-byte events |
|
||||
| **Peak Bandwidth** | 198.28 MB/sec | 10 concurrent publishers, 10KB events |
|
||||
| **Optimal Balance** | 16,714.00 events/sec @ 128.38 MB/sec | 10 concurrent publishers, 2KB events |
|
||||
|
||||
### Query Performance
|
||||
|
||||
| Query Type | Avg Rate | Notes |
|
||||
|------------|----------|--------|
|
||||
| **Light Load** | 16.35 queries/sec | 50 queries after 1K events |
|
||||
| **Medium Load** | 11.21 queries/sec | 100 queries after 10K events |
|
||||
| **Heavy Load** | 5.53 queries/sec | 200 queries after 50K events |
|
||||
| **Sustained** | 6.72 queries/sec | 500 continuous queries |
|
||||
|
||||
## Key Findings
|
||||
|
||||
1. **Optimal Concurrency**: The relay performs best with 10-50 concurrent publishers, achieving rates of 16,000-21,000 events/second.
|
||||
|
||||
2. **Event Size Impact**:
|
||||
- Smaller events (512B-2KB) achieve higher event rates
|
||||
- Larger events (10KB) achieve higher bandwidth utilization but lower event rates
|
||||
|
||||
3. **Query Performance**: Query performance varies with database size:
|
||||
- Fresh database: ~16 queries/second
|
||||
- After 50K events: ~6 queries/second
|
||||
|
||||
4. **Scalability**: The relay maintains consistent performance up to 50 concurrent connections and can sustain 21,000+ events/second under optimal conditions.
|
||||
|
||||
## Query Filter Distribution
|
||||
|
||||
The benchmark tested 5 different query patterns in rotation:
|
||||
1. Query by kind (20%)
|
||||
2. Query by time range (20%)
|
||||
3. Query by tag (20%)
|
||||
4. Query by author (20%)
|
||||
5. Complex queries with multiple conditions (20%)
|
||||
|
||||
All query types showed similar performance characteristics, indicating well-balanced indexing.
|
||||
|
||||
112
cmd/benchmark/README.md
Normal file
112
cmd/benchmark/README.md
Normal file
@@ -0,0 +1,112 @@
|
||||
# Orly Relay Benchmark Tool
|
||||
|
||||
A performance benchmarking tool for Nostr relays that tests both event ingestion speed and query performance.
|
||||
|
||||
## Quick Start (Simple Version)
|
||||
|
||||
The repository includes a simple standalone benchmark tool that doesn't require the full Orly dependencies:
|
||||
|
||||
```bash
|
||||
# Build the simple benchmark
|
||||
go build -o benchmark-simple ./benchmark_simple.go
|
||||
|
||||
# Run with default settings
|
||||
./benchmark-simple
|
||||
|
||||
# Or use the convenience script
|
||||
chmod +x run_benchmark.sh
|
||||
./run_benchmark.sh --relay ws://localhost:7447 --events 10000
|
||||
```
|
||||
|
||||
## Features
|
||||
|
||||
- **Event Publishing Benchmark**: Tests how fast a relay can accept and store events
|
||||
- **Query Performance Benchmark**: Tests various filter types and query speeds
|
||||
- **Concurrent Publishing**: Supports multiple concurrent publishers to stress test the relay
|
||||
- **Detailed Metrics**: Reports events/second, bandwidth usage, and query performance
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# Build the tool
|
||||
go build -o benchmark ./cmd/benchmark
|
||||
|
||||
# Run a full benchmark (publish and query)
|
||||
./benchmark -relay ws://localhost:7447 -events 10000 -queries 100
|
||||
|
||||
# Benchmark only publishing
|
||||
./benchmark -relay ws://localhost:7447 -events 50000 -concurrency 20 -skip-query
|
||||
|
||||
# Benchmark only querying
|
||||
./benchmark -relay ws://localhost:7447 -queries 500 -skip-publish
|
||||
|
||||
# Use custom event sizes
|
||||
./benchmark -relay ws://localhost:7447 -events 10000 -size 2048
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
- `-relay`: Relay URL to benchmark (default: ws://localhost:7447)
|
||||
- `-events`: Number of events to publish (default: 10000)
|
||||
- `-size`: Average size of event content in bytes (default: 1024)
|
||||
- `-concurrency`: Number of concurrent publishers (default: 10)
|
||||
- `-queries`: Number of queries to execute (default: 100)
|
||||
- `-query-limit`: Limit for each query (default: 100)
|
||||
- `-skip-publish`: Skip the publishing phase
|
||||
- `-skip-query`: Skip the query phase
|
||||
- `-v`: Enable verbose output
|
||||
|
||||
## Query Types Tested
|
||||
|
||||
The benchmark tests various query patterns:
|
||||
1. Query by kind
|
||||
2. Query by time range (last hour)
|
||||
3. Query by tag (p tags)
|
||||
4. Query by author
|
||||
5. Complex queries with multiple conditions
|
||||
|
||||
## Output
|
||||
|
||||
The tool provides detailed metrics including:
|
||||
|
||||
**Publish Performance:**
|
||||
- Total events published
|
||||
- Total data transferred
|
||||
- Publishing rate (events/second)
|
||||
- Bandwidth usage (MB/second)
|
||||
|
||||
**Query Performance:**
|
||||
- Total queries executed
|
||||
- Total events returned
|
||||
- Query rate (queries/second)
|
||||
- Average events per query
|
||||
|
||||
## Example Output
|
||||
|
||||
```
|
||||
Publishing 10000 events to ws://localhost:7447...
|
||||
Published 1000 events...
|
||||
Published 2000 events...
|
||||
...
|
||||
|
||||
Querying events from ws://localhost:7447...
|
||||
Executed 20 queries...
|
||||
Executed 40 queries...
|
||||
...
|
||||
|
||||
=== Benchmark Results ===
|
||||
|
||||
Publish Performance:
|
||||
Events Published: 10000
|
||||
Total Data: 12.34 MB
|
||||
Duration: 5.2s
|
||||
Rate: 1923.08 events/second
|
||||
Bandwidth: 2.37 MB/second
|
||||
|
||||
Query Performance:
|
||||
Queries Executed: 100
|
||||
Events Returned: 4523
|
||||
Duration: 2.1s
|
||||
Rate: 47.62 queries/second
|
||||
Avg Events/Query: 45.23
|
||||
```
|
||||
304
cmd/benchmark/benchmark_simple.go
Normal file
304
cmd/benchmark/benchmark_simple.go
Normal file
@@ -0,0 +1,304 @@
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/url"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/gobwas/ws"
|
||||
"github.com/gobwas/ws/wsutil"
|
||||
)
|
||||
|
||||
// Simple event structure for benchmarking; mirrors the NIP-01 wire
// format but carries random (invalid) keys and signatures.
type Event struct {
	ID        string     `json:"id"`         // hex sha256 of the serialized event
	Pubkey    string     `json:"pubkey"`     // hex-encoded 32 random bytes
	CreatedAt int64      `json:"created_at"` // unix timestamp in seconds
	Kind      int        `json:"kind"`       // event kind number
	Tags      [][]string `json:"tags"`       // list of tag tuples
	Content   string     `json:"content"`    // arbitrary payload
	Sig       string     `json:"sig"`        // hex-encoded 64 random bytes, not a valid signature
}
|
||||
|
||||
// Generate a test event
|
||||
func generateTestEvent(size int) *Event {
|
||||
content := make([]byte, size)
|
||||
rand.Read(content)
|
||||
|
||||
// Generate random pubkey and sig
|
||||
pubkey := make([]byte, 32)
|
||||
sig := make([]byte, 64)
|
||||
rand.Read(pubkey)
|
||||
rand.Read(sig)
|
||||
|
||||
ev := &Event{
|
||||
Pubkey: hex.EncodeToString(pubkey),
|
||||
CreatedAt: time.Now().Unix(),
|
||||
Kind: 1,
|
||||
Tags: [][]string{},
|
||||
Content: string(content),
|
||||
Sig: hex.EncodeToString(sig),
|
||||
}
|
||||
|
||||
// Generate ID (simplified)
|
||||
serialized, _ := json.Marshal([]interface{}{
|
||||
0,
|
||||
ev.Pubkey,
|
||||
ev.CreatedAt,
|
||||
ev.Kind,
|
||||
ev.Tags,
|
||||
ev.Content,
|
||||
})
|
||||
hash := sha256.Sum256(serialized)
|
||||
ev.ID = hex.EncodeToString(hash[:])
|
||||
|
||||
return ev
|
||||
}
|
||||
|
||||
// publishEvents publishes `count` events of approximately `size` bytes
// of content to the relay at relayURL, spread across `concurrency`
// websocket connections. It returns the number of events written, the
// total bytes sent, and the elapsed wall-clock time. Per-worker
// connection or write failures are logged, not returned.
func publishEvents(relayURL string, count int, size int, concurrency int) (int64, int64, time.Duration, error) {
	u, err := url.Parse(relayURL)
	if err != nil {
		return 0, 0, 0, err
	}

	var publishedEvents atomic.Int64
	var publishedBytes atomic.Int64
	var wg sync.WaitGroup

	// Distribute events as evenly as possible; the first extraEvents
	// workers each take one additional event.
	eventsPerWorker := count / concurrency
	extraEvents := count % concurrency

	start := time.Now()

	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		eventsToPublish := eventsPerWorker
		if i < extraEvents {
			eventsToPublish++
		}

		go func(workerID int, eventCount int) {
			defer wg.Done()

			// Connect to relay; each worker has its own connection.
			ctx := context.Background()
			conn, _, _, err := ws.Dial(ctx, u.String())
			if err != nil {
				log.Printf("Worker %d: connection error: %v", workerID, err)
				return
			}
			defer conn.Close()

			// Publish events sequentially on this connection.
			for j := 0; j < eventCount; j++ {
				ev := generateTestEvent(size)

				// Create EVENT message (["EVENT", <event>]).
				msg, _ := json.Marshal([]interface{}{"EVENT", ev})

				err := wsutil.WriteClientMessage(conn, ws.OpText, msg)
				if err != nil {
					log.Printf("Worker %d: write error: %v", workerID, err)
					continue
				}

				publishedEvents.Add(1)
				publishedBytes.Add(int64(len(msg)))

				// Read response (OK or error) so the worker stays in
				// lockstep with the relay rather than flooding it.
				_, _, err = wsutil.ReadServerData(conn)
				if err != nil {
					log.Printf("Worker %d: read error: %v", workerID, err)
				}
			}
		}(i, eventsToPublish)
	}

	wg.Wait()
	duration := time.Since(start)

	return publishedEvents.Load(), publishedBytes.Load(), duration, nil
}
|
||||
|
||||
// queryEvents runs `queries` REQ subscriptions against the relay at
// relayURL over a single websocket connection, rotating through five
// filter shapes. It returns queries executed, total events received,
// and the elapsed time. Individual query failures are logged and
// skipped.
func queryEvents(relayURL string, queries int, limit int) (int64, int64, time.Duration, error) {
	u, err := url.Parse(relayURL)
	if err != nil {
		return 0, 0, 0, err
	}

	ctx := context.Background()
	conn, _, _, err := ws.Dial(ctx, u.String())
	if err != nil {
		return 0, 0, 0, err
	}
	defer conn.Close()

	var totalQueries int64
	var totalEvents int64

	start := time.Now()

	for i := 0; i < queries; i++ {
		// Generate various filter types, cycling through five shapes so
		// each run exercises different index paths on the relay.
		var filter map[string]interface{}

		switch i % 5 {
		case 0:
			// Query by kind
			filter = map[string]interface{}{
				"kinds": []int{1},
				"limit": limit,
			}
		case 1:
			// Query by time range (the last hour)
			now := time.Now().Unix()
			filter = map[string]interface{}{
				"since": now - 3600,
				"until": now,
				"limit": limit,
			}
		case 2:
			// Query by tag (random p tag, so typically no matches)
			filter = map[string]interface{}{
				"#p": []string{hex.EncodeToString(randBytes(32))},
				"limit": limit,
			}
		case 3:
			// Query by author (random pubkey)
			filter = map[string]interface{}{
				"authors": []string{hex.EncodeToString(randBytes(32))},
				"limit": limit,
			}
		case 4:
			// Complex query combining kinds, author and time range
			now := time.Now().Unix()
			filter = map[string]interface{}{
				"kinds": []int{1, 6},
				"authors": []string{hex.EncodeToString(randBytes(32))},
				"since": now - 7200,
				"limit": limit,
			}
		}

		// Send REQ (["REQ", <sub id>, <filter>])
		subID := fmt.Sprintf("bench-%d", i)
		msg, _ := json.Marshal([]interface{}{"REQ", subID, filter})

		err := wsutil.WriteClientMessage(conn, ws.OpText, msg)
		if err != nil {
			log.Printf("Query %d: write error: %v", i, err)
			continue
		}

		// Read events until EOSE marks the end of stored results.
		eventCount := 0
		for {
			data, err := wsutil.ReadServerText(conn)
			if err != nil {
				log.Printf("Query %d: read error: %v", i, err)
				break
			}

			// Shadows the outer msg deliberately; only used for decode.
			var msg []interface{}
			if err := json.Unmarshal(data, &msg); err != nil {
				continue
			}

			if len(msg) < 2 {
				continue
			}

			msgType, ok := msg[0].(string)
			if !ok {
				continue
			}

			switch msgType {
			case "EVENT":
				eventCount++
			case "EOSE":
				goto done
			}
		}
	done:

		// Send CLOSE to release the subscription server-side.
		closeMsg, _ := json.Marshal([]interface{}{"CLOSE", subID})
		wsutil.WriteClientMessage(conn, ws.OpText, closeMsg)

		totalQueries++
		totalEvents += int64(eventCount)

		if totalQueries%20 == 0 {
			fmt.Printf(" Executed %d queries...\n", totalQueries)
		}
	}

	duration := time.Since(start)
	return totalQueries, totalEvents, duration, nil
}
|
||||
|
||||
// randBytes returns a slice of n pseudo-random bytes.
func randBytes(n int) []byte {
	out := make([]byte, n)
	rand.Read(out)
	return out
}
|
||||
|
||||
// main parses flags and runs the publish and/or query benchmark phases
// against the target relay, printing throughput statistics for each.
func main() {
	var (
		relayURL    = flag.String("relay", "ws://localhost:7447", "Relay URL to benchmark")
		eventCount  = flag.Int("events", 10000, "Number of events to publish")
		eventSize   = flag.Int("size", 1024, "Average size of event content in bytes")
		concurrency = flag.Int("concurrency", 10, "Number of concurrent publishers")
		queryCount  = flag.Int("queries", 100, "Number of queries to execute")
		queryLimit  = flag.Int("query-limit", 100, "Limit for each query")
		skipPublish = flag.Bool("skip-publish", false, "Skip publishing phase")
		skipQuery   = flag.Bool("skip-query", false, "Skip query phase")
	)
	flag.Parse()

	fmt.Printf("=== Nostr Relay Benchmark ===\n\n")

	// Phase 1: Publish events
	if !*skipPublish {
		fmt.Printf("Publishing %d events to %s...\n", *eventCount, *relayURL)
		published, bytes, duration, err := publishEvents(*relayURL, *eventCount, *eventSize, *concurrency)
		if err != nil {
			log.Fatalf("Publishing failed: %v", err)
		}

		fmt.Printf("\nPublish Performance:\n")
		fmt.Printf(" Events Published: %d\n", published)
		fmt.Printf(" Total Data: %.2f MB\n", float64(bytes)/1024/1024)
		fmt.Printf(" Duration: %s\n", duration)
		fmt.Printf(" Rate: %.2f events/second\n", float64(published)/duration.Seconds())
		fmt.Printf(" Bandwidth: %.2f MB/second\n", float64(bytes)/duration.Seconds()/1024/1024)
	}

	// Phase 2: Query events
	if !*skipQuery {
		fmt.Printf("\nQuerying events from %s...\n", *relayURL)
		queries, events, duration, err := queryEvents(*relayURL, *queryCount, *queryLimit)
		if err != nil {
			log.Fatalf("Querying failed: %v", err)
		}

		fmt.Printf("\nQuery Performance:\n")
		fmt.Printf(" Queries Executed: %d\n", queries)
		fmt.Printf(" Events Returned: %d\n", events)
		fmt.Printf(" Duration: %s\n", duration)
		fmt.Printf(" Rate: %.2f queries/second\n", float64(queries)/duration.Seconds())
		fmt.Printf(" Avg Events/Query: %.2f\n", float64(events)/float64(queries))
	}
}
|
||||
352
cmd/benchmark/main.go
Normal file
352
cmd/benchmark/main.go
Normal file
@@ -0,0 +1,352 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"lukechampine.com/frand"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/encoders/kinds"
|
||||
"orly.dev/pkg/encoders/tag"
|
||||
"orly.dev/pkg/encoders/tags"
|
||||
"orly.dev/pkg/encoders/text"
|
||||
"orly.dev/pkg/encoders/timestamp"
|
||||
"orly.dev/pkg/protocol/ws"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/lol"
|
||||
)
|
||||
|
||||
// BenchmarkResults accumulates the metrics gathered by the publish and
// query phases of a benchmark run.
type BenchmarkResults struct {
	EventsPublished      int64         // events successfully published
	EventsPublishedBytes int64         // total serialized bytes published
	PublishDuration      time.Duration // wall-clock time of the publish phase
	PublishRate          float64       // events per second
	PublishBandwidth     float64       // megabytes per second

	QueriesExecuted int64         // queries successfully executed
	QueryDuration   time.Duration // wall-clock time of the query phase
	QueryRate       float64       // queries per second
	EventsReturned  int64         // total events received across all queries
}
|
||||
|
||||
// main parses flags, optionally raises log verbosity, runs the publish
// and query benchmark phases, and prints the collected results.
func main() {
	var (
		relayURL = flag.String(
			"relay", "ws://localhost:7447", "Relay URL to benchmark",
		)
		eventCount = flag.Int("events", 10000, "Number of events to publish")
		eventSize  = flag.Int(
			"size", 1024, "Average size of event content in bytes",
		)
		concurrency = flag.Int(
			"concurrency", 10, "Number of concurrent publishers",
		)
		queryCount  = flag.Int("queries", 100, "Number of queries to execute")
		queryLimit  = flag.Int("query-limit", 100, "Limit for each query")
		skipPublish = flag.Bool("skip-publish", false, "Skip publishing phase")
		skipQuery   = flag.Bool("skip-query", false, "Skip query phase")
		verbose     = flag.Bool("v", false, "Verbose output")
	)
	flag.Parse()

	if *verbose {
		lol.SetLogLevel("trace")
	}

	c := context.Bg()
	results := &BenchmarkResults{}

	// Phase 1: Publish events
	if !*skipPublish {
		fmt.Printf("Publishing %d events to %s...\n", *eventCount, *relayURL)
		if err := benchmarkPublish(
			c, *relayURL, *eventCount, *eventSize, *concurrency, results,
		); chk.E(err) {
			fmt.Fprintf(os.Stderr, "Error during publish benchmark: %v\n", err)
			os.Exit(1)
		}
	}

	// Phase 2: Query events
	if !*skipQuery {
		fmt.Printf("\nQuerying events from %s...\n", *relayURL)
		if err := benchmarkQuery(
			c, *relayURL, *queryCount, *queryLimit, results,
		); chk.E(err) {
			fmt.Fprintf(os.Stderr, "Error during query benchmark: %v\n", err)
			os.Exit(1)
		}
	}

	// Print results
	printResults(results)
}
|
||||
|
||||
// benchmarkPublish publishes eventCount signed events of roughly
// eventSize bytes to relayURL across `concurrency` parallel
// connections, then records throughput figures into results.
// Individual publish failures are counted and reported but do not
// abort the run; the function itself always returns nil.
func benchmarkPublish(
	c context.T, relayURL string, eventCount, eventSize, concurrency int,
	results *BenchmarkResults,
) error {
	// Generate signers for each concurrent publisher
	signers := make([]*testSigner, concurrency)
	for i := range signers {
		signers[i] = newTestSigner()
	}

	// Track published events
	var publishedEvents atomic.Int64
	var publishedBytes atomic.Int64
	var errors atomic.Int64

	// Create wait group for concurrent publishers. The first
	// extraEvents publishers each take one additional event so the
	// totals add up exactly to eventCount.
	var wg sync.WaitGroup
	eventsPerPublisher := eventCount / concurrency
	extraEvents := eventCount % concurrency

	startTime := time.Now()

	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func(publisherID int) {
			defer wg.Done()

			// Connect to relay
			relay, err := ws.RelayConnect(c, relayURL)
			if err != nil {
				log.E.F("Publisher %d failed to connect: %v", publisherID, err)
				errors.Add(1)
				return
			}
			defer relay.Close()

			// Calculate events for this publisher
			eventsToPublish := eventsPerPublisher
			if publisherID < extraEvents {
				eventsToPublish++
			}

			signer := signers[publisherID]

			// Publish events
			for j := 0; j < eventsToPublish; j++ {
				ev := generateEvent(signer, eventSize)

				if err := relay.Publish(c, ev); err != nil {
					log.E.F(
						"Publisher %d failed to publish event: %v", publisherID,
						err,
					)
					errors.Add(1)
					continue
				}

				// Serialize only to measure the byte size for the
				// bandwidth figure.
				evBytes := ev.Marshal(nil)
				publishedEvents.Add(1)
				publishedBytes.Add(int64(len(evBytes)))

				if publishedEvents.Load()%1000 == 0 {
					fmt.Printf(
						" Published %d events...\n", publishedEvents.Load(),
					)
				}
			}
		}(i)
	}

	wg.Wait()
	duration := time.Since(startTime)

	results.EventsPublished = publishedEvents.Load()
	results.EventsPublishedBytes = publishedBytes.Load()
	results.PublishDuration = duration
	results.PublishRate = float64(results.EventsPublished) / duration.Seconds()
	results.PublishBandwidth = float64(results.EventsPublishedBytes) / duration.Seconds() / 1024 / 1024 // MB/s

	if errors.Load() > 0 {
		fmt.Printf(
			" Warning: %d errors occurred during publishing\n", errors.Load(),
		)
	}

	return nil
}
|
||||
|
||||
// benchmarkQuery runs queryCount synchronous queries against relayURL
// over a single connection, cycling through five filter shapes (kind,
// time range, tag, author, combined), and records the totals into
// results. Failed queries are logged and skipped.
func benchmarkQuery(
	c context.T, relayURL string, queryCount, queryLimit int,
	results *BenchmarkResults,
) error {
	relay, err := ws.RelayConnect(c, relayURL)
	if err != nil {
		return fmt.Errorf("failed to connect to relay: %w", err)
	}
	defer relay.Close()

	var totalEvents atomic.Int64
	var totalQueries atomic.Int64

	startTime := time.Now()

	for i := 0; i < queryCount; i++ {
		// Generate various filter types
		var f *filter.F
		switch i % 5 {
		case 0:
			// Query by kind
			limit := uint(queryLimit)
			f = &filter.F{
				Kinds: kinds.New(kind.TextNote),
				Limit: &limit,
			}
		case 1:
			// Query by time range
			now := timestamp.Now()
			since := timestamp.New(now.I64() - 3600) // last hour
			limit := uint(queryLimit)
			f = &filter.F{
				Since: since,
				Until: now,
				Limit: &limit,
			}
		case 2:
			// Query by tag (random pubkey, so typically no matches)
			limit := uint(queryLimit)
			f = &filter.F{
				Tags: tags.New(tag.New([]byte("p"), generateRandomPubkey())),
				Limit: &limit,
			}
		case 3:
			// Query by author (random pubkey)
			limit := uint(queryLimit)
			f = &filter.F{
				Authors: tag.New(generateRandomPubkey()),
				Limit: &limit,
			}
		case 4:
			// Complex query with multiple conditions
			now := timestamp.Now()
			since := timestamp.New(now.I64() - 7200)
			limit := uint(queryLimit)
			f = &filter.F{
				Kinds: kinds.New(kind.TextNote, kind.Repost),
				Authors: tag.New(generateRandomPubkey()),
				Since: since,
				Limit: &limit,
			}
		}

		// Execute query synchronously; count results on success.
		events, err := relay.QuerySync(c, f)
		if err != nil {
			log.E.F("Query %d failed: %v", i, err)
			continue
		}

		totalEvents.Add(int64(len(events)))
		totalQueries.Add(1)

		if totalQueries.Load()%20 == 0 {
			fmt.Printf(" Executed %d queries...\n", totalQueries.Load())
		}
	}

	duration := time.Since(startTime)

	results.QueriesExecuted = totalQueries.Load()
	results.QueryDuration = duration
	results.QueryRate = float64(results.QueriesExecuted) / duration.Seconds()
	results.EventsReturned = totalEvents.Load()

	return nil
}
|
||||
|
||||
func generateEvent(signer *testSigner, contentSize int) *event.E {
|
||||
// Generate content with some variation
|
||||
size := contentSize + frand.Intn(contentSize/2) - contentSize/4
|
||||
if size < 10 {
|
||||
size = 10
|
||||
}
|
||||
|
||||
content := text.NostrEscape(nil, frand.Bytes(size))
|
||||
|
||||
ev := &event.E{
|
||||
Pubkey: signer.Pub(),
|
||||
Kind: kind.TextNote,
|
||||
CreatedAt: timestamp.Now(),
|
||||
Content: content,
|
||||
Tags: generateRandomTags(),
|
||||
}
|
||||
|
||||
if err := ev.Sign(signer); chk.E(err) {
|
||||
panic(fmt.Sprintf("failed to sign event: %v", err))
|
||||
}
|
||||
|
||||
return ev
|
||||
}
|
||||
|
||||
// generateRandomTags builds a tag set with zero to four entries, each
// randomly chosen to be a p (pubkey), e (event id) or t (topic) tag.
// AppendUnique deduplicates, so fewer tags may result.
func generateRandomTags() *tags.T {
	t := tags.New()

	// Add some random tags
	numTags := frand.Intn(5)
	for i := 0; i < numTags; i++ {
		switch frand.Intn(3) {
		case 0:
			// p tag
			t.AppendUnique(tag.New([]byte("p"), generateRandomPubkey()))
		case 1:
			// e tag
			t.AppendUnique(tag.New([]byte("e"), generateRandomEventID()))
		case 2:
			// t tag
			t.AppendUnique(
				tag.New(
					[]byte("t"),
					[]byte(fmt.Sprintf("topic%d", frand.Intn(100))),
				),
			)
		}
	}

	return t
}
|
||||
|
||||
// generateRandomPubkey returns 32 random bytes standing in for a
// public key; it is not a valid curve point.
func generateRandomPubkey() []byte {
	return frand.Bytes(32)
}
|
||||
|
||||
// generateRandomEventID returns 32 random bytes standing in for an
// event ID; it does not correspond to any real event.
func generateRandomEventID() []byte {
	return frand.Bytes(32)
}
|
||||
|
||||
// printResults writes a human-readable summary of the benchmark run to
// stdout, omitting whichever phase did not run (zero counters).
func printResults(results *BenchmarkResults) {
	fmt.Println("\n=== Benchmark Results ===")

	if results.EventsPublished > 0 {
		fmt.Println("\nPublish Performance:")
		fmt.Printf(" Events Published: %d\n", results.EventsPublished)
		fmt.Printf(
			" Total Data: %.2f MB\n",
			float64(results.EventsPublishedBytes)/1024/1024,
		)
		fmt.Printf(" Duration: %s\n", results.PublishDuration)
		fmt.Printf(" Rate: %.2f events/second\n", results.PublishRate)
		fmt.Printf(" Bandwidth: %.2f MB/second\n", results.PublishBandwidth)
	}

	if results.QueriesExecuted > 0 {
		fmt.Println("\nQuery Performance:")
		fmt.Printf(" Queries Executed: %d\n", results.QueriesExecuted)
		fmt.Printf(" Events Returned: %d\n", results.EventsReturned)
		fmt.Printf(" Duration: %s\n", results.QueryDuration)
		fmt.Printf(" Rate: %.2f queries/second\n", results.QueryRate)
		// Division is safe: guarded by QueriesExecuted > 0 above.
		avgEventsPerQuery := float64(results.EventsReturned) / float64(results.QueriesExecuted)
		fmt.Printf(" Avg Events/Query: %.2f\n", avgEventsPerQuery)
	}
}
|
||||
82
cmd/benchmark/run_benchmark.sh
Executable file
82
cmd/benchmark/run_benchmark.sh
Executable file
@@ -0,0 +1,82 @@
|
||||
#!/bin/bash

# Simple Nostr Relay Benchmark Script
# Wraps the benchmark-simple Go tool: parses long-form CLI flags,
# builds the tool on first use, then runs it with the chosen settings.

# Default values
RELAY_URL="ws://localhost:7447"
EVENTS=10000
SIZE=1024
CONCURRENCY=10
QUERIES=100
QUERY_LIMIT=100

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        --relay)
            RELAY_URL="$2"
            shift 2
            ;;
        --events)
            EVENTS="$2"
            shift 2
            ;;
        --size)
            SIZE="$2"
            shift 2
            ;;
        --concurrency)
            CONCURRENCY="$2"
            shift 2
            ;;
        --queries)
            QUERIES="$2"
            shift 2
            ;;
        --query-limit)
            QUERY_LIMIT="$2"
            shift 2
            ;;
        --skip-publish)
            SKIP_PUBLISH="-skip-publish"
            shift
            ;;
        --skip-query)
            SKIP_QUERY="-skip-query"
            shift
            ;;
        *)
            echo "Unknown option: $1"
            echo "Usage: $0 [--relay URL] [--events N] [--size N] [--concurrency N] [--queries N] [--query-limit N] [--skip-publish] [--skip-query]"
            exit 1
            ;;
    esac
done

# Build the benchmark tool if it doesn't exist
if [ ! -f benchmark-simple ]; then
    echo "Building benchmark tool..."
    go build -o benchmark-simple ./benchmark_simple.go
    if [ $? -ne 0 ]; then
        echo "Failed to build benchmark tool"
        exit 1
    fi
fi

# Run the benchmark
echo "Running Nostr relay benchmark..."
echo "Relay: $RELAY_URL"
echo "Events: $EVENTS (size: $SIZE bytes)"
echo "Concurrency: $CONCURRENCY"
echo "Queries: $QUERIES (limit: $QUERY_LIMIT)"
echo ""

# SKIP_PUBLISH / SKIP_QUERY are intentionally unquoted: when unset they
# expand to nothing rather than an empty argument.
./benchmark-simple \
    -relay "$RELAY_URL" \
    -events $EVENTS \
    -size $SIZE \
    -concurrency $CONCURRENCY \
    -queries $QUERIES \
    -query-limit $QUERY_LIMIT \
    $SKIP_PUBLISH \
    $SKIP_QUERY
|
||||
63
cmd/benchmark/test_signer.go
Normal file
63
cmd/benchmark/test_signer.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"lukechampine.com/frand"
|
||||
"orly.dev/pkg/interfaces/signer"
|
||||
)
|
||||
|
||||
// testSigner is a simple signer implementation for benchmarking. Its
// keys are random bytes, not a real keypair, so signatures it produces
// do not verify cryptographically.
type testSigner struct {
	pub []byte // 32 random bytes posing as a public key
	sec []byte // 32 random bytes posing as a secret key
}
|
||||
|
||||
// newTestSigner creates a signer with random 32-byte pub and sec
// fields; they are not a valid keypair, which is fine for benchmarks.
func newTestSigner() *testSigner {
	return &testSigner{
		pub: frand.Bytes(32),
		sec: frand.Bytes(32),
	}
}
|
||||
|
||||
// Pub returns the (random) public key bytes.
func (s *testSigner) Pub() []byte {
	return s.pub
}

// Sec returns the (random) secret key bytes.
func (s *testSigner) Sec() []byte {
	return s.sec
}

// Sign returns 64 random bytes; signatures from testSigner are not
// cryptographically valid and exist only to fill the field.
func (s *testSigner) Sign(msg []byte) ([]byte, error) {
	return frand.Bytes(64), nil
}

// Verify always reports success, matching the fake Sign above.
func (s *testSigner) Verify(msg, sig []byte) (bool, error) {
	return true, nil
}

// InitSec stores the given secret and generates a fresh random pubkey;
// no real key derivation is performed.
func (s *testSigner) InitSec(sec []byte) error {
	s.sec = sec
	s.pub = frand.Bytes(32)
	return nil
}

// InitPub stores the given public key bytes verbatim.
func (s *testSigner) InitPub(pub []byte) error {
	s.pub = pub
	return nil
}

// Zero wipes the secret key in place.
func (s *testSigner) Zero() {
	for i := range s.sec {
		s.sec[i] = 0
	}
}

// ECDH returns 32 random bytes instead of a real shared secret.
func (s *testSigner) ECDH(pubkey []byte) ([]byte, error) {
	return frand.Bytes(32), nil
}

// Generate is a no-op; keys are created in newTestSigner.
func (s *testSigner) Generate() error {
	return nil
}

// Compile-time check that testSigner satisfies the signer interface.
var _ signer.I = (*testSigner)(nil)
|
||||
@@ -1,2 +0,0 @@
|
||||
// Package cmd contains the executable applications of the realy suite.
|
||||
package cmd
|
||||
16
cmd/lerproxy/lerproxy.service
Normal file
16
cmd/lerproxy/lerproxy.service
Normal file
@@ -0,0 +1,16 @@
|
||||
# systemd unit to run lerproxy as a service
|
||||
[Unit]
|
||||
Description=lerproxy
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=mleku
|
||||
ExecStart=/home/mleku/.local/bin/lerproxy -m /home/mleku/mapping.txt
|
||||
Restart=always
|
||||
Wants=network-online.target
|
||||
# waits for wireguard service to come up before starting, remove the wg-quick@wg0 section if running it directly on an
|
||||
# internet routeable connection
|
||||
After=network.target network-online.target wg-quick@wg0.service
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
@@ -14,8 +14,14 @@ import (
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"orly.dev/utils/chk"
|
||||
"orly.dev/utils/log"
|
||||
"orly.dev/cmd/lerproxy/buf"
|
||||
"orly.dev/cmd/lerproxy/hsts"
|
||||
"orly.dev/cmd/lerproxy/reverse"
|
||||
"orly.dev/cmd/lerproxy/tcpkeepalive"
|
||||
"orly.dev/cmd/lerproxy/util"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
@@ -27,13 +33,6 @@ import (
|
||||
"github.com/alexflint/go-arg"
|
||||
"golang.org/x/crypto/acme/autocert"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"orly.dev/cmd/lerproxy/buf"
|
||||
"orly.dev/cmd/lerproxy/hsts"
|
||||
"orly.dev/cmd/lerproxy/reverse"
|
||||
"orly.dev/cmd/lerproxy/tcpkeepalive"
|
||||
"orly.dev/cmd/lerproxy/util"
|
||||
"orly.dev/utils/context"
|
||||
)
|
||||
|
||||
type runArgs struct {
|
||||
|
||||
@@ -6,9 +6,8 @@ import (
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"orly.dev/utils/log"
|
||||
|
||||
"orly.dev/cmd/lerproxy/util"
|
||||
"orly.dev/pkg/utils/log"
|
||||
)
|
||||
|
||||
// NewSingleHostReverseProxy is a copy of httputil.NewSingleHostReverseProxy
|
||||
|
||||
@@ -4,10 +4,9 @@ package tcpkeepalive
|
||||
|
||||
import (
|
||||
"net"
|
||||
"orly.dev/utils/chk"
|
||||
"time"
|
||||
|
||||
"orly.dev/cmd/lerproxy/timeout"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Period can be changed prior to opening a Listener to alter its'
|
||||
|
||||
@@ -4,7 +4,7 @@ package timeout
|
||||
|
||||
import (
|
||||
"net"
|
||||
"orly.dev/utils/chk"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
||||
@@ -3,16 +3,15 @@ package main
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"orly.dev/crypto/p256k"
|
||||
"orly.dev/encoders/bech32encoding"
|
||||
"orly.dev/utils/chk"
|
||||
"orly.dev/utils/errorf"
|
||||
"orly.dev/utils/log"
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/encoders/bech32encoding"
|
||||
"orly.dev/pkg/interfaces/signer"
|
||||
"orly.dev/pkg/protocol/httpauth"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/errorf"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"orly.dev/interfaces/signer"
|
||||
"orly.dev/protocol/httpauth"
|
||||
)
|
||||
|
||||
const secEnv = "NOSTR_SECRET_KEY"
|
||||
|
||||
@@ -8,18 +8,18 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"orly.dev/crypto/p256k"
|
||||
"orly.dev/crypto/sha256"
|
||||
"orly.dev/encoders/bech32encoding"
|
||||
"orly.dev/utils/chk"
|
||||
"orly.dev/utils/errorf"
|
||||
"orly.dev/utils/log"
|
||||
realy_lol "orly.dev/version"
|
||||
"os"
|
||||
|
||||
"orly.dev/encoders/hex"
|
||||
"orly.dev/interfaces/signer"
|
||||
"orly.dev/protocol/httpauth"
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/crypto/sha256"
|
||||
"orly.dev/pkg/encoders/bech32encoding"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"orly.dev/pkg/interfaces/signer"
|
||||
"orly.dev/pkg/protocol/httpauth"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/errorf"
|
||||
"orly.dev/pkg/utils/log"
|
||||
realy_lol "orly.dev/pkg/version"
|
||||
)
|
||||
|
||||
const secEnv = "NOSTR_SECRET_KEY"
|
||||
@@ -191,6 +191,5 @@ func Post(f string, ur *url.URL, sign signer.I) (err error) {
|
||||
if io.Copy(os.Stdout, res.Body); chk.E(err) {
|
||||
return
|
||||
}
|
||||
fmt.Println()
|
||||
return
|
||||
}
|
||||
|
||||
@@ -6,23 +6,24 @@ import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"orly.dev/crypto/ec/bech32"
|
||||
"orly.dev/crypto/ec/schnorr"
|
||||
secp256k2 "orly.dev/crypto/ec/secp256k1"
|
||||
"orly.dev/encoders/bech32encoding"
|
||||
"orly.dev/utils/chk"
|
||||
"orly.dev/utils/interrupt"
|
||||
"orly.dev/utils/log"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/alexflint/go-arg"
|
||||
"orly.dev/pkg/crypto/ec/bech32"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/encoders/bech32encoding"
|
||||
"orly.dev/pkg/utils/atomic"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/interrupt"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/lol"
|
||||
"orly.dev/pkg/utils/qu"
|
||||
|
||||
"orly.dev/utils/atomic"
|
||||
"orly.dev/utils/qu"
|
||||
"github.com/alexflint/go-arg"
|
||||
)
|
||||
|
||||
var prefix = append(bech32encoding.PubHRP, '1')
|
||||
@@ -34,9 +35,9 @@ const (
|
||||
)
|
||||
|
||||
type Result struct {
|
||||
sec *secp256k2.SecretKey
|
||||
sec []byte
|
||||
npub []byte
|
||||
pub *secp256k2.PublicKey
|
||||
pub []byte
|
||||
}
|
||||
|
||||
var args struct {
|
||||
@@ -46,6 +47,7 @@ var args struct {
|
||||
}
|
||||
|
||||
func main() {
|
||||
lol.SetLogLevel("info")
|
||||
arg.MustParse(&args)
|
||||
if args.String == "" {
|
||||
_, _ = fmt.Fprintln(
|
||||
@@ -80,7 +82,7 @@ Options:
|
||||
}
|
||||
}
|
||||
|
||||
func Vanity(str string, where int, threads int) (e error) {
|
||||
func Vanity(str string, where int, threads int) (err error) {
|
||||
|
||||
// check the string has valid bech32 ciphers
|
||||
for i := range str {
|
||||
@@ -123,7 +125,7 @@ out:
|
||||
wm := workingFor % time.Second
|
||||
workingFor -= wm
|
||||
fmt.Printf(
|
||||
"working for %v, attempts %d\n",
|
||||
" working for %v, attempts %d",
|
||||
workingFor, counter.Load(),
|
||||
)
|
||||
case r := <-resC:
|
||||
@@ -143,20 +145,16 @@ out:
|
||||
wg.Wait()
|
||||
|
||||
fmt.Printf(
|
||||
"generated in %d attempts using %d threads, taking %v\n",
|
||||
"\r# generated in %d attempts using %d threads, taking %v ",
|
||||
counter.Load(), args.Threads, time.Now().Sub(started),
|
||||
)
|
||||
secBytes := res.sec.Serialize()
|
||||
log.D.Ln(
|
||||
"generated key pair:\n"+
|
||||
"\nhex:\n"+
|
||||
"\tsecret: %s\n"+
|
||||
"\tpublic: %s\n\n",
|
||||
hex.EncodeToString(secBytes),
|
||||
hex.EncodeToString(schnorr.SerializePubKey(res.pub)),
|
||||
fmt.Printf(
|
||||
"\nHSEC = %s\nHPUB = %s\n",
|
||||
hex.EncodeToString(res.sec),
|
||||
hex.EncodeToString(res.pub),
|
||||
)
|
||||
nsec, _ := bech32encoding.SecretKeyToNsec(res.sec)
|
||||
fmt.Printf("\nNSEC = %s\nNPUB = %s\n\n", nsec, res.npub)
|
||||
nsec, _ := bech32encoding.BinToNsec(res.sec)
|
||||
fmt.Printf("NSEC = %s\nNPUB = %s\n", nsec, res.npub)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -186,16 +184,19 @@ out:
|
||||
default:
|
||||
}
|
||||
counter.Inc()
|
||||
r.sec, r.pub, e = GenKeyPair()
|
||||
// r.sec, r.pub, e = GenKeyPair()
|
||||
r.sec, r.pub, e = Gen()
|
||||
if e != nil {
|
||||
log.E.Ln("error generating key: '%v' worker stopping", e)
|
||||
break out
|
||||
}
|
||||
r.npub, e = bech32encoding.PublicKeyToNpub(r.pub)
|
||||
if e != nil {
|
||||
// r.npub, e = bech32encoding.PublicKeyToNpub(r.pub)
|
||||
if r.npub, e = bech32encoding.BinToNpub(r.pub); e != nil {
|
||||
log.E.Ln("fatal error generating npub: %s\n", e)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("\rgenerating key: %s", r.npub)
|
||||
// log.I.F("%s", r.npub)
|
||||
switch where {
|
||||
case PositionBeginning:
|
||||
if bytes.HasPrefix(r.npub, append(prefix, []byte(str)...)) {
|
||||
@@ -216,14 +217,23 @@ out:
|
||||
}
|
||||
}
|
||||
|
||||
func Gen() (skb, pkb []byte, err error) {
|
||||
sign := p256k.Signer{}
|
||||
if err = sign.Generate(); chk.E(err) {
|
||||
return
|
||||
}
|
||||
skb, pkb = sign.Sec(), sign.Pub()
|
||||
return
|
||||
}
|
||||
|
||||
// GenKeyPair creates a fresh new key pair using the entropy source used by
|
||||
// crypto/rand (ie, /dev/random on posix systems).
|
||||
func GenKeyPair() (
|
||||
sec *secp256k2.SecretKey,
|
||||
pub *secp256k2.PublicKey, err error,
|
||||
sec *secp256k1.SecretKey,
|
||||
pub *secp256k1.PublicKey, err error,
|
||||
) {
|
||||
|
||||
sec, err = secp256k2.GenerateSecretKey()
|
||||
sec, err = secp256k1.GenerateSecretKey()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error generating key: %s", err)
|
||||
return
|
||||
|
||||
162
cmd/walletcli/README.md
Normal file
162
cmd/walletcli/README.md
Normal file
@@ -0,0 +1,162 @@
|
||||
# NWC Client CLI Tool
|
||||
|
||||
A command-line interface tool for making calls to Nostr Wallet Connect (NWC) services.
|
||||
|
||||
## Overview
|
||||
|
||||
This CLI tool allows you to interact with NWC wallet services using the methods defined in the NIP-47 specification. It provides a simple interface for executing wallet operations and displays the JSON response from the wallet service.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
nwcclient <connection URL> <method> [parameters...]
|
||||
```
|
||||
|
||||
### Connection URL
|
||||
|
||||
The connection URL should be in the Nostr Wallet Connect format:
|
||||
|
||||
```
|
||||
nostr+walletconnect://<wallet_pubkey>?relay=<relay_url>&secret=<secret>
|
||||
```
|
||||
|
||||
### Supported Methods
|
||||
|
||||
The following methods are supported by this CLI tool:
|
||||
|
||||
- `get_info` - Get wallet information
|
||||
- `get_balance` - Get wallet balance
|
||||
- `get_budget` - Get wallet budget
|
||||
- `make_invoice` - Create an invoice
|
||||
- `pay_invoice` - Pay an invoice
|
||||
- `pay_keysend` - Send a keysend payment
|
||||
- `lookup_invoice` - Look up an invoice
|
||||
- `list_transactions` - List transactions
|
||||
- `sign_message` - Sign a message
|
||||
|
||||
### Unsupported Methods
|
||||
|
||||
The following methods are defined in the NIP-47 specification but are not directly supported by this CLI tool due to limitations in the underlying nwc package:
|
||||
|
||||
- `create_connection` - Create a connection
|
||||
- `make_hold_invoice` - Create a hold invoice
|
||||
- `settle_hold_invoice` - Settle a hold invoice
|
||||
- `cancel_hold_invoice` - Cancel a hold invoice
|
||||
- `multi_pay_invoice` - Pay multiple invoices
|
||||
- `multi_pay_keysend` - Send multiple keysend payments
|
||||
|
||||
## Method Parameters
|
||||
|
||||
### Methods with No Parameters
|
||||
|
||||
- `get_info`
|
||||
- `get_balance`
|
||||
- `get_budget`
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> get_info
|
||||
```
|
||||
|
||||
### Methods with Parameters
|
||||
|
||||
#### make_invoice
|
||||
|
||||
```
|
||||
nwcclient <connection URL> make_invoice <amount> <description> [description_hash] [expiry]
|
||||
```
|
||||
|
||||
- `amount` - Amount in millisatoshis (msats)
|
||||
- `description` - Invoice description
|
||||
- `description_hash` (optional) - Hash of the description
|
||||
- `expiry` (optional) - Expiry time in seconds
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> make_invoice 1000000 "Test invoice" "" 3600
|
||||
```
|
||||
|
||||
#### pay_invoice
|
||||
|
||||
```
|
||||
nwcclient <connection URL> pay_invoice <invoice> [amount]
|
||||
```
|
||||
|
||||
- `invoice` - BOLT11 invoice
|
||||
- `amount` (optional) - Amount in millisatoshis (msats)
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> pay_invoice lnbc1...
|
||||
```
|
||||
|
||||
#### pay_keysend
|
||||
|
||||
```
|
||||
nwcclient <connection URL> pay_keysend <amount> <pubkey> [preimage]
|
||||
```
|
||||
|
||||
- `amount` - Amount in millisatoshis (msats)
|
||||
- `pubkey` - Recipient's public key
|
||||
- `preimage` (optional) - Payment preimage
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> pay_keysend 1000000 03...
|
||||
```
|
||||
|
||||
#### lookup_invoice
|
||||
|
||||
```
|
||||
nwcclient <connection URL> lookup_invoice <payment_hash_or_invoice>
|
||||
```
|
||||
|
||||
- `payment_hash_or_invoice` - Payment hash or BOLT11 invoice
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> lookup_invoice 3d...
|
||||
```
|
||||
|
||||
#### list_transactions
|
||||
|
||||
```
|
||||
nwcclient <connection URL> list_transactions [from <timestamp>] [until <timestamp>] [limit <count>] [offset <count>] [unpaid <true|false>] [type <incoming|outgoing>]
|
||||
```
|
||||
|
||||
Parameters are specified as name-value pairs:
|
||||
|
||||
- `from` - Start timestamp
|
||||
- `until` - End timestamp
|
||||
- `limit` - Maximum number of transactions to return
|
||||
- `offset` - Number of transactions to skip
|
||||
- `unpaid` - Whether to include unpaid transactions
|
||||
- `type` - Transaction type (incoming or outgoing)
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> list_transactions limit 10 type incoming
|
||||
```
|
||||
|
||||
#### sign_message
|
||||
|
||||
```
|
||||
nwcclient <connection URL> sign_message <message>
|
||||
```
|
||||
|
||||
- `message` - Message to sign
|
||||
|
||||
Example:
|
||||
```
|
||||
nwcclient <connection URL> sign_message "Hello, world!"
|
||||
```
|
||||
|
||||
## Output
|
||||
|
||||
The tool prints the JSON response from the wallet service to stdout. If an error occurs, an error message is printed to stderr.
|
||||
|
||||
## Limitations
|
||||
|
||||
- The tool only supports methods that have direct client methods in the nwc package.
|
||||
- Complex parameters like metadata are not supported.
|
||||
- The tool does not support interactive authentication or authorization.
|
||||
417
cmd/walletcli/main.go
Normal file
417
cmd/walletcli/main.go
Normal file
@@ -0,0 +1,417 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"orly.dev/pkg/protocol/nwc"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
)
|
||||
|
||||
func printUsage() {
|
||||
fmt.Println("Usage: walletcli \"<NWC connection URL>\" <method> [<args...>]")
|
||||
fmt.Println("\nAvailable methods:")
|
||||
fmt.Println(" get_wallet_service_info - Get wallet service information")
|
||||
fmt.Println(" get_info - Get wallet information")
|
||||
fmt.Println(" get_balance - Get wallet balance")
|
||||
fmt.Println(" get_budget - Get wallet budget")
|
||||
fmt.Println(" make_invoice - Create an invoice")
|
||||
fmt.Println(" Args: <amount> [<description>] [<description_hash>] [<expiry>]")
|
||||
fmt.Println(" pay_invoice - Pay an invoice")
|
||||
fmt.Println(" Args: <invoice> [<amount>] [<comment>]")
|
||||
fmt.Println(" pay_keysend - Pay to a node using keysend")
|
||||
fmt.Println(" Args: <pubkey> <amount> [<preimage>] [<tlv_type> <tlv_value>...]")
|
||||
fmt.Println(" lookup_invoice - Look up an invoice")
|
||||
fmt.Println(" Args: <payment_hash or invoice>")
|
||||
fmt.Println(" list_transactions - List transactions")
|
||||
fmt.Println(" Args: [<limit>] [<offset>] [<from>] [<until>]")
|
||||
fmt.Println(" make_hold_invoice - Create a hold invoice")
|
||||
fmt.Println(" Args: <amount> <payment_hash> [<description>] [<description_hash>] [<expiry>]")
|
||||
fmt.Println(" settle_hold_invoice - Settle a hold invoice")
|
||||
fmt.Println(" Args: <preimage>")
|
||||
fmt.Println(" cancel_hold_invoice - Cancel a hold invoice")
|
||||
fmt.Println(" Args: <payment_hash>")
|
||||
fmt.Println(" sign_message - Sign a message")
|
||||
fmt.Println(" Args: <message>")
|
||||
fmt.Println(" create_connection - Create a connection")
|
||||
fmt.Println(" Args: <pubkey> <name> <methods> [<notification_types>] [<max_amount>] [<budget_renewal>] [<expires_at>]")
|
||||
}
|
||||
|
||||
func main() {
|
||||
if len(os.Args) < 3 {
|
||||
printUsage()
|
||||
os.Exit(1)
|
||||
}
|
||||
connectionURL := os.Args[1]
|
||||
method := os.Args[2]
|
||||
args := os.Args[3:]
|
||||
// Create context
|
||||
// ctx, cancel := context.Cancel(context.Bg())
|
||||
ctx := context.Bg()
|
||||
// defer cancel()
|
||||
// Create NWC client
|
||||
client, err := nwc.NewClient(ctx, connectionURL)
|
||||
if err != nil {
|
||||
fmt.Printf("Error creating client: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
// Execute the requested method
|
||||
switch method {
|
||||
case "get_wallet_service_info":
|
||||
handleGetWalletServiceInfo(ctx, client)
|
||||
case "get_info":
|
||||
handleGetInfo(ctx, client)
|
||||
case "get_balance":
|
||||
handleGetBalance(ctx, client)
|
||||
case "get_budget":
|
||||
handleGetBudget(ctx, client)
|
||||
case "make_invoice":
|
||||
handleMakeInvoice(ctx, client, args)
|
||||
case "pay_invoice":
|
||||
handlePayInvoice(ctx, client, args)
|
||||
case "pay_keysend":
|
||||
handlePayKeysend(ctx, client, args)
|
||||
case "lookup_invoice":
|
||||
handleLookupInvoice(ctx, client, args)
|
||||
case "list_transactions":
|
||||
handleListTransactions(ctx, client, args)
|
||||
case "make_hold_invoice":
|
||||
handleMakeHoldInvoice(ctx, client, args)
|
||||
case "settle_hold_invoice":
|
||||
handleSettleHoldInvoice(ctx, client, args)
|
||||
case "cancel_hold_invoice":
|
||||
handleCancelHoldInvoice(ctx, client, args)
|
||||
case "sign_message":
|
||||
handleSignMessage(ctx, client, args)
|
||||
case "create_connection":
|
||||
handleCreateConnection(ctx, client, args)
|
||||
default:
|
||||
fmt.Printf("Unknown method: %s\n", method)
|
||||
printUsage()
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetWalletServiceInfo(ctx context.T, client *nwc.Client) {
|
||||
if _, raw, err := client.GetWalletServiceInfo(ctx, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetInfo(ctx context.T, client *nwc.Client) {
|
||||
if _, raw, err := client.GetInfo(ctx, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetBalance(ctx context.T, client *nwc.Client) {
|
||||
if _, raw, err := client.GetBalance(ctx, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleGetBudget(ctx context.T, client *nwc.Client) {
|
||||
if _, raw, err := client.GetBudget(ctx, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleMakeInvoice(ctx context.T, client *nwc.Client, args []string) {
|
||||
if len(args) < 1 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> make_invoice <amount> [<description>] [<description_hash>] [<expiry>]")
|
||||
return
|
||||
}
|
||||
amount, err := strconv.ParseUint(args[0], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing amount: %v\n", err)
|
||||
return
|
||||
}
|
||||
params := &nwc.MakeInvoiceParams{
|
||||
Amount: amount,
|
||||
}
|
||||
if len(args) > 1 {
|
||||
params.Description = args[1]
|
||||
}
|
||||
if len(args) > 2 {
|
||||
params.DescriptionHash = args[2]
|
||||
}
|
||||
if len(args) > 3 {
|
||||
expiry, err := strconv.ParseInt(args[3], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing expiry: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.Expiry = &expiry
|
||||
}
|
||||
var raw []byte
|
||||
if _, raw, err = client.MakeInvoice(ctx, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handlePayInvoice(ctx context.T, client *nwc.Client, args []string) {
|
||||
if len(args) < 1 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> pay_invoice <invoice> [<amount>] [<comment>]")
|
||||
return
|
||||
}
|
||||
params := &nwc.PayInvoiceParams{
|
||||
Invoice: args[0],
|
||||
}
|
||||
if len(args) > 1 {
|
||||
amount, err := strconv.ParseUint(args[1], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing amount: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.Amount = &amount
|
||||
}
|
||||
if len(args) > 2 {
|
||||
comment := args[2]
|
||||
params.Metadata = &nwc.PayInvoiceMetadata{
|
||||
Comment: &comment,
|
||||
}
|
||||
}
|
||||
if _, raw, err := client.PayInvoice(ctx, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleLookupInvoice(ctx context.T, client *nwc.Client, args []string) {
|
||||
if len(args) < 1 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> lookup_invoice <payment_hash or invoice>")
|
||||
return
|
||||
}
|
||||
params := &nwc.LookupInvoiceParams{}
|
||||
// Determine if the argument is a payment hash or an invoice
|
||||
if strings.HasPrefix(args[0], "ln") {
|
||||
invoice := args[0]
|
||||
params.Invoice = &invoice
|
||||
} else {
|
||||
paymentHash := args[0]
|
||||
params.PaymentHash = &paymentHash
|
||||
}
|
||||
var err error
|
||||
var raw []byte
|
||||
if _, raw, err = client.LookupInvoice(ctx, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleListTransactions(ctx context.T, client *nwc.Client, args []string) {
|
||||
params := &nwc.ListTransactionsParams{}
|
||||
if len(args) > 0 {
|
||||
limit, err := strconv.ParseUint(args[0], 10, 16)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing limit: %v\n", err)
|
||||
return
|
||||
}
|
||||
limitUint16 := uint16(limit)
|
||||
params.Limit = &limitUint16
|
||||
}
|
||||
if len(args) > 1 {
|
||||
offset, err := strconv.ParseUint(args[1], 10, 32)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing offset: %v\n", err)
|
||||
return
|
||||
}
|
||||
offsetUint32 := uint32(offset)
|
||||
params.Offset = &offsetUint32
|
||||
}
|
||||
if len(args) > 2 {
|
||||
from, err := strconv.ParseInt(args[2], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing from: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.From = &from
|
||||
}
|
||||
if len(args) > 3 {
|
||||
until, err := strconv.ParseInt(args[3], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing until: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.Until = &until
|
||||
}
|
||||
var raw []byte
|
||||
var err error
|
||||
if _, raw, err = client.ListTransactions(ctx, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleMakeHoldInvoice(ctx context.T, client *nwc.Client, args []string) {
|
||||
if len(args) < 2 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> make_hold_invoice <amount> <payment_hash> [<description>] [<description_hash>] [<expiry>]")
|
||||
return
|
||||
}
|
||||
amount, err := strconv.ParseUint(args[0], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing amount: %v\n", err)
|
||||
return
|
||||
}
|
||||
params := &nwc.MakeHoldInvoiceParams{
|
||||
Amount: amount,
|
||||
PaymentHash: args[1],
|
||||
}
|
||||
if len(args) > 2 {
|
||||
params.Description = args[2]
|
||||
}
|
||||
if len(args) > 3 {
|
||||
params.DescriptionHash = args[3]
|
||||
}
|
||||
if len(args) > 4 {
|
||||
expiry, err := strconv.ParseInt(args[4], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing expiry: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.Expiry = &expiry
|
||||
}
|
||||
var raw []byte
|
||||
if _, raw, err = client.MakeHoldInvoice(ctx, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleSettleHoldInvoice(ctx context.T, client *nwc.Client, args []string) {
|
||||
if len(args) < 1 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> settle_hold_invoice <preimage>")
|
||||
return
|
||||
}
|
||||
params := &nwc.SettleHoldInvoiceParams{
|
||||
Preimage: args[0],
|
||||
}
|
||||
var raw []byte
|
||||
var err error
|
||||
if raw, err = client.SettleHoldInvoice(ctx, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleCancelHoldInvoice(ctx context.T, client *nwc.Client, args []string) {
|
||||
if len(args) < 1 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> cancel_hold_invoice <payment_hash>")
|
||||
return
|
||||
}
|
||||
|
||||
params := &nwc.CancelHoldInvoiceParams{
|
||||
PaymentHash: args[0],
|
||||
}
|
||||
var err error
|
||||
var raw []byte
|
||||
if raw, err = client.CancelHoldInvoice(ctx, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleSignMessage(ctx context.T, client *nwc.Client, args []string) {
|
||||
if len(args) < 1 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> sign_message <message>")
|
||||
return
|
||||
}
|
||||
|
||||
params := &nwc.SignMessageParams{
|
||||
Message: args[0],
|
||||
}
|
||||
var raw []byte
|
||||
var err error
|
||||
if _, raw, err = client.SignMessage(ctx, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handlePayKeysend(ctx context.T, client *nwc.Client, args []string) {
|
||||
if len(args) < 2 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> pay_keysend <pubkey> <amount> [<preimage>] [<tlv_type> <tlv_value>...]")
|
||||
return
|
||||
}
|
||||
pubkey := args[0]
|
||||
amount, err := strconv.ParseUint(args[1], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing amount: %v\n", err)
|
||||
return
|
||||
}
|
||||
params := &nwc.PayKeysendParams{
|
||||
Pubkey: pubkey,
|
||||
Amount: amount,
|
||||
}
|
||||
// Optional preimage
|
||||
if len(args) > 2 {
|
||||
preimage := args[2]
|
||||
params.Preimage = &preimage
|
||||
}
|
||||
// Optional TLV records (must come in pairs)
|
||||
if len(args) > 3 {
|
||||
// Start from index 3 and process pairs of arguments
|
||||
for i := 3; i < len(args)-1; i += 2 {
|
||||
tlvType, err := strconv.ParseUint(args[i], 10, 32)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing TLV type: %v\n", err)
|
||||
return
|
||||
}
|
||||
tlvValue := args[i+1]
|
||||
params.TLVRecords = append(
|
||||
params.TLVRecords, nwc.PayKeysendTLVRecord{
|
||||
Type: uint32(tlvType),
|
||||
Value: tlvValue,
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
var raw []byte
|
||||
if _, raw, err = client.PayKeysend(ctx, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
|
||||
func handleCreateConnection(ctx context.T, client *nwc.Client, args []string) {
|
||||
if len(args) < 3 {
|
||||
fmt.Println("Error: Missing required arguments")
|
||||
fmt.Println("Usage: walletcli <NWC connection URL> create_connection <pubkey> <name> <methods> [<notification_types>] [<max_amount>] [<budget_renewal>] [<expires_at>]")
|
||||
return
|
||||
}
|
||||
params := &nwc.CreateConnectionParams{
|
||||
Pubkey: args[0],
|
||||
Name: args[1],
|
||||
RequestMethods: strings.Split(args[2], ","),
|
||||
}
|
||||
if len(args) > 3 {
|
||||
params.NotificationTypes = strings.Split(args[3], ",")
|
||||
}
|
||||
if len(args) > 4 {
|
||||
maxAmount, err := strconv.ParseUint(args[4], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing max_amount: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.MaxAmount = &maxAmount
|
||||
}
|
||||
if len(args) > 5 {
|
||||
params.BudgetRenewal = &args[5]
|
||||
}
|
||||
if len(args) > 6 {
|
||||
expiresAt, err := strconv.ParseInt(args[6], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing expires_at: %v\n", err)
|
||||
return
|
||||
}
|
||||
params.ExpiresAt = &expiresAt
|
||||
}
|
||||
var raw []byte
|
||||
var err error
|
||||
if raw, err = client.CreateConnection(ctx, params, true); !chk.E(err) {
|
||||
fmt.Println(string(raw))
|
||||
}
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
package base58_test
|
||||
|
||||
import (
|
||||
"orly.dev/utils/lol"
|
||||
)
|
||||
|
||||
var (
|
||||
log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf
|
||||
)
|
||||
@@ -1,88 +0,0 @@
|
||||
{
|
||||
"pubkeys": [
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
|
||||
"023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
|
||||
"020000000000000000000000000000000000000000000000000000000000000005",
|
||||
"02FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30",
|
||||
"04F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9"
|
||||
],
|
||||
"tweaks": [
|
||||
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
|
||||
"252E4BD67410A76CDF933D30EAA1608214037F1B105A013ECCD3C5C184A6110B"
|
||||
],
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"key_indices": [0, 1, 2],
|
||||
"expected": "90539EEDE565F5D054F32CC0C220126889ED1E5D193BAF15AEF344FE59D4610C"
|
||||
},
|
||||
{
|
||||
"key_indices": [2, 1, 0],
|
||||
"expected": "6204DE8B083426DC6EAF9502D27024D53FC826BF7D2012148A0575435DF54B2B"
|
||||
},
|
||||
{
|
||||
"key_indices": [0, 0, 0],
|
||||
"expected": "B436E3BAD62B8CD409969A224731C193D051162D8C5AE8B109306127DA3AA935"
|
||||
},
|
||||
{
|
||||
"key_indices": [0, 0, 1, 1],
|
||||
"expected": "69BC22BFA5D106306E48A20679DE1D7389386124D07571D0D872686028C26A3E"
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"key_indices": [0, 3],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "Invalid public key"
|
||||
},
|
||||
{
|
||||
"key_indices": [0, 4],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "Public key exceeds field size"
|
||||
},
|
||||
{
|
||||
"key_indices": [5, 0],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "First byte of public key is not 2 or 3"
|
||||
},
|
||||
{
|
||||
"key_indices": [0, 1],
|
||||
"tweak_indices": [0],
|
||||
"is_xonly": [true],
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The tweak must be less than n."
|
||||
},
|
||||
"comment": "Tweak is out of range"
|
||||
},
|
||||
{
|
||||
"key_indices": [6],
|
||||
"tweak_indices": [1],
|
||||
"is_xonly": [false],
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The result of tweaking cannot be infinity."
|
||||
},
|
||||
"comment": "Intermediate tweaking result is point at infinity"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
{
|
||||
"pubkeys": [
|
||||
"02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659",
|
||||
"023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
|
||||
"02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8"
|
||||
],
|
||||
"sorted_pubkeys": [
|
||||
"023590A94E768F8E1815C2F24B4D80A8E3149316C3518CE7B7AD338368D038CA66",
|
||||
"02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
|
||||
"02DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8",
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"03DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"
|
||||
]
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
{
|
||||
"pnonces": [
|
||||
"020151C80F435648DF67A22B749CD798CE54E0321D034B92B709B567D60A42E66603BA47FBC1834437B3212E89A84D8425E7BF12E0245D98262268EBDCB385D50641",
|
||||
"03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B833",
|
||||
"020151C80F435648DF67A22B749CD798CE54E0321D034B92B709B567D60A42E6660279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
|
||||
"03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60379BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
|
||||
"04FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B833",
|
||||
"03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A60248C264CDD57D3C24D79990B0F865674EB62A0F9018277A95011B41BFC193B831",
|
||||
"03FF406FFD8ADB9CD29877E4985014F66A59F6CD01C0E88CAA8E5F3166B1F676A602FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30"
|
||||
],
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"pnonce_indices": [0, 1],
|
||||
"expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B024725377345BDE0E9C33AF3C43C0A29A9249F2F2956FA8CFEB55C8573D0262DC8"
|
||||
},
|
||||
{
|
||||
"pnonce_indices": [2, 3],
|
||||
"expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"comment": "Sum of second points encoded in the nonces is point at infinity which is serialized as 33 zero bytes"
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"pnonce_indices": [0, 4],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1,
|
||||
"contrib": "pubnonce"
|
||||
},
|
||||
"comment": "Public nonce from signer 1 is invalid due wrong tag, 0x04, in the first half",
|
||||
"btcec_err": "invalid public key: unsupported format: 4"
|
||||
},
|
||||
{
|
||||
"pnonce_indices": [5, 1],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubnonce"
|
||||
},
|
||||
"comment": "Public nonce from signer 0 is invalid because the second half does not correspond to an X coordinate",
|
||||
"btcec_err": "invalid public key: x coordinate 48c264cdd57d3c24d79990b0f865674eb62a0f9018277a95011b41bfc193b831 is not on the secp256k1 curve"
|
||||
},
|
||||
{
|
||||
"pnonce_indices": [6, 1],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubnonce"
|
||||
},
|
||||
"comment": "Public nonce from signer 0 is invalid because second half exceeds field size",
|
||||
"btcec_err": "invalid public key: x >= field prime"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,40 +0,0 @@
|
||||
{
|
||||
"test_cases": [
|
||||
{
|
||||
"rand_": "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sk": "0202020202020202020202020202020202020202020202020202020202020202",
|
||||
"pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
|
||||
"aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
|
||||
"msg": "0101010101010101010101010101010101010101010101010101010101010101",
|
||||
"extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
|
||||
"expected": "227243DCB40EF2A13A981DB188FA433717B506BDFA14B1AE47D5DC027C9C3B9EF2370B2AD206E724243215137C86365699361126991E6FEC816845F837BDDAC3024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766"
|
||||
},
|
||||
{
|
||||
"rand_": "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sk": "0202020202020202020202020202020202020202020202020202020202020202",
|
||||
"pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
|
||||
"aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
|
||||
"msg": "",
|
||||
"extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
|
||||
"expected": "CD0F47FE471D6788FF3243F47345EA0A179AEF69476BE8348322EF39C2723318870C2065AFB52DEDF02BF4FDBF6D2F442E608692F50C2374C08FFFE57042A61C024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766"
|
||||
},
|
||||
{
|
||||
"rand_": "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sk": "0202020202020202020202020202020202020202020202020202020202020202",
|
||||
"pk": "024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766",
|
||||
"aggpk": "0707070707070707070707070707070707070707070707070707070707070707",
|
||||
"msg": "2626262626262626262626262626262626262626262626262626262626262626262626262626",
|
||||
"extra_in": "0808080808080808080808080808080808080808080808080808080808080808",
|
||||
"expected": "011F8BC60EF061DEEF4D72A0A87200D9994B3F0CD9867910085C38D5366E3E6B9FF03BC0124E56B24069E91EC3F162378983F194E8BD0ED89BE3059649EAE262024D4B6CD1361032CA9BD2AEB9D900AA4D45D9EAD80AC9423374C451A7254D0766"
|
||||
},
|
||||
{
|
||||
"rand_": "0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"sk": null,
|
||||
"pk": "02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"aggpk": null,
|
||||
"msg": null,
|
||||
"extra_in": null,
|
||||
"expected": "890E83616A3BC4640AB9B6374F21C81FF89CDDDBAFAA7475AE2A102A92E3EDB29FD7E874E23342813A60D9646948242646B7951CA046B4B36D7D6078506D3C9402F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,151 +0,0 @@
|
||||
{
|
||||
"pubkeys": [
|
||||
"03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"02D2DC6F5DF7C56ACF38C7FA0AE7A759AE30E19B37359DFDE015872324C7EF6E05",
|
||||
"03C7FB101D97FF930ACD0C6760852EF64E69083DE0B06AC6335724754BB4B0522C",
|
||||
"02352433B21E7E05D3B452B81CAE566E06D2E003ECE16D1074AABA4289E0E3D581"
|
||||
],
|
||||
"pnonces": [
|
||||
"036E5EE6E28824029FEA3E8A9DDD2C8483F5AF98F7177C3AF3CB6F47CAF8D94AE902DBA67E4A1F3680826172DA15AFB1A8CA85C7C5CC88900905C8DC8C328511B53E",
|
||||
"03E4F798DA48A76EEC1C9CC5AB7A880FFBA201A5F064E627EC9CB0031D1D58FC5103E06180315C5A522B7EC7C08B69DCD721C313C940819296D0A7AB8E8795AC1F00",
|
||||
"02C0068FD25523A31578B8077F24F78F5BD5F2422AFF47C1FADA0F36B3CEB6C7D202098A55D1736AA5FCC21CF0729CCE852575C06C081125144763C2C4C4A05C09B6",
|
||||
"031F5C87DCFBFCF330DEE4311D85E8F1DEA01D87A6F1C14CDFC7E4F1D8C441CFA40277BF176E9F747C34F81B0D9F072B1B404A86F402C2D86CF9EA9E9C69876EA3B9",
|
||||
"023F7042046E0397822C4144A17F8B63D78748696A46C3B9F0A901D296EC3406C302022B0B464292CF9751D699F10980AC764E6F671EFCA15069BBE62B0D1C62522A",
|
||||
"02D97DDA5988461DF58C5897444F116A7C74E5711BF77A9446E27806563F3B6C47020CBAD9C363A7737F99FA06B6BE093CEAFF5397316C5AC46915C43767AE867C00"
|
||||
],
|
||||
"tweaks": [
|
||||
"B511DA492182A91B0FFB9A98020D55F260AE86D7ECBD0399C7383D59A5F2AF7C",
|
||||
"A815FE049EE3C5AAB66310477FBC8BCCCAC2F3395F59F921C364ACD78A2F48DC",
|
||||
"75448A87274B056468B977BE06EB1E9F657577B7320B0A3376EA51FD420D18A8"
|
||||
],
|
||||
"psigs": [
|
||||
"B15D2CD3C3D22B04DAE438CE653F6B4ECF042F42CFDED7C41B64AAF9B4AF53FB",
|
||||
"6193D6AC61B354E9105BBDC8937A3454A6D705B6D57322A5A472A02CE99FCB64",
|
||||
"9A87D3B79EC67228CB97878B76049B15DBD05B8158D17B5B9114D3C226887505",
|
||||
"66F82EA90923689B855D36C6B7E032FB9970301481B99E01CDB4D6AC7C347A15",
|
||||
"4F5AEE41510848A6447DCD1BBC78457EF69024944C87F40250D3EF2C25D33EFE",
|
||||
"DDEF427BBB847CC027BEFF4EDB01038148917832253EBC355FC33F4A8E2FCCE4",
|
||||
"97B890A26C981DA8102D3BC294159D171D72810FDF7C6A691DEF02F0F7AF3FDC",
|
||||
"53FA9E08BA5243CBCB0D797C5EE83BC6728E539EB76C2D0BF0F971EE4E909971",
|
||||
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"
|
||||
],
|
||||
"msg": "599C67EA410D005B9DA90817CF03ED3B1C868E4DA4EDF00A5880B0082C237869",
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"aggnonce": "0341432722C5CD0268D829C702CF0D1CBCE57033EED201FD335191385227C3210C03D377F2D258B64AADC0E16F26462323D701D286046A2EA93365656AFD9875982B",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"psig_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"expected": "041DA22223CE65C92C9A0D6C2CAC828AAF1EEE56304FEC371DDF91EBB2B9EF0912F1038025857FEDEB3FF696F8B99FA4BB2C5812F6095A2E0004EC99CE18DE1E"
|
||||
},
|
||||
{
|
||||
"aggnonce": "0224AFD36C902084058B51B5D36676BBA4DC97C775873768E58822F87FE437D792028CB15929099EEE2F5DAE404CD39357591BA32E9AF4E162B8D3E7CB5EFE31CB20",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
2
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
2
|
||||
],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"psig_indices": [
|
||||
2,
|
||||
3
|
||||
],
|
||||
"expected": "1069B67EC3D2F3C7C08291ACCB17A9C9B8F2819A52EB5DF8726E17E7D6B52E9F01800260A7E9DAC450F4BE522DE4CE12BA91AEAF2B4279219EF74BE1D286ADD9"
|
||||
},
|
||||
{
|
||||
"aggnonce": "0208C5C438C710F4F96A61E9FF3C37758814B8C3AE12BFEA0ED2C87FF6954FF186020B1816EA104B4FCA2D304D733E0E19CEAD51303FF6420BFD222335CAA402916D",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
2
|
||||
],
|
||||
"tweak_indices": [
|
||||
0
|
||||
],
|
||||
"is_xonly": [
|
||||
false
|
||||
],
|
||||
"psig_indices": [
|
||||
4,
|
||||
5
|
||||
],
|
||||
"expected": "5C558E1DCADE86DA0B2F02626A512E30A22CF5255CAEA7EE32C38E9A71A0E9148BA6C0E6EC7683B64220F0298696F1B878CD47B107B81F7188812D593971E0CC"
|
||||
},
|
||||
{
|
||||
"aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"is_xonly": [
|
||||
true,
|
||||
false,
|
||||
true
|
||||
],
|
||||
"psig_indices": [
|
||||
6,
|
||||
7
|
||||
],
|
||||
"expected": "839B08820B681DBA8DAF4CC7B104E8F2638F9388F8D7A555DC17B6E6971D7426CE07BF6AB01F1DB50E4E33719295F4094572B79868E440FB3DEFD3FAC1DB589E"
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"is_xonly": [
|
||||
true,
|
||||
false,
|
||||
true
|
||||
],
|
||||
"psig_indices": [
|
||||
7,
|
||||
8
|
||||
],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1
|
||||
},
|
||||
"comment": "Partial signature is invalid because it exceeds group size"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,194 +0,0 @@
|
||||
{
|
||||
"sk": "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
|
||||
"pubkeys": [
|
||||
"03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA661",
|
||||
"020000000000000000000000000000000000000000000000000000000000000007"
|
||||
],
|
||||
"secnonces": [
|
||||
"508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F703935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9"
|
||||
],
|
||||
"pnonces": [
|
||||
"0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
|
||||
"0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
|
||||
"032DE2662628C90B03F5E720284EB52FF7D71F4284F627B68A853D78C78E1FFE9303E4C5524E83FFE1493B9077CF1CA6BEB2090C93D930321071AD40B2F44E599046",
|
||||
"0237C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0387BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
|
||||
"020000000000000000000000000000000000000000000000000000000000000009"
|
||||
],
|
||||
"aggnonces": [
|
||||
"028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
|
||||
"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"048465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
|
||||
"028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61020000000000000000000000000000000000000000000000000000000000000009",
|
||||
"028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD6102FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC30"
|
||||
],
|
||||
"msgs": [
|
||||
"F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
|
||||
"",
|
||||
"2626262626262626262626262626262626262626262626262626262626262626262626262626"
|
||||
],
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"expected": "012ABBCB52B3016AC03AD82395A1A415C48B93DEF78718E62A7A90052FE224FB"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 0, 2],
|
||||
"nonce_indices": [1, 0, 2],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 1,
|
||||
"expected": "9FF2F7AAA856150CC8819254218D3ADEEB0535269051897724F9DB3789513A52"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 2,
|
||||
"expected": "FA23C359F6FAC4E7796BB93BC9F0532A95468C539BA20FF86D7C76ED92227900"
|
||||
},
|
||||
{
|
||||
"key_indices": [0, 1],
|
||||
"nonce_indices": [0, 3],
|
||||
"aggnonce_index": 1,
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"expected": "AE386064B26105404798F75DE2EB9AF5EDA5387B064B83D049CB7C5E08879531",
|
||||
"comment": "Both halves of aggregate nonce correspond to point at infinity"
|
||||
}
|
||||
],
|
||||
"sign_error_test_cases": [
|
||||
{
|
||||
"key_indices": [1, 2],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The signer's pubkey must be included in the list of pubkeys."
|
||||
},
|
||||
"comment": "The signers pubkey is not in the list of pubkeys"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 0, 3],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 2,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "Signer 2 provided an invalid public key"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"aggnonce_index": 2,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": null,
|
||||
"contrib": "aggnonce"
|
||||
},
|
||||
"comment": "Aggregate nonce is invalid due wrong tag, 0x04, in the first half"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"aggnonce_index": 3,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": null,
|
||||
"contrib": "aggnonce"
|
||||
},
|
||||
"comment": "Aggregate nonce is invalid because the second half does not correspond to an X coordinate"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"aggnonce_index": 4,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": null,
|
||||
"contrib": "aggnonce"
|
||||
},
|
||||
"comment": "Aggregate nonce is invalid because second half exceeds field size"
|
||||
},
|
||||
{
|
||||
"key_indices": [0, 1, 2],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"secnonce_index": 1,
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "first secnonce value is out of range."
|
||||
},
|
||||
"comment": "Secnonce is invalid which may indicate nonce reuse"
|
||||
}
|
||||
],
|
||||
"verify_fail_test_cases": [
|
||||
{
|
||||
"sig": "97AC833ADCB1AFA42EBF9E0725616F3C9A0D5B614F6FE283CEAAA37A8FFAF406",
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"comment": "Wrong signature (which is equal to the negation of valid signature)"
|
||||
},
|
||||
{
|
||||
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 1,
|
||||
"comment": "Wrong signer"
|
||||
},
|
||||
{
|
||||
"sig": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"comment": "Signature exceeds group size"
|
||||
}
|
||||
],
|
||||
"verify_error_test_cases": [
|
||||
{
|
||||
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [4, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubnonce"
|
||||
},
|
||||
"comment": "Invalid pubnonce"
|
||||
},
|
||||
{
|
||||
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
|
||||
"key_indices": [3, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
"contrib": "pubkey"
|
||||
},
|
||||
"comment": "Invalid pubkey"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,84 +0,0 @@
|
||||
{
|
||||
"sk": "7FB9E0E687ADA1EEBF7ECFE2F21E73EBDB51A7D450948DFE8D76D7F2D1007671",
|
||||
"pubkeys": [
|
||||
"03935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"02F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9",
|
||||
"02DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659"
|
||||
],
|
||||
"secnonce": "508B81A611F100A6B2B6B29656590898AF488BCF2E1F55CF22E5CFB84421FE61FA27FD49B1D50085B481285E1CA205D55C82CC1B31FF5CD54A489829355901F703935F972DA013F80AE011890FA89B67A27B7BE6CCB24D3274D18B2D4067F261A9",
|
||||
"pnonces": [
|
||||
"0337C87821AFD50A8644D820A8F3E02E499C931865C2360FB43D0A0D20DAFE07EA0287BF891D2A6DEAEBADC909352AA9405D1428C15F4B75F04DAE642A95C2548480",
|
||||
"0279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980279BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798",
|
||||
"032DE2662628C90B03F5E720284EB52FF7D71F4284F627B68A853D78C78E1FFE9303E4C5524E83FFE1493B9077CF1CA6BEB2090C93D930321071AD40B2F44E599046"
|
||||
],
|
||||
"aggnonce": "028465FCF0BBDBCF443AABCCE533D42B4B5A10966AC09A49655E8C42DAAB8FCD61037496A3CC86926D452CAFCFD55D25972CA1675D549310DE296BFF42F72EEEA8C9",
|
||||
"tweaks": [
|
||||
"E8F791FF9225A2AF0102AFFF4A9A723D9612A682A25EBE79802B263CDFCD83BB",
|
||||
"AE2EA797CC0FE72AC5B97B97F3C6957D7E4199A167A58EB08BCAFFDA70AC0455",
|
||||
"F52ECBC565B3D8BEA2DFD5B75A4F457E54369809322E4120831626F290FA87E0",
|
||||
"1969AD73CC177FA0B4FCED6DF1F7BF9907E665FDE9BA196A74FED0A3CF5AEF9D",
|
||||
"FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141"
|
||||
],
|
||||
"msg": "F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0],
|
||||
"is_xonly": [true],
|
||||
"signer_index": 2,
|
||||
"expected": "E28A5C66E61E178C2BA19DB77B6CF9F7E2F0F56C17918CD13135E60CC848FE91",
|
||||
"comment": "A single x-only tweak"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0],
|
||||
"is_xonly": [false],
|
||||
"signer_index": 2,
|
||||
"expected": "38B0767798252F21BF5702C48028B095428320F73A4B14DB1E25DE58543D2D2D",
|
||||
"comment": "A single plain tweak"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0, 1],
|
||||
"is_xonly": [false, true],
|
||||
"signer_index": 2,
|
||||
"expected": "408A0A21C4A0F5DACAF9646AD6EB6FECD7F7A11F03ED1F48DFFF2185BC2C2408",
|
||||
"comment": "A plain tweak followed by an x-only tweak"
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0, 1, 2, 3],
|
||||
"is_xonly": [false, false, true, true],
|
||||
"signer_index": 2,
|
||||
"expected": "45ABD206E61E3DF2EC9E264A6FEC8292141A633C28586388235541F9ADE75435",
|
||||
"comment": "Four tweaks: plain, plain, x-only, x-only."
|
||||
},
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0, 1, 2, 3],
|
||||
"is_xonly": [true, false, true, false],
|
||||
"signer_index": 2,
|
||||
"expected": "B255FDCAC27B40C7CE7848E2D3B7BF5EA0ED756DA81565AC804CCCA3E1D5D239",
|
||||
"comment": "Four tweaks: x-only, plain, x-only, plain. If an implementation prohibits applying plain tweaks after x-only tweaks, it can skip this test vector or return an error."
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [4],
|
||||
"is_xonly": [false],
|
||||
"signer_index": 2,
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The tweak must be less than n."
|
||||
},
|
||||
"comment": "Tweak is invalid because it exceeds group size"
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
package btcec_test
|
||||
|
||||
import (
|
||||
"orly.dev/utils/lol"
|
||||
)
|
||||
|
||||
var (
|
||||
log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf
|
||||
)
|
||||
@@ -1,156 +0,0 @@
|
||||
package btcec_test
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"orly.dev/crypto/ec/schnorr"
|
||||
"orly.dev/crypto/p256k/btcec"
|
||||
"orly.dev/crypto/sha256"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"orly.dev/encoders/event"
|
||||
"orly.dev/encoders/event/examples"
|
||||
)
|
||||
|
||||
func TestSigner_Generate(t *testing.T) {
|
||||
for _ = range 100 {
|
||||
var err error
|
||||
signer := &btcec.Signer{}
|
||||
var skb []byte
|
||||
if err = signer.Generate(); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
skb = signer.Sec()
|
||||
if err = signer.InitSec(skb); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBTCECSignerVerify(t *testing.T) {
|
||||
evs := make([]*event.E, 0, 10000)
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
|
||||
buf := make([]byte, 1_000_000)
|
||||
scanner.Buffer(buf, len(buf))
|
||||
var err error
|
||||
signer := &btcec.Signer{}
|
||||
for scanner.Scan() {
|
||||
var valid bool
|
||||
b := scanner.Bytes()
|
||||
ev := event.New()
|
||||
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
t.Errorf("failed to marshal\n%s", b)
|
||||
} else {
|
||||
if valid, err = ev.Verify(); chk.E(err) || !valid {
|
||||
t.Errorf("invalid signature\n%s", b)
|
||||
continue
|
||||
}
|
||||
}
|
||||
id := ev.GetIDBytes()
|
||||
if len(id) != sha256.Size {
|
||||
t.Errorf("id should be 32 bytes, got %d", len(id))
|
||||
continue
|
||||
}
|
||||
if err = signer.InitPub(ev.Pubkey); chk.E(err) {
|
||||
t.Errorf("failed to init pub key: %s\n%0x", err, b)
|
||||
}
|
||||
if valid, err = signer.Verify(id, ev.Sig); chk.E(err) {
|
||||
t.Errorf("failed to verify: %s\n%0x", err, b)
|
||||
}
|
||||
if !valid {
|
||||
t.Errorf(
|
||||
"invalid signature for pub %0x %0x %0x", ev.Pubkey, id,
|
||||
ev.Sig,
|
||||
)
|
||||
}
|
||||
evs = append(evs, ev)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBTCECSignerSign(t *testing.T) {
|
||||
evs := make([]*event.E, 0, 10000)
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
|
||||
buf := make([]byte, 1_000_000)
|
||||
scanner.Buffer(buf, len(buf))
|
||||
var err error
|
||||
signer := &btcec.Signer{}
|
||||
var skb []byte
|
||||
if err = signer.Generate(); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
skb = signer.Sec()
|
||||
if err = signer.InitSec(skb); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
verifier := &btcec.Signer{}
|
||||
pkb := signer.Pub()
|
||||
if err = verifier.InitPub(pkb); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for scanner.Scan() {
|
||||
b := scanner.Bytes()
|
||||
ev := event.New()
|
||||
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
t.Errorf("failed to marshal\n%s", b)
|
||||
}
|
||||
evs = append(evs, ev)
|
||||
}
|
||||
var valid bool
|
||||
sig := make([]byte, schnorr.SignatureSize)
|
||||
for _, ev := range evs {
|
||||
ev.Pubkey = pkb
|
||||
id := ev.GetIDBytes()
|
||||
if sig, err = signer.Sign(id); chk.E(err) {
|
||||
t.Errorf("failed to sign: %s\n%0x", err, id)
|
||||
}
|
||||
if valid, err = verifier.Verify(id, sig); chk.E(err) {
|
||||
t.Errorf("failed to verify: %s\n%0x", err, id)
|
||||
}
|
||||
if !valid {
|
||||
t.Errorf("invalid signature")
|
||||
}
|
||||
}
|
||||
signer.Zero()
|
||||
}
|
||||
|
||||
func TestBTCECECDH(t *testing.T) {
|
||||
n := time.Now()
|
||||
var err error
|
||||
var counter int
|
||||
const total = 100
|
||||
for _ = range total {
|
||||
s1 := new(btcec.Signer)
|
||||
if err = s1.Generate(); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
s2 := new(btcec.Signer)
|
||||
if err = s2.Generate(); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _ = range total {
|
||||
var secret1, secret2 []byte
|
||||
if secret1, err = s1.ECDH(s2.Pub()); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if secret2, err = s2.ECDH(s1.Pub()); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !bytes.Equal(secret1, secret2) {
|
||||
counter++
|
||||
t.Errorf(
|
||||
"ECDH generation failed to work in both directions, %x %x",
|
||||
secret1,
|
||||
secret2,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
a := time.Now()
|
||||
duration := a.Sub(n)
|
||||
log.I.Ln(
|
||||
"errors", counter, "total", total, "time", duration, "time/op",
|
||||
int(duration/total),
|
||||
"ops/sec", int(time.Second)/int(duration/total),
|
||||
)
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
package p256k_test
|
||||
|
||||
import (
|
||||
"orly.dev/utils/lol"
|
||||
)
|
||||
|
||||
var (
|
||||
log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf
|
||||
)
|
||||
59
docs/doc-comments-prompt.txt
Normal file
59
docs/doc-comments-prompt.txt
Normal file
@@ -0,0 +1,59 @@
|
||||
Always start documentation comments with the symbol name verbatim, and then use this to start a sentence summarizing the symbol's function
|
||||
|
||||
For documentation comments on functions and methods:
|
||||
|
||||
- Write a general description in one or two sentences at the top
|
||||
|
||||
- use the format `# Header` for headings of sections.
|
||||
|
||||
- Follow by a description of the parameters and then return values, with a series of bullet points describing each item, each with an empty line in between.
|
||||
|
||||
- Last, describe the expected behaviour of the function or method, keep this with one space apart from the comment start token
|
||||
|
||||
For documentation on types, variables and comments, write 1-2 sentences describing how the item is used.
|
||||
|
||||
For documentation on package, summarise in up to 3 sentences the functions and purpose of the package
|
||||
|
||||
Do not use markdown ** or __ or any similar things in initial words of a bullet point, instead use standard godoc style # prefix for header sections
|
||||
|
||||
ALWAYS separate each bullet point with an empty line, and ALWAYS indent them three spaces after the //
|
||||
|
||||
NEVER put a colon after the first word of the first line of a document comment
|
||||
|
||||
Use British English spelling and Oxford commas
|
||||
|
||||
Always break lines before 80 columns, and flow under bullet points two columns right of the bullet point hyphen.
|
||||
|
||||
Do not write a section for parameters or return values when there is none
|
||||
|
||||
In the `# Expected behavior` section always add an empty line after this title before the description, and don't indent this section as this makes it appear as preformatted monospace.
|
||||
|
||||
A good typical example:
|
||||
|
||||
// NewServer initializes and returns a new Server instance based on the provided
|
||||
// ServerParams and optional settings. It sets up storage, initializes the
|
||||
// relay, and configures necessary components for server operation.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - sp (*ServerParams): The configuration parameters for initializing the
|
||||
// server.
|
||||
//
|
||||
// - opts (...options.O): Optional settings that modify the server's behavior.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - s (*Server): The newly created Server instance.
|
||||
//
|
||||
// - err (error): An error if any step fails during initialization.
|
||||
//
|
||||
// # Expected Behaviour
|
||||
//
|
||||
// - Initializes storage with the provided database path.
|
||||
//
|
||||
// - Configures the server's options using the default settings and applies any
|
||||
// optional settings provided.
|
||||
//
|
||||
// - Sets up a ServeMux for handling HTTP requests.
|
||||
//
|
||||
// - Initializes the relay, starting its operation in a separate goroutine.
|
||||
|
Before Width: | Height: | Size: 70 KiB After Width: | Height: | Size: 70 KiB |
@@ -1,59 +0,0 @@
|
||||
# Codecbuf - Concurrent-Safe Bytes Buffer Pool
|
||||
|
||||
This package provides a concurrent-safe pool of `bytes.Buffer` objects for encoding data. It helps reduce memory allocations and improve performance by reusing buffers instead of creating new ones for each operation.
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```go
|
||||
// Get a buffer from the default pool
|
||||
buf := codecbuf.Get()
|
||||
|
||||
// Use the buffer
|
||||
buf.WriteString("Hello, World!")
|
||||
// ... do more operations with the buffer ...
|
||||
|
||||
// Return the buffer to the pool when done
|
||||
codecbuf.Put(buf)
|
||||
```
|
||||
|
||||
### Using with defer
|
||||
|
||||
```go
|
||||
func ProcessData() {
|
||||
// Get a buffer from the default pool
|
||||
buf := codecbuf.Get()
|
||||
|
||||
// Return the buffer to the pool when the function exits
|
||||
defer codecbuf.Put(buf)
|
||||
|
||||
// Use the buffer
|
||||
buf.WriteString("Hello, World!")
|
||||
// ... do more operations with the buffer ...
|
||||
}
|
||||
```
|
||||
|
||||
### Creating a Custom Pool
|
||||
|
||||
```go
|
||||
// Create a new buffer pool
|
||||
pool := codecbuf.NewPool()
|
||||
|
||||
// Get a buffer from the custom pool
|
||||
buf := pool.Get()
|
||||
|
||||
// Use the buffer
|
||||
buf.WriteString("Hello, World!")
|
||||
|
||||
// Return the buffer to the custom pool
|
||||
pool.Put(buf)
|
||||
```
|
||||
|
||||
## Performance
|
||||
|
||||
Using a buffer pool can significantly improve performance in applications that frequently create and use byte buffers, especially in high-throughput scenarios. The pool reduces garbage collection pressure by reusing buffers instead of allocating new ones.
|
||||
|
||||
## Thread Safety
|
||||
|
||||
The buffer pool is safe for concurrent use by multiple goroutines. However, individual buffers obtained from the pool should not be used concurrently by multiple goroutines without additional synchronization.
|
||||
@@ -1,53 +0,0 @@
|
||||
// Package codecbuf provides a concurrent-safe bytes buffer pool for encoding
|
||||
// data.
|
||||
package codecbuf
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Pool is a concurrent-safe pool of bytes.Buffer objects.
type Pool struct {
	pool sync.Pool
}

// NewPool creates a new buffer pool whose Get returns an empty
// *bytes.Buffer, allocating one only when the pool is empty.
func NewPool() *Pool {
	return &Pool{
		pool: sync.Pool{
			New: func() any {
				return new(bytes.Buffer)
			},
		},
	}
}

// Get returns a buffer from the pool or creates a new one if the pool is
// empty. The returned buffer is empty and ready for use.
func (p *Pool) Get() *bytes.Buffer {
	return p.pool.Get().(*bytes.Buffer)
}

// Put returns a buffer to the pool after zeroing its contents for security
// and resetting it. A nil buffer is ignored.
//
// Zeroing covers the buffer's full usable capacity, not just its current
// length: if the caller Reset the buffer mid-use and then wrote a shorter
// payload, stale sensitive bytes would otherwise survive beyond Len() in the
// backing array and could be observed by the next user of the buffer.
func (p *Pool) Put(buf *bytes.Buffer) {
	if buf == nil {
		return
	}
	// Extend the view to the whole backing array before wiping.
	data := buf.Bytes()
	data = data[:cap(data)]
	for i := range data {
		data[i] = 0
	}
	buf.Reset()
	p.pool.Put(buf)
}

// DefaultPool is the default buffer pool for the application.
var DefaultPool = NewPool()

// Get returns a buffer from the default pool.
func Get() *bytes.Buffer {
	return DefaultPool.Get()
}

// Put returns a buffer to the default pool after zeroing its bytes for
// security and resetting it.
func Put(buf *bytes.Buffer) {
	DefaultPool.Put(buf)
}
|
||||
@@ -1,234 +0,0 @@
|
||||
package codecbuf
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestPool(t *testing.T) {
|
||||
// Create a new pool
|
||||
pool := NewPool()
|
||||
|
||||
// Get a buffer from the pool
|
||||
buf := pool.Get()
|
||||
if buf == nil {
|
||||
t.Fatal("Expected non-nil buffer from pool")
|
||||
}
|
||||
|
||||
// Write some data to the buffer
|
||||
testData := "test data"
|
||||
_, err := buf.WriteString(testData)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to write to buffer: %v", err)
|
||||
}
|
||||
|
||||
// Verify the buffer contains the expected data
|
||||
if buf.String() != testData {
|
||||
t.Fatalf(
|
||||
"Expected buffer to contain %q, got %q", testData, buf.String(),
|
||||
)
|
||||
}
|
||||
|
||||
// Put the buffer back in the pool
|
||||
pool.Put(buf)
|
||||
|
||||
// Get another buffer from the pool (should be the same one, reset)
|
||||
buf2 := pool.Get()
|
||||
if buf2 == nil {
|
||||
t.Fatal("Expected non-nil buffer from pool")
|
||||
}
|
||||
|
||||
// Verify the buffer is empty (was reset)
|
||||
if buf2.Len() != 0 {
|
||||
t.Fatalf("Expected empty buffer, got buffer with length %d", buf2.Len())
|
||||
}
|
||||
|
||||
// Write different data to the buffer
|
||||
testData2 := "different data"
|
||||
_, err = buf2.WriteString(testData2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to write to buffer: %v", err)
|
||||
}
|
||||
|
||||
// Verify the buffer contains the new data
|
||||
if buf2.String() != testData2 {
|
||||
t.Fatalf(
|
||||
"Expected buffer to contain %q, got %q", testData2, buf2.String(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultPool(t *testing.T) {
|
||||
// Get a buffer from the default pool
|
||||
buf := Get()
|
||||
if buf == nil {
|
||||
t.Fatal("Expected non-nil buffer from default pool")
|
||||
}
|
||||
|
||||
// Write some data to the buffer
|
||||
testData := "test data for default pool"
|
||||
_, err := buf.WriteString(testData)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to write to buffer: %v", err)
|
||||
}
|
||||
|
||||
// Verify the buffer contains the expected data
|
||||
if buf.String() != testData {
|
||||
t.Fatalf(
|
||||
"Expected buffer to contain %q, got %q", testData, buf.String(),
|
||||
)
|
||||
}
|
||||
|
||||
// Put the buffer back in the pool
|
||||
Put(buf)
|
||||
|
||||
// Get another buffer from the pool (should be reset)
|
||||
buf2 := Get()
|
||||
if buf2 == nil {
|
||||
t.Fatal("Expected non-nil buffer from default pool")
|
||||
}
|
||||
|
||||
// Verify the buffer is empty (was reset)
|
||||
if buf2.Len() != 0 {
|
||||
t.Fatalf("Expected empty buffer, got buffer with length %d", buf2.Len())
|
||||
}
|
||||
}
|
||||
|
||||
func TestZeroBytes(t *testing.T) {
|
||||
// Create a new pool
|
||||
pool := NewPool()
|
||||
|
||||
// Get a buffer from the pool
|
||||
buf := pool.Get()
|
||||
if buf == nil {
|
||||
t.Fatal("Expected non-nil buffer from pool")
|
||||
}
|
||||
|
||||
// Write some sensitive data to the buffer
|
||||
sensitiveData := []byte{0x01, 0x02, 0x03, 0x04, 0x05}
|
||||
_, err := buf.Write(sensitiveData)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to write to buffer: %v", err)
|
||||
}
|
||||
|
||||
// Get the capacity before putting it back
|
||||
capacity := buf.Cap()
|
||||
|
||||
// Put the buffer back in the pool
|
||||
pool.Put(buf)
|
||||
|
||||
// Get another buffer from the pool (should be the same one, reset)
|
||||
buf2 := pool.Get()
|
||||
if buf2 == nil {
|
||||
t.Fatal("Expected non-nil buffer from pool")
|
||||
}
|
||||
|
||||
// Verify the buffer is empty (was reset)
|
||||
if buf2.Len() != 0 {
|
||||
t.Fatalf("Expected empty buffer, got buffer with length %d", buf2.Len())
|
||||
}
|
||||
|
||||
// Verify the capacity is the same (should be the same buffer)
|
||||
if buf2.Cap() != capacity {
|
||||
t.Fatalf("Expected capacity %d, got %d", capacity, buf2.Cap())
|
||||
}
|
||||
|
||||
// Get the underlying bytes directly
|
||||
// We need to grow the buffer to the same size as before to access the same memory
|
||||
buf2.Grow(len(sensitiveData))
|
||||
|
||||
// Write some new data to the buffer to expose the underlying memory
|
||||
newData := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
|
||||
_, err = buf2.Write(newData)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to write to buffer: %v", err)
|
||||
}
|
||||
|
||||
// Read the buffer bytes
|
||||
bufBytes := buf2.Bytes()
|
||||
|
||||
// Verify that the sensitive data was zeroed out
|
||||
// The new data should be there, but no trace of the old data
|
||||
for i, b := range bufBytes[:len(newData)] {
|
||||
if b != newData[i] {
|
||||
t.Fatalf("Expected byte %d to be %d, got %d", i, newData[i], b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultPoolZeroBytes(t *testing.T) {
|
||||
// Get a buffer from the default pool
|
||||
buf := Get()
|
||||
if buf == nil {
|
||||
t.Fatal("Expected non-nil buffer from default pool")
|
||||
}
|
||||
|
||||
// Write some sensitive data to the buffer
|
||||
sensitiveData := []byte{0x01, 0x02, 0x03, 0x04, 0x05}
|
||||
_, err := buf.Write(sensitiveData)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to write to buffer: %v", err)
|
||||
}
|
||||
|
||||
// Get the capacity before putting it back
|
||||
capacity := buf.Cap()
|
||||
|
||||
// Put the buffer back in the pool
|
||||
Put(buf)
|
||||
|
||||
// Get another buffer from the pool (should be the same one, reset)
|
||||
buf2 := Get()
|
||||
if buf2 == nil {
|
||||
t.Fatal("Expected non-nil buffer from default pool")
|
||||
}
|
||||
|
||||
// Verify the buffer is empty (was reset)
|
||||
if buf2.Len() != 0 {
|
||||
t.Fatalf("Expected empty buffer, got buffer with length %d", buf2.Len())
|
||||
}
|
||||
|
||||
// Verify the capacity is the same (should be the same buffer)
|
||||
if buf2.Cap() != capacity {
|
||||
t.Fatalf("Expected capacity %d, got %d", capacity, buf2.Cap())
|
||||
}
|
||||
|
||||
// Get the underlying bytes directly
|
||||
// We need to grow the buffer to the same size as before to access the same memory
|
||||
buf2.Grow(len(sensitiveData))
|
||||
|
||||
// Write some new data to the buffer to expose the underlying memory
|
||||
newData := []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
|
||||
_, err = buf2.Write(newData)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to write to buffer: %v", err)
|
||||
}
|
||||
|
||||
// Read the buffer bytes
|
||||
bufBytes := buf2.Bytes()
|
||||
|
||||
// Verify that the sensitive data was zeroed out
|
||||
// The new data should be there, but no trace of the old data
|
||||
for i, b := range bufBytes[:len(newData)] {
|
||||
if b != newData[i] {
|
||||
t.Fatalf("Expected byte %d to be %d, got %d", i, newData[i], b)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkWithPool(b *testing.B) {
|
||||
pool := NewPool()
|
||||
b.ResetTimer()
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
buf := pool.Get()
|
||||
buf.WriteString("benchmark test data")
|
||||
pool.Put(buf)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkWithoutPool measures the baseline cost of heap-allocating a fresh
// bytes.Buffer per iteration, for comparison against BenchmarkWithPool.
func BenchmarkWithoutPool(b *testing.B) {
	for n := 0; n < b.N; n++ {
		buffer := bytes.NewBuffer(nil)
		buffer.WriteString("benchmark test data")
	}
}
|
||||
4
go.mod
4
go.mod
@@ -5,13 +5,12 @@ go 1.24.2
|
||||
require (
|
||||
github.com/adrg/xdg v0.5.3
|
||||
github.com/alexflint/go-arg v1.6.0
|
||||
github.com/coder/websocket v1.8.13
|
||||
github.com/danielgtaylor/huma/v2 v2.34.1
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/dgraph-io/badger/v4 v4.7.0
|
||||
github.com/fasthttp/websocket v1.5.12
|
||||
github.com/fatih/color v1.18.0
|
||||
github.com/gobwas/httphead v0.1.0
|
||||
github.com/gobwas/ws v1.4.0
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
|
||||
github.com/klauspost/cpuid/v2 v2.2.11
|
||||
github.com/minio/sha256-simd v1.0.1
|
||||
@@ -41,7 +40,6 @@ require (
|
||||
github.com/felixge/fgprof v0.9.5 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/gobwas/pool v0.2.1 // indirect
|
||||
github.com/google/flatbuffers v25.2.10+incompatible // indirect
|
||||
github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
|
||||
6
go.sum
6
go.sum
@@ -19,6 +19,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
||||
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
|
||||
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
|
||||
github.com/danielgtaylor/huma/v2 v2.34.1 h1:EmOJAbzEGfy0wAq/QMQ1YKfEMBEfE94xdBRLPBP0gwQ=
|
||||
github.com/danielgtaylor/huma/v2 v2.34.1/go.mod h1:ynwJgLk8iGVgoaipi5tgwIQ5yoFNmiu+QdhU7CEEmhk=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -44,13 +46,9 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
|
||||
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
|
||||
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
|
||||
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
|
||||
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs=
|
||||
github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
package listener
|
||||
|
||||
// I is the interface a listener connection must implement: a writable,
// closable endpoint that can identify its remote peer.
type I interface {
	// Write sends p over the connection; it returns the number of bytes
	// written and any error encountered (io.Writer signature).
	Write(p []byte) (n int, err error)
	// Close shuts the connection down.
	Close() error
	// Remote returns an identifier for the remote end — presumably the
	// client's network address; confirm against implementations.
	Remote() string
}
|
||||
@@ -1,19 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"orly.dev/encoders/event"
|
||||
"orly.dev/interfaces/store"
|
||||
"orly.dev/utils/context"
|
||||
)
|
||||
|
||||
// I is the interface a relay server exposes to protocol handlers: access to
// the server context and storage, relay-info serving, and event ingestion.
type I interface {
	// Context returns the server's root context.
	Context() context.T
	// HandleRelayInfo serves the relay information document over HTTP.
	HandleRelayInfo(
		w http.ResponseWriter, r *http.Request,
	)
	// Storage returns the event store backing this server.
	Storage() store.I
	// AddEvent submits ev for storage; hr is the originating HTTP request
	// and remote the client's address. It reports whether the event was
	// accepted and, if not, a machine-readable rejection message.
	AddEvent(
		c context.T, ev *event.E, hr *http.Request, remote string,
	) (accepted bool, message []byte)
}
|
||||
76
main.go
76
main.go
@@ -1,30 +1,29 @@
|
||||
// Package main is a nostr relay with a simple follow/mute list authentication
|
||||
// scheme and the new HTTP REST based protocol. Configuration is via environment
|
||||
// scheme and the new HTTP REST-based protocol. Configuration is via environment
|
||||
// variables or an optional .env file.
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/pkg/profile"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"orly.dev/app/realy"
|
||||
"orly.dev/app/realy/options"
|
||||
"orly.dev/utils/chk"
|
||||
"orly.dev/utils/interrupt"
|
||||
"orly.dev/utils/log"
|
||||
realy_lol "orly.dev/version"
|
||||
app2 "orly.dev/pkg/app"
|
||||
"orly.dev/pkg/app/config"
|
||||
"orly.dev/pkg/app/relay"
|
||||
"orly.dev/pkg/app/relay/options"
|
||||
"orly.dev/pkg/database"
|
||||
"orly.dev/pkg/protocol/openapi"
|
||||
"orly.dev/pkg/protocol/servemux"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/interrupt"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/lol"
|
||||
"orly.dev/pkg/version"
|
||||
"os"
|
||||
|
||||
"orly.dev/app"
|
||||
"orly.dev/app/config"
|
||||
"orly.dev/database"
|
||||
"orly.dev/utils/context"
|
||||
"orly.dev/utils/lol"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.I.F("starting realy %s", realy_lol.V)
|
||||
var err error
|
||||
var cfg *config.C
|
||||
if cfg, err = config.New(); chk.T(err) {
|
||||
@@ -34,6 +33,7 @@ func main() {
|
||||
config.PrintHelp(cfg, os.Stderr)
|
||||
os.Exit(0)
|
||||
}
|
||||
log.I.F("starting %s %s", cfg.AppName, version.V)
|
||||
if config.GetEnv() {
|
||||
config.PrintEnv(cfg, os.Stdout)
|
||||
os.Exit(0)
|
||||
@@ -42,33 +42,53 @@ func main() {
|
||||
config.PrintHelp(cfg, os.Stderr)
|
||||
os.Exit(0)
|
||||
}
|
||||
log.I.Ln("log level", cfg.LogLevel)
|
||||
lol.SetLogLevel(cfg.LogLevel)
|
||||
if cfg.Pprof {
|
||||
defer profile.Start(profile.MemProfile).Stop()
|
||||
go func() {
|
||||
chk.E(http.ListenAndServe("127.0.0.1:6060", nil))
|
||||
}()
|
||||
if cfg.Pprof != "" {
|
||||
switch cfg.Pprof {
|
||||
case "cpu":
|
||||
prof := profile.Start(profile.CPUProfile)
|
||||
defer prof.Stop()
|
||||
case "memory":
|
||||
prof := profile.Start(profile.MemProfile)
|
||||
defer prof.Stop()
|
||||
case "allocation":
|
||||
prof := profile.Start(profile.MemProfileAllocs)
|
||||
defer prof.Stop()
|
||||
}
|
||||
}
|
||||
c, cancel := context.Cancel(context.Bg())
|
||||
storage, err := database.New(c, cancel, cfg.DataDir, cfg.DbLogLevel)
|
||||
if chk.E(err) {
|
||||
var storage *database.D
|
||||
if storage, err = database.New(
|
||||
c, cancel, cfg.DataDir, cfg.DbLogLevel,
|
||||
); chk.E(err) {
|
||||
os.Exit(1)
|
||||
}
|
||||
r := &app.Relay{C: cfg, Store: storage}
|
||||
go app.MonitorResources(c)
|
||||
var server *realy.Server
|
||||
serverParams := &realy.ServerParams{
|
||||
r := &app2.Relay{C: cfg, Store: storage}
|
||||
go app2.MonitorResources(c)
|
||||
var server *relay.Server
|
||||
serverParams := &relay.ServerParams{
|
||||
Ctx: c,
|
||||
Cancel: cancel,
|
||||
Rl: r,
|
||||
DbPath: cfg.DataDir,
|
||||
MaxLimit: 512, // Default max limit for events
|
||||
C: cfg,
|
||||
}
|
||||
var opts []options.O
|
||||
if server, err = realy.NewServer(serverParams, opts...); chk.E(err) {
|
||||
serveMux := servemux.NewServeMux()
|
||||
if server, err = relay.NewServer(
|
||||
serverParams, serveMux, opts...,
|
||||
); chk.E(err) {
|
||||
os.Exit(1)
|
||||
}
|
||||
openapi.New(
|
||||
server,
|
||||
cfg.AppName,
|
||||
version.V,
|
||||
version.Description,
|
||||
"/api",
|
||||
serveMux,
|
||||
)
|
||||
if err != nil {
|
||||
log.F.F("failed to create server: %v", err)
|
||||
}
|
||||
|
||||
313
pkg/app/config/config.go
Normal file
313
pkg/app/config/config.go
Normal file
@@ -0,0 +1,313 @@
|
||||
// Package config provides a go-simpler.org/env configuration table and helpers
|
||||
// for working with the list of key/value lists stored in .env files.
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"orly.dev/pkg/utils/apputil"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
env2 "orly.dev/pkg/utils/env"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/lol"
|
||||
"orly.dev/pkg/version"
|
||||
|
||||
"github.com/adrg/xdg"
|
||||
"go-simpler.org/env"
|
||||
)
|
||||
|
||||
// C holds application configuration settings loaded from environment variables
// and default values. It defines parameters for app behaviour, storage
// locations, logging, and network settings used across the relay service.
//
// Each field's `env` tag names the environment variable it is loaded from;
// `default` supplies the value used when the variable is unset, and `usage`
// is the help text shown by PrintHelp.
type C struct {
	// Identity and filesystem locations.
	AppName string `env:"ORLY_APP_NAME" default:"ORLY"`
	Config  string `env:"ORLY_CONFIG_DIR" usage:"location for configuration file, which has the name '.env' to make it harder to delete, and is a standard environment KEY=value<newline>... style" default:"~/.config/orly"`
	State   string `env:"ORLY_STATE_DATA_DIR" usage:"storage location for state data affected by dynamic interactive interfaces" default:"~/.local/state/orly"`
	DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/cache/orly"`
	// Network listener settings.
	Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
	Port   int    `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
	// Logging and profiling.
	LogLevel   string `env:"ORLY_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
	DbLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
	Pprof      string `env:"ORLY_PPROF" usage:"enable pprof on 127.0.0.1:6060" enum:"cpu,memory,allocation"`
	// Access control. Note: New() forces AuthRequired to true when Owners is
	// non-empty.
	AuthRequired   bool `env:"ORLY_AUTH_REQUIRED" default:"false" usage:"require authentication for all requests"`
	PublicReadable bool `env:"ORLY_PUBLIC_READABLE" default:"true" usage:"allow public read access to regardless of whether the client is authed"`
	// Spider (metadata crawling) configuration.
	SpiderSeeds        []string      `env:"ORLY_SPIDER_SEEDS" usage:"seeds to use for the spider (relays that are looked up initially to find owner relay lists) (comma separated)" default:"wss://profiles.nostr1.com/,wss://relay.nostr.band/,wss://relay.damus.io/,wss://nostr.wine/,wss://nostr.land/,wss://theforest.nostr1.com/,wss://profiles.nostr1.com/"`
	SpiderType         string        `env:"ORLY_SPIDER_TYPE" usage:"whether to spider, and what degree of spidering: none, directory, follows (follows means to the second degree of the follow graph)" default:"directory"`
	SpiderTime         time.Duration `env:"ORLY_SPIDER_FREQUENCY" usage:"how often to run the spider, uses notation 0h0m0s" default:"1h"`
	SpiderSecondDegree bool          `env:"ORLY_SPIDER_SECOND_DEGREE" default:"true" usage:"whether to enable spidering the second degree of follows for non-directory events if ORLY_SPIDER_TYPE is set to 'follows'"`
	// Membership and filtering lists.
	Owners    []string `env:"ORLY_OWNERS" usage:"list of users whose follow lists designate whitelisted users who can publish events, and who can read if public readable is false (comma separated)"`
	Private   bool     `env:"ORLY_PRIVATE" usage:"do not spider for user metadata because the relay is private and this would leak relay memberships" default:"false"`
	Whitelist []string `env:"ORLY_WHITELIST" usage:"only allow connections from this list of IP addresses"`
	Blacklist []string `env:"ORLY_BLACKLIST" usage:"list of pubkeys to block when auth is not required (comma separated)"`
	// Cluster replication.
	RelaySecret string   `env:"ORLY_SECRET_KEY" usage:"secret key for relay cluster replication authentication"`
	PeerRelays  []string `env:"ORLY_PEER_RELAYS" usage:"list of peer relays URLs that new events are pushed to in format <pubkey>|<url>"`
}
|
||||
|
||||
// New creates and initializes a new configuration object for the relay
|
||||
// application
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - cfg: A pointer to the initialized configuration struct containing default
|
||||
// or environment-provided values
|
||||
//
|
||||
// - err: An error object that is non-nil if any operation during
|
||||
// initialization fails
|
||||
//
|
||||
// # Expected Behaviour:
|
||||
//
|
||||
// Initializes a new configuration instance by loading environment variables and
|
||||
// checking for a .env file in the default configuration directory. Sets logging
|
||||
// levels based on configuration values and returns the populated configuration
|
||||
// or an error if any step fails
|
||||
func New() (cfg *C, err error) {
|
||||
cfg = &C{}
|
||||
if err = env.Load(cfg, &env.Options{SliceSep: ","}); chk.T(err) {
|
||||
return
|
||||
}
|
||||
if cfg.Config == "" || strings.Contains(cfg.State, "~") {
|
||||
cfg.Config = filepath.Join(xdg.ConfigHome, cfg.AppName)
|
||||
}
|
||||
if cfg.DataDir == "" || strings.Contains(cfg.State, "~") {
|
||||
cfg.DataDir = filepath.Join(xdg.DataHome, cfg.AppName)
|
||||
}
|
||||
if cfg.State == "" || strings.Contains(cfg.State, "~") {
|
||||
cfg.State = filepath.Join(xdg.StateHome, cfg.AppName)
|
||||
}
|
||||
if len(cfg.Owners) > 0 {
|
||||
cfg.AuthRequired = true
|
||||
}
|
||||
envPath := filepath.Join(cfg.Config, ".env")
|
||||
if apputil.FileExists(envPath) {
|
||||
var e env2.Env
|
||||
if e, err = env2.GetEnv(envPath); chk.T(err) {
|
||||
return
|
||||
}
|
||||
if err = env.Load(
|
||||
cfg, &env.Options{SliceSep: ",", Source: e},
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
lol.SetLogLevel(cfg.LogLevel)
|
||||
log.I.F("loaded configuration from %s", envPath)
|
||||
}
|
||||
// if spider seeds has no elements, there still is a single entry with an
|
||||
// empty string; and also if any of the fields are empty strings, they need
|
||||
// to be removed.
|
||||
var seeds []string
|
||||
for _, u := range cfg.SpiderSeeds {
|
||||
if u == "" {
|
||||
continue
|
||||
}
|
||||
seeds = append(seeds, u)
|
||||
}
|
||||
cfg.SpiderSeeds = seeds
|
||||
return
|
||||
}
|
||||
|
||||
// HelpRequested determines if the command line arguments indicate a request
// for help.
//
// # Return Values
//
// - help: A boolean value indicating true if a help flag was detected in the
// command line arguments, false otherwise
//
// # Expected Behaviour
//
// The function checks the first command line argument for common help flags
// and returns true if any of them are present. Returns false if no help flag
// is found. The matching logic lives in isHelpArg so it can be tested
// without mutating the process-global os.Args.
func HelpRequested() (help bool) {
	return isHelpArg(os.Args)
}

// isHelpArg reports whether args (shaped like os.Args: args[0] is the
// program name) requests help via its first real argument,
// case-insensitively.
func isHelpArg(args []string) bool {
	if len(args) < 2 {
		return false
	}
	switch strings.ToLower(args[1]) {
	case "help", "-h", "--h", "-help", "--help", "?":
		return true
	}
	return false
}
|
||||
|
||||
// GetEnv checks if the first command line argument is "env" and returns
// whether the environment configuration should be printed.
//
// # Return Values
//
// - requested: A boolean indicating true if the 'env' argument was
// provided, false otherwise.
//
// # Expected Behaviour
//
// The function returns true when the first command line argument is "env"
// (case-insensitive), signalling that the environment configuration should
// be printed. Otherwise, it returns false. The matching logic lives in
// isEnvArg so it can be tested without mutating the process-global os.Args.
func GetEnv() (requested bool) {
	return isEnvArg(os.Args)
}

// isEnvArg reports whether args (shaped like os.Args) has "env" as its first
// real argument, case-insensitively.
func isEnvArg(args []string) bool {
	return len(args) > 1 && strings.EqualFold(args[1], "env")
}
|
||||
|
||||
// KV is a key/value pair.
type KV struct{ Key, Value string }

// KVSlice is a sortable slice of key/value pairs, designed for managing
// configuration data and enabling operations like merging and sorting based
// on keys.
type KVSlice []KV

// Len, Less, and Swap implement sort.Interface, ordering pairs by Key.
func (kv KVSlice) Len() int           { return len(kv) }
func (kv KVSlice) Less(i, j int) bool { return kv[i].Key < kv[j].Key }
func (kv KVSlice) Swap(i, j int)      { kv[i], kv[j] = kv[j], kv[i] }

// Compose merges two KVSlice instances into a new slice where key-value pairs
// from the second slice override any duplicate keys from the first slice.
//
// # Parameters
//
// - kv2: The second KVSlice whose entries will be merged with the receiver.
//
// # Return Values
//
// - out: A new KVSlice containing all entries from both slices, with keys
// from kv2 taking precedence over keys from the receiver.
//
// # Expected Behaviour
//
// The method returns a new KVSlice that combines the contents of the
// receiver and kv2. If any key exists in both slices, the value from kv2 is
// used. The receiver is not modified. Note: the result preserves insertion
// order; it is NOT re-sorted (the original doc comment claimed otherwise).
func (kv KVSlice) Compose(kv2 KVSlice) (out KVSlice) {
	// Copy the receiver in a single append rather than an element-by-element
	// loop; out starts nil, so this allocates a fresh backing array.
	out = append(out, kv...)
	// The label was previously named "out", confusingly shadowing the result
	// parameter's name.
next:
	for _, p := range kv2 {
		for j := range out {
			// If the key is repeated, replace the value.
			if out[j].Key == p.Key {
				out[j].Value = p.Value
				continue next
			}
		}
		out = append(out, p)
	}
	return
}
|
||||
|
||||
// EnvKV generates key/value pairs from a configuration object's struct tags
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - cfg: A configuration object whose struct fields are processed for env tags
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - m: A KVSlice containing key/value pairs derived from the config's env tags
|
||||
//
|
||||
// # Expected Behaviour
|
||||
//
|
||||
// Processes each field of the config object, extracting values tagged with
|
||||
// "env" and converting them to strings. Skips fields without an "env" tag.
|
||||
// Handles various value types including strings, integers, booleans, durations,
|
||||
// and string slices by joining elements with commas.
|
||||
func EnvKV(cfg any) (m KVSlice) {
|
||||
t := reflect.TypeOf(cfg)
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
k := t.Field(i).Tag.Get("env")
|
||||
v := reflect.ValueOf(cfg).Field(i).Interface()
|
||||
var val string
|
||||
switch v.(type) {
|
||||
case string:
|
||||
val = v.(string)
|
||||
case int, bool, time.Duration:
|
||||
val = fmt.Sprint(v)
|
||||
case []string:
|
||||
arr := v.([]string)
|
||||
if len(arr) > 0 {
|
||||
val = strings.Join(arr, ",")
|
||||
}
|
||||
}
|
||||
// this can happen with embedded structs
|
||||
if k == "" {
|
||||
continue
|
||||
}
|
||||
m = append(m, KV{k, val})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// PrintEnv outputs sorted environment key/value pairs from a configuration object
|
||||
// to the provided writer
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - cfg: Pointer to the configuration object containing env tags
|
||||
//
|
||||
// - printer: Destination for the output, typically an io.Writer implementation
|
||||
//
|
||||
// # Expected Behaviour
|
||||
//
|
||||
// Outputs each environment variable derived from the config's struct tags in
|
||||
// sorted order, formatted as "key=value\n" to the specified writer
|
||||
func PrintEnv(cfg *C, printer io.Writer) {
|
||||
kvs := EnvKV(*cfg)
|
||||
sort.Sort(kvs)
|
||||
for _, v := range kvs {
|
||||
_, _ = fmt.Fprintf(printer, "%s=%s\n", v.Key, v.Value)
|
||||
}
|
||||
}
|
||||
|
||||
// PrintHelp prints help information including application version, environment
|
||||
// variable configuration, and details about .env file handling to the provided
|
||||
// writer
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - cfg: Configuration object containing app name and config directory path
|
||||
//
|
||||
// - printer: Output destination for the help text
|
||||
//
|
||||
// # Expected Behaviour
|
||||
//
|
||||
// Prints application name and version followed by environment variable
|
||||
// configuration details, explains .env file behaviour including automatic
|
||||
// loading and custom path options, and displays current configuration values
|
||||
// using PrintEnv. Outputs all information to the specified writer
|
||||
func PrintHelp(cfg *C, printer io.Writer) {
|
||||
_, _ = fmt.Fprintf(
|
||||
printer,
|
||||
"%s %s\n\n", cfg.AppName, version.V,
|
||||
)
|
||||
_, _ = fmt.Fprintf(
|
||||
printer,
|
||||
"Environment variables that configure %s:\n\n", cfg.AppName,
|
||||
)
|
||||
env.Usage(cfg, printer, &env.Options{SliceSep: ","})
|
||||
_, _ = fmt.Fprintf(
|
||||
printer,
|
||||
"\nCLI parameter 'help' also prints this information\n"+
|
||||
"\n.env file found at the path %s will be automatically "+
|
||||
"loaded for configuration.\nset these two variables for a custom load path,"+
|
||||
" this file will be created on first startup.\nenvironment overrides it and "+
|
||||
"you can also edit the file to set configuration options\n\n"+
|
||||
"use the parameter 'env' to print out the current configuration to the terminal\n\n"+
|
||||
"set the environment using\n\n\t%s env > %s/.env\n",
|
||||
cfg.Config,
|
||||
os.Args[0],
|
||||
cfg.Config,
|
||||
)
|
||||
fmt.Fprintf(printer, "\ncurrent configuration:\n\n")
|
||||
PrintEnv(cfg, printer)
|
||||
fmt.Fprintln(printer)
|
||||
return
|
||||
}
|
||||
170
pkg/app/main.go
Normal file
170
pkg/app/main.go
Normal file
@@ -0,0 +1,170 @@
|
||||
// Package app implements the orly nostr relay.
|
||||
package app
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"orly.dev/pkg/app/config"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/encoders/filters"
|
||||
"orly.dev/pkg/interfaces/store"
|
||||
"orly.dev/pkg/utils/context"
|
||||
)
|
||||
|
||||
// List represents a set-like structure using a map with empty struct values;
// the struct{} value occupies no storage, so membership is the only data.
type List map[string]struct{}

// Relay is a struct that represents a relay for Nostr events. It contains a
// configuration and a persistence layer for storing the events. The Relay
// type implements various methods to handle event acceptance, filtering,
// and storage.
//
// NOTE(review): the embedded sync.Mutex means a Relay must not be copied and
// must be used via pointer; exactly which fields the lock is meant to guard
// is not visible here — confirm against the methods that take it.
type Relay struct {
	sync.Mutex
	*config.C       // embedded configuration (provides AppName etc.)
	Store     store.I // persistence layer for events
}
|
||||
|
||||
// Name returns the name of the application represented by this relay.
//
// # Return Values
//
// - string: the name of the application, taken directly from the AppName
// field of the embedded configuration.
func (r *Relay) Name() string { return r.C.AppName }

// Storage returns the persistence layer used by this relay to store Nostr
// events.
func (r *Relay) Storage() store.I { return r.Store }

// Init initializes and sets up the relay for Nostr events.
//
// # Return Values
//
// - err: an error if any issues occurred during initialization.
//
// # Expected behaviour
//
// Currently a no-op that always returns nil; it exists to satisfy the relay
// lifecycle and is the place where future setup work belongs.
func (r *Relay) Init() (err error) {
	return nil
}
|
||||
|
||||
// AcceptEvent checks an event and determines whether the event should be
|
||||
// accepted and if the client has the authority to submit it.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - c - a context.T for signalling if the task has been canceled.
|
||||
//
|
||||
// - evt - an *event.E that is being evaluated.
|
||||
//
|
||||
// - hr - an *http.Request containing the information about the current
|
||||
// connection.
|
||||
//
|
||||
// - origin - the address of the client.
|
||||
//
|
||||
// - authedPubkey - the public key, if authed, of the client for this
|
||||
// connection.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - accept - returns true if the event is accepted.
|
||||
//
|
||||
// - notice - if it is not accepted, a message in the form of
|
||||
// `machine-readable-prefix: reason for error/blocked/rate-limited/etc`
|
||||
//
|
||||
// - afterSave - a closure to run after the event has been stored.
|
||||
//
|
||||
// # Expected behaviour
|
||||
//
|
||||
// This function checks whether the client has permission to store the event,
|
||||
// and if they don't, returns false and some kind of error message. If they do,
|
||||
// the event is forwarded to the database to be stored and indexed.
|
||||
func (r *Relay) AcceptEvent(
|
||||
c context.T, evt *event.E, hr *http.Request,
|
||||
origin string, authedPubkey []byte,
|
||||
) (accept bool, notice string, afterSave func()) {
|
||||
accept = true
|
||||
return
|
||||
}
|
||||
|
||||
// AcceptFilter checks if a filter is allowed based on authentication status and
|
||||
// relay policies
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - c: Context for task cancellation.
|
||||
//
|
||||
// - hr: HTTP request containing connection information.
|
||||
//
|
||||
// - f: Filter to evaluate for acceptance.
|
||||
//
|
||||
// - authedPubkey: Public key of authenticated client, if applicable.
|
||||
//
|
||||
// # Return values
|
||||
//
|
||||
// - allowed: The filter if permitted; may be modified during processing.
|
||||
//
|
||||
// - ok: Boolean indicating whether the filter is accepted.
|
||||
//
|
||||
// - modified: Boolean indicating whether the filter was altered during
|
||||
// evaluation.
|
||||
//
|
||||
// # Expected behaviour
|
||||
//
|
||||
// The method evaluates whether the provided filter should be allowed based on
|
||||
// authentication status and relay-specific rules. If permitted, returns the
|
||||
// filter (possibly modified) and true for ok; otherwise returns nil or false
|
||||
// for ok accordingly.
|
||||
func (r *Relay) AcceptFilter(
|
||||
c context.T, hr *http.Request, f *filter.S,
|
||||
authedPubkey []byte,
|
||||
) (allowed *filter.S, ok bool, modified bool) {
|
||||
allowed = f
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
// AcceptReq evaluates whether the provided filters are allowed based on
|
||||
// authentication status and relay policies for an incoming HTTP request.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - c: Context for task cancellation.
|
||||
//
|
||||
// - hr: HTTP request containing connection information.
|
||||
//
|
||||
// - id: Identifier associated with the request.
|
||||
//
|
||||
// - ff: Filters to evaluate for acceptance.
|
||||
//
|
||||
// - authedPubkey: Public key of authenticated client, if applicable.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - allowed: The filters if permitted; may be modified during processing.
|
||||
//
|
||||
// - ok: Boolean indicating whether the filters are accepted.
|
||||
//
|
||||
// - modified: Boolean indicating whether the filters were altered during
|
||||
// evaluation.
|
||||
//
|
||||
// # Expected Behaviour:
|
||||
//
|
||||
// The method evaluates whether the provided filters should be allowed based on
|
||||
// authentication status and relay-specific rules. If permitted, returns the
|
||||
// filters (possibly modified) and true for ok; otherwise returns nil or false
|
||||
// for ok accordingly.
|
||||
func (r *Relay) AcceptReq(
|
||||
c context.T, hr *http.Request, id []byte,
|
||||
ff *filters.T, authedPubkey []byte,
|
||||
) (allowed *filters.T, ok bool, modified bool) {
|
||||
allowed = ff
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
77
pkg/app/relay/accept-event.go
Normal file
77
pkg/app/relay/accept-event.go
Normal file
@@ -0,0 +1,77 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/utils/context"
|
||||
)
|
||||
|
||||
// AcceptEvent determines whether an incoming event should be accepted for
|
||||
// processing based on authentication requirements.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - c: the context of the request
|
||||
//
|
||||
// - ev: pointer to the event structure
|
||||
//
|
||||
// - hr: HTTP request related to the event (if any)
|
||||
//
|
||||
// - authedPubkey: public key of the authenticated user (if any)
|
||||
//
|
||||
// - remote: remote address from where the event was received
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - accept: boolean indicating whether the event should be accepted
|
||||
//
|
||||
// - notice: string providing a message or error notice
|
||||
//
|
||||
// - afterSave: function to execute after saving the event (if applicable)
|
||||
//
|
||||
// # Expected Behaviour:
|
||||
//
|
||||
// - If authentication is required and no public key is provided, reject the
|
||||
// event.
|
||||
//
|
||||
// - Otherwise, accept the event for processing.
|
||||
func (s *Server) AcceptEvent(
|
||||
c context.T, ev *event.E, hr *http.Request, authedPubkey []byte,
|
||||
remote string,
|
||||
) (accept bool, notice string, afterSave func()) {
|
||||
if !s.AuthRequired() {
|
||||
// Check blacklist for public relay mode
|
||||
if len(s.blacklistPubkeys) > 0 {
|
||||
for _, blockedPubkey := range s.blacklistPubkeys {
|
||||
if bytes.Equal(blockedPubkey, ev.Pubkey) {
|
||||
notice = "event author is blacklisted"
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
accept = true
|
||||
return
|
||||
}
|
||||
// if auth is required and the user is not authed, reject
|
||||
if len(authedPubkey) == 0 {
|
||||
notice = "client isn't authed"
|
||||
return
|
||||
}
|
||||
for _, u := range s.OwnersMuted() {
|
||||
if bytes.Equal(u, authedPubkey) {
|
||||
notice = "event author is banned from this relay"
|
||||
return
|
||||
}
|
||||
}
|
||||
// check if the authed user is on the lists
|
||||
list := append(s.OwnersFollowed(), s.FollowedFollows()...)
|
||||
for _, u := range list {
|
||||
if bytes.Equal(u, authedPubkey) {
|
||||
accept = true
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
314
pkg/app/relay/accept-event_test.go
Normal file
314
pkg/app/relay/accept-event_test.go
Normal file
@@ -0,0 +1,314 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/app/config"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/utils/context"
|
||||
)
|
||||
|
||||
// mockServerForEvent is a simple mock implementation of the Server struct for testing AcceptEvent
type mockServerForEvent struct {
	authRequired    bool     // whether clients must authenticate
	ownersFollowed  [][]byte // pubkeys directly followed by the relay owners
	followedFollows [][]byte // pubkeys followed by the owners' follows
}
|
||||
|
||||
// AuthRequired reports whether this mock requires client authentication.
func (m *mockServerForEvent) AuthRequired() bool {
	return m.authRequired
}

// OwnersFollowed returns the mock's owners-followed pubkey list.
func (m *mockServerForEvent) OwnersFollowed() [][]byte {
	return m.ownersFollowed
}

// FollowedFollows returns the mock's followed-follows pubkey list.
func (m *mockServerForEvent) FollowedFollows() [][]byte {
	return m.followedFollows
}
|
||||
|
||||
// AcceptEvent implements the Server.AcceptEvent method for testing
|
||||
func (m *mockServerForEvent) AcceptEvent(
|
||||
c context.T, ev *event.E, hr *http.Request, authedPubkey []byte,
|
||||
remote string,
|
||||
) (accept bool, notice string, afterSave func()) {
|
||||
// if auth is required and the user is not authed, reject
|
||||
if m.AuthRequired() && len(authedPubkey) == 0 {
|
||||
return
|
||||
}
|
||||
// check if the authed user is on the lists
|
||||
list := append(m.OwnersFollowed(), m.FollowedFollows()...)
|
||||
for _, u := range list {
|
||||
if bytes.Equal(u, authedPubkey) {
|
||||
accept = true
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// TestAcceptEvent exercises the mock AcceptEvent implementation across
// combinations of auth requirement, authed pubkey, and allow-list
// membership.
func TestAcceptEvent(t *testing.T) {
	// Create a context and HTTP request for testing
	ctx := context.Bg()
	req, _ := http.NewRequest("GET", "http://example.com", nil)

	// Create a test event
	testEvent := &event.E{}

	// Test cases
	tests := []struct {
		name           string
		server         *mockServerForEvent
		authedPubkey   []byte
		expectedAccept bool
	}{
		{
			name: "Auth required, no pubkey",
			server: &mockServerForEvent{
				authRequired: true,
			},
			authedPubkey:   nil,
			expectedAccept: false,
		},
		{
			name: "Auth required, with pubkey, not on lists",
			server: &mockServerForEvent{
				authRequired: true,
				ownersFollowed: [][]byte{
					[]byte("followed1"),
					[]byte("followed2"),
				},
				followedFollows: [][]byte{
					[]byte("follow1"),
					[]byte("follow2"),
				},
			},
			authedPubkey:   []byte("test-pubkey"),
			expectedAccept: false,
		},
		{
			name: "Auth required, with pubkey, on owners followed list",
			server: &mockServerForEvent{
				authRequired: true,
				ownersFollowed: [][]byte{
					[]byte("followed1"),
					[]byte("test-pubkey"),
					[]byte("followed2"),
				},
				followedFollows: [][]byte{
					[]byte("follow1"),
					[]byte("follow2"),
				},
			},
			authedPubkey:   []byte("test-pubkey"),
			expectedAccept: true,
		},
		{
			name: "Auth required, with pubkey, on followed follows list",
			server: &mockServerForEvent{
				authRequired: true,
				ownersFollowed: [][]byte{
					[]byte("followed1"),
					[]byte("followed2"),
				},
				followedFollows: [][]byte{
					[]byte("follow1"),
					[]byte("test-pubkey"),
					[]byte("follow2"),
				},
			},
			authedPubkey:   []byte("test-pubkey"),
			expectedAccept: true,
		},
		{
			// NOTE: with auth off the mock still requires list membership,
			// so a nil pubkey is rejected here.
			name: "Auth not required, no pubkey, not on lists",
			server: &mockServerForEvent{
				authRequired: false,
				ownersFollowed: [][]byte{
					[]byte("followed1"),
					[]byte("followed2"),
				},
				followedFollows: [][]byte{
					[]byte("follow1"),
					[]byte("follow2"),
				},
			},
			authedPubkey:   nil,
			expectedAccept: false,
		},
		{
			name: "Auth not required, with pubkey, on lists",
			server: &mockServerForEvent{
				authRequired: false,
				ownersFollowed: [][]byte{
					[]byte("followed1"),
					[]byte("test-pubkey"),
					[]byte("followed2"),
				},
				followedFollows: [][]byte{
					[]byte("follow1"),
					[]byte("follow2"),
				},
			},
			authedPubkey:   []byte("test-pubkey"),
			expectedAccept: true,
		},
	}

	// Run tests
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Use the mock server's AcceptEvent method
			accept, notice, afterSave := tt.server.AcceptEvent(ctx, testEvent, req, tt.authedPubkey, "127.0.0.1")

			// Check if the acceptance status matches the expected value
			if accept != tt.expectedAccept {
				t.Errorf("AcceptEvent() accept = %v, want %v", accept, tt.expectedAccept)
			}

			// Notice should be empty in the current implementation
			if notice != "" {
				t.Errorf("AcceptEvent() notice = %v, want empty string", notice)
			}

			// afterSave should be nil in the current implementation
			if afterSave != nil {
				t.Error("AcceptEvent() afterSave is not nil, but should be nil")
			}
		})
	}
}
|
||||
|
||||
// TestAcceptEventWithRealServer tests the AcceptEvent function with a real Server instance,
// walking through the rejection and acceptance paths in order: unauthenticated,
// authenticated but unlisted, owners-followed, followed-follows, and muted.
func TestAcceptEventWithRealServer(t *testing.T) {
	// Create a context and HTTP request for testing
	ctx := context.Bg()
	req, _ := http.NewRequest("GET", "http://example.com", nil)

	// Create a test event
	testEvent := &event.E{}

	// Create a Server instance with configuration
	s := &Server{
		C: &config.C{
			AuthRequired: true,
		},
		Lists: new(Lists),
	}

	// Test with no authenticated pubkey
	accept, notice, afterSave := s.AcceptEvent(ctx, testEvent, req, nil, "127.0.0.1")
	if accept {
		t.Error("AcceptEvent() accept = true, want false")
	}
	if notice != "client isn't authed" {
		t.Errorf("AcceptEvent() notice = %v, want 'client isn't authed'", notice)
	}
	if afterSave != nil {
		t.Error("AcceptEvent() afterSave is not nil, but should be nil")
	}

	// Test with authenticated pubkey but not on any list
	accept, notice, afterSave = s.AcceptEvent(ctx, testEvent, req, []byte("test-pubkey"), "127.0.0.1")
	if accept {
		t.Error("AcceptEvent() accept = true, want false")
	}

	// Add the pubkey to the owners followed list
	s.SetOwnersFollowed([][]byte{[]byte("test-pubkey")})

	// Test with authenticated pubkey on the owners followed list
	accept, notice, afterSave = s.AcceptEvent(ctx, testEvent, req, []byte("test-pubkey"), "127.0.0.1")
	if !accept {
		t.Error("AcceptEvent() accept = false, want true")
	}

	// Clear the owners followed list and add the pubkey to the followed follows list
	s.SetOwnersFollowed(nil)
	s.SetFollowedFollows([][]byte{[]byte("test-pubkey")})

	// Test with authenticated pubkey on the followed follows list
	accept, notice, afterSave = s.AcceptEvent(ctx, testEvent, req, []byte("test-pubkey"), "127.0.0.1")
	if !accept {
		t.Error("AcceptEvent() accept = false, want true")
	}

	// Test with muted user: the mute check runs before the allow lists, so
	// the still-present followed-follows entry does not rescue the author.
	s.SetOwnersMuted([][]byte{[]byte("test-pubkey")})
	accept, notice, afterSave = s.AcceptEvent(ctx, testEvent, req, []byte("test-pubkey"), "127.0.0.1")
	if accept {
		t.Error("AcceptEvent() accept = true, want false")
	}
	if notice != "event author is banned from this relay" {
		t.Errorf("AcceptEvent() notice = %v, want 'event author is banned from this relay'", notice)
	}
}
|
||||
|
||||
// TestAcceptEventWithBlacklist tests the blacklist functionality when auth is not required:
// events are accepted unless the author pubkey appears in blacklistPubkeys, and the
// blacklist is bypassed entirely once auth is required (auth failure rejects first).
func TestAcceptEventWithBlacklist(t *testing.T) {
	// Create a context and HTTP request for testing
	ctx := context.Bg()
	req, _ := http.NewRequest("GET", "http://example.com", nil)

	// Test pubkey bytes (32 bytes each, matching schnorr pubkey length)
	testPubkey := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20}
	blockedPubkey := []byte{0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30}

	// Test with public relay mode (auth not required) and no blacklist
	s := &Server{
		C: &config.C{
			AuthRequired: false,
		},
		Lists: new(Lists),
	}

	// Create event with test pubkey
	testEvent := &event.E{}
	testEvent.Pubkey = testPubkey

	// Should accept when no blacklist
	accept, notice, _ := s.AcceptEvent(ctx, testEvent, req, nil, "127.0.0.1")
	if !accept {
		t.Error("AcceptEvent() accept = false, want true")
	}
	if notice != "" {
		t.Errorf("AcceptEvent() notice = %v, want empty string", notice)
	}

	// Add blacklist with different pubkey
	s.blacklistPubkeys = [][]byte{blockedPubkey}

	// Should still accept when author not in blacklist
	accept, notice, _ = s.AcceptEvent(ctx, testEvent, req, nil, "127.0.0.1")
	if !accept {
		t.Error("AcceptEvent() accept = false, want true")
	}
	if notice != "" {
		t.Errorf("AcceptEvent() notice = %v, want empty string", notice)
	}

	// Create event with blocked pubkey
	blockedEvent := &event.E{}
	blockedEvent.Pubkey = blockedPubkey

	// Should reject when author is in blacklist
	accept, notice, _ = s.AcceptEvent(ctx, blockedEvent, req, nil, "127.0.0.1")
	if accept {
		t.Error("AcceptEvent() accept = true, want false")
	}
	if notice != "event author is blacklisted" {
		t.Errorf("AcceptEvent() notice = %v, want 'event author is blacklisted'", notice)
	}

	// Test with auth required - blacklist should not apply
	s.C.AuthRequired = true
	accept, notice, _ = s.AcceptEvent(ctx, blockedEvent, req, nil, "127.0.0.1")
	if accept {
		t.Error("AcceptEvent() accept = true, want false")
	}
	if notice != "client isn't authed" {
		t.Errorf("AcceptEvent() notice = %v, want 'client isn't authed'", notice)
	}
}
|
||||
50
pkg/app/relay/accept-req.go
Normal file
50
pkg/app/relay/accept-req.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"orly.dev/pkg/encoders/filters"
|
||||
"orly.dev/pkg/utils/context"
|
||||
)
|
||||
|
||||
// AcceptReq determines whether a request should be accepted based on
|
||||
// authentication and public readability settings.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - c: context for the request handling
|
||||
//
|
||||
// - hr: HTTP request received
|
||||
//
|
||||
// - f: filters to apply
|
||||
//
|
||||
// - authedPubkey: authenticated public key (if any)
|
||||
//
|
||||
// - remote: remote address of the request
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - allowed: filters that are allowed after processing
|
||||
//
|
||||
// - accept: boolean indicating whether the request should be accepted
|
||||
//
|
||||
// - modified: boolean indicating if the request has been modified during
|
||||
// processing
|
||||
//
|
||||
// # Expected Behaviour:
|
||||
//
|
||||
// - If authentication is required and there's no authenticated public key,
|
||||
// reject the request.
|
||||
//
|
||||
// - Otherwise, accept the request.
|
||||
func (s *Server) AcceptReq(
|
||||
c context.T, hr *http.Request, ff *filters.T,
|
||||
authedPubkey []byte, remote string,
|
||||
) (allowed *filters.T, accept bool, modified bool) {
|
||||
// if auth is required, and not public readable, reject
|
||||
if s.AuthRequired() && len(authedPubkey) == 0 && !s.PublicReadable() {
|
||||
return
|
||||
}
|
||||
allowed = ff
|
||||
accept = true
|
||||
return
|
||||
}
|
||||
210
pkg/app/relay/accept-req_test.go
Normal file
210
pkg/app/relay/accept-req_test.go
Normal file
@@ -0,0 +1,210 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/app/config"
|
||||
"orly.dev/pkg/encoders/filters"
|
||||
"orly.dev/pkg/utils/context"
|
||||
)
|
||||
|
||||
// mockServer is a simple mock implementation of the Server struct for testing
type mockServer struct {
	authRequired   bool     // whether auth is explicitly required
	publicReadable bool     // whether unauthenticated reads are permitted
	ownersPubkeys  [][]byte // owner pubkeys; non-empty implies auth required
}
|
||||
|
||||
// AuthRequired reports whether auth is required, either explicitly or
// implicitly because owner pubkeys are configured.
func (m *mockServer) AuthRequired() bool {
	return m.authRequired || m.LenOwnersPubkeys() > 0
}

// PublicReadable reports whether unauthenticated reads are permitted.
func (m *mockServer) PublicReadable() bool {
	return m.publicReadable
}

// LenOwnersPubkeys returns the number of configured owner pubkeys.
func (m *mockServer) LenOwnersPubkeys() int {
	return len(m.ownersPubkeys)
}

// OwnersFollowed returns nil; the AcceptReq path does not consult it.
func (m *mockServer) OwnersFollowed() [][]byte {
	return nil
}

// FollowedFollows returns nil; the AcceptReq path does not consult it.
func (m *mockServer) FollowedFollows() [][]byte {
	return nil
}
|
||||
|
||||
// AcceptReq implements the Server.AcceptReq method for testing
|
||||
func (m *mockServer) AcceptReq(
|
||||
c context.T, hr *http.Request, ff *filters.T,
|
||||
authedPubkey []byte, remote string,
|
||||
) (allowed *filters.T, accept bool, modified bool) {
|
||||
// if auth is required, and not public readable, reject
|
||||
if m.AuthRequired() && len(authedPubkey) == 0 && !m.PublicReadable() {
|
||||
return
|
||||
}
|
||||
allowed = ff
|
||||
accept = true
|
||||
return
|
||||
}
|
||||
|
||||
// TestAcceptReq exercises the mock AcceptReq implementation across
// combinations of explicit auth requirement, public readability, and
// implicit auth via configured owner pubkeys.
func TestAcceptReq(t *testing.T) {
	// Create a context and HTTP request for testing
	ctx := context.Bg()
	req, _ := http.NewRequest("GET", "http://example.com", nil)

	// Create test filters
	testFilters := filters.New()

	// Test cases
	tests := []struct {
		name           string
		server         *mockServer
		authedPubkey   []byte
		expectedAccept bool
	}{
		{
			name: "Auth required, no pubkey, not public readable",
			server: &mockServer{
				authRequired:   true,
				publicReadable: false,
			},
			authedPubkey:   nil,
			expectedAccept: false,
		},
		{
			name: "Auth required, no pubkey, public readable",
			server: &mockServer{
				authRequired:   true,
				publicReadable: true,
			},
			authedPubkey:   nil,
			expectedAccept: true,
		},
		{
			name: "Auth required, with pubkey",
			server: &mockServer{
				authRequired:   true,
				publicReadable: false,
			},
			authedPubkey:   []byte("test-pubkey"),
			expectedAccept: true,
		},
		{
			name: "Auth not required",
			server: &mockServer{
				authRequired:   false,
				publicReadable: false,
			},
			authedPubkey:   nil,
			expectedAccept: true,
		},
		{
			// owner pubkeys make AuthRequired() return true even though
			// authRequired is false
			name: "Auth required due to owner pubkeys, no pubkey, not public readable",
			server: &mockServer{
				authRequired:   false,
				publicReadable: false,
				ownersPubkeys:  [][]byte{[]byte("owner1")},
			},
			authedPubkey:   nil,
			expectedAccept: false,
		},
		{
			name: "Auth required due to owner pubkeys, no pubkey, public readable",
			server: &mockServer{
				authRequired:   false,
				publicReadable: true,
				ownersPubkeys:  [][]byte{[]byte("owner1")},
			},
			authedPubkey:   nil,
			expectedAccept: true,
		},
	}

	// Run tests
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Use the mock server's AcceptReq method
			allowed, accept, modified := tt.server.AcceptReq(ctx, req, testFilters, tt.authedPubkey, "127.0.0.1")

			// Check if the acceptance status matches the expected value
			if accept != tt.expectedAccept {
				t.Errorf("AcceptReq() accept = %v, want %v", accept, tt.expectedAccept)
			}

			// If the request should be accepted, check that the filters are returned
			if tt.expectedAccept {
				if allowed == nil {
					t.Error("AcceptReq() allowed is nil, but request was accepted")
				}
			} else {
				if allowed != nil {
					t.Error("AcceptReq() allowed is not nil, but request was rejected")
				}
			}

			// Modified should be false as the current implementation doesn't modify filters
			if modified {
				t.Error("AcceptReq() modified = true, want false")
			}
		})
	}
}
|
||||
|
||||
// TestAcceptReqWithRealServer tests the AcceptReq function with a real Server instance:
// unauthenticated requests are rejected until either a pubkey is supplied or the
// relay is switched to publicly readable.
func TestAcceptReqWithRealServer(t *testing.T) {
	// Create a context and HTTP request for testing
	ctx := context.Bg()
	req, _ := http.NewRequest("GET", "http://example.com", nil)

	// Create test filters
	testFilters := filters.New()

	// Create a Server instance with configuration
	s := &Server{
		C: &config.C{
			AuthRequired:   true,
			PublicReadable: false,
		},
		Lists: new(Lists),
	}

	// Test with no authenticated pubkey
	allowed, accept, modified := s.AcceptReq(ctx, req, testFilters, nil, "127.0.0.1")
	if accept {
		t.Error("AcceptReq() accept = true, want false")
	}
	if allowed != nil {
		t.Error("AcceptReq() allowed is not nil, but request was rejected")
	}
	if modified {
		t.Error("AcceptReq() modified = true, want false")
	}

	// Test with authenticated pubkey
	allowed, accept, modified = s.AcceptReq(ctx, req, testFilters, []byte("test-pubkey"), "127.0.0.1")
	if !accept {
		t.Error("AcceptReq() accept = false, want true")
	}
	// the accepted filters must be the very same object that was passed in
	if allowed != testFilters {
		t.Error("AcceptReq() allowed is not the same as input filters")
	}
	if modified {
		t.Error("AcceptReq() modified = true, want false")
	}

	// Test with public readable
	s.C.PublicReadable = true
	allowed, accept, modified = s.AcceptReq(ctx, req, testFilters, nil, "127.0.0.1")
	if !accept {
		t.Error("AcceptReq() accept = false, want true")
	}
	if allowed != testFilters {
		t.Error("AcceptReq() allowed is not the same as input filters")
	}
	if modified {
		t.Error("AcceptReq() modified = true, want false")
	}
}
|
||||
187
pkg/app/relay/addEvent.go
Normal file
187
pkg/app/relay/addEvent.go
Normal file
@@ -0,0 +1,187 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"orly.dev/pkg/protocol/httpauth"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
realy_lol "orly.dev/pkg/version"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/interfaces/relay"
|
||||
"orly.dev/pkg/interfaces/store"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/normalize"
|
||||
)
|
||||
|
||||
var (
	// NIP20prefixmatcher matches a NIP-20 style machine-readable prefix of
	// the form "word: " at the start of an error string.
	NIP20prefixmatcher = regexp.MustCompile(`^\w+: `)
)

// userAgent identifies this relay build in outgoing HTTP requests to
// replica peers.
var userAgent = fmt.Sprintf("orly/%s", realy_lol.V)
|
||||
|
||||
// WriteCloser wraps a bytes.Buffer so an in-memory byte slice can serve as
// an io.ReadCloser, e.g. as an HTTP request body.
type WriteCloser struct {
	*bytes.Buffer
}

// Close discards any buffered data and always reports success.
func (w *WriteCloser) Close() (err error) {
	w.Buffer.Reset()
	return
}

// NewWriteCloser wraps the given byte slice in a WriteCloser ready for use
// as a request body.
func NewWriteCloser(w []byte) *WriteCloser {
	buf := bytes.NewBuffer(w)
	return &WriteCloser{buf}
}
|
||||
|
||||
// AddEvent processes an incoming event, saves it if valid, and delivers it to
// subscribers.
//
// # Parameters
//
// - c: context for request handling
//
// - rl: relay interface
//
// - ev: the event to be added
//
// - hr: HTTP request related to the event (if any)
//
// - origin: origin of the event (if any)
//
// - pubkeys: pubkeys of replicas that have already handled this event,
// used to avoid echoing it back to them
//
// # Return Values
//
// - accepted: true if the event was successfully processed, false otherwise
//
// - message: additional information or error message related to the
// processing
//
// # Expected Behaviour:
//
// - Validates the incoming event.
//
// - Saves the event using the Publish method if it is not ephemeral.
//
// - Handles duplicate events by returning an appropriate error message.
//
// - Delivers the event to subscribers via the listeners' Deliver method.
//
// - Pushes the event to at most one configured replica peer, then returns
// a boolean indicating whether the event was accepted and any relevant
// message.
func (s *Server) AddEvent(
	c context.T, rl relay.I, ev *event.E, hr *http.Request, origin string,
	pubkeys [][]byte,
) (accepted bool, message []byte) {

	if ev == nil {
		return false, normalize.Invalid.F("empty event")
	}
	// Ephemeral events are intentionally not persisted; the empty branch
	// skips straight to delivery.
	if ev.Kind.IsEphemeral() {
	} else {
		if saveErr := s.Publish(c, ev); saveErr != nil {
			// duplicates are reported back verbatim, not treated as fatal
			if errors.Is(saveErr, store.ErrDupEvent) {
				return false, []byte(saveErr.Error())
			}
			errmsg := saveErr.Error()
			// errors already carrying a NIP-20 "prefix: " shape are passed
			// through; tombstoned events get a specific message
			if NIP20prefixmatcher.MatchString(errmsg) {
				if strings.Contains(errmsg, "tombstone") {
					return false, normalize.Error.F(
						"%s event was deleted, not storing it again",
						origin,
					)
				}
				// NOTE(review): this branch and the fall-through below
				// return the same value; the HasPrefix check is redundant
				// as written — confirm whether Blocked was meant to get
				// different treatment.
				if strings.HasPrefix(errmsg, string(normalize.Blocked)) {
					return false, []byte(errmsg)
				}
				return false, []byte(errmsg)
			} else {
				return false, []byte(errmsg)
			}
		}
	}
	// notify subscribers
	s.listeners.Deliver(ev)
	// push the new event to replicas if replicas are configured, and the relay
	// has an identity key.
	var err error
	if len(s.Peers.Addresses) > 0 &&
		len(s.Peers.I.Sec()) == secp256k1.SecKeyBytesLen {
		evb := ev.Marshal(nil)
		var payload io.ReadCloser
		// NOTE(review): payload is shared across loop iterations; a failed
		// client.Do may partially consume it, leaving a short body for the
		// next peer attempt — consider constructing a fresh body per
		// iteration (GetBody returns the same consumed reader too).
		payload = NewWriteCloser(evb)
	replica:
		for i, a := range s.Peers.Addresses {
			// the peer address index is the same as the list of pubkeys
			// (they're unpacked from a string containing both, appended at the
			// same time), so if the pubkeys from the http event endpoint sent
			// us here matches the index of this address, we can skip it.
			for _, pk := range pubkeys {
				if bytes.Equal(s.Peers.Pubkeys[i], pk) {
					log.I.F(
						"not sending back to replica that just sent us this event %0x %s",
						ev.ID, a,
					)
					continue replica
				}
			}
			var ur *url.URL
			if ur, err = url.Parse(a + "/api/event"); chk.E(err) {
				continue
			}
			var r *http.Request
			r = &http.Request{
				Method:        "POST",
				URL:           ur,
				Proto:         "HTTP/1.1",
				ProtoMajor:    1,
				ProtoMinor:    1,
				Header:        make(http.Header),
				Body:          payload,
				ContentLength: int64(len(evb)),
				Host:          ur.Host,
			}
			r.Header.Add("User-Agent", userAgent)
			if err = httpauth.AddNIP98Header(
				r, ur, "POST", "", s.Peers.I, 0,
			); chk.E(err) {
				continue
			}
			// add this replica's pubkey to the list to prevent re-sending to
			// other replicas more than twice
			// NOTE(review): this appends on every loop iteration that gets
			// this far, so after several failed attempts our own pubkey can
			// appear multiple times in the header — verify whether it should
			// be added once, before the loop.
			pubkeys = append(pubkeys, s.Peers.Pub())
			// build the colon-separated hex pubkey list for the header
			var pubkeysHeader []byte
			for j, pk := range pubkeys {
				pubkeysHeader = hex.EncAppend(pubkeysHeader, pk)
				if j < len(pubkeys)-1 {
					pubkeysHeader = append(pubkeysHeader, ':')
				}
			}
			r.Header.Add("X-Pubkeys", string(pubkeysHeader))
			r.GetBody = func() (rc io.ReadCloser, err error) {
				rc = payload
				return
			}
			// NOTE(review): client has no Timeout set; a stalled replica
			// will block this request indefinitely — consider a deadline.
			client := &http.Client{}
			if _, err = client.Do(r); chk.E(err) {
				continue
			}
			log.I.F(
				"event pushed to replica %s\n%s",
				ur.String(), evb,
			)
			// only the first reachable replica is pushed to; it is expected
			// to propagate further via the X-Pubkeys chain
			break
		}
	}
	accepted = true
	return
}
|
||||
39
pkg/app/relay/admin-auth.go
Normal file
39
pkg/app/relay/admin-auth.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"orly.dev/pkg/protocol/httpauth"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (s *Server) AdminAuth(
|
||||
r *http.Request, remote string,
|
||||
tolerance ...time.Duration,
|
||||
) (authed bool, pubkey []byte) {
|
||||
var valid bool
|
||||
var err error
|
||||
var tolerate time.Duration
|
||||
if len(tolerance) > 0 {
|
||||
tolerate = tolerance[0]
|
||||
}
|
||||
if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if !valid {
|
||||
log.E.F(
|
||||
"invalid auth %s from %s",
|
||||
r.Header.Get("Authorization"), remote,
|
||||
)
|
||||
return
|
||||
}
|
||||
for _, pk := range s.ownersPubkeys {
|
||||
if bytes.Equal(pk, pubkey) {
|
||||
authed = true
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
68
pkg/app/relay/auth.go
Normal file
68
pkg/app/relay/auth.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
)
|
||||
|
||||
// ServiceURL constructs the service URL based on the incoming HTTP request. It
|
||||
// checks for authentication requirements and determines the protocol (ws or
|
||||
// wss) based on headers like X-Forwarded-Host, X-Forwarded-Proto, and the host
|
||||
// itself.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - req: A pointer to an http.Request object representing the incoming request.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - st: A string representing the constructed service URL.
|
||||
//
|
||||
// # Expected Behaviour:
|
||||
//
|
||||
// - Checks if authentication is required.
|
||||
//
|
||||
// - Retrieves the host from X-Forwarded-Host or falls back to req.Host.
|
||||
//
|
||||
// - Determines the protocol (ws or wss) based on various conditions including
|
||||
// headers and host details.
|
||||
//
|
||||
// - Returns the constructed URL string.
|
||||
func (s *Server) ServiceURL(req *http.Request) (st string) {
|
||||
if !s.AuthRequired() {
|
||||
log.T.F("auth not required")
|
||||
return
|
||||
}
|
||||
host := req.Header.Get("X-Forwarded-Host")
|
||||
if host == "" {
|
||||
host = req.Host
|
||||
}
|
||||
proto := req.Header.Get("X-Forwarded-Proto")
|
||||
if proto == "" {
|
||||
if host == "localhost" {
|
||||
proto = "ws"
|
||||
} else if strings.Contains(host, ":") {
|
||||
// has a port number
|
||||
proto = "ws"
|
||||
} else if _, err := strconv.Atoi(
|
||||
strings.ReplaceAll(
|
||||
host, ".",
|
||||
"",
|
||||
),
|
||||
); chk.E(err) {
|
||||
// it's a naked IP
|
||||
proto = "ws"
|
||||
} else {
|
||||
proto = "wss"
|
||||
}
|
||||
} else if proto == "https" {
|
||||
proto = "wss"
|
||||
} else if proto == "http" {
|
||||
proto = "ws"
|
||||
}
|
||||
return proto + "://" + host
|
||||
}
|
||||
10
pkg/app/relay/config.go
Normal file
10
pkg/app/relay/config.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/app/config"
|
||||
)
|
||||
|
||||
func (s *Server) Config() (c *config.C) {
|
||||
c = s.C
|
||||
return
|
||||
}
|
||||
68
pkg/app/relay/handleRelayinfo.go
Normal file
68
pkg/app/relay/handleRelayinfo.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
||||
"orly.dev/pkg/interfaces/relay"
|
||||
"orly.dev/pkg/protocol/relayinfo"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/version"
|
||||
)
|
||||
|
||||
// HandleRelayInfo generates and returns a relay information document in JSON
|
||||
// format based on the server's configuration and supported NIPs.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - w: HTTP response writer used to send the generated document.
|
||||
//
|
||||
// - r: HTTP request object containing incoming client request data.
|
||||
//
|
||||
// # Expected Behaviour
|
||||
//
|
||||
// The function constructs a relay information document using either the
|
||||
// Informer interface implementation or predefined server configuration. It
|
||||
// returns this document as a JSON response to the client.
|
||||
func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
|
||||
r.Header.Set("Content-Type", "application/json")
|
||||
log.I.Ln("handling relay information document")
|
||||
var info *relayinfo.T
|
||||
if informationer, ok := s.relay.(relay.Informer); ok {
|
||||
info = informationer.GetNIP11InformationDocument()
|
||||
} else {
|
||||
supportedNIPs := relayinfo.GetList(
|
||||
relayinfo.BasicProtocol,
|
||||
relayinfo.Authentication,
|
||||
// relayinfo.EncryptedDirectMessage,
|
||||
relayinfo.EventDeletion,
|
||||
relayinfo.RelayInformationDocument,
|
||||
relayinfo.GenericTagQueries,
|
||||
// relayinfo.NostrMarketplace,
|
||||
relayinfo.EventTreatment,
|
||||
// relayinfo.CommandResults,
|
||||
relayinfo.ParameterizedReplaceableEvents,
|
||||
// relayinfo.ExpirationTimestamp,
|
||||
relayinfo.ProtectedEvents,
|
||||
// relayinfo.RelayListMetadata,
|
||||
)
|
||||
sort.Sort(supportedNIPs)
|
||||
log.T.Ln("supported NIPs", supportedNIPs)
|
||||
info = &relayinfo.T{
|
||||
Name: s.relay.Name(),
|
||||
Description: version.Description,
|
||||
Nips: supportedNIPs,
|
||||
Software: version.URL,
|
||||
Version: version.V,
|
||||
Limitation: relayinfo.Limits{
|
||||
AuthRequired: s.C.AuthRequired,
|
||||
RestrictedWrites: s.C.AuthRequired,
|
||||
},
|
||||
Icon: "https://cdn.satellite.earth/ac9778868fbf23b63c47c769a74e163377e6ea94d3f0f31711931663d035c4f6.png",
|
||||
}
|
||||
}
|
||||
if err := json.NewEncoder(w).Encode(info); chk.E(err) {
|
||||
}
|
||||
}
|
||||
@@ -1,12 +1,11 @@
|
||||
package realy
|
||||
package relay
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"orly.dev/protocol/socketapi"
|
||||
"orly.dev/pkg/protocol/socketapi"
|
||||
)
|
||||
|
||||
func (s *Server) handleWebsocket(w http.ResponseWriter, r *http.Request) {
|
||||
a := &socketapi.A{Server: s}
|
||||
a := &socketapi.A{I: s}
|
||||
a.Serve(w, r, s)
|
||||
}
|
||||
101
pkg/app/relay/helpers/helpers.go
Normal file
101
pkg/app/relay/helpers/helpers.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// GenerateDescription generates a detailed description containing the provided
|
||||
// text and an optional list of scopes.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - text: A string representing the base description.
|
||||
//
|
||||
// - scopes: A slice of strings indicating scopes to be included in the
|
||||
// description.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - A string combining the base description and a formatted list of
|
||||
// scopes, if provided.
|
||||
//
|
||||
// # Expected behaviour
|
||||
//
|
||||
// The function appends a formatted list of scopes to the base description if
|
||||
// any scopes are provided. If no scopes are provided, it returns the base
|
||||
// description unchanged. The formatted list of scopes includes each scope
|
||||
// surrounded by backticks and separated by commas.
|
||||
func GenerateDescription(text string, scopes []string) string {
|
||||
if len(scopes) == 0 {
|
||||
return text
|
||||
}
|
||||
result := make([]string, 0)
|
||||
for _, value := range scopes {
|
||||
result = append(result, "`"+value+"`")
|
||||
}
|
||||
return text + "<br/><br/>**Scopes**<br/>" + strings.Join(result, ", ")
|
||||
}
|
||||
|
||||
// GetRemoteFromReq retrieves the originating IP address of the client from
|
||||
// an HTTP request, considering standard and non-standard proxy headers.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - r: The HTTP request object containing details of the client and
|
||||
// routing information.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - rr: A string value representing the IP address of the originating
|
||||
// remote client.
|
||||
//
|
||||
// # Expected behaviour
|
||||
//
|
||||
// The function first checks for the standardized "Forwarded" header (RFC 7239)
|
||||
// to identify the original client IP. If that isn't available, it falls back to
|
||||
// the "X-Forwarded-For" header. If both headers are absent, it defaults to
|
||||
// using the request's RemoteAddr.
|
||||
//
|
||||
// For the "Forwarded" header, it extracts the client IP from the "for"
|
||||
// parameter. For the "X-Forwarded-For" header, if it contains one IP, it
|
||||
// returns that. If it contains two IPs, it returns the second.
|
||||
func GetRemoteFromReq(r *http.Request) (rr string) {
|
||||
// First check for the standardized Forwarded header (RFC 7239)
|
||||
forwarded := r.Header.Get("Forwarded")
|
||||
if forwarded != "" {
|
||||
// Parse the Forwarded header which can contain multiple parameters
|
||||
//
|
||||
// Format:
|
||||
//
|
||||
// Forwarded: by=<identifier>;for=<identifier>;host=<host>;proto=<http|https>
|
||||
parts := strings.Split(forwarded, ";")
|
||||
for _, part := range parts {
|
||||
part = strings.TrimSpace(part)
|
||||
if strings.HasPrefix(part, "for=") {
|
||||
// Extract the client IP from the "for" parameter
|
||||
forValue := strings.TrimPrefix(part, "for=")
|
||||
// Remove quotes if present
|
||||
forValue = strings.Trim(forValue, "\"")
|
||||
// Handle IPv6 addresses which are enclosed in square brackets
|
||||
forValue = strings.Trim(forValue, "[]")
|
||||
return forValue
|
||||
}
|
||||
}
|
||||
}
|
||||
// If the Forwarded header is not available or doesn't contain "for"
|
||||
// parameter, fall back to X-Forwarded-For
|
||||
rem := r.Header.Get("X-Forwarded-For")
|
||||
if rem == "" {
|
||||
rr = r.RemoteAddr
|
||||
} else {
|
||||
splitted := strings.Split(rem, " ")
|
||||
if len(splitted) == 1 {
|
||||
rr = splitted[0]
|
||||
}
|
||||
if len(splitted) == 2 {
|
||||
rr = splitted[1]
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
108
pkg/app/relay/lists.go
Normal file
108
pkg/app/relay/lists.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Lists manages lists of pubkeys, followed users, follows, and muted users with
|
||||
// concurrency safety via a mutex.
|
||||
//
|
||||
// This list is designed primarily for owner-follow-list in mind, but with an
|
||||
// explicit allowlist/blocklist set up, ownersFollowed corresponds to the
|
||||
// allowed users, and ownersMuted corresponds to the blocked users, and all
|
||||
// filtering logic will work the same way.
|
||||
//
|
||||
// Currently, there is no explicit purpose for the followedFollows list being
|
||||
// separate from the ownersFollowed list, but there could be reasons for this
|
||||
// distinction, such as rate limiting applying to the former and not the latter.
|
||||
type Lists struct {
|
||||
sync.Mutex
|
||||
ownersPubkeys [][]byte
|
||||
ownersFollowed [][]byte
|
||||
followedFollows [][]byte
|
||||
ownersMuted [][]byte
|
||||
}
|
||||
|
||||
func (l *Lists) LenOwnersPubkeys() (ll int) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
ll = len(l.ownersPubkeys)
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Lists) OwnersPubkeys() (pks [][]byte) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
pks = append(pks, l.ownersPubkeys...)
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Lists) SetOwnersPubkeys(pks [][]byte) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
l.ownersPubkeys = pks
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Lists) LenOwnersFollowed() (ll int) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
ll = len(l.ownersFollowed)
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Lists) OwnersFollowed() (pks [][]byte) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
pks = append(pks, l.ownersFollowed...)
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Lists) SetOwnersFollowed(pks [][]byte) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
l.ownersFollowed = pks
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Lists) LenFollowedFollows() (ll int) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
ll = len(l.followedFollows)
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Lists) FollowedFollows() (pks [][]byte) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
pks = append(pks, l.followedFollows...)
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Lists) SetFollowedFollows(pks [][]byte) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
l.followedFollows = pks
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Lists) LenOwnersMuted() (ll int) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
ll = len(l.ownersMuted)
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Lists) OwnersMuted() (pks [][]byte) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
pks = append(pks, l.ownersMuted...)
|
||||
return
|
||||
}
|
||||
|
||||
func (l *Lists) SetOwnersMuted(pks [][]byte) {
|
||||
l.Lock()
|
||||
defer l.Unlock()
|
||||
l.ownersMuted = pks
|
||||
return
|
||||
}
|
||||
217
pkg/app/relay/lists_test.go
Normal file
217
pkg/app/relay/lists_test.go
Normal file
@@ -0,0 +1,217 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLists_OwnersPubkeys(t *testing.T) {
|
||||
// Create a new Lists instance
|
||||
l := &Lists{}
|
||||
|
||||
// Test with empty list
|
||||
pks := l.OwnersPubkeys()
|
||||
if len(pks) != 0 {
|
||||
t.Errorf("Expected empty list, got %d items", len(pks))
|
||||
}
|
||||
|
||||
// Test with some pubkeys
|
||||
testPubkeys := [][]byte{
|
||||
[]byte("pubkey1"),
|
||||
[]byte("pubkey2"),
|
||||
[]byte("pubkey3"),
|
||||
}
|
||||
|
||||
l.SetOwnersPubkeys(testPubkeys)
|
||||
|
||||
// Verify length
|
||||
if l.LenOwnersPubkeys() != len(testPubkeys) {
|
||||
t.Errorf("Expected length %d, got %d", len(testPubkeys), l.LenOwnersPubkeys())
|
||||
}
|
||||
|
||||
// Verify content
|
||||
pks = l.OwnersPubkeys()
|
||||
if len(pks) != len(testPubkeys) {
|
||||
t.Errorf("Expected %d pubkeys, got %d", len(testPubkeys), len(pks))
|
||||
}
|
||||
|
||||
// Verify each pubkey
|
||||
for i, pk := range pks {
|
||||
if !bytes.Equal(pk, testPubkeys[i]) {
|
||||
t.Errorf("Pubkey at index %d doesn't match: expected %s, got %s",
|
||||
i, testPubkeys[i], pk)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that the returned slice is a copy, not a reference
|
||||
pks[0] = []byte("modified")
|
||||
newPks := l.OwnersPubkeys()
|
||||
if bytes.Equal(pks[0], newPks[0]) {
|
||||
t.Error("Returned slice should be a copy, not a reference")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLists_OwnersFollowed(t *testing.T) {
|
||||
// Create a new Lists instance
|
||||
l := &Lists{}
|
||||
|
||||
// Test with empty list
|
||||
followed := l.OwnersFollowed()
|
||||
if len(followed) != 0 {
|
||||
t.Errorf("Expected empty list, got %d items", len(followed))
|
||||
}
|
||||
|
||||
// Test with some pubkeys
|
||||
testPubkeys := [][]byte{
|
||||
[]byte("followed1"),
|
||||
[]byte("followed2"),
|
||||
[]byte("followed3"),
|
||||
}
|
||||
|
||||
l.SetOwnersFollowed(testPubkeys)
|
||||
|
||||
// Verify length
|
||||
if l.LenOwnersFollowed() != len(testPubkeys) {
|
||||
t.Errorf("Expected length %d, got %d", len(testPubkeys), l.LenOwnersFollowed())
|
||||
}
|
||||
|
||||
// Verify content
|
||||
followed = l.OwnersFollowed()
|
||||
if len(followed) != len(testPubkeys) {
|
||||
t.Errorf("Expected %d followed, got %d", len(testPubkeys), len(followed))
|
||||
}
|
||||
|
||||
// Verify each pubkey
|
||||
for i, pk := range followed {
|
||||
if !bytes.Equal(pk, testPubkeys[i]) {
|
||||
t.Errorf("Followed at index %d doesn't match: expected %s, got %s",
|
||||
i, testPubkeys[i], pk)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLists_FollowedFollows(t *testing.T) {
|
||||
// Create a new Lists instance
|
||||
l := &Lists{}
|
||||
|
||||
// Test with empty list
|
||||
follows := l.FollowedFollows()
|
||||
if len(follows) != 0 {
|
||||
t.Errorf("Expected empty list, got %d items", len(follows))
|
||||
}
|
||||
|
||||
// Test with some pubkeys
|
||||
testPubkeys := [][]byte{
|
||||
[]byte("follow1"),
|
||||
[]byte("follow2"),
|
||||
[]byte("follow3"),
|
||||
}
|
||||
|
||||
l.SetFollowedFollows(testPubkeys)
|
||||
|
||||
// Verify length
|
||||
if l.LenFollowedFollows() != len(testPubkeys) {
|
||||
t.Errorf("Expected length %d, got %d", len(testPubkeys), l.LenFollowedFollows())
|
||||
}
|
||||
|
||||
// Verify content
|
||||
follows = l.FollowedFollows()
|
||||
if len(follows) != len(testPubkeys) {
|
||||
t.Errorf("Expected %d follows, got %d", len(testPubkeys), len(follows))
|
||||
}
|
||||
|
||||
// Verify each pubkey
|
||||
for i, pk := range follows {
|
||||
if !bytes.Equal(pk, testPubkeys[i]) {
|
||||
t.Errorf("Follow at index %d doesn't match: expected %s, got %s",
|
||||
i, testPubkeys[i], pk)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLists_OwnersMuted(t *testing.T) {
|
||||
// Create a new Lists instance
|
||||
l := &Lists{}
|
||||
|
||||
// Test with empty list
|
||||
muted := l.OwnersMuted()
|
||||
if len(muted) != 0 {
|
||||
t.Errorf("Expected empty list, got %d items", len(muted))
|
||||
}
|
||||
|
||||
// Test with some pubkeys
|
||||
testPubkeys := [][]byte{
|
||||
[]byte("muted1"),
|
||||
[]byte("muted2"),
|
||||
[]byte("muted3"),
|
||||
}
|
||||
|
||||
l.SetOwnersMuted(testPubkeys)
|
||||
|
||||
// Verify length
|
||||
if l.LenOwnersMuted() != len(testPubkeys) {
|
||||
t.Errorf("Expected length %d, got %d", len(testPubkeys), l.LenOwnersMuted())
|
||||
}
|
||||
|
||||
// Verify content
|
||||
muted = l.OwnersMuted()
|
||||
if len(muted) != len(testPubkeys) {
|
||||
t.Errorf("Expected %d muted, got %d", len(testPubkeys), len(muted))
|
||||
}
|
||||
|
||||
// Verify each pubkey
|
||||
for i, pk := range muted {
|
||||
if !bytes.Equal(pk, testPubkeys[i]) {
|
||||
t.Errorf("Muted at index %d doesn't match: expected %s, got %s",
|
||||
i, testPubkeys[i], pk)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLists_ConcurrentAccess(t *testing.T) {
|
||||
// Create a new Lists instance
|
||||
l := &Lists{}
|
||||
|
||||
// Test concurrent access to the lists
|
||||
done := make(chan bool)
|
||||
|
||||
// Concurrent reads and writes
|
||||
go func() {
|
||||
for i := 0; i < 100; i++ {
|
||||
l.SetOwnersPubkeys([][]byte{[]byte("pubkey1"), []byte("pubkey2")})
|
||||
l.OwnersPubkeys()
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 100; i++ {
|
||||
l.SetOwnersFollowed([][]byte{[]byte("followed1"), []byte("followed2")})
|
||||
l.OwnersFollowed()
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 100; i++ {
|
||||
l.SetFollowedFollows([][]byte{[]byte("follow1"), []byte("follow2")})
|
||||
l.FollowedFollows()
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for i := 0; i < 100; i++ {
|
||||
l.SetOwnersMuted([][]byte{[]byte("muted1"), []byte("muted2")})
|
||||
l.OwnersMuted()
|
||||
}
|
||||
done <- true
|
||||
}()
|
||||
|
||||
// Wait for all goroutines to complete
|
||||
for i := 0; i < 4; i++ {
|
||||
<-done
|
||||
}
|
||||
|
||||
// If we got here without deadlocks or panics, the test passes
|
||||
}
|
||||
@@ -1,19 +1,20 @@
|
||||
// Package options provides some option configurations for the realy relay.
|
||||
// Package options provides some option configurations for the relay.
|
||||
//
|
||||
// None of this package is actually in use, and the skip event function has not been
|
||||
// implemented. In theory this could be used for something but it currently isn't.
|
||||
// None of this package is actually in use, and the skip event function has not
|
||||
// been implemented. In theory, this could be used for something but it currently
|
||||
// isn't.
|
||||
package options
|
||||
|
||||
import (
|
||||
"orly.dev/encoders/event"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
)
|
||||
|
||||
type SkipEventFunc func(*event.E) bool
|
||||
|
||||
// T is a collection of options.
|
||||
type T struct {
|
||||
// SkipEventFunc is in theory a function to test whether an event should not be sent in
|
||||
// response to a query.
|
||||
// SkipEventFunc is in theory a function to test whether an event should not
|
||||
// be sent in response to a query.
|
||||
SkipEventFunc
|
||||
}
|
||||
|
||||
@@ -25,7 +26,8 @@ func Default() *T {
|
||||
return &T{}
|
||||
}
|
||||
|
||||
// WithSkipEventFunc is an options.T generator that adds a function to skip events.
|
||||
// WithSkipEventFunc is an options.T generator that adds a function to skip
|
||||
// events.
|
||||
func WithSkipEventFunc(skipEventFunc func(*event.E) bool) O {
|
||||
return func(o *T) {
|
||||
o.SkipEventFunc = skipEventFunc
|
||||
39
pkg/app/relay/owners-followed-auth.go
Normal file
39
pkg/app/relay/owners-followed-auth.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"orly.dev/pkg/protocol/httpauth"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (s *Server) OwnersFollowedAuth(
|
||||
r *http.Request, remote string,
|
||||
tolerance ...time.Duration,
|
||||
) (authed bool, pubkey []byte) {
|
||||
var valid bool
|
||||
var err error
|
||||
var tolerate time.Duration
|
||||
if len(tolerance) > 0 {
|
||||
tolerate = tolerance[0]
|
||||
}
|
||||
if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if !valid {
|
||||
log.E.F(
|
||||
"invalid auth %s from %s",
|
||||
r.Header.Get("Authorization"), remote,
|
||||
)
|
||||
return
|
||||
}
|
||||
for _, pk := range s.ownersFollowed {
|
||||
if bytes.Equal(pk, pubkey) {
|
||||
authed = true
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
6
pkg/app/relay/owners-pubkeys.go
Normal file
6
pkg/app/relay/owners-pubkeys.go
Normal file
@@ -0,0 +1,6 @@
|
||||
package relay
|
||||
|
||||
func (s *Server) OwnersPubkeys() (pks [][]byte) {
|
||||
pks = s.ownersPubkeys
|
||||
return
|
||||
}
|
||||
72
pkg/app/relay/peers.go
Normal file
72
pkg/app/relay/peers.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/crypto/p256k"
|
||||
"orly.dev/pkg/encoders/bech32encoding"
|
||||
"orly.dev/pkg/interfaces/signer"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/keys"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Peers is a structure that keeps the information required when peer
|
||||
// replication is enabled.
|
||||
//
|
||||
// - Addresses are the relay addresses that will be pushed new events when
|
||||
// accepted. From ORLY_PEER_RELAYS first field after the |.
|
||||
//
|
||||
// - Pubkeys are the relay peer public keys that we will send any event to
|
||||
// including privileged type. From ORLY_PEER_RELAYS before the |.
|
||||
//
|
||||
// - I - the signer of this relay, generated from the nsec in
|
||||
// ORLY_SECRET_KEY.
|
||||
type Peers struct {
|
||||
Addresses []string
|
||||
Pubkeys [][]byte
|
||||
signer.I
|
||||
}
|
||||
|
||||
// Init accepts the lists which will come from config.C for peer relay settings
|
||||
// and populate the Peers with this data after decoding it.
|
||||
func (p *Peers) Init(
|
||||
addresses []string, sec string,
|
||||
) (err error) {
|
||||
for _, address := range addresses {
|
||||
if len(address) == 0 {
|
||||
continue
|
||||
}
|
||||
split := strings.Split(address, "@")
|
||||
if len(split) != 2 {
|
||||
log.E.F("invalid peer address: %s", address)
|
||||
continue
|
||||
}
|
||||
p.Addresses = append(p.Addresses, split[1])
|
||||
var pk []byte
|
||||
if pk, err = keys.DecodeNpubOrHex(split[0]); chk.D(err) {
|
||||
continue
|
||||
}
|
||||
p.Pubkeys = append(p.Pubkeys, pk)
|
||||
log.I.F("peer %s added; pubkey: %0x", split[1], pk)
|
||||
}
|
||||
if sec == "" {
|
||||
return
|
||||
}
|
||||
p.I = &p256k.Signer{}
|
||||
var s []byte
|
||||
if s, err = keys.DecodeNsecOrHex(sec); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if err = p.I.InitSec(s); chk.E(err) {
|
||||
return
|
||||
}
|
||||
var npub []byte
|
||||
if npub, err = bech32encoding.BinToNpub(p.I.Pub()); chk.E(err) {
|
||||
return
|
||||
}
|
||||
log.I.F(
|
||||
"relay peer initialized, relay's npub: %s",
|
||||
npub,
|
||||
)
|
||||
return
|
||||
}
|
||||
@@ -1,11 +1,9 @@
|
||||
// Package publisher is a singleton package that keeps track of subscriptions in
|
||||
// both websockets and http SSE, including managing the authentication state of
|
||||
// a connection.
|
||||
package publish
|
||||
|
||||
import (
|
||||
"orly.dev/app/realy/publish/publisher"
|
||||
"orly.dev/encoders/event"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/interfaces/publisher"
|
||||
"orly.dev/pkg/interfaces/typer"
|
||||
)
|
||||
|
||||
// S is the control structure for the subscription management scheme.
|
||||
@@ -26,11 +24,10 @@ func (s *S) Type() string { return "publish" }
|
||||
func (s *S) Deliver(ev *event.E) {
|
||||
for _, p := range s.Publishers {
|
||||
p.Deliver(ev)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (s *S) Receive(msg publisher.Message) {
|
||||
func (s *S) Receive(msg typer.T) {
|
||||
t := msg.Type()
|
||||
for _, p := range s.Publishers {
|
||||
if p.Type() == t {
|
||||
23
pkg/app/relay/server-impl.go
Normal file
23
pkg/app/relay/server-impl.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/app/relay/publish"
|
||||
"orly.dev/pkg/interfaces/relay"
|
||||
"orly.dev/pkg/interfaces/server"
|
||||
"orly.dev/pkg/interfaces/store"
|
||||
"orly.dev/pkg/utils/context"
|
||||
)
|
||||
|
||||
func (s *Server) Storage() store.I { return s.relay.Storage() }
|
||||
|
||||
func (s *Server) Relay() relay.I { return s.relay }
|
||||
|
||||
func (s *Server) Publisher() *publish.S { return s.listeners }
|
||||
|
||||
func (s *Server) Context() context.T { return s.Ctx }
|
||||
|
||||
func (s *Server) AuthRequired() bool { return s.C.AuthRequired || s.LenOwnersPubkeys() > 0 }
|
||||
|
||||
func (s *Server) PublicReadable() bool { return s.C.PublicReadable }
|
||||
|
||||
var _ server.I = &Server{}
|
||||
@@ -1,34 +1,55 @@
|
||||
package realy
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"orly.dev/encoders/tags"
|
||||
"orly.dev/utils/chk"
|
||||
"orly.dev/utils/errorf"
|
||||
"orly.dev/utils/log"
|
||||
"orly.dev/utils/normalize"
|
||||
|
||||
"orly.dev/encoders/event"
|
||||
"orly.dev/encoders/filter"
|
||||
"orly.dev/encoders/kinds"
|
||||
"orly.dev/encoders/tag"
|
||||
"orly.dev/interfaces/store"
|
||||
"orly.dev/utils/context"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/encoders/kinds"
|
||||
"orly.dev/pkg/encoders/tag"
|
||||
"orly.dev/pkg/encoders/tags"
|
||||
"orly.dev/pkg/interfaces/store"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/errorf"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/normalize"
|
||||
)
|
||||
|
||||
// Publish processes and saves an event based on its type and rules.
|
||||
// It handles replaceable, ephemeral, and parameterized replaceable events.
|
||||
// Duplicate or conflicting events are managed before saving the new one.
|
||||
// Publish processes and stores an event in the server's storage. It handles
|
||||
// different types of events: ephemeral, replaceable, and parameterized
|
||||
// replaceable.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - c (context.Context): The context for the operation.
|
||||
//
|
||||
// - evt (*event.E): The event to be published.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - err (error): An error if any step fails during the publishing process.
|
||||
//
|
||||
// # Expected Behaviour
|
||||
//
|
||||
// - For ephemeral events, the method doesn't store them and returns
|
||||
// immediately.
|
||||
//
|
||||
// - For replaceable events, it first queries for existing similar events,
|
||||
// deletes older ones, and then stores the new event.
|
||||
//
|
||||
// - For parameterized replaceable events, it performs a similar process but
|
||||
// uses additional tags to identify duplicates.
|
||||
func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
sto := s.relay.Storage()
|
||||
if evt.Kind.IsEphemeral() {
|
||||
// do not store ephemeral events
|
||||
// don't store ephemeral events
|
||||
return nil
|
||||
|
||||
} else if evt.Kind.IsReplaceable() {
|
||||
// replaceable event, delete before storing
|
||||
// replaceable event, delete old after storing
|
||||
var evs []*event.E
|
||||
f := filter.New()
|
||||
f.Authors = tag.New(evt.Pubkey)
|
||||
@@ -41,15 +62,26 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
log.T.F("found %d possible duplicate events", len(evs))
|
||||
for _, ev := range evs {
|
||||
del := true
|
||||
if bytes.Equal(ev.Id, evt.Id) {
|
||||
continue
|
||||
if bytes.Equal(ev.ID, evt.ID) {
|
||||
return errorf.W(
|
||||
string(
|
||||
normalize.Duplicate.F(
|
||||
"event already in relay database",
|
||||
),
|
||||
),
|
||||
)
|
||||
}
|
||||
log.I.F(
|
||||
"maybe replace %s with %s", ev.Serialize(), evt.Serialize(),
|
||||
)
|
||||
if ev.CreatedAt.Int() > evt.CreatedAt.Int() {
|
||||
log.I.S(ev, evt)
|
||||
return errorf.W(string(normalize.Invalid.F("not replacing newer replaceable event")))
|
||||
return errorf.W(
|
||||
string(
|
||||
normalize.Invalid.F(
|
||||
"not replacing newer replaceable event",
|
||||
),
|
||||
),
|
||||
)
|
||||
}
|
||||
// not deleting these events because some clients are retarded
|
||||
// and the query will pull the new one, but a backup can recover
|
||||
@@ -57,6 +89,56 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
if ev.Kind.IsDirectoryEvent() {
|
||||
del = false
|
||||
}
|
||||
if evt.Kind.Equal(kind.FollowList) {
|
||||
// if the event is from someone on ownersFollowed or
|
||||
// followedFollows, for now add to this list so they're
|
||||
// immediately effective.
|
||||
var isFollowed bool
|
||||
ownersFollowed := s.OwnersFollowed()
|
||||
for _, pk := range ownersFollowed {
|
||||
if bytes.Equal(evt.Pubkey, pk) {
|
||||
isFollowed = true
|
||||
}
|
||||
}
|
||||
if isFollowed {
|
||||
if _, _, err = sto.SaveEvent(
|
||||
c, evt, false, nil,
|
||||
); err != nil && !errors.Is(
|
||||
err, store.ErrDupEvent,
|
||||
) {
|
||||
return
|
||||
}
|
||||
// we need to trigger the spider with no fetch
|
||||
if err = s.Spider(true); chk.E(err) {
|
||||
err = nil
|
||||
}
|
||||
// event has been saved and lists updated.
|
||||
// return
|
||||
}
|
||||
|
||||
}
|
||||
if evt.Kind.Equal(kind.MuteList) {
|
||||
// check if this is one of the owners, if so, the mute list
|
||||
// should be applied immediately.
|
||||
owners := s.OwnersPubkeys()
|
||||
for _, pk := range owners {
|
||||
if bytes.Equal(evt.Pubkey, pk) {
|
||||
if _, _, err = sto.SaveEvent(
|
||||
c, evt, false, nil,
|
||||
); err != nil && !errors.Is(
|
||||
err, store.ErrDupEvent,
|
||||
) {
|
||||
return
|
||||
}
|
||||
// we need to trigger the spider with no fetch
|
||||
if err = s.Spider(true); chk.E(err) {
|
||||
err = nil
|
||||
}
|
||||
// event has been saved and lists updated.
|
||||
// return
|
||||
}
|
||||
}
|
||||
}
|
||||
// defer the delete until after the save, further down, has
|
||||
// completed.
|
||||
if del {
|
||||
@@ -74,8 +156,6 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
)
|
||||
},
|
||||
)
|
||||
// replaceable events we don't tombstone when replacing,
|
||||
// so if deleted, old versions can be restored
|
||||
if err = sto.DeleteEvent(c, ev.EventId()); chk.E(err) {
|
||||
return
|
||||
}
|
||||
@@ -156,10 +236,17 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, _, err = sto.SaveEvent(c, evt); chk.E(err) && !errors.Is(
|
||||
if _, _, err = sto.SaveEvent(
|
||||
c, evt, false, append(s.Peers.Pubkeys, s.ownersPubkeys...),
|
||||
); err != nil && !errors.Is(
|
||||
err, store.ErrDupEvent,
|
||||
) {
|
||||
return
|
||||
}
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf("saved event:\n%s", evt.Serialize())
|
||||
},
|
||||
)
|
||||
return
|
||||
}
|
||||
312
pkg/app/relay/server.go
Normal file
312
pkg/app/relay/server.go
Normal file
@@ -0,0 +1,312 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"orly.dev/pkg/protocol/openapi"
|
||||
"orly.dev/pkg/protocol/socketapi"
|
||||
|
||||
"orly.dev/pkg/app/config"
|
||||
"orly.dev/pkg/app/relay/helpers"
|
||||
"orly.dev/pkg/app/relay/options"
|
||||
"orly.dev/pkg/app/relay/publish"
|
||||
"orly.dev/pkg/interfaces/relay"
|
||||
"orly.dev/pkg/protocol/servemux"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/keys"
|
||||
"orly.dev/pkg/utils/log"
|
||||
|
||||
"github.com/rs/cors"
|
||||
)
|
||||
|
||||
// Server represents the core structure for running a nostr relay. It
// encapsulates various components such as context, cancel function, options,
// relay interface, address, HTTP server, and configuration settings.
type Server struct {
	Ctx              context.T    // root context governing the server lifetime
	Cancel           context.F    // cancels Ctx; invoked by Shutdown
	options          *options.T   // optional behaviour modifiers applied in NewServer
	relay            relay.I      // the relay implementation (storage, init, shutdown hooks)
	Addr             string       // listen address
	mux              *servemux.S  // internal router used for non-root HTTP paths
	httpServer       *http.Server // the HTTP server created by Start
	listeners        *publish.S   // publishers for socket and openapi subscribers
	blacklistPubkeys [][]byte     // pubkeys decoded from C.Blacklist in NewServer
	*config.C                     // embedded relay configuration
	*Lists                        // embedded owner/follow/mute pubkey lists
	*Peers                        // embedded cluster peer state
	Mux *servemux.S               // NOTE(review): appears unused in this file; mux (lowercase) is the one wired up — confirm
}
|
||||
|
||||
// ServerParams represents the configuration parameters for initializing a
// server. It encapsulates various components such as context, cancel function,
// relay interface, database path, maximum limit, and configuration settings.
type ServerParams struct {
	Ctx      context.T // parent context for the server
	Cancel   context.F // cancel function paired with Ctx
	Rl       relay.I   // relay implementation whose Storage() is initialized
	DbPath   string    // filesystem path handed to storage.Init
	MaxLimit int       // maximum result size limit (consumed by callers of NewServer)
	Mux      *servemux.S
	*config.C // embedded relay configuration
}
|
||||
|
||||
// NewServer initializes and returns a new Server instance based on the provided
// ServerParams and optional settings. It sets up storage, initializes the
// relay, and configures necessary components for server operation.
//
// # Parameters
//
// - sp (*ServerParams): The configuration parameters for initializing the
// server.
//
// - serveMux (*servemux.S): The request router installed as the server's mux.
//
// - opts (...options.O): Optional settings that modify the server's behavior.
//
// # Return Values
//
// - s (*Server): The newly created Server instance.
//
// - err (error): An error if any step fails during initialization.
//
// # Expected Behaviour
//
// - Initializes storage with the provided database path.
//
// - Configures the server's options using the default settings and applies any
// optional settings provided.
//
// - Sets up a ServeMux for handling HTTP requests.
//
// - Initializes the relay, starting its operation in a separate goroutine.
func NewServer(
	sp *ServerParams, serveMux *servemux.S, opts ...options.O,
) (s *Server, err error) {
	// apply functional options over the defaults
	op := options.Default()
	for _, opt := range opts {
		opt(op)
	}
	// storage must be ready before anything else touches the database
	if storage := sp.Rl.Storage(); storage != nil {
		if err = storage.Init(sp.DbPath); chk.T(err) {
			return nil, fmt.Errorf("storage init: %w", err)
		}
	}
	s = &Server{
		Ctx:     sp.Ctx,
		Cancel:  sp.Cancel,
		relay:   sp.Rl,
		mux:     serveMux,
		options: op,
		C:       sp.C,
		Lists:   new(Lists),
		Peers:   new(Peers),
	}
	// Parse blacklist pubkeys; malformed entries are logged (via chk.E) and
	// skipped rather than aborting startup.
	for _, v := range s.C.Blacklist {
		if len(v) == 0 {
			continue
		}
		var pk []byte
		if pk, err = keys.DecodeNpubOrHex(v); chk.E(err) {
			continue
		}
		s.blacklistPubkeys = append(s.blacklistPubkeys, pk)
	}
	// peer init failure is logged but non-fatal
	chk.E(
		s.Peers.Init(sp.C.PeerRelays, sp.C.RelaySecret),
	)
	s.listeners = publish.New(socketapi.New(s), openapi.NewPublisher(s))
	// relay init runs in the background; a failure tears the server down
	go func() {
		if err := s.relay.Init(); chk.E(err) {
			s.Shutdown()
		}
	}()
	return s, nil
}
|
||||
|
||||
// ServeHTTP handles incoming HTTP requests according to the standard Nostr
|
||||
// protocol. It specifically processes WebSocket upgrades and
|
||||
// "application/nostr+json" Accept headers.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - w (http.ResponseWriter): The response writer for sending responses.
|
||||
//
|
||||
// - r (*http.Request): The request object containing client's details and data.
|
||||
//
|
||||
// # Expected Behaviour
|
||||
//
|
||||
// - Checks if the request URL path is "/".
|
||||
//
|
||||
// - For WebSocket upgrades, calls handleWebsocket method.
|
||||
//
|
||||
// - If "Accept" header is "application/nostr+json", calls HandleRelayInfo
|
||||
// method.
|
||||
//
|
||||
// - Logs the HTTP request details for non-standard requests.
|
||||
//
|
||||
// - For all other paths, delegates to the internal mux's ServeHTTP method.
|
||||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
c := s.Config()
|
||||
remote := helpers.GetRemoteFromReq(r)
|
||||
var whitelisted bool
|
||||
if len(c.Whitelist) > 0 {
|
||||
for _, addr := range c.Whitelist {
|
||||
if strings.HasPrefix(remote, addr) {
|
||||
whitelisted = true
|
||||
}
|
||||
}
|
||||
} else {
|
||||
whitelisted = true
|
||||
}
|
||||
if !whitelisted {
|
||||
return
|
||||
}
|
||||
// standard nostr protocol only governs the "root" path of the relay and
|
||||
// websockets
|
||||
if r.URL.Path == "/" {
|
||||
if r.Header.Get("Upgrade") == "websocket" {
|
||||
s.handleWebsocket(w, r)
|
||||
return
|
||||
}
|
||||
if r.Header.Get("Accept") == "application/nostr+json" {
|
||||
s.HandleRelayInfo(w, r)
|
||||
return
|
||||
}
|
||||
}
|
||||
log.I.F(
|
||||
"http request: %s from %s",
|
||||
r.URL.String(), helpers.GetRemoteFromReq(r),
|
||||
)
|
||||
s.mux.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
// Start initializes the server by setting up a TCP listener and serving HTTP
|
||||
// requests.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - host (string): The hostname or IP address to listen on.
|
||||
//
|
||||
// - port (int): The port number to bind to.
|
||||
//
|
||||
// - started (...chan bool): Optional channels that are closed after the server
|
||||
// starts successfully.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - err (error): An error if any step fails during the server startup process.
|
||||
//
|
||||
// # Expected Behaviour
|
||||
//
|
||||
// - Joins the host and port into a full address string.
|
||||
//
|
||||
// - Logs the intention to start the relay listener at the specified address.
|
||||
//
|
||||
// - Listens for TCP connections on the specified address.
|
||||
//
|
||||
// - Configures an HTTP server with CORS middleware, sets timeouts, and binds it
|
||||
// to the listener.
|
||||
//
|
||||
// - If any started channels are provided, closes them upon successful startup.
|
||||
//
|
||||
// - Starts serving requests using the configured HTTP server.
|
||||
func (s *Server) Start(
|
||||
host string, port int, started ...chan bool,
|
||||
) (err error) {
|
||||
log.I.F("running spider every %v", s.C.SpiderTime)
|
||||
if len(s.C.Owners) > 0 {
|
||||
// start up spider
|
||||
if err = s.Spider(s.C.Private); chk.E(err) {
|
||||
// there wasn't any owners, or they couldn't be found on the spider
|
||||
// seeds.
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
// start up a spider run to trigger every 30 minutes
|
||||
ticker := time.NewTicker(s.C.SpiderTime)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if err = s.Spider(s.C.Private); chk.E(err) {
|
||||
// there wasn't any owners, or they couldn't be found on the spider
|
||||
// seeds.
|
||||
err = nil
|
||||
}
|
||||
case <-s.Ctx.Done():
|
||||
log.I.F("stopping spider ticker")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
addr := net.JoinHostPort(host, strconv.Itoa(port))
|
||||
log.I.F("starting relay listener at %s", addr)
|
||||
var ln net.Listener
|
||||
if ln, err = net.Listen("tcp", addr); err != nil {
|
||||
return err
|
||||
}
|
||||
s.httpServer = &http.Server{
|
||||
Handler: cors.Default().Handler(s),
|
||||
Addr: addr,
|
||||
ReadHeaderTimeout: 7 * time.Second,
|
||||
IdleTimeout: 28 * time.Second,
|
||||
}
|
||||
for _, startedC := range started {
|
||||
close(startedC)
|
||||
}
|
||||
if err = s.httpServer.Serve(ln); errors.Is(err, http.ErrServerClosed) {
|
||||
} else if err != nil {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown gracefully shuts down the server and its components. It ensures that
// all resources are properly released.
//
// # Expected Behaviour
//
// - Logs shutting down message.
//
// - Cancels the context to stop ongoing operations.
//
// - Closes the event store, logging the action and checking for errors.
//
// - Shuts down the HTTP server, logging the action and checking for errors.
//
// - If the relay implements ShutdownAware, it calls OnShutdown with the
// context.
func (s *Server) Shutdown() {
	log.I.Ln("shutting down relay")
	// cancel first so all context-aware workers (spider ticker, handlers)
	// begin winding down
	s.Cancel()
	log.W.Ln("closing event store")
	chk.E(s.relay.Storage().Close())
	if s.httpServer != nil {
		log.W.Ln("shutting down relay listener")
		// NOTE(review): s.Ctx was cancelled just above, so this Shutdown call
		// will not wait for in-flight requests to drain — confirm whether an
		// independent or timeout-bounded context was intended here.
		chk.E(s.httpServer.Shutdown(s.Ctx))
	}
	// give ShutdownAware relays a chance to run their own cleanup
	if f, ok := s.relay.(relay.ShutdownAware); ok {
		f.OnShutdown(s.Ctx)
	}
}
|
||||
|
||||
// Router retrieves and returns the HTTP ServeMux associated with the server.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - router (*http.ServeMux): The ServeMux instance used for routing HTTP
|
||||
// requests.
|
||||
//
|
||||
// # Expected Behaviour
|
||||
//
|
||||
// - Returns the ServeMux that handles incoming HTTP requests to the server.
|
||||
func (s *Server) Router() (router *http.ServeMux) {
|
||||
return s.mux.ServeMux
|
||||
}
|
||||
226
pkg/app/relay/spider-fetch.go
Normal file
226
pkg/app/relay/spider-fetch.go
Normal file
@@ -0,0 +1,226 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"runtime/debug"
|
||||
"time"
|
||||
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
"orly.dev/pkg/database/indexes/types"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"orly.dev/pkg/encoders/kinds"
|
||||
"orly.dev/pkg/encoders/tag"
|
||||
"orly.dev/pkg/encoders/timestamp"
|
||||
"orly.dev/pkg/protocol/ws"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/errorf"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/values"
|
||||
)
|
||||
|
||||
// IdPkTs is a map of event IDs to their id, pubkey, kind, and timestamp
// This is used to reduce memory usage by storing only the essential information
// instead of the full events
type IdPkTs struct {
	Id        []byte // event id
	Pubkey    []byte // author pubkey
	Kind      uint16 // event kind
	Timestamp int64  // created_at; used to keep only the newest event per (pubkey, kind)
}
|
||||
|
||||
// SpiderFetch collects the newest event per (pubkey, kind) for the given
// authors, first from the local database and then — unless noFetch is set —
// from the configured spider seed relays, saving any newly fetched events.
// Unless noExtract is set, it returns the set of pubkeys found in "p" tags of
// those events.
//
// # Parameters
//
// - k: the kinds to query; nil means all kinds (and switches the remote query
// to a Since-window instead of a fixed limit).
//
// - noFetch: when true, only the local database is consulted.
//
// - noExtract: when true, no "p"-tag pubkeys are collected and pks is empty.
//
// - pubkeys: the author pubkeys to query for.
func (s *Server) SpiderFetch(
	k *kinds.T, noFetch, noExtract bool, pubkeys ...[]byte,
) (pks [][]byte, err error) {
	// Map to store id, pubkey, kind, and timestamp for each event
	// Key is a combination of pubkey and kind for deduplication
	pkKindMap := make(map[string]*IdPkTs)
	// Map to collect pubkeys from p tags
	pkMap := make(map[string]struct{})

	// first search the local database
	pkList := tag.New(pubkeys...)
	f := &filter.F{
		Kinds:   k,
		Authors: pkList,
	}

	// human-readable kind list for log lines; "*" when querying all kinds
	var kindsList string
	if k != nil {
		for i, kk := range k.K {
			if i > 0 {
				kindsList += ","
			}
			kindsList += kk.Name()
		}
	} else {
		kindsList = "*"
	}

	// Query local database
	var localEvents event.S
	if localEvents, err = s.Storage().QueryEvents(s.Ctx, f); chk.E(err) {
		// none were found, so we need to scan the spiders
		err = nil
	}

	// Process local events
	for _, ev := range localEvents {
		// Create a key based on pubkey and kind for deduplication
		pkKindKey := string(ev.Pubkey) + string(ev.Kind.Marshal(nil))

		// Check if we already have an event with this pubkey and kind
		existing, exists := pkKindMap[pkKindKey]

		// If it doesn't exist or the new event is newer, store it
		if !exists || ev.CreatedAtInt64() > existing.Timestamp {
			pkKindMap[pkKindKey] = &IdPkTs{
				Id:        ev.ID,
				Pubkey:    ev.Pubkey,
				Kind:      ev.Kind.ToU16(),
				Timestamp: ev.CreatedAtInt64(),
			}

			// Extract p tags if not in noExtract mode
			if !noExtract {
				t := ev.Tags.GetAll(tag.New("p"))
				for _, tt := range t.ToSliceOfTags() {
					pkh := tt.Value()
					// only accept hex-encoded 32-byte schnorr pubkeys
					if len(pkh) != 2*schnorr.PubKeyBytesLen {
						continue
					}
					pk := make([]byte, schnorr.PubKeyBytesLen)
					if _, err = hex.DecBytes(pk, pkh); err != nil {
						// malformed hex: skip this tag, not the whole event
						err = nil
						continue
					}
					pkMap[string(pk)] = struct{}{}
				}
			}
		}

		// Nil the event to free memory
		ev = nil
	}

	log.I.F("%d events found of type %s", len(pkKindMap), kindsList)

	if !noFetch && len(s.C.SpiderSeeds) > 0 {
		// we need to search the spider seeds.
		// Break up pubkeys into batches of 128
		for i := 0; i < len(pubkeys); i += 128 {
			end := i + 128
			if end > len(pubkeys) {
				end = len(pubkeys)
			}
			batchPubkeys := pubkeys[i:end]
			log.I.F(
				"processing batch %d to %d of %d for kind %s",
				i, end, len(pubkeys), kindsList,
			)
			batchPkList := tag.New(batchPubkeys...)
			lim := uint(batchPkList.Len())
			l := &lim
			var since *timestamp.T
			if k == nil {
				// all-kinds query: bound by time window (1.5x the spider
				// interval) rather than count
				since = timestamp.FromTime(time.Now().Add(-1 * s.C.SpiderTime * 3 / 2))
			} else {
				l = values.ToUintPointer(512)
			}
			batchFilter := &filter.F{
				Kinds:   k,
				Authors: batchPkList,
				Since:   since,
				Limit:   l,
			}
			for _, seed := range s.C.SpiderSeeds {
				// bail out promptly if the server is shutting down
				select {
				case <-s.Ctx.Done():
					return
				default:
				}
				var evss event.S
				var cli *ws.Client
				if cli, err = ws.RelayConnect(
					context.Bg(), seed,
				); chk.E(err) {
					// unreachable seed: try the next one
					err = nil
					continue
				}
				// NOTE(review): a QuerySync failure returns from the whole
				// fetch (with err cleared) rather than trying the next seed —
				// confirm this asymmetry with RelayConnect is intended.
				if evss, err = cli.QuerySync(
					context.Bg(), batchFilter,
				); chk.E(err) {
					err = nil
					return
				}
				// Process each event immediately
				// (this i shadows the batch index above)
				for i, ev := range evss {
					// log.I.S(ev)
					// Create a key based on pubkey and kind for deduplication
					pkKindKey := string(ev.Pubkey) + string(ev.Kind.Marshal(nil))
					// Check if we already have an event with this pubkey and kind
					existing, exists := pkKindMap[pkKindKey]
					// If it doesn't exist or the new event is newer, store it and save to database
					if !exists || ev.CreatedAtInt64() > existing.Timestamp {
						var ser *types.Uint40
						// NOTE(review): finding the event already stored aborts
						// the whole fetch with an error rather than skipping the
						// event — confirm this is intended.
						if ser, err = s.Storage().GetSerialById(ev.ID); err == nil && ser != nil {
							err = errorf.E("event already exists: %0x", ev.ID)
							return
						} else {
							// verify the signature
							var valid bool
							if valid, err = ev.Verify(); chk.E(err) || !valid {
								continue
							}
						}
						// Save the event to the database
						if _, _, err = s.Storage().SaveEvent(
							s.Ctx, ev, true, nil,
						); chk.E(err) {
							err = nil
							continue
						}
						// Store the essential information
						pkKindMap[pkKindKey] = &IdPkTs{
							Id:        ev.ID,
							Pubkey:    ev.Pubkey,
							Kind:      ev.Kind.ToU16(),
							Timestamp: ev.CreatedAtInt64(),
						}
						// Extract p tags if not in noExtract mode
						if !noExtract {
							t := ev.Tags.GetAll(tag.New("p"))
							for _, tt := range t.ToSliceOfTags() {
								pkh := tt.Value()
								if len(pkh) != 2*schnorr.PubKeyBytesLen {
									continue
								}
								pk := make([]byte, schnorr.PubKeyBytesLen)
								if _, err = hex.DecBytes(pk, pkh); err != nil {
									err = nil
									continue
								}
								pkMap[string(pk)] = struct{}{}
							}
						}
					}
					// Nil the event in the slice to free memory
					evss[i] = nil
				}
			}
		}
	}
	chk.E(s.Storage().Sync())
	debug.FreeOSMemory()
	// If we're in noExtract mode, just return
	if noExtract {
		return
	}
	// Convert the collected pubkeys to the return format
	for pk := range pkMap {
		pks = append(pks, []byte(pk))
	}
	log.I.F("found %d pks", len(pks))
	return
}
|
||||
136
pkg/app/relay/spider.go
Normal file
136
pkg/app/relay/spider.go
Normal file
@@ -0,0 +1,136 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/encoders/kinds"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/keys"
|
||||
"orly.dev/pkg/utils/log"
|
||||
)
|
||||
|
||||
func (s *Server) Spider(noFetch ...bool) (err error) {
|
||||
var ownersPubkeys [][]byte
|
||||
for _, v := range s.C.Owners {
|
||||
var pk []byte
|
||||
if pk, err = keys.DecodeNpubOrHex(v); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
// owners themselves are on the OwnersFollowed list as first level
|
||||
ownersPubkeys = append(ownersPubkeys, pk)
|
||||
}
|
||||
if len(ownersPubkeys) == 0 {
|
||||
// there is no OwnersPubkeys, so there is nothing to do.
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
dontFetch := false
|
||||
if len(noFetch) > 0 && noFetch[0] {
|
||||
dontFetch = true
|
||||
}
|
||||
log.I.F("getting ownersFollowed")
|
||||
var ownersFollowed [][]byte
|
||||
if ownersFollowed, err = s.SpiderFetch(
|
||||
kinds.New(kind.FollowList), dontFetch, false, ownersPubkeys...,
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// log.I.S(ownersFollowed)
|
||||
log.I.F("getting followedFollows")
|
||||
var followedFollows [][]byte
|
||||
if followedFollows, err = s.SpiderFetch(
|
||||
kinds.New(kind.FollowList), dontFetch, false, ownersFollowed...,
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
log.I.F("getting ownersMuted")
|
||||
var ownersMuted [][]byte
|
||||
if ownersMuted, err = s.SpiderFetch(
|
||||
kinds.New(kind.MuteList), dontFetch, false, ownersPubkeys...,
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// remove the ownersFollowed and ownersMuted items from the followedFollows
|
||||
// list
|
||||
filteredFollows := make([][]byte, 0, len(followedFollows))
|
||||
for _, follow := range followedFollows {
|
||||
for _, owner := range ownersFollowed {
|
||||
if bytes.Equal(follow, owner) {
|
||||
break
|
||||
}
|
||||
}
|
||||
for _, owner := range ownersMuted {
|
||||
if bytes.Equal(follow, owner) {
|
||||
break
|
||||
}
|
||||
}
|
||||
filteredFollows = append(filteredFollows, follow)
|
||||
}
|
||||
followedFollows = filteredFollows
|
||||
own := "owner"
|
||||
if len(ownersPubkeys) > 1 {
|
||||
own = "owners"
|
||||
}
|
||||
fol := "pubkey"
|
||||
if len(ownersFollowed) > 1 {
|
||||
fol = "pubkeys"
|
||||
}
|
||||
folfol := "pubkey"
|
||||
if len(followedFollows) > 1 {
|
||||
folfol = "pubkeys"
|
||||
}
|
||||
mut := "pubkey"
|
||||
if len(ownersMuted) > 1 {
|
||||
mut = "pubkeys"
|
||||
}
|
||||
log.T.F(
|
||||
"found %d %s with a total of %d followed %s and %d followed's follows %s, and excluding %d owner muted %s",
|
||||
len(ownersPubkeys), own,
|
||||
len(ownersFollowed), fol,
|
||||
len(followedFollows), folfol,
|
||||
len(ownersMuted), mut,
|
||||
)
|
||||
// add the owners to the ownersFollowed
|
||||
ownersFollowed = append(ownersFollowed, ownersPubkeys...)
|
||||
s.SetOwnersPubkeys(ownersPubkeys)
|
||||
s.SetOwnersFollowed(ownersFollowed)
|
||||
s.SetFollowedFollows(followedFollows)
|
||||
s.SetOwnersMuted(ownersMuted)
|
||||
// lastly, update all followed users new events in the background
|
||||
if !dontFetch && s.C.SpiderType != "none" {
|
||||
go func() {
|
||||
var k *kinds.T
|
||||
if s.C.SpiderType == "directory" {
|
||||
k = kinds.New(
|
||||
kind.ProfileMetadata, kind.RelayListMetadata,
|
||||
kind.DMRelaysList, kind.MuteList,
|
||||
)
|
||||
}
|
||||
everyone := ownersFollowed
|
||||
if s.C.SpiderSecondDegree &&
|
||||
(s.C.SpiderType == "follows" ||
|
||||
s.C.SpiderType == "directory") {
|
||||
everyone = append(ownersFollowed, followedFollows...)
|
||||
}
|
||||
_, _ = s.SpiderFetch(
|
||||
k, false, true, everyone...,
|
||||
)
|
||||
// get the directory events also for second degree if spider
|
||||
// type is directory but second degree is disabled, so all
|
||||
// directory data is available for all whitelisted users.
|
||||
if !s.C.SpiderSecondDegree && s.C.SpiderType == "directory" {
|
||||
k = kinds.New(
|
||||
kind.ProfileMetadata, kind.RelayListMetadata,
|
||||
kind.DMRelaysList, kind.MuteList,
|
||||
)
|
||||
everyone = append(ownersFollowed, followedFollows...)
|
||||
_, _ = s.SpiderFetch(
|
||||
k, false, true, everyone...,
|
||||
)
|
||||
|
||||
}
|
||||
}()
|
||||
}
|
||||
}()
|
||||
return
|
||||
}
|
||||
@@ -1,20 +1,21 @@
|
||||
package realy
|
||||
package relay
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/eventid"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/interfaces/store"
|
||||
"orly.dev/pkg/protocol/servemux"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/units"
|
||||
"testing"
|
||||
|
||||
"orly.dev/encoders/event"
|
||||
"orly.dev/encoders/eventid"
|
||||
"orly.dev/encoders/filter"
|
||||
"orly.dev/interfaces/store"
|
||||
"orly.dev/utils/context"
|
||||
"orly.dev/utils/units"
|
||||
)
|
||||
|
||||
func startTestRelay(c context.T, t *testing.T, tr *testRelay) *Server {
|
||||
t.Helper()
|
||||
serveMux := servemux.NewServeMux()
|
||||
srv, _ := NewServer(
|
||||
&ServerParams{
|
||||
Ctx: c,
|
||||
@@ -22,6 +23,7 @@ func startTestRelay(c context.T, t *testing.T, tr *testRelay) *Server {
|
||||
Rl: tr,
|
||||
MaxLimit: 500 * units.Kb,
|
||||
},
|
||||
serveMux,
|
||||
)
|
||||
started := make(chan bool)
|
||||
go srv.Start("127.0.0.1", 0, started)
|
||||
50
pkg/app/relay/user-auth.go
Normal file
50
pkg/app/relay/user-auth.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package relay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/http"
|
||||
"orly.dev/pkg/protocol/httpauth"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (s *Server) UserAuth(
|
||||
r *http.Request, remote string, tolerance ...time.Duration,
|
||||
) (authed bool, pubkey []byte, super bool) {
|
||||
var valid bool
|
||||
var err error
|
||||
var tolerate time.Duration
|
||||
if len(tolerance) > 0 {
|
||||
tolerate = tolerance[0]
|
||||
}
|
||||
if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if !valid {
|
||||
log.E.F(
|
||||
"invalid auth %s from %s",
|
||||
r.Header.Get("Authorization"), remote,
|
||||
)
|
||||
return
|
||||
}
|
||||
for _, pk := range append(s.ownersFollowed, s.followedFollows...) {
|
||||
if bytes.Equal(pk, pubkey) {
|
||||
authed = true
|
||||
return
|
||||
}
|
||||
}
|
||||
// if the client is one of the relay cluster replicas, also set the super
|
||||
// flag to indicate that privilege checks can be bypassed.
|
||||
if len(s.Peers.Pubkeys) > 0 {
|
||||
for _, pk := range s.Peers.Pubkeys {
|
||||
if bytes.Equal(pk, pubkey) {
|
||||
authed = true
|
||||
super = true
|
||||
pubkey = pk
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
39
pkg/app/resources.go
Normal file
39
pkg/app/resources.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MonitorResources periodically logs resource usage metrics such as the number
|
||||
// of active goroutines and CGO calls at 15-minute intervals, and exits when the
|
||||
// provided context signals cancellation.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - c: Context used to control the lifecycle of the resource monitoring process.
|
||||
//
|
||||
// # Expected behaviour
|
||||
//
|
||||
// The function runs indefinitely, logging metrics every 15 minutes until the
|
||||
// context is cancelled. Upon cancellation, it logs a shutdown message and exits
|
||||
// gracefully without returning any values.
|
||||
func MonitorResources(c context.T) {
|
||||
tick := time.NewTicker(time.Minute * 15)
|
||||
log.I.Ln("running process", os.Args[0], os.Getpid())
|
||||
for {
|
||||
select {
|
||||
case <-c.Done():
|
||||
log.D.Ln("shutting down resource monitor")
|
||||
return
|
||||
case <-tick.C:
|
||||
log.D.Ln(
|
||||
"# goroutines", runtime.NumGoroutine(),
|
||||
"# cgo calls", runtime.NumCgoCall(),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -7,7 +7,7 @@ package base58_test
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"orly.dev/crypto/ec/base58"
|
||||
"orly.dev/pkg/crypto/ec/base58"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -6,7 +6,7 @@ package base58_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"orly.dev/crypto/ec/base58"
|
||||
"orly.dev/pkg/crypto/ec/base58"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -6,7 +6,7 @@ package base58
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"orly.dev/crypto/sha256"
|
||||
"orly.dev/pkg/crypto/sha256"
|
||||
)
|
||||
|
||||
// ErrChecksum indicates that the checksum of a check-encoded string does not verify against
|
||||
@@ -5,7 +5,7 @@
|
||||
package base58_test
|
||||
|
||||
import (
|
||||
"orly.dev/crypto/ec/base58"
|
||||
"orly.dev/pkg/crypto/ec/base58"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -6,14 +6,14 @@ package base58_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
base59 "orly.dev/crypto/ec/base58"
|
||||
"orly.dev/pkg/crypto/ec/base58"
|
||||
)
|
||||
|
||||
// This example demonstrates how to decode modified base58 encoded data.
|
||||
func ExampleDecode() {
|
||||
// Decode example modified base58 encoded data.
|
||||
encoded := "25JnwSn7XKfNQ"
|
||||
decoded := base59.Decode(encoded)
|
||||
decoded := base58.Decode(encoded)
|
||||
|
||||
// Show the decoded data.
|
||||
fmt.Println("Decoded Data:", string(decoded))
|
||||
@@ -27,7 +27,7 @@ func ExampleDecode() {
|
||||
func ExampleEncode() {
|
||||
// Encode example data with the modified base58 encoding scheme.
|
||||
data := []byte("Test data")
|
||||
encoded := base59.Encode(data)
|
||||
encoded := base58.Encode(data)
|
||||
|
||||
// Show the encoded data.
|
||||
fmt.Println("Encoded Data:", encoded)
|
||||
@@ -40,7 +40,7 @@ func ExampleEncode() {
|
||||
func ExampleCheckDecode() {
|
||||
// Decode an example Base58Check encoded data.
|
||||
encoded := "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"
|
||||
decoded, version, err := base59.CheckDecode(encoded)
|
||||
decoded, version, err := base58.CheckDecode(encoded)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return
|
||||
@@ -60,7 +60,7 @@ func ExampleCheckDecode() {
|
||||
func ExampleCheckEncode() {
|
||||
// Encode example data with the Base58Check encoding scheme.
|
||||
data := []byte("Test data")
|
||||
encoded := base59.CheckEncode(data, 0)
|
||||
encoded := base58.CheckEncode(data, 0)
|
||||
|
||||
// Show the encoded data.
|
||||
fmt.Println("Encoded Data:", encoded)
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user