Compare commits
74 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 5b4dd9ea60 | |
| | bae1d09f8d | |
| | f1f3236196 | |
| | f01cd562f8 | |
| | d2d0821d19 | |
| | 09b00c76ed | |
| | de57fd7bc4 | |
| | b7c2e609f6 | |
| | cc63fe751a | |
| | d96d10723a | |
| | ec50afdec0 | |
| | ade987c9ac | |
| | 9f39ca8a62 | |
| | f85a8b99a3 | |
| | d7bda40e18 | |
| | b67961773d | |
| | 5fd58681c9 | |
| | 2bdc1b7bc0 | |
| | 332b9b05f7 | |
| | c43ddb77e0 | |
| | e90fc619f2 | |
| | 29e5444545 | |
| | 7ee613bb0e | |
| | 23985719ba | |
| | 3314a2a892 | |
| | 7c14c72e9d | |
| | dbdc5d703e | |
| | c1acf0deaa | |
| | ccffeb902c | |
| | 35201490a0 | |
| | 3afd6131d5 | |
| | 386878fec8 | |
| | 474e16c315 | |
| | 47e94c5ff6 | |
| | c62fdc96d5 | |
| | 4c66eda10e | |
| | 9fdef77e02 | |
| | e8a69077b3 | |
| | 128bc60726 | |
| | 6c6f9e8874 | |
| | 01131f252e | |
| | 02333b74ae | |
| | 86ac7b7897 | |
| | 7e6adf9fba | |
| | 7d5ebd5ccd | |
| | f8a321eaee | |
| | 48c7fab795 | |
| | f6054f3c37 | |
| | e1da199858 | |
| | 45b4f82995 | |
| | e58eb1d3e3 | |
| | 72d6ddff15 | |
| | a50ef55d8e | |
| | c2d5d2a165 | |
| | 05b13399e3 | |
| | 0dea0ca791 | |
| | ff017b45d2 | |
| | 50179e44ed | |
| | 34a3b1ba69 | |
| | 093a19db29 | |
| | 2ba361c915 | |
| | 7736bb7640 | |
| | 804e1c9649 | |
| | 81a6aade4e | |
| | fc9600f99d | |
| | 199f922208 | |
| | 405e223aa6 | |
| | fc3a89a309 | |
| | ba8166da07 | |
| | 3e3af08644 | |
| | fbdf565bf7 | |
| | 14b6960070 | |
| | f9896e52ea | |
| | 42273ab2fa | |
@@ -38,7 +38,7 @@ describing how the item is used.

For documentation on package, summarise in up to 3 sentences the functions and
purpose of the package

-Do not use markdown ** or __ or any similar things in initial words of a bullet
+Do not use markdown \*\* or \_\_ or any similar things in initial words of a bullet
point, instead use standard godoc style # prefix for header sections

ALWAYS separate each bullet point with an empty line, and ALWAYS indent them

@@ -90,8 +90,10 @@ A good typical example:

```

use the source of the relay-tester to help guide what expectations the test has,
and use context7 for information about the nostr protocol, and use additional
log statements to help locate the cause of bugs

always use Go v1.25.1 for everything involving Go
+
+always use the nips repository also for information, found at ../github.com/nostr-protocol/nips attached to the project
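The bullet-point rules above map onto standard godoc conventions; a minimal sketch of a package comment that satisfies them (the package name and wording are illustrative, not taken from the repository):

```go
// Package relaytester exercises a nostr relay against protocol expectations.
//
// # Testing notes
//
//   - use the source of the relay-tester to guide what each test expects
//
//   - add log statements to help locate the cause of bugs
package relaytester
```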
18  .github/workflows/go.yml  vendored

@@ -16,10 +16,9 @@ name: Go

on:
  push:
    tags:
-      - 'v[0-9]+.[0-9]+.[0-9]+'
+      - "v[0-9]+.[0-9]+.[0-9]+"

jobs:

  build:
    runs-on: ubuntu-latest
    steps:

@@ -28,26 +27,25 @@ jobs:

      - name: Set up Go
        uses: actions/setup-go@v4
        with:
-          go-version: '1.25'
+          go-version: "1.25"

      - name: Install libsecp256k1
        run: ./scripts/ubuntu_install_libsecp256k1.sh

      - name: Build with cgo
        run: go build -v ./...

      - name: Test with cgo
        run: go test -v ./...

      - name: Set CGO off
        run: echo "CGO_ENABLED=0" >> $GITHUB_ENV

      - name: Build
        run: go build -v ./...

      - name: Test
        run: go test -v ./...

# release:
#   needs: build
#   runs-on: ubuntu-latest
10  .gitignore  vendored

@@ -76,7 +76,7 @@ cmd/benchmark/data

!*.css
!*.ts
!*.html
-!Dockerfile
+!contrib/stella/Dockerfile
!*.lock
!*.nix
!license

@@ -88,10 +88,10 @@ cmd/benchmark/data

!.gitignore
!version
!out.jsonl
-!Dockerfile*
+!contrib/stella/Dockerfile
!strfry.conf
!config.toml
-!.dockerignore
+!contrib/stella/.dockerignore
!*.jsx
!*.tsx
!app/web/dist

@@ -99,6 +99,7 @@ cmd/benchmark/data

-!/app/web/dist/*
+!/app/web/dist/**
!bun.lock
+!*.svelte
# ...even if they are in subdirectories
!*/
/blocklist.json

@@ -120,4 +121,5 @@ pkg/database/testrealy

/.idea/inspectionProfiles/Project_Default.xml
/.idea/.name
/ctxproxy.config.yml
cmd/benchmark/external/**
+app/web/dist/**
@@ -23,29 +23,37 @@ import (

// and default values. It defines parameters for app behaviour, storage
// locations, logging, and network settings used across the relay service.
type C struct {
    AppName string `env:"ORLY_APP_NAME" usage:"set a name to display on information about the relay" default:"ORLY"`
    DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the event store" default:"~/.local/share/ORLY"`
    Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
    Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
    HealthPort int `env:"ORLY_HEALTH_PORT" default:"0" usage:"optional health check HTTP port; 0 disables"`
    EnableShutdown bool `env:"ORLY_ENABLE_SHUTDOWN" default:"false" usage:"if true, expose /shutdown on the health port to gracefully stop the process (for profiling)"`
    LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"relay log level: fatal error warn info debug trace"`
    DBLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"database log level: fatal error warn info debug trace"`
    LogToStdout bool `env:"ORLY_LOG_TO_STDOUT" default:"false" usage:"log to stdout instead of stderr"`
    Pprof string `env:"ORLY_PPROF" usage:"enable pprof in modes: cpu,memory,allocation,heap,block,goroutine,threadcreate,mutex"`
    PprofPath string `env:"ORLY_PPROF_PATH" usage:"optional directory to write pprof profiles into (inside container); default is temporary dir"`
    PprofHTTP bool `env:"ORLY_PPROF_HTTP" default:"false" usage:"if true, expose net/http/pprof on port 6060"`
    OpenPprofWeb bool `env:"ORLY_OPEN_PPROF_WEB" default:"false" usage:"if true, automatically open the pprof web viewer when profiling is enabled"`
    IPWhitelist []string `env:"ORLY_IP_WHITELIST" usage:"comma-separated list of IP addresses to allow access from, matches on prefixes to allow private subnets, eg 10.0.0 = 10.0.0.0/8"`
    Admins []string `env:"ORLY_ADMINS" usage:"comma-separated list of admin npubs"`
    Owners []string `env:"ORLY_OWNERS" usage:"comma-separated list of owner npubs, who have full control of the relay for wipe and restart and other functions"`
    ACLMode string `env:"ORLY_ACL_MODE" usage:"ACL mode: follows,none" default:"none"`
-   SpiderMode string `env:"ORLY_SPIDER_MODE" usage:"spider mode: none,follow" default:"none"`
+   SpiderMode string `env:"ORLY_SPIDER_MODE" usage:"spider mode: none,follows" default:"none"`
    SpiderFrequency time.Duration `env:"ORLY_SPIDER_FREQUENCY" usage:"spider frequency in seconds" default:"1h"`
+   BootstrapRelays []string `env:"ORLY_BOOTSTRAP_RELAYS" usage:"comma-separated list of bootstrap relay URLs for initial sync"`
+   NWCUri string `env:"ORLY_NWC_URI" usage:"NWC (Nostr Wallet Connect) connection string for Lightning payments"`
+   SubscriptionEnabled bool `env:"ORLY_SUBSCRIPTION_ENABLED" default:"false" usage:"enable subscription-based access control requiring payment for non-directory events"`
+   MonthlyPriceSats int64 `env:"ORLY_MONTHLY_PRICE_SATS" default:"6000" usage:"price in satoshis for one month subscription (default ~$2 USD)"`
+   RelayURL string `env:"ORLY_RELAY_URL" usage:"base URL for the relay dashboard (e.g., https://relay.example.com)"`
+
+   // Web UI and dev mode settings
+   WebDisableEmbedded bool `env:"ORLY_WEB_DISABLE" default:"false" usage:"disable serving the embedded web UI; useful for hot-reload during development"`
+   WebDevProxyURL string `env:"ORLY_WEB_DEV_PROXY_URL" usage:"when ORLY_WEB_DISABLE is true, reverse-proxy non-API paths to this dev server URL (e.g. http://localhost:5173)"`
+
+   // Sprocket settings
+   SprocketEnabled bool `env:"ORLY_SPROCKET_ENABLED" default:"false" usage:"enable sprocket event processing plugin system"`
}

// New creates and initializes a new configuration object for the relay
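Each struct tag above names the environment variable, usage string, and default for one setting; a minimal standalone sketch of how a single variable such as ORLY_PORT (default 3334 per its tag) resolves, assuming plain os.Getenv semantics rather than the relay's actual loader:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	// Default taken from the ORLY_PORT struct tag above.
	port := 3334
	if s := os.Getenv("ORLY_PORT"); s != "" {
		if p, err := strconv.Atoi(s); err == nil {
			port = p
		}
	}
	fmt.Printf("relay would listen on port %d\n", port)
}
```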
@@ -136,6 +144,21 @@ func GetEnv() (requested bool) {

    return
}

+// IdentityRequested checks if the first command line argument is "identity" and returns
+// whether the relay identity should be printed and the program should exit.
+//
+// Return Values
+// - requested: true if the 'identity' subcommand was provided, false otherwise.
+func IdentityRequested() (requested bool) {
+   if len(os.Args) > 1 {
+       switch strings.ToLower(os.Args[1]) {
+       case "identity":
+           requested = true
+       }
+   }
+   return
+}

// KV is a key/value pair.
type KV struct{ Key, Value string }
@@ -206,15 +229,14 @@ func EnvKV(cfg any) (m KVSlice) {

        k := t.Field(i).Tag.Get("env")
        v := reflect.ValueOf(cfg).Field(i).Interface()
        var val string
-       switch v.(type) {
+       switch v := v.(type) {
        case string:
-           val = v.(string)
+           val = v
        case int, bool, time.Duration:
            val = fmt.Sprint(v)
        case []string:
-           arr := v.([]string)
-           if len(arr) > 0 {
-               val = strings.Join(arr, ",")
+           if len(v) > 0 {
+               val = strings.Join(v, ",")
            }
        }
        // this can happen with embedded structs

@@ -286,5 +308,4 @@ func PrintHelp(cfg *C, printer io.Writer) {

    fmt.Fprintf(printer, "\ncurrent configuration:\n\n")
    PrintEnv(cfg, printer)
    fmt.Fprintln(printer)
-   return
}
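The EnvKV change above swaps a bare type switch for one that binds the asserted value, which removes the repeated type assertions; the same idiom in isolation, as a runnable sketch:

```go
package main

import (
	"fmt"
	"strings"
)

// format mirrors the shape of the EnvKV switch: inside each case, v already
// has the concrete type, so no v.(string) or v.([]string) assertion is needed.
func format(v any) (val string) {
	switch v := v.(type) {
	case string:
		val = v
	case int, bool:
		val = fmt.Sprint(v)
	case []string:
		if len(v) > 0 {
			val = strings.Join(v, ",")
		}
	}
	return
}

func main() {
	fmt.Println(format("x"), format(42), format([]string{"a", "b"}))
}
```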
@@ -25,7 +25,7 @@ func (l *Listener) HandleAuth(b []byte) (err error) {

    var valid bool
    if valid, err = auth.Validate(
        env.Event, l.challenge.Load(),
-       l.ServiceURL(l.req),
+       l.WebSocketURL(l.req),
    ); err != nil {
        e := err.Error()
        if err = Ok.Error(l, env, e); chk.E(err) {

@@ -50,6 +50,34 @@ func (l *Listener) HandleAuth(b []byte) (err error) {

            env.Event.Pubkey,
        )
        l.authedPubkey.Store(env.Event.Pubkey)
+
+       // Check if this is a first-time user and create welcome note
+       go l.handleFirstTimeUser(env.Event.Pubkey)
    }
    return
}

+// handleFirstTimeUser checks if user is logging in for first time and creates welcome note
+func (l *Listener) handleFirstTimeUser(pubkey []byte) {
+   // Check if this is a first-time user
+   isFirstTime, err := l.Server.D.IsFirstTimeUser(pubkey)
+   if err != nil {
+       log.E.F("failed to check first-time user status: %v", err)
+       return
+   }
+
+   if !isFirstTime {
+       return // Not a first-time user
+   }
+
+   // Get payment processor to create welcome note
+   if l.Server.paymentProcessor != nil {
+       // Set the dashboard URL based on the current HTTP request
+       dashboardURL := l.Server.DashboardURL(l.req)
+       l.Server.paymentProcessor.SetDashboardURL(dashboardURL)
+
+       if err := l.Server.paymentProcessor.CreateWelcomeNote(pubkey); err != nil {
+           log.E.F("failed to create welcome note for first-time user: %v", err)
+       }
+   }
+}
78  app/handle-count.go  Normal file

@@ -0,0 +1,78 @@

package app

import (
    "context"
    "errors"
    "fmt"
    "time"

    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/acl"
    "next.orly.dev/pkg/encoders/envelopes/authenvelope"
    "next.orly.dev/pkg/encoders/envelopes/countenvelope"
    "next.orly.dev/pkg/utils/normalize"
)

// HandleCount processes a COUNT envelope by parsing the request, verifying
// permissions, invoking the database CountEvents for each provided filter, and
// responding with a COUNT response containing the aggregate count.
func (l *Listener) HandleCount(msg []byte) (err error) {
    log.D.F("HandleCount: START processing from %s", l.remote)

    // Parse the COUNT request
    env := countenvelope.New()
    if _, err = env.Unmarshal(msg); chk.E(err) {
        return normalize.Error.Errorf(err.Error())
    }
    log.D.C(func() string { return fmt.Sprintf("COUNT sub=%s filters=%d", env.Subscription, len(env.Filters)) })

    // If ACL is active, send a challenge (same as REQ path)
    if acl.Registry.Active.Load() != "none" {
        if err = authenvelope.NewChallengeWith(l.challenge.Load()).Write(l); chk.E(err) {
            return
        }
    }

    // Check read permissions
    accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
    switch accessLevel {
    case "none":
        return errors.New("auth required: user not authed or has no read access")
    default:
        // allowed to read
    }

    // Use a bounded context for counting
    ctx, cancel := context.WithTimeout(l.ctx, 30*time.Second)
    defer cancel()

    // Aggregate count across all provided filters
    var total int
    var approx bool // database returns false per implementation
    for _, f := range env.Filters {
        if f == nil {
            continue
        }
        var cnt int
        var a bool
        cnt, a, err = l.D.CountEvents(ctx, f)
        if chk.E(err) {
            return
        }
        total += cnt
        approx = approx || a
    }

    // Build and send COUNT response
    var res *countenvelope.Response
    if res, err = countenvelope.NewResponseFrom(env.Subscription, total, approx); chk.E(err) {
        return
    }
    if err = res.Write(l); chk.E(err) {
        return
    }

    log.D.F("HandleCount: COMPLETED processing from %s count=%d approx=%v", l.remote, total, approx)
    return nil
}
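For reference, a COUNT exchange on the wire (per NIP-45, which this handler implements) has roughly the following shape; the subscription id and filter are illustrative, not taken from the code above:

```go
// Hypothetical NIP-45 COUNT round trip handled by HandleCount.
const (
	countRequest  = `["COUNT","sub1",{"kinds":[1]}]`
	countResponse = `["COUNT","sub1",{"count":42}]`
)
```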
@@ -145,12 +145,10 @@ func (l *Listener) HandleDelete(env *eventenvelope.Submission) (err error) {

            if ev, err = l.FetchEventBySerial(s); chk.E(err) {
                continue
            }
-           // check that the author is the same as the signer of the
-           // delete, for the e tag case the author is the signer of
-           // the event.
-           if !utils.FastEqual(env.E.Pubkey, ev.Pubkey) {
+           // allow deletion if the signer is the author OR an admin/owner
+           if !(ownerDelete || utils.FastEqual(env.E.Pubkey, ev.Pubkey)) {
                log.W.F(
-                   "HandleDelete: attempted deletion of event %s by different user - delete pubkey=%s, event pubkey=%s",
+                   "HandleDelete: attempted deletion of event %s by unauthorized user - delete pubkey=%s, event pubkey=%s",
                    hex.Enc(ev.ID), hex.Enc(env.E.Pubkey),
                    hex.Enc(ev.Pubkey),
                )
@@ -18,6 +18,7 @@ import (

)

func (l *Listener) HandleEvent(msg []byte) (err error) {
+   log.D.F("handling event: %s", msg)
    // decode the envelope
    env := eventenvelope.NewSubmission()
    if msg, err = env.Unmarshal(msg); chk.E(err) {

@@ -31,6 +32,58 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {

    if len(msg) > 0 {
        log.I.F("extra '%s'", msg)
    }
+
+   // Check if sprocket is enabled and process event through it
+   if l.sprocketManager != nil && l.sprocketManager.IsEnabled() {
+       if !l.sprocketManager.IsRunning() {
+           // Sprocket is enabled but not running - drop all messages
+           log.W.F("sprocket is enabled but not running, dropping event %0x", env.E.ID)
+           if err = Ok.Error(
+               l, env, "sprocket policy not available",
+           ); chk.E(err) {
+               return
+           }
+           return
+       }
+
+       // Process event through sprocket
+       response, sprocketErr := l.sprocketManager.ProcessEvent(env.E)
+       if chk.E(sprocketErr) {
+           log.E.F("sprocket processing failed: %v", sprocketErr)
+           if err = Ok.Error(
+               l, env, "sprocket processing failed",
+           ); chk.E(err) {
+               return
+           }
+           return
+       }
+
+       // Handle sprocket response
+       switch response.Action {
+       case "accept":
+           // Continue with normal processing
+           log.D.F("sprocket accepted event %0x", env.E.ID)
+       case "reject":
+           // Return OK false with message
+           if err = okenvelope.NewFrom(
+               env.Id(), false,
+               reason.Error.F(response.Msg),
+           ).Write(l); chk.E(err) {
+               return
+           }
+           return
+       case "shadowReject":
+           // Return OK true but abort processing
+           if err = Ok.Ok(l, env, ""); chk.E(err) {
+               return
+           }
+           log.D.F("sprocket shadow rejected event %0x", env.E.ID)
+           return
+       default:
+           log.W.F("unknown sprocket action: %s", response.Action)
+           // Default to accept for unknown actions
+       }
+   }
    // check the event ID is correct
    calculatedId := env.E.GetIDBytes()
    if !utils.FastEqual(calculatedId, env.E.ID) {
@@ -4,48 +4,93 @@ import (

    "fmt"

    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/encoders/envelopes"
    "next.orly.dev/pkg/encoders/envelopes/authenvelope"
    "next.orly.dev/pkg/encoders/envelopes/closeenvelope"
    "next.orly.dev/pkg/encoders/envelopes/countenvelope"
    "next.orly.dev/pkg/encoders/envelopes/eventenvelope"
    "next.orly.dev/pkg/encoders/envelopes/noticeenvelope"
    "next.orly.dev/pkg/encoders/envelopes/reqenvelope"
)

func (l *Listener) HandleMessage(msg []byte, remote string) {
    // log.D.F("%s received message:\n%s", remote, msg)
    msgPreview := string(msg)
    if len(msgPreview) > 150 {
        msgPreview = msgPreview[:150] + "..."
    }
    // log.D.F("%s processing message (len=%d): %s", remote, len(msg), msgPreview)

    l.msgCount++
    var err error
    var t string
    var rem []byte
-   if t, rem, err = envelopes.Identify(msg); !chk.E(err) {
-       switch t {
-       case eventenvelope.L:
-           // log.D.F("eventenvelope: %s %s", remote, rem)
-           err = l.HandleEvent(rem)
-       case reqenvelope.L:
-           // log.D.F("reqenvelope: %s %s", remote, rem)
-           err = l.HandleReq(rem)
-       case closeenvelope.L:
-           // log.D.F("closeenvelope: %s %s", remote, rem)
-           err = l.HandleClose(rem)
-       case authenvelope.L:
-           // log.D.F("authenvelope: %s %s", remote, rem)
-           err = l.HandleAuth(rem)
-       default:
-           err = fmt.Errorf("unknown envelope type %s\n%s", t, rem)
-       }
-   }
-   if err != nil {
-       // log.D.C(
-       //  func() string {
-       //      return fmt.Sprintf(
-       //          "notice->%s %s", remote, err,
-       //      )
-       //  },
-       // )
-       if err = noticeenvelope.NewFrom(err.Error()).Write(l); err != nil {
-           return

+   // Attempt to identify the envelope type
+   if t, rem, err = envelopes.Identify(msg); err != nil {
+       log.E.F(
+           "%s envelope identification FAILED (len=%d): %v", remote, len(msg),
+           err,
+       )
+       log.T.F("%s malformed message content: %q", remote, msgPreview)
+       chk.E(err)
+       // Send error notice to client
+       if noticeErr := noticeenvelope.NewFrom("malformed message: " + err.Error()).Write(l); noticeErr != nil {
+           log.E.F(
+               "%s failed to send malformed message notice: %v", remote,
+               noticeErr,
+           )
+       }
+       return
+   }
+
+   log.T.F(
+       "%s identified envelope type: %s (payload_len=%d)", remote, t, len(rem),
+   )
+
+   // Process the identified envelope type
+   switch t {
+   case eventenvelope.L:
+       log.T.F("%s processing EVENT envelope", remote)
+       l.eventCount++
+       err = l.HandleEvent(rem)
+   case reqenvelope.L:
+       log.T.F("%s processing REQ envelope", remote)
+       l.reqCount++
+       err = l.HandleReq(rem)
+   case closeenvelope.L:
+       log.T.F("%s processing CLOSE envelope", remote)
+       err = l.HandleClose(rem)
+   case authenvelope.L:
+       log.T.F("%s processing AUTH envelope", remote)
+       err = l.HandleAuth(rem)
+   case countenvelope.L:
+       log.T.F("%s processing COUNT envelope", remote)
+       err = l.HandleCount(rem)
+   default:
+       err = fmt.Errorf("unknown envelope type %s", t)
+       log.E.F(
+           "%s unknown envelope type: %s (payload: %q)", remote, t,
+           string(rem),
+       )
+   }
+
+   // Handle any processing errors
+   if err != nil {
+       log.E.F("%s message processing FAILED (type=%s): %v", remote, t, err)
+       log.T.F("%s error context - original message: %q", remote, msgPreview)
+
+       // Send error notice to client
+       noticeMsg := fmt.Sprintf("%s: %s", t, err.Error())
+       if noticeErr := noticeenvelope.NewFrom(noticeMsg).Write(l); noticeErr != nil {
+           log.E.F(
+               "%s failed to send error notice after %s processing failure: %v",
+               remote, t, noticeErr,
+           )
+           return
+       }
+       log.T.F("%s sent error notice for %s processing failure", remote, t)
+   } else {
+       log.T.F("%s message processing SUCCESS (type=%s)", remote, t)
+   }
+}
@@ -4,9 +4,12 @@ import (

    "encoding/json"
    "net/http"
    "sort"
+   "strings"

    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
+   "next.orly.dev/pkg/crypto/p256k"
+   "next.orly.dev/pkg/encoders/hex"
    "next.orly.dev/pkg/protocol/relayinfo"
    "next.orly.dev/pkg/version"
)

@@ -31,49 +34,68 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {

    var info *relayinfo.T
    supportedNIPs := relayinfo.GetList(
        relayinfo.BasicProtocol,
-       // relayinfo.Authentication,
-       // relayinfo.EncryptedDirectMessage,
+       relayinfo.Authentication,
+       relayinfo.EncryptedDirectMessage,
        relayinfo.EventDeletion,
        relayinfo.RelayInformationDocument,
-       // relayinfo.GenericTagQueries,
+       relayinfo.GenericTagQueries,
        // relayinfo.NostrMarketplace,
        relayinfo.CountingResults,
        relayinfo.EventTreatment,
-       // relayinfo.CommandResults,
+       relayinfo.CommandResults,
        relayinfo.ParameterizedReplaceableEvents,
-       // relayinfo.ExpirationTimestamp,
+       relayinfo.ExpirationTimestamp,
        relayinfo.ProtectedEvents,
        relayinfo.RelayListMetadata,
        relayinfo.SearchCapability,
    )
    if s.Config.ACLMode != "none" {
        supportedNIPs = relayinfo.GetList(
            relayinfo.BasicProtocol,
            relayinfo.Authentication,
-           // relayinfo.EncryptedDirectMessage,
+           relayinfo.EncryptedDirectMessage,
            relayinfo.EventDeletion,
            relayinfo.RelayInformationDocument,
-           // relayinfo.GenericTagQueries,
+           relayinfo.GenericTagQueries,
            // relayinfo.NostrMarketplace,
            relayinfo.CountingResults,
            relayinfo.EventTreatment,
-           // relayinfo.CommandResults,
-           // relayinfo.ParameterizedReplaceableEvents,
-           // relayinfo.ExpirationTimestamp,
+           relayinfo.CommandResults,
+           relayinfo.ParameterizedReplaceableEvents,
+           relayinfo.ExpirationTimestamp,
            relayinfo.ProtectedEvents,
            relayinfo.RelayListMetadata,
            relayinfo.SearchCapability,
        )
    }
    sort.Sort(supportedNIPs)
-   log.T.Ln("supported NIPs", supportedNIPs)
+   log.I.Ln("supported NIPs", supportedNIPs)
+   // Construct description with dashboard URL
+   dashboardURL := s.DashboardURL(r)
+   description := version.Description + " dashboard: " + dashboardURL
+
+   // Get relay identity pubkey as hex
+   var relayPubkey string
+   if skb, err := s.D.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
+       sign := new(p256k.Signer)
+       if err := sign.InitSec(skb); err == nil {
+           relayPubkey = hex.Enc(sign.Pub())
+       }
+   }

    info = &relayinfo.T{
        Name:        s.Config.AppName,
-       Description: version.Description,
+       Description: description,
+       PubKey:      relayPubkey,
        Nips:        supportedNIPs,
        Software:    version.URL,
-       Version:     version.V,
+       Version:     strings.TrimPrefix(version.V, "v"),
        Limitation: relayinfo.Limits{
            AuthRequired:     s.Config.ACLMode != "none",
            RestrictedWrites: s.Config.ACLMode != "none",
            PaymentRequired:  s.Config.MonthlyPriceSats > 0,
        },
-       Icon: "https://cdn.satellite.earth/ac9778868fbf23b63c47c769a74e163377e6ea94d3f0f31711931663d035c4f6.png",
+       Icon: "https://i.nostr.build/6wGXAn7Zaw9mHxFg.png",
    }
    if err := json.NewEncoder(w).Encode(info); chk.E(err) {
    }
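The handler above serializes a NIP-11 relay information document; an illustrative sketch of the JSON it would emit, assuming the standard NIP-11 field names (all values are made up except the icon URL, which appears in the diff):

```go
// Illustrative NIP-11 output of HandleRelayInfo; values are examples only.
const relayInfoExample = `{
  "name": "ORLY",
  "description": "... dashboard: https://relay.example.com",
  "pubkey": "hex-encoded relay identity pubkey",
  "supported_nips": [1, 4, 9, 11, 40, 45, 50],
  "software": "https://next.orly.dev",
  "version": "1.2.3",
  "limitation": {"auth_required": false, "payment_required": true},
  "icon": "https://i.nostr.build/6wGXAn7Zaw9mHxFg.png"
}`
```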
@@ -4,17 +4,18 @@ import (

    "context"
    "errors"
    "fmt"
    "strings"
    "time"

    "github.com/dgraph-io/badger/v4"
    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/acl"
    "next.orly.dev/pkg/encoders/bech32encoding"
    "next.orly.dev/pkg/encoders/envelopes/authenvelope"
    "next.orly.dev/pkg/encoders/envelopes/closedenvelope"
    "next.orly.dev/pkg/encoders/envelopes/eoseenvelope"
    "next.orly.dev/pkg/encoders/envelopes/eventenvelope"
    "next.orly.dev/pkg/encoders/envelopes/okenvelope"
    "next.orly.dev/pkg/encoders/envelopes/reqenvelope"
    "next.orly.dev/pkg/encoders/event"
    "next.orly.dev/pkg/encoders/filter"

@@ -28,15 +29,20 @@ import (

)

func (l *Listener) HandleReq(msg []byte) (err error) {
-   // log.T.F("HandleReq: START processing from %s\n%s\n", l.remote, msg)
+   log.D.F("handling REQ: %s", msg)
+   log.T.F("HandleReq: START processing from %s", l.remote)
    // var rem []byte
    env := reqenvelope.New()
    if _, err = env.Unmarshal(msg); chk.E(err) {
        return normalize.Error.Errorf(err.Error())
    }
    // if len(rem) > 0 {
    //  log.I.F("REQ extra bytes: '%s'", rem)
    // }
+   log.T.C(
+       func() string {
+           return fmt.Sprintf(
+               "REQ sub=%s filters=%d", env.Subscription, len(*env.Filters),
+           )
+       },
+   )
    // send a challenge to the client to auth if an ACL is active
    if acl.Registry.Active.Load() != "none" {
        if err = authenvelope.NewChallengeWith(l.challenge.Load()).

@@ -48,8 +54,9 @@ func (l *Listener) HandleReq(msg []byte) (err error) {

    accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
    switch accessLevel {
    case "none":
-       if err = okenvelope.NewFrom(
-           env.Subscription, false,
+       // For REQ denial, send a CLOSED with auth-required reason (NIP-01)
+       if err = closedenvelope.NewFrom(
+           env.Subscription,
            reason.AuthRequired.F("user not authed or has no read access"),
        ).Write(l); chk.E(err) {
            return

@@ -57,101 +64,134 @@ func (l *Listener) HandleReq(msg []byte) (err error) {

        return
    default:
        // user has read access or better, continue
        // log.D.F("user has %s access", accessLevel)
    }
    var events event.S
+   // Create a single context for all filter queries, tied to the connection context, to prevent leaks and support timely cancellation
+   queryCtx, queryCancel := context.WithTimeout(
+       l.ctx, 30*time.Second,
+   )
+   defer queryCancel()
+
+   // Collect all events from all filters
+   var allEvents event.S
    for _, f := range *env.Filters {
-       // idsLen := 0
-       // kindsLen := 0
-       // authorsLen := 0
-       // tagsLen := 0
-       // if f != nil {
-       //  if f.Ids != nil {
-       //      idsLen = f.Ids.Len()
-       //  }
-       //  if f.Kinds != nil {
-       //      kindsLen = f.Kinds.Len()
-       //  }
-       //  if f.Authors != nil {
-       //      authorsLen = f.Authors.Len()
-       //  }
-       //  if f.Tags != nil {
-       //      tagsLen = f.Tags.Len()
-       //  }
-       // }
-       // log.T.F(
-       //  "REQ %s: filter summary ids=%d kinds=%d authors=%d tags=%d",
-       //  env.Subscription, idsLen, kindsLen, authorsLen, tagsLen,
-       // )
-       if f != nil && f.Authors != nil && f.Authors.Len() > 0 {
-           var authors []string
-           for _, a := range f.Authors.T {
-               authors = append(authors, hex.Enc(a))
-           }
-           // log.T.F("REQ %s: authors=%v", env.Subscription, authors)
-       }
+       if f != nil {
+           // Summarize filter details for diagnostics (avoid internal fields)
+           var kindsLen int
+           if f.Kinds != nil {
+               kindsLen = f.Kinds.Len()
+           }
+           var authorsLen int
+           if f.Authors != nil {
+               authorsLen = f.Authors.Len()
+           }
+           var idsLen int
+           if f.Ids != nil {
+               idsLen = f.Ids.Len()
+           }
+           var dtag string
+           if f.Tags != nil {
+               if d := f.Tags.GetFirst([]byte("d")); d != nil {
+                   dtag = string(d.Value())
+               }
+           }
+           var lim any
+           if f.Limit != nil {
+               lim = *f.Limit
+           }
+           var since any
+           if f.Since != nil {
+               since = f.Since.Int()
+           }
+           var until any
+           if f.Until != nil {
+               until = f.Until.Int()
+           }
+           log.T.C(
+               func() string {
+                   return fmt.Sprintf(
+                       "REQ %s filter: kinds.len=%d authors.len=%d ids.len=%d d=%q limit=%v since=%v until=%v",
+                       env.Subscription, kindsLen, authorsLen, idsLen, dtag,
+                       lim, since, until,
+                   )
+               },
+           )
+       }
        // if f != nil && f.Kinds != nil && f.Kinds.Len() > 0 {
        //  log.T.F("REQ %s: kinds=%v", env.Subscription, f.Kinds.ToUint16())
        // }
        // if f != nil && f.Ids != nil && f.Ids.Len() > 0 {
        //  var ids []string
        //  for _, id := range f.Ids.T {
        //      ids = append(ids, hex.Enc(id))
        //  }
        //  // var lim any
        //  // if pointers.Present(f.Limit) {
        //  //  lim = *f.Limit
        //  // } else {
        //  //  lim = nil
        //  // }
        //  // log.T.F(
        //  //  "REQ %s: ids filter count=%d ids=%v limit=%v", env.Subscription,
        //  //  f.Ids.Len(), ids, lim,
        //  // )
        // }
        if f != nil && pointers.Present(f.Limit) {
            if *f.Limit == 0 {
                continue
            }
        }
-       // Use a separate context for QueryEvents to prevent cancellation issues
-       queryCtx, cancel := context.WithTimeout(
-           context.Background(), 30*time.Second,
-       )
-       defer cancel()
-       // log.T.F(
-       //  "HandleReq: About to QueryEvents for %s, main context done: %v",
-       //  l.remote, l.ctx.Err() != nil,
-       // )
-       if events, err = l.QueryEvents(queryCtx, f); chk.E(err) {
+       var filterEvents event.S
+       if filterEvents, err = l.QueryEvents(queryCtx, f); chk.E(err) {
            if errors.Is(err, badger.ErrDBClosed) {
                return
            }
-           // log.T.F("HandleReq: QueryEvents error for %s: %v", l.remote, err)
+           log.E.F("QueryEvents failed for filter: %v", err)
            err = nil
            continue
        }
-       defer func() {
-           for _, ev := range events {
-               ev.Free()
-           }
-       }()
-       // log.T.F(
-       //  "HandleReq: QueryEvents completed for %s, found %d events",
-       //  l.remote, len(events),
-       // )
+       // Append events from this filter to the overall collection
+       allEvents = append(allEvents, filterEvents...)
    }
+   events = allEvents
+   defer func() {
+       for _, ev := range events {
+           ev.Free()
+       }
+   }()
    var tmp event.S
privCheck:
    for _, ev := range events {
        if kind.IsPrivileged(ev.Kind) &&
            accessLevel != "admin" { // admins can see all events
-           // log.T.C(
-           //  func() string {
-           //      return fmt.Sprintf(
-           //          "checking privileged event %0x", ev.ID,
-           //      )
-           //  },
-           // )
+           // Check for private tag first
+           privateTags := ev.Tags.GetAll([]byte("private"))
+           if len(privateTags) > 0 && accessLevel != "admin" {
+               pk := l.authedPubkey.Load()
+               if pk == nil {
+                   continue // no auth, can't access private events
+               }
+
+               // Convert authenticated pubkey to npub for comparison
+               authedNpub, err := bech32encoding.BinToNpub(pk)
+               if err != nil {
+                   continue // couldn't convert pubkey, skip
+               }
+
+               // Check if authenticated npub is in any private tag
+               authorized := false
+               for _, privateTag := range privateTags {
+                   authorizedNpubs := strings.Split(
+                       string(privateTag.Value()), ",",
+                   )
+                   for _, npub := range authorizedNpubs {
+                       if strings.TrimSpace(npub) == string(authedNpub) {
+                           authorized = true
+                           break
+                       }
+                   }
+                   if authorized {
+                       break
+                   }
+               }
+
+               if !authorized {
+                   continue // not authorized to see this private event
+               }
+
+               tmp = append(tmp, ev)
+               continue
+           }
+
+           if l.Config.ACLMode != "none" &&
+               (kind.IsPrivileged(ev.Kind) && accessLevel != "admin") &&
+               l.authedPubkey.Load() != nil { // admins can see all events
+               log.T.C(
+                   func() string {
+                       return fmt.Sprintf(
+                           "checking privileged event %0x", ev.ID,
+                       )
+                   },
+               )
            pk := l.authedPubkey.Load()
            if pk == nil {
                continue

@@ -175,26 +215,26 @@ privCheck:

                continue
            }
            if utils.FastEqual(pt, pk) {
-               // log.T.C(
-               //  func() string {
-               //      return fmt.Sprintf(
-               //          "privileged event %s is for logged in pubkey %0x",
-               //          ev.ID, pk,
-               //      )
-               //  },
-               // )
+               log.T.C(
+                   func() string {
+                       return fmt.Sprintf(
+                           "privileged event %s is for logged in pubkey %0x",
+                           ev.ID, pk,
+                       )
+                   },
+               )
                tmp = append(tmp, ev)
                continue privCheck
            }
        }
-       // log.T.C(
-       //  func() string {
-       //      return fmt.Sprintf(
-       //          "privileged event %s does not contain the logged in pubkey %0x",
-       //          ev.ID, pk,
-       //      )
-       //  },
-       // )
+       log.T.C(
+           func() string {
+               return fmt.Sprintf(
+                   "privileged event %s does not contain the logged in pubkey %0x",
+                   ev.ID, pk,
+               )
+           },
+       )
        } else {
            tmp = append(tmp, ev)
        }

@@ -202,19 +242,19 @@ privCheck:

    events = tmp
    seen := make(map[string]struct{})
    for _, ev := range events {
-       // log.D.C(
-       //  func() string {
-       //      return fmt.Sprintf(
-       //          "REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
-       //          hex.Enc(ev.ID), ev.Kind,
-       //      )
-       //  },
-       // )
-       // log.T.C(
-       //  func() string {
-       //      return fmt.Sprintf("event:\n%s\n", ev.Serialize())
-       //  },
-       // )
+       log.T.C(
+           func() string {
+               return fmt.Sprintf(
+                   "REQ %s: sending EVENT id=%s kind=%d", env.Subscription,
+                   hex.Enc(ev.ID), ev.Kind,
+               )
+           },
+       )
+       log.T.C(
+           func() string {
+               return fmt.Sprintf("event:\n%s\n", ev.Serialize())
+           },
+       )
        var res *eventenvelope.Result
        if res, err = eventenvelope.NewResultWith(
            env.Subscription, ev,

@@ -229,7 +269,7 @@ privCheck:

    }
    // write the EOSE to signal to the client that all events found have been
    // sent.
-   // log.T.F("sending EOSE to %s", l.remote)
+   log.T.F("sending EOSE to %s", l.remote)
    if err = eoseenvelope.NewFrom(env.Subscription).
        Write(l); chk.E(err) {
        return

@@ -237,10 +277,10 @@ privCheck:

    // if the query was for just Ids, we know there can't be any more results,
    // so cancel the subscription.
    cancel := true
-   // log.T.F(
-   //  "REQ %s: computing cancel/subscription; events_sent=%d",
-   //  env.Subscription, len(events),
-   // )
+   log.T.F(
+       "REQ %s: computing cancel/subscription; events_sent=%d",
+       env.Subscription, len(events),
+   )
    var subbedFilters filter.S
    for _, f := range *env.Filters {
        if f.Ids.Len() < 1 {

@@ -255,10 +295,10 @@ privCheck:

        }
            notFounds = append(notFounds, id)
        }
-       // log.T.F(
-       //  "REQ %s: ids outstanding=%d of %d", env.Subscription,
-       //  len(notFounds), f.Ids.Len(),
-       // )
+       log.T.F(
+           "REQ %s: ids outstanding=%d of %d", env.Subscription,
+           len(notFounds), f.Ids.Len(),
+       )
        // if all were found, don't add to subbedFilters
        if len(notFounds) == 0 {
            continue

@@ -270,8 +310,8 @@ privCheck:

        }
        // also, if we received the limit number of events, subscription ded
        if pointers.Present(f.Limit) {
-           if len(events) < int(*f.Limit) {
-               cancel = false
+           if len(events) >= int(*f.Limit) {
+               cancel = true
            }
        }

@@ -289,12 +329,8 @@ privCheck:

        },
    )
    } else {
-       if err = closedenvelope.NewFrom(
-           env.Subscription, nil,
-       ).Write(l); chk.E(err) {
-           return
-       }
+       // suppress server-sent CLOSED; client will close subscription if desired
    }
-   // log.T.F("HandleReq: COMPLETED processing from %s", l.remote)
+   log.T.F("HandleReq: COMPLETED processing from %s", l.remote)
    return
}
@@ -19,9 +19,8 @@ const (

    DefaultWriteWait      = 10 * time.Second
    DefaultPongWait       = 60 * time.Second
    DefaultPingWait       = DefaultPongWait / 2
-   DefaultReadTimeout    = 7 * time.Second // Read timeout to detect stalled connections
    DefaultWriteTimeout   = 3 * time.Second
-   DefaultMaxMessageSize = 1 * units.Mb
+   DefaultMaxMessageSize = 100 * units.Mb

    // CloseMessage denotes a close control message. The optional message
    // payload contains a numeric code and text. Use the FormatCloseMessage

@@ -39,7 +38,9 @@ const (

func (s *Server) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
    remote := GetRemoteFromReq(r)
    log.T.F("handling websocket connection from %s", remote)

+   // Log comprehensive proxy information for debugging
+   LogProxyInfo(r, "WebSocket connection from "+remote)
    if len(s.Config.IPWhitelist) > 0 {
        for _, ip := range s.Config.IPWhitelist {
            log.T.F("checking IP whitelist: %s", ip)

@@ -56,38 +57,72 @@ whitelist:

    defer cancel()
    var err error
    var conn *websocket.Conn
-   if conn, err = websocket.Accept(
-       w, r, &websocket.AcceptOptions{OriginPatterns: []string{"*"}},
-   ); chk.E(err) {
+   // Configure WebSocket accept options for proxy compatibility
+   acceptOptions := &websocket.AcceptOptions{
+       OriginPatterns: []string{"*"}, // Allow all origins for proxy compatibility
+       // Don't check origin when behind a proxy - let the proxy handle it
+       InsecureSkipVerify: true,
+       // Try to set a higher compression threshold to allow larger messages
+       CompressionMode: websocket.CompressionDisabled,
+   }
+
+   if conn, err = websocket.Accept(w, r, acceptOptions); chk.E(err) {
+       log.E.F("websocket accept failed from %s: %v", remote, err)
        return
    }
+   log.T.F("websocket accepted from %s path=%s", remote, r.URL.String())
+
+   // Set read limit immediately after connection is established
+   conn.SetReadLimit(DefaultMaxMessageSize)
+   log.D.F("set read limit to %d bytes (%d MB) for %s", DefaultMaxMessageSize, DefaultMaxMessageSize/units.Mb, remote)
    defer conn.CloseNow()
    listener := &Listener{
-       ctx:    ctx,
-       Server: s,
-       conn:   conn,
-       remote: remote,
-       req:    r,
+       ctx:       ctx,
+       Server:    s,
+       conn:      conn,
+       remote:    remote,
+       req:       r,
+       startTime: time.Now(),
    }
    chal := make([]byte, 32)
    rand.Read(chal)
    listener.challenge.Store([]byte(hex.Enc(chal)))
    // If admins are configured, immediately prompt client to AUTH (NIP-42)
-   if len(s.Config.Admins) > 0 {
-       // log.D.F("sending initial AUTH challenge to %s", remote)
+   if s.Config.ACLMode != "none" {
+       log.D.F("sending AUTH challenge to %s", remote)
        if err = authenvelope.NewChallengeWith(listener.challenge.Load()).
            Write(listener); chk.E(err) {
+           log.E.F("failed to send AUTH challenge to %s: %v", remote, err)
            return
        }
+       log.D.F("AUTH challenge sent successfully to %s", remote)
    }
    ticker := time.NewTicker(DefaultPingWait)
    go s.Pinger(ctx, conn, ticker, cancel)
    defer func() {
-       // log.D.F("closing websocket connection from %s", remote)
+       log.D.F("closing websocket connection from %s", remote)
+
+       // Cancel context and stop pinger
        cancel()
        ticker.Stop()
+
+       // Cancel all subscriptions for this connection
+       log.D.F("cancelling subscriptions for %s", remote)
+       listener.publishers.Receive(&W{Cancel: true})
+
+       // Log detailed connection statistics
+       dur := time.Since(listener.startTime)
+       log.D.F(
+           "ws connection closed %s: msgs=%d, REQs=%d, EVENTs=%d, duration=%v",
+           remote, listener.msgCount, listener.reqCount, listener.eventCount,
+           dur,
+       )
+
+       // Log any remaining connection state
+       if listener.authedPubkey.Load() != nil {
+           log.D.F("ws connection %s was authenticated", remote)
+       } else {
+           log.D.F("ws connection %s was not authenticated", remote)
+       }
    }()
    for {
        select {

@@ -97,12 +132,10 @@ whitelist:

        }
        var typ websocket.MessageType
        var msg []byte
-       // log.T.F("waiting for message from %s", remote)
+       log.T.F("waiting for message from %s", remote)

-       // Create a read context with timeout to prevent indefinite blocking
-       readCtx, readCancel := context.WithTimeout(ctx, DefaultReadTimeout)
-       typ, msg, err = conn.Read(readCtx)
-       readCancel()
+       // Block waiting for message; rely on pings and context cancellation to detect dead peers
+       typ, msg, err = conn.Read(ctx)

        if err != nil {
            if strings.Contains(

@@ -110,14 +143,6 @@ whitelist:

            ) {
                return
            }
-           // Handle timeout errors - occurs when client becomes unresponsive
-           if strings.Contains(err.Error(), "context deadline exceeded") {
-               log.T.F(
-                   "connection from %s timed out after %v", remote,
-                   DefaultReadTimeout,
-               )
-               return
-           }
            // Handle EOF errors gracefully - these occur when client closes connection
            // or sends incomplete/malformed WebSocket frames
            if strings.Contains(err.Error(), "EOF") ||

@@ -125,6 +150,14 @@ whitelist:

                log.T.F("connection from %s closed: %v", remote, err)
                return
            }
+           // Handle message too big errors specifically
+           if strings.Contains(err.Error(), "MessageTooBig") ||
+               strings.Contains(err.Error(), "read limited at") {
+               log.D.F("client %s hit message size limit: %v", remote, err)
+               // Don't log this as an error since it's a client-side limit
+               // Just close the connection gracefully
+               return
+           }
            status := websocket.CloseStatus(err)
            switch status {
            case websocket.StatusNormalClosure,

@@ -135,25 +168,49 @@ whitelist:

                log.T.F(
                    "connection from %s closed with status: %v", remote, status,
                )
+           case websocket.StatusMessageTooBig:
+               log.D.F("client %s sent message too big: %v", remote, err)
            default:
                log.E.F("unexpected close error from %s: %v", remote, err)
            }
            return
        }
        if typ == PingMessage {
+           log.D.F("received PING from %s, sending PONG", remote)
+           // Create a write context with timeout for pong response
            writeCtx, writeCancel := context.WithTimeout(
                ctx, DefaultWriteTimeout,
            )
+           pongStart := time.Now()
            if err = conn.Write(writeCtx, PongMessage, msg); chk.E(err) {
+               pongDuration := time.Since(pongStart)
+               log.E.F(
+                   "failed to send PONG to %s after %v: %v", remote,
+                   pongDuration, err,
+               )
+               if writeCtx.Err() != nil {
+                   log.E.F(
+                       "PONG write timeout to %s after %v (limit=%v)", remote,
+                       pongDuration, DefaultWriteTimeout,
+                   )
+               }
                writeCancel()
                return
            }
+           pongDuration := time.Since(pongStart)
+           log.D.F("sent PONG to %s successfully in %v", remote, pongDuration)
+           if pongDuration > time.Millisecond*50 {
+               log.D.F("SLOW PONG to %s: %v (>50ms)", remote, pongDuration)
+           }
            writeCancel()
            continue
        }
+       // Log message size for debugging
+       if len(msg) > 1000 { // Only log for larger messages
+           log.D.F("received large message from %s: %d bytes", remote, len(msg))
+       }
        // log.T.F("received message from %s: %s", remote, string(msg))
-       go listener.HandleMessage(msg, remote)
+       listener.HandleMessage(msg, remote)
    }
}

@@ -162,21 +219,51 @@ func (s *Server) Pinger(

    cancel context.CancelFunc,
) {
    defer func() {
+       log.D.F("pinger shutting down")
        cancel()
        ticker.Stop()
    }()
    var err error
+   pingCount := 0
    for {
        select {
        case <-ticker.C:
+           pingCount++
+           log.D.F("sending PING #%d", pingCount)
+
+           // Create a write context with timeout for ping operation
            pingCtx, pingCancel := context.WithTimeout(ctx, DefaultWriteTimeout)
-           if err = conn.Ping(pingCtx); chk.E(err) {
+           pingStart := time.Now()
+
+           if err = conn.Ping(pingCtx); err != nil {
+               pingDuration := time.Since(pingStart)
+               log.E.F(
+                   "PING #%d FAILED after %v: %v", pingCount, pingDuration,
+                   err,
+               )
+
+               if pingCtx.Err() != nil {
+                   log.E.F(
+                       "PING #%d timeout after %v (limit=%v)", pingCount,
+                       pingDuration, DefaultWriteTimeout,
+                   )
+               }
+
+               chk.E(err)
                pingCancel()
                return
            }
+
+           pingDuration := time.Since(pingStart)
+           log.D.F("PING #%d sent successfully in %v", pingCount, pingDuration)
+
+           if pingDuration > time.Millisecond*100 {
+               log.D.F("SLOW PING #%d: %v (>100ms)", pingCount, pingDuration)
+           }
+
+           pingCancel()
        case <-ctx.Done():
            log.T.F("pinger context cancelled after %d pings", pingCount)
            return
        }
    }
@@ -3,6 +3,8 @@ package app

import (
    "net/http"
+   "strings"

+   "lol.mleku.dev/log"
)

// GetRemoteFromReq retrieves the originating IP address of the client from

@@ -67,3 +69,28 @@ func GetRemoteFromReq(r *http.Request) (rr string) {

    }
    return
}

+// LogProxyInfo logs comprehensive proxy information for debugging
+func LogProxyInfo(r *http.Request, prefix string) {
+   proxyHeaders := map[string]string{
+       "X-Forwarded-For":   r.Header.Get("X-Forwarded-For"),
+       "X-Real-IP":         r.Header.Get("X-Real-IP"),
+       "X-Forwarded-Proto": r.Header.Get("X-Forwarded-Proto"),
+       "X-Forwarded-Host":  r.Header.Get("X-Forwarded-Host"),
+       "X-Forwarded-Port":  r.Header.Get("X-Forwarded-Port"),
+       "Forwarded":         r.Header.Get("Forwarded"),
+       "Host":              r.Header.Get("Host"),
+       "User-Agent":        r.Header.Get("User-Agent"),
+   }
+
+   var info []string
+   for header, value := range proxyHeaders {
+       if value != "" {
+           info = append(info, header+":"+value)
+       }
+   }
+
+   if len(info) > 0 {
+       log.T.F("%s proxy info: %s", prefix, strings.Join(info, " "))
+   }
+}
@@ -3,9 +3,11 @@ package app

import (
    "context"
    "net/http"
+   "time"

    "github.com/coder/websocket"
    "lol.mleku.dev/chk"
+   "lol.mleku.dev/log"
    "next.orly.dev/pkg/utils/atomic"
)

@@ -17,6 +19,11 @@ type Listener struct {

    req          *http.Request
    challenge    atomic.Bytes
    authedPubkey atomic.Bytes
+   startTime    time.Time
+   // Diagnostics: per-connection counters
+   msgCount   int
+   reqCount   int
+   eventCount int
}

// Ctx returns the listener's context, but creates a new context for each operation

@@ -26,6 +33,18 @@ func (l *Listener) Ctx() context.Context {

}

func (l *Listener) Write(p []byte) (n int, err error) {
+   start := time.Now()
+   msgLen := len(p)
+
+   // Log message attempt with content preview (first 200 chars for diagnostics)
+   preview := string(p)
+   if len(preview) > 200 {
+       preview = preview[:200] + "..."
+   }
+   log.T.F(
+       "ws->%s attempting write: len=%d preview=%q", l.remote, msgLen, preview,
+   )
+
    // Use a separate context with timeout for writes to prevent race conditions
    // where the main connection context gets cancelled while writing events
    writeCtx, cancel := context.WithTimeout(

@@ -33,9 +52,55 @@ func (l *Listener) Write(p []byte) (n int, err error) {

    )
    defer cancel()

-   if err = l.conn.Write(writeCtx, websocket.MessageText, p); chk.E(err) {
+   // Attempt the write operation
+   writeStart := time.Now()
+   if err = l.conn.Write(writeCtx, websocket.MessageText, p); err != nil {
+       writeDuration := time.Since(writeStart)
+       totalDuration := time.Since(start)
+
+       // Log detailed failure information
+       log.E.F(
+           "ws->%s WRITE FAILED: len=%d duration=%v write_duration=%v error=%v preview=%q",
+           l.remote, msgLen, totalDuration, writeDuration, err, preview,
+       )
+
+       // Check if this is a context timeout
+       if writeCtx.Err() != nil {
+           log.E.F(
+               "ws->%s write timeout after %v (limit=%v)", l.remote,
+               writeDuration, DefaultWriteTimeout,
+           )
+       }
+
+       // Check connection state
+       if l.conn != nil {
+           log.T.F(
+               "ws->%s connection state during failure: remote_addr=%v",
+               l.remote, l.req.RemoteAddr,
+           )
+       }
+
+       chk.E(err) // Still call the original error handler
        return
    }
-   n = len(p)
+
+   // Log successful write with timing
+   writeDuration := time.Since(writeStart)
+   totalDuration := time.Since(start)
+   n = msgLen
+
+   log.T.F(
+       "ws->%s WRITE SUCCESS: len=%d duration=%v write_duration=%v",
+       l.remote, n, totalDuration, writeDuration,
+   )
+
+   // Log slow writes for performance diagnostics
+   if writeDuration > time.Millisecond*100 {
+       log.T.F(
+           "ws->%s SLOW WRITE detected: %v (>100ms) len=%d", l.remote,
+           writeDuration, n,
+       )
+   }
+
    return
}
38  app/main.go

@@ -8,7 +8,8 @@ import (

    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "next.orly.dev/app/config"
-   database "next.orly.dev/pkg/database"
+   "next.orly.dev/pkg/crypto/keys"
+   "next.orly.dev/pkg/database"
    "next.orly.dev/pkg/encoders/bech32encoding"
    "next.orly.dev/pkg/protocol/publish"
)

@@ -45,8 +46,43 @@ func Run(

        publishers: publish.New(NewPublisher(ctx)),
        Admins:     adminKeys,
    }

+   // Initialize sprocket manager
+   l.sprocketManager = NewSprocketManager(ctx, cfg.AppName, cfg.SprocketEnabled)
    // Initialize the user interface
    l.UserInterface()

+   // Ensure a relay identity secret key exists when subscriptions and NWC are enabled
+   if cfg.SubscriptionEnabled && cfg.NWCUri != "" {
+       if skb, e := db.GetOrCreateRelayIdentitySecret(); e != nil {
+           log.E.F("failed to ensure relay identity key: %v", e)
+       } else if pk, e2 := keys.SecretBytesToPubKeyHex(skb); e2 == nil {
+           log.I.F("relay identity loaded (pub=%s)", pk)
+           // ensure relay identity pubkey is considered an admin for ACL follows mode
+           found := false
+           for _, a := range cfg.Admins {
+               if a == pk {
+                   found = true
+                   break
+               }
+           }
+           if !found {
+               cfg.Admins = append(cfg.Admins, pk)
+               log.I.F("added relay identity to admins for follow-list whitelisting")
+           }
+       }
+   }

+   if l.paymentProcessor, err = NewPaymentProcessor(ctx, cfg, db); err != nil {
+       log.E.F("failed to create payment processor: %v", err)
+       // Continue without payment processor
+   } else {
+       if err = l.paymentProcessor.Start(); err != nil {
+           log.E.F("failed to start payment processor: %v", err)
+       } else {
+           log.I.F("payment processor started successfully")
+       }
+   }
    addr := fmt.Sprintf("%s:%d", cfg.Listen, cfg.Port)
    log.I.F("starting listener on http://%s", addr)
    go func() {
894
app/payment_processor.go
Normal file
894
app/payment_processor.go
Normal file
@@ -0,0 +1,894 @@
package app

import (
	"context"
	// std hex not used; use project hex encoder instead
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/dgraph-io/badger/v4"
	"lol.mleku.dev/chk"
	"lol.mleku.dev/log"
	"next.orly.dev/app/config"
	"next.orly.dev/pkg/acl"
	"next.orly.dev/pkg/crypto/p256k"
	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/encoders/bech32encoding"
	"next.orly.dev/pkg/encoders/event"
	"next.orly.dev/pkg/encoders/hex"
	"next.orly.dev/pkg/encoders/json"
	"next.orly.dev/pkg/encoders/kind"
	"next.orly.dev/pkg/encoders/tag"
	"next.orly.dev/pkg/encoders/timestamp"
	"next.orly.dev/pkg/protocol/nwc"
)

// PaymentProcessor handles NWC payment notifications and updates subscriptions
type PaymentProcessor struct {
	nwcClient    *nwc.Client
	db           *database.D
	config       *config.C
	ctx          context.Context
	cancel       context.CancelFunc
	wg           sync.WaitGroup
	dashboardURL string
}
// NewPaymentProcessor creates a new payment processor
func NewPaymentProcessor(
	ctx context.Context, cfg *config.C, db *database.D,
) (pp *PaymentProcessor, err error) {
	if cfg.NWCUri == "" {
		return nil, fmt.Errorf("NWC URI not configured")
	}

	var nwcClient *nwc.Client
	if nwcClient, err = nwc.NewClient(cfg.NWCUri); chk.E(err) {
		return nil, fmt.Errorf("failed to create NWC client: %w", err)
	}

	c, cancel := context.WithCancel(ctx)

	pp = &PaymentProcessor{
		nwcClient: nwcClient,
		db:        db,
		config:    cfg,
		ctx:       c,
		cancel:    cancel,
	}

	return pp, nil
}
// Start begins listening for payment notifications
func (pp *PaymentProcessor) Start() error {
	// start NWC notifications listener
	pp.wg.Add(1)
	go func() {
		defer pp.wg.Done()
		if err := pp.listenForPayments(); err != nil {
			log.E.F("payment processor error: %v", err)
		}
	}()
	// start periodic follow-list sync if subscriptions are enabled
	if pp.config != nil && pp.config.SubscriptionEnabled {
		pp.wg.Add(1)
		go func() {
			defer pp.wg.Done()
			pp.runFollowSyncLoop()
		}()
		// start daily subscription checker
		pp.wg.Add(1)
		go func() {
			defer pp.wg.Done()
			pp.runDailySubscriptionChecker()
		}()
	}
	return nil
}
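Start only launches worker goroutines, and Stop (below) cancels the derived context and then waits on the WaitGroup, so shutdown wiring belongs to the caller. A minimal hedged sketch of that wiring, assuming the constructor arguments shown in app/main.go above (the helper itself is hypothetical):

	// runPaymentProcessor starts the processor and ties its shutdown to ctx.
	func runPaymentProcessor(ctx context.Context, cfg *config.C, db *database.D) (*PaymentProcessor, error) {
		pp, err := NewPaymentProcessor(ctx, cfg, db)
		if err != nil {
			return nil, err
		}
		if err = pp.Start(); err != nil {
			return nil, err
		}
		go func() {
			<-ctx.Done() // when the parent context ends...
			pp.Stop()    // ...cancel workers and wait for them to exit
		}()
		return pp, nil
	}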
// Stop gracefully stops the payment processor
func (pp *PaymentProcessor) Stop() {
	if pp.cancel != nil {
		pp.cancel()
	}
	pp.wg.Wait()
}

// listenForPayments subscribes to NWC notifications and processes payments
func (pp *PaymentProcessor) listenForPayments() error {
	return pp.nwcClient.SubscribeNotifications(pp.ctx, pp.handleNotification)
}

// runFollowSyncLoop periodically syncs the relay identity follow list with active subscribers
func (pp *PaymentProcessor) runFollowSyncLoop() {
	t := time.NewTicker(10 * time.Minute)
	defer t.Stop()
	// do an initial sync shortly after start
	_ = pp.syncFollowList()
	for {
		select {
		case <-pp.ctx.Done():
			return
		case <-t.C:
			if err := pp.syncFollowList(); err != nil {
				log.W.F("follow list sync failed: %v", err)
			}
		}
	}
}

// runDailySubscriptionChecker checks once daily for subscription expiry warnings and trial reminders
func (pp *PaymentProcessor) runDailySubscriptionChecker() {
	t := time.NewTicker(24 * time.Hour)
	defer t.Stop()
	// do an initial check shortly after start
	_ = pp.checkSubscriptionStatus()
	for {
		select {
		case <-pp.ctx.Done():
			return
		case <-t.C:
			if err := pp.checkSubscriptionStatus(); err != nil {
				log.W.F("subscription status check failed: %v", err)
			}
		}
	}
}
// syncFollowList builds a kind-3 event from the relay identity containing only active subscribers
func (pp *PaymentProcessor) syncFollowList() error {
	// ensure we have a relay identity secret
	skb, err := pp.db.GetRelayIdentitySecret()
	if err != nil || len(skb) != 32 {
		return nil // nothing to do if no identity
	}
	// collect active subscribers
	actives, err := pp.getActiveSubscriberPubkeys()
	if err != nil {
		return err
	}
	// signer
	sign := new(p256k.Signer)
	if err := sign.InitSec(skb); err != nil {
		return err
	}
	// build follow list event
	ev := event.New()
	ev.Kind = kind.FollowList.K
	ev.Pubkey = sign.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Tags = tag.NewS()
	for _, pk := range actives {
		*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(pk)))
	}
	// sign and save
	ev.Sign(sign)
	if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
		return err
	}
	log.I.F(
		"updated relay follow list with %d active subscribers", len(actives),
	)
	return nil
}
// getActiveSubscriberPubkeys scans the subscription records and returns active ones
func (pp *PaymentProcessor) getActiveSubscriberPubkeys() ([][]byte, error) {
	prefix := []byte("sub:")
	now := time.Now()
	var out [][]byte
	err := pp.db.DB.View(
		func(txn *badger.Txn) error {
			it := txn.NewIterator(badger.DefaultIteratorOptions)
			defer it.Close()
			for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
				item := it.Item()
				key := item.KeyCopy(nil)
				// key format: sub:<hexpub>
				hexpub := string(key[len(prefix):])
				var sub database.Subscription
				if err := item.Value(
					func(val []byte) error {
						return json.Unmarshal(val, &sub)
					},
				); err != nil {
					return err
				}
				if now.Before(sub.TrialEnd) || (!sub.PaidUntil.IsZero() && now.Before(sub.PaidUntil)) {
					if b, err := hex.Dec(hexpub); err == nil {
						out = append(out, b)
					}
				}
			}
			return nil
		},
	)
	return out, err
}
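The scan above implies a key/value layout of sub:<hexpub> mapping to a JSON-encoded database.Subscription. A hedged sketch of what writing one such record might look like, assuming the struct carries at least the TrialEnd and PaidUntil fields the checks read (the field set and this write path are inferred, not confirmed by the diff):

	// hypothetical illustration of the inferred record layout
	key := []byte("sub:" + hex.Enc(pubkey))
	val, err := json.Marshal(database.Subscription{
		TrialEnd:  time.Now().AddDate(0, 0, 30), // 30-day trial window
		PaidUntil: time.Time{},                  // zero until first payment
	})
	if err == nil {
		err = db.DB.Update(func(txn *badger.Txn) error {
			return txn.Set(key, val)
		})
	}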
// checkSubscriptionStatus scans all subscriptions and creates warning/reminder notes
func (pp *PaymentProcessor) checkSubscriptionStatus() error {
	prefix := []byte("sub:")
	now := time.Now()
	sevenDaysFromNow := now.AddDate(0, 0, 7)

	return pp.db.DB.View(
		func(txn *badger.Txn) error {
			it := txn.NewIterator(badger.DefaultIteratorOptions)
			defer it.Close()
			for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
				item := it.Item()
				key := item.KeyCopy(nil)
				// key format: sub:<hexpub>
				hexpub := string(key[len(prefix):])

				var sub database.Subscription
				if err := item.Value(
					func(val []byte) error {
						return json.Unmarshal(val, &sub)
					},
				); err != nil {
					continue // skip invalid subscription records
				}

				pubkey, err := hex.Dec(hexpub)
				if err != nil {
					continue // skip invalid pubkey
				}

				// Check if paid subscription is expiring in 7 days
				if !sub.PaidUntil.IsZero() {
					// Format dates for comparison (ignore time component)
					paidUntilDate := sub.PaidUntil.Truncate(24 * time.Hour)
					sevenDaysDate := sevenDaysFromNow.Truncate(24 * time.Hour)

					if paidUntilDate.Equal(sevenDaysDate) {
						go pp.createExpiryWarningNote(pubkey, sub.PaidUntil)
					}
				}

				// Check if user is on trial (no paid subscription, trial not expired)
				if sub.PaidUntil.IsZero() && now.Before(sub.TrialEnd) {
					go pp.createTrialReminderNote(pubkey, sub.TrialEnd)
				}
			}
			return nil
		},
	)
}
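One subtlety in the comparison above: time.Truncate(24h) rounds relative to the Unix epoch in UTC, not the local calendar day, so the equality test matches during exactly one UTC-day window. A small runnable illustration (dates chosen arbitrarily):

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		paidUntil := time.Date(2025, 3, 15, 18, 30, 0, 0, time.UTC)
		now := time.Date(2025, 3, 8, 9, 0, 0, 0, time.UTC)
		sevenDays := now.AddDate(0, 0, 7) // 2025-03-15 09:00 UTC
		// both sides truncate to 2025-03-15 00:00 UTC, so the warning fires
		fmt.Println(paidUntil.Truncate(24 * time.Hour).
			Equal(sevenDays.Truncate(24 * time.Hour))) // true
	}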
// createExpiryWarningNote creates a warning note for users whose paid subscription expires in 7 days
func (pp *PaymentProcessor) createExpiryWarningNote(userPubkey []byte, expiryTime time.Time) error {
	// Get relay identity secret to sign the note
	skb, err := pp.db.GetRelayIdentitySecret()
	if err != nil || len(skb) != 32 {
		return fmt.Errorf("no relay identity configured")
	}

	// Initialize signer
	sign := new(p256k.Signer)
	if err := sign.InitSec(skb); err != nil {
		return fmt.Errorf("failed to initialize signer: %w", err)
	}

	monthlyPrice := pp.config.MonthlyPriceSats
	if monthlyPrice <= 0 {
		monthlyPrice = 6000
	}

	// Get relay npub for content link
	relayNpubForContent, err := bech32encoding.BinToNpub(sign.Pub())
	if err != nil {
		return fmt.Errorf("failed to encode relay npub: %w", err)
	}

	// Create the warning note content
	content := fmt.Sprintf(`⚠️ Subscription Expiring Soon ⚠️

Your paid subscription to this relay will expire in 7 days on %s.

💰 To extend your subscription:
- Monthly price: %d sats
- Zap this note with your payment amount
- Each %d sats = 30 days of access

⚡ Payment Instructions:
1. Use any Lightning wallet that supports zaps
2. Zap this note with your payment
3. Your subscription will be automatically extended

Don't lose access to your private relay! Extend your subscription today.

Relay: nostr:%s

Log in to the relay dashboard to access your configuration at: %s`,
		expiryTime.Format("2006-01-02 15:04:05 UTC"), monthlyPrice, monthlyPrice, string(relayNpubForContent), pp.getDashboardURL())

	// Build the event
	ev := event.New()
	ev.Kind = kind.TextNote.K // Kind 1 for text note
	ev.Pubkey = sign.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Content = []byte(content)
	ev.Tags = tag.NewS()

	// Add "p" tag for the user
	*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(userPubkey)))

	// Add expiration tag (5 days from creation)
	noteExpiry := time.Now().AddDate(0, 0, 5)
	*ev.Tags = append(*ev.Tags, tag.NewFromAny("expiration", fmt.Sprintf("%d", noteExpiry.Unix())))

	// Add "private" tag with authorized npubs (user and relay)
	var authorizedNpubs []string

	// Add user npub
	userNpub, err := bech32encoding.BinToNpub(userPubkey)
	if err == nil {
		authorizedNpubs = append(authorizedNpubs, string(userNpub))
	}

	// Add relay npub
	relayNpub, err := bech32encoding.BinToNpub(sign.Pub())
	if err == nil {
		authorizedNpubs = append(authorizedNpubs, string(relayNpub))
	}

	// Create the private tag with comma-separated npubs
	if len(authorizedNpubs) > 0 {
		privateTagValue := strings.Join(authorizedNpubs, ",")
		*ev.Tags = append(*ev.Tags, tag.NewFromAny("private", privateTagValue))
	}

	// Add a special tag to mark this as an expiry warning
	*ev.Tags = append(*ev.Tags, tag.NewFromAny("warning", "subscription-expiry"))

	// Sign and save the event
	ev.Sign(sign)
	if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
		return fmt.Errorf("failed to save expiry warning note: %w", err)
	}

	log.I.F("created expiry warning note for user %s (expires %s)", hex.Enc(userPubkey), expiryTime.Format("2006-01-02"))
	return nil
}
// createTrialReminderNote creates a reminder note for users on trial to support the relay
func (pp *PaymentProcessor) createTrialReminderNote(userPubkey []byte, trialEnd time.Time) error {
	// Get relay identity secret to sign the note
	skb, err := pp.db.GetRelayIdentitySecret()
	if err != nil || len(skb) != 32 {
		return fmt.Errorf("no relay identity configured")
	}

	// Initialize signer
	sign := new(p256k.Signer)
	if err := sign.InitSec(skb); err != nil {
		return fmt.Errorf("failed to initialize signer: %w", err)
	}

	monthlyPrice := pp.config.MonthlyPriceSats
	if monthlyPrice <= 0 {
		monthlyPrice = 6000
	}

	// Calculate daily rate
	dailyRate := monthlyPrice / 30

	// Get relay npub for content link
	relayNpubForContent, err := bech32encoding.BinToNpub(sign.Pub())
	if err != nil {
		return fmt.Errorf("failed to encode relay npub: %w", err)
	}

	// Create the reminder note content
	content := fmt.Sprintf(`🆓 Free Trial Reminder 🆓

You're currently using this relay for FREE! Your trial expires on %s.

🙏 Support Relay Operations:
This relay provides you with private, censorship-resistant communication. Please consider supporting its continued operation.

💰 Subscription Details:
- Monthly price: %d sats (%d sats/day)
- Fair pricing for premium service
- Helps keep the relay running 24/7

⚡ How to Subscribe:
Simply zap this note with your payment amount:
- Each %d sats = 30 days of access
- Payment is processed automatically
- No account setup required

Thank you for considering supporting decentralized communication!

Relay: nostr:%s

Log in to the relay dashboard to access your configuration at: %s`,
		trialEnd.Format("2006-01-02 15:04:05 UTC"), monthlyPrice, dailyRate, monthlyPrice, string(relayNpubForContent), pp.getDashboardURL())

	// Build the event
	ev := event.New()
	ev.Kind = kind.TextNote.K // Kind 1 for text note
	ev.Pubkey = sign.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Content = []byte(content)
	ev.Tags = tag.NewS()

	// Add "p" tag for the user
	*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(userPubkey)))

	// Add expiration tag (5 days from creation)
	noteExpiry := time.Now().AddDate(0, 0, 5)
	*ev.Tags = append(*ev.Tags, tag.NewFromAny("expiration", fmt.Sprintf("%d", noteExpiry.Unix())))

	// Add "private" tag with authorized npubs (user and relay)
	var authorizedNpubs []string

	// Add user npub
	userNpub, err := bech32encoding.BinToNpub(userPubkey)
	if err == nil {
		authorizedNpubs = append(authorizedNpubs, string(userNpub))
	}

	// Add relay npub
	relayNpub, err := bech32encoding.BinToNpub(sign.Pub())
	if err == nil {
		authorizedNpubs = append(authorizedNpubs, string(relayNpub))
	}

	// Create the private tag with comma-separated npubs
	if len(authorizedNpubs) > 0 {
		privateTagValue := strings.Join(authorizedNpubs, ",")
		*ev.Tags = append(*ev.Tags, tag.NewFromAny("private", privateTagValue))
	}

	// Add a special tag to mark this as a trial reminder
	*ev.Tags = append(*ev.Tags, tag.NewFromAny("reminder", "trial-support"))

	// Sign and save the event
	ev.Sign(sign)
	if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
		return fmt.Errorf("failed to save trial reminder note: %w", err)
	}

	log.I.F("created trial reminder note for user %s (trial ends %s)", hex.Enc(userPubkey), trialEnd.Format("2006-01-02"))
	return nil
}
// handleNotification processes incoming payment notifications
func (pp *PaymentProcessor) handleNotification(
	notificationType string, notification map[string]any,
) error {
	// Only process payment_received notifications
	if notificationType != "payment_received" {
		return nil
	}

	amount, ok := notification["amount"].(float64)
	if !ok {
		return fmt.Errorf("invalid amount")
	}

	// Prefer explicit payer/relay pubkeys if provided in metadata
	var payerPubkey []byte
	var userNpub string
	if metadata, ok := notification["metadata"].(map[string]any); ok {
		if s, ok := metadata["payer_pubkey"].(string); ok && s != "" {
			if pk, err := decodeAnyPubkey(s); err == nil {
				payerPubkey = pk
			}
		}
		if payerPubkey == nil {
			if s, ok := metadata["sender_pubkey"].(string); ok && s != "" { // alias
				if pk, err := decodeAnyPubkey(s); err == nil {
					payerPubkey = pk
				}
			}
		}
		// Optional: the intended subscriber npub (for backwards compat)
		if userNpub == "" {
			if npubField, ok := metadata["npub"].(string); ok {
				userNpub = npubField
			}
		}
		// If relay identity pubkey is provided, verify it matches ours
		if s, ok := metadata["relay_pubkey"].(string); ok && s != "" {
			if rpk, err := decodeAnyPubkey(s); err == nil {
				if skb, err := pp.db.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
					var signer p256k.Signer
					if err := signer.InitSec(skb); err == nil {
						if !strings.EqualFold(hex.Enc(rpk), hex.Enc(signer.Pub())) {
							log.W.F("relay_pubkey in payment metadata does not match this relay identity: got %s want %s", hex.Enc(rpk), hex.Enc(signer.Pub()))
						}
					}
				}
			}
		}
	}

	// Fallback: extract npub from description or metadata
	description, _ := notification["description"].(string)
	if userNpub == "" {
		userNpub = pp.extractNpubFromDescription(description)
	}

	var pubkey []byte
	var err error
	if payerPubkey != nil {
		pubkey = payerPubkey
	} else {
		if userNpub == "" {
			return fmt.Errorf("no payer_pubkey or npub provided in payment notification")
		}
		pubkey, err = pp.npubToPubkey(userNpub)
		if err != nil {
			return fmt.Errorf("invalid npub: %w", err)
		}
	}

	satsReceived := int64(amount / 1000)
	monthlyPrice := pp.config.MonthlyPriceSats
	if monthlyPrice <= 0 {
		monthlyPrice = 6000
	}

	days := int((float64(satsReceived) / float64(monthlyPrice)) * 30)
	if days < 1 {
		return fmt.Errorf("payment amount too small")
	}

	if err := pp.db.ExtendSubscription(pubkey, days); err != nil {
		return fmt.Errorf("failed to extend subscription: %w", err)
	}

	// Record payment history
	invoice, _ := notification["invoice"].(string)
	preimage, _ := notification["preimage"].(string)
	if err := pp.db.RecordPayment(
		pubkey, satsReceived, invoice, preimage,
	); err != nil {
		log.E.F("failed to record payment: %v", err)
	}

	// Log helpful identifiers
	var payerHex = hex.Enc(pubkey)
	if userNpub == "" {
		log.I.F("payment processed: payer %s %d sats -> %d days", payerHex, satsReceived, days)
	} else {
		log.I.F("payment processed: %s (%s) %d sats -> %d days", userNpub, payerHex, satsReceived, days)
	}

	// Update ACL follows cache and relay follow list immediately
	if pp.config != nil && pp.config.ACLMode == "follows" {
		acl.Registry.AddFollow(pubkey)
	}
	// Trigger an immediate follow-list sync in background (best-effort)
	go func() { _ = pp.syncFollowList() }()

	// Create a note with payment confirmation and private tag
	if err := pp.createPaymentNote(pubkey, satsReceived, days); err != nil {
		log.E.F("failed to create payment note: %v", err)
	}

	return nil
}
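The NWC amount arrives in millisats, so the conversion chain above is msats / 1000 → sats, then days = sats / monthlyPrice × 30, truncated toward zero. A worked example under the default 6000-sats monthly price:

	package main

	import "fmt"

	func main() {
		amountMsats := 3_000_000.0                // NWC reports millisats
		satsReceived := int64(amountMsats / 1000) // 3000 sats
		monthlyPrice := int64(6000)               // default price
		days := int((float64(satsReceived) / float64(monthlyPrice)) * 30)
		fmt.Println(days) // 15; anything under 200 sats truncates to 0 and is rejected
	}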
// createPaymentNote creates a note recording the payment with private tag for authorization
func (pp *PaymentProcessor) createPaymentNote(payerPubkey []byte, satsReceived int64, days int) error {
	// Get relay identity secret to sign the note
	skb, err := pp.db.GetRelayIdentitySecret()
	if err != nil || len(skb) != 32 {
		return fmt.Errorf("no relay identity configured")
	}

	// Initialize signer
	sign := new(p256k.Signer)
	if err := sign.InitSec(skb); err != nil {
		return fmt.Errorf("failed to initialize signer: %w", err)
	}

	// Get subscription info to determine expiry
	sub, err := pp.db.GetSubscription(payerPubkey)
	if err != nil {
		return fmt.Errorf("failed to get subscription: %w", err)
	}

	var expiryTime time.Time
	if sub != nil && !sub.PaidUntil.IsZero() {
		expiryTime = sub.PaidUntil
	} else {
		expiryTime = time.Now().AddDate(0, 0, days)
	}

	// Get relay npub for content link
	relayNpubForContent, err := bech32encoding.BinToNpub(sign.Pub())
	if err != nil {
		return fmt.Errorf("failed to encode relay npub: %w", err)
	}

	// Create the note content with nostr:npub link and dashboard link
	content := fmt.Sprintf("Payment received: %d sats for %d days. Subscription expires: %s\n\nRelay: nostr:%s\n\nLog in to the relay dashboard to access your configuration at: %s",
		satsReceived, days, expiryTime.Format("2006-01-02 15:04:05 UTC"), string(relayNpubForContent), pp.getDashboardURL())

	// Build the event
	ev := event.New()
	ev.Kind = kind.TextNote.K // Kind 1 for text note
	ev.Pubkey = sign.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Content = []byte(content)
	ev.Tags = tag.NewS()

	// Add "p" tag for the payer
	*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(payerPubkey)))

	// Add expiration tag (5 days from creation)
	noteExpiry := time.Now().AddDate(0, 0, 5)
	*ev.Tags = append(*ev.Tags, tag.NewFromAny("expiration", fmt.Sprintf("%d", noteExpiry.Unix())))

	// Add "private" tag with authorized npubs (payer and relay)
	var authorizedNpubs []string

	// Add payer npub
	payerNpub, err := bech32encoding.BinToNpub(payerPubkey)
	if err == nil {
		authorizedNpubs = append(authorizedNpubs, string(payerNpub))
	}

	// Add relay npub
	relayNpub, err := bech32encoding.BinToNpub(sign.Pub())
	if err == nil {
		authorizedNpubs = append(authorizedNpubs, string(relayNpub))
	}

	// Create the private tag with comma-separated npubs
	if len(authorizedNpubs) > 0 {
		privateTagValue := strings.Join(authorizedNpubs, ",")
		*ev.Tags = append(*ev.Tags, tag.NewFromAny("private", privateTagValue))
	}

	// Sign and save the event
	ev.Sign(sign)
	if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
		return fmt.Errorf("failed to save payment note: %w", err)
	}

	log.I.F("created payment note for %s with private authorization", hex.Enc(payerPubkey))
	return nil
}
// CreateWelcomeNote creates a welcome note for first-time users with private tag for authorization
func (pp *PaymentProcessor) CreateWelcomeNote(userPubkey []byte) error {
	// Get relay identity secret to sign the note
	skb, err := pp.db.GetRelayIdentitySecret()
	if err != nil || len(skb) != 32 {
		return fmt.Errorf("no relay identity configured")
	}

	// Initialize signer
	sign := new(p256k.Signer)
	if err := sign.InitSec(skb); err != nil {
		return fmt.Errorf("failed to initialize signer: %w", err)
	}

	monthlyPrice := pp.config.MonthlyPriceSats
	if monthlyPrice <= 0 {
		monthlyPrice = 6000
	}

	// Get relay npub for content link
	relayNpubForContent, err := bech32encoding.BinToNpub(sign.Pub())
	if err != nil {
		return fmt.Errorf("failed to encode relay npub: %w", err)
	}

	// Create the welcome note content with nostr:npub link
	content := fmt.Sprintf(`Welcome to the relay! 🎉

You have a FREE 30-day trial that started when you first logged in.

💰 Subscription Details:
- Monthly price: %d sats
- Trial period: 30 days from first login

💡 How to Subscribe:
To extend your subscription after the trial ends, simply zap this note with the amount you want to pay. Each %d sats = 30 days of access.

⚡ Payment Instructions:
1. Use any Lightning wallet that supports zaps
2. Zap this note with your payment
3. Your subscription will be automatically extended

Relay: nostr:%s

Log in to the relay dashboard to access your configuration at: %s

Enjoy your time on the relay!`, monthlyPrice, monthlyPrice, string(relayNpubForContent), pp.getDashboardURL())

	// Build the event
	ev := event.New()
	ev.Kind = kind.TextNote.K // Kind 1 for text note
	ev.Pubkey = sign.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Content = []byte(content)
	ev.Tags = tag.NewS()

	// Add "p" tag for the user
	*ev.Tags = append(*ev.Tags, tag.NewFromAny("p", hex.Enc(userPubkey)))

	// Add expiration tag (5 days from creation)
	noteExpiry := time.Now().AddDate(0, 0, 5)
	*ev.Tags = append(*ev.Tags, tag.NewFromAny("expiration", fmt.Sprintf("%d", noteExpiry.Unix())))

	// Add "private" tag with authorized npubs (user and relay)
	var authorizedNpubs []string

	// Add user npub
	userNpub, err := bech32encoding.BinToNpub(userPubkey)
	if err == nil {
		authorizedNpubs = append(authorizedNpubs, string(userNpub))
	}

	// Add relay npub
	relayNpub, err := bech32encoding.BinToNpub(sign.Pub())
	if err == nil {
		authorizedNpubs = append(authorizedNpubs, string(relayNpub))
	}

	// Create the private tag with comma-separated npubs
	if len(authorizedNpubs) > 0 {
		privateTagValue := strings.Join(authorizedNpubs, ",")
		*ev.Tags = append(*ev.Tags, tag.NewFromAny("private", privateTagValue))
	}

	// Add a special tag to mark this as a welcome note
	*ev.Tags = append(*ev.Tags, tag.NewFromAny("welcome", "first-time-user"))

	// Sign and save the event
	ev.Sign(sign)
	if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
		return fmt.Errorf("failed to save welcome note: %w", err)
	}

	log.I.F("created welcome note for first-time user %s", hex.Enc(userPubkey))
	return nil
}
// SetDashboardURL sets the dynamic dashboard URL based on HTTP request
func (pp *PaymentProcessor) SetDashboardURL(url string) {
	pp.dashboardURL = url
}

// getDashboardURL returns the dashboard URL for the relay
func (pp *PaymentProcessor) getDashboardURL() string {
	// Use dynamic URL if available
	if pp.dashboardURL != "" {
		return pp.dashboardURL
	}
	// Fallback to static config
	if pp.config.RelayURL != "" {
		return pp.config.RelayURL
	}
	// Default fallback if no URL is configured
	return "https://your-relay.example.com"
}
// extractNpubFromDescription extracts an npub from the payment description
func (pp *PaymentProcessor) extractNpubFromDescription(description string) string {
	// check if the entire description is just an npub
	description = strings.TrimSpace(description)
	if strings.HasPrefix(description, "npub1") && len(description) == 63 {
		return description
	}

	// Look for npub1... pattern in the description
	parts := strings.Fields(description)
	for _, part := range parts {
		if strings.HasPrefix(part, "npub1") && len(part) == 63 {
			return part
		}
	}

	return ""
}

// npubToPubkey converts an npub string to pubkey bytes
func (pp *PaymentProcessor) npubToPubkey(npubStr string) ([]byte, error) {
	// Validate npub format
	if !strings.HasPrefix(npubStr, "npub1") || len(npubStr) != 63 {
		return nil, fmt.Errorf("invalid npub format")
	}

	// Decode using bech32encoding
	prefix, value, err := bech32encoding.Decode([]byte(npubStr))
	if err != nil {
		return nil, fmt.Errorf("failed to decode npub: %w", err)
	}

	if !strings.EqualFold(string(prefix), "npub") {
		return nil, fmt.Errorf("invalid prefix: %s", string(prefix))
	}

	pubkey, ok := value.([]byte)
	if !ok {
		return nil, fmt.Errorf("decoded value is not []byte")
	}

	return pubkey, nil
}
// UpdateRelayProfile creates or updates the relay's kind 0 profile with subscription information
func (pp *PaymentProcessor) UpdateRelayProfile() error {
	// Get relay identity secret to sign the profile
	skb, err := pp.db.GetRelayIdentitySecret()
	if err != nil || len(skb) != 32 {
		return fmt.Errorf("no relay identity configured")
	}

	// Initialize signer
	sign := new(p256k.Signer)
	if err := sign.InitSec(skb); err != nil {
		return fmt.Errorf("failed to initialize signer: %w", err)
	}

	monthlyPrice := pp.config.MonthlyPriceSats
	if monthlyPrice <= 0 {
		monthlyPrice = 6000
	}

	// Calculate daily rate
	dailyRate := monthlyPrice / 30

	// Get relay wss:// URL - use dashboard URL but with wss:// scheme
	relayURL := strings.Replace(pp.getDashboardURL(), "https://", "wss://", 1)

	// Create profile content as JSON
	profileContent := fmt.Sprintf(`{
	"name": "Relay Bot",
	"about": "This relay requires a subscription to access. Zap any of my notes to pay for access. Monthly price: %d sats (%d sats/day). Relay: %s",
	"lud16": "",
	"nip05": "",
	"website": "%s"
}`, monthlyPrice, dailyRate, relayURL, pp.getDashboardURL())

	// Build the profile event
	ev := event.New()
	ev.Kind = kind.ProfileMetadata.K // Kind 0 for profile metadata
	ev.Pubkey = sign.Pub()
	ev.CreatedAt = timestamp.Now().V
	ev.Content = []byte(profileContent)
	ev.Tags = tag.NewS()

	// Sign and save the event
	ev.Sign(sign)
	if _, _, err := pp.db.SaveEvent(pp.ctx, ev); err != nil {
		return fmt.Errorf("failed to save relay profile: %w", err)
	}

	log.I.F("updated relay profile with subscription information")
	return nil
}
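Building the kind-0 content with Sprintf works while the interpolated values stay plain, but a quote or newline in a future field would corrupt the JSON. A hedged alternative sketch using the standard library's encoding/json and a typed profile (the struct is illustrative; field names mirror the literal above):

	// hypothetical typed profile, marshalled instead of string-formatted
	type relayProfile struct {
		Name    string `json:"name"`
		About   string `json:"about"`
		Lud16   string `json:"lud16"`
		Nip05   string `json:"nip05"`
		Website string `json:"website"`
	}

	body, err := json.Marshal(relayProfile{
		Name:    "Relay Bot",
		About:   fmt.Sprintf("Monthly price: %d sats (%d sats/day). Relay: %s", monthlyPrice, dailyRate, relayURL),
		Website: pp.getDashboardURL(),
	})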
// decodeAnyPubkey decodes a public key from either hex string or npub format
func decodeAnyPubkey(s string) ([]byte, error) {
	s = strings.TrimSpace(s)
	if strings.HasPrefix(s, "npub1") {
		prefix, value, err := bech32encoding.Decode([]byte(s))
		if err != nil {
			return nil, fmt.Errorf("failed to decode npub: %w", err)
		}
		if !strings.EqualFold(string(prefix), "npub") {
			return nil, fmt.Errorf("invalid prefix: %s", string(prefix))
		}
		b, ok := value.([]byte)
		if !ok {
			return nil, fmt.Errorf("decoded value is not []byte")
		}
		return b, nil
	}
	// assume hex-encoded public key
	return hex.Dec(s)
}
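decodeAnyPubkey accepting either encoding is what lets the payment metadata fields above carry hex or npub interchangeably; a quick usage sketch (the values are truncated placeholders, not real keys):

	// both forms resolve to the same 32-byte public key for equivalent inputs
	pkFromNpub, err1 := decodeAnyPubkey("npub1...") // bech32-encoded form
	pkFromHex, err2 := decodeAnyPubkey("a1b2c3...") // 64-char hex form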
@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/coder/websocket"
	"lol.mleku.dev/chk"
@@ -210,39 +211,68 @@ func (p *P) Deliver(ev *event.E) {
					break
				}
			}
		}
		if !allowed {
			// Skip delivery for this subscriber
			continue
		}
	}
	var res *eventenvelope.Result
	if res, err = eventenvelope.NewResultWith(d.id, ev); chk.E(err) {
		continue
	}
	// Use a separate context with timeout for writes to prevent race conditions
	// where the publisher context gets cancelled while writing events
	writeCtx, cancel := context.WithTimeout(
		context.Background(), DefaultWriteTimeout,
	)
	defer cancel()
	}
	if !allowed {
		log.D.F("subscription delivery DENIED for privileged event %s to %s (auth mismatch)",
			hex.Enc(ev.ID), d.sub.remote)
		// Skip delivery for this subscriber
		continue
	}
	}

	var res *eventenvelope.Result
	if res, err = eventenvelope.NewResultWith(d.id, ev); chk.E(err) {
		log.E.F("failed to create event envelope for %s to %s: %v",
			hex.Enc(ev.ID), d.sub.remote, err)
		continue
	}

	// Log delivery attempt
	msgData := res.Marshal(nil)
	log.D.F("attempting delivery of event %s (kind=%d, len=%d) to subscription %s @ %s",
		hex.Enc(ev.ID), ev.Kind, len(msgData), d.id, d.sub.remote)

	// Use a separate context with timeout for writes to prevent race conditions
	// where the publisher context gets cancelled while writing events
	writeCtx, cancel := context.WithTimeout(
		context.Background(), DefaultWriteTimeout,
	)
	defer cancel()

	if err = d.w.Write(
		writeCtx, websocket.MessageText, res.Marshal(nil),
	); err != nil {
		// On error, remove the subscriber connection safely
		p.removeSubscriber(d.w)
		_ = d.w.CloseNow()
		continue
	}
	log.D.C(
		func() string {
			return fmt.Sprintf(
				"dispatched event %0x to subscription %s, %s",
				ev.ID, d.id, d.sub.remote,
			)
		},
	)
	deliveryStart := time.Now()
	if err = d.w.Write(
		writeCtx, websocket.MessageText, msgData,
	); err != nil {
		deliveryDuration := time.Since(deliveryStart)

		// Log detailed failure information
		log.E.F("subscription delivery FAILED: event=%s to=%s sub=%s duration=%v error=%v",
			hex.Enc(ev.ID), d.sub.remote, d.id, deliveryDuration, err)

		// Check for timeout specifically
		if writeCtx.Err() != nil {
			log.E.F("subscription delivery TIMEOUT: event=%s to=%s after %v (limit=%v)",
				hex.Enc(ev.ID), d.sub.remote, deliveryDuration, DefaultWriteTimeout)
		}

		// Log connection cleanup
		log.D.F("removing failed subscriber connection: %s", d.sub.remote)

		// On error, remove the subscriber connection safely
		p.removeSubscriber(d.w)
		_ = d.w.CloseNow()
		continue
	}

	deliveryDuration := time.Since(deliveryStart)
	log.D.F("subscription delivery SUCCESS: event=%s to=%s sub=%s duration=%v len=%d",
		hex.Enc(ev.ID), d.sub.remote, d.id, deliveryDuration, len(msgData))

	// Log slow deliveries for performance monitoring
	if deliveryDuration > time.Millisecond*50 {
		log.D.F("SLOW subscription delivery: event=%s to=%s duration=%v (>50ms)",
			hex.Enc(ev.ID), d.sub.remote, deliveryDuration)
	}
	}
}
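One caveat visible in the delivery loop above: defer cancel() inside a loop only fires when Deliver returns, so each iteration's timeout context lingers until then. A hedged sketch of scoping the write per iteration instead (writeOne is a hypothetical helper, not part of the diff):

	// writeOne scopes the timeout context to a single delivery attempt,
	// releasing its timer as soon as the write finishes.
	func writeOne(w *websocket.Conn, msg []byte) error {
		ctx, cancel := context.WithTimeout(context.Background(), DefaultWriteTimeout)
		defer cancel() // runs at the end of this call, not the outer loop
		return w.Write(ctx, websocket.MessageText, msg)
	}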
app/server.go (645 changes)
@@ -3,6 +3,7 @@ package app
import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
@@ -18,8 +19,11 @@ import (
	"next.orly.dev/pkg/acl"
	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/encoders/event"
	"next.orly.dev/pkg/encoders/filter"
	"next.orly.dev/pkg/encoders/hex"
	"next.orly.dev/pkg/encoders/tag"
	"next.orly.dev/pkg/protocol/auth"
	"next.orly.dev/pkg/protocol/httpauth"
	"next.orly.dev/pkg/protocol/publish"
)
@@ -38,15 +42,25 @@ type Server struct {
	// Challenge storage for HTTP UI authentication
	challengeMutex sync.RWMutex
	challenges     map[string][]byte

	paymentProcessor *PaymentProcessor
	sprocketManager  *SprocketManager
}

func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	// Set CORS headers for all responses
	// Set comprehensive CORS headers for proxy compatibility
	w.Header().Set("Access-Control-Allow-Origin", "*")
	w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
	w.Header().Set(
		"Access-Control-Allow-Headers", "Content-Type, Authorization",
	)
	w.Header().Set("Access-Control-Allow-Headers",
		"Origin, X-Requested-With, Content-Type, Accept, Authorization, "+
			"X-Forwarded-For, X-Forwarded-Proto, X-Forwarded-Host, X-Real-IP, "+
			"Upgrade, Connection, Sec-WebSocket-Key, Sec-WebSocket-Version, "+
			"Sec-WebSocket-Protocol, Sec-WebSocket-Extensions")
	w.Header().Set("Access-Control-Allow-Credentials", "true")
	w.Header().Set("Access-Control-Max-Age", "86400")

	// Add proxy-friendly headers
	w.Header().Set("Vary", "Origin, Access-Control-Request-Method, Access-Control-Request-Headers")

	// Handle preflight OPTIONS requests
	if r.Method == "OPTIONS" {
@@ -54,6 +68,11 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
		return
	}

	// Log proxy information for debugging (only for WebSocket requests to avoid spam)
	if r.Header.Get("Upgrade") == "websocket" {
		LogProxyInfo(r, "HTTP request")
	}

	// If this is a websocket request, only intercept the relay root path.
	// This allows other websocket paths (e.g., Vite HMR) to be handled by the dev proxy when enabled.
	if r.Header.Get("Upgrade") == "websocket" {
@@ -78,37 +97,49 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	s.mux.ServeHTTP(w, r)
}
func (s *Server) ServiceURL(req *http.Request) (st string) {
func (s *Server) ServiceURL(req *http.Request) (url string) {
	proto := req.Header.Get("X-Forwarded-Proto")
	if proto == "" {
		if req.TLS != nil {
			proto = "https"
		} else {
			proto = "http"
		}
	}
	host := req.Header.Get("X-Forwarded-Host")
	if host == "" {
		host = req.Host
	}
	return proto + "://" + host
}

func (s *Server) WebSocketURL(req *http.Request) (url string) {
	proto := req.Header.Get("X-Forwarded-Proto")
	if proto == "" {
		if host == "localhost" {
			proto = "ws"
		} else if strings.Contains(host, ":") {
			// has a port number
			proto = "ws"
		} else if _, err := strconv.Atoi(
			strings.ReplaceAll(
				host, ".",
				"",
			),
		); chk.E(err) {
			// it's a naked IP
			proto = "ws"
		} else {
			if req.TLS != nil {
				proto = "wss"
			} else {
				proto = "ws"
			}
	} else if proto == "https" {
		proto = "wss"
	} else if proto == "http" {
		proto = "ws"
	} else {
		// Convert HTTP scheme to WebSocket scheme
		if proto == "https" {
			proto = "wss"
		} else if proto == "http" {
			proto = "ws"
		}
	}
	host := req.Header.Get("X-Forwarded-Host")
	if host == "" {
		host = req.Host
	}
	return proto + "://" + host
}

func (s *Server) DashboardURL(req *http.Request) (url string) {
	return s.ServiceURL(req) + "/"
}
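Reading the branches above, the intended mapping appears to be: an explicit X-Forwarded-Proto of https or http becomes wss or ws, while an absent header falls back to host heuristics (localhost, an explicit port, or a naked IP imply plain ws; otherwise TLS decides). A few input/output examples, inferred from the branches rather than confirmed:

	// X-Forwarded-Proto: https, Host: relay.example.com -> wss://relay.example.com
	// X-Forwarded-Proto: (none), Host: localhost:3334   -> ws://localhost:3334
	// X-Forwarded-Proto: (none), Host: 10.0.0.5         -> ws://10.0.0.5 (naked IP)
	// X-Forwarded-Proto: (none), TLS present            -> wss://<host>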
// UserInterface sets up a basic Nostr NDK interface that allows users to log into the relay user interface
func (s *Server) UserInterface() {
	if s.mux == nil {
@@ -157,50 +188,76 @@ func (s *Server) UserInterface() {
	s.mux.HandleFunc("/api/auth/status", s.handleAuthStatus)
	s.mux.HandleFunc("/api/auth/logout", s.handleAuthLogout)
	s.mux.HandleFunc("/api/permissions/", s.handlePermissions)
	// Export endpoints
	// Export endpoint
	s.mux.HandleFunc("/api/export", s.handleExport)
	s.mux.HandleFunc("/api/export/mine", s.handleExportMine)
	// Events endpoints
	s.mux.HandleFunc("/api/events/mine", s.handleEventsMine)
	// Import endpoint (admin only)
	s.mux.HandleFunc("/api/import", s.handleImport)
	// Sprocket endpoints (owner only)
	s.mux.HandleFunc("/api/sprocket/status", s.handleSprocketStatus)
	s.mux.HandleFunc("/api/sprocket/update", s.handleSprocketUpdate)
	s.mux.HandleFunc("/api/sprocket/restart", s.handleSprocketRestart)
	s.mux.HandleFunc("/api/sprocket/versions", s.handleSprocketVersions)
	s.mux.HandleFunc("/api/sprocket/delete-version", s.handleSprocketDeleteVersion)
	s.mux.HandleFunc("/api/sprocket/config", s.handleSprocketConfig)
}
// handleLoginInterface serves the main user interface for login
func (s *Server) handleLoginInterface(w http.ResponseWriter, r *http.Request) {
	// In dev mode with proxy configured, forward to dev server
	if s.Config != nil && s.Config.WebDisableEmbedded && s.devProxy != nil {
	if s.devProxy != nil {
		s.devProxy.ServeHTTP(w, r)
		return
	}
	// If embedded UI is disabled but no proxy configured, return a helpful message
	if s.Config != nil && s.Config.WebDisableEmbedded {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		w.WriteHeader(http.StatusNotFound)
		w.Write([]byte("Web UI disabled (ORLY_WEB_DISABLE=true). Run the web app in standalone dev mode (e.g., npm run dev) or set ORLY_WEB_DEV_PROXY_URL to proxy through this server."))
		return
	}
	// Default: serve embedded React app
	fileServer := http.FileServer(GetReactAppFS())
	fileServer.ServeHTTP(w, r)

	// Serve embedded web interface
	ServeEmbeddedWeb(w, r)
}
// handleAuthChallenge generates and returns an authentication challenge
// handleAuthChallenge generates a new authentication challenge
func (s *Server) handleAuthChallenge(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Generate a proper challenge using the auth package
	w.Header().Set("Content-Type", "application/json")

	// Generate a new challenge
	challenge := auth.GenerateChallenge()
	challengeHex := hex.Enc(challenge)

	// Store the challenge using the hex value as the key for easy lookup
	// Store the challenge with expiration (5 minutes)
	s.challengeMutex.Lock()
	if s.challenges == nil {
		s.challenges = make(map[string][]byte)
	}
	s.challenges[challengeHex] = challenge
	s.challengeMutex.Unlock()

	w.Header().Set("Content-Type", "application/json")
	w.Write([]byte(`{"challenge": "` + challengeHex + `"}`))
	// Clean up expired challenges
	go func() {
		time.Sleep(5 * time.Minute)
		s.challengeMutex.Lock()
		delete(s.challenges, challengeHex)
		s.challengeMutex.Unlock()
	}()

	// Return the challenge
	response := struct {
		Challenge string `json:"challenge"`
	}{
		Challenge: challengeHex,
	}

	jsonData, err := json.Marshal(response)
	if chk.E(err) {
		http.Error(w, "Error generating challenge", http.StatusInternalServerError)
		return
	}

	w.Write(jsonData)
}
// handleAuthLogin processes authentication requests
@@ -250,7 +307,7 @@ func (s *Server) handleAuthLogin(w http.ResponseWriter, r *http.Request) {
	delete(s.challenges, challengeHex)
	s.challengeMutex.Unlock()

	relayURL := s.ServiceURL(r)
	relayURL := s.WebSocketURL(r)

	// Validate the authentication event with the correct challenge
	// The challenge in the event tag is hex-encoded, so we need to pass the hex string as bytes
@@ -274,10 +331,11 @@ func (s *Server) handleAuthLogin(w http.ResponseWriter, r *http.Request) {
		MaxAge: 60 * 60 * 24 * 30, // 30 days
	}
	http.SetCookie(w, cookie)
	w.Write([]byte(`{"success": true, "pubkey": "` + hex.Enc(evt.Pubkey) + `", "message": "Authentication successful"}`))

	w.Write([]byte(`{"success": true}`))
}
// handleAuthStatus returns the current authentication status
// handleAuthStatus checks if the user is authenticated
func (s *Server) handleAuthStatus(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
@@ -285,35 +343,63 @@ func (s *Server) handleAuthStatus(w http.ResponseWriter, r *http.Request) {
	}

	w.Header().Set("Content-Type", "application/json")

	// Check for auth cookie
	if c, err := r.Cookie("orly_auth"); err == nil && c.Value != "" {
		// Validate pubkey format (hex)
		if _, err := hex.Dec(c.Value); !chk.E(err) {
			w.Write([]byte(`{"authenticated": true, "pubkey": "` + c.Value + `"}`))
			return
		}
	c, err := r.Cookie("orly_auth")
	if err != nil || c.Value == "" {
		w.Write([]byte(`{"authenticated": false}`))
		return
	}
	w.Write([]byte(`{"authenticated": false}`))

	// Validate the pubkey format
	pubkey, err := hex.Dec(c.Value)
	if chk.E(err) {
		w.Write([]byte(`{"authenticated": false}`))
		return
	}

	// Get user permissions
	permission := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)

	response := struct {
		Authenticated bool   `json:"authenticated"`
		Pubkey        string `json:"pubkey"`
		Permission    string `json:"permission"`
	}{
		Authenticated: true,
		Pubkey:        c.Value,
		Permission:    permission,
	}

	jsonData, err := json.Marshal(response)
	if chk.E(err) {
		w.Write([]byte(`{"authenticated": false}`))
		return
	}

	w.Write(jsonData)
}
// handleAuthLogout clears the auth cookie
// handleAuthLogout clears the authentication cookie
func (s *Server) handleAuthLogout(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Expire the cookie
	http.SetCookie(
		w, &http.Cookie{
			Name:     "orly_auth",
			Value:    "",
			Path:     "/",
			MaxAge:   -1,
			HttpOnly: true,
			SameSite: http.SameSiteLaxMode,
		},
	)

	w.Header().Set("Content-Type", "application/json")

	// Clear the auth cookie
	cookie := &http.Cookie{
		Name:     "orly_auth",
		Value:    "",
		Path:     "/",
		HttpOnly: true,
		SameSite: http.SameSiteLaxMode,
		MaxAge:   -1, // Expire immediately
	}
	http.SetCookie(w, cookie)

	w.Write([]byte(`{"success": true}`))
}
@@ -363,100 +449,193 @@ func (s *Server) handlePermissions(w http.ResponseWriter, r *http.Request) {
	w.Write(jsonData)
}

// handleExport streams all events as JSONL (NDJSON). Admins only.
// handleExport streams events as JSONL (NDJSON) using NIP-98 authentication.
// Supports both GET (query params) and POST (JSON body) for pubkey filtering.
func (s *Server) handleExport(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
	if r.Method != http.MethodGet && r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Require auth cookie
	c, err := r.Cookie("orly_auth")
	if err != nil || c.Value == "" {
		http.Error(w, "Not authenticated", http.StatusUnauthorized)
		return
	}
	requesterPubHex := c.Value
	requesterPub, err := hex.Dec(requesterPubHex)
	if chk.E(err) {
		http.Error(w, "Invalid auth cookie", http.StatusUnauthorized)
		return
	}
	// Check permissions
	if acl.Registry.GetAccessLevel(requesterPub, r.RemoteAddr) != "admin" {
		http.Error(w, "Forbidden", http.StatusForbidden)
	// Validate NIP-98 authentication
	valid, pubkey, err := httpauth.CheckAuth(r)
	if chk.E(err) || !valid {
		errorMsg := "NIP-98 authentication validation failed"
		if err != nil {
			errorMsg = err.Error()
		}
		http.Error(w, errorMsg, http.StatusUnauthorized)
		return
	}

	// Optional filtering by pubkey(s)
	// Check permissions - require write, admin, or owner level
	accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
	if accessLevel != "write" && accessLevel != "admin" && accessLevel != "owner" {
		http.Error(w, "Write, admin, or owner permission required", http.StatusForbidden)
		return
	}

	// Parse pubkeys from request
	var pks [][]byte
	q := r.URL.Query()
	for _, pkHex := range q["pubkey"] {
		if pkHex == "" {
			continue

	if r.Method == http.MethodPost {
		// Parse JSON body for pubkeys
		var requestBody struct {
			Pubkeys []string `json:"pubkeys"`
		}
		if pk, err := hex.Dec(pkHex); !chk.E(err) {
			pks = append(pks, pk)

		if err := json.NewDecoder(r.Body).Decode(&requestBody); err == nil {
			// If JSON parsing succeeds, use pubkeys from body
			for _, pkHex := range requestBody.Pubkeys {
				if pkHex == "" {
					continue
				}
				if pk, err := hex.Dec(pkHex); !chk.E(err) {
					pks = append(pks, pk)
				}
			}
		}
		// If JSON parsing fails, fall back to empty pubkeys (export all)
	} else {
		// GET method - parse query parameters
		q := r.URL.Query()
		for _, pkHex := range q["pubkey"] {
			if pkHex == "" {
				continue
			}
			if pk, err := hex.Dec(pkHex); !chk.E(err) {
				pks = append(pks, pk)
			}
		}
	}

	// Determine filename based on whether filtering by pubkeys
	var filename string
	if len(pks) == 0 {
		filename = "all-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
	} else if len(pks) == 1 {
		filename = "my-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
	} else {
		filename = "filtered-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
	}

	w.Header().Set("Content-Type", "application/x-ndjson")
	filename := "events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
	w.Header().Set("Content-Disposition", "attachment; filename=\""+filename+"\"")

	// Stream export
	s.D.Export(s.Ctx, w, pks...)
}
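The CheckAuth-then-GetAccessLevel gate recurs near-verbatim in the export, import, and every sprocket handler below; a hedged sketch of how it could be factored into a wrapper, assuming the httpauth.CheckAuth and acl.Registry.GetAccessLevel signatures shown in this diff (requireLevel itself is hypothetical):

	// requireLevel wraps a handler with NIP-98 auth and an ACL check,
	// passing the verified pubkey through to the inner handler.
	func (s *Server) requireLevel(levels []string, next func(http.ResponseWriter, *http.Request, []byte)) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			valid, pubkey, err := httpauth.CheckAuth(r)
			if err != nil || !valid {
				http.Error(w, "NIP-98 authentication validation failed", http.StatusUnauthorized)
				return
			}
			accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
			for _, l := range levels {
				if accessLevel == l {
					next(w, r, pubkey)
					return
				}
			}
			http.Error(w, "insufficient permission", http.StatusForbidden)
		}
	}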
// handleExportMine streams only the authenticated user's events as JSONL (NDJSON).
func (s *Server) handleExportMine(w http.ResponseWriter, r *http.Request) {
// handleEventsMine returns the authenticated user's events in JSON format with pagination using NIP-98 authentication.
func (s *Server) handleEventsMine(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Require auth cookie
	c, err := r.Cookie("orly_auth")
	if err != nil || c.Value == "" {
		http.Error(w, "Not authenticated", http.StatusUnauthorized)
	// Validate NIP-98 authentication
	valid, pubkey, err := httpauth.CheckAuth(r)
	if chk.E(err) || !valid {
		errorMsg := "NIP-98 authentication validation failed"
		if err != nil {
			errorMsg = err.Error()
		}
		http.Error(w, errorMsg, http.StatusUnauthorized)
		return
	}
	pubkey, err := hex.Dec(c.Value)

	// Parse pagination parameters
	query := r.URL.Query()
	limit := 50 // default limit
	if l := query.Get("limit"); l != "" {
		if parsed, err := strconv.Atoi(l); err == nil && parsed > 0 && parsed <= 100 {
			limit = parsed
		}
	}

	offset := 0
	if o := query.Get("offset"); o != "" {
		if parsed, err := strconv.Atoi(o); err == nil && parsed >= 0 {
			offset = parsed
		}
	}

	// Use QueryEvents with filter for this user's events
	f := &filter.F{
		Authors: tag.NewFromBytesSlice(pubkey),
	}

	log.Printf("DEBUG: Querying events for pubkey: %s", hex.Enc(pubkey))
	events, err := s.D.QueryEvents(s.Ctx, f)
	if chk.E(err) {
		http.Error(w, "Invalid auth cookie", http.StatusUnauthorized)
		log.Printf("DEBUG: QueryEvents failed: %v", err)
		http.Error(w, "Failed to query events", http.StatusInternalServerError)
		return
	}
	log.Printf("DEBUG: QueryEvents returned %d events", len(events))

	// Apply pagination
	totalEvents := len(events)
	if offset >= totalEvents {
		events = event.S{} // Empty slice
	} else {
		end := offset + limit
		if end > totalEvents {
			end = totalEvents
		}
		events = events[offset:end]
	}

	// Set content type and write JSON response
	w.Header().Set("Content-Type", "application/json")

	// Format response as proper JSON
	response := struct {
		Events []*event.E `json:"events"`
		Total  int        `json:"total"`
		Limit  int        `json:"limit"`
		Offset int        `json:"offset"`
	}{
		Events: events,
		Total:  totalEvents,
		Limit:  limit,
		Offset: offset,
	}

	// Marshal and write the response
	jsonData, err := json.Marshal(response)
	if chk.E(err) {
		http.Error(
			w, "Error generating response", http.StatusInternalServerError,
		)
		return
	}

	w.Header().Set("Content-Type", "application/x-ndjson")
	filename := "my-events-" + time.Now().UTC().Format("20060102-150405Z") + ".jsonl"
	w.Header().Set("Content-Disposition", "attachment; filename=\""+filename+"\"")

	// Stream export for this user's pubkey only
	s.D.Export(s.Ctx, w, pubkey)
	w.Write(jsonData)
}
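A hedged usage sketch of the paginated endpoint above, assuming a NIP-98 Authorization header is produced elsewhere (the host and token here are illustrative placeholders):

	req, _ := http.NewRequest(http.MethodGet,
		"https://relay.example.com/api/events/mine?limit=20&offset=40", nil)
	req.Header.Set("Authorization", "Nostr <base64 NIP-98 event>") // placeholder token
	resp, err := http.DefaultClient.Do(req)
	// per the response struct above, a success body decodes to:
	// {"events": [...], "total": <n>, "limit": 20, "offset": 40}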
// handleImport receives a JSONL/NDJSON file or body and enqueues an async import. Admins only.
// handleImport receives a JSONL/NDJSON file or body and enqueues an async import using NIP-98 authentication. Admins only.
func (s *Server) handleImport(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Require auth cookie
	c, err := r.Cookie("orly_auth")
	if err != nil || c.Value == "" {
		http.Error(w, "Not authenticated", http.StatusUnauthorized)
	// Validate NIP-98 authentication
	valid, pubkey, err := httpauth.CheckAuth(r)
	if chk.E(err) || !valid {
		errorMsg := "NIP-98 authentication validation failed"
		if err != nil {
			errorMsg = err.Error()
		}
		http.Error(w, errorMsg, http.StatusUnauthorized)
		return
	}
	requesterPub, err := hex.Dec(c.Value)
	if chk.E(err) {
		http.Error(w, "Invalid auth cookie", http.StatusUnauthorized)
		return
	}
	// Admins only
	if acl.Registry.GetAccessLevel(requesterPub, r.RemoteAddr) != "admin" {
		http.Error(w, "Forbidden", http.StatusForbidden)

	// Check permissions - require admin or owner level
	accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
	if accessLevel != "admin" && accessLevel != "owner" {
		http.Error(w, "Admin or owner permission required", http.StatusForbidden)
		return
	}

@@ -485,3 +664,237 @@ func (s *Server) handleImport(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusAccepted)
	w.Write([]byte(`{"success": true, "message": "Import started"}`))
}
// handleSprocketStatus returns the current status of the sprocket script
func (s *Server) handleSprocketStatus(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Validate NIP-98 authentication
	valid, pubkey, err := httpauth.CheckAuth(r)
	if chk.E(err) || !valid {
		errorMsg := "NIP-98 authentication validation failed"
		if err != nil {
			errorMsg = err.Error()
		}
		http.Error(w, errorMsg, http.StatusUnauthorized)
		return
	}

	// Check permissions - require owner level
	accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
	if accessLevel != "owner" {
		http.Error(w, "Owner permission required", http.StatusForbidden)
		return
	}

	status := s.sprocketManager.GetSprocketStatus()

	w.Header().Set("Content-Type", "application/json")
	jsonData, err := json.Marshal(status)
	if chk.E(err) {
		http.Error(w, "Error generating response", http.StatusInternalServerError)
		return
	}

	w.Write(jsonData)
}

// handleSprocketUpdate updates the sprocket script and restarts it
func (s *Server) handleSprocketUpdate(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Validate NIP-98 authentication
	valid, pubkey, err := httpauth.CheckAuth(r)
	if chk.E(err) || !valid {
		errorMsg := "NIP-98 authentication validation failed"
		if err != nil {
			errorMsg = err.Error()
		}
		http.Error(w, errorMsg, http.StatusUnauthorized)
		return
	}

	// Check permissions - require owner level
	accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
	if accessLevel != "owner" {
		http.Error(w, "Owner permission required", http.StatusForbidden)
		return
	}

	// Read the request body
	body, err := io.ReadAll(r.Body)
	if chk.E(err) {
		http.Error(w, "Failed to read request body", http.StatusBadRequest)
		return
	}

	// Update the sprocket script
	if err := s.sprocketManager.UpdateSprocket(string(body)); chk.E(err) {
		http.Error(w, fmt.Sprintf("Failed to update sprocket: %v", err), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.Write([]byte(`{"success": true, "message": "Sprocket updated successfully"}`))
}

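// Illustrative request shape for the update endpoint (the route is registered
// elsewhere in this diff; the path shown here is an assumption):
//
//	POST /sprocket/update
//	Authorization: Nostr <base64 NIP-98 event signed by the owner key>
//	Body: the raw script text; per UpdateSprocket below, an empty body
//	deletes the script and stops the running sprocket.
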
// handleSprocketRestart restarts the sprocket script
func (s *Server) handleSprocketRestart(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Validate NIP-98 authentication
	valid, pubkey, err := httpauth.CheckAuth(r)
	if chk.E(err) || !valid {
		errorMsg := "NIP-98 authentication validation failed"
		if err != nil {
			errorMsg = err.Error()
		}
		http.Error(w, errorMsg, http.StatusUnauthorized)
		return
	}

	// Check permissions - require owner level
	accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
	if accessLevel != "owner" {
		http.Error(w, "Owner permission required", http.StatusForbidden)
		return
	}

	// Restart the sprocket script
	if err := s.sprocketManager.RestartSprocket(); chk.E(err) {
		http.Error(w, fmt.Sprintf("Failed to restart sprocket: %v", err), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.Write([]byte(`{"success": true, "message": "Sprocket restarted successfully"}`))
}

// handleSprocketVersions returns all sprocket script versions
func (s *Server) handleSprocketVersions(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Validate NIP-98 authentication
	valid, pubkey, err := httpauth.CheckAuth(r)
	if chk.E(err) || !valid {
		errorMsg := "NIP-98 authentication validation failed"
		if err != nil {
			errorMsg = err.Error()
		}
		http.Error(w, errorMsg, http.StatusUnauthorized)
		return
	}

	// Check permissions - require owner level
	accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
	if accessLevel != "owner" {
		http.Error(w, "Owner permission required", http.StatusForbidden)
		return
	}

	versions, err := s.sprocketManager.GetSprocketVersions()
	if chk.E(err) {
		http.Error(w, fmt.Sprintf("Failed to get sprocket versions: %v", err), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	jsonData, err := json.Marshal(versions)
	if chk.E(err) {
		http.Error(w, "Error generating response", http.StatusInternalServerError)
		return
	}

	w.Write(jsonData)
}

// handleSprocketDeleteVersion deletes a specific sprocket version
func (s *Server) handleSprocketDeleteVersion(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	// Validate NIP-98 authentication
	valid, pubkey, err := httpauth.CheckAuth(r)
	if chk.E(err) || !valid {
		errorMsg := "NIP-98 authentication validation failed"
		if err != nil {
			errorMsg = err.Error()
		}
		http.Error(w, errorMsg, http.StatusUnauthorized)
		return
	}

	// Check permissions - require owner level
	accessLevel := acl.Registry.GetAccessLevel(pubkey, r.RemoteAddr)
	if accessLevel != "owner" {
		http.Error(w, "Owner permission required", http.StatusForbidden)
		return
	}

	// Read the request body
	body, err := io.ReadAll(r.Body)
	if chk.E(err) {
		http.Error(w, "Failed to read request body", http.StatusBadRequest)
		return
	}

	var request struct {
		Filename string `json:"filename"`
	}
	if err := json.Unmarshal(body, &request); chk.E(err) {
		http.Error(w, "Invalid JSON in request body", http.StatusBadRequest)
		return
	}

	if request.Filename == "" {
		http.Error(w, "Filename is required", http.StatusBadRequest)
		return
	}

	// Delete the sprocket version
	if err := s.sprocketManager.DeleteSprocketVersion(request.Filename); chk.E(err) {
		http.Error(w, fmt.Sprintf("Failed to delete sprocket version: %v", err), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.Write([]byte(`{"success": true, "message": "Sprocket version deleted successfully"}`))
}

// handleSprocketConfig returns the sprocket configuration status
func (s *Server) handleSprocketConfig(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	w.Header().Set("Content-Type", "application/json")

	response := struct {
		Enabled bool `json:"enabled"`
	}{
		Enabled: s.Config.SprocketEnabled,
	}

	jsonData, err := json.Marshal(response)
	if chk.E(err) {
		http.Error(w, "Error generating response", http.StatusInternalServerError)
		return
	}

	w.Write(jsonData)
}

518	app/sprocket.go	Normal file
@@ -0,0 +1,518 @@
package app

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/adrg/xdg"
	"lol.mleku.dev/chk"
	"lol.mleku.dev/log"
	"next.orly.dev/pkg/encoders/event"
)

// SprocketResponse represents a response from the sprocket script
type SprocketResponse struct {
	ID     string `json:"id"`
	Action string `json:"action"` // accept, reject, or shadowReject
	Msg    string `json:"msg"`    // NIP-20 response message (only used for reject)
}

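// Example of a single response line a script might print to stdout, one JSON
// object per line (the values here are illustrative, not normative):
//
//	{"id":"<event id hex>","action":"reject","msg":"blocked: spam"}
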
// SprocketManager handles sprocket script execution and management
type SprocketManager struct {
	ctx           context.Context
	cancel        context.CancelFunc
	configDir     string
	scriptPath    string
	currentCmd    *exec.Cmd
	currentCancel context.CancelFunc
	mutex         sync.RWMutex
	isRunning     bool
	enabled       bool
	stdin         io.WriteCloser
	stdout        io.ReadCloser
	stderr        io.ReadCloser
	responseChan  chan SprocketResponse
}

// NewSprocketManager creates a new sprocket manager
func NewSprocketManager(ctx context.Context, appName string, enabled bool) *SprocketManager {
	configDir := filepath.Join(xdg.ConfigHome, appName)
	scriptPath := filepath.Join(configDir, "sprocket.sh")

	ctx, cancel := context.WithCancel(ctx)

	sm := &SprocketManager{
		ctx:          ctx,
		cancel:       cancel,
		configDir:    configDir,
		scriptPath:   scriptPath,
		enabled:      enabled,
		responseChan: make(chan SprocketResponse, 100), // Buffered channel for responses
	}

	// Start the sprocket script if it exists and is enabled
	if enabled {
		go sm.startSprocketIfExists()
	}

	return sm
}

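// Typical wiring from server startup (a sketch; names outside this file,
// such as cfg.SprocketEnabled, are assumptions):
//
//	sm := NewSprocketManager(ctx, "orly", cfg.SprocketEnabled)
//	defer sm.Shutdown()
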
// startSprocketIfExists starts the sprocket script if the file exists
func (sm *SprocketManager) startSprocketIfExists() {
	if _, err := os.Stat(sm.scriptPath); err == nil {
		chk.E(sm.StartSprocket()) // log the startup error, if any
	}
}

// StartSprocket starts the sprocket script
func (sm *SprocketManager) StartSprocket() error {
	sm.mutex.Lock()
	defer sm.mutex.Unlock()

	if sm.isRunning {
		return fmt.Errorf("sprocket is already running")
	}

	if _, err := os.Stat(sm.scriptPath); os.IsNotExist(err) {
		return fmt.Errorf("sprocket script does not exist")
	}

	// Create a new context for this command
	cmdCtx, cmdCancel := context.WithCancel(sm.ctx)

	// Make the script executable
	if err := os.Chmod(sm.scriptPath, 0755); chk.E(err) {
		cmdCancel()
		return fmt.Errorf("failed to make script executable: %v", err)
	}

	// Start the script
	cmd := exec.CommandContext(cmdCtx, sm.scriptPath)
	cmd.Dir = sm.configDir

	// Set up stdio pipes for communication
	stdin, err := cmd.StdinPipe()
	if chk.E(err) {
		cmdCancel()
		return fmt.Errorf("failed to create stdin pipe: %v", err)
	}

	stdout, err := cmd.StdoutPipe()
	if chk.E(err) {
		cmdCancel()
		stdin.Close()
		return fmt.Errorf("failed to create stdout pipe: %v", err)
	}

	stderr, err := cmd.StderrPipe()
	if chk.E(err) {
		cmdCancel()
		stdin.Close()
		stdout.Close()
		return fmt.Errorf("failed to create stderr pipe: %v", err)
	}

	// Start the command
	if err := cmd.Start(); chk.E(err) {
		cmdCancel()
		stdin.Close()
		stdout.Close()
		stderr.Close()
		return fmt.Errorf("failed to start sprocket: %v", err)
	}

	sm.currentCmd = cmd
	sm.currentCancel = cmdCancel
	sm.stdin = stdin
	sm.stdout = stdout
	sm.stderr = stderr
	sm.isRunning = true

	// Start response reader in background; it owns stdout
	go sm.readResponses()

	// Log stderr output in background (stdout carries the JSONL responses
	// and is consumed by readResponses, so it must not also be read here)
	go sm.logStderr(stderr)

	// Monitor the process
	go sm.monitorProcess()

	log.I.F("sprocket started (pid=%d)", cmd.Process.Pid)
	return nil
}

// StopSprocket stops the sprocket script gracefully, with SIGKILL fallback
func (sm *SprocketManager) StopSprocket() error {
	sm.mutex.Lock()
	defer sm.mutex.Unlock()

	if !sm.isRunning || sm.currentCmd == nil {
		return fmt.Errorf("sprocket is not running")
	}

	// Close stdin first to signal the script to exit
	if sm.stdin != nil {
		sm.stdin.Close()
	}

	// Cancel the context
	if sm.currentCancel != nil {
		sm.currentCancel()
	}

	// Wait for graceful shutdown with timeout
	done := make(chan error, 1)
	go func() {
		done <- sm.currentCmd.Wait()
	}()

	select {
	case <-done:
		// Process exited gracefully
		log.I.F("sprocket stopped gracefully")
	case <-time.After(5 * time.Second):
		// Force kill after 5 seconds
		log.W.F("sprocket did not stop gracefully, sending SIGKILL")
		if err := sm.currentCmd.Process.Kill(); chk.E(err) {
			log.E.F("failed to kill sprocket process: %v", err)
		}
		<-done // Wait for the kill to complete
	}

	// Clean up pipes
	if sm.stdin != nil {
		sm.stdin.Close()
		sm.stdin = nil
	}
	if sm.stdout != nil {
		sm.stdout.Close()
		sm.stdout = nil
	}
	if sm.stderr != nil {
		sm.stderr.Close()
		sm.stderr = nil
	}

	sm.isRunning = false
	sm.currentCmd = nil
	sm.currentCancel = nil

	return nil
}

// RestartSprocket stops and starts the sprocket script
func (sm *SprocketManager) RestartSprocket() error {
	if sm.isRunning {
		if err := sm.StopSprocket(); chk.E(err) {
			return fmt.Errorf("failed to stop sprocket: %v", err)
		}
		// Give it a moment to fully stop
		time.Sleep(100 * time.Millisecond)
	}

	return sm.StartSprocket()
}

// UpdateSprocket updates the sprocket script and restarts it with zero downtime
func (sm *SprocketManager) UpdateSprocket(scriptContent string) error {
	// Ensure config directory exists
	if err := os.MkdirAll(sm.configDir, 0755); chk.E(err) {
		return fmt.Errorf("failed to create config directory: %v", err)
	}

	// If script content is empty, delete the script and stop
	if strings.TrimSpace(scriptContent) == "" {
		if sm.isRunning {
			if err := sm.StopSprocket(); chk.E(err) {
				log.E.F("failed to stop sprocket before deletion: %v", err)
			}
		}

		if _, err := os.Stat(sm.scriptPath); err == nil {
			if err := os.Remove(sm.scriptPath); chk.E(err) {
				return fmt.Errorf("failed to delete sprocket script: %v", err)
			}
			log.I.F("sprocket script deleted")
		}
		return nil
	}

	// Create backup of existing script if it exists
	if _, err := os.Stat(sm.scriptPath); err == nil {
		timestamp := time.Now().Format("20060102150405")
		backupPath := sm.scriptPath + "." + timestamp
		if err := os.Rename(sm.scriptPath, backupPath); chk.E(err) {
			log.W.F("failed to create backup: %v", err)
		} else {
			log.I.F("created backup: %s", backupPath)
		}
	}

	// Write new script to a temporary file first, then atomically replace
	// the live path so readers never observe a partially written script
	tempPath := sm.scriptPath + ".tmp"
	if err := os.WriteFile(tempPath, []byte(scriptContent), 0755); chk.E(err) {
		return fmt.Errorf("failed to write temporary sprocket script: %v", err)
	}
	if err := os.Rename(tempPath, sm.scriptPath); chk.E(err) {
		os.Remove(tempPath) // Clean up temp file
		return fmt.Errorf("failed to replace sprocket script: %v", err)
	}

	// If the sprocket is running, restart it so the new script takes effect
	if sm.isRunning {
		log.I.F("sprocket script updated atomically")
		return sm.RestartSprocket()
	}

	log.I.F("sprocket script updated")
	return nil
}

// GetSprocketStatus returns the current status of the sprocket
func (sm *SprocketManager) GetSprocketStatus() map[string]interface{} {
	sm.mutex.RLock()
	defer sm.mutex.RUnlock()

	status := map[string]interface{}{
		"is_running":    sm.isRunning,
		"script_exists": false,
		"script_path":   sm.scriptPath,
	}

	if info, err := os.Stat(sm.scriptPath); err == nil {
		status["script_exists"] = true
		status["script_modified"] = info.ModTime()

		// Get script content
		if content, err := os.ReadFile(sm.scriptPath); err == nil {
			status["script_content"] = string(content)
		}
	}

	if sm.isRunning && sm.currentCmd != nil && sm.currentCmd.Process != nil {
		status["pid"] = sm.currentCmd.Process.Pid
	}

	return status
}

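// Example payload as rendered by handleSprocketStatus (values illustrative):
//
//	{"is_running":true,"script_exists":true,
//	 "script_path":"/home/user/.config/orly/sprocket.sh",
//	 "script_modified":"2025-01-01T00:00:00Z",
//	 "script_content":"#!/bin/sh ...","pid":12345}
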
// GetSprocketVersions returns a list of all sprocket script versions
func (sm *SprocketManager) GetSprocketVersions() ([]map[string]interface{}, error) {
	versions := []map[string]interface{}{}

	// Check for current script
	if info, err := os.Stat(sm.scriptPath); err == nil {
		if content, err := os.ReadFile(sm.scriptPath); err == nil {
			versions = append(versions, map[string]interface{}{
				"name":       "sprocket.sh",
				"path":       sm.scriptPath,
				"modified":   info.ModTime(),
				"content":    string(content),
				"is_current": true,
			})
		}
	}

	// Check for backup versions
	dir := filepath.Dir(sm.scriptPath)
	files, err := os.ReadDir(dir)
	if chk.E(err) {
		return versions, nil
	}

	for _, file := range files {
		if strings.HasPrefix(file.Name(), "sprocket.sh.") && !file.IsDir() {
			path := filepath.Join(dir, file.Name())
			if info, err := os.Stat(path); err == nil {
				if content, err := os.ReadFile(path); err == nil {
					versions = append(versions, map[string]interface{}{
						"name":       file.Name(),
						"path":       path,
						"modified":   info.ModTime(),
						"content":    string(content),
						"is_current": false,
					})
				}
			}
		}
	}

	return versions, nil
}

// DeleteSprocketVersion deletes a specific sprocket version
func (sm *SprocketManager) DeleteSprocketVersion(filename string) error {
	// Don't allow deleting the current script
	if filename == "sprocket.sh" {
		return fmt.Errorf("cannot delete current sprocket script")
	}

	// Reject path separators so a crafted filename cannot escape the config directory
	if strings.ContainsAny(filename, `/\`) {
		return fmt.Errorf("invalid filename: %s", filename)
	}

	path := filepath.Join(sm.configDir, filename)
	if err := os.Remove(path); chk.E(err) {
		return fmt.Errorf("failed to delete sprocket version: %v", err)
	}

	log.I.F("deleted sprocket version: %s", filename)
	return nil
}

// logStderr logs the script's stderr output; stdout carries the JSONL
// responses and is read exclusively by readResponses, so only stderr is
// drained here
func (sm *SprocketManager) logStderr(stderr io.ReadCloser) {
	defer stderr.Close()
	io.Copy(os.Stderr, stderr)
}

// ProcessEvent sends an event to the sprocket script and waits for a response
func (sm *SprocketManager) ProcessEvent(evt *event.E) (*SprocketResponse, error) {
	sm.mutex.RLock()
	if !sm.isRunning || sm.stdin == nil {
		sm.mutex.RUnlock()
		return nil, fmt.Errorf("sprocket is not running")
	}
	stdin := sm.stdin
	sm.mutex.RUnlock()

	// Serialize the event to JSON
	eventJSON, err := json.Marshal(evt)
	if chk.E(err) {
		return nil, fmt.Errorf("failed to serialize event: %v", err)
	}

	// Send the event to the sprocket script as one JSON object per line;
	// the trailing newline is what delimits events for the script
	if _, err := stdin.Write(append(eventJSON, '\n')); chk.E(err) {
		return nil, fmt.Errorf("failed to write event to sprocket: %v", err)
	}

	// Wait for response with timeout
	select {
	case response := <-sm.responseChan:
		return &response, nil
	case <-time.After(5 * time.Second):
		return nil, fmt.Errorf("sprocket response timeout")
	case <-sm.ctx.Done():
		return nil, fmt.Errorf("sprocket context cancelled")
	}
}

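// Sketch of how an ingest path could consult the sprocket before persisting
// an event (caller-side names such as store and sendOK are assumptions, as is
// the fail-open policy when the sprocket is unavailable):
//
//	resp, err := sm.ProcessEvent(evt)
//	switch {
//	case err != nil:
//		store(evt) // fail open on sprocket error or timeout
//	case resp.Action == "reject":
//		sendOK(false, resp.Msg) // NIP-20 style rejection
//	case resp.Action == "shadowReject":
//		sendOK(true, "") // acknowledge but silently drop the event
//	default: // "accept"
//		store(evt)
//	}
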
// readResponses reads JSONL responses from the sprocket script
func (sm *SprocketManager) readResponses() {
	if sm.stdout == nil {
		return
	}

	scanner := bufio.NewScanner(sm.stdout)
	for scanner.Scan() {
		line := scanner.Text()
		if line == "" {
			continue
		}

		var response SprocketResponse
		if err := json.Unmarshal([]byte(line), &response); chk.E(err) {
			log.E.F("failed to parse sprocket response: %v", err)
			continue
		}

		// Send response to channel (non-blocking)
		select {
		case sm.responseChan <- response:
		default:
			log.W.F("sprocket response channel full, dropping response")
		}
	}

	if err := scanner.Err(); chk.E(err) {
		log.E.F("error reading sprocket responses: %v", err)
	}
}

// IsEnabled returns whether sprocket is enabled
func (sm *SprocketManager) IsEnabled() bool {
	return sm.enabled
}

// IsRunning returns whether sprocket is currently running
func (sm *SprocketManager) IsRunning() bool {
	sm.mutex.RLock()
	defer sm.mutex.RUnlock()
	return sm.isRunning
}

// monitorProcess monitors the sprocket process and cleans up when it exits
func (sm *SprocketManager) monitorProcess() {
	if sm.currentCmd == nil {
		return
	}

	err := sm.currentCmd.Wait()

	sm.mutex.Lock()
	defer sm.mutex.Unlock()

	// Clean up pipes
	if sm.stdin != nil {
		sm.stdin.Close()
		sm.stdin = nil
	}
	if sm.stdout != nil {
		sm.stdout.Close()
		sm.stdout = nil
	}
	if sm.stderr != nil {
		sm.stderr.Close()
		sm.stderr = nil
	}

	sm.isRunning = false
	sm.currentCmd = nil
	sm.currentCancel = nil

	if err != nil {
		log.E.F("sprocket process exited with error: %v", err)
	} else {
		log.I.F("sprocket process exited normally")
	}
}

// Shutdown gracefully shuts down the sprocket manager
func (sm *SprocketManager) Shutdown() {
	sm.cancel()
	if sm.isRunning {
		chk.E(sm.StopSprocket()) // log the error, if any
	}
}

@@ -16,4 +16,10 @@ func GetReactAppFS() http.FileSystem {
		panic("Failed to load embedded web app: " + err.Error())
	}
	return http.FS(webDist)
}
+
+// ServeEmbeddedWeb serves the embedded web application
+func ServeEmbeddedWeb(w http.ResponseWriter, r *http.Request) {
+	// Serve the embedded web app
+	http.FileServer(GetReactAppFS()).ServeHTTP(w, r)
+}

41	app/web/.gitignore	vendored
@@ -1,30 +1,11 @@
|
||||
# Dependencies
|
||||
node_modules
|
||||
.pnp
|
||||
.pnp.js
|
||||
|
||||
# Bun
|
||||
.bunfig.toml
|
||||
bun.lockb
|
||||
|
||||
# Build directories
|
||||
build
|
||||
|
||||
# Cache and logs
|
||||
.cache
|
||||
.temp
|
||||
.log
|
||||
*.log
|
||||
|
||||
# Environment variables
|
||||
.env
|
||||
.env.local
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
|
||||
# Editor directories and files
|
||||
.idea
|
||||
.vscode
|
||||
*.swp
|
||||
*.swo
|
||||
node_modules/
|
||||
dist/
|
||||
.vite/
|
||||
.tanstack/
|
||||
.idea/
|
||||
.DS_Store
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
/.idea/
|
||||
|
||||
@@ -1,89 +0,0 @@
# Orly Web Application

This is a React web application that uses Bun for building and bundling, and is automatically embedded into the Go binary when built.

## Prerequisites

- [Bun](https://bun.sh/) - JavaScript runtime and toolkit
- Go 1.16+ (for embedding functionality)

## Development

There are two ways to develop the web app:

1) Standalone (recommended for hot reload)
- Start the Go relay with the embedded web UI disabled so the React app can run on its own dev server with HMR.
- Configure the relay via environment variables:

```bash
# In another shell at repo root
export ORLY_WEB_DISABLE=true
# Optional: if you want same-origin URLs, you can set a proxy target and access the relay on the same port
# export ORLY_WEB_DEV_PROXY_URL=http://localhost:5173

# Start the relay as usual
go run .
```

- Then start the React dev server:

```bash
cd app/web
bun install
bun dev
```

When ORLY_WEB_DISABLE=true is set, the Go server still serves the API and websocket endpoints and sends permissive CORS headers, so the dev server can access them cross-origin. If ORLY_WEB_DEV_PROXY_URL is set, the Go server will reverse-proxy non-/api paths to the dev server so you can use the same origin.

2) Embedded (no hot reload)
- Build the web app and run the Go server with defaults:

```bash
cd app/web
bun install
bun run build
cd ../../
go run .
```

## Building

The React application needs to be built before compiling the Go binary to ensure that the embedded files are available:

```bash
# Build the React application
cd app/web
bun install
bun run build

# Build the Go binary from project root
cd ../../
go build
```

## How it works

1. The React application is built to the `app/web/dist` directory
2. The Go embed directive in `app/web.go` embeds these files into the binary (see the sketch after this list)
3. When the server runs, it serves the embedded React app at the root path

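A minimal sketch of that embed wiring, assuming the build output lands in `app/web/dist`; the real `app/web.go` may differ in details:

```go
package app

import (
	"embed"
	"io/fs"
	"net/http"
)

// Embed the built web app into the binary at compile time.
//go:embed all:web/dist
var webApp embed.FS

// GetReactAppFS exposes the built web app as an http.FileSystem rooted at
// the dist directory, so it can be served directly by http.FileServer.
func GetReactAppFS() http.FileSystem {
	webDist, err := fs.Sub(webApp, "web/dist")
	if err != nil {
		panic("Failed to load embedded web app: " + err.Error())
	}
	return http.FS(webDist)
}
```
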

## Build Automation

You can create a shell script to automate the build process:

```bash
#!/bin/bash
# build.sh
echo "Building React app..."
cd app/web
bun install
bun run build

echo "Building Go binary..."
cd ../../
go build

echo "Build complete!"
```

Make it executable with `chmod +x build.sh` and run with `./build.sh`.

182	app/web/bun.lock
@@ -2,35 +2,189 @@
"lockfileVersion": 1,
|
||||
"workspaces": {
|
||||
"": {
|
||||
"name": "orly-web",
|
||||
"name": "svelte-app",
|
||||
"dependencies": {
|
||||
"react": "^18.2.0",
|
||||
"react-dom": "^18.2.0",
|
||||
"sirv-cli": "^2.0.0",
|
||||
},
|
||||
"devDependencies": {
|
||||
"bun-types": "latest",
|
||||
"@rollup/plugin-commonjs": "^24.0.0",
|
||||
"@rollup/plugin-node-resolve": "^15.0.0",
|
||||
"@rollup/plugin-terser": "^0.4.0",
|
||||
"rollup": "^3.15.0",
|
||||
"rollup-plugin-css-only": "^4.3.0",
|
||||
"rollup-plugin-livereload": "^2.0.0",
|
||||
"rollup-plugin-svelte": "^7.1.2",
|
||||
"svelte": "^3.55.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
"packages": {
|
||||
"@types/node": ["@types/node@24.5.2", "", { "dependencies": { "undici-types": "~7.12.0" } }, "sha512-FYxk1I7wPv3K2XBaoyH2cTnocQEu8AOZ60hPbsyukMPLv5/5qr7V1i8PLHdl6Zf87I+xZXFvPCXYjiTFq+YSDQ=="],
|
||||
"@jridgewell/gen-mapping": ["@jridgewell/gen-mapping@0.3.13", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA=="],
|
||||
|
||||
"@types/react": ["@types/react@19.1.13", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-hHkbU/eoO3EG5/MZkuFSKmYqPbSVk5byPFa3e7y/8TybHiLMACgI8seVYlicwk7H5K/rI2px9xrQp/C+AUDTiQ=="],
|
||||
"@jridgewell/resolve-uri": ["@jridgewell/resolve-uri@3.1.2", "", {}, "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw=="],
|
||||
|
||||
"bun-types": ["bun-types@1.2.22", "", { "dependencies": { "@types/node": "*" }, "peerDependencies": { "@types/react": "^19" } }, "sha512-hwaAu8tct/Zn6Zft4U9BsZcXkYomzpHJX28ofvx7k0Zz2HNz54n1n+tDgxoWFGB4PcFvJXJQloPhaV2eP3Q6EA=="],
|
||||
"@jridgewell/source-map": ["@jridgewell/source-map@0.3.11", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.25" } }, "sha512-ZMp1V8ZFcPG5dIWnQLr3NSI1MiCU7UETdS/A0G8V/XWHvJv3ZsFqutJn1Y5RPmAPX6F3BiE397OqveU/9NCuIA=="],
|
||||
|
||||
"csstype": ["csstype@3.1.3", "", {}, "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw=="],
|
||||
"@jridgewell/sourcemap-codec": ["@jridgewell/sourcemap-codec@1.5.5", "", {}, "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og=="],
|
||||
|
||||
"js-tokens": ["js-tokens@4.0.0", "", {}, "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="],
|
||||
"@jridgewell/trace-mapping": ["@jridgewell/trace-mapping@0.3.31", "", { "dependencies": { "@jridgewell/resolve-uri": "^3.1.0", "@jridgewell/sourcemap-codec": "^1.4.14" } }, "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw=="],
|
||||
|
||||
"loose-envify": ["loose-envify@1.4.0", "", { "dependencies": { "js-tokens": "^3.0.0 || ^4.0.0" }, "bin": { "loose-envify": "cli.js" } }, "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q=="],
|
||||
"@polka/url": ["@polka/url@1.0.0-next.29", "", {}, "sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww=="],
|
||||
|
||||
"react": ["react@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ=="],
|
||||
"@rollup/plugin-commonjs": ["@rollup/plugin-commonjs@24.1.0", "", { "dependencies": { "@rollup/pluginutils": "^5.0.1", "commondir": "^1.0.1", "estree-walker": "^2.0.2", "glob": "^8.0.3", "is-reference": "1.2.1", "magic-string": "^0.27.0" }, "peerDependencies": { "rollup": "^2.68.0||^3.0.0" }, "optionalPeers": ["rollup"] }, "sha512-eSL45hjhCWI0jCCXcNtLVqM5N1JlBGvlFfY0m6oOYnLCJ6N0qEXoZql4sY2MOUArzhH4SA/qBpTxvvZp2Sc+DQ=="],
|
||||
|
||||
"react-dom": ["react-dom@18.3.1", "", { "dependencies": { "loose-envify": "^1.1.0", "scheduler": "^0.23.2" }, "peerDependencies": { "react": "^18.3.1" } }, "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw=="],
|
||||
"@rollup/plugin-node-resolve": ["@rollup/plugin-node-resolve@15.3.1", "", { "dependencies": { "@rollup/pluginutils": "^5.0.1", "@types/resolve": "1.20.2", "deepmerge": "^4.2.2", "is-module": "^1.0.0", "resolve": "^1.22.1" }, "peerDependencies": { "rollup": "^2.78.0||^3.0.0||^4.0.0" }, "optionalPeers": ["rollup"] }, "sha512-tgg6b91pAybXHJQMAAwW9VuWBO6Thi+q7BCNARLwSqlmsHz0XYURtGvh/AuwSADXSI4h/2uHbs7s4FzlZDGSGA=="],
|
||||
|
||||
"scheduler": ["scheduler@0.23.2", "", { "dependencies": { "loose-envify": "^1.1.0" } }, "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ=="],
|
||||
"@rollup/plugin-terser": ["@rollup/plugin-terser@0.4.4", "", { "dependencies": { "serialize-javascript": "^6.0.1", "smob": "^1.0.0", "terser": "^5.17.4" }, "peerDependencies": { "rollup": "^2.0.0||^3.0.0||^4.0.0" }, "optionalPeers": ["rollup"] }, "sha512-XHeJC5Bgvs8LfukDwWZp7yeqin6ns8RTl2B9avbejt6tZqsqvVoWI7ZTQrcNsfKEDWBTnTxM8nMDkO2IFFbd0A=="],
|
||||
|
||||
"undici-types": ["undici-types@7.12.0", "", {}, "sha512-goOacqME2GYyOZZfb5Lgtu+1IDmAlAEu5xnD3+xTzS10hT0vzpf0SPjkXwAw9Jm+4n/mQGDP3LO8CPbYROeBfQ=="],
|
||||
"@rollup/pluginutils": ["@rollup/pluginutils@5.3.0", "", { "dependencies": { "@types/estree": "^1.0.0", "estree-walker": "^2.0.2", "picomatch": "^4.0.2" }, "peerDependencies": { "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" }, "optionalPeers": ["rollup"] }, "sha512-5EdhGZtnu3V88ces7s53hhfK5KSASnJZv8Lulpc04cWO3REESroJXg73DFsOmgbU2BhwV0E20bu2IDZb3VKW4Q=="],
|
||||
|
||||
"@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="],
|
||||
|
||||
"@types/resolve": ["@types/resolve@1.20.2", "", {}, "sha512-60BCwRFOZCQhDncwQdxxeOEEkbc5dIMccYLwbxsS4TUNeVECQ/pBJ0j09mrHOl/JJvpRPGwO9SvE4nR2Nb/a4Q=="],
|
||||
|
||||
"acorn": ["acorn@8.15.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg=="],
|
||||
|
||||
"anymatch": ["anymatch@3.1.3", "", { "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" } }, "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw=="],
|
||||
|
||||
"balanced-match": ["balanced-match@1.0.2", "", {}, "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="],
|
||||
|
||||
"binary-extensions": ["binary-extensions@2.3.0", "", {}, "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw=="],
|
||||
|
||||
"brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="],
|
||||
|
||||
"braces": ["braces@3.0.3", "", { "dependencies": { "fill-range": "^7.1.1" } }, "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA=="],
|
||||
|
||||
"buffer-from": ["buffer-from@1.1.2", "", {}, "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ=="],
|
||||
|
||||
"chokidar": ["chokidar@3.6.0", "", { "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", "glob-parent": "~5.1.2", "is-binary-path": "~2.1.0", "is-glob": "~4.0.1", "normalize-path": "~3.0.0", "readdirp": "~3.6.0" }, "optionalDependencies": { "fsevents": "~2.3.2" } }, "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw=="],
|
||||
|
||||
"commander": ["commander@2.20.3", "", {}, "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ=="],
|
||||
|
||||
"commondir": ["commondir@1.0.1", "", {}, "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg=="],
|
||||
|
||||
"console-clear": ["console-clear@1.1.1", "", {}, "sha512-pMD+MVR538ipqkG5JXeOEbKWS5um1H4LUUccUQG68qpeqBYbzYy79Gh55jkd2TtPdRfUaLWdv6LPP//5Zt0aPQ=="],
|
||||
|
||||
"deepmerge": ["deepmerge@4.3.1", "", {}, "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A=="],
|
||||
|
||||
"estree-walker": ["estree-walker@2.0.2", "", {}, "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w=="],
|
||||
|
||||
"fill-range": ["fill-range@7.1.1", "", { "dependencies": { "to-regex-range": "^5.0.1" } }, "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg=="],
|
||||
|
||||
"fs.realpath": ["fs.realpath@1.0.0", "", {}, "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw=="],
|
||||
|
||||
"fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="],
|
||||
|
||||
"function-bind": ["function-bind@1.1.2", "", {}, "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="],
|
||||
|
||||
"get-port": ["get-port@3.2.0", "", {}, "sha512-x5UJKlgeUiNT8nyo/AcnwLnZuZNcSjSw0kogRB+Whd1fjjFq4B1hySFxSFWWSn4mIBzg3sRNUDFYc4g5gjPoLg=="],
|
||||
|
||||
"glob": ["glob@8.1.0", "", { "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", "inherits": "2", "minimatch": "^5.0.1", "once": "^1.3.0" } }, "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ=="],
|
||||
|
||||
"glob-parent": ["glob-parent@5.1.2", "", { "dependencies": { "is-glob": "^4.0.1" } }, "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow=="],
|
||||
|
||||
"hasown": ["hasown@2.0.2", "", { "dependencies": { "function-bind": "^1.1.2" } }, "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ=="],
|
||||
|
||||
"inflight": ["inflight@1.0.6", "", { "dependencies": { "once": "^1.3.0", "wrappy": "1" } }, "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA=="],
|
||||
|
||||
"inherits": ["inherits@2.0.4", "", {}, "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="],
|
||||
|
||||
"is-binary-path": ["is-binary-path@2.1.0", "", { "dependencies": { "binary-extensions": "^2.0.0" } }, "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw=="],
|
||||
|
||||
"is-core-module": ["is-core-module@2.16.1", "", { "dependencies": { "hasown": "^2.0.2" } }, "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w=="],
|
||||
|
||||
"is-extglob": ["is-extglob@2.1.1", "", {}, "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="],
|
||||
|
||||
"is-glob": ["is-glob@4.0.3", "", { "dependencies": { "is-extglob": "^2.1.1" } }, "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg=="],
|
||||
|
||||
"is-module": ["is-module@1.0.0", "", {}, "sha512-51ypPSPCoTEIN9dy5Oy+h4pShgJmPCygKfyRCISBI+JoWT/2oJvK8QPxmwv7b/p239jXrm9M1mlQbyKJ5A152g=="],
|
||||
|
||||
"is-number": ["is-number@7.0.0", "", {}, "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="],
|
||||
|
||||
"is-reference": ["is-reference@1.2.1", "", { "dependencies": { "@types/estree": "*" } }, "sha512-U82MsXXiFIrjCK4otLT+o2NA2Cd2g5MLoOVXUZjIOhLurrRxpEXzI8O0KZHr3IjLvlAH1kTPYSuqer5T9ZVBKQ=="],
|
||||
|
||||
"kleur": ["kleur@4.1.5", "", {}, "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ=="],
|
||||
|
||||
"livereload": ["livereload@0.9.3", "", { "dependencies": { "chokidar": "^3.5.0", "livereload-js": "^3.3.1", "opts": ">= 1.2.0", "ws": "^7.4.3" }, "bin": { "livereload": "bin/livereload.js" } }, "sha512-q7Z71n3i4X0R9xthAryBdNGVGAO2R5X+/xXpmKeuPMrteg+W2U8VusTKV3YiJbXZwKsOlFlHe+go6uSNjfxrZw=="],
|
||||
|
||||
"livereload-js": ["livereload-js@3.4.1", "", {}, "sha512-5MP0uUeVCec89ZbNOT/i97Mc+q3SxXmiUGhRFOTmhrGPn//uWVQdCvcLJDy64MSBR5MidFdOR7B9viumoavy6g=="],
|
||||
|
||||
"local-access": ["local-access@1.1.0", "", {}, "sha512-XfegD5pyTAfb+GY6chk283Ox5z8WexG56OvM06RWLpAc/UHozO8X6xAxEkIitZOtsSMM1Yr3DkHgW5W+onLhCw=="],
|
||||
|
||||
"magic-string": ["magic-string@0.27.0", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.4.13" } }, "sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA=="],
|
||||
|
||||
"minimatch": ["minimatch@5.1.6", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g=="],
|
||||
|
||||
"mri": ["mri@1.2.0", "", {}, "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA=="],
|
||||
|
||||
"mrmime": ["mrmime@2.0.1", "", {}, "sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ=="],
|
||||
|
||||
"normalize-path": ["normalize-path@3.0.0", "", {}, "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA=="],
|
||||
|
||||
"once": ["once@1.4.0", "", { "dependencies": { "wrappy": "1" } }, "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w=="],
|
||||
|
||||
"opts": ["opts@2.0.2", "", {}, "sha512-k41FwbcLnlgnFh69f4qdUfvDQ+5vaSDnVPFI/y5XuhKRq97EnVVneO9F1ESVCdiVu4fCS2L8usX3mU331hB7pg=="],
|
||||
|
||||
"path-parse": ["path-parse@1.0.7", "", {}, "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="],
|
||||
|
||||
"picomatch": ["picomatch@4.0.3", "", {}, "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="],
|
||||
|
||||
"randombytes": ["randombytes@2.1.0", "", { "dependencies": { "safe-buffer": "^5.1.0" } }, "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ=="],
|
||||
|
||||
"readdirp": ["readdirp@3.6.0", "", { "dependencies": { "picomatch": "^2.2.1" } }, "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA=="],
|
||||
|
||||
"resolve": ["resolve@1.22.10", "", { "dependencies": { "is-core-module": "^2.16.0", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, "bin": { "resolve": "bin/resolve" } }, "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w=="],
|
||||
|
||||
"resolve.exports": ["resolve.exports@2.0.3", "", {}, "sha512-OcXjMsGdhL4XnbShKpAcSqPMzQoYkYyhbEaeSko47MjRP9NfEQMhZkXL1DoFlt9LWQn4YttrdnV6X2OiyzBi+A=="],
|
||||
|
||||
"rollup": ["rollup@3.29.5", "", { "optionalDependencies": { "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-GVsDdsbJzzy4S/v3dqWPJ7EfvZJfCHiDqe80IyrF59LYuP+e6U1LJoUqeuqRbwAWoMNoXivMNeNAOf5E22VA1w=="],
|
||||
|
||||
"rollup-plugin-css-only": ["rollup-plugin-css-only@4.5.5", "", { "dependencies": { "@rollup/pluginutils": "5" }, "peerDependencies": { "rollup": "<5" } }, "sha512-O2m2Sj8qsAtjUVqZyGTDXJypaOFFNV4knz8OlS6wJBws6XEICIiLsXmI56SbQEmWDqYU5TgRgWmslGj4THofJQ=="],
|
||||
|
||||
"rollup-plugin-livereload": ["rollup-plugin-livereload@2.0.5", "", { "dependencies": { "livereload": "^0.9.1" } }, "sha512-vqQZ/UQowTW7VoiKEM5ouNW90wE5/GZLfdWuR0ELxyKOJUIaj+uismPZZaICU4DnWPVjnpCDDxEqwU7pcKY/PA=="],
|
||||
|
||||
"rollup-plugin-svelte": ["rollup-plugin-svelte@7.2.3", "", { "dependencies": { "@rollup/pluginutils": "^4.1.0", "resolve.exports": "^2.0.0" }, "peerDependencies": { "rollup": ">=2.0.0", "svelte": ">=3.5.0" } }, "sha512-LlniP+h00DfM+E4eav/Kk8uGjgPUjGIBfrAS/IxQvsuFdqSM0Y2sXf31AdxuIGSW9GsmocDqOfaxR5QNno/Tgw=="],
|
||||
|
||||
"sade": ["sade@1.8.1", "", { "dependencies": { "mri": "^1.1.0" } }, "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A=="],
|
||||
|
||||
"safe-buffer": ["safe-buffer@5.2.1", "", {}, "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="],
|
||||
|
||||
"semiver": ["semiver@1.1.0", "", {}, "sha512-QNI2ChmuioGC1/xjyYwyZYADILWyW6AmS1UH6gDj/SFUUUS4MBAWs/7mxnkRPc/F4iHezDP+O8t0dO8WHiEOdg=="],
|
||||
|
||||
"serialize-javascript": ["serialize-javascript@6.0.2", "", { "dependencies": { "randombytes": "^2.1.0" } }, "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g=="],
|
||||
|
||||
"sirv": ["sirv@2.0.4", "", { "dependencies": { "@polka/url": "^1.0.0-next.24", "mrmime": "^2.0.0", "totalist": "^3.0.0" } }, "sha512-94Bdh3cC2PKrbgSOUqTiGPWVZeSiXfKOVZNJniWoqrWrRkB1CJzBU3NEbiTsPcYy1lDsANA/THzS+9WBiy5nfQ=="],
|
||||
|
||||
"sirv-cli": ["sirv-cli@2.0.2", "", { "dependencies": { "console-clear": "^1.1.0", "get-port": "^3.2.0", "kleur": "^4.1.4", "local-access": "^1.0.1", "sade": "^1.6.0", "semiver": "^1.0.0", "sirv": "^2.0.0", "tinydate": "^1.0.0" }, "bin": { "sirv": "bin.js" } }, "sha512-OtSJDwxsF1NWHc7ps3Sa0s+dPtP15iQNJzfKVz+MxkEo3z72mCD+yu30ct79rPr0CaV1HXSOBp+MIY5uIhHZ1A=="],
|
||||
|
||||
"smob": ["smob@1.5.0", "", {}, "sha512-g6T+p7QO8npa+/hNx9ohv1E5pVCmWrVCUzUXJyLdMmftX6ER0oiWY/w9knEonLpnOp6b6FenKnMfR8gqwWdwig=="],
|
||||
|
||||
"source-map": ["source-map@0.6.1", "", {}, "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="],
|
||||
|
||||
"source-map-support": ["source-map-support@0.5.21", "", { "dependencies": { "buffer-from": "^1.0.0", "source-map": "^0.6.0" } }, "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w=="],
|
||||
|
||||
"supports-preserve-symlinks-flag": ["supports-preserve-symlinks-flag@1.0.0", "", {}, "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w=="],
|
||||
|
||||
"svelte": ["svelte@3.59.2", "", {}, "sha512-vzSyuGr3eEoAtT/A6bmajosJZIUWySzY2CzB3w2pgPvnkUjGqlDnsNnA0PMO+mMAhuyMul6C2uuZzY6ELSkzyA=="],
|
||||
|
||||
"terser": ["terser@5.44.0", "", { "dependencies": { "@jridgewell/source-map": "^0.3.3", "acorn": "^8.15.0", "commander": "^2.20.0", "source-map-support": "~0.5.20" }, "bin": { "terser": "bin/terser" } }, "sha512-nIVck8DK+GM/0Frwd+nIhZ84pR/BX7rmXMfYwyg+Sri5oGVE99/E3KvXqpC2xHFxyqXyGHTKBSioxxplrO4I4w=="],
|
||||
|
||||
"tinydate": ["tinydate@1.3.0", "", {}, "sha512-7cR8rLy2QhYHpsBDBVYnnWXm8uRTr38RoZakFSW7Bs7PzfMPNZthuMLkwqZv7MTu8lhQ91cOFYS5a7iFj2oR3w=="],
|
||||
|
||||
"to-regex-range": ["to-regex-range@5.0.1", "", { "dependencies": { "is-number": "^7.0.0" } }, "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ=="],
|
||||
|
||||
"totalist": ["totalist@3.0.1", "", {}, "sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ=="],
|
||||
|
||||
"wrappy": ["wrappy@1.0.2", "", {}, "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="],
|
||||
|
||||
"ws": ["ws@7.5.10", "", { "peerDependencies": { "bufferutil": "^4.0.1", "utf-8-validate": "^5.0.2" }, "optionalPeers": ["bufferutil", "utf-8-validate"] }, "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ=="],
|
||||
|
||||
"anymatch/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="],
|
||||
|
||||
"readdirp/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="],
|
||||
|
||||
"rollup-plugin-svelte/@rollup/pluginutils": ["@rollup/pluginutils@4.2.1", "", { "dependencies": { "estree-walker": "^2.0.1", "picomatch": "^2.2.2" } }, "sha512-iKnFXr7NkdZAIHiIWE+BX5ULi/ucVFYWD6TbAV+rZctiRTY2PL6tsIKhoIOaoskiWAkgu+VsbXgUVDNLHf+InQ=="],
|
||||
|
||||
"rollup-plugin-svelte/@rollup/pluginutils/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="],
|
||||
}
|
||||
}
|
||||
|
||||
160	app/web/dist/index-bnzmmj1a.js	vendored
File diff suppressed because one or more lines are too long

1	app/web/dist/index-cepjm5g7.css	vendored
File diff suppressed because one or more lines are too long

34	app/web/dist/index.html	vendored
@@ -1,30 +1,14 @@
-<!DOCTYPE html>
+<!doctype html>
 <html lang="en">
 <head>
-    <meta charset="UTF-8" />
-    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <title>Nostr Relay</title>
-
-    <link rel="stylesheet" crossorigin href="./index-cepjm5g7.css"><script type="module" crossorigin src="./index-bnzmmj1a.js"></script></head>
+    <meta charset="utf-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1" />
+    <title>Next Orly</title>
+    <link rel="icon" href="/favicon.png" type="image/png" />
+    <link rel="stylesheet" href="/bundle.css" />
+  </head>
 <body>
-    <script>
-      // Apply system theme preference immediately to avoid flash of wrong theme
-      function applyTheme(isDark) {
-        document.body.classList.remove('bg-white', 'bg-gray-900');
-        document.body.classList.add(isDark ? 'bg-gray-900' : 'bg-white');
-      }
-
-      // Set initial theme
-      applyTheme(window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches);
-
-      // Listen for theme changes
-      if (window.matchMedia) {
-        window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', e => {
-          applyTheme(e.matches);
-        });
-      }
-    </script>
-    <div id="root"></div>
+    <div id="app"></div>
+    <script src="/bundle.js"></script>
 </body>
 </html>

112	app/web/dist/tailwind.min.css	vendored
@@ -1,112 +0,0 @@
/*
  Local Tailwind CSS (minimal subset for this UI)
  Note: This file includes just the utilities used by the app to keep size small.
  You can replace this with a full Tailwind build if desired.
*/

/* Preflight-like resets (very minimal) */
*,::before,::after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}
html,body,#root{height:100%}
html{line-height:1.5;-webkit-text-size-adjust:100%;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,Segoe UI,Roboto,Helvetica,Arial,Noto Sans,"Apple Color Emoji","Segoe UI Emoji"}
body{margin:0}
button,input{font:inherit;color:inherit}
img{display:block;max-width:100%;height:auto}

/* Layout */
.sticky{position:sticky}.relative{position:relative}.absolute{position:absolute}
.top-0{top:0}.left-0{left:0}.inset-0{top:0;right:0;bottom:0;left:0}
.z-50{z-index:50}.z-10{z-index:10}
.block{display:block}.flex{display:flex}
.items-center{align-items:center}.justify-start{justify-content:flex-start}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}
.flex-grow{flex-grow:1}.shrink-0{flex-shrink:0}
.overflow-hidden{overflow:hidden}

/* Sizing */
.w-full{width:100%}.w-auto{width:auto}.w-16{width:4rem}
.h-full{height:100%}.h-16{height:4rem}
.aspect-square{aspect-ratio:1/1}
.max-w-3xl{max-width:48rem}

/* Spacing */
.p-0{padding:0}.p-2{padding:.5rem}.p-3{padding:.75rem}.p-6{padding:1.5rem}
.px-2{padding-left:.5rem;padding-right:.5rem}
.mr-0{margin-right:0}.mr-2{margin-right:.5rem}
.mt-2{margin-top:.5rem}.mt-5{margin-top:1.25rem}
.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-4{margin-bottom:1rem}.mb-5{margin-bottom:1.25rem}
.mx-auto{margin-left:auto;margin-right:auto}

/* Borders & Radius */
.rounded{border-radius:.25rem}.rounded-full{border-radius:9999px}
.border-0{border-width:0}.border-2{border-width:2px}
.border-white{border-color:#fff}
.border{border-width:1px}.border-gray-300{border-color:#d1d5db}.border-gray-600{border-color:#4b5563}
.border-red-500{border-color:#ef4444}.border-red-700{border-color:#b91c1c}

/* Colors / Backgrounds */
.bg-white{background-color:#fff}
.bg-gray-100{background-color:#f3f4f6}
.bg-gray-200{background-color:#e5e7eb}
.bg-gray-300{background-color:#d1d5db}
.bg-gray-600{background-color:#4b5563}
.bg-gray-700{background-color:#374151}
.bg-gray-800{background-color:#1f2937}
.bg-gray-900{background-color:#111827}
.bg-blue-500{background-color:#3b82f6}
.bg-blue-600{background-color:#2563eb}.hover\:bg-blue-700:hover{background-color:#1d4ed8}
.hover\:bg-blue-600:hover{background-color:#2563eb}
.bg-red-600{background-color:#dc2626}.hover\:bg-red-700:hover{background-color:#b91c1c}
.bg-cyan-100{background-color:#cffafe}
.bg-green-100{background-color:#d1fae5}
.bg-red-100{background-color:#fee2e2}
.bg-red-50{background-color:#fef2f2}
.bg-green-900{background-color:#064e3b}
.bg-red-900{background-color:#7f1d1d}
.bg-cyan-900{background-color:#164e63}
.bg-cover{background-size:cover}.bg-center{background-position:center}
.bg-transparent{background-color:transparent}

/* Text */
.text-left{text-align:left}
.text-white{color:#fff}
.text-gray-300{color:#d1d5db}
.text-gray-500{color:#6b7280}.hover\:text-gray-800:hover{color:#1f2937}
.hover\:text-gray-100:hover{color:#f3f4f6}
.text-gray-700{color:#374151}
.text-gray-800{color:#1f2937}
.text-gray-900{color:#111827}
.text-gray-100{color:#f3f4f6}
.text-green-800{color:#065f46}
.text-green-100{color:#dcfce7}
.text-red-800{color:#991b1b}
.text-red-200{color:#fecaca}
.text-red-100{color:#fee2e2}
.text-cyan-800{color:#155e75}
.text-cyan-100{color:#cffafe}
.text-base{font-size:1rem;line-height:1.5rem}
.text-lg{font-size:1.125rem;line-height:1.75rem}
.text-2xl{font-size:1.5rem;line-height:2rem}
.font-bold{font-weight:700}

/* Opacity */
.opacity-70{opacity:.7}

/* Effects */
.shadow{--tw-shadow:0 1px 3px 0 rgba(0,0,0,0.1),0 1px 2px -1px rgba(0,0,0,0.1);box-shadow:var(--tw-shadow)}

/* Cursor */
.cursor-pointer{cursor:pointer}

/* Box model */
.box-border{box-sizing:border-box}

/* Utilities */
.hover\:bg-transparent:hover{background-color:transparent}
.hover\:bg-gray-200:hover{background-color:#e5e7eb}
.hover\:bg-gray-600:hover{background-color:#4b5563}
.focus\:ring-2:focus{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}
.focus\:ring-blue-200:focus{--tw-ring-color:rgba(191, 219, 254, var(--tw-ring-opacity))}
.focus\:ring-blue-500:focus{--tw-ring-color:rgba(59, 130, 246, var(--tw-ring-opacity))}
.disabled\:opacity-50:disabled{opacity:.5}
.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}

/* Height for avatar images in header already inherit from container */

BIN	app/web/favicon.ico	Normal file
Binary file not shown.
After Width: | Height: | Size: 485 KiB

@@ -1,18 +1,24 @@
 {
-  "name": "orly-web",
-  "version": "0.1.0",
+  "name": "svelte-app",
+  "version": "1.0.0",
   "private": true,
   "type": "module",
   "scripts": {
-    "dev": "bun --hot --port 5173 public/dev.html",
-    "build": "rm -rf dist && bun build ./public/index.html --outdir ./dist --minify --splitting && cp -r public/tailwind.min.css dist/",
-    "preview": "bun x serve dist"
-  },
-  "dependencies": {
-    "react": "^18.2.0",
-    "react-dom": "^18.2.0"
+    "build": "rollup -c",
+    "dev": "rollup -c -w",
+    "start": "sirv public --no-clear"
   },
   "devDependencies": {
-    "bun-types": "latest"
+    "@rollup/plugin-commonjs": "^24.0.0",
+    "@rollup/plugin-node-resolve": "^15.0.0",
+    "@rollup/plugin-terser": "^0.4.0",
+    "rollup": "^3.15.0",
+    "rollup-plugin-css-only": "^4.3.0",
+    "rollup-plugin-livereload": "^2.0.0",
+    "rollup-plugin-svelte": "^7.1.2",
+    "svelte": "^3.55.0"
+  },
+  "dependencies": {
+    "sirv-cli": "^2.0.0"
   }
 }

2	app/web/public/build/bundle.css	Normal file
File diff suppressed because one or more lines are too long

2	app/web/public/build/bundle.js	Normal file
File diff suppressed because one or more lines are too long
@@ -1,13 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Nostr Relay (Dev)</title>
    <link rel="stylesheet" href="tailwind.min.css" />
</head>
<body class="bg-white">
    <div id="root"></div>
    <script type="module" src="/src/index.jsx"></script>
</body>
</html>
BIN	app/web/public/favicon.png	Normal file
Binary file not shown.
After Width: | Height: | Size: 3.1 KiB
69	app/web/public/global.css	Normal file
@@ -0,0 +1,69 @@
html,
body {
  position: relative;
  width: 100%;
  height: 100%;
}

body {
  color: #333;
  margin: 0;
  padding: 8px;
  box-sizing: border-box;
  font-family:
    -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen-Sans, Ubuntu,
    Cantarell, "Helvetica Neue", sans-serif;
}

a {
  color: rgb(0, 100, 200);
  text-decoration: none;
}

a:hover {
  text-decoration: underline;
}

a:visited {
  color: rgb(0, 80, 160);
}

label {
  display: block;
}

input,
button,
select,
textarea {
  font-family: inherit;
  font-size: inherit;
  -webkit-padding: 0.4em 0;
  padding: 0.4em;
  margin: 0 0 0.5em 0;
  box-sizing: border-box;
  border: 1px solid #ccc;
  border-radius: 2px;
}

input:disabled {
  color: #ccc;
}

button {
  color: #333;
  background-color: #f4f4f4;
  outline: none;
}

button:disabled {
  color: #999;
}

button:not(:disabled):active {
  background-color: #ddd;
}

button:focus {
  border-color: #666;
}

@@ -1,30 +1,17 @@
-<!DOCTYPE html>
+<!doctype html>
 <html lang="en">
 <head>
-    <meta charset="UTF-8" />
-    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-    <title>Nostr Relay</title>
-    <link rel="stylesheet" href="tailwind.min.css" />
+    <meta charset="utf-8" />
+    <meta name="viewport" content="width=device-width,initial-scale=1" />
+
+    <title>ORLY?</title>
+
+    <link rel="icon" type="image/png" href="/orly.png" />
+    <link rel="stylesheet" href="/global.css" />
+    <link rel="stylesheet" href="/build/bundle.css" />
+
+    <script defer src="/build/bundle.js"></script>
 </head>
-<body>
-    <script>
-        // Apply system theme preference immediately to avoid flash of wrong theme
-        function applyTheme(isDark) {
-            document.body.classList.remove('bg-white', 'bg-gray-900');
-            document.body.classList.add(isDark ? 'bg-gray-900' : 'bg-white');
-        }
-
-        // Set initial theme
-        applyTheme(window.matchMedia && window.matchMedia('(prefers-color-scheme: dark)').matches);
-
-        // Listen for theme changes
-        if (window.matchMedia) {
-            window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', e => {
-                applyTheme(e.matches);
-            });
-        }
-    </script>
-    <div id="root"></div>
-    <script type="module" src="/src/index.jsx"></script>
-</body>
+
+<body></body>
 </html>
BIN app/web/public/orly.png Normal file
Binary file not shown.
After Width: | Height: | Size: 514 KiB
112 app/web/public/tailwind.min.css vendored
@@ -1,112 +0,0 @@
/*
Local Tailwind CSS (minimal subset for this UI)
Note: This file includes just the utilities used by the app to keep size small.
You can replace this with a full Tailwind build if desired.
*/

/* Preflight-like resets (very minimal) */
*,::before,::after{box-sizing:border-box;border-width:0;border-style:solid;border-color:#e5e7eb}
html,body,#root{height:100%}
html{line-height:1.5;-webkit-text-size-adjust:100%;tab-size:4;font-family:ui-sans-serif,system-ui,-apple-system,Segoe UI,Roboto,Helvetica,Arial,Noto Sans,"Apple Color Emoji","Segoe UI Emoji"}
body{margin:0}
button,input{font:inherit;color:inherit}
img{display:block;max-width:100%;height:auto}

/* Layout */
.sticky{position:sticky}.relative{position:relative}.absolute{position:absolute}
.top-0{top:0}.left-0{left:0}.inset-0{top:0;right:0;bottom:0;left:0}
.z-50{z-index:50}.z-10{z-index:10}
.block{display:block}.flex{display:flex}
.items-center{align-items:center}.justify-start{justify-content:flex-start}.justify-center{justify-content:center}.justify-end{justify-content:flex-end}
.flex-grow{flex-grow:1}.shrink-0{flex-shrink:0}
.overflow-hidden{overflow:hidden}

/* Sizing */
.w-full{width:100%}.w-auto{width:auto}.w-16{width:4rem}
.h-full{height:100%}.h-16{height:4rem}
.aspect-square{aspect-ratio:1/1}
.max-w-3xl{max-width:48rem}

/* Spacing */
.p-0{padding:0}.p-2{padding:.5rem}.p-3{padding:.75rem}.p-6{padding:1.5rem}
.px-2{padding-left:.5rem;padding-right:.5rem}
.mr-0{margin-right:0}.mr-2{margin-right:.5rem}
.mt-2{margin-top:.5rem}.mt-5{margin-top:1.25rem}
.mb-1{margin-bottom:.25rem}.mb-2{margin-bottom:.5rem}.mb-4{margin-bottom:1rem}.mb-5{margin-bottom:1.25rem}
.mx-auto{margin-left:auto;margin-right:auto}

/* Borders & Radius */
.rounded{border-radius:.25rem}.rounded-full{border-radius:9999px}
.border-0{border-width:0}.border-2{border-width:2px}
.border-white{border-color:#fff}
.border{border-width:1px}.border-gray-300{border-color:#d1d5db}.border-gray-600{border-color:#4b5563}
.border-red-500{border-color:#ef4444}.border-red-700{border-color:#b91c1c}

/* Colors / Backgrounds */
.bg-white{background-color:#fff}
.bg-gray-100{background-color:#f3f4f6}
.bg-gray-200{background-color:#e5e7eb}
.bg-gray-300{background-color:#d1d5db}
.bg-gray-600{background-color:#4b5563}
.bg-gray-700{background-color:#374151}
.bg-gray-800{background-color:#1f2937}
.bg-gray-900{background-color:#111827}
.bg-blue-500{background-color:#3b82f6}
.bg-blue-600{background-color:#2563eb}.hover\:bg-blue-700:hover{background-color:#1d4ed8}
.hover\:bg-blue-600:hover{background-color:#2563eb}
.bg-red-600{background-color:#dc2626}.hover\:bg-red-700:hover{background-color:#b91c1c}
.bg-cyan-100{background-color:#cffafe}
.bg-green-100{background-color:#d1fae5}
.bg-red-100{background-color:#fee2e2}
.bg-red-50{background-color:#fef2f2}
.bg-green-900{background-color:#064e3b}
.bg-red-900{background-color:#7f1d1d}
.bg-cyan-900{background-color:#164e63}
.bg-cover{background-size:cover}.bg-center{background-position:center}
.bg-transparent{background-color:transparent}

/* Text */
.text-left{text-align:left}
.text-white{color:#fff}
.text-gray-300{color:#d1d5db}
.text-gray-500{color:#6b7280}.hover\:text-gray-800:hover{color:#1f2937}
.hover\:text-gray-100:hover{color:#f3f4f6}
.text-gray-700{color:#374151}
.text-gray-800{color:#1f2937}
.text-gray-900{color:#111827}
.text-gray-100{color:#f3f4f6}
.text-green-800{color:#065f46}
.text-green-100{color:#dcfce7}
.text-red-800{color:#991b1b}
.text-red-200{color:#fecaca}
.text-red-100{color:#fee2e2}
.text-cyan-800{color:#155e75}
.text-cyan-100{color:#cffafe}
.text-base{font-size:1rem;line-height:1.5rem}
.text-lg{font-size:1.125rem;line-height:1.75rem}
.text-2xl{font-size:1.5rem;line-height:2rem}
.font-bold{font-weight:700}

/* Opacity */
.opacity-70{opacity:.7}

/* Effects */
.shadow{--tw-shadow:0 1px 3px 0 rgba(0,0,0,0.1),0 1px 2px -1px rgba(0,0,0,0.1);box-shadow:var(--tw-shadow)}

/* Cursor */
.cursor-pointer{cursor:pointer}

/* Box model */
.box-border{box-sizing:border-box}

/* Utilities */
.hover\:bg-transparent:hover{background-color:transparent}
.hover\:bg-gray-200:hover{background-color:#e5e7eb}
.hover\:bg-gray-600:hover{background-color:#4b5563}
.focus\:ring-2:focus{--tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);--tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);box-shadow:var(--tw-ring-offset-shadow),var(--tw-ring-shadow),var(--tw-shadow, 0 0 #0000)}
.focus\:ring-blue-200:focus{--tw-ring-color:rgba(191, 219, 254, var(--tw-ring-opacity))}
.focus\:ring-blue-500:focus{--tw-ring-color:rgba(59, 130, 246, var(--tw-ring-opacity))}
.disabled\:opacity-50:disabled{opacity:.5}
.disabled\:cursor-not-allowed:disabled{cursor:not-allowed}

/* Height for avatar images in header already inherit from container */
3 app/web/readme.adoc Normal file
@@ -0,0 +1,3 @@
= nostrly.app

a simple, material design nostr kind 1 note client
78 app/web/rollup.config.js Normal file
@@ -0,0 +1,78 @@
import { spawn } from "child_process";
import svelte from "rollup-plugin-svelte";
import commonjs from "@rollup/plugin-commonjs";
import terser from "@rollup/plugin-terser";
import resolve from "@rollup/plugin-node-resolve";
import livereload from "rollup-plugin-livereload";
import css from "rollup-plugin-css-only";

const production = !process.env.ROLLUP_WATCH;

function serve() {
  let server;

  function toExit() {
    if (server) server.kill(0);
  }

  return {
    writeBundle() {
      if (server) return;
      server = spawn("npm", ["run", "start", "--", "--dev"], {
        stdio: ["ignore", "inherit", "inherit"],
        shell: true,
      });

      process.on("SIGTERM", toExit);
      process.on("exit", toExit);
    },
  };
}

export default {
  input: "src/main.js",
  output: {
    sourcemap: true,
    format: "iife",
    name: "app",
    file: "dist/bundle.js",
  },
  plugins: [
    svelte({
      compilerOptions: {
        // enable run-time checks when not in production
        dev: !production,
      },
    }),
    // we'll extract any component CSS out into
    // a separate file - better for performance
    css({ output: "bundle.css" }),

    // If you have external dependencies installed from
    // npm, you'll most likely need these plugins. In
    // some cases you'll need additional configuration -
    // consult the documentation for details:
    // https://github.com/rollup/plugins/tree/master/packages/commonjs
    resolve({
      browser: true,
      dedupe: ["svelte"],
      exportConditions: ["svelte"],
    }),
    commonjs(),

    // In dev mode, call `npm run start` once
    // the bundle has been generated
    !production && serve(),

    // Watch the `public` directory and refresh the
    // browser on changes when not in production
    !production && livereload("public"),

    // If we're building for production (npm run build
    // instead of npm run dev), minify
    production && terser(),
  ],
  watch: {
    clearScreen: false,
  },
};
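Note: the config above leans on Rollup ignoring falsy entries in `plugins`, so `!production && serve()` and `production && terser()` switch plugins on and off per mode. A minimal sketch of the pattern in plain JavaScript (illustrative values, not from this repository):

```js
// `cond && plugin()` yields `false` when cond is false; Rollup skips such
// falsy entries, which the explicit filter below makes visible.
const production = false;
const plugins = [
  "svelte()",
  !production && "serve()",  // active only during `rollup -c -w` (dev)
  production && "terser()",  // active only during `rollup -c` (build)
].filter(Boolean);
console.log(plugins); // -> [ 'svelte()', 'serve()' ]
```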
147 app/web/scripts/setupTypeScript.js Normal file
@@ -0,0 +1,147 @@
// @ts-check

/** This script modifies the project to support TS code in .svelte files like:

  <script lang="ts">
    export let name: string;
  </script>

  As well as validating the code for CI.
*/

/** To work on this script:
  rm -rf test-template template && git clone sveltejs/template test-template && node scripts/setupTypeScript.js test-template
*/

import fs from "fs";
import path from "path";
import { argv } from "process";
import url from "url";

const __filename = url.fileURLToPath(import.meta.url);
const __dirname = url.fileURLToPath(new URL(".", import.meta.url));
const projectRoot = argv[2] || path.join(__dirname, "..");

// Add deps to pkg.json
const packageJSON = JSON.parse(
  fs.readFileSync(path.join(projectRoot, "package.json"), "utf8"),
);
packageJSON.devDependencies = Object.assign(packageJSON.devDependencies, {
  "svelte-check": "^3.0.0",
  "svelte-preprocess": "^5.0.0",
  "@rollup/plugin-typescript": "^11.0.0",
  typescript: "^4.9.0",
  tslib: "^2.5.0",
  "@tsconfig/svelte": "^3.0.0",
});

// Add script for checking
packageJSON.scripts = Object.assign(packageJSON.scripts, {
  check: "svelte-check",
});

// Write the package JSON
fs.writeFileSync(
  path.join(projectRoot, "package.json"),
  JSON.stringify(packageJSON, null, "  "),
);

// mv src/main.js to main.ts - note, we need to edit rollup.config.js for this too
const beforeMainJSPath = path.join(projectRoot, "src", "main.js");
const afterMainTSPath = path.join(projectRoot, "src", "main.ts");
fs.renameSync(beforeMainJSPath, afterMainTSPath);

// Switch the app.svelte file to use TS
const appSveltePath = path.join(projectRoot, "src", "App.svelte");
let appFile = fs.readFileSync(appSveltePath, "utf8");
appFile = appFile.replace("<script>", '<script lang="ts">');
appFile = appFile.replace("export let name;", "export let name: string;");
fs.writeFileSync(appSveltePath, appFile);

// Edit rollup config
const rollupConfigPath = path.join(projectRoot, "rollup.config.js");
let rollupConfig = fs.readFileSync(rollupConfigPath, "utf8");

// Edit imports
rollupConfig = rollupConfig.replace(
  `'rollup-plugin-css-only';`,
  `'rollup-plugin-css-only';
import sveltePreprocess from 'svelte-preprocess';
import typescript from '@rollup/plugin-typescript';`,
);

// Replace name of entry point
rollupConfig = rollupConfig.replace(`'src/main.js'`, `'src/main.ts'`);

// Add preprocessor
rollupConfig = rollupConfig.replace(
  "compilerOptions:",
  "preprocess: sveltePreprocess({ sourceMap: !production }),\n\t\t\tcompilerOptions:",
);

// Add TypeScript
rollupConfig = rollupConfig.replace(
  "commonjs(),",
  "commonjs(),\n\t\ttypescript({\n\t\t\tsourceMap: !production,\n\t\t\tinlineSources: !production\n\t\t}),",
);
fs.writeFileSync(rollupConfigPath, rollupConfig);

// Add tsconfig.json
const tsconfig = `{
  "extends": "@tsconfig/svelte/tsconfig.json",

  "include": ["src/**/*"],
  "exclude": ["node_modules/*", "__sapper__/*", "public/*"]
}`;
const tsconfigPath = path.join(projectRoot, "tsconfig.json");
fs.writeFileSync(tsconfigPath, tsconfig);

// Add svelte.config.js
const svelteConfig = `import sveltePreprocess from 'svelte-preprocess';

export default {
  preprocess: sveltePreprocess()
};
`;
const svelteConfigPath = path.join(projectRoot, "svelte.config.js");
fs.writeFileSync(svelteConfigPath, svelteConfig);

// Add global.d.ts
const dtsPath = path.join(projectRoot, "src", "global.d.ts");
fs.writeFileSync(dtsPath, `/// <reference types="svelte" />`);

// Delete this script, but not during testing
if (!argv[2]) {
  // Remove the script
  fs.unlinkSync(path.join(__filename));

  // Check for Mac's DS_store file, and if it's the only one left remove it
  const remainingFiles = fs.readdirSync(path.join(__dirname));
  if (remainingFiles.length === 1 && remainingFiles[0] === ".DS_store") {
    fs.unlinkSync(path.join(__dirname, ".DS_store"));
  }

  // Check if the scripts folder is empty
  if (fs.readdirSync(path.join(__dirname)).length === 0) {
    // Remove the scripts folder
    fs.rmdirSync(path.join(__dirname));
  }
}

// Adds the extension recommendation
fs.mkdirSync(path.join(projectRoot, ".vscode"), { recursive: true });
fs.writeFileSync(
  path.join(projectRoot, ".vscode", "extensions.json"),
  `{
  "recommendations": ["svelte.svelte-vscode"]
}
`,
);

console.log("Converted to TypeScript.");

if (fs.existsSync(path.join(projectRoot, "node_modules"))) {
  console.log(
    "\nYou will need to re-run your dependency manager to get started.",
  );
}
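Note: every edit this script makes relies on `String.prototype.replace` with a string pattern, which substitutes only the first occurrence, so each anchor string must be unique enough in rollup.config.js. A quick self-contained illustration (hypothetical input string, not from the script):

```js
// With a string (not regex) pattern, replace() rewrites only the first match.
const config = "plugins: [commonjs(), commonjs()]";
const patched = config.replace("commonjs(),", "commonjs(), typescript(),");
console.log(patched); // -> "plugins: [commonjs(), typescript(), commonjs()]"
```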
@@ -1,697 +0,0 @@
import React, { useState, useEffect, useRef } from 'react';

function App() {
  const [user, setUser] = useState(null);
  const [status, setStatus] = useState('Ready to authenticate');
  const [statusType, setStatusType] = useState('info');
  const [profileData, setProfileData] = useState(null);

  // Theme state for dark/light mode
  const [isDarkMode, setIsDarkMode] = useState(false);

  const [checkingAuth, setCheckingAuth] = useState(true);

  // Login view layout measurements
  const titleRef = useRef(null);
  const fileInputRef = useRef(null);
  const [loginPadding, setLoginPadding] = useState(16); // default fallback padding in px

  useEffect(() => {
    function updatePadding() {
      if (titleRef.current) {
        const h = titleRef.current.offsetHeight || 0;
        // Pad area around the text by half the title text height
        setLoginPadding(Math.max(0, Math.round(h / 2)));
      }
    }
    updatePadding();
    window.addEventListener('resize', updatePadding);
    return () => window.removeEventListener('resize', updatePadding);
  }, []);

  // Effect to detect and track system theme preference
  useEffect(() => {
    // Check if the browser supports prefers-color-scheme
    const darkModeMediaQuery = window.matchMedia('(prefers-color-scheme: dark)');

    // Set the initial theme based on system preference
    setIsDarkMode(darkModeMediaQuery.matches);

    // Add listener to respond to system theme changes
    const handleThemeChange = (e) => {
      setIsDarkMode(e.matches);
    };

    // Modern browsers
    darkModeMediaQuery.addEventListener('change', handleThemeChange);

    // Cleanup listener on component unmount
    return () => {
      darkModeMediaQuery.removeEventListener('change', handleThemeChange);
    };
  }, []);

  useEffect(() => {
    // Check authentication status on page load
    (async () => {
      await checkStatus();
      setCheckingAuth(false);
    })();
  }, []);

  // Effect to fetch profile when user changes
  useEffect(() => {
    if (user?.pubkey) {
      fetchUserProfile(user.pubkey);
    }
  }, [user?.pubkey]);

  function relayURL() {
    try {
      return window.location.protocol.replace('http', 'ws') + '//' + window.location.host;
    } catch (_) {
      return 'ws://localhost:3333';
    }
  }

  async function checkStatus() {
    try {
      const response = await fetch('/api/auth/status');
      const data = await response.json();
      if (data.authenticated && data.pubkey) {
        // Fetch permission first, then set user and profile
        try {
          const permResponse = await fetch(`/api/permissions/${data.pubkey}`);
          const permData = await permResponse.json();
          if (permData && permData.permission) {
            const fullUser = { pubkey: data.pubkey, permission: permData.permission };
            setUser(fullUser);
            updateStatus(`Already authenticated as: ${data.pubkey.slice(0, 16)}...`, 'success');
            // Fire and forget profile fetch
            fetchUserProfile(data.pubkey);
          }
        } catch (_) {
          // ignore permission fetch errors
        }
      }
    } catch (error) {
      // Ignore errors for status check
    }
  }

  function updateStatus(message, type = 'info') {
    setStatus(message);
    setStatusType(type);
  }

  function statusClassName() {
    const base = 'mt-5 mb-5 p-3 rounded';

    // Return theme-appropriate status classes
    switch (statusType) {
      case 'success':
        return base + ' ' + getThemeClasses('bg-green-100 text-green-800', 'bg-green-900 text-green-100');
      case 'error':
        return base + ' ' + getThemeClasses('bg-red-100 text-red-800', 'bg-red-900 text-red-100');
      case 'info':
      default:
        return base + ' ' + getThemeClasses('bg-cyan-100 text-cyan-800', 'bg-cyan-900 text-cyan-100');
    }
  }

  async function getChallenge() {
    try {
      const response = await fetch('/api/auth/challenge');
      const data = await response.json();
      return data.challenge;
    } catch (error) {
      updateStatus('Failed to get authentication challenge: ' + error.message, 'error');
      throw error;
    }
  }

  async function loginWithExtension() {
    if (!window.nostr) {
      updateStatus('No Nostr extension found. Please install a NIP-07 compatible extension like nos2x or Alby.', 'error');
      return;
    }

    try {
      updateStatus('Connecting to extension...', 'info');

      // Get public key from extension
      const pubkey = await window.nostr.getPublicKey();

      // Get challenge from server
      const challenge = await getChallenge();

      // Create authentication event
      const authEvent = {
        kind: 22242,
        created_at: Math.floor(Date.now() / 1000),
        tags: [
          ['relay', relayURL()],
          ['challenge', challenge]
        ],
        content: ''
      };

      // Sign the event with extension
      const signedEvent = await window.nostr.signEvent(authEvent);

      // Send to server
      await authenticate(signedEvent);

    } catch (error) {
      updateStatus('Extension login failed: ' + error.message, 'error');
    }
  }

  async function fetchKind0FromRelay(pubkeyHex, timeoutMs = 4000) {
    return new Promise((resolve) => {
      let resolved = false;
      let events = [];
      let ws;
      try {
        ws = new WebSocket(relayURL());
      } catch (e) {
        resolve(null);
        return;
      }

      const subId = 'profile-' + Math.random().toString(36).slice(2);
      const timer = setTimeout(() => {
        if (ws && ws.readyState === 1) {
          try { ws.close(); } catch (_) {}
        }
        if (!resolved) {
          resolved = true;
          resolve(null);
        }
      }, timeoutMs);

      ws.onopen = () => {
        try {
          const req = [
            'REQ',
            subId,
            { kinds: [0], authors: [pubkeyHex] }
          ];
          ws.send(JSON.stringify(req));
        } catch (_) {}
      };

      ws.onmessage = (msg) => {
        try {
          const data = JSON.parse(msg.data);
          const type = data[0];
          if (type === 'EVENT' && data[1] === subId) {
            const event = data[2];
            if (event && event.kind === 0 && event.content) {
              events.push(event);
            }
          } else if (type === 'EOSE' && data[1] === subId) {
            try {
              ws.send(JSON.stringify(['CLOSE', subId]));
            } catch (_) {}
            try { ws.close(); } catch (_) {}
            clearTimeout(timer);
            if (!resolved) {
              resolved = true;
              if (events.length) {
                const latest = events.reduce((a, b) => (a.created_at > b.created_at ? a : b));
                try {
                  const meta = JSON.parse(latest.content);
                  resolve(meta || null);
                } catch (_) {
                  resolve(null);
                }
              } else {
                resolve(null);
              }
            }
          }
        } catch (_) {
          // ignore malformed messages
        }
      };

      ws.onerror = () => {
        try { ws.close(); } catch (_) {}
        clearTimeout(timer);
        if (!resolved) {
          resolved = true;
          resolve(null);
        }
      };

      ws.onclose = () => {
        clearTimeout(timer);
        if (!resolved) {
          resolved = true;
          if (events.length) {
            const latest = events.reduce((a, b) => (a.created_at > b.created_at ? a : b));
            try {
              const meta = JSON.parse(latest.content);
              resolve(meta || null);
            } catch (_) {
              resolve(null);
            }
          } else {
            resolve(null);
          }
        }
      };
    });
  }

  // Function to fetch user profile metadata (kind 0)
  async function fetchUserProfile(pubkeyHex) {
    try {
      // Create a simple placeholder with the pubkey
      const placeholderProfile = {
        name: `user:${pubkeyHex.slice(0, 8)}`,
        about: 'No profile data available'
      };

      // Always set the placeholder profile first
      setProfileData(placeholderProfile);

      // First, try to get profile kind:0 from the relay itself
      let relayMetadata = null;
      try {
        relayMetadata = await fetchKind0FromRelay(pubkeyHex);
      } catch (_) {}

      if (relayMetadata) {
        const parsed = typeof relayMetadata === 'string' ? JSON.parse(relayMetadata) : relayMetadata;
        setProfileData({
          name: parsed.name || placeholderProfile.name,
          display_name: parsed.display_name,
          picture: parsed.picture,
          banner: parsed.banner,
          about: parsed.about || placeholderProfile.about
        });
        return parsed;
      }

      // Fallback: try extension metadata if available
      if (window.nostr && window.nostr.getPublicKey) {
        try {
          if (window.nostr.getUserMetadata) {
            const metadata = await window.nostr.getUserMetadata();
            if (metadata) {
              try {
                const parsedMetadata = typeof metadata === 'string' ? JSON.parse(metadata) : metadata;
                setProfileData({
                  name: parsedMetadata.name || placeholderProfile.name,
                  display_name: parsedMetadata.display_name,
                  picture: parsedMetadata.picture,
                  banner: parsedMetadata.banner,
                  about: parsedMetadata.about || placeholderProfile.about
                });
                return parsedMetadata;
              } catch (parseError) {
                console.log('Error parsing user metadata:', parseError);
              }
            }
          }
        } catch (nostrError) {
          console.log('Could not get profile from extension:', nostrError);
        }
      }

      return placeholderProfile;
    } catch (error) {
      console.error('Error handling profile data:', error);
      return null;
    }
  }

  async function authenticate(signedEvent) {
    try {
      const response = await fetch('/api/auth/login', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(signedEvent)
      });

      const result = await response.json();

      if (result.success) {
        setUser(result.pubkey);
        updateStatus('Successfully authenticated as: ' + result.pubkey.slice(0, 16) + '...', 'success');

        // Check permissions after login
        const permResponse = await fetch(`/api/permissions/${result.pubkey}`);
        const permData = await permResponse.json();
        if (permData && permData.permission) {
          setUser({pubkey: result.pubkey, permission: permData.permission});

          // Fetch user profile data
          await fetchUserProfile(result.pubkey);
        }
      } else {
        updateStatus('Authentication failed: ' + result.error, 'error');
      }
    } catch (error) {
      updateStatus('Authentication request failed: ' + error.message, 'error');
    }
  }

  async function logout() {
    try {
      await fetch('/api/auth/logout', { method: 'POST' });
    } catch (_) {}
    setUser(null);
    updateStatus('Logged out', 'info');
  }

  function handleImportButton() {
    try {
      fileInputRef?.current?.click();
    } catch (_) {}
  }

  async function handleImportChange(e) {
    const file = e?.target?.files && e.target.files[0];
    if (!file) return;
    try {
      updateStatus('Uploading import file...', 'info');
      const fd = new FormData();
      fd.append('file', file);
      const res = await fetch('/api/import', { method: 'POST', body: fd });
      if (res.ok) {
        updateStatus('Import started. Processing will continue in the background.', 'success');
      } else {
        const txt = await res.text();
        updateStatus('Import failed: ' + txt, 'error');
      }
    } catch (err) {
      updateStatus('Import failed: ' + (err?.message || String(err)), 'error');
    } finally {
      // reset input so selecting the same file again works
      if (e && e.target) e.target.value = '';
    }
  }

  // =========================
  // Export Specific Pubkeys UI state and handlers (admin)
  // =========================
  const [exportPubkeys, setExportPubkeys] = useState([{ value: '' }]);

  function isHex64(str) {
    if (!str) return false;
    const s = String(str).trim();
    return /^[0-9a-fA-F]{64}$/.test(s);
  }

  function normalizeHex(str) {
    return String(str || '').trim();
  }

  function addExportPubkeyField() {
    // Add new field at the end of the list so it appears downwards
    setExportPubkeys((arr) => [...arr, { value: '' }]);
  }

  function removeExportPubkeyField(idx) {
    setExportPubkeys((arr) => arr.filter((_, i) => i !== idx));
  }

  function changeExportPubkey(idx, val) {
    const v = normalizeHex(val);
    setExportPubkeys((arr) => arr.map((item, i) => (i === idx ? { value: v } : item)));
  }

  function validExportPubkeys() {
    return exportPubkeys
      .map((p) => normalizeHex(p.value))
      .filter((v) => v.length > 0 && isHex64(v));
  }

  function canExportSpecific() {
    // Enable only if every opened field is non-empty and a valid 64-char hex
    if (!exportPubkeys || exportPubkeys.length === 0) return false;
    return exportPubkeys.every((p) => {
      const v = normalizeHex(p.value);
      return v.length === 64 && isHex64(v);
    });
  }

  function handleExportSpecific() {
    const vals = validExportPubkeys();
    if (!vals.length) return;
    const qs = vals.map((v) => `pubkey=${encodeURIComponent(v)}`).join('&');
    try {
      window.location.href = `/api/export?${qs}`;
    } catch (_) {}
  }

  // Theme utility functions for conditional styling
  function getThemeClasses(lightClass, darkClass) {
    return isDarkMode ? darkClass : lightClass;
  }

  // Get background color class for container panels
  function getPanelBgClass() {
    return getThemeClasses('bg-gray-200', 'bg-gray-800');
  }

  // Get text color class for standard text
  function getTextClass() {
    return getThemeClasses('text-gray-700', 'text-gray-300');
  }

  // Get background color for buttons
  function getButtonBgClass() {
    return getThemeClasses('bg-gray-100', 'bg-gray-700');
  }

  // Get text color for buttons
  function getButtonTextClass() {
    return getThemeClasses('text-gray-500', 'text-gray-300');
  }

  // Get hover classes for buttons
  function getButtonHoverClass() {
    return getThemeClasses('hover:text-gray-800', 'hover:text-gray-100');
  }

  // Prevent UI flash: wait until we checked auth status
  if (checkingAuth) {
    return null;
  }

  return (
    <div className={`min-h-screen ${getThemeClasses('bg-gray-100', 'bg-gray-900')}`}>
      {user?.permission ? (
        <>
          {/* Logged in view with user profile */}
          <div className={`sticky top-0 left-0 w-full ${getThemeClasses('bg-gray-100', 'bg-gray-900')} z-50 h-16 flex items-center overflow-hidden`}>
            <div className="flex items-center h-full w-full box-border">
              <div className="relative overflow-hidden flex flex-grow items-center justify-start h-full">
                {profileData?.banner && (
                  <div className="absolute inset-0 opacity-70 bg-cover bg-center" style={{ backgroundImage: `url(${profileData.banner})` }}></div>
                )}
                <div className="relative z-10 p-2 flex items-center h-full">
                  {profileData?.picture && <img src={profileData.picture} alt="User Avatar" className={`h-full aspect-square w-auto rounded-full object-cover border-2 ${getThemeClasses('border-white', 'border-gray-600')} mr-2 shadow box-border`} />}
                  <div className={getTextClass()}>
                    <div className="font-bold text-base block">
                      {profileData?.display_name || profileData?.name || user.pubkey.slice(0, 8)}
                      {profileData?.name && profileData?.display_name && ` (${profileData.name})`}
                    </div>
                    <div className="font-bold text-lg text-left">
                      {user.permission === "admin" ? "Admin Dashboard" : "Subscriber Dashboard"}
                    </div>
                  </div>
                </div>
              </div>
              <div className="flex items-center justify-end shrink-0 h-full">
                <button className={`bg-transparent ${getButtonTextClass()} border-0 text-2xl cursor-pointer flex items-center justify-center h-full aspect-square shrink-0 hover:bg-transparent ${getButtonHoverClass()}`} onClick={logout}>✕</button>
              </div>
            </div>
          </div>
          {/* Dashboard content container - stacks vertically and fills remaining space */}
          <div className="flex-grow overflow-y-auto p-4">
            {/* Hidden file input for import (admin) */}
            <input
              type="file"
              ref={fileInputRef}
              onChange={handleImportChange}
              accept=".json,.jsonl,text/plain,application/x-ndjson,application/json"
              style={{ display: 'none' }}
            />
            <div className={`m-2 p-2 w-full ${getPanelBgClass()} rounded-lg`}>
              <div className={`text-lg font-bold flex items-center ${getTextClass()}`}>Welcome</div>
              <p className={getTextClass()}>here you can configure all the things</p>
            </div>

            {/* Export only my events */}
            <div className={`m-2 p-2 ${getPanelBgClass()} rounded-lg w-full`}>
              <div className="w-full flex items-center justify-end p-2 bg-gray-900 rounded-lg">
                <div className="pr-2 m-2 w-full">
                  <div className={`text-base font-bold mb-1 ${getTextClass()}`}>Export My Events</div>
                  <p className={`text-sm w-full ${getTextClass()}`}>Download your own events as line-delimited JSON (JSONL/NDJSON). Only events you authored will be included.</p>
                </div>
                <button
                  className={`${getButtonBgClass()} ${getButtonTextClass()} border-0 text-2xl cursor-pointer flex items-center justify-center h-full aspect-square shrink-0 hover:bg-transparent ${getButtonHoverClass()}`}
                  onClick={() => { window.location.href = '/api/export/mine'; }}
                  aria-label="Download my events as JSONL"
                  title="Download my events"
                >
                  ⤓
                </button>
              </div>
            </div>

            {user.permission === "admin" && (
              <>
                <div className={`m-2 p-2 ${getPanelBgClass()} rounded-lg w-full`}>
                  <div className="flex items-center justify-between p-2 m-4 bg-gray-900 round">
                    <div className="pr-2 w-full">
                      <div className={`text-base font-bold mb-1 ${getTextClass()}`}>Export All Events (admin)</div>
                      <p className={`text-sm ${getTextClass()}`}>Download all stored events as line-delimited JSON (JSONL/NDJSON). This may take a while on large databases.</p>
                    </div>
                    <button
                      className={`${getButtonBgClass()} ${getButtonTextClass()} border-0 text-2xl cursor-pointer flex m-2 items-center justify-center h-full aspect-square shrink-0 hover:bg-transparent ${getButtonHoverClass()}`}
                      onClick={() => { window.location.href = '/api/export'; }}
                      aria-label="Download all events as JSONL"
                      title="Download all events"
                    >
                      ⤓
                    </button>
                  </div>
                </div>

                {/* Export specific pubkeys (admin) */}
                <div className={`m-2 p-2 ${getPanelBgClass()} rounded-lg w-full`}>
                  <div className="w-full flex items-start justify-between gap-4 m-2 p-2 bg-gray-900 rounded-lg">
                    {/* Left: title and help text */}
                    <div className="flex-1 pr-2 w-full">
                      <div className={`text-base font-bold mb-1 ${getTextClass()}`}>Export Specific Pubkeys (admin)</div>
                      <p className={`text-sm ${getTextClass()}`}>Enter one or more author pubkeys (64-character hex). Only valid entries will be exported.</p>
                      {/* Right: controls (buttons stacked vertically + list below) */}
                      <div className="flex flex-col items-end gap-2 self-end justify-end p-2">
                        <button
                          className={`${getButtonBgClass()} ${getTextClass()} text-base p-4 rounded m-2 ${getThemeClasses('hover:bg-gray-200', 'hover:bg-gray-600')}`}
                          onClick={addExportPubkeyField}
                          title="Add another pubkey"
                          type="button"
                        >
                          + Add
                        </button>
                      </div>
                      <div className="flex flex-col items-end gap-2 min-w-[320px] justify-end p-2">

                        <div className="gap-2 justify-end">
                          {exportPubkeys.map((item, idx) => {
                            const v = (item?.value || '').trim();
                            const valid = v.length === 0 ? true : isHex64(v);
                            return (
                              <div key={idx} className="flex items-center gap-2 ">
                                <input
                                  type="text"
                                  inputMode="text"
                                  autoComplete="off"
                                  spellCheck="false"
                                  className={`flex-1 text-sm px-2 py-1 border rounded outline-none ${valid
                                    ? getThemeClasses('border-gray-300 bg-white text-gray-900 focus:ring-2 focus:ring-blue-200', 'border-gray-600 bg-gray-700 text-gray-100 focus:ring-2 focus:ring-blue-500')
                                    : getThemeClasses('border-red-500 bg-red-50 text-red-800', 'border-red-700 bg-red-900 text-red-200')}`}
                                  placeholder="e.g., 64-hex pubkey"
                                  value={v}
                                  onChange={(e) => changeExportPubkey(idx, e.target.value)}
                                />
                                <button
                                  className={`${getButtonBgClass()} ${getTextClass()} px-2 py-1 rounded ${getThemeClasses('hover:bg-gray-200', 'hover:bg-gray-600')}`}
                                  onClick={() => removeExportPubkeyField(idx)}
                                  title="Remove this pubkey"
                                  type="button"
                                >
                                  ✕
                                </button>
                              </div>
                            );
                          })}
                        </div>

                      </div>
                      <div className="flex justify-end items-end gap-2 self-end">
                        <button
                          className={`${getThemeClasses('bg-blue-600', 'bg-blue-500')} text-white px-3 py-1 rounded disabled:opacity-50 disabled:cursor-not-allowed ${canExportSpecific() ? getThemeClasses('hover:bg-blue-700', 'hover:bg-blue-600') : ''}`}
                          onClick={handleExportSpecific}
                          disabled={!canExportSpecific()}
                          title={canExportSpecific() ? 'Download events for specified pubkeys' : 'Enter a valid 64-character hex pubkey in every field'}
                          type="button"
                        >
                          Export
                        </button>

                      </div>
                    </div>

                  </div>
                </div>
                <div className={`m-2 p-2 ${getPanelBgClass()} rounded-lg w-full`}>
                  <div className="flex items-center justify-between p-2 bg-gray-900 rounded-lg">
                    <div className="pr-2 w-full">
                      <div className={`text-base font-bold mb-1 ${getTextClass()}`}>Import Events (admin)</div>
                      <p className={`text-sm ${getTextClass()}`}>Upload events in line-delimited JSON (JSONL/NDJSON) to import into the database.</p>
                    </div>
                    <button
                      className={`${getButtonBgClass()} ${getButtonTextClass()} border-0 text-2xl cursor-pointer flex items-center justify-center h-full aspect-square shrink-0 hover:bg-transparent ${getButtonHoverClass()}`}
                      onClick={handleImportButton}
                      aria-label="Import events from JSONL"
                      title="Import events"
                    >
                      ↥
                    </button>
                  </div>
                </div>
              </>
            )}
            {/* Empty flex grow box to ensure background fills entire viewport */}
            <div className={`flex-grow ${getThemeClasses('bg-gray-100', 'bg-gray-900')}`}></div>
          </div>
        </>
      ) : (
        // Not logged in view - shows the login form
        <div className="w-full h-full flex items-center justify-center">
          <div
            className={getThemeClasses('bg-gray-100', 'bg-gray-900')}
            style={{ width: '800px', maxWidth: '100%', boxSizing: 'border-box', padding: `${loginPadding}px` }}
          >
            <div className="flex items-center gap-3 mb-3">
              <img
                src="/orly.png"
                alt="Orly logo"
                className="object-contain"
                style={{ width: '4rem', height: '4rem' }}
                onError={(e) => {
                  // fallback to repo docs image if public asset missing
                  e.currentTarget.onerror = null;
                  e.currentTarget.src = "/docs/orly.png";
                }}
              />
              <h1 ref={titleRef} className={`text-2xl font-bold p-2 ${getTextClass()}`}>ORLY🦉 Dashboard Login</h1>
            </div>

            <p className={`mb-4 ${getTextClass()}`}>Authenticate to this Nostr relay using your browser extension.</p>

            <div className={statusClassName()}>
              {status}
            </div>

            <div className="mb-5">
              <button className={`${getThemeClasses('bg-blue-600', 'bg-blue-500')} text-white px-5 py-3 rounded ${getThemeClasses('hover:bg-blue-700', 'hover:bg-blue-600')}`} onClick={loginWithExtension}>Login with Browser Extension (NIP-07)</button>
            </div>
          </div>
        </div>
      )}
      <div className={`flex-grow ${getThemeClasses('bg-gray-100', 'bg-gray-900')}`}></div>
    </div>
  );
}

export default App;
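Note: the deleted App.jsx above builds a NIP-42 authentication event (kind 22242 with `relay` and `challenge` tags) and posts it to /api/auth/login. As a rough sketch of what the receiving side is expected to check under NIP-42 (signature and event-id verification omitted; the helper is illustrative, not code from this repository):

```js
// Shape checks for a kind 22242 auth event per NIP-42; a real relay must
// also verify the event id and the Schnorr signature over it.
function looksLikeValidAuthEvent(ev, expectedChallenge) {
  const tagValue = (name) => (ev.tags.find((t) => t[0] === name) || [])[1];
  const nowSec = Math.floor(Date.now() / 1000);
  return (
    ev.kind === 22242 &&
    tagValue("challenge") === expectedChallenge &&
    Math.abs(nowSec - ev.created_at) <= 600 // created_at within ~10 minutes
  );
}
```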
2920 app/web/src/App.svelte Normal file
File diff suppressed because it is too large
392 app/web/src/LoginModal.svelte Normal file
@@ -0,0 +1,392 @@
<script>
  import { createEventDispatcher } from 'svelte';

  const dispatch = createEventDispatcher();

  export let showModal = false;
  export let isDarkTheme = false;

  let activeTab = 'extension';
  let nsecInput = '';
  let isLoading = false;
  let errorMessage = '';
  let successMessage = '';

  function closeModal() {
    showModal = false;
    nsecInput = '';
    errorMessage = '';
    successMessage = '';
    dispatch('close');
  }

  function switchTab(tab) {
    activeTab = tab;
    errorMessage = '';
    successMessage = '';
  }

  async function loginWithExtension() {
    isLoading = true;
    errorMessage = '';
    successMessage = '';

    try {
      // Check if window.nostr is available
      if (!window.nostr) {
        throw new Error('No Nostr extension found. Please install a NIP-07 compatible extension like nos2x or Alby.');
      }

      // Get public key from extension
      const pubkey = await window.nostr.getPublicKey();

      if (pubkey) {
        // Store authentication info
        localStorage.setItem('nostr_auth_method', 'extension');
        localStorage.setItem('nostr_pubkey', pubkey);

        successMessage = 'Successfully logged in with extension!';
        dispatch('login', {
          method: 'extension',
          pubkey: pubkey,
          signer: window.nostr
        });

        setTimeout(() => {
          closeModal();
        }, 1500);
      }
    } catch (error) {
      errorMessage = error.message;
    } finally {
      isLoading = false;
    }
  }

  function validateNsec(nsec) {
    // Basic validation for nsec format
    if (!nsec.startsWith('nsec1')) {
      return false;
    }
    // Should be around 63 characters long
    if (nsec.length < 60 || nsec.length > 70) {
      return false;
    }
    return true;
  }

  function nsecToHex(nsec) {
    // This is a simplified conversion - in a real app you'd use a proper library
    // For demo purposes, we'll simulate the conversion
    try {
      // Remove 'nsec1' prefix and decode (simplified)
      const withoutPrefix = nsec.slice(5);
      // In reality, you'd use bech32 decoding here
      // For now, we'll generate a mock hex key
      return 'mock_' + withoutPrefix.slice(0, 32);
    } catch (error) {
      throw new Error('Invalid nsec format');
    }
  }

  async function loginWithNsec() {
    isLoading = true;
    errorMessage = '';
    successMessage = '';

    try {
      if (!nsecInput.trim()) {
        throw new Error('Please enter your nsec');
      }

      if (!validateNsec(nsecInput.trim())) {
        throw new Error('Invalid nsec format. Must start with "nsec1"');
      }

      // Convert nsec to hex format (simplified for demo)
      const privateKey = nsecToHex(nsecInput.trim());

      // In a real implementation, you'd derive the public key from private key
      const publicKey = 'derived_' + privateKey.slice(5, 37);

      // Store securely (in production, consider more secure storage)
      localStorage.setItem('nostr_auth_method', 'nsec');
      localStorage.setItem('nostr_pubkey', publicKey);
      localStorage.setItem('nostr_privkey', privateKey);

      successMessage = 'Successfully logged in with nsec!';
      dispatch('login', {
        method: 'nsec',
        pubkey: publicKey,
        privateKey: privateKey
      });

      setTimeout(() => {
        closeModal();
      }, 1500);
    } catch (error) {
      errorMessage = error.message;
    } finally {
      isLoading = false;
    }
  }

  function handleKeydown(event) {
    if (event.key === 'Escape') {
      closeModal();
    }
    if (event.key === 'Enter' && activeTab === 'nsec') {
      loginWithNsec();
    }
  }
</script>

<svelte:window on:keydown={handleKeydown} />

{#if showModal}
  <div class="modal-overlay" on:click={closeModal} on:keydown={(e) => e.key === 'Escape' && closeModal()} role="button" tabindex="0">
    <div class="modal" class:dark-theme={isDarkTheme} on:click|stopPropagation on:keydown|stopPropagation>
      <div class="modal-header">
        <h2>Login to Nostr</h2>
        <button class="close-btn" on:click={closeModal}>×</button>
      </div>

      <div class="tab-container">
        <div class="tabs">
          <button
            class="tab-btn"
            class:active={activeTab === 'extension'}
            on:click={() => switchTab('extension')}
          >
            Extension
          </button>
          <button
            class="tab-btn"
            class:active={activeTab === 'nsec'}
            on:click={() => switchTab('nsec')}
          >
            Nsec
          </button>
        </div>

        <div class="tab-content">
          {#if activeTab === 'extension'}
            <div class="extension-login">
              <p>Login using a NIP-07 compatible browser extension like nos2x or Alby.</p>
              <button
                class="login-extension-btn"
                on:click={loginWithExtension}
                disabled={isLoading}
              >
                {isLoading ? 'Connecting...' : 'Log in using extension'}
              </button>
            </div>
          {:else}
            <div class="nsec-login">
              <p>Enter your nsec (private key) to login. This will be stored securely in your browser.</p>
              <input
                type="password"
                placeholder="nsec1..."
                bind:value={nsecInput}
                disabled={isLoading}
                class="nsec-input"
              />
              <button
                class="login-nsec-btn"
                on:click={loginWithNsec}
                disabled={isLoading || !nsecInput.trim()}
              >
                {isLoading ? 'Logging in...' : 'Log in with nsec'}
              </button>
            </div>
          {/if}

          {#if errorMessage}
            <div class="message error-message">{errorMessage}</div>
          {/if}

          {#if successMessage}
            <div class="message success-message">{successMessage}</div>
          {/if}
        </div>
      </div>
    </div>
  </div>
{/if}

<style>
  .modal-overlay {
    position: fixed;
    top: 0;
    left: 0;
    width: 100%;
    height: 100%;
    background-color: rgba(0, 0, 0, 0.5);
    display: flex;
    justify-content: center;
    align-items: center;
    z-index: 1000;
  }

  .modal {
    background: var(--bg-color);
    border-radius: 8px;
    box-shadow: 0 4px 20px rgba(0, 0, 0, 0.3);
    width: 90%;
    max-width: 500px;
    max-height: 90vh;
    overflow-y: auto;
    border: 1px solid var(--border-color);
  }

  .modal-header {
    display: flex;
    justify-content: space-between;
    align-items: center;
    padding: 20px;
    border-bottom: 1px solid var(--border-color);
  }

  .modal-header h2 {
    margin: 0;
    color: var(--text-color);
    font-size: 1.5rem;
  }

  .close-btn {
    background: none;
    border: none;
    font-size: 1.5rem;
    cursor: pointer;
    color: var(--text-color);
    padding: 0;
    width: 30px;
    height: 30px;
    display: flex;
    align-items: center;
    justify-content: center;
    border-radius: 50%;
    transition: background-color 0.2s;
  }

  .close-btn:hover {
    background-color: var(--tab-hover-bg);
  }

  .tab-container {
    padding: 20px;
  }

  .tabs {
    display: flex;
    border-bottom: 1px solid var(--border-color);
    margin-bottom: 20px;
  }

  .tab-btn {
    flex: 1;
    padding: 12px 16px;
    background: none;
    border: none;
    cursor: pointer;
    color: var(--text-color);
    font-size: 1rem;
    transition: all 0.2s;
    border-bottom: 2px solid transparent;
  }

  .tab-btn:hover {
    background-color: var(--tab-hover-bg);
  }

  .tab-btn.active {
    border-bottom-color: var(--primary);
    color: var(--primary);
  }

  .tab-content {
    min-height: 200px;
  }

  .extension-login,
  .nsec-login {
    display: flex;
    flex-direction: column;
    gap: 16px;
  }

  .extension-login p,
  .nsec-login p {
    margin: 0;
    color: var(--text-color);
    line-height: 1.5;
  }

  .login-extension-btn,
  .login-nsec-btn {
    padding: 12px 24px;
    background: var(--primary);
    color: white;
    border: none;
    border-radius: 6px;
    cursor: pointer;
    font-size: 1rem;
    transition: background-color 0.2s;
  }

  .login-extension-btn:hover:not(:disabled),
  .login-nsec-btn:hover:not(:disabled) {
    background: #00ACC1;
  }

  .login-extension-btn:disabled,
  .login-nsec-btn:disabled {
    background: #ccc;
    cursor: not-allowed;
  }

  .nsec-input {
    padding: 12px;
    border: 1px solid var(--input-border);
    border-radius: 6px;
    font-size: 1rem;
    background: var(--bg-color);
    color: var(--text-color);
  }

  .nsec-input:focus {
    outline: none;
    border-color: var(--primary);
  }

  .message {
    padding: 10px;
    border-radius: 4px;
    margin-top: 16px;
    text-align: center;
  }

  .error-message {
    background: #ffebee;
    color: #c62828;
    border: 1px solid #ffcdd2;
  }

  .success-message {
    background: #e8f5e8;
    color: #2e7d32;
    border: 1px solid #c8e6c9;
  }

  .modal.dark-theme .error-message {
    background: #4a2c2a;
    color: #ffcdd2;
    border: 1px solid #6d4c41;
  }

  .modal.dark-theme .success-message {
    background: #2e4a2e;
    color: #a5d6a7;
    border: 1px solid #4caf50;
  }
</style>
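Note: `nsecToHex` above is explicitly a mock, and the `derived_` pubkey built from it is fake as well; real handling means bech32-decoding the nsec and deriving the public key from the secret key. A sketch of how that could look with the nostr-tools library (assuming its `nip19.decode` and `getPublicKey` API; not code from this repository):

```js
import { nip19, getPublicKey } from "nostr-tools";

// Decode the bech32 nsec into the raw secret key, then derive the hex
// public key from it instead of fabricating both values.
function deriveKeys(nsec) {
  const { type, data } = nip19.decode(nsec); // data: the secret key bytes
  if (type !== "nsec") throw new Error("Not an nsec key");
  return { secretKey: data, pubkey: getPublicKey(data) };
}
```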
14 app/web/src/constants.js Normal file
@@ -0,0 +1,14 @@
// Default Nostr relays for searching
export const DEFAULT_RELAYS = [
  // Use the local relay WebSocket endpoint
  `wss://${window.location.host}/ws`,
  // Fallback to external relays if local fails
  "wss://relay.damus.io",
  "wss://relay.nostr.band",
  "wss://nos.lol",
  "wss://relay.nostr.net",
  "wss://relay.minibits.cash",
  "wss://relay.coinos.io/",
  "wss://nwc.primal.net",
  "wss://relay.orly.dev",
];
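Note: these URLs feed the `NostrClient` defined in nostr.js further down. A minimal usage sketch based on that class's methods (assumes the class is exported; the filter values are illustrative):

```js
import { NostrClient } from "./nostr.js"; // assumption: the class is exported

const client = new NostrClient();
await client.connect(); // opens a WebSocket to each DEFAULT_RELAYS entry

// Request the 10 most recent kind 1 notes and log each EVENT that arrives.
const subId = client.subscribe({ kinds: [1], limit: 10 }, (ev) => {
  console.log(ev.pubkey, ev.content);
});

// Later: close the subscription and drop all relay connections.
client.unsubscribe(subId);
client.disconnect();
```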
@@ -1,11 +0,0 @@
import React from 'react';
import { createRoot } from 'react-dom/client';
import App from './App';
import './styles.css';

const root = createRoot(document.getElementById('root'));
root.render(
  <React.StrictMode>
    <App />
  </React.StrictMode>
);
11 app/web/src/main.js Normal file
@@ -0,0 +1,11 @@
import App from "./App.svelte";
import "../public/global.css";

const app = new App({
  target: document.body,
  props: {
    name: "world",
  },
});

export default app;
599 app/web/src/nostr.js Normal file
@@ -0,0 +1,599 @@
|
||||
import { DEFAULT_RELAYS } from "./constants.js";

// Simple WebSocket relay manager
class NostrClient {
  constructor() {
    this.relays = new Map();
    this.subscriptions = new Map();
  }

  async connect() {
    console.log("Starting connection to", DEFAULT_RELAYS.length, "relays...");

    const connectionPromises = DEFAULT_RELAYS.map((relayUrl) => {
      return new Promise((resolve) => {
        try {
          console.log(`Attempting to connect to ${relayUrl}`);
          const ws = new WebSocket(relayUrl);

          ws.onopen = () => {
            console.log(`✓ Successfully connected to ${relayUrl}`);
            resolve(true);
          };

          ws.onerror = (error) => {
            console.error(`✗ Error connecting to ${relayUrl}:`, error);
            resolve(false);
          };

          ws.onclose = (event) => {
            console.warn(
              `Connection closed to ${relayUrl}:`,
              event.code,
              event.reason,
            );
          };

          ws.onmessage = (event) => {
            console.log(`Message from ${relayUrl}:`, event.data);
            try {
              this.handleMessage(relayUrl, JSON.parse(event.data));
            } catch (error) {
              console.error(
                `Failed to parse message from ${relayUrl}:`,
                error,
                event.data,
              );
            }
          };

          this.relays.set(relayUrl, ws);

          // Timeout after 5 seconds
          setTimeout(() => {
            if (ws.readyState !== WebSocket.OPEN) {
              console.warn(`Connection timeout for ${relayUrl}`);
              resolve(false);
            }
          }, 5000);
        } catch (error) {
          console.error(`Failed to create WebSocket for ${relayUrl}:`, error);
          resolve(false);
        }
      });
    });

    const results = await Promise.all(connectionPromises);
    const successfulConnections = results.filter(Boolean).length;
    console.log(
      `Connected to ${successfulConnections}/${DEFAULT_RELAYS.length} relays`,
    );

    // Wait a bit more for connections to stabilize
    await new Promise((resolve) => setTimeout(resolve, 1000));
  }

  handleMessage(relayUrl, message) {
    console.log(`Processing message from ${relayUrl}:`, message);
    const [type, subscriptionId, event, ...rest] = message;

    console.log(`Message type: ${type}, subscriptionId: ${subscriptionId}`);

    if (type === "EVENT") {
      console.log(`Received EVENT for subscription ${subscriptionId}:`, event);
      if (this.subscriptions.has(subscriptionId)) {
        console.log(
          `Found callback for subscription ${subscriptionId}, executing...`,
        );
        const callback = this.subscriptions.get(subscriptionId);
        callback(event);
      } else {
        console.warn(`No callback found for subscription ${subscriptionId}`);
      }
    } else if (type === "EOSE") {
      console.log(
        `End of stored events for subscription ${subscriptionId} from ${relayUrl}`,
      );
      // Dispatch EOSE event for fetchEvents function
      if (this.subscriptions.has(subscriptionId)) {
        window.dispatchEvent(new CustomEvent('nostr-eose', {
          detail: { subscriptionId, relayUrl }
        }));
      }
    } else if (type === "NOTICE") {
      console.warn(`Notice from ${relayUrl}:`, subscriptionId);
    } else {
      console.log(`Unknown message type ${type} from ${relayUrl}:`, message);
    }
  }

  subscribe(filters, callback) {
    const subscriptionId = Math.random().toString(36).substring(7);
    console.log(
      `Creating subscription ${subscriptionId} with filters:`,
      filters,
    );

    this.subscriptions.set(subscriptionId, callback);

    const subscription = ["REQ", subscriptionId, filters];
    console.log(`Subscription message:`, JSON.stringify(subscription));

    let sentCount = 0;
    for (const [relayUrl, ws] of this.relays) {
      console.log(
        `Checking relay ${relayUrl}, readyState: ${ws.readyState} (${ws.readyState === WebSocket.OPEN ? "OPEN" : "NOT OPEN"})`,
      );
      if (ws.readyState === WebSocket.OPEN) {
        try {
          ws.send(JSON.stringify(subscription));
          console.log(`✓ Sent subscription to ${relayUrl}`);
          sentCount++;
        } catch (error) {
          console.error(`✗ Failed to send subscription to ${relayUrl}:`, error);
        }
      } else {
        console.warn(`✗ Cannot send to ${relayUrl}, connection not ready`);
      }
    }

    console.log(
      `Subscription ${subscriptionId} sent to ${sentCount}/${this.relays.size} relays`,
    );
    return subscriptionId;
  }

  unsubscribe(subscriptionId) {
    this.subscriptions.delete(subscriptionId);

    const closeMessage = ["CLOSE", subscriptionId];

    for (const [relayUrl, ws] of this.relays) {
      if (ws.readyState === WebSocket.OPEN) {
        ws.send(JSON.stringify(closeMessage));
      }
    }
  }

  disconnect() {
    for (const [relayUrl, ws] of this.relays) {
      ws.close();
    }
    this.relays.clear();
    this.subscriptions.clear();
  }

  // Publish an event to all connected relays
  async publish(event) {
    return new Promise((resolve, reject) => {
      const eventMessage = ["EVENT", event];
      console.log("Publishing event:", eventMessage);

      let publishedCount = 0;
      let okCount = 0;
      let errorCount = 0;
      const totalRelays = this.relays.size;

      if (totalRelays === 0) {
        reject(new Error("No relays connected"));
        return;
      }

      const handleResponse = (relayUrl, success) => {
        if (success) {
          okCount++;
        } else {
          errorCount++;
        }

        if (okCount + errorCount === totalRelays) {
          if (okCount > 0) {
            resolve({ success: true, okCount, errorCount });
          } else {
            reject(new Error(`All relays rejected the event. Errors: ${errorCount}`));
          }
        }
      };

      // Set up a temporary listener for OK responses
      const originalHandleMessage = this.handleMessage.bind(this);
      this.handleMessage = (relayUrl, message) => {
        if (message[0] === "OK" && message[1] === event.id) {
          const success = message[2] === true;
          console.log(`Relay ${relayUrl} response:`, success ? "OK" : "REJECTED", message[3] || "");
          handleResponse(relayUrl, success);
        }
        // Call original handler for other messages
        originalHandleMessage(relayUrl, message);
      };

      // Send to all connected relays
      for (const [relayUrl, ws] of this.relays) {
        if (ws.readyState === WebSocket.OPEN) {
          try {
            ws.send(JSON.stringify(eventMessage));
            publishedCount++;
            console.log(`Event sent to ${relayUrl}`);
          } catch (error) {
            console.error(`Failed to send event to ${relayUrl}:`, error);
            handleResponse(relayUrl, false);
          }
        } else {
          console.warn(`Relay ${relayUrl} is not open, skipping`);
          handleResponse(relayUrl, false);
        }
      }

      // Restore original handler after timeout
      setTimeout(() => {
        this.handleMessage = originalHandleMessage;
        if (okCount + errorCount < totalRelays) {
          reject(new Error("Timeout waiting for relay responses"));
        }
      }, 10000); // 10 second timeout
    });
  }
}

// Create a global client instance
export const nostrClient = new NostrClient();

// IndexedDB helpers for caching events (kind 0 profiles)
const DB_NAME = "nostrCache";
const DB_VERSION = 1;
const STORE_EVENTS = "events";

function openDB() {
  return new Promise((resolve, reject) => {
    try {
      const req = indexedDB.open(DB_NAME, DB_VERSION);
      req.onupgradeneeded = () => {
        const db = req.result;
        if (!db.objectStoreNames.contains(STORE_EVENTS)) {
          const store = db.createObjectStore(STORE_EVENTS, { keyPath: "id" });
          store.createIndex("byKindAuthor", ["kind", "pubkey"], {
            unique: false,
          });
          store.createIndex(
            "byKindAuthorCreated",
            ["kind", "pubkey", "created_at"],
            { unique: false },
          );
        }
      };
      req.onsuccess = () => resolve(req.result);
      req.onerror = () => reject(req.error);
    } catch (e) {
      reject(e);
    }
  });
}

async function getLatestProfileEvent(pubkey) {
  try {
    const db = await openDB();
    return await new Promise((resolve, reject) => {
      const tx = db.transaction(STORE_EVENTS, "readonly");
      const idx = tx.objectStore(STORE_EVENTS).index("byKindAuthorCreated");
      const range = IDBKeyRange.bound(
        [0, pubkey, -Infinity],
        [0, pubkey, Infinity],
      );
      const req = idx.openCursor(range, "prev"); // newest first
      req.onsuccess = () => {
        const cursor = req.result;
        resolve(cursor ? cursor.value : null);
      };
      req.onerror = () => reject(req.error);
    });
  } catch (e) {
    console.warn("IDB getLatestProfileEvent failed", e);
    return null;
  }
}

async function putEvent(event) {
  try {
    const db = await openDB();
    await new Promise((resolve, reject) => {
      const tx = db.transaction(STORE_EVENTS, "readwrite");
      tx.oncomplete = () => resolve();
      tx.onerror = () => reject(tx.error);
      tx.objectStore(STORE_EVENTS).put(event);
    });
  } catch (e) {
    console.warn("IDB putEvent failed", e);
  }
}

function parseProfileFromEvent(event) {
  try {
    const profile = JSON.parse(event.content || "{}");
    return {
      name: profile.name || profile.display_name || "",
      picture: profile.picture || "",
      banner: profile.banner || "",
      about: profile.about || "",
      nip05: profile.nip05 || "",
      lud16: profile.lud16 || profile.lud06 || "",
    };
  } catch (e) {
    return {
      name: "",
      picture: "",
      banner: "",
      about: "",
      nip05: "",
      lud16: "",
    };
  }
}

// Fetch user profile metadata (kind 0)
export async function fetchUserProfile(pubkey) {
  return new Promise(async (resolve, reject) => {
    console.log(`Starting profile fetch for pubkey: ${pubkey}`);

    let resolved = false;
    let newestEvent = null;
    let debounceTimer = null;
    let overallTimer = null;
    let subscriptionId = null;

    function cleanup() {
      if (subscriptionId) {
        try {
          nostrClient.unsubscribe(subscriptionId);
        } catch {}
      }
      if (debounceTimer) clearTimeout(debounceTimer);
      if (overallTimer) clearTimeout(overallTimer);
    }

    // 1) Try cached profile first and resolve immediately if present
    try {
      const cachedEvent = await getLatestProfileEvent(pubkey);
      if (cachedEvent) {
        console.log("Using cached profile event");
        const profile = parseProfileFromEvent(cachedEvent);
        resolved = true; // resolve immediately with cache
        resolve(profile);
      }
    } catch (e) {
      console.warn("Failed to load cached profile", e);
    }

    // 2) Set overall timeout
    overallTimer = setTimeout(() => {
      if (!newestEvent) {
        console.log("Profile fetch timeout reached");
        if (!resolved) reject(new Error("Profile fetch timeout"));
      } else if (!resolved) {
        resolve(parseProfileFromEvent(newestEvent));
      }
      cleanup();
    }, 15000);

    // 3) Wait a bit to ensure connections are ready and then subscribe without limit
    setTimeout(() => {
      console.log("Starting subscription after connection delay...");
      subscriptionId = nostrClient.subscribe(
        {
          kinds: [0],
          authors: [pubkey],
        },
        (event) => {
          // Collect all kind 0 events and pick the newest by created_at
          if (!event || event.kind !== 0) return;
          console.log("Profile event received:", event);

          if (
            !newestEvent ||
            (event.created_at || 0) > (newestEvent.created_at || 0)
          ) {
            newestEvent = event;
          }

          // Debounce to wait for more relays; then finalize selection
          if (debounceTimer) clearTimeout(debounceTimer);
          debounceTimer = setTimeout(async () => {
            try {
              if (newestEvent) {
                await putEvent(newestEvent); // cache newest only
                const profile = parseProfileFromEvent(newestEvent);

                // Notify listeners that an updated profile is available
                try {
                  if (typeof window !== "undefined" && window.dispatchEvent) {
                    window.dispatchEvent(
                      new CustomEvent("profile-updated", {
                        detail: { pubkey, profile, event: newestEvent },
                      }),
                    );
                  }
                } catch (e) {
                  console.warn("Failed to dispatch profile-updated event", e);
                }

                if (!resolved) {
                  resolve(profile);
                  resolved = true;
                }
              }
            } finally {
              cleanup();
            }
          }, 800);
        },
      );
    }, 2000);
  });
}

// Fetch events using WebSocket REQ envelopes
export async function fetchEvents(filters, options = {}) {
  return new Promise(async (resolve, reject) => {
    console.log(`Starting event fetch with filters:`, filters);

    let resolved = false;
    let events = [];
    let debounceTimer = null;
    let overallTimer = null;
    let subscriptionId = null;
    let eoseReceived = false;

    const {
      timeout = 30000,
      debounceDelay = 1000,
      limit = null
    } = options;

    function cleanup() {
      if (subscriptionId) {
        try {
          nostrClient.unsubscribe(subscriptionId);
        } catch {}
      }
      if (debounceTimer) clearTimeout(debounceTimer);
      if (overallTimer) clearTimeout(overallTimer);
    }

    // Set overall timeout
    overallTimer = setTimeout(() => {
      if (!resolved) {
        console.log("Event fetch timeout reached");
        if (events.length > 0) {
          resolve(events);
        } else {
          reject(new Error("Event fetch timeout"));
        }
        resolved = true;
      }
      cleanup();
    }, timeout);

    // Subscribe to events
    setTimeout(() => {
      console.log("Starting event subscription...");

      // Add limit to filters if specified
      const requestFilters = { ...filters };
      if (limit) {
        requestFilters.limit = limit;
      }

      console.log('Sending REQ with filters:', requestFilters);

      subscriptionId = nostrClient.subscribe(
        requestFilters,
        (event) => {
          if (!event) return;
          console.log("Event received:", event);

          // Check if we already have this event (deduplication)
          const existingEvent = events.find(e => e.id === event.id);
          if (!existingEvent) {
            events.push(event);
          }

          // If we have a limit and reached it, resolve immediately
          if (limit && events.length >= limit) {
            if (!resolved) {
              resolve(events.slice(0, limit));
              resolved = true;
            }
            cleanup();
            return;
          }

          // Debounce to wait for more events
          if (debounceTimer) clearTimeout(debounceTimer);
          debounceTimer = setTimeout(() => {
            if (eoseReceived && !resolved) {
              resolve(events);
              resolved = true;
              cleanup();
            }
          }, debounceDelay);
        },
      );

      // Listen for EOSE events
      const handleEOSE = (event) => {
        if (event.detail.subscriptionId === subscriptionId) {
          console.log("EOSE received for subscription", subscriptionId);
          eoseReceived = true;

          // If we haven't resolved yet and have events, resolve now
          if (!resolved && events.length > 0) {
            resolve(events);
            resolved = true;
            cleanup();
          }
        }
      };

      // Add EOSE listener
      window.addEventListener('nostr-eose', handleEOSE);

      // Cleanup EOSE listener
      const originalCleanup = cleanup;
      cleanup = () => {
        window.removeEventListener('nostr-eose', handleEOSE);
        originalCleanup();
      };
    }, 1000);
  });
}

// Fetch all events with timestamp-based pagination
export async function fetchAllEvents(options = {}) {
  const {
    limit = 100,
    since = null,
    until = null,
    authors = null
  } = options;

  const filters = {};

  if (since) filters.since = since;
  if (until) filters.until = until;
  if (authors) filters.authors = authors;

  const events = await fetchEvents(filters, {
    limit: limit,
    timeout: 30000
  });

  return events;
}

// Fetch user's events with timestamp-based pagination
export async function fetchUserEvents(pubkey, options = {}) {
  const {
    limit = 100,
    since = null,
    until = null
  } = options;

  const filters = {
    authors: [pubkey]
  };

  if (since) filters.since = since;
  if (until) filters.until = until;

  const events = await fetchEvents(filters, {
    limit: limit,
    timeout: 30000
  });

  return events;
}

// Initialize client connection
export async function initializeNostrClient() {
  await nostrClient.connect();
}
@@ -1,191 +0,0 @@
body {
  font-family: Arial, sans-serif;
  margin: 0;
  padding: 0;
}

.container {
  background: #f9f9f9;
  padding: 30px;
  border-radius: 8px;
  margin-top: 20px; /* Reduced space since header is now sticky */
}

.form-group {
  margin-bottom: 20px;
}

label {
  display: block;
  margin-bottom: 5px;
  font-weight: bold;
}

input, textarea {
  width: 100%;
  padding: 10px;
  border: 1px solid #ddd;
  border-radius: 4px;
}

button {
  background: #007cba;
  color: white;
  padding: 12px 20px;
  border: none;
  border-radius: 4px;
  cursor: pointer;
}

button:hover {
  background: #005a87;
}

.danger-button {
  background: #dc3545;
}

.danger-button:hover {
  background: #c82333;
}

.status {
  margin-top: 20px;
  margin-bottom: 20px;
  padding: 10px;
  border-radius: 4px;
}

.success {
  background: #d4edda;
  color: #155724;
}

.error {
  background: #f8d7da;
  color: #721c24;
}

.info {
  background: #d1ecf1;
  color: #0c5460;
}

.header-panel {
  position: sticky;
  top: 0;
  left: 0;
  width: 100%;
  background-color: #f8f9fa;
  box-shadow: 0 2px 4px rgba(0,0,0,0.1);
  z-index: 1000;
  height: 60px;
  display: flex;
  align-items: center;
  background-size: cover;
  background-position: center;
  overflow: hidden;
}

.header-content {
  display: flex;
  align-items: center;
  height: 100%;
  padding: 0 0 0 12px;
  width: 100%;
  margin: 0 auto;
  box-sizing: border-box;
}

.header-left {
  display: flex;
  align-items: center;
  justify-content: flex-start;
  height: 100%;
}

.header-center {
  display: flex;
  flex-grow: 1;
  align-items: center;
  justify-content: flex-start;
  position: relative;
  overflow: hidden;
}

.header-right {
  display: flex;
  align-items: center;
  justify-content: flex-end;
  height: 100%;
}

.header-logo {
  height: 100%;
  aspect-ratio: 1 / 1;
  width: auto;
  border-radius: 0;
  object-fit: cover;
  flex-shrink: 0;
}

.user-avatar {
  width: 40px;
  height: 40px;
  border-radius: 50%;
  object-fit: cover;
  border: 2px solid white;
  margin-right: 10px;
  box-shadow: 0 1px 3px rgba(0,0,0,0.2);
}

.user-profile {
  display: flex;
  align-items: center;
  position: relative;
  z-index: 1;
}

.user-info {
  font-weight: bold;
  font-size: 1.2em;
  text-align: left;
}

.user-name {
  font-weight: bold;
  font-size: 1em;
  display: block;
}

.profile-banner {
  position: absolute;
  width: 100%;
  height: 100%;
  top: 0;
  left: 0;
  z-index: -1;
  opacity: 0.7;
}

.logout-button {
  background: transparent;
  color: #6c757d;
  border: none;
  font-size: 20px;
  cursor: pointer;
  padding: 0;
  display: flex;
  align-items: center;
  justify-content: center;
  width: 48px;
  height: 100%;
  margin-left: 10px;
  margin-right: 0;
  flex-shrink: 0;
}

.logout-button:hover {
  background: transparent;
  color: #343a40;
}
@@ -54,6 +54,7 @@ cd cmd/benchmark
```

This will:

- Clone all external relay repositories
- Create Docker configurations for each relay
- Set up configuration files

@@ -68,6 +69,7 @@ docker compose up --build
```

The system will:

- Build and start all relay containers
- Wait for all relays to become healthy
- Run benchmarks against each relay sequentially

@@ -89,15 +91,15 @@ ls reports/run_YYYYMMDD_HHMMSS/

### Docker Compose Services

| Service          | Port | Description                               |
| ---------------- | ---- | ----------------------------------------- |
| next-orly        | 8001 | This repository's BadgerDB relay          |
| khatru-sqlite    | 8002 | Khatru with SQLite backend                |
| khatru-badger    | 8003 | Khatru with Badger backend                |
| relayer-basic    | 8004 | Basic relayer example                     |
| strfry           | 8005 | Strfry C++ LMDB relay                     |
| nostr-rs-relay   | 8006 | Rust SQLite relay                         |
| benchmark-runner | -    | Orchestrates tests and aggregates results |

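Once the stack is up, each relay can be spot-checked from the host using the ports in the table above. A quick loop, assuming `websocat` is installed locally:

```bash
# Probe every mapped relay port with a minimal REQ envelope
for port in 8001 8002 8003 8004 8005 8006; do
  echo '["REQ","test",{}]' | websocat "ws://localhost:$port"
done
```
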
### File Structure

@@ -130,16 +132,16 @@ The benchmark can be configured via environment variables in `docker-compose.yml`

```yaml
environment:
  - BENCHMARK_EVENTS=10000 # Number of events per test
  - BENCHMARK_WORKERS=8 # Concurrent workers
  - BENCHMARK_DURATION=60s # Test duration
  - BENCHMARK_TARGETS=... # Relay endpoints to test
```

### Custom Configuration

1. **Modify test parameters**: Edit environment variables in `docker-compose.yml`
2. **Add new relays** (see the example after this list):
   - Add service to `docker-compose.yml`
   - Create appropriate Dockerfile
   - Update `BENCHMARK_TARGETS` environment variable
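
For example, wiring a hypothetical `my-relay` service into the run might look like the snippet below; the `ws://host:port` target format is an assumption here, so confirm the exact format the runner expects in `main.go`:

```yaml
environment:
  # Comma-separated relay endpoints the benchmark runner should hit
  - BENCHMARK_TARGETS=ws://next-orly:8080,ws://my-relay:8080
```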
@@ -174,16 +176,19 @@ go build -o benchmark main.go

## Benchmark Results Interpretation

### Peak Throughput Test

- **High events/sec**: Good write performance
- **Low latency**: Efficient event processing
- **High success rate**: Stable under load

### Burst Pattern Test

- **Consistent performance**: Good handling of variable loads
- **Low P95/P99 latency**: Predictable response times
- **No errors during bursts**: Robust queuing/buffering

### Mixed Read/Write Test

- **Balanced throughput**: Good concurrent operation handling
- **Low read latency**: Efficient query processing
- **Stable write performance**: Queries don't significantly impact writes

@@ -200,6 +205,7 @@ go build -o benchmark main.go

### Modifying Relay Configurations

Each relay's Dockerfile and configuration can be customized:

- **Resource limits**: Adjust memory/CPU limits in docker-compose.yml
- **Database settings**: Modify configuration files in `configs/`
- **Network settings**: Update port mappings and health checks

@@ -257,4 +263,4 @@ To add support for new relay implementations:

## License

This benchmark suite is part of the next.orly.dev project and follows the same licensing terms.

@@ -1,4 +1,4 @@
version: "3.8"

services:
  # Next.orly.dev relay (this repository)
@@ -19,7 +19,11 @@ services:
    networks:
      - benchmark-net
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "code=$(curl -s -o /dev/null -w '%{http_code}' http://localhost:8080 || echo 000); echo $$code | grep -E '^(101|200|400|404|426)$' >/dev/null",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -41,7 +45,11 @@ services:
    networks:
      - benchmark-net
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -63,7 +71,11 @@ services:
    networks:
      - benchmark-net
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "wget --quiet --server-response --tries=1 http://localhost:3334 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -87,7 +99,11 @@ services:
      postgres:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "wget --quiet --server-response --tries=1 http://localhost:7447 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404)' >/dev/null",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -108,7 +124,11 @@ services:
    networks:
      - benchmark-net
    healthcheck:
      test:
        [
          "CMD-SHELL",
          "wget --quiet --server-response --tries=1 http://127.0.0.1:8080 2>&1 | grep -E 'HTTP/[0-9.]+ (101|200|400|404|426)' >/dev/null",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -130,7 +150,15 @@ services:
    networks:
      - benchmark-net
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--quiet",
          "--tries=1",
          "--spider",
          "http://localhost:8080",
        ]
      interval: 30s
      timeout: 10s
      retries: 3
@@ -197,4 +225,4 @@ networks:

volumes:
  benchmark-data:
    driver: local

1 cmd/benchmark/external/khatru vendored
Submodule cmd/benchmark/external/khatru deleted from 668c41b988
@@ -325,10 +325,10 @@ func (b *Benchmark) RunSuite() {
		fmt.Printf("RunConcurrentQueryStoreTest..\n")
		b.RunConcurrentQueryStoreTest()
		if round < 2 {
			fmt.Printf("\nPausing 10s before next round...\n")
			time.Sleep(10 * time.Second)
		}
		fmt.Printf("\n=== Test round completed ===\n\n")
	}
}

@@ -2,7 +2,7 @@
# Fixes: failed to solve: error from sender: open cmd/benchmark/data/postgres: permission denied

# Benchmark data and reports (mounted at runtime via volumes)
../../cmd/benchmark/data/
cmd/benchmark/reports/

# VCS and OS cruft
@@ -13,6 +13,8 @@ cmd/benchmark/reports/

# Go build cache and binaries
**/bin/
**/dist/
**/build/
**/*.out

# Allow web dist directory (needed for embedding)
!app/web/dist/
511 contrib/stella/APACHE-PROXY-GUIDE.md Normal file
@@ -0,0 +1,511 @@
# Apache Reverse Proxy Guide for Docker Apps

**Complete guide for WebSocket-enabled applications - covers both Plesk and Standard Apache**
**Updated with real-world troubleshooting solutions and latest Orly relay improvements**

## 🎯 **What This Solves**

- WebSocket connection failures (`NS_ERROR_WEBSOCKET_CONNECTION_REFUSED`)
- Nostr relay connectivity issues (`HTTP 426` instead of WebSocket upgrade)
- Docker container proxy configuration
- SSL certificate integration
- Plesk configuration conflicts and virtual host precedence issues
- **NEW**: WebSocket scheme validation errors (`expected 'ws' got 'wss'`)
- **NEW**: Proxy-friendly relay configuration with enhanced CORS headers
- **NEW**: Improved error handling for malformed client data

## 🐳 **Step 1: Deploy Your Docker Application**

### **For Stella's Orly Relay (Latest Version with Proxy Improvements):**

```bash
# Pull and run the relay with enhanced proxy support
docker run -d \
  --name orly-relay \
  --restart unless-stopped \
  -p 127.0.0.1:7777:7777 \
  -v /data/orly-relay:/data \
  -e ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx \
  -e ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z,npub1m4ny6hjqzepn4rxknuq94c2gpqzr29ufkkw7ttcxyak7v43n6vvsajc2jl \
  -e ORLY_BOOTSTRAP_RELAYS=wss://profiles.nostr1.com,wss://purplepag.es,wss://relay.nostr.band,wss://relay.damus.io \
  -e ORLY_RELAY_URL=wss://orly-relay.imwald.eu \
  -e ORLY_ACL_MODE=follows \
  -e ORLY_SPIDER_MODE=follows \
  -e ORLY_SPIDER_FREQUENCY=1h \
  -e ORLY_SUBSCRIPTION_ENABLED=false \
  silberengel/next-orly:latest

# Test the relay
curl -I http://127.0.0.1:7777
# Should return: HTTP/1.1 200 OK with enhanced CORS headers
```

### **For Web Apps (like Jumble):**

```bash
# Run with fixed port for easier proxy setup
docker run -d \
  --name jumble-app \
  --restart unless-stopped \
  -p 127.0.0.1:3000:80 \
  -e NODE_ENV=production \
  silberengel/imwald-jumble:latest

# Test the app
curl -I http://127.0.0.1:3000
```

## 🔧 **Step 2A: PLESK Configuration**

### **For Your Friend's Standard Apache Setup:**

**Tell your friend to create `/etc/apache2/sites-available/domain.conf`:**

```apache
<VirtualHost *:443>
    ServerName your-domain.com

    # SSL Configuration (Let's Encrypt)
    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem

    # Enable required modules first:
    # sudo a2enmod proxy proxy_http proxy_wstunnel rewrite headers ssl

    # Proxy settings
    ProxyPreserveHost On
    ProxyRequests Off

    # WebSocket upgrade handling - CRITICAL for apps with WebSockets
    RewriteEngine On
    RewriteCond %{HTTP:Upgrade} websocket [NC]
    RewriteCond %{HTTP:Connection} upgrade [NC]
    RewriteRule ^/?(.*) "ws://127.0.0.1:PORT/$1" [P,L]

    # Regular HTTP proxy
    ProxyPass / http://127.0.0.1:PORT/
    ProxyPassReverse / http://127.0.0.1:PORT/

    # Headers for modern web apps
    Header always set X-Forwarded-Proto "https"
    Header always set X-Forwarded-Port "443"
    Header always set X-Forwarded-For %{REMOTE_ADDR}s

    # Security headers
    Header always set Strict-Transport-Security "max-age=63072000; includeSubDomains"
    Header always set X-Content-Type-Options nosniff
    Header always set X-Frame-Options SAMEORIGIN
</VirtualHost>

# Redirect HTTP to HTTPS
<VirtualHost *:80>
    ServerName your-domain.com
    Redirect permanent / https://your-domain.com/
</VirtualHost>
```

**Then enable it:**

```bash
sudo a2ensite domain.conf
sudo systemctl reload apache2
```

### **For Plesk Users (You):**

⚠️ **Important**: Plesk often doesn't apply Apache directives correctly through the interface. If the interface method fails, use the "Direct Apache Override" method below.

#### **Method 1: Plesk Interface (Try First)**

1. **Go to Plesk** → Websites & Domains → **your-domain.com**
2. **Click "Apache & nginx Settings"**
3. **DISABLE nginx** (uncheck "Proxy mode" and "Smart static files processing")
4. **Clear HTTP section** (leave empty)
5. **In HTTPS section, add:**

**For Nostr Relay (port 7777):**

```apache
ProxyRequests Off
ProxyPreserveHost On
ProxyPass / ws://127.0.0.1:7777/
ProxyPassReverse / ws://127.0.0.1:7777/
Header always set Access-Control-Allow-Origin "*"
```

6. **Click "Apply"** and wait 60 seconds

#### **Method 2: Direct Apache Override (If Plesk Interface Fails)**

If Plesk doesn't apply your configuration (common issue), bypass it entirely:

```bash
# Create direct Apache override
sudo tee /etc/apache2/conf-available/relay-override.conf << 'EOF'
<VirtualHost YOUR_SERVER_IP:443>
    ServerName your-domain.com
    ServerAlias www.your-domain.com
    ServerAlias ipv4.your-domain.com

    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem

    DocumentRoot /var/www/relay

    # For Nostr relay - proxy everything to WebSocket
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    # CORS headers
    Header always set Access-Control-Allow-Origin "*"
    Header always set Access-Control-Allow-Headers "Origin, X-Requested-With, Content-Type, Accept, Authorization"

    # Logging
    ErrorLog /var/log/apache2/relay-error.log
    CustomLog /var/log/apache2/relay-access.log combined
</VirtualHost>
EOF

# Enable the override
sudo a2enconf relay-override
sudo mkdir -p /var/www/relay
sudo systemctl restart apache2

# Remove Plesk config if it conflicts
sudo rm /etc/apache2/plesk.conf.d/vhosts/your-domain.com.conf
```

#### **Method 3: Debugging Plesk Issues**

If configurations aren't being applied:

```bash
# Check if Plesk applied your config
grep -E "(ProxyPass|proxy)" /etc/apache2/plesk.conf.d/vhosts/your-domain.com.conf

# Check virtual host precedence
apache2ctl -S | grep your-domain.com

# Check Apache modules
apache2ctl -M | grep -E "(proxy|rewrite)"
```

#### **For Web Apps (port 3000 or 32768):**

```apache
ProxyPreserveHost On
ProxyRequests Off

# WebSocket upgrade handling
RewriteEngine On
RewriteCond %{HTTP:Upgrade} websocket [NC]
RewriteCond %{HTTP:Connection} upgrade [NC]
RewriteRule ^/?(.*) "ws://127.0.0.1:32768/$1" [P,L]

# Regular HTTP proxy
ProxyPass / http://127.0.0.1:32768/
ProxyPassReverse / http://127.0.0.1:32768/

# Headers
ProxyAddHeaders On
Header always set X-Forwarded-Proto "https"
Header always set X-Forwarded-Port "443"
```

### **Method B: Direct Apache Override (RECOMMENDED for Plesk)**

⚠️ **Use this if Plesk interface doesn't work** (common issue):

```bash
# Create direct Apache override with your server's IP
sudo tee /etc/apache2/conf-available/relay-override.conf << 'EOF'
<VirtualHost YOUR_SERVER_IP:443>
    ServerName your-domain.com
    ServerAlias www.your-domain.com
    ServerAlias ipv4.your-domain.com

    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/your-domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/your-domain.com/privkey.pem

    DocumentRoot /var/www/relay

    # For Nostr relay - proxy everything to WebSocket
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    # CORS headers
    Header always set Access-Control-Allow-Origin "*"

    # Logging
    ErrorLog /var/log/apache2/relay-error.log
    CustomLog /var/log/apache2/relay-access.log combined
</VirtualHost>
EOF

# Enable override and create directory
sudo a2enconf relay-override
sudo mkdir -p /var/www/relay
sudo systemctl restart apache2

# Remove conflicting Plesk config if needed
sudo rm /etc/apache2/plesk.conf.d/vhosts/your-domain.com.conf
```

## ⚡ **Step 3: Enable Required Modules**

In Plesk, you might need to enable modules. SSH to your server:

```bash
# Enable Apache modules
sudo a2enmod proxy
sudo a2enmod proxy_http
sudo a2enmod proxy_wstunnel
sudo a2enmod rewrite
sudo a2enmod headers
sudo systemctl restart apache2
```

## 🆕 **Step 4: Latest Orly Relay Improvements**

### **Enhanced Proxy Support**

The latest Orly relay includes several proxy improvements:

1. **Flexible WebSocket Scheme Handling**: Accepts both `ws://` and `wss://` schemes for authentication
2. **Enhanced CORS Headers**: Better compatibility with web applications
3. **Improved Error Handling**: More robust handling of malformed client data
4. **Proxy-Aware Logging**: Better debugging information for proxy setups
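
To confirm these improvements are active after upgrading, you can watch the relay's logs for the proxy-related messages (container name `orly-relay` as used in Step 1):

```bash
# Look for proxy-aware log lines emitted by the relay
docker logs orly-relay | grep -i "proxy info"
docker logs orly-relay | grep -i "websocket connection"
```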
### **Key Environment Variables**

```bash
# Essential for proxy setups
ORLY_RELAY_URL=wss://your-domain.com  # Must match your public URL
ORLY_ACL_MODE=follows                 # Enable follows-based access control
ORLY_SPIDER_MODE=follows              # Enable content syncing from other relays
ORLY_SUBSCRIPTION_ENABLED=false       # Disable payment requirements
```

### **Testing the Enhanced Relay**

```bash
# Test local connectivity
curl -I http://127.0.0.1:7777

# Expected response includes enhanced CORS headers:
# Access-Control-Allow-Credentials: true
# Access-Control-Max-Age: 86400
# Vary: Origin, Access-Control-Request-Method, Access-Control-Request-Headers
```

## ⚡ **Step 5: Alternative - Nginx in Plesk**

If Apache keeps giving issues, switch to Nginx in Plesk:

1. Go to Plesk → Websites & Domains → orly-relay.imwald.eu
2. Click "Apache & nginx Settings"
3. Enable "nginx" and set it to serve static files
4. In "Additional nginx directives" add:

```nginx
location / {
    proxy_pass http://127.0.0.1:7777;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
}
```

## 🧪 **Testing**

After making changes:

1. **Apply settings** in Plesk
2. **Wait 30 seconds** for changes to take effect
3. **Test WebSocket**:

```bash
# From your server
echo '["REQ","test",{}]' | websocat wss://orly-relay.imwald.eu/
```

## 🎯 **Expected Result**

- ✅ No more "websocket error" in browser console
- ✅ `wss://orly-relay.imwald.eu/` connects successfully
- ✅ Jumble app can publish notes

## 🚨 **Real-World Troubleshooting Guide**

_Based on actual deployment experience with Plesk and WebSocket issues_

### **Critical Issues & Solutions:**

#### **🔴 HTTP 503 Service Unavailable**

- **Cause**: Docker container not running
- **Check**: `docker ps | grep relay`
- **Fix**: `docker start container-name`

#### **🔴 HTTP 426 Instead of WebSocket Upgrade**

- **Cause**: Apache using `http://` proxy instead of `ws://`
- **Fix**: Use `ProxyPass / ws://127.0.0.1:7777/` (not `http://`)

#### **🔴 Plesk Configuration Not Applied**

- **Symptom**: Config not in `/etc/apache2/plesk.conf.d/vhosts/domain.conf`
- **Solution**: Use Direct Apache Override method (bypass Plesk interface)

#### **🔴 Virtual Host Conflicts**

- **Check**: `apache2ctl -S | grep domain.com`
- **Fix**: Remove Plesk config: `sudo rm /etc/apache2/plesk.conf.d/vhosts/domain.conf`

#### **🔴 Nginx Intercepting (Plesk)**

- **Symptom**: Response shows `Server: nginx`
- **Fix**: Disable nginx in Plesk settings

### **Debug Commands:**

```bash
# Essential debugging
docker ps | grep relay           # Container running?
curl -I http://127.0.0.1:7777    # Local relay (should return 200 with CORS headers)
apache2ctl -S | grep domain.com  # Virtual host precedence
grep ProxyPass /etc/apache2/plesk.conf.d/vhosts/domain.conf  # Config applied?

# WebSocket testing
echo '["REQ","test",{}]' | websocat wss://domain.com/     # Root path
echo '["REQ","test",{}]' | websocat wss://domain.com/ws/  # /ws/ path

# Check relay logs for proxy information
docker logs relay-name | grep -i "proxy info"
docker logs relay-name | grep -i "websocket connection"
```

## 🚨 **Latest Troubleshooting Solutions**

### **WebSocket Scheme Validation Errors**

**Problem**: `"HTTP Scheme incorrect: expected 'ws' got 'wss'"`

**Solution**: Use the latest Orly relay image with enhanced proxy support:

```bash
# Pull the latest image with proxy improvements
docker pull silberengel/next-orly:latest

# Restart with the latest image
docker stop orly-relay && docker rm orly-relay
# Then run with the configuration above
```

### **Malformed Client Data Errors**

**Problem**: `"invalid hex array size, got 2 expect 64"`

**Solution**: These are client-side issues, not server problems. The latest relay handles them gracefully:

- The relay now sends helpful error messages to clients
- Malformed requests are logged but don't crash the relay
- Normal operations continue despite client errors

### **Follows ACL Not Working**

**Problem**: Only owners can write, admins can't write

**Solution**: Ensure proper configuration:

```bash
# Check ACL configuration
docker exec orly-relay env | grep ACL

# Should show: ORLY_ACL_MODE=follows
# If not, restart with explicit configuration
```

### **Spider Not Syncing Content**

**Problem**: Spider enabled but not pulling events

**Solution**: Check for relay lists and follow events:

```bash
# Check spider status
docker logs orly-relay | grep -i spider

# Look for relay discovery
docker logs orly-relay | grep -i "relay URLs"

# Check for follow events
docker logs orly-relay | grep -i "kind.*3"
```

### **Working Solution (Proven):**

```apache
<VirtualHost SERVER_IP:443>
    ServerName domain.com
    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/domain.com/privkey.pem
    DocumentRoot /var/www/relay

    # Direct WebSocket proxy - this is the key!
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    Header always set Access-Control-Allow-Origin "*"
</VirtualHost>
```

---

**Key Lessons**:

1. Plesk interface often fails to apply Apache directives
2. Use `ws://` proxy for Nostr relays, not `http://`
3. Direct Apache config files are more reliable than Plesk interface
4. Always check virtual host precedence with `apache2ctl -S`
5. **NEW**: Use the latest Orly relay image for better proxy compatibility
6. **NEW**: Enhanced CORS headers improve web app compatibility
7. **NEW**: Flexible WebSocket scheme handling eliminates authentication errors
8. **NEW**: Improved error handling makes the relay more robust

## 🎉 **Summary of Latest Improvements**

### **Enhanced Proxy Support**

- ✅ Flexible WebSocket scheme validation (accepts both `ws://` and `wss://`)
- ✅ Enhanced CORS headers for better web app compatibility
- ✅ Improved error handling for malformed client data
- ✅ Proxy-aware logging for better debugging

### **Spider and ACL Features**

- ✅ Follows-based access control (`ORLY_ACL_MODE=follows`)
- ✅ Content syncing from other relays (`ORLY_SPIDER_MODE=follows`)
- ✅ No payment requirements (`ORLY_SUBSCRIPTION_ENABLED=false`)

### **Production Ready**

- ✅ Robust error handling
- ✅ Enhanced logging and debugging
- ✅ Better client compatibility
- ✅ Improved proxy support

**The latest Orly relay is now fully optimized for proxy environments and provides a much better user experience!**
195 contrib/stella/DOCKER.md Normal file
@@ -0,0 +1,195 @@
# Docker Deployment Guide

## Quick Start

### 1. Basic Relay Setup

```bash
# Build and start the relay
docker-compose up -d

# View logs
docker-compose logs -f orly-relay

# Stop the relay
docker-compose down
```

### 2. With Nginx Proxy (for SSL/domain setup)

```bash
# Start relay with nginx proxy
docker-compose --profile proxy up -d

# Configure SSL certificates in nginx/ssl/
# Then update nginx/nginx.conf to enable HTTPS
```

## Configuration

### Environment Variables

Copy `env.example` to `.env` and customize:

```bash
cp env.example .env
# Edit .env with your settings
```

Key settings (an example `.env` follows the list):

- `ORLY_OWNERS`: Owner npubs (comma-separated, full control)
- `ORLY_ADMINS`: Admin npubs (comma-separated, deletion permissions)
- `ORLY_PORT`: Port to listen on (default: 7777)
- `ORLY_MAX_CONNECTIONS`: Max concurrent connections
- `ORLY_CONCURRENT_WORKERS`: CPU cores for concurrent processing (0 = auto)
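
For reference, a minimal `.env` might look like this; the npub values are placeholders, and `env.example` has the full list of options:

```bash
# Placeholder keys - replace with your own npubs
ORLY_OWNERS=npub1yourownerkey...
ORLY_ADMINS=npub1youradminkey...,npub1anotheradmin...
ORLY_PORT=7777
ORLY_MAX_CONNECTIONS=1000
ORLY_CONCURRENT_WORKERS=0
```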
### Data Persistence

The relay data is stored in the `./data` directory, which is mounted as a volume.

### Performance Tuning

Based on the v0.4.8 optimizations:

- Concurrent event publishing using all CPU cores
- Optimized BadgerDB access patterns
- Configurable batch sizes and cache settings
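
The worker count is the main tuning knob exposed above. A sketch of how it might be set in `docker-compose.yml` (service name `orly-relay` as used elsewhere in this guide; 0 lets the relay auto-detect cores):

```yaml
services:
  orly-relay:
    environment:
      - ORLY_CONCURRENT_WORKERS=0 # 0 = use all available CPU cores
      - ORLY_MAX_CONNECTIONS=1000
```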
## Development

### Local Build

```bash
# Pull the latest image (recommended)
docker pull silberengel/orly-relay:latest

# Or build locally if needed
docker build -t silberengel/orly-relay:latest .

# Run with custom settings
docker run -p 7777:7777 -v $(pwd)/data:/data silberengel/orly-relay:latest
```

### Testing

```bash
# Test WebSocket connection
websocat ws://localhost:7777

# Run stress tests (if available in cmd/stresstest)
go run ./cmd/stresstest -relay ws://localhost:7777
```

## Production Deployment

### SSL Setup

1. Get SSL certificates (Let's Encrypt recommended; one option is sketched below)
2. Place certificates in `nginx/ssl/`
3. Update `nginx/nginx.conf` to enable HTTPS
4. Start with proxy profile: `docker-compose --profile proxy up -d`
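
One common way to handle step 1 is certbot; this sketch assumes certbot is installed and port 80 is free on the host (adjust paths to your layout):

```bash
# Obtain a Let's Encrypt certificate, then copy it where nginx expects it
sudo certbot certonly --standalone -d your-domain.com
sudo cp /etc/letsencrypt/live/your-domain.com/fullchain.pem nginx/ssl/
sudo cp /etc/letsencrypt/live/your-domain.com/privkey.pem nginx/ssl/
```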
### Monitoring

- Health checks are configured for both services
- Logs are rotated (max 10MB, 3 files)
- Resource limits are set to prevent runaway processes

### Security

- Runs as non-root user (uid 1000)
- Rate limiting configured in nginx
- Configurable authentication and event size limits

## Troubleshooting

### Common Issues (Real-World Experience)

#### **Container Issues:**

1. **Port already in use**: Change `ORLY_PORT` in docker-compose.yml
2. **Permission denied**: Ensure `./data` directory is writable
3. **Container won't start**: Check logs with `docker logs container-name`

#### **WebSocket Issues:**

4. **HTTP 426 instead of WebSocket upgrade**:
   - Use `ws://127.0.0.1:7777` in proxy config, not `http://`
   - Ensure `proxy_wstunnel` module is enabled
5. **Connection refused in browser but works with websocat**:
   - Clear browser cache and service workers
   - Try incognito mode
   - Add CORS headers to Apache/nginx config

#### **Plesk-Specific Issues:**

6. **Plesk not applying Apache directives**:
   - Check if config appears in `/etc/apache2/plesk.conf.d/vhosts/domain.conf`
   - Use direct Apache override if Plesk interface fails
7. **Virtual host conflicts**:
   - Check precedence with `apache2ctl -S`
   - Remove conflicting Plesk configs if needed

#### **SSL Certificate Issues:**

8. **Self-signed certificate after Let's Encrypt**:
   - Plesk might not be using the correct certificate
   - Import Let's Encrypt certs into Plesk or use direct Apache config

### Debug Commands

```bash
# Container debugging
docker ps | grep relay
docker logs orly-relay
curl -I http://127.0.0.1:7777 # Should return HTTP 426

# WebSocket testing
echo '["REQ","test",{}]' | websocat wss://domain.com/
echo '["REQ","test",{}]' | websocat wss://domain.com/ws/

# Apache debugging (for reverse proxy issues)
apache2ctl -S | grep domain.com
apache2ctl -M | grep -E "(proxy|rewrite)"
grep ProxyPass /etc/apache2/plesk.conf.d/vhosts/domain.conf
```

### Logs

```bash
# View relay logs
docker-compose logs -f orly-relay

# View nginx logs (if using proxy)
docker-compose logs -f nginx

# Apache logs (for reverse proxy debugging)
sudo tail -f /var/log/apache2/error.log
sudo tail -f /var/log/apache2/domain-error.log
```

### Working Reverse Proxy Config

**For Apache (direct config file):**

```apache
<VirtualHost SERVER_IP:443>
    ServerName domain.com
    SSLEngine on
    SSLCertificateFile /etc/letsencrypt/live/domain.com/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/domain.com/privkey.pem

    # Direct WebSocket proxy for Nostr relay
    ProxyRequests Off
    ProxyPreserveHost On
    ProxyPass / ws://127.0.0.1:7777/
    ProxyPassReverse / ws://127.0.0.1:7777/

    Header always set Access-Control-Allow-Origin "*"
</VirtualHost>
```

---

_Crafted for Stella's digital forest_ 🌲
78 contrib/stella/Dockerfile Normal file
@@ -0,0 +1,78 @@
# Dockerfile for Stella's Nostr Relay (next.orly.dev)
# Owner: npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx

FROM golang:alpine AS builder

# Install build dependencies
RUN apk add --no-cache \
    git \
    build-base \
    autoconf \
    automake \
    libtool \
    pkgconfig

# Install secp256k1 library from Alpine packages
RUN apk add --no-cache libsecp256k1-dev

# Set working directory
WORKDIR /build

# Copy go modules first (for better caching)
COPY ../../go.mod go.sum ./
RUN go mod download

# Copy source code
COPY ../.. .

# Build the relay with optimizations from v0.4.8
RUN CGO_ENABLED=1 GOOS=linux go build -ldflags "-w -s" -o relay .

# Create non-root user for security
RUN adduser -D -u 1000 stella && \
    chown -R 1000:1000 /build

# Final stage - minimal runtime image
FROM alpine:latest

# Install only runtime dependencies
RUN apk add --no-cache \
    ca-certificates \
    curl \
    libsecp256k1 \
    libsecp256k1-dev

WORKDIR /app

# Copy binary from builder
COPY --from=builder /build/relay /app/relay

# Create runtime user and directories
RUN adduser -D -u 1000 stella && \
    mkdir -p /data /profiles /app && \
    chown -R 1000:1000 /data /profiles /app

# Expose the relay port
EXPOSE 7777

# Set environment variables for Stella's relay
ENV ORLY_DATA_DIR=/data
ENV ORLY_LISTEN=0.0.0.0
ENV ORLY_PORT=7777
ENV ORLY_LOG_LEVEL=info
ENV ORLY_MAX_CONNECTIONS=1000
ENV ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx
ENV ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1m4ny6hjqzepn4rxknuq94c2gpqzr29ufkkw7ttcxyak7v43n6vvsajc2jl,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z

# Health check to ensure relay is responding
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD sh -c "code=\$(curl -s -o /dev/null -w '%{http_code}' http://127.0.0.1:7777 || echo 000); echo \$code | grep -E '^(101|200|400|404|426)$' >/dev/null || exit 1"

# Create volume for persistent data
VOLUME ["/data"]

# Drop privileges and run as stella user
USER 1000:1000

# Run Stella's Nostr relay
CMD ["/app/relay"]
106 contrib/stella/SERVICE-WORKER-FIX.md Normal file
@@ -0,0 +1,106 @@
# Service Worker Certificate Caching Fix
|
||||
|
||||
## 🚨 **Problem**
|
||||
|
||||
When accessing Jumble from the ImWald landing page, the service worker serves a cached self-signed certificate instead of the new Let's Encrypt certificate.
|
||||
|
||||
## ⚡ **Solutions**
|
||||
|
||||
### **Option 1: Force Service Worker Update**
|
||||
|
||||
Add this to your Jumble app's service worker or main JavaScript:
|
||||
|
||||
```javascript
|
||||
// Force service worker update and certificate refresh
|
||||
if ("serviceWorker" in navigator) {
|
||||
navigator.serviceWorker.getRegistrations().then(function (registrations) {
|
||||
for (let registration of registrations) {
|
||||
registration.update(); // Force update
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// Clear all caches on certificate update
|
||||
if ("caches" in window) {
|
||||
caches.keys().then(function (names) {
|
||||
for (let name of names) {
|
||||
caches.delete(name);
|
||||
}
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
### **Option 2: Update Service Worker Cache Strategy**
|
||||
|
||||
In your service worker file, add cache busting for SSL-sensitive requests:
|
||||
|
||||
```javascript
|
||||
// In your service worker
|
||||
self.addEventListener("fetch", function (event) {
|
||||
// Don't cache HTTPS requests that might have certificate issues
|
||||
if (
|
||||
event.request.url.startsWith("https://") &&
|
||||
event.request.url.includes("imwald.eu")
|
||||
) {
|
||||
event.respondWith(fetch(event.request, { cache: "no-store" }));
|
||||
return;
|
||||
}
|
||||
|
||||
// Your existing fetch handling...
|
||||
});
|
||||
```
|
||||
|
||||
### **Option 3: Version Your Service Worker**
|
||||
|
||||
Update your service worker with a new version number:
|
||||
|
||||
```javascript
|
||||
// At the top of your service worker
|
||||
const CACHE_VERSION = "v2.0.1"; // Increment this when certificates change
|
||||
const CACHE_NAME = `jumble-cache-${CACHE_VERSION}`;
|
||||
|
||||
// Clear old caches
|
||||
self.addEventListener("activate", function (event) {
|
||||
event.waitUntil(
|
||||
caches.keys().then(function (cacheNames) {
|
||||
return Promise.all(
|
||||
cacheNames.map(function (cacheName) {
|
||||
if (cacheName !== CACHE_NAME) {
|
||||
return caches.delete(cacheName);
|
||||
}
|
||||
}),
|
||||
);
|
||||
}),
|
||||
);
|
||||
});
|
||||
```
|
||||
|
||||
### **Option 4: Add Cache Headers**
|
||||
|
||||
In your Plesk Apache config for Jumble, add:
|
||||
|
||||
```apache
|
||||
# Prevent service worker from caching SSL-sensitive content
|
||||
Header always set Cache-Control "no-cache, no-store, must-revalidate"
|
||||
Header always set Pragma "no-cache"
|
||||
Header always set Expires "0"
|
||||
|
||||
# Only for service worker file
|
||||
<Files "sw.js">
|
||||
Header always set Cache-Control "no-cache, no-store, must-revalidate"
|
||||
</Files>
|
||||
```
|
||||
|
||||
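If the app is served by a Go HTTP server rather than Apache, the same no-cache policy for the service worker file can be expressed as middleware. This is a minimal sketch, not part of the Plesk setup above; the `./public` directory, `/sw.js` path, and port 8080 are assumptions for illustration:

```go
// noCache wraps a handler and disables caching for the service worker file,
// so browsers always fetch a fresh sw.js after a certificate change.
package main

import "net/http"

func noCache(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/sw.js" {
			w.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate")
			w.Header().Set("Pragma", "no-cache")
			w.Header().Set("Expires", "0")
		}
		next.ServeHTTP(w, r)
	})
}

func main() {
	fs := http.FileServer(http.Dir("./public"))
	http.ListenAndServe(":8080", noCache(fs))
}
```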
## 🧹 **Immediate User Fix**

For users experiencing the certificate issue:

1. **Clear browser data** for jumble.imwald.eu
2. **Unregister service worker**:
   - F12 → Application → Service Workers → Unregister
3. **Hard refresh**: Ctrl+Shift+R
4. **Or use incognito mode** to test

---

This will prevent the service worker from serving stale certificate data.
116 contrib/stella/WEBSOCKET-DEBUG.md Normal file
@@ -0,0 +1,116 @@
# WebSocket Connection Debug Guide

## 🚨 **Current Issue**

`wss://orly-relay.imwald.eu/` returns `NS_ERROR_WEBSOCKET_CONNECTION_REFUSED`

## 🔍 **Debug Steps**

### **Step 1: Verify Relay is Running**

```bash
# On your server
curl -I http://127.0.0.1:7777
# Should return: HTTP/1.1 426 Upgrade Required

docker ps | grep stella
# Should show running container
```

### **Step 2: Test Apache Modules**

```bash
# Check if WebSocket modules are enabled
apache2ctl -M | grep -E "(proxy|rewrite)"

# If missing, enable them:
sudo a2enmod proxy
sudo a2enmod proxy_http
sudo a2enmod proxy_wstunnel
sudo a2enmod rewrite
sudo a2enmod headers
sudo systemctl restart apache2
```

### **Step 3: Check Apache Configuration**

```bash
# Check what Plesk generated
sudo cat /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf

# Look for proxy and rewrite rules
grep -E "(Proxy|Rewrite)" /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf
```

### **Step 4: Test Direct WebSocket Connection**

```bash
# Test if the issue is Apache or the relay itself
echo '["REQ","test",{}]' | websocat ws://127.0.0.1:7777/

# If that works, the issue is Apache proxy
# If that fails, the issue is the relay
```
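The same probe can be written against the coder/websocket client library this repository already uses (see go.mod), which avoids installing websocat. A minimal sketch; the URL is the local endpoint from the step above, and error handling is deliberately coarse:

```go
// wsprobe: dials the relay, sends a minimal REQ, and prints the first reply
// (an EVENT or EOSE envelope). Swap the URL for wss://orly-relay.imwald.eu/
// to test the Apache-proxied path instead.
package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/coder/websocket"
)

func main() {
	url := "ws://127.0.0.1:7777/"
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	conn, _, err := websocket.Dial(ctx, url, nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, "dial failed:", err)
		os.Exit(1)
	}
	defer conn.Close(websocket.StatusNormalClosure, "done")

	if err = conn.Write(ctx, websocket.MessageText, []byte(`["REQ","test",{}]`)); err != nil {
		fmt.Fprintln(os.Stderr, "write failed:", err)
		os.Exit(1)
	}
	_, msg, err := conn.Read(ctx)
	if err != nil {
		fmt.Fprintln(os.Stderr, "read failed:", err)
		os.Exit(1)
	}
	fmt.Println(string(msg))
}
```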
### **Step 5: Check Apache Error Logs**

```bash
# Watch Apache errors in real-time
sudo tail -f /var/log/apache2/error.log

# Then try connecting to wss://orly-relay.imwald.eu/ and see what errors appear
```

## 🔧 **Specific Plesk Fix**

Based on your current status, try this **exact configuration** in Plesk:

### **Go to Apache & nginx Settings for orly-relay.imwald.eu:**

**Clear both HTTP and HTTPS sections, then add to HTTPS:**

```apache
# Enable proxy
ProxyRequests Off
ProxyPreserveHost On

# WebSocket handling - the key part
RewriteEngine On
RewriteCond %{HTTP:Upgrade} =websocket [NC]
RewriteCond %{HTTP:Connection} upgrade [NC]
RewriteRule /(.*) ws://127.0.0.1:7777/$1 [P,L]

# Fallback for regular HTTP
RewriteCond %{HTTP:Upgrade} !=websocket [NC]
RewriteRule /(.*) http://127.0.0.1:7777/$1 [P,L]

# Headers
ProxyAddHeaders On
```

### **Alternative Simpler Version:**

If the above doesn't work, try just:

```apache
ProxyPass / http://127.0.0.1:7777/
ProxyPassReverse / http://127.0.0.1:7777/
ProxyPass /ws ws://127.0.0.1:7777/
ProxyPassReverse /ws ws://127.0.0.1:7777/
```
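As a debugging aid only (not part of the Plesk guide), Go's standard reverse proxy has forwarded WebSocket upgrade requests transparently since Go 1.12, so a few lines can temporarily stand in for Apache while isolating the problem; the listen port is an assumption:

```go
// A throwaway reverse proxy for debugging: if WebSocket connections work
// through this but not through Apache, the Apache config is at fault.
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	target, err := url.Parse("http://127.0.0.1:7777")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(target)
	// TLS termination is out of scope here; run behind your existing
	// certificate setup or test over plain HTTP.
	log.Fatal(http.ListenAndServe(":8080", proxy))
}
```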
## 🧪 **Testing Commands**

```bash
# Test the WebSocket after each change
echo '["REQ","test",{}]' | websocat wss://orly-relay.imwald.eu/

# Check what's actually being served
curl -v https://orly-relay.imwald.eu/ 2>&1 | grep -E "(HTTP|upgrade|connection)"
```

## 🎯 **Expected Fix**

The issue is likely that Apache isn't properly handling the WebSocket upgrade request. The `proxy_wstunnel` module and correct rewrite rules should fix this.

Try the **simpler ProxyPass version first** - it's often more reliable in Plesk environments.
116 contrib/stella/debug-websocket.sh Executable file
@@ -0,0 +1,116 @@
#!/bin/bash
# WebSocket Debug Script for Stella's Orly Relay

echo "🔍 Debugging WebSocket Connection for orly-relay.imwald.eu"
echo "=================================================="

echo ""
echo "📋 Step 1: Check if relay container is running"
echo "----------------------------------------------"
docker ps | grep -E "(stella|relay|orly)" || echo "❌ No relay containers found"

echo ""
echo "📋 Step 2: Test local relay connection"
echo "--------------------------------------"
if curl -s -I http://127.0.0.1:7777 | grep -q "426"; then
    echo "✅ Local relay responding correctly (HTTP 426)"
else
    echo "❌ Local relay not responding correctly"
    curl -I http://127.0.0.1:7777
fi

echo ""
echo "📋 Step 3: Check Apache modules"
echo "------------------------------"
if apache2ctl -M 2>/dev/null | grep -q "proxy_wstunnel"; then
    echo "✅ proxy_wstunnel module enabled"
else
    echo "❌ proxy_wstunnel module NOT enabled"
    echo "Run: sudo a2enmod proxy_wstunnel"
fi

if apache2ctl -M 2>/dev/null | grep -q "rewrite"; then
    echo "✅ rewrite module enabled"
else
    echo "❌ rewrite module NOT enabled"
    echo "Run: sudo a2enmod rewrite"
fi

echo ""
echo "📋 Step 4: Check Plesk Apache configuration"
echo "------------------------------------------"
if [ -f "/etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf" ]; then
    echo "✅ Plesk config file exists"
    echo "Current proxy configuration:"
    grep -E "(Proxy|Rewrite|proxy|rewrite)" /etc/apache2/plesk.conf.d/vhosts/orly-relay.imwald.eu.conf || echo "❌ No proxy/rewrite rules found"
else
    echo "❌ Plesk config file not found"
fi

echo ""
echo "📋 Step 5: Test WebSocket connections"
echo "------------------------------------"

# Test with curl first (simpler)
echo "Testing HTTP upgrade request to local relay..."
if curl -s -I -H "Connection: Upgrade" -H "Upgrade: websocket" http://127.0.0.1:7777 | grep -q "426\|101"; then
    echo "✅ Local relay accepts upgrade requests"
else
    echo "❌ Local relay doesn't accept upgrade requests"
fi

echo "Testing HTTP upgrade request to remote relay..."
if curl -s -I -H "Connection: Upgrade" -H "Upgrade: websocket" https://orly-relay.imwald.eu | grep -q "426\|101"; then
    echo "✅ Remote relay accepts upgrade requests"
else
    echo "❌ Remote relay doesn't accept upgrade requests"
    echo "This indicates an Apache proxy issue"
fi

# Try to install websocat if not available
if ! command -v websocat >/dev/null 2>&1; then
    echo ""
    echo "📥 Installing websocat for proper WebSocket testing..."
    if wget -q https://github.com/vi/websocat/releases/download/v1.12.0/websocat.x86_64-unknown-linux-musl -O websocat 2>/dev/null; then
        chmod +x websocat
        echo "✅ websocat installed"
    else
        echo "❌ Could not install websocat (no internet or wget issue)"
        echo "Manual install: wget https://github.com/vi/websocat/releases/download/v1.12.0/websocat.x86_64-unknown-linux-musl -O websocat && chmod +x websocat"
    fi
fi

# Test with websocat if available
if command -v ./websocat >/dev/null 2>&1; then
    echo ""
    echo "Testing actual WebSocket connection..."
    echo "Local WebSocket test:"
    timeout 3 bash -c 'echo "[\"REQ\",\"test\",{}]" | ./websocat ws://127.0.0.1:7777/' 2>/dev/null || echo "❌ Local WebSocket failed"

    echo "Remote WebSocket test (ignoring SSL):"
    timeout 3 bash -c 'echo "[\"REQ\",\"test\",{}]" | ./websocat --insecure wss://orly-relay.imwald.eu/' 2>/dev/null || echo "❌ Remote WebSocket failed"
fi

echo ""
echo "📋 Step 6: Check ports and connections"
echo "------------------------------------"
echo "Ports listening on 7777:"
netstat -tlnp 2>/dev/null | grep :7777 || ss -tlnp 2>/dev/null | grep :7777 || echo "❌ No process listening on port 7777"

echo ""
echo "📋 Step 7: Test SSL certificate"
echo "------------------------------"
echo "Certificate issuer:"
echo | openssl s_client -connect orly-relay.imwald.eu:443 -servername orly-relay.imwald.eu 2>/dev/null | openssl x509 -noout -issuer 2>/dev/null || echo "❌ SSL test failed"

echo ""
echo "🎯 RECOMMENDED NEXT STEPS:"
echo "========================="
echo "1. If proxy_wstunnel is missing: sudo a2enmod proxy_wstunnel && sudo systemctl restart apache2"
echo "2. If no proxy rules found: Add configuration in Plesk Apache & nginx Settings"
echo "3. If local WebSocket fails: Check if relay container is actually running"
echo "4. If remote WebSocket fails but local works: Apache proxy configuration issue"
echo ""
echo "🔧 Try this simple Plesk configuration:"
echo "ProxyPass / http://127.0.0.1:7777/"
echo "ProxyPassReverse / http://127.0.0.1:7777/"
96 contrib/stella/docker-compose.yml Normal file
@@ -0,0 +1,96 @@
# Docker Compose for Stella's Nostr Relay
# Owner: npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx

services:
  orly-relay:
    build:
      context: ../..
      dockerfile: Dockerfile
    image: silberengel/next-orly:latest
    container_name: orly-relay
    restart: unless-stopped
    ports:
      - "127.0.0.1:7777:7777"
    volumes:
      - relay_data:/data
      - ./profiles:/profiles:ro
    environment:
      # Relay Configuration
      - ORLY_DATA_DIR=/data
      - ORLY_LISTEN=0.0.0.0
      - ORLY_PORT=7777
      - ORLY_LOG_LEVEL=info
      - ORLY_DB_LOG_LEVEL=error
      - ORLY_OWNERS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx
      - ORLY_ADMINS=npub1v30tsz9vw6ylpz63g0a702nj3xa26t3m7p5us8f2y2sd8v6cnsvq465zjx,npub1m4ny6hjqzepn4rxknuq94c2gpqzr29ufkkw7ttcxyak7v43n6vvsajc2jl,npub1l5sga6xg72phsz5422ykujprejwud075ggrr3z2hwyrfgr7eylqstegx9z

      # ACL and Spider Configuration
      - ORLY_ACL_MODE=follows
      - ORLY_SPIDER_MODE=follows

      # Bootstrap relay URLs for initial sync
      - ORLY_BOOTSTRAP_RELAYS=wss://profiles.nostr1.com,wss://purplepag.es,wss://relay.nostr.band,wss://relay.damus.io

      # Subscription Settings (optional)
      - ORLY_SUBSCRIPTION_ENABLED=false
      - ORLY_MONTHLY_PRICE_SATS=0

      # Performance Settings
      - ORLY_MAX_CONNECTIONS=1000
      - ORLY_MAX_EVENT_SIZE=65536
      - ORLY_MAX_SUBSCRIPTIONS=20

    healthcheck:
      # A plain "curl -f" would mark the container unhealthy on the relay's
      # normal 426 Upgrade Required reply, so accept the same status codes
      # as the Dockerfile HEALTHCHECK ($$ escapes $ for compose).
      test: ["CMD-SHELL", "code=$$(curl -s -o /dev/null -w '%{http_code}' http://localhost:7777 || echo 000); echo $$code | grep -qE '^(101|200|400|404|426)$$'"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

    # Resource limits
    deploy:
      resources:
        limits:
          memory: 1G
          cpus: "1.0"
        reservations:
          memory: 256M
          cpus: "0.25"

    # Logging configuration
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

  # Optional: Nginx reverse proxy for SSL/domain setup
  nginx:
    image: nginx:alpine
    container_name: stella-nginx
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/ssl:/etc/nginx/ssl:ro
      - nginx_logs:/var/log/nginx
    depends_on:
      - orly-relay
    profiles:
      - proxy # Only start with: docker-compose --profile proxy up

volumes:
  relay_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./data
  nginx_logs:
    driver: local

networks:
  default:
    name: orly-relay-network
154 contrib/stella/manage-relay.sh Executable file
@@ -0,0 +1,154 @@
#!/bin/bash
# Stella's Orly Relay Management Script
# Uses docker-compose.yml directly for configuration

set -e

# Get script directory and project root
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_DIR="$SCRIPT_DIR"

# Configuration from docker-compose.yml
RELAY_SERVICE="orly-relay"
CONTAINER_NAME="orly-relay"
RELAY_URL="ws://127.0.0.1:7777"
HTTP_URL="http://127.0.0.1:7777"
RELAY_DATA_DIR="/home/madmin/.local/share/orly-relay"
# Systemd unit installed from stella-relay.service (used by enable/disable)
SYSTEMD_SERVICE="stella-relay"

# Change to project directory for docker-compose commands
cd "$PROJECT_DIR"

case "${1:-}" in
    "start")
        echo "🚀 Starting Stella's Orly Relay..."
        docker compose up -d orly-relay
        echo "✅ Relay started!"
        ;;
    "stop")
        echo "⏹️ Stopping Stella's Orly Relay..."
        docker compose down
        echo "✅ Relay stopped!"
        ;;
    "restart")
        echo "🔄 Restarting Stella's Orly Relay..."
        docker compose restart orly-relay
        echo "✅ Relay restarted!"
        ;;
    "status")
        echo "📊 Stella's Orly Relay Status:"
        docker compose ps orly-relay
        ;;
    "logs")
        echo "📜 Stella's Orly Relay Logs:"
        docker compose logs -f orly-relay
        ;;
    "test")
        echo "🧪 Testing relay connection..."
        if curl -s -I "$HTTP_URL" | grep -q "426 Upgrade Required"; then
            echo "✅ Relay is responding correctly!"
            echo "📡 WebSocket URL: $RELAY_URL"
            echo "🌐 HTTP URL: $HTTP_URL"
        else
            echo "❌ Relay is not responding correctly"
            echo "   Expected: 426 Upgrade Required"
            echo "   URL: $HTTP_URL"
            exit 1
        fi
        ;;
    "enable")
        echo "🔧 Enabling relay to start at boot..."
        sudo systemctl enable $SYSTEMD_SERVICE
        echo "✅ Relay will start automatically at boot!"
        ;;
    "disable")
        echo "🔧 Disabling relay auto-start..."
        sudo systemctl disable $SYSTEMD_SERVICE
        echo "✅ Relay will not start automatically at boot!"
        ;;
    "info")
        echo "📋 Stella's Orly Relay Information:"
        echo "   Service: $RELAY_SERVICE"
        echo "   Container: $CONTAINER_NAME"
        echo "   WebSocket URL: $RELAY_URL"
        echo "   HTTP URL: $HTTP_URL"
        echo "   Data Directory: $RELAY_DATA_DIR"
        echo "   Config Directory: $PROJECT_DIR"
        echo ""
        echo "🐳 Docker Information:"
        echo "   Compose File: $PROJECT_DIR/docker-compose.yml"
        echo "   Container Status:"
        docker compose ps orly-relay 2>/dev/null || echo "   Not running"
        echo ""
        echo "💡 Configuration:"
        echo "   All settings are defined in docker-compose.yml"
        echo "   Use 'docker compose config' to see parsed configuration"
        ;;
    "docker-logs")
        echo "🐳 Docker Container Logs:"
        docker compose logs -f orly-relay 2>/dev/null || echo "❌ Container not found or not running"
        ;;
    "docker-status")
        echo "🐳 Docker Container Status:"
        docker compose ps orly-relay
        ;;
    "docker-restart")
        echo "🔄 Restarting Docker Container..."
        docker compose restart orly-relay
        echo "✅ Container restarted!"
        ;;
    "docker-update")
        echo "🔄 Updating and restarting Docker Container..."
        docker compose pull orly-relay
        docker compose up -d orly-relay
        echo "✅ Container updated and restarted!"
        ;;
    "docker-build")
        echo "🔨 Building Docker Container..."
        docker compose build orly-relay
        echo "✅ Container built!"
        ;;
    "docker-down")
        echo "⏹️ Stopping Docker Container..."
        docker compose down
        echo "✅ Container stopped!"
        ;;
    "docker-config")
        echo "📋 Docker Compose Configuration:"
        docker compose config
        ;;
    *)
        echo "🌲 Stella's Orly Relay Management Script"
        echo ""
        echo "Usage: $0 [COMMAND]"
        echo ""
        echo "Commands:"
        echo "  start           Start the relay"
        echo "  stop            Stop the relay"
        echo "  restart         Restart the relay"
        echo "  status          Show relay status"
        echo "  logs            Show relay logs (follow mode)"
        echo "  test            Test relay connection"
        echo "  enable          Enable auto-start at boot"
        echo "  disable         Disable auto-start at boot"
        echo "  info            Show relay information"
        echo ""
        echo "Docker Commands:"
        echo "  docker-logs     Show Docker container logs"
        echo "  docker-status   Show Docker container status"
        echo "  docker-restart  Restart Docker container only"
        echo "  docker-update   Update and restart container"
        echo "  docker-build    Build Docker container"
        echo "  docker-down     Stop Docker container"
        echo "  docker-config   Show Docker Compose configuration"
        echo ""
        echo "Examples:"
        echo "  $0 start         # Start the relay"
        echo "  $0 status        # Check if it's running"
        echo "  $0 test          # Test WebSocket connection"
        echo "  $0 logs          # Watch real-time logs"
        echo "  $0 docker-logs   # Watch Docker container logs"
        echo "  $0 docker-update # Update and restart container"
        echo ""
        echo "🌲 Crafted in the digital forest by Stella ✨"
        ;;
esac
42 contrib/stella/stella-relay.service Normal file
@@ -0,0 +1,42 @@
[Unit]
Description=Stella's Orly Nostr Relay (Docker Compose)
Documentation=https://github.com/Silberengel/next.orly.dev
After=network-online.target docker.service
Wants=network-online.target
Requires=docker.service

[Service]
Type=oneshot
RemainAfterExit=yes
User=madmin
Group=madmin
WorkingDirectory=/home/madmin/Projects/GitCitadel/next.orly.dev

# Start the relay using docker compose
ExecStart=/usr/bin/docker compose up -d orly-relay

# Stop the relay
ExecStop=/usr/bin/docker compose down

# Reload configuration (restart containers)
ExecReload=/usr/bin/docker compose restart orly-relay

# Security settings
NoNewPrivileges=true
ProtectSystem=strict
ProtectHome=read-only
ReadWritePaths=/home/madmin/.local/share/orly-relay
ReadWritePaths=/home/madmin/Projects/GitCitadel/next.orly.dev/data

# Resource limits
LimitNOFILE=65536
LimitNPROC=4096

# Restart policy
Restart=on-failure
RestartSec=10
TimeoutStartSec=60
TimeoutStopSec=30

[Install]
WantedBy=multi-user.target
BIN docs/orly.png Binary file not shown.
Before: 70 KiB → After: 485 KiB
287 docs/websocket-req-comparison.md Normal file
@@ -0,0 +1,287 @@
# WebSocket REQ Handling Comparison: Khatru vs Next.orly.dev

## Overview

This document compares how two Nostr relay implementations handle WebSocket connections and REQ (subscription) messages:

1. **Khatru** - A popular Go-based Nostr relay library by fiatjaf
2. **Next.orly.dev** - A custom relay implementation with advanced features

## Architecture Comparison

### Khatru Architecture

- **Monolithic approach**: Single large `HandleWebsocket` method (~380 lines) processes all message types

- **Inline processing**: REQ handling is embedded within the main websocket handler

- **Hook-based extensibility**: Uses function slices for customizable behavior

- **Simple structure**: WebSocket struct with basic fields and a mutex for thread safety

### Next.orly.dev Architecture

- **Modular approach**: Separate methods for each message type (`HandleReq`, `HandleEvent`, etc.)

- **Layered processing**: Message identification → envelope parsing → type-specific handling

- **Publisher-subscriber system**: Dedicated infrastructure for subscription management

- **Rich context**: Listener struct with detailed state tracking and metrics

## Connection Establishment

### Khatru

```go
// Simple websocket upgrade
conn, err := rl.upgrader.Upgrade(w, r, nil)
ws := &WebSocket{
	conn:               conn,
	Request:            r,
	Challenge:          hex.EncodeToString(challenge),
	negentropySessions: xsync.NewMapOf[string, *NegentropySession](),
}
```

### Next.orly.dev

```go
// More sophisticated setup with IP whitelisting
conn, err = websocket.Accept(w, r, &websocket.AcceptOptions{OriginPatterns: []string{"*"}})
listener := &Listener{
	ctx:    ctx,
	Server: s,
	conn:   conn,
	remote: remote,
	req:    r,
}
// Immediate AUTH challenge if ACLs are configured
```

**Key Differences:**

- Next.orly.dev includes IP whitelisting and immediate authentication challenges

- Khatru uses the fasthttp/websocket library, while next.orly.dev uses coder/websocket

- Next.orly.dev has more detailed connection state tracking

## Message Processing

### Khatru

- Uses `nostr.MessageParser` for sequential parsing

- Switch statement on envelope type within a goroutine

- Direct processing without intermediate validation layers

### Next.orly.dev

- Custom envelope identification system (`envelopes.Identify`)

- Separate validation and processing phases

- Extensive logging and error handling at each step

## REQ Message Handling

### Khatru REQ Processing

```go
case *nostr.ReqEnvelope:
	eose := sync.WaitGroup{}
	eose.Add(len(env.Filters))

	// Handle each filter separately
	for _, filter := range env.Filters {
		err := srl.handleRequest(reqCtx, env.SubscriptionID, &eose, ws, filter)
		if err != nil {
			// Fail everything if any filter is rejected
			ws.WriteJSON(nostr.ClosedEnvelope{SubscriptionID: env.SubscriptionID, Reason: reason})
			return
		} else {
			rl.addListener(ws, env.SubscriptionID, srl, filter, cancelReqCtx)
		}
	}

	go func() {
		eose.Wait()
		ws.WriteJSON(nostr.EOSEEnvelope(env.SubscriptionID))
	}()
```

### Next.orly.dev REQ Processing

```go
// Comprehensive ACL and authentication checks first
accessLevel := acl.Registry.GetAccessLevel(l.authedPubkey.Load(), l.remote)
switch accessLevel {
case "none":
	return // Send auth-required response
}

// Process all filters and collect events
for _, f := range *env.Filters {
	filterEvents, err = l.QueryEvents(queryCtx, f)
	allEvents = append(allEvents, filterEvents...)
}

// Apply privacy and privilege checks
// Send all historical events
// Set up ongoing subscription only if needed
```

## Key Architectural Differences

### 1. **Filter Processing Strategy**

**Khatru:**

- Processes each filter independently and concurrently

- Uses a WaitGroup to coordinate EOSE across all filters

- Immediately sets up listeners for ongoing subscriptions

- Fails the entire subscription if any filter is rejected

**Next.orly.dev:**

- Processes all filters sequentially in a single context

- Collects all events before applying access control

- Only sets up subscriptions for filters that need ongoing updates

- Gracefully handles individual filter failures
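A self-contained sketch (code from neither project) contrasting the two strategies just described: concurrent per-filter queries joined by a WaitGroup before EOSE, versus sequential collection with deduplication before anything is sent:

```go
package main

import (
	"fmt"
	"sync"
)

type filterID int

// query stands in for a database lookup; the shared event shows why
// deduplication matters when filters overlap.
func query(f filterID) []string {
	return []string{fmt.Sprintf("event-from-filter-%d", f), "shared-event"}
}

// Khatru-style: stream each filter's results as they arrive, EOSE once all
// filters have finished. Duplicates across filters are possible.
func concurrentEOSE(filters []filterID) {
	var wg sync.WaitGroup
	for _, f := range filters {
		wg.Add(1)
		go func(f filterID) {
			defer wg.Done()
			for _, ev := range query(f) {
				fmt.Println("send:", ev)
			}
		}(f)
	}
	wg.Wait()
	fmt.Println("send: EOSE")
}

// orly-style: collect everything first, deduplicate, then send in one pass.
func sequentialEOSE(filters []filterID) {
	seen := make(map[string]bool)
	var all []string
	for _, f := range filters {
		for _, ev := range query(f) {
			if !seen[ev] {
				seen[ev] = true
				all = append(all, ev)
			}
		}
	}
	for _, ev := range all {
		fmt.Println("send:", ev)
	}
	fmt.Println("send: EOSE")
}

func main() {
	filters := []filterID{1, 2}
	concurrentEOSE(filters)
	sequentialEOSE(filters)
}
```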
### 2. **Access Control Integration**

**Khatru:**

- Basic NIP-42 authentication support

- Hook-based authorization via `RejectFilter` functions

- Limited built-in access control features

**Next.orly.dev:**

- Comprehensive ACL system with multiple access levels

- Built-in support for private events with npub authorization

- Privileged event filtering based on pubkey and p-tags

- Granular permission checking at multiple stages

### 3. **Subscription Management**

**Khatru:**

```go
// Simple listener registration
type listenerSpec struct {
	filter   nostr.Filter
	cancel   context.CancelCauseFunc
	subRelay *Relay
}
rl.addListener(ws, subscriptionID, relay, filter, cancel)
```

**Next.orly.dev:**

```go
// Publisher-subscriber system with rich metadata
type W struct {
	Conn         *websocket.Conn
	remote       string
	Id           string
	Receiver     event.C
	Filters      *filter.S
	AuthedPubkey []byte
}
l.publishers.Receive(&W{...})
```

### 4. **Performance Optimizations**

**Khatru:**

- Concurrent filter processing

- Immediate streaming of events as they're found

- Memory-efficient with direct event streaming

**Next.orly.dev:**

- Batch processing with deduplication

- Memory management with explicit `ev.Free()` calls

- Smart subscription cancellation for ID-only queries

- Event result caching and seen-tracking

### 5. **Error Handling & Observability**

**Khatru:**

- Basic error logging

- Simple connection state management

- Limited metrics and observability

**Next.orly.dev:**

- Comprehensive error handling with context preservation

- Detailed logging at each processing stage

- Built-in metrics (message count, REQ count, event count)

- Graceful degradation on individual component failures

## Memory Management

### Khatru

- Relies on Go's garbage collector

- Simple WebSocket struct with minimal state

- Uses sync.Map for thread-safe operations

### Next.orly.dev

- Explicit memory management with `ev.Free()` calls

- Resource pooling and reuse patterns

- Detailed tracking of connection resources

## Concurrency Models

### Khatru

- Per-connection goroutine for message reading

- Additional goroutines for each message processed

- WaitGroup coordination for multi-filter EOSE

### Next.orly.dev

- Per-connection goroutine with single-threaded message processing

- Publisher-subscriber system handles concurrent event distribution

- Context-based cancellation throughout
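A minimal sketch (again from neither codebase) of the context-based cancellation pattern: the subscription goroutine stops when the connection context is cancelled, and `context.Cause` reports why:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// serveSubscription delivers events until its context is cancelled.
func serveSubscription(ctx context.Context, id string, events <-chan string) {
	for {
		select {
		case <-ctx.Done():
			fmt.Println("subscription", id, "closed:", context.Cause(ctx))
			return
		case ev := <-events:
			fmt.Println("subscription", id, "event:", ev)
		}
	}
}

func main() {
	connCtx, closeConn := context.WithCancelCause(context.Background())
	events := make(chan string)
	go serveSubscription(connCtx, "test", events)

	events <- "example-event"
	closeConn(fmt.Errorf("client sent CLOSE"))
	time.Sleep(50 * time.Millisecond) // let the goroutine print before exit
}
```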
## Trade-offs Analysis

### Khatru Advantages

- **Simplicity**: Easier to understand and modify

- **Performance**: Lower latency due to concurrent processing

- **Flexibility**: Hook-based architecture allows extensive customization

- **Streaming**: Events are sent as soon as they're found

### Khatru Disadvantages

- **Monolithic**: Large methods are harder to maintain

- **Limited ACL**: Basic authentication and authorization

- **Error handling**: Less graceful failure recovery

- **Resource usage**: No explicit memory management

### Next.orly.dev Advantages

- **Security**: Comprehensive ACL and privacy features

- **Observability**: Extensive logging and metrics

- **Resource management**: Explicit memory and connection lifecycle management

- **Modularity**: Easier to test and extend individual components

- **Robustness**: Graceful handling of edge cases and failures

### Next.orly.dev Disadvantages

- **Complexity**: Higher cognitive overhead and learning curve

- **Latency**: Sequential processing may be slower for some use cases

- **Resource overhead**: More memory usage due to batching and state tracking

- **Coupling**: Tighter integration between components

## Conclusion

Both implementations represent different philosophies:

- **Khatru** prioritizes simplicity, performance, and extensibility through a hook-based architecture

- **Next.orly.dev** prioritizes security, observability, and robustness through comprehensive built-in features

The choice between them depends on specific requirements:

- Choose **Khatru** for high-performance relays with custom business logic

- Choose **Next.orly.dev** for production relays requiring comprehensive access control and monitoring

Both approaches demonstrate a mature understanding of Nostr protocol requirements while making different trade-offs between complexity and features.
44 go.mod
@@ -4,47 +4,51 @@ go 1.25.0
require (
	github.com/adrg/xdg v0.5.3
	github.com/coder/websocket v1.8.13
	github.com/coder/websocket v1.8.14
	github.com/davecgh/go-spew v1.1.1
	github.com/dgraph-io/badger/v4 v4.8.0
	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
	github.com/klauspost/cpuid/v2 v2.3.0
	github.com/pkg/profile v1.7.0
	github.com/puzpuzpuz/xsync/v3 v3.5.1
	github.com/stretchr/testify v1.10.0
	github.com/stretchr/testify v1.11.1
	github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b
	go-simpler.org/env v0.12.0
	go.uber.org/atomic v1.11.0
	golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b
	golang.org/x/crypto v0.42.0
	golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9
	golang.org/x/lint v0.0.0-20241112194109-818c5a804067
	golang.org/x/net v0.43.0
	golang.org/x/net v0.44.0
	honnef.co/go/tools v0.6.1
	lol.mleku.dev v1.0.3
	lukechampine.com/frand v1.5.1
)

require (
	github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
	github.com/BurntSushi/toml v1.5.0 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
	github.com/dgraph-io/ristretto/v2 v2.3.0 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/felixge/fgprof v0.9.3 // indirect
	github.com/felixge/fgprof v0.9.5 // indirect
	github.com/go-logr/logr v1.4.3 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/google/flatbuffers v25.2.10+incompatible // indirect
	github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
	github.com/google/flatbuffers v25.9.23+incompatible // indirect
	github.com/google/pprof v0.0.0-20251002213607-436353cc1ee6 // indirect
	github.com/gorilla/websocket v1.5.3 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/templexxx/cpu v0.0.1 // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/otel v1.37.0 // indirect
	go.opentelemetry.io/otel/metric v1.37.0 // indirect
	go.opentelemetry.io/otel/trace v1.37.0 // indirect
	golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect
	golang.org/x/mod v0.27.0 // indirect
	golang.org/x/sync v0.16.0 // indirect
	golang.org/x/sys v0.35.0 // indirect
	golang.org/x/tools v0.36.0 // indirect
	google.golang.org/protobuf v1.36.6 // indirect
	github.com/templexxx/cpu v0.1.1 // indirect
	go.opentelemetry.io/auto/sdk v1.2.1 // indirect
	go.opentelemetry.io/otel v1.38.0 // indirect
	go.opentelemetry.io/otel/metric v1.38.0 // indirect
	go.opentelemetry.io/otel/trace v1.38.0 // indirect
	golang.org/x/exp/typeparams v0.0.0-20251002181428-27f1f14c8bb9 // indirect
	golang.org/x/mod v0.28.0 // indirect
	golang.org/x/sync v0.17.0 // indirect
	golang.org/x/sys v0.36.0 // indirect
	golang.org/x/tools v0.37.0 // indirect
	google.golang.org/protobuf v1.36.10 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)

retract v1.0.3
102 go.sum
@@ -1,39 +1,55 @@
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g=
github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI=
github.com/dgraph-io/ristretto/v2 v2.3.0 h1:qTQ38m7oIyd4GAed/QkUZyPFNMnvVWyazGXRwvOt5zk=
github.com/dgraph-io/ristretto/v2 v2.3.0/go.mod h1:gpoRV3VzrEY1a9dWAYV6T1U7YzfgttXdd/ZzL1s9OZM=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/google/flatbuffers v25.9.23+incompatible h1:rGZKv+wOb6QPzIdkM2KxhBZCDrA0DeN6DNmRDrqIsQU=
github.com/google/flatbuffers v25.9.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20251002213607-436353cc1ee6 h1:/WHh/1k4thM/w+PAZEIiZK9NwCMFahw5tUzKUCnUtds=
github.com/google/pprof v0.0.0-20251002213607-436353cc1ee6/go.mod h1:I6V7YzU0XDpsHqbsyrghnFZLO1gwK6NPTNvmetQIk9U=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
@@ -44,68 +60,76 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/templexxx/cpu v0.0.1 h1:hY4WdLOgKdc8y13EYklu9OUTXik80BkxHoWvTO6MQQY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/templexxx/cpu v0.0.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/cpu v0.1.1 h1:isxHaxBXpYFWnk2DReuKkigaZyrjs2+9ypIdGP4h+HI=
github.com/templexxx/cpu v0.1.1/go.mod h1:w7Tb+7qgcAlIyX4NhLuDKt78AHA5SzPmq0Wj6HiEnnk=
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3Wb1+pWBaWv/BlHK0ZYIu/KaL6eHg=
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ=
go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs=
go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0=
golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI=
golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8=
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9 h1:TQwNpfvNkxAVlItJf6Cr5JTsVZoC/Sj7K3OZv2Pc14A=
golang.org/x/exp v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:TwQYMMnGpvZyc+JpB/UAuTNIsVJifOlSkrZkhcvpVUk=
golang.org/x/exp/typeparams v0.0.0-20251002181428-27f1f14c8bb9 h1:EvjuVHWMoRaAxH402KMgrQpGUjoBy/OWvZjLOqQnwNk=
golang.org/x/exp/typeparams v0.0.0-20251002181428-27f1f14c8bb9/go.mod h1:4Mzdyp/6jzw9auFDJ3OMF5qksa7UvPnzKqTVGcb04ms=
golang.org/x/lint v0.0.0-20241112194109-818c5a804067 h1:adDmSQyFTCiv19j015EGKJBoaa7ElV0Q1Wovb/4G7NA=
golang.org/x/lint v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I=
golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
31 main.go
@@ -17,7 +17,9 @@ import (
	"next.orly.dev/app"
	"next.orly.dev/app/config"
	"next.orly.dev/pkg/acl"
	"next.orly.dev/pkg/crypto/keys"
	"next.orly.dev/pkg/database"
	"next.orly.dev/pkg/encoders/hex"
	"next.orly.dev/pkg/spider"
	"next.orly.dev/pkg/version"
)

@@ -51,11 +53,32 @@ func main() {
	runtime.GOMAXPROCS(runtime.NumCPU() * 4)
	var err error
	var cfg *config.C
	if cfg, err = config.New(); chk.T(err) {
	}
	log.I.F("starting %s %s", cfg.AppName, version.V)
	if cfg, err = config.New(); chk.T(err) {
	}
	log.I.F("starting %s %s", cfg.AppName, version.V)

	// If OpenPprofWeb is true and profiling is enabled, we need to ensure HTTP profiling is also enabled
	// Handle 'identity' subcommand: print relay identity secret and pubkey and exit
	if config.IdentityRequested() {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		var db *database.D
		if db, err = database.New(ctx, cancel, cfg.DataDir, cfg.DBLogLevel); chk.E(err) {
			os.Exit(1)
		}
		defer db.Close()
		skb, err := db.GetOrCreateRelayIdentitySecret()
		if chk.E(err) {
			os.Exit(1)
		}
		pk, err := keys.SecretBytesToPubKeyHex(skb)
		if chk.E(err) {
			os.Exit(1)
		}
		fmt.Printf("identity secret: %s\nidentity pubkey: %s\n", hex.Enc(skb), pk)
		os.Exit(0)
	}

	// If OpenPprofWeb is true and profiling is enabled, we need to ensure HTTP profiling is also enabled
	if cfg.OpenPprofWeb && cfg.Pprof != "" && !cfg.PprofHTTP {
		log.I.F("enabling HTTP pprof server to support web viewer")
		cfg.PprofHTTP = true
@@ -66,3 +66,15 @@ func (s *S) Type() (typ string) {
	}
	return
}

// AddFollow forwards a pubkey to the active ACL if it supports dynamic follows
func (s *S) AddFollow(pub []byte) {
	for _, i := range s.ACL {
		if i.Type() == s.Active.Load() {
			if f, ok := i.(*Follows); ok {
				f.AddFollow(pub)
			}
			break
		}
	}
}
@@ -1,7 +1,10 @@
|
||||
package acl
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -21,9 +24,9 @@ import (
|
||||
"next.orly.dev/pkg/encoders/envelopes/reqenvelope"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/filter"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/encoders/kind"
|
||||
"next.orly.dev/pkg/encoders/tag"
|
||||
"next.orly.dev/pkg/encoders/timestamp"
|
||||
"next.orly.dev/pkg/protocol/publish"
|
||||
"next.orly.dev/pkg/utils"
|
||||
"next.orly.dev/pkg/utils/normalize"
|
||||
@@ -37,6 +40,7 @@ type Follows struct {
|
||||
pubs *publish.S
|
||||
followsMx sync.RWMutex
|
||||
admins [][]byte
|
||||
owners [][]byte
|
||||
follows [][]byte
|
||||
updated chan struct{}
|
||||
subsCancel context.CancelFunc
|
||||
@@ -66,6 +70,16 @@ func (f *Follows) Configure(cfg ...any) (err error) {
|
||||
err = errorf.E("both config and database must be set")
|
||||
return
|
||||
}
|
||||
// add owners list
|
||||
for _, owner := range f.cfg.Owners {
|
||||
var own []byte
|
||||
if o, e := bech32encoding.NpubOrHexToPublicKeyBinary(owner); chk.E(e) {
|
||||
continue
|
||||
} else {
|
||||
own = o
|
||||
}
|
||||
f.owners = append(f.owners, own)
|
||||
}
|
||||
// find admin follow lists
|
||||
f.followsMx.Lock()
|
||||
defer f.followsMx.Unlock()
|
||||
@@ -107,7 +121,7 @@ func (f *Follows) Configure(cfg ...any) (err error) {
|
||||
for _, v := range ev.Tags.GetAll([]byte("p")) {
|
||||
// log.I.F("adding follow: %s", v.Value())
|
||||
var a []byte
|
||||
if b, e := hex.Dec(string(v.Value())); chk.E(e) {
|
||||
if b, e := hex.DecodeString(string(v.Value())); chk.E(e) {
|
||||
continue
|
||||
} else {
|
||||
a = b
|
||||
@@ -126,11 +140,13 @@ func (f *Follows) Configure(cfg ...any) (err error) {
|
||||
}
|
||||
|
||||
func (f *Follows) GetAccessLevel(pub []byte, address string) (level string) {
|
||||
if f.cfg == nil {
|
||||
return "write"
|
||||
}
|
||||
f.followsMx.RLock()
|
||||
defer f.followsMx.RUnlock()
|
||||
for _, v := range f.owners {
|
||||
if utils.FastEqual(v, pub) {
|
||||
return "owner"
|
||||
}
|
||||
}
|
||||
for _, v := range f.admins {
|
||||
if utils.FastEqual(v, pub) {
|
||||
return "admin"
|
||||
@@ -141,6 +157,9 @@ func (f *Follows) GetAccessLevel(pub []byte, address string) (level string) {
|
||||
return "write"
|
||||
}
|
||||
}
|
||||
if f.cfg == nil {
|
||||
return "write"
|
||||
}
|
||||
return "read"
|
||||
}
|
||||
|
||||
@@ -157,6 +176,8 @@ func (f *Follows) adminRelays() (urls []string) {
	copy(admins, f.admins)
	f.followsMx.RUnlock()
	seen := make(map[string]struct{})

	// First, try to get relay URLs from admin kind 10002 events
	for _, adm := range admins {
		fl := &filter.F{
			Authors: tag.NewFromAny(adm),
@@ -193,6 +214,29 @@ func (f *Follows) adminRelays() (urls []string) {
			}
		}
	}

	// If no admin relays found, use bootstrap relays as fallback
	if len(urls) == 0 {
		log.I.F("no admin relays found in DB, checking bootstrap relays")
		if len(f.cfg.BootstrapRelays) > 0 {
			log.I.F("using bootstrap relays: %v", f.cfg.BootstrapRelays)
			for _, relay := range f.cfg.BootstrapRelays {
				n := string(normalize.URL(relay))
				if n == "" {
					log.W.F("invalid bootstrap relay URL: %s", relay)
					continue
				}
				if _, ok := seen[n]; ok {
					continue
				}
				seen[n] = struct{}{}
				urls = append(urls, n)
			}
		} else {
			log.W.F("no bootstrap relays configured")
		}
	}

	return
}

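The fallback path normalizes each bootstrap URL and deduplicates through a set before appending. A stdlib-only sketch of the same pattern (`normalizeRelayURL` is our stand-in for the project's `normalize.URL`):

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeRelayURL lower-cases the URL and strips a trailing slash so
// trivially different spellings of the same relay compare equal.
func normalizeRelayURL(u string) string {
	u = strings.ToLower(strings.TrimSpace(u))
	if !strings.HasPrefix(u, "ws://") && !strings.HasPrefix(u, "wss://") {
		return "" // reject non-websocket URLs
	}
	return strings.TrimSuffix(u, "/")
}

func main() {
	in := []string{"wss://relay.example.com/", "WSS://relay.example.com", "https://not-a-relay"}
	seen := make(map[string]struct{})
	var urls []string
	for _, r := range in {
		n := normalizeRelayURL(r)
		if n == "" {
			continue
		}
		if _, ok := seen[n]; ok {
			continue
		}
		seen[n] = struct{}{}
		urls = append(urls, n)
	}
	fmt.Println(urls) // [wss://relay.example.com]
}
```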
@@ -208,9 +252,9 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
		return
	}
	urls := f.adminRelays()
	log.I.S(urls)
	// log.I.S(urls)
	if len(urls) == 0 {
		log.W.F("follows syncer: no admin relays found in DB (kind 10002)")
		log.W.F("follows syncer: no admin relays found in DB (kind 10002) and no bootstrap relays configured")
		return
	}
	log.T.F(
@@ -227,18 +271,58 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
				return
			default:
			}
			c, _, err := websocket.Dial(ctx, u, nil)
			// Create a timeout context for the connection
			connCtx, cancel := context.WithTimeout(ctx, 10*time.Second)

			// Create proper headers for the WebSocket connection
			headers := http.Header{}
			headers.Set("User-Agent", "ORLY-Relay/0.9.2")
			headers.Set("Origin", "https://orly.dev")

			// Use proper WebSocket dial options
			dialOptions := &websocket.DialOptions{
				HTTPHeader: headers,
			}

			c, _, err := websocket.Dial(connCtx, u, dialOptions)
			cancel()
			if err != nil {
				log.W.F("follows syncer: dial %s failed: %v", u, err)

				// Handle different types of errors
				if strings.Contains(
					err.Error(), "response status code 101 but got 403",
				) {
					// 403 means the relay is not accepting connections from
					// us. Forbidden is the meaning, usually used to
					// indicate either the IP or user is blocked. so stop
					// trying this one.
					return
					// 403 means the relay is not accepting connections from us
					// Forbidden is the meaning, usually used to indicate either the IP or user is blocked
					// But we should still retry after a longer delay
					log.W.F(
						"follows syncer: relay %s returned 403, will retry after longer delay",
						u,
					)
					timer := time.NewTimer(5 * time.Minute) // Wait 5 minutes before retrying 403 errors
					select {
					case <-ctx.Done():
						return
					case <-timer.C:
					}
					continue
				} else if strings.Contains(
					err.Error(), "timeout",
				) || strings.Contains(err.Error(), "connection refused") {
					// Network issues, retry with normal backoff
					log.W.F(
						"follows syncer: network issue with %s, retrying in %v",
						u, backoff,
					)
				} else {
					// Other errors, retry with normal backoff
					log.W.F(
						"follows syncer: connection error with %s, retrying in %v",
						u, backoff,
					)
				}

				timer := time.NewTimer(backoff)
				select {
				case <-ctx.Done():
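The syncer resets `backoff` to one second after a successful connection; the growth policy between retries is outside this hunk. A typical doubling-with-cap shape, offered only as an assumed sketch of what such a policy looks like:

```go
package main

import (
	"fmt"
	"time"
)

// nextBackoff doubles the delay up to a fixed limit. The relay's actual
// growth policy is not shown in the diff, so this is an assumption.
func nextBackoff(cur, limit time.Duration) time.Duration {
	next := cur * 2
	if next > limit {
		return limit
	}
	return next
}

func main() {
	b := time.Second
	for i := 0; i < 6; i++ {
		fmt.Println(b)
		b = nextBackoff(b, time.Minute)
	}
	// 1s 2s 4s 8s 16s 32s
}
```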
@@ -251,21 +335,42 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
					continue
				}
				backoff = time.Second
				// send REQ
				log.T.F("follows syncer: successfully connected to %s", u)

				// send REQ for kind 3 (follow lists), kind 10002 (relay lists), and all events from follows
				ff := &filter.S{}
				f1 := &filter.F{
					Authors: tag.NewFromBytesSlice(authors...),
					Limit:   values.ToUintPointer(0),
					Kinds:   kind.NewS(kind.New(kind.FollowList.K)),
					Limit:   values.ToUintPointer(100),
				}
				*ff = append(*ff, f1)
				f2 := &filter.F{
					Authors: tag.NewFromBytesSlice(authors...),
					Kinds:   kind.NewS(kind.New(kind.RelayListMetadata.K)),
					Limit:   values.ToUintPointer(100),
				}
				// Add filter for all events from follows (last 30 days)
				oneMonthAgo := timestamp.FromUnix(time.Now().Add(-30 * 24 * time.Hour).Unix())
				f3 := &filter.F{
					Authors: tag.NewFromBytesSlice(authors...),
					Since:   oneMonthAgo,
					Limit:   values.ToUintPointer(1000),
				}
				*ff = append(*ff, f1, f2, f3)
				req := reqenvelope.NewFrom([]byte("follows-sync"), ff)
				if err = c.Write(
					ctx, websocket.MessageText, req.Marshal(nil),
				); chk.E(err) {
					log.W.F(
						"follows syncer: failed to send REQ to %s: %v", u, err,
					)
					_ = c.Close(websocket.StatusInternalError, "write failed")
					continue
				}
				log.T.F("sent REQ to %s for follows subscription", u)
				log.T.F(
					"follows syncer: sent REQ to %s for kind 3, 10002, and all events (last 30 days) from followed users",
					u,
				)
				// read loop
				for {
					select {
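On the wire, the three filters above travel in a single NIP-01 REQ frame under the shared subscription id. Roughly, with illustrative values (kind 3 is the follow list, kind 10002 the relay list):

```json
["REQ", "follows-sync",
  {"authors": ["<hex pubkey>", "..."], "kinds": [3], "limit": 100},
  {"authors": ["<hex pubkey>", "..."], "kinds": [10002], "limit": 100},
  {"authors": ["<hex pubkey>", "..."], "since": 1700000000, "limit": 1000}
]
```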
@@ -293,6 +398,30 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
					if ok, err := res.Event.Verify(); chk.T(err) || !ok {
						continue
					}

					// Process events based on kind
					switch res.Event.Kind {
					case kind.FollowList.K:
						log.T.F(
							"follows syncer: received kind 3 (follow list) event from %s on relay %s",
							hex.EncodeToString(res.Event.Pubkey), u,
						)
						// Extract followed pubkeys from 'p' tags in kind 3 events
						f.extractFollowedPubkeys(res.Event)
					case kind.RelayListMetadata.K:
						log.T.F(
							"follows syncer: received kind 10002 (relay list) event from %s on relay %s",
							hex.EncodeToString(res.Event.Pubkey), u,
						)
					default:
						// Log all other events from followed users
						log.T.F(
							"follows syncer: received kind %d event from %s on relay %s",
							res.Event.Kind,
							hex.EncodeToString(res.Event.Pubkey), u,
						)
					}

					if _, _, err = f.D.SaveEvent(
						ctx, res.Event,
					); err != nil {
@@ -364,12 +493,56 @@ func (f *Follows) Syncer() {
func (f *Follows) GetFollowedPubkeys() [][]byte {
	f.followsMx.RLock()
	defer f.followsMx.RUnlock()

	followedPubkeys := make([][]byte, len(f.follows))
	copy(followedPubkeys, f.follows)
	return followedPubkeys
}

// extractFollowedPubkeys extracts followed pubkeys from 'p' tags in kind 3 events
func (f *Follows) extractFollowedPubkeys(event *event.E) {
	if event.Kind != kind.FollowList.K {
		return
	}

	// Extract all 'p' tags (followed pubkeys) from the kind 3 event
	for _, tag := range event.Tags.GetAll([]byte("p")) {
		if len(tag.Value()) == 32 { // Valid pubkey length
			f.AddFollow(tag.Value())
		}
	}
}

// AddFollow appends a pubkey to the in-memory follows list if not already present
// and signals the syncer to refresh subscriptions.
func (f *Follows) AddFollow(pub []byte) {
	if len(pub) == 0 {
		return
	}
	f.followsMx.Lock()
	defer f.followsMx.Unlock()
	for _, p := range f.follows {
		if bytes.Equal(p, pub) {
			return
		}
	}
	b := make([]byte, len(pub))
	copy(b, pub)
	f.follows = append(f.follows, b)
	log.I.F(
		"follows syncer: added new followed pubkey: %s",
		hex.EncodeToString(pub),
	)
	// notify syncer if initialized
	if f.updated != nil {
		select {
		case f.updated <- struct{}{}:
		default:
			// if channel is full or not yet listened to, ignore
		}
	}
}

func init() {
	log.T.F("registering follows ACL")
	Registry.Register(new(Follows))

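Two concurrency idioms above are worth calling out: GetFollowedPubkeys hands callers a copy so they cannot mutate the shared slice, and AddFollow signals through a select/default so it never blocks while holding the lock. A distilled stdlib version of both:

```go
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mx      sync.RWMutex
	items   [][]byte
	updated chan struct{}
}

// snapshot returns a defensive copy: the caller can sort or mutate it
// without racing against concurrent adds.
func (r *registry) snapshot() [][]byte {
	r.mx.RLock()
	defer r.mx.RUnlock()
	out := make([][]byte, len(r.items))
	copy(out, r.items)
	return out
}

// add appends and signals without ever blocking: if nobody is listening
// (or a signal is already pending) the send is simply dropped.
func (r *registry) add(v []byte) {
	r.mx.Lock()
	r.items = append(r.items, v)
	r.mx.Unlock()
	select {
	case r.updated <- struct{}{}:
	default:
	}
}

func main() {
	r := &registry{updated: make(chan struct{}, 1)}
	r.add([]byte{1})
	<-r.updated
	fmt.Println(len(r.snapshot())) // 1
}
```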
@@ -2,13 +2,71 @@ package acl

import (
	"lol.mleku.dev/log"
	"next.orly.dev/app/config"
	"next.orly.dev/pkg/encoders/bech32encoding"
	"next.orly.dev/pkg/utils"
)

type None struct{}
type None struct {
	cfg    *config.C
	owners [][]byte
	admins [][]byte
}

func (n None) Configure(cfg ...any) (err error) { return }
func (n *None) Configure(cfg ...any) (err error) {
	for _, ca := range cfg {
		switch c := ca.(type) {
		case *config.C:
			n.cfg = c
		}
	}
	if n.cfg == nil {
		return
	}

func (n None) GetAccessLevel(pub []byte, address string) (level string) {
	// Load owners
	for _, owner := range n.cfg.Owners {
		if len(owner) == 0 {
			continue
		}
		var pk []byte
		if pk, err = bech32encoding.NpubOrHexToPublicKeyBinary(owner); err != nil {
			continue
		}
		n.owners = append(n.owners, pk)
	}

	// Load admins
	for _, admin := range n.cfg.Admins {
		if len(admin) == 0 {
			continue
		}
		var pk []byte
		if pk, err = bech32encoding.NpubOrHexToPublicKeyBinary(admin); err != nil {
			continue
		}
		n.admins = append(n.admins, pk)
	}

	return
}

func (n *None) GetAccessLevel(pub []byte, address string) (level string) {
	// Check owners first
	for _, v := range n.owners {
		if utils.FastEqual(v, pub) {
			return "owner"
		}
	}

	// Check admins
	for _, v := range n.admins {
		if utils.FastEqual(v, pub) {
			return "admin"
		}
	}

	// Default to write for everyone else
	return "write"
}

@@ -1,5 +1,4 @@
realy.lol/pkg/ec
=====
# realy.lol/pkg/ec

This is a full drop-in replacement for
[github.com/btcsuite/btcd/btcec](https://github.com/btcsuite/btcd/tree/master/btcec)
@@ -20,7 +19,7 @@ message signing with the extra test vectors present and passing.

The remainder of this document is from the original README.md.

------------------------------------------------------------------------------
---

Package `ec` implements elliptic curve cryptography needed for working with
Bitcoin. It is designed so that it may be used with the standard

@@ -1,8 +1,6 @@
chainhash
=========
# chainhash

[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
=======
# [![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)

chainhash provides a generic hash type and associated functions that allows the
specific hash algorithm to be abstracted.

@@ -1,5 +1,4 @@
ecdsa
=====
# ecdsa

[![ISC License](http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org)
[![GoDoc](https://pkg.go.dev/badge/mleku.online/git/ec/secp/ecdsa.svg)](https://pkg.go.dev/mleku.online/git/ec/secp/ecdsa)

@@ -14,45 +14,25 @@
|
||||
],
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"key_indices": [0, 1, 2],
|
||||
"expected": "90539EEDE565F5D054F32CC0C220126889ED1E5D193BAF15AEF344FE59D4610C"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
2,
|
||||
1,
|
||||
0
|
||||
],
|
||||
"key_indices": [2, 1, 0],
|
||||
"expected": "6204DE8B083426DC6EAF9502D27024D53FC826BF7D2012148A0575435DF54B2B"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
0,
|
||||
0
|
||||
],
|
||||
"key_indices": [0, 0, 0],
|
||||
"expected": "B436E3BAD62B8CD409969A224731C193D051162D8C5AE8B109306127DA3AA935"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
0,
|
||||
1,
|
||||
1
|
||||
],
|
||||
"key_indices": [0, 0, 1, 1],
|
||||
"expected": "69BC22BFA5D106306E48A20679DE1D7389386124D07571D0D872686028C26A3E"
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"key_indices": [0, 3],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"error": {
|
||||
@@ -63,10 +43,7 @@
|
||||
"comment": "Invalid public key"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"key_indices": [0, 4],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"error": {
|
||||
@@ -77,10 +54,7 @@
|
||||
"comment": "Public key exceeds field size"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
5,
|
||||
0
|
||||
],
|
||||
"key_indices": [5, 0],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"error": {
|
||||
@@ -91,16 +65,9 @@
|
||||
"comment": "First byte of public key is not 2 or 3"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"tweak_indices": [
|
||||
0
|
||||
],
|
||||
"is_xonly": [
|
||||
true
|
||||
],
|
||||
"key_indices": [0, 1],
|
||||
"tweak_indices": [0],
|
||||
"is_xonly": [true],
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The tweak must be less than n."
|
||||
@@ -108,15 +75,9 @@
|
||||
"comment": "Tweak is out of range"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
6
|
||||
],
|
||||
"tweak_indices": [
|
||||
1
|
||||
],
|
||||
"is_xonly": [
|
||||
false
|
||||
],
|
||||
"key_indices": [6],
|
||||
"tweak_indices": [1],
|
||||
"is_xonly": [false],
|
||||
"error": {
|
||||
"type": "value",
|
||||
"message": "The result of tweaking cannot be infinity."
|
||||
|
||||
@@ -10,27 +10,18 @@
|
||||
],
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"pnonce_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"pnonce_indices": [0, 1],
|
||||
"expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B024725377345BDE0E9C33AF3C43C0A29A9249F2F2956FA8CFEB55C8573D0262DC8"
|
||||
},
|
||||
{
|
||||
"pnonce_indices": [
|
||||
2,
|
||||
3
|
||||
],
|
||||
"pnonce_indices": [2, 3],
|
||||
"expected": "035FE1873B4F2967F52FEA4A06AD5A8ECCBE9D0FD73068012C894E2E87CCB5804B000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"comment": "Sum of second points encoded in the nonces is point at infinity which is serialized as 33 zero bytes"
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"pnonce_indices": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"pnonce_indices": [0, 4],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1,
|
||||
@@ -40,10 +31,7 @@
|
||||
"btcec_err": "invalid public key: unsupported format: 4"
|
||||
},
|
||||
{
|
||||
"pnonce_indices": [
|
||||
5,
|
||||
1
|
||||
],
|
||||
"pnonce_indices": [5, 1],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
@@ -53,10 +41,7 @@
|
||||
"btcec_err": "invalid public key: x coordinate 48c264cdd57d3c24d79990b0f865674eb62a0f9018277a95011b41bfc193b831 is not on the secp256k1 curve"
|
||||
},
|
||||
{
|
||||
"pnonce_indices": [
|
||||
6,
|
||||
1
|
||||
],
|
||||
"pnonce_indices": [6, 1],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 0,
|
||||
|
||||
@@ -37,4 +37,4 @@
|
||||
"expected": "890E83616A3BC4640AB9B6374F21C81FF89CDDDBAFAA7475AE2A102A92E3EDB29FD7E874E23342813A60D9646948242646B7951CA046B4B36D7D6078506D3C9402F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,114 +33,49 @@
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"aggnonce": "0341432722C5CD0268D829C702CF0D1CBCE57033EED201FD335191385227C3210C03D377F2D258B64AADC0E16F26462323D701D286046A2EA93365656AFD9875982B",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"nonce_indices": [0, 1],
|
||||
"key_indices": [0, 1],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"psig_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"psig_indices": [0, 1],
|
||||
"expected": "041DA22223CE65C92C9A0D6C2CAC828AAF1EEE56304FEC371DDF91EBB2B9EF0912F1038025857FEDEB3FF696F8B99FA4BB2C5812F6095A2E0004EC99CE18DE1E"
|
||||
},
|
||||
{
|
||||
"aggnonce": "0224AFD36C902084058B51B5D36676BBA4DC97C775873768E58822F87FE437D792028CB15929099EEE2F5DAE404CD39357591BA32E9AF4E162B8D3E7CB5EFE31CB20",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
2
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [0, 2],
|
||||
"key_indices": [0, 2],
|
||||
"tweak_indices": [],
|
||||
"is_xonly": [],
|
||||
"psig_indices": [
|
||||
2,
|
||||
3
|
||||
],
|
||||
"psig_indices": [2, 3],
|
||||
"expected": "1069B67EC3D2F3C7C08291ACCB17A9C9B8F2819A52EB5DF8726E17E7D6B52E9F01800260A7E9DAC450F4BE522DE4CE12BA91AEAF2B4279219EF74BE1D286ADD9"
|
||||
},
|
||||
{
|
||||
"aggnonce": "0208C5C438C710F4F96A61E9FF3C37758814B8C3AE12BFEA0ED2C87FF6954FF186020B1816EA104B4FCA2D304D733E0E19CEAD51303FF6420BFD222335CAA402916D",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
2
|
||||
],
|
||||
"tweak_indices": [
|
||||
0
|
||||
],
|
||||
"is_xonly": [
|
||||
false
|
||||
],
|
||||
"psig_indices": [
|
||||
4,
|
||||
5
|
||||
],
|
||||
"nonce_indices": [0, 3],
|
||||
"key_indices": [0, 2],
|
||||
"tweak_indices": [0],
|
||||
"is_xonly": [false],
|
||||
"psig_indices": [4, 5],
|
||||
"expected": "5C558E1DCADE86DA0B2F02626A512E30A22CF5255CAEA7EE32C38E9A71A0E9148BA6C0E6EC7683B64220F0298696F1B878CD47B107B81F7188812D593971E0CC"
|
||||
},
|
||||
{
|
||||
"aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"is_xonly": [
|
||||
true,
|
||||
false,
|
||||
true
|
||||
],
|
||||
"psig_indices": [
|
||||
6,
|
||||
7
|
||||
],
|
||||
"nonce_indices": [0, 4],
|
||||
"key_indices": [0, 3],
|
||||
"tweak_indices": [0, 1, 2],
|
||||
"is_xonly": [true, false, true],
|
||||
"psig_indices": [6, 7],
|
||||
"expected": "839B08820B681DBA8DAF4CC7B104E8F2638F9388F8D7A555DC17B6E6971D7426CE07BF6AB01F1DB50E4E33719295F4094572B79868E440FB3DEFD3FAC1DB589E"
|
||||
}
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"aggnonce": "02B5AD07AFCD99B6D92CB433FBD2A28FDEB98EAE2EB09B6014EF0F8197CD58403302E8616910F9293CF692C49F351DB86B25E352901F0E237BAFDA11F1C1CEF29FFD",
|
||||
"nonce_indices": [
|
||||
0,
|
||||
4
|
||||
],
|
||||
"key_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"is_xonly": [
|
||||
true,
|
||||
false,
|
||||
true
|
||||
],
|
||||
"psig_indices": [
|
||||
7,
|
||||
8
|
||||
],
|
||||
"nonce_indices": [0, 4],
|
||||
"key_indices": [0, 3],
|
||||
"tweak_indices": [0, 1, 2],
|
||||
"is_xonly": [true, false, true],
|
||||
"psig_indices": [7, 8],
|
||||
"error": {
|
||||
"type": "invalid_contribution",
|
||||
"signer": 1
|
||||
@@ -148,4 +83,4 @@
|
||||
"comment": "Partial signature is invalid because it exceeds group size"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,62 +31,32 @@
|
||||
],
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"expected": "012ABBCB52B3016AC03AD82395A1A415C48B93DEF78718E62A7A90052FE224FB"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
0,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
0,
|
||||
2
|
||||
],
|
||||
"key_indices": [1, 0, 2],
|
||||
"nonce_indices": [1, 0, 2],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 1,
|
||||
"expected": "9FF2F7AAA856150CC8819254218D3ADEEB0535269051897724F9DB3789513A52"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 2,
|
||||
"expected": "FA23C359F6FAC4E7796BB93BC9F0532A95468C539BA20FF86D7C76ED92227900"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"nonce_indices": [
|
||||
0,
|
||||
3
|
||||
],
|
||||
"key_indices": [0, 1],
|
||||
"nonce_indices": [0, 3],
|
||||
"aggnonce_index": 1,
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
@@ -96,10 +66,7 @@
|
||||
],
|
||||
"sign_error_test_cases": [
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2
|
||||
],
|
||||
"key_indices": [1, 2],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
@@ -110,11 +77,7 @@
|
||||
"comment": "The signers pubkey is not in the list of pubkeys"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
0,
|
||||
3
|
||||
],
|
||||
"key_indices": [1, 0, 3],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
@@ -126,11 +89,7 @@
|
||||
"comment": "Signer 2 provided an invalid public key"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"key_indices": [1, 2, 0],
|
||||
"aggnonce_index": 2,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
@@ -142,11 +101,7 @@
|
||||
"comment": "Aggregate nonce is invalid due wrong tag, 0x04, in the first half"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"key_indices": [1, 2, 0],
|
||||
"aggnonce_index": 3,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
@@ -158,11 +113,7 @@
|
||||
"comment": "Aggregate nonce is invalid because the second half does not correspond to an X coordinate"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"key_indices": [1, 2, 0],
|
||||
"aggnonce_index": 4,
|
||||
"msg_index": 0,
|
||||
"secnonce_index": 0,
|
||||
@@ -174,11 +125,7 @@
|
||||
"comment": "Aggregate nonce is invalid because second half exceeds field size"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"key_indices": [0, 1, 2],
|
||||
"aggnonce_index": 0,
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
@@ -193,48 +140,24 @@
|
||||
"verify_fail_test_cases": [
|
||||
{
|
||||
"sig": "97AC833ADCB1AFA42EBF9E0725616F3C9A0D5B614F6FE283CEAAA37A8FFAF406",
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"comment": "Wrong signature (which is equal to the negation of valid signature)"
|
||||
},
|
||||
{
|
||||
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 1,
|
||||
"comment": "Wrong signer"
|
||||
},
|
||||
{
|
||||
"sig": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141",
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"comment": "Signature exceeds group size"
|
||||
@@ -243,16 +166,8 @@
|
||||
"verify_error_test_cases": [
|
||||
{
|
||||
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
|
||||
"key_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
4,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"key_indices": [0, 1, 2],
|
||||
"nonce_indices": [4, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"error": {
|
||||
@@ -264,16 +179,8 @@
|
||||
},
|
||||
{
|
||||
"sig": "68537CC5234E505BD14061F8DA9E90C220A181855FD8BDB7F127BB12403B4D3B",
|
||||
"key_indices": [
|
||||
3,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"nonce_indices": [
|
||||
0,
|
||||
1,
|
||||
2
|
||||
],
|
||||
"key_indices": [3, 1, 2],
|
||||
"nonce_indices": [0, 1, 2],
|
||||
"msg_index": 0,
|
||||
"signer_index": 0,
|
||||
"error": {
|
||||
|
||||
@@ -22,120 +22,46 @@
|
||||
"msg": "F95466D086770E689964664219266FE5ED215C92AE20BAB5C9D79ADDDDF3C0CF",
|
||||
"valid_test_cases": [
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [
|
||||
0
|
||||
],
|
||||
"is_xonly": [
|
||||
true
|
||||
],
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0],
|
||||
"is_xonly": [true],
|
||||
"signer_index": 2,
|
||||
"expected": "E28A5C66E61E178C2BA19DB77B6CF9F7E2F0F56C17918CD13135E60CC848FE91",
|
||||
"comment": "A single x-only tweak"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [
|
||||
0
|
||||
],
|
||||
"is_xonly": [
|
||||
false
|
||||
],
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0],
|
||||
"is_xonly": [false],
|
||||
"signer_index": 2,
|
||||
"expected": "38B0767798252F21BF5702C48028B095428320F73A4B14DB1E25DE58543D2D2D",
|
||||
"comment": "A single plain tweak"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1
|
||||
],
|
||||
"is_xonly": [
|
||||
false,
|
||||
true
|
||||
],
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0, 1],
|
||||
"is_xonly": [false, true],
|
||||
"signer_index": 2,
|
||||
"expected": "408A0A21C4A0F5DACAF9646AD6EB6FECD7F7A11F03ED1F48DFFF2185BC2C2408",
|
||||
"comment": "A plain tweak followed by an x-only tweak"
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
3
|
||||
],
|
||||
"is_xonly": [
|
||||
false,
|
||||
false,
|
||||
true,
|
||||
true
|
||||
],
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0, 1, 2, 3],
|
||||
"is_xonly": [false, false, true, true],
|
||||
"signer_index": 2,
|
||||
"expected": "45ABD206E61E3DF2EC9E264A6FEC8292141A633C28586388235541F9ADE75435",
|
||||
"comment": "Four tweaks: plain, plain, x-only, x-only."
|
||||
},
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [
|
||||
0,
|
||||
1,
|
||||
2,
|
||||
3
|
||||
],
|
||||
"is_xonly": [
|
||||
true,
|
||||
false,
|
||||
true,
|
||||
false
|
||||
],
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [0, 1, 2, 3],
|
||||
"is_xonly": [true, false, true, false],
|
||||
"signer_index": 2,
|
||||
"expected": "B255FDCAC27B40C7CE7848E2D3B7BF5EA0ED756DA81565AC804CCCA3E1D5D239",
|
||||
"comment": "Four tweaks: x-only, plain, x-only, plain. If an implementation prohibits applying plain tweaks after x-only tweaks, it can skip this test vector or return an error."
|
||||
@@ -143,22 +69,10 @@
|
||||
],
|
||||
"error_test_cases": [
|
||||
{
|
||||
"key_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"nonce_indices": [
|
||||
1,
|
||||
2,
|
||||
0
|
||||
],
|
||||
"tweak_indices": [
|
||||
4
|
||||
],
|
||||
"is_xonly": [
|
||||
false
|
||||
],
|
||||
"key_indices": [1, 2, 0],
|
||||
"nonce_indices": [1, 2, 0],
|
||||
"tweak_indices": [4],
|
||||
"is_xonly": [false],
|
||||
"signer_index": 2,
|
||||
"error": {
|
||||
"type": "value",
|
||||
|
||||
@@ -25,16 +25,16 @@ An overview of the features provided by this package are as follows:

- Secret key generation, serialization, and parsing
- Public key generation, serialization and parsing per ANSI X9.62-1998
  - Parses uncompressed, compressed, and hybrid public keys
  - Serializes uncompressed and compressed public keys
- Specialized types for performing optimized and constant time field operations
  - `FieldVal` type for working modulo the secp256k1 field prime
  - `ModNScalar` type for working modulo the secp256k1 group order
- Elliptic curve operations in Jacobian projective coordinates
  - Point addition
  - Point doubling
  - Scalar multiplication with an arbitrary point
  - Scalar multiplication with the base point (group generator)
- Point decompression from a given x coordinate
- Nonce generation via RFC6979 with support for extra data and version
  information that can be used to prevent nonce reuse between signing algorithms

pkg/crypto/encryption/README.md (new file, +1)
@@ -0,0 +1 @@
Code copied from https://github.com/paulmillr/nip44/tree/e7aed61aaf77240ac10c325683eed14b22e7950f/go.
pkg/crypto/encryption/doc.go (new file, +3)
@@ -0,0 +1,3 @@
// Package encryption contains the message encryption schemes defined in NIP-04
// and NIP-44, used for encrypting the content of nostr messages.
package encryption
pkg/crypto/encryption/nip4.go (new file, +88)
@@ -0,0 +1,88 @@
package encryption

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"encoding/base64"

	"lol.mleku.dev/chk"
	"lol.mleku.dev/errorf"
	"lukechampine.com/frand"
)

// EncryptNip4 encrypts message with key using aes-256-cbc. key should be the
// shared secret generated by ComputeSharedSecret.
//
// Returns: base64(encrypted_bytes) + "?iv=" + base64(initialization_vector).
func EncryptNip4(msg, key []byte) (ct []byte, err error) {
	// block size is 16 bytes
	iv := make([]byte, 16)
	if _, err = frand.Read(iv); chk.E(err) {
		err = errorf.E("error creating initialization vector: %w", err)
		return
	}
	// automatically picks aes-256 based on key length (32 bytes)
	var block cipher.Block
	if block, err = aes.NewCipher(key); chk.E(err) {
		err = errorf.E("error creating block cipher: %w", err)
		return
	}
	mode := cipher.NewCBCEncrypter(block, iv)
	plaintext := []byte(msg)
	// add padding
	base := len(plaintext)
	bs := block.BlockSize()
	// this will be a number between 1 and 16 (inclusive), never 0
	padding := bs - base%bs
	// encode the padding in all the padding bytes themselves
	padText := bytes.Repeat([]byte{byte(padding)}, padding)
	paddedMsgBytes := append(plaintext, padText...)
	ciphertext := make([]byte, len(paddedMsgBytes))
	mode.CryptBlocks(ciphertext, paddedMsgBytes)
	return []byte(base64.StdEncoding.EncodeToString(ciphertext) + "?iv=" +
		base64.StdEncoding.EncodeToString(iv)), nil
}

// DecryptNip4 decrypts a content string using the shared secret key; the
// inverse operation to EncryptNip4(message, key).
func DecryptNip4(content, key []byte) (msg []byte, err error) {
	parts := bytes.Split(content, []byte("?iv="))
	if len(parts) < 2 {
		return nil, errorf.E(
			"error parsing encrypted message: no initialization vector",
		)
	}
	// note: the buffers must be sized with DecodedLen (not EncodedLen) and
	// trimmed to the number of bytes actually written, otherwise the IV
	// handed to the CBC decrypter has the wrong length
	ciphertext := make([]byte, base64.StdEncoding.DecodedLen(len(parts[0])))
	var n int
	if n, err = base64.StdEncoding.Decode(ciphertext, parts[0]); chk.E(err) {
		err = errorf.E("error decoding ciphertext from base64: %w", err)
		return
	}
	ciphertext = ciphertext[:n]
	iv := make([]byte, base64.StdEncoding.DecodedLen(len(parts[1])))
	if n, err = base64.StdEncoding.Decode(iv, parts[1]); chk.E(err) {
		err = errorf.E("error decoding iv from base64: %w", err)
		return
	}
	iv = iv[:n]
	var block cipher.Block
	if block, err = aes.NewCipher(key); chk.E(err) {
		err = errorf.E("error creating block cipher: %w", err)
		return
	}
	mode := cipher.NewCBCDecrypter(block, iv)
	msg = make([]byte, len(ciphertext))
	mode.CryptBlocks(msg, ciphertext)
	// remove padding
	plaintextLen := len(msg)
	if plaintextLen > 0 {
		// the padding amount is encoded in the padding bytes themselves
		padding := int(msg[plaintextLen-1])
		if padding > plaintextLen {
			err = errorf.E("invalid padding amount: %d", padding)
			return
		}
		msg = msg[0 : plaintextLen-padding]
	}
	return msg, nil
}
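A quick round trip through the two functions above, with a throwaway all-zero 32-byte key purely for illustration (a real key comes from an ECDH shared secret), assuming the package resolves as next.orly.dev/pkg/crypto/encryption:

```go
package main

import (
	"fmt"

	"next.orly.dev/pkg/crypto/encryption"
)

func main() {
	key := make([]byte, 32) // demo only; derive real keys via ECDH
	ct, err := encryption.EncryptNip4([]byte("hello nostr"), key)
	if err != nil {
		panic(err)
	}
	pt, err := encryption.DecryptNip4(ct, key)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt) // hello nostr
}
```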
260
pkg/crypto/encryption/nip44.go
Normal file
260
pkg/crypto/encryption/nip44.go
Normal file
@@ -0,0 +1,260 @@
|
||||
package encryption
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"math"
|
||||
|
||||
"golang.org/x/crypto/chacha20"
|
||||
"golang.org/x/crypto/hkdf"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/errorf"
|
||||
"next.orly.dev/pkg/crypto/p256k"
|
||||
"next.orly.dev/pkg/crypto/sha256"
|
||||
"next.orly.dev/pkg/interfaces/signer"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
version byte = 2
|
||||
MinPlaintextSize = 0x0001 // 1b msg => padded to 32b
|
||||
MaxPlaintextSize = 0xffff // 65535 (64kb-1) => padded to 64kb
|
||||
)
|
||||
|
||||
type Opts struct {
|
||||
err error
|
||||
nonce []byte
|
||||
}
|
||||
|
||||
// Deprecated: use WithCustomNonce instead of WithCustomSalt, so the naming is less confusing
|
||||
var WithCustomSalt = WithCustomNonce
|
||||
|
||||
// WithCustomNonce enables using a custom nonce (salt) instead of using the
|
||||
// system crypto/rand entropy source.
|
||||
func WithCustomNonce(salt []byte) func(opts *Opts) {
|
||||
return func(opts *Opts) {
|
||||
if len(salt) != 32 {
|
||||
opts.err = errorf.E("salt must be 32 bytes, got %d", len(salt))
|
||||
}
|
||||
opts.nonce = salt
|
||||
}
|
||||
}
|
||||
|
||||
// Encrypt data using a provided symmetric conversation key using NIP-44
|
||||
// encryption (chacha20 cipher stream and sha256 HMAC).
|
||||
func Encrypt(
|
||||
plaintext, conversationKey []byte, applyOptions ...func(opts *Opts),
|
||||
) (
|
||||
cipherString []byte, err error,
|
||||
) {
|
||||
|
||||
var o Opts
|
||||
for _, apply := range applyOptions {
|
||||
apply(&o)
|
||||
}
|
||||
if chk.E(o.err) {
|
||||
err = o.err
|
||||
return
|
||||
}
|
||||
if o.nonce == nil {
|
||||
o.nonce = make([]byte, 32)
|
||||
if _, err = rand.Read(o.nonce); chk.E(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
var enc, cc20nonce, auth []byte
|
||||
if enc, cc20nonce, auth, err = getKeys(
|
||||
conversationKey, o.nonce,
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
plain := plaintext
|
||||
size := len(plain)
|
||||
if size < MinPlaintextSize || size > MaxPlaintextSize {
|
||||
err = errorf.E("plaintext should be between 1b and 64kB")
|
||||
return
|
||||
}
|
||||
padding := CalcPadding(size)
|
||||
padded := make([]byte, 2+padding)
|
||||
binary.BigEndian.PutUint16(padded, uint16(size))
|
||||
copy(padded[2:], plain)
|
||||
var cipher []byte
|
||||
if cipher, err = encrypt(enc, cc20nonce, padded); chk.E(err) {
|
||||
return
|
||||
}
|
||||
var mac []byte
|
||||
if mac, err = sha256Hmac(auth, cipher, o.nonce); chk.E(err) {
|
||||
return
|
||||
}
|
||||
ct := make([]byte, 0, 1+32+len(cipher)+32)
|
||||
ct = append(ct, version)
|
||||
ct = append(ct, o.nonce...)
|
||||
ct = append(ct, cipher...)
|
||||
ct = append(ct, mac...)
|
||||
cipherString = make([]byte, base64.StdEncoding.EncodedLen(len(ct)))
|
||||
base64.StdEncoding.Encode(cipherString, ct)
|
||||
return
|
||||
}
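The assembled payload before base64 is a fixed layout: 1 version byte, the 32-byte nonce, the padded ciphertext, then the 32-byte HMAC. A sketch of the size arithmetic, which also explains the 132-byte minimum that Decrypt checks below:

```go
package main

import "fmt"

// encodedNip44Len computes the final base64 payload length for a given
// ciphertext length, mirroring the 1 + 32 + len(cipher) + 32 layout used
// by Encrypt above.
func encodedNip44Len(cipherLen int) int {
	raw := 1 + 32 + cipherLen + 32
	return ((raw + 2) / 3) * 4 // same as base64.StdEncoding.EncodedLen
}

func main() {
	// a 1-byte message pads to 32 bytes plus the 2-byte length prefix
	fmt.Println(encodedNip44Len(2 + 32)) // 132, the minimum valid payload
}
```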
|
||||
|
||||
// Decrypt data that has been encoded using a provided symmetric conversation
|
||||
// key using NIP-44 encryption (chacha20 cipher stream and sha256 HMAC).
|
||||
func Decrypt(b64ciphertextWrapped, conversationKey []byte) (
|
||||
plaintext []byte,
|
||||
err error,
|
||||
) {
|
||||
cLen := len(b64ciphertextWrapped)
|
||||
if cLen < 132 || cLen > 87472 {
|
||||
err = errorf.E("invalid payload length: %d", cLen)
|
||||
return
|
||||
}
|
||||
if len(b64ciphertextWrapped) > 0 && b64ciphertextWrapped[0] == '#' {
|
||||
err = errorf.E("unknown version")
|
||||
return
|
||||
}
|
||||
var decoded []byte
|
||||
if decoded, err = base64.StdEncoding.DecodeString(string(b64ciphertextWrapped)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if decoded[0] != version {
|
||||
err = errorf.E("unknown version %d", decoded[0])
|
||||
return
|
||||
}
|
||||
dLen := len(decoded)
|
||||
if dLen < 99 || dLen > 65603 {
|
||||
err = errorf.E("invalid data length: %d", dLen)
|
||||
return
|
||||
}
|
||||
nonce, ciphertext, givenMac := decoded[1:33], decoded[33:dLen-32], decoded[dLen-32:]
|
||||
var enc, cc20nonce, auth []byte
|
||||
if enc, cc20nonce, auth, err = getKeys(conversationKey, nonce); chk.E(err) {
|
||||
return
|
||||
}
|
||||
var expectedMac []byte
|
||||
if expectedMac, err = sha256Hmac(auth, ciphertext, nonce); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if !utils.FastEqual(givenMac, expectedMac) {
|
||||
err = errorf.E("invalid hmac")
|
||||
return
|
||||
}
|
||||
var padded []byte
|
||||
if padded, err = encrypt(enc, cc20nonce, ciphertext); chk.E(err) {
|
||||
return
|
||||
}
|
||||
unpaddedLen := binary.BigEndian.Uint16(padded[0:2])
|
||||
if unpaddedLen < uint16(MinPlaintextSize) || unpaddedLen > uint16(MaxPlaintextSize) ||
|
||||
len(padded) != 2+CalcPadding(int(unpaddedLen)) {
|
||||
err = errorf.E("invalid padding")
|
||||
return
|
||||
}
|
||||
unpadded := padded[2:][:unpaddedLen]
|
||||
if len(unpadded) == 0 || len(unpadded) != int(unpaddedLen) {
|
||||
err = errorf.E("invalid padding")
|
||||
return
|
||||
}
|
||||
plaintext = unpadded
|
||||
return
|
||||
}
|
||||
|
||||
// GenerateConversationKeyFromHex performs an ECDH key generation hashed with the nip-44-v2 using hkdf.
|
||||
func GenerateConversationKeyFromHex(pkh, skh string) (ck []byte, err error) {
|
||||
if skh >= "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141" ||
|
||||
skh == "0000000000000000000000000000000000000000000000000000000000000000" {
|
||||
err = errorf.E(
|
||||
"invalid private key: x coordinate %s is not on the secp256k1 curve",
|
||||
skh,
|
||||
)
|
||||
return
|
||||
}
|
||||
var sign signer.I
|
||||
if sign, err = p256k.NewSecFromHex(skh); chk.E(err) {
|
||||
return
|
||||
}
|
||||
var pk []byte
|
||||
if pk, err = p256k.HexToBin(pkh); chk.E(err) {
|
||||
return
|
||||
}
|
||||
var shared []byte
|
||||
if shared, err = sign.ECDH(pk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
ck = hkdf.Extract(sha256.New, shared, []byte("nip44-v2"))
|
||||
return
|
||||
}
|
||||
|
||||
func GenerateConversationKeyWithSigner(sign signer.I, pk []byte) (
|
||||
ck []byte, err error,
|
||||
) {
|
||||
var shared []byte
|
||||
if shared, err = sign.ECDH(pk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
ck = hkdf.Extract(sha256.New, shared, []byte("nip44-v2"))
|
||||
return
|
||||
}
|
||||
|
||||
func encrypt(key, nonce, message []byte) (dst []byte, err error) {
|
||||
var cipher *chacha20.Cipher
|
||||
if cipher, err = chacha20.NewUnauthenticatedCipher(key, nonce); chk.E(err) {
|
||||
return
|
||||
}
|
||||
dst = make([]byte, len(message))
|
||||
cipher.XORKeyStream(dst, message)
|
||||
return
|
||||
}
|
||||
|
||||
func sha256Hmac(key, ciphertext, nonce []byte) (h []byte, err error) {
|
||||
if len(nonce) != sha256.Size {
|
||||
err = errorf.E("nonce aad must be 32 bytes")
|
||||
return
|
||||
}
|
||||
hm := hmac.New(sha256.New, key)
|
||||
hm.Write(nonce)
|
||||
hm.Write(ciphertext)
|
||||
h = hm.Sum(nil)
|
||||
return
|
||||
}
|
||||
|
||||
func getKeys(conversationKey, nonce []byte) (
|
||||
enc, cc20nonce, auth []byte, err error,
|
||||
) {
|
||||
if len(conversationKey) != 32 {
|
||||
err = errorf.E("conversation key must be 32 bytes")
|
||||
return
|
||||
}
|
||||
if len(nonce) != 32 {
|
||||
err = errorf.E("nonce must be 32 bytes")
|
||||
return
|
||||
}
|
||||
r := hkdf.Expand(sha256.New, conversationKey, nonce)
|
||||
enc = make([]byte, 32)
|
||||
if _, err = io.ReadFull(r, enc); chk.E(err) {
|
||||
return
|
||||
}
|
||||
cc20nonce = make([]byte, 12)
|
||||
if _, err = io.ReadFull(r, cc20nonce); chk.E(err) {
|
||||
return
|
||||
}
|
||||
auth = make([]byte, 32)
|
||||
if _, err = io.ReadFull(r, auth); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CalcPadding creates padding for the message payload that is precisely a power
// of two in order to reduce the chances of a plaintext attack. This is wasteful,
// because it can blow out the message size considerably when a few dozen random
// bytes and a length prefix would achieve the same result.
func CalcPadding(sLen int) (l int) {
|
||||
if sLen <= 32 {
|
||||
return 32
|
||||
}
|
||||
nextPower := 1 << int(math.Floor(math.Log2(float64(sLen-1)))+1)
|
||||
chunk := int(math.Max(32, float64(nextPower/8)))
|
||||
l = chunk * int(math.Floor(float64((sLen-1)/chunk))+1)
|
||||
return
|
||||
}
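Tracing CalcPadding for a few sizes makes the chunking rule concrete: everything up to 32 bytes stays at 32; beyond that, sizes round up in chunks of max(32, nextPowerOfTwo/8):

```go
package main

import (
	"fmt"
	"math"
)

// calcPadding reproduces the padding rule above, for illustration only.
func calcPadding(sLen int) int {
	if sLen <= 32 {
		return 32
	}
	nextPower := 1 << int(math.Floor(math.Log2(float64(sLen-1)))+1)
	chunk := int(math.Max(32, float64(nextPower/8)))
	return chunk * int(math.Floor(float64((sLen-1)/chunk))+1)
}

func main() {
	for _, n := range []int{1, 32, 33, 100, 1000, 65535} {
		fmt.Printf("%5d -> %5d\n", n, calcPadding(n))
	}
	// 1 -> 32, 32 -> 32, 33 -> 64, 100 -> 128, 1000 -> 1024, 65535 -> 65536
}
```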

pkg/crypto/encryption/nip44_test.go (new file, +1381; diff suppressed because it is too large)
pkg/crypto/keys/keys.go (new file, +83)
@@ -0,0 +1,83 @@
// Package keys is a set of helpers for generating and converting public/secret
// keys to hex and back to binary.
package keys

import (
	"bytes"

	"lol.mleku.dev/chk"
	"next.orly.dev/pkg/crypto/ec/schnorr"
	"next.orly.dev/pkg/crypto/p256k"
	"next.orly.dev/pkg/encoders/hex"
	"next.orly.dev/pkg/utils"
)

// GeneratePrivateKey - deprecated, use GenerateSecretKeyHex
var GeneratePrivateKey = func() string { return GenerateSecretKeyHex() }

// GenerateSecretKey creates a new secret key and returns the bytes of the secret.
func GenerateSecretKey() (skb []byte, err error) {
	signer := &p256k.Signer{}
	if err = signer.Generate(); chk.E(err) {
		return
	}
	skb = signer.Sec()
	return
}

// GenerateSecretKeyHex generates a secret key and encodes the bytes as hex.
func GenerateSecretKeyHex() (sks string) {
	skb, err := GenerateSecretKey()
	if chk.E(err) {
		return
	}
	return hex.Enc(skb)
}

// GetPublicKeyHex generates a public key from a hex encoded secret key.
func GetPublicKeyHex(sk string) (pk string, err error) {
	var b []byte
	if b, err = hex.Dec(sk); chk.E(err) {
		return
	}
	signer := &p256k.Signer{}
	if err = signer.InitSec(b); chk.E(err) {
		return
	}
	return hex.Enc(signer.Pub()), nil
}

// SecretBytesToPubKeyHex generates a public key from secret key bytes.
func SecretBytesToPubKeyHex(skb []byte) (pk string, err error) {
	signer := &p256k.Signer{}
	if err = signer.InitSec(skb); chk.E(err) {
		return
	}
	return hex.Enc(signer.Pub()), nil
}

// IsValid32ByteHex checks that a hex string is a valid 32 byte lower case hex
// encoded value as per the nostr NIP-01 spec.
func IsValid32ByteHex[V []byte | string](pk V) bool {
	// reject anything that is not already lower case
	if !utils.FastEqual(bytes.ToLower([]byte(pk)), []byte(pk)) {
		return false
	}
	// 32 bytes encode to exactly 64 hex characters
	if len(pk) != 64 {
		return false
	}
	dec := make([]byte, 32)
	if _, err := hex.DecBytes(dec, []byte(pk)); chk.E(err) {
		// not valid hex
		return false
	}
	return true
}

// IsValidPublicKey checks that a hex encoded public key is a valid BIP-340 public key.
func IsValidPublicKey[V []byte | string](pk V) bool {
	v, _ := hex.Dec(string(pk))
	_, err := schnorr.ParsePubKey(v)
	return err == nil
}

// HexPubkeyToBytes decodes a pubkey from hex encoded string/bytes.
func HexPubkeyToBytes[V []byte | string](hpk V) (pkb []byte, err error) {
	return hex.DecAppend(nil, []byte(hpk))
}
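Typical use of the helpers above, assuming the package resolves as next.orly.dev/pkg/crypto/keys:

```go
package main

import (
	"fmt"

	"next.orly.dev/pkg/crypto/keys"
)

func main() {
	sk := keys.GenerateSecretKeyHex()
	pk, err := keys.GetPublicKeyHex(sk)
	if err != nil {
		panic(err)
	}
	fmt.Println("sk:", sk)
	fmt.Println("pk:", pk, "valid:", keys.IsValid32ByteHex(pk))
}
```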
@@ -25,7 +25,7 @@ it

For ubuntu, you need these:

    sudo apt -y install build-essential autoconf libtool

For other linux distributions, the process is the same but the dependencies are
likely different. The main thing is it requires make, gcc/++, autoconf and
@@ -65,4 +65,4 @@ coordinate and this is incorrect for nostr. It will be enabled soon... for now
it is done with the `btcec` fallback version. This is slower, however previous
tests have shown that this ECDH library is fast enough to enable 8mb/s
throughput per CPU thread when used to generate a distinct secret for TCP
packets. The C library will likely raise this to 20mb/s or more.

@@ -95,9 +95,9 @@ Note that, because of the scheduling overhead, for small messages (< 1 MB) you
will be better off using the regular SHA256 hashing (but those are typically not
performance critical anyway). Some other tips to get the best performance:

* Have many go routines doing SHA256 calculations in parallel.
* Try to Write() messages in multiples of 64 bytes.
* Try to keep the overall length of messages to a roughly similar size ie. 5
- Have many go routines doing SHA256 calculations in parallel.
- Try to Write() messages in multiples of 64 bytes.
- Try to keep the overall length of messages to a roughly similar size ie. 5
  MB (this way all 16 ‘lanes’ in the AVX512 computations are contributing as
  much as possible).

@@ -128,7 +128,7 @@ Below is the speed in MB/s for a single core (ranked fast to slow) for blocks
larger than 1 MB.

| Processor                         | SIMD    | Speed (MB/s) |
|-----------------------------------|---------|-------------:|
| --------------------------------- | ------- | -----------: |
| 3.0 GHz Intel Xeon Platinum 8124M | AVX512  |         3498 |
| 3.7 GHz AMD Ryzen 7 2700X         | SHA Ext |         1979 |
| 1.2 GHz ARM Cortex-A53            | ARM64   |          638 |
@@ -160,18 +160,18 @@ Below you can see a small excerpt highlighting one of the rounds as is done for
the SHA256 calculation process (for full code
see [sha256block_arm64.s](https://github.com/minio/sha256-simd/blob/master/sha256block_arm64.s)).

```
sha256h q2, q3, v9.4s
sha256h2 q3, q4, v9.4s
sha256su0 v5.4s, v6.4s
rev32 v8.16b, v8.16b
add v9.4s, v7.4s, v18.4s
mov v4.16b, v2.16b
sha256h q2, q3, v10.4s
sha256h2 q3, q4, v10.4s
sha256su0 v6.4s, v7.4s
sha256su1 v5.4s, v7.4s, v8.4s
```

### Detailed benchmarks
pkg/database/count.go (new file, +44)
@@ -0,0 +1,44 @@
package database

import (
	"context"

	"next.orly.dev/pkg/encoders/filter"
)

// CountEvents mirrors the initial selection logic of QueryEvents but stops
// once we have identified candidate event serials (id/pk/ts). It returns the
// count of those serials. The `approx` flag is always false as requested.
func (d *D) CountEvents(c context.Context, f *filter.F) (
	count int, approx bool, err error,
) {
	approx = false
	if f == nil {
		return 0, false, nil
	}

	// If explicit Ids are provided, count how many of them resolve to serials.
	if f.Ids != nil && f.Ids.Len() > 0 {
		var serials map[string]interface{}
		// Use type inference without importing extra packages by discarding the
		// concrete value type via a two-step assignment.
		if tmp, idErr := d.GetSerialsByIds(f.Ids); idErr != nil {
			return 0, false, idErr
		} else {
			// Reassign to a map with empty interface values to avoid referencing
			// the concrete Uint40 type here.
			serials = make(map[string]interface{}, len(tmp))
			for k := range tmp {
				serials[k] = struct{}{}
			}
		}
		return len(serials), false, nil
	}

	// Otherwise, query for candidate Id/Pubkey/Timestamp triplets and count them.
	if idPkTs, qErr := d.QueryForIds(c, f); qErr != nil {
		return 0, false, qErr
	} else {
		return len(idPkTs), false, nil
	}
}
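A usage sketch, written as if added inside the database package, counting all stored kind 3 events (the filter construction follows the project's encoders seen elsewhere in this diff):

```go
package database

import (
	"context"

	"lol.mleku.dev/log"
	"next.orly.dev/pkg/encoders/filter"
	"next.orly.dev/pkg/encoders/kind"
)

// countFollowLists counts every stored kind 3 (follow list) event.
func countFollowLists(ctx context.Context, d *D) (int, error) {
	f := &filter.F{Kinds: kind.NewS(kind.New(kind.FollowList.K))}
	n, _, err := d.CountEvents(ctx, f)
	if err != nil {
		return 0, err
	}
	log.I.F("stored follow lists: %d", n)
	return n, nil
}
```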
@@ -52,8 +52,18 @@ func New(
	}

	opts := badger.DefaultOptions(d.dataDir)
	opts.BlockCacheSize = int64(units.Gb)
	opts.BlockSize = units.Gb
	// Use sane defaults to avoid excessive memory usage during startup.
	// Badger's default BlockSize is small (e.g., 4KB). Overriding it to very large values
	// can cause massive allocations and OOM panics during deployments.
	// Set BlockCacheSize to a moderate value and keep BlockSize small.
	opts.BlockCacheSize = int64(256 * units.Mb) // 256 MB cache
	opts.BlockSize = 4 * units.Kb               // 4 KB block size
	// Prevent huge allocations during table building and memtable flush.
	// Badger's TableBuilder buffer is sized by BaseTableSize; ensure it's small.
	opts.BaseTableSize = 64 * units.Mb // 64 MB per table (default ~2MB, increased for fewer files but safe)
	opts.MemTableSize = 64 * units.Mb  // 64 MB memtable to match table size
	// Keep value log files to a moderate size as well
	opts.ValueLogFileSize = 256 * units.Mb // 256 MB value log files
	opts.CompactL0OnClose = true
	opts.LmaxCompaction = true
	opts.Compression = options.None

@@ -153,5 +153,35 @@ func GetIndexesForEvent(ev *event.E, serial uint64) (
	if err = appendIndexBytes(&idxs, kindPubkeyIndex); chk.E(err) {
		return
	}

	// Word token indexes (from content)
	if len(ev.Content) > 0 {
		for _, h := range TokenHashes(ev.Content) {
			w := new(Word)
			w.FromWord(h) // 8-byte truncated hash
			wIdx := indexes.WordEnc(w, ser)
			if err = appendIndexBytes(&idxs, wIdx); chk.E(err) {
				return
			}
		}
	}
	// Extend full-text search to include all fields of all tags
	if ev.Tags != nil && ev.Tags.Len() > 0 {
		for _, t := range *ev.Tags {
			for _, field := range t.T { // include key and all values
				if len(field) == 0 {
					continue
				}
				for _, h := range TokenHashes(field) {
					w := new(Word)
					w.FromWord(h)
					wIdx := indexes.WordEnc(w, ser)
					if err = appendIndexBytes(&idxs, wIdx); chk.E(err) {
						return
					}
				}
			}
		}
	}
	return
}

@@ -113,6 +113,27 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
		return
	}

	// Word search: if Search field is present, generate word index ranges
	if len(f.Search) > 0 {
		for _, h := range TokenHashes(f.Search) {
			w := new(types2.Word)
			w.FromWord(h)
			buf := new(bytes.Buffer)
			idx := indexes.WordEnc(w, nil)
			if err = idx.MarshalWrite(buf); chk.E(err) {
				return
			}
			b := buf.Bytes()
			end := make([]byte, len(b))
			copy(end, b)
			for i := 0; i < 5; i++ { // match any serial
				end = append(end, 0xff)
			}
			idxs = append(idxs, Range{b, end})
		}
		return
	}

	caStart := new(types2.Uint64)
	caEnd := new(types2.Uint64)

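The word index pairs an 8-byte truncated hash of each token with the event's 5-byte serial; queries then scan the prefix range for a hash with the serial bytes maxed out to 0xff. TokenHashes itself is not shown in this diff, so the following is only a plausible stdlib sketch of the idea (lower-case split plus truncated SHA-256; the real tokenizer may differ):

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// tokenHashes splits text into lower-case words and returns the first 8
// bytes of each word's SHA-256. An assumed stand-in for the project's
// TokenHashes; only the 8-byte truncated-hash shape is taken from the
// diff above.
func tokenHashes(text []byte) [][]byte {
	var out [][]byte
	for _, w := range strings.Fields(strings.ToLower(string(text))) {
		h := sha256.Sum256([]byte(w))
		out = append(out, h[:8])
	}
	return out
}

func main() {
	for _, h := range tokenHashes([]byte("Hello nostr world")) {
		fmt.Printf("%x\n", h)
	}
}
```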
pkg/database/identity.go (new file, +81)
@@ -0,0 +1,81 @@
package database

import (
	"errors"
	"fmt"

	"github.com/dgraph-io/badger/v4"
	"lol.mleku.dev/chk"
	"lol.mleku.dev/log"
	"next.orly.dev/pkg/crypto/keys"
	"next.orly.dev/pkg/encoders/hex"
)

const relayIdentitySecretKey = "relay:identity:sk"

// GetRelayIdentitySecret returns the relay identity secret key bytes if present.
// If the key is not found, returns (nil, badger.ErrKeyNotFound).
func (d *D) GetRelayIdentitySecret() (skb []byte, err error) {
	err = d.DB.View(func(txn *badger.Txn) error {
		item, err := txn.Get([]byte(relayIdentitySecretKey))
		if errors.Is(err, badger.ErrKeyNotFound) {
			return err
		}
		if err != nil {
			return err
		}
		return item.Value(func(val []byte) error {
			// value stored as hex string
			b, err := hex.Dec(string(val))
			if err != nil {
				return err
			}
			skb = make([]byte, len(b))
			copy(skb, b)
			return nil
		})
	})
	return
}

// SetRelayIdentitySecret stores the relay identity secret key bytes (expects 32 bytes).
func (d *D) SetRelayIdentitySecret(skb []byte) (err error) {
	if len(skb) != 32 {
		return fmt.Errorf("invalid secret key length: %d", len(skb))
	}
	val := []byte(hex.Enc(skb))
	return d.DB.Update(func(txn *badger.Txn) error {
		return txn.Set([]byte(relayIdentitySecretKey), val)
	})
}

// GetOrCreateRelayIdentitySecret retrieves the existing relay identity secret
// key or creates and stores a new one if none exists.
func (d *D) GetOrCreateRelayIdentitySecret() (skb []byte, err error) {
	// Try get fast path
	if skb, err = d.GetRelayIdentitySecret(); err == nil && len(skb) == 32 {
		return skb, nil
	}
	if err != nil && !errors.Is(err, badger.ErrKeyNotFound) {
		return nil, err
	}

	// Create new key and store atomically
	var gen []byte
	if gen, err = keys.GenerateSecretKey(); chk.E(err) {
		return nil, err
	}
	if err = d.SetRelayIdentitySecret(gen); chk.E(err) {
		return nil, err
	}
	log.I.F("generated new relay identity key (pub=%s)", mustPub(gen))
	return gen, nil
}

func mustPub(skb []byte) string {
	pk, err := keys.SecretBytesToPubKeyHex(skb)
	if err != nil {
		return ""
	}
	return pk
}
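Bootstrapping the relay's own keypair then reduces to one call. A sketch, written as if added inside the database package:

```go
package database

import (
	"lol.mleku.dev/log"
	"next.orly.dev/pkg/crypto/keys"
)

// relayPubkey fetches (or lazily creates) the identity secret and
// reports the derived public key.
func relayPubkey(d *D) (string, error) {
	skb, err := d.GetOrCreateRelayIdentitySecret()
	if err != nil {
		return "", err
	}
	pk, err := keys.SecretBytesToPubKeyHex(skb)
	if err != nil {
		return "", err
	}
	log.I.F("relay identity pubkey: %s", pk)
	return pk, nil
}
```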
@@ -69,6 +69,7 @@ const (
	TagPubkeyPrefix     = I("tpc") // tag, pubkey, created at
	TagKindPubkeyPrefix = I("tkp") // tag, kind, pubkey, created at

	WordPrefix       = I("wrd") // word hash, serial
	ExpirationPrefix = I("exp") // timestamp of expiration
	VersionPrefix    = I("ver") // database version number, for triggering reindexes when new keys are added (policy is add-only).
)
@@ -106,6 +107,8 @@ func Prefix(prf int) (i I) {
		return ExpirationPrefix
	case Version:
		return VersionPrefix
	case Word:
		return WordPrefix
	}
	return
}
@@ -147,6 +150,8 @@ func Identify(r io.Reader) (i int, err error) {

	case ExpirationPrefix:
		i = Expiration
	case WordPrefix:
		i = Word
	}
	return
}
@@ -233,6 +238,21 @@ func FullIdPubkeyDec(
	return New(NewPrefix(), ser, fid, p, ca)
}

// Word index for tokenized search terms
//
// 3 prefix|8 word-hash|5 serial
var Word = next()

func WordVars() (w *types.Word, ser *types.Uint40) {
	return new(types.Word), new(types.Uint40)
}
func WordEnc(w *types.Word, ser *types.Uint40) (enc *T) {
	return New(NewPrefix(Word), w, ser)
}
func WordDec(w *types.Word, ser *types.Uint40) (enc *T) {
	return New(NewPrefix(), w, ser)
}

// CreatedAt is an index that allows search for the timestamp on the event.
//
// 3 prefix|8 timestamp|5 serial

@@ -14,7 +14,7 @@ import (
)

const (
-	currentVersion uint32 = 1
+	currentVersion uint32 = 2
)

func (d *D) RunMigrations() {
@@ -56,22 +56,8 @@ func (d *D) RunMigrations() {
	}
	if dbVersion == 0 {
		log.D.F("no version tag found, creating...")
-		// write the version tag now
-		if err = d.Update(
-			func(txn *badger.Txn) (err error) {
-				buf := new(bytes.Buffer)
-				vv := new(types.Uint32)
-				vv.Set(currentVersion)
-				log.I.S(vv)
-				if err = indexes.VersionEnc(vv).MarshalWrite(buf); chk.E(err) {
-					return
-				}
-				if err = txn.Set(buf.Bytes(), nil); chk.E(err) {
-					return
-				}
-				return
-			},
-		); chk.E(err) {
+		// write the version tag now (ensure any old tags are removed first)
+		if err = d.writeVersionTag(currentVersion); chk.E(err) {
			return
		}
	}
@@ -79,7 +65,136 @@ func (d *D) RunMigrations() {
		log.I.F("migrating to version 1...")
		// the first migration is expiration tags
		d.UpdateExpirationTags()
		// bump to version 1
		_ = d.writeVersionTag(1)
	}
	if dbVersion < 2 {
		log.I.F("migrating to version 2...")
		// backfill word indexes
		d.UpdateWordIndexes()
		// bump to version 2
		_ = d.writeVersionTag(2)
	}
}

// writeVersionTag writes a new version tag key to the database (no value)
func (d *D) writeVersionTag(ver uint32) (err error) {
	return d.Update(
		func(txn *badger.Txn) (err error) {
			// delete any existing version keys first (there should only be one, but be safe)
			verPrf := new(bytes.Buffer)
			if _, err = indexes.VersionPrefix.Write(verPrf); chk.E(err) {
				return
			}
			it := txn.NewIterator(badger.IteratorOptions{Prefix: verPrf.Bytes()})
			defer it.Close()
			for it.Rewind(); it.Valid(); it.Next() {
				item := it.Item()
				key := item.KeyCopy(nil)
				if err = txn.Delete(key); chk.E(err) {
					return
				}
			}

			// now write the new version key
			buf := new(bytes.Buffer)
			vv := new(types.Uint32)
			vv.Set(ver)
			if err = indexes.VersionEnc(vv).MarshalWrite(buf); chk.E(err) {
				return
			}
			return txn.Set(buf.Bytes(), nil)
		},
	)
}
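RunMigrations has to read `dbVersion` back out of the store, and that read is not part of this hunk. A hypothetical sketch of what it could look like, assuming the key layout is the 3-byte `ver` prefix followed by a 4-byte big-endian uint32 (both assumptions, since the codec internals are not shown here; uses `encoding/binary` from the standard library):

```
// Hypothetical sketch: reading the version back. The offsets assume
// "ver" (3 bytes) | big-endian uint32 (4 bytes), which is not confirmed
// by this diff.
func (d *D) readVersionTag() (ver uint32, err error) {
	err = d.View(func(txn *badger.Txn) error {
		verPrf := new(bytes.Buffer)
		if _, e := indexes.VersionPrefix.Write(verPrf); e != nil {
			return e
		}
		it := txn.NewIterator(badger.IteratorOptions{Prefix: verPrf.Bytes()})
		defer it.Close()
		for it.Rewind(); it.Valid(); it.Next() {
			key := it.Item().Key()
			if len(key) >= 7 {
				ver = binary.BigEndian.Uint32(key[3:7]) // assumed encoding
			}
		}
		return nil
	})
	return
}
```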
func (d *D) UpdateWordIndexes() {
	log.T.F("updating word indexes...")
	var err error
	var wordIndexes [][]byte
	// iterate all events and generate word index keys from content and tags
	if err = d.View(
		func(txn *badger.Txn) (err error) {
			prf := new(bytes.Buffer)
			if err = indexes.EventEnc(nil).MarshalWrite(prf); chk.E(err) {
				return
			}
			it := txn.NewIterator(badger.IteratorOptions{Prefix: prf.Bytes()})
			defer it.Close()
			for it.Rewind(); it.Valid(); it.Next() {
				item := it.Item()
				var val []byte
				if val, err = item.ValueCopy(nil); chk.E(err) {
					continue
				}
				// decode the event
				ev := new(event.E)
				if err = ev.UnmarshalBinary(bytes.NewBuffer(val)); chk.E(err) {
					continue
				}
				// log.I.F("updating word indexes for event: %s", ev.Serialize())
				// read serial from key
				key := item.Key()
				ser := indexes.EventVars()
				if err = indexes.EventDec(ser).UnmarshalRead(bytes.NewBuffer(key)); chk.E(err) {
					continue
				}
				// collect unique word hashes for this event
				seen := make(map[string]struct{})
				// from content
				if len(ev.Content) > 0 {
					for _, h := range TokenHashes(ev.Content) {
						seen[string(h)] = struct{}{}
					}
				}
				// from all tag fields (key and values)
				if ev.Tags != nil && ev.Tags.Len() > 0 {
					for _, t := range *ev.Tags {
						for _, field := range t.T {
							if len(field) == 0 {
								continue
							}
							for _, h := range TokenHashes(field) {
								seen[string(h)] = struct{}{}
							}
						}
					}
				}
				// build keys
				for k := range seen {
					w := new(types.Word)
					w.FromWord([]byte(k))
					buf := new(bytes.Buffer)
					if err = indexes.WordEnc(
						w, ser,
					).MarshalWrite(buf); chk.E(err) {
						continue
					}
					wordIndexes = append(wordIndexes, buf.Bytes())
				}
			}
			return
		},
	); chk.E(err) {
		return
	}
	// sort the indexes for ordered writes
	sort.Slice(
		wordIndexes, func(i, j int) bool {
			return bytes.Compare(
				wordIndexes[i], wordIndexes[j],
			) < 0
		},
	)
	// write in a batch
	batch := d.NewWriteBatch()
	for _, v := range wordIndexes {
		if err = batch.Set(v, nil); chk.E(err) {
			continue
		}
	}
	_ = batch.Flush()
	log.T.F("finished updating word indexes...")
}

func (d *D) UpdateExpirationTags() {
194	pkg/database/query-events-search_test.go	Normal file
@@ -0,0 +1,194 @@
package database

import (
	"context"
	"os"
	"testing"
	"time"

	"lol.mleku.dev/chk"
	"next.orly.dev/pkg/crypto/p256k"
	"next.orly.dev/pkg/encoders/event"
	"next.orly.dev/pkg/encoders/filter"
	"next.orly.dev/pkg/encoders/kind"
	"next.orly.dev/pkg/encoders/tag"
	"next.orly.dev/pkg/encoders/timestamp"
)

// helper to create a fresh DB
func newTestDB(t *testing.T) (*D, context.Context, context.CancelFunc, string) {
	t.Helper()
	tempDir, err := os.MkdirTemp("", "search-db-*")
	if err != nil {
		t.Fatalf("Failed to create temp dir: %v", err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	db, err := New(ctx, cancel, tempDir, "error")
	if err != nil {
		cancel()
		os.RemoveAll(tempDir)
		t.Fatalf("Failed to init DB: %v", err)
	}
	return db, ctx, cancel, tempDir
}

// TestQueryEventsBySearchTerms creates a small set of events with content and tags,
// saves them, then queries using filter.Search to ensure the word index works.
func TestQueryEventsBySearchTerms(t *testing.T) {
	db, ctx, cancel, tempDir := newTestDB(t)
	defer func() {
		// cancel context first to stop background routines cleanly
		cancel()
		db.Close()
		os.RemoveAll(tempDir)
	}()

	// signer for all events
	sign := new(p256k.Signer)
	if err := sign.Generate(); chk.E(err) {
		t.Fatalf("signer generate: %v", err)
	}

	now := timestamp.Now().V

	// Events to cover tokenizer rules:
	// - regular words
	// - URLs ignored
	// - 64-char hex ignored
	// - nostr: URIs ignored
	// - #[n] mentions ignored
	// - tag fields included in search

	// 1. Contains words: "alpha beta", plus URL and hex (ignored)
	ev1 := event.New()
	ev1.Kind = kind.TextNote.K
	ev1.Pubkey = sign.Pub()
	ev1.CreatedAt = now - 5
	ev1.Content = []byte("Alpha beta visit https://example.com deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
	ev1.Tags = tag.NewS()
	ev1.Sign(sign)
	if _, _, err := db.SaveEvent(ctx, ev1); err != nil {
		t.Fatalf("save ev1: %v", err)
	}

	// 2. Contains overlap word "beta" and unique "gamma" and nostr: URI ignored
	ev2 := event.New()
	ev2.Kind = kind.TextNote.K
	ev2.Pubkey = sign.Pub()
	ev2.CreatedAt = now - 4
	ev2.Content = []byte("beta and GAMMA with nostr:nevent1qqqqq")
	ev2.Tags = tag.NewS()
	ev2.Sign(sign)
	if _, _, err := db.SaveEvent(ctx, ev2); err != nil {
		t.Fatalf("save ev2: %v", err)
	}

	// 3. Contains only a URL (should not create word tokens) and mention #[1] (ignored)
	ev3 := event.New()
	ev3.Kind = kind.TextNote.K
	ev3.Pubkey = sign.Pub()
	ev3.CreatedAt = now - 3
	ev3.Content = []byte("see www.example.org #[1]")
	ev3.Tags = tag.NewS()
	ev3.Sign(sign)
	if _, _, err := db.SaveEvent(ctx, ev3); err != nil {
		t.Fatalf("save ev3: %v", err)
	}

	// 4. No content words, but tag value has searchable words: "delta epsilon"
	ev4 := event.New()
	ev4.Kind = kind.TextNote.K
	ev4.Pubkey = sign.Pub()
	ev4.CreatedAt = now - 2
	ev4.Content = []byte("")
	ev4.Tags = tag.NewS()
	*ev4.Tags = append(*ev4.Tags, tag.NewFromAny("t", "delta epsilon"))
	ev4.Sign(sign)
	if _, _, err := db.SaveEvent(ctx, ev4); err != nil {
		t.Fatalf("save ev4: %v", err)
	}

	// 5. Another event with both content and tag tokens for ordering checks
	ev5 := event.New()
	ev5.Kind = kind.TextNote.K
	ev5.Pubkey = sign.Pub()
	ev5.CreatedAt = now - 1
	ev5.Content = []byte("alpha DELTA mixed-case and link http://foo.bar")
	ev5.Tags = tag.NewS()
	*ev5.Tags = append(*ev5.Tags, tag.NewFromAny("t", "zeta"))
	ev5.Sign(sign)
	if _, _, err := db.SaveEvent(ctx, ev5); err != nil {
		t.Fatalf("save ev5: %v", err)
	}

	// Small sleep to ensure created_at ordering is the only factor
	time.Sleep(5 * time.Millisecond)

	// Helper to run a search and return IDs
	run := func(q string) ([]*event.E, error) {
		f := &filter.F{Search: []byte(q)}
		return db.QueryEvents(ctx, f)
	}

	// Single-term search: alpha -> should match ev1 and ev5 ordered by created_at desc (ev5 newer)
	if evs, err := run("alpha"); err != nil {
		t.Fatalf("search alpha: %v", err)
	} else {
		if len(evs) != 2 {
			t.Fatalf("alpha expected 2 results, got %d", len(evs))
		}
		if !(evs[0].CreatedAt >= evs[1].CreatedAt) {
			t.Fatalf("results not ordered by created_at desc")
		}
	}

	// Overlap term beta -> ev1 and ev2
	if evs, err := run("beta"); err != nil {
		t.Fatalf("search beta: %v", err)
	} else if len(evs) != 2 {
		t.Fatalf("beta expected 2 results, got %d", len(evs))
	}

	// Unique term gamma -> only ev2
	if evs, err := run("gamma"); err != nil {
		t.Fatalf("search gamma: %v", err)
	} else if len(evs) != 1 {
		t.Fatalf("gamma expected 1 result, got %d", len(evs))
	}

	// URL terms should be ignored: example -> appears only as URL in ev1/ev3/ev5; tokenizer ignores URLs so expect 0
	if evs, err := run("example"); err != nil {
		t.Fatalf("search example: %v", err)
	} else if len(evs) != 0 {
		t.Fatalf("example expected 0 results (URL tokens ignored), got %d", len(evs))
	}

	// Tag words searchable: delta should match ev4 and ev5 (delta in tag for ev4, in content for ev5)
	if evs, err := run("delta"); err != nil {
		t.Fatalf("search delta: %v", err)
	} else if len(evs) != 2 {
		t.Fatalf("delta expected 2 results, got %d", len(evs))
	}

	// Very short token ignored: single-letter should yield 0
	if evs, err := run("a"); err != nil {
		t.Fatalf("search short token: %v", err)
	} else if len(evs) != 0 {
		t.Fatalf("single-letter expected 0 results, got %d", len(evs))
	}

	// 64-char hex should be ignored
	hex64 := "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
	if evs, err := run(hex64); err != nil {
		t.Fatalf("search hex64: %v", err)
	} else if len(evs) != 0 {
		t.Fatalf("hex64 expected 0 results, got %d", len(evs))
	}

	// nostr: scheme ignored
	if evs, err := run("nostr:nevent1qqqqq"); err != nil {
		t.Fatalf("search nostr: %v", err)
	} else if len(evs) != 0 {
		t.Fatalf("nostr: expected 0 results, got %d", len(evs))
	}
}
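`TokenHashes` itself is not part of this diff. As an illustration only, a tokenizer consistent with the rules the test asserts might look like the following; every name here is hypothetical, the minimum token length is a guess (the test only pins down that single letters are excluded), and the real implementation hashes tokens into the 8-byte word slot rather than returning strings:

```
// Hypothetical sketch of a tokenizer matching the rules asserted above.
// Self-contained: needs only the standard library "strings" package.
func tokenize(in []byte) (tokens []string) {
	for _, f := range strings.Fields(strings.ToLower(string(in))) {
		switch {
		case strings.HasPrefix(f, "http://"),
			strings.HasPrefix(f, "https://"),
			strings.HasPrefix(f, "www."): // URLs are ignored
		case strings.HasPrefix(f, "nostr:"): // nostr: URIs are ignored
		case strings.HasPrefix(f, "#["): // #[n] mentions are ignored
		case len(f) == 64 && isHex(f): // bare 64-char hex (ids/pubkeys) ignored
		case len(f) < 2: // very short tokens are ignored (cutoff is a guess)
		default:
			tokens = append(tokens, f)
		}
	}
	return
}

func isHex(s string) bool {
	for _, r := range s {
		if !strings.ContainsRune("0123456789abcdef", r) {
			return false
		}
	}
	return true
}
```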
@@ -173,10 +173,10 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
		}
	}
	if ev.CreatedAt < maxTs {
		// err = fmt.Errorf(
		// 	"blocked: was deleted by address %s: event is older than the delete: event: %d delete: %d",
		// 	at, ev.CreatedAt, maxTs,
		// )
		err = errorf.E(
			"blocked: %0x was deleted by address %s because it is older than the delete: event: %d delete: %d",
			ev.ID, at, ev.CreatedAt, maxTs,
		)
		return
	}
	return
@@ -203,22 +203,14 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
			return
		}
		if len(s) > 0 {
			// For e-tag deletions (delete by ID), any deletion event means the event cannot be resubmitted
			// regardless of timestamp, since it's a specific deletion of this exact event
			// err = errorf.E(
			// 	"blocked: was deleted by ID and cannot be resubmitted",
			// 	// ev.ID,
			// )
			// Any e-tag deletion found means the exact event was deleted and cannot be resubmitted
			err = errorf.E("blocked: %0x has been deleted", ev.ID)
			return
		}
	}
	if len(sers) > 0 {
		// For e-tag deletions (delete by ID), any deletion event means the event cannot be resubmitted
		// regardless of timestamp, since it's a specific deletion of this exact event
		// err = errorf.E(
		// 	"blocked: was deleted by ID and cannot be resubmitted",
		// 	// ev.ID,
		// )
		// Any e-tag deletion found means the exact event was deleted and cannot be resubmitted
		err = errorf.E("blocked: %0x has been deleted", ev.ID)
		return
	}
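The two branches above mirror NIP-09 deletion semantics: an address (`a` tag) deletion only blocks events older than the delete, while an `e`-tag deletion names one exact event ID and blocks it permanently. As an illustration only, a kind-5 delete event carrying both tag forms could be assembled with the encoders used elsewhere in this diff; the kind literal and the placeholder values are assumptions, and the event is left unsigned:

```
// Illustrative NIP-09 delete event; placeholders are not real IDs.
del := event.New()
del.Kind = 5 // deletion request kind per NIP-09
del.CreatedAt = timestamp.Now().V
del.Content = []byte("published by accident")
del.Tags = tag.NewS()
// delete one exact event by ID: blocks resubmission permanently
*del.Tags = append(*del.Tags, tag.NewFromAny("e", "<64-hex event id>"))
// delete an addressable slot: blocks anything older than this delete
*del.Tags = append(*del.Tags, tag.NewFromAny("a", "30023:<author pubkey hex>:my-article"))
```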
@@ -7,13 +7,16 @@ import (

	"lol.mleku.dev/chk"
	"next.orly.dev/pkg/database/indexes/types"
	"next.orly.dev/pkg/encoders/event"
	"next.orly.dev/pkg/encoders/filter"
	"next.orly.dev/pkg/interfaces/store"
)

// QueryForIds retrieves a list of IdPkTs based on the provided filter.
// It supports filtering by ranges and tags but disallows filtering by Ids.
-// Results are sorted by timestamp in reverse chronological order.
+// Results are sorted by timestamp in reverse chronological order by default.
+// When a search query is present, results are ranked by a 50/50 blend of
+// match count (how many distinct search terms matched) and recency.
// Returns an error if the filter contains Ids or if any operation fails.
func (d *D) QueryForIds(c context.Context, f *filter.F) (
	idPkTs []*store.IdPkTs, err error,
@@ -29,6 +32,9 @@ func (d *D) QueryForIds(c context.Context, f *filter.F) (
	}
	var results []*store.IdPkTs
	var founds []*types.Uint40
	// When searching, we want to count how many index ranges (search terms)
	// matched each note. We'll track counts by serial.
	counts := make(map[uint64]int)
	for _, idx := range idxs {
		if founds, err = d.GetSerialsByRange(idx); chk.E(err) {
			return
@@ -37,6 +43,12 @@ func (d *D) QueryForIds(c context.Context, f *filter.F) (
		if tmp, err = d.GetFullIdPubkeyBySerials(founds); chk.E(err) {
			return
		}
		// If this query is driven by Search terms, increment count per serial
		if len(f.Search) > 0 {
			for _, v := range tmp {
				counts[v.Ser]++
			}
		}
		results = append(results, tmp...)
	}
	// deduplicate in case this somehow happened (such as two or more
@@ -48,12 +60,109 @@ func (d *D) QueryForIds(c context.Context, f *filter.F) (
			idPkTs = append(idPkTs, idpk)
		}
	}
-	// sort results by timestamp in reverse chronological order
-	sort.Slice(
-		idPkTs, func(i, j int) bool {
-			return idPkTs[i].Ts > idPkTs[j].Ts
-		},
-	)

	// If search is combined with Authors/Kinds/Tags, require events to match ALL of those present fields in addition to the word match.
	if len(f.Search) > 0 && ((f.Authors != nil && f.Authors.Len() > 0) || (f.Kinds != nil && f.Kinds.Len() > 0) || (f.Tags != nil && f.Tags.Len() > 0)) {
		// Build serial list for fetching full events
		serials := make([]*types.Uint40, 0, len(idPkTs))
		for _, v := range idPkTs {
			s := new(types.Uint40)
			s.Set(v.Ser)
			serials = append(serials, s)
		}
		var evs map[uint64]*event.E
		if evs, err = d.FetchEventsBySerials(serials); chk.E(err) {
			return
		}
		filtered := make([]*store.IdPkTs, 0, len(idPkTs))
		for _, v := range idPkTs {
			ev, ok := evs[v.Ser]
			if !ok || ev == nil {
				continue
			}
			matchesAll := true
			if f.Authors != nil && f.Authors.Len() > 0 && !f.Authors.Contains(ev.Pubkey) {
				matchesAll = false
			}
			if matchesAll && f.Kinds != nil && f.Kinds.Len() > 0 && !f.Kinds.Contains(ev.Kind) {
				matchesAll = false
			}
			if matchesAll && f.Tags != nil && f.Tags.Len() > 0 {
				// Require the event to satisfy all tag filters as in MatchesIgnoringTimestampConstraints
				tagOK := true
				for _, t := range *f.Tags {
					if t.Len() < 2 {
						continue
					}
					key := t.Key()
					values := t.T[1:]
					if !ev.Tags.ContainsAny(key, values) {
						tagOK = false
						break
					}
				}
				if !tagOK {
					matchesAll = false
				}
			}
			if matchesAll {
				filtered = append(filtered, v)
			}
		}
		idPkTs = filtered
	}

	if len(f.Search) == 0 {
		// No search query: sort by timestamp in reverse chronological order
		sort.Slice(
			idPkTs, func(i, j int) bool {
				return idPkTs[i].Ts > idPkTs[j].Ts
			},
		)
	} else {
		// Search query present: blend match count relevance with recency (50/50)
		// Normalize both match count and timestamp to [0,1] and compute score.
		var maxCount int
		var minTs, maxTs int64
		if len(idPkTs) > 0 {
			minTs, maxTs = idPkTs[0].Ts, idPkTs[0].Ts
		}
		for _, v := range idPkTs {
			if c := counts[v.Ser]; c > maxCount {
				maxCount = c
			}
			if v.Ts < minTs {
				minTs = v.Ts
			}
			if v.Ts > maxTs {
				maxTs = v.Ts
			}
		}
		// Precompute denominator to avoid div-by-zero
		tsSpan := maxTs - minTs
		if tsSpan <= 0 {
			tsSpan = 1
		}
		if maxCount <= 0 {
			maxCount = 1
		}
		sort.Slice(
			idPkTs, func(i, j int) bool {
				ci := float64(counts[idPkTs[i].Ser]) / float64(maxCount)
				cj := float64(counts[idPkTs[j].Ser]) / float64(maxCount)
				ai := float64(idPkTs[i].Ts-minTs) / float64(tsSpan)
				aj := float64(idPkTs[j].Ts-minTs) / float64(tsSpan)
				si := 0.5*ci + 0.5*ai
				sj := 0.5*cj + 0.5*aj
				if si == sj {
					// tie-break by recency
					return idPkTs[i].Ts > idPkTs[j].Ts
				}
				return si > sj
			},
		)
	}

	if f.Limit != nil && len(idPkTs) > int(*f.Limit) {
		idPkTs = idPkTs[:*f.Limit]
	}
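As a worked example of the blend: suppose a query has two search terms. An event that matches both terms but is the oldest in the result set scores 0.5*(2/2) + 0.5*0 = 0.5, while an event that matches only one term but is the newest scores 0.5*(1/2) + 0.5*1 = 0.75, so the fresher partial match outranks the older full match; exact ties fall back to plain recency.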
@@ -9,14 +9,23 @@ import (

	"github.com/dgraph-io/badger/v4"
	"lol.mleku.dev/chk"
	"lol.mleku.dev/log"
	"next.orly.dev/pkg/database/indexes"
	"next.orly.dev/pkg/database/indexes/types"
	"next.orly.dev/pkg/encoders/event"
	"next.orly.dev/pkg/encoders/filter"
	"next.orly.dev/pkg/encoders/hex"
	"next.orly.dev/pkg/encoders/kind"
	"next.orly.dev/pkg/encoders/tag"
)

var (
	// ErrOlderThanExisting is returned when a candidate event is older than an existing replaceable/addressable event.
	ErrOlderThanExisting = errors.New("older than existing event")
	// ErrMissingDTag is returned when a parameterized replaceable event lacks the required 'd' tag.
	ErrMissingDTag = errors.New("event is missing a d tag identifier")
)

func (d *D) GetSerialsFromFilter(f *filter.F) (
	sers types.Uint40s, err error,
) {
@@ -34,6 +43,65 @@ func (d *D) GetSerialsFromFilter(f *filter.F) (
	return
}

// WouldReplaceEvent checks if the provided event would replace existing events
// based on Nostr's replaceable or parameterized replaceable semantics. It
// returns true along with the serials of events that should be replaced if the
// candidate is newer-or-equal. If an existing event is newer, it returns
// (false, serials, ErrOlderThanExisting). If no conflicts exist, it returns
// (false, nil, nil).
func (d *D) WouldReplaceEvent(ev *event.E) (bool, types.Uint40s, error) {
	// Only relevant for replaceable or parameterized replaceable kinds
	if !(kind.IsReplaceable(ev.Kind) || kind.IsParameterizedReplaceable(ev.Kind)) {
		return false, nil, nil
	}

	var f *filter.F
	if kind.IsReplaceable(ev.Kind) {
		f = &filter.F{
			Authors: tag.NewFromBytesSlice(ev.Pubkey),
			Kinds:   kind.NewS(kind.New(ev.Kind)),
		}
	} else {
		// parameterized replaceable requires 'd' tag
		dTag := ev.Tags.GetFirst([]byte("d"))
		if dTag == nil {
			return false, nil, ErrMissingDTag
		}
		f = &filter.F{
			Authors: tag.NewFromBytesSlice(ev.Pubkey),
			Kinds:   kind.NewS(kind.New(ev.Kind)),
			Tags: tag.NewS(
				tag.NewFromAny("d", dTag.Value()),
			),
		}
	}

	sers, err := d.GetSerialsFromFilter(f)
	if chk.E(err) {
		return false, nil, err
	}
	if len(sers) == 0 {
		return false, nil, nil
	}

	// Determine if any existing event is newer than the candidate
	shouldReplace := true
	for _, s := range sers {
		oldEv, ferr := d.FetchEventBySerial(s)
		if chk.E(ferr) {
			continue
		}
		if ev.CreatedAt < oldEv.CreatedAt {
			shouldReplace = false
			break
		}
	}
	if shouldReplace {
		return true, sers, nil
	}
	return false, sers, ErrOlderThanExisting
}
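A plausible call site, sketched below: rejecting an incoming replaceable event up front when a newer version already exists, before any index writes happen. The handler shape and error strings are illustrative; only `WouldReplaceEvent`, `SaveEvent`, and the two sentinel errors come from this diff.

```
func acceptEvent(ctx context.Context, db *D, ev *event.E) error {
	wouldReplace, _, err := db.WouldReplaceEvent(ev)
	switch {
	case errors.Is(err, ErrOlderThanExisting):
		// a stored version is newer; refuse without touching indexes
		return errors.New("blocked: a newer version of this event exists")
	case errors.Is(err, ErrMissingDTag):
		return err
	case err != nil:
		return err
	}
	// wouldReplace == true means SaveEvent will delete the superseded
	// versions; false with a nil error means there is nothing to replace.
	_ = wouldReplace
	_, _, err = db.SaveEvent(ctx, ev)
	return err
}
```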
// SaveEvent saves an event to the database, generating all the necessary indexes.
func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
	if ev == nil {
@@ -66,117 +134,37 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
		err = fmt.Errorf("blocked: %s", err.Error())
		return
	}
	// check for replacement
	if kind.IsReplaceable(ev.Kind) {
		// find the events and check timestamps before deleting
		f := &filter.F{
			Authors: tag.NewFromBytesSlice(ev.Pubkey),
			Kinds:   kind.NewS(kind.New(ev.Kind)),
		}
	// check for replacement (separated check vs deletion)
	if kind.IsReplaceable(ev.Kind) || kind.IsParameterizedReplaceable(ev.Kind) {
		var wouldReplace bool
		var sers types.Uint40s
		if sers, err = d.GetSerialsFromFilter(f); chk.E(err) {
		var werr error
		if wouldReplace, sers, werr = d.WouldReplaceEvent(ev); werr != nil {
			if errors.Is(werr, ErrOlderThanExisting) {
				if kind.IsReplaceable(ev.Kind) {
					err = errors.New("blocked: event is older than existing replaceable event")
				} else {
					err = errors.New("blocked: event is older than existing addressable event")
				}
				return
			}
			if errors.Is(werr, ErrMissingDTag) {
				// keep behavior consistent with previous implementation
				err = ErrMissingDTag
				return
			}
			// any other error
			return
		}
		// if found, check timestamps before deleting
		if len(sers) > 0 {
			var shouldReplace bool = true
		if wouldReplace {
			for _, s := range sers {
				var oldEv *event.E
				if oldEv, err = d.FetchEventBySerial(s); chk.E(err) {
					continue
				}
				// Only replace if the new event is newer or same timestamp
				if ev.CreatedAt < oldEv.CreatedAt {
					// log.I.F(
					// 	"SaveEvent: rejecting older replaceable event ID=%s (created_at=%d) - existing event ID=%s (created_at=%d)",
					// 	hex.Enc(ev.ID), ev.CreatedAt, hex.Enc(oldEv.ID),
					// 	oldEv.CreatedAt,
					// )
					shouldReplace = false
					break
				}
			}
			if shouldReplace {
				for _, s := range sers {
					var oldEv *event.E
					if oldEv, err = d.FetchEventBySerial(s); chk.E(err) {
						continue
					}
					// log.I.F(
					// 	"SaveEvent: replacing older replaceable event ID=%s (created_at=%d) with newer event ID=%s (created_at=%d)",
					// 	hex.Enc(oldEv.ID), oldEv.CreatedAt, hex.Enc(ev.ID),
					// 	ev.CreatedAt,
					// )
					if err = d.DeleteEventBySerial(
						c, s, oldEv,
					); chk.E(err) {
						continue
					}
				}
			} else {
				// Don't save the older event - return an error
				err = errors.New("blocked: event is older than existing replaceable event")
				return
			}
		}
	} else if kind.IsParameterizedReplaceable(ev.Kind) {
		// find the events and check timestamps before deleting
		dTag := ev.Tags.GetFirst([]byte("d"))
		if dTag == nil {
			err = errors.New("event is missing a d tag identifier")
			return
		}
		f := &filter.F{
			Authors: tag.NewFromBytesSlice(ev.Pubkey),
			Kinds:   kind.NewS(kind.New(ev.Kind)),
			Tags: tag.NewS(
				tag.NewFromAny("d", dTag.Value()),
			),
		}
		var sers types.Uint40s
		if sers, err = d.GetSerialsFromFilter(f); chk.E(err) {
			return
		}
		// if found, check timestamps before deleting
		if len(sers) > 0 {
			var shouldReplace bool = true
			for _, s := range sers {
				var oldEv *event.E
				if oldEv, err = d.FetchEventBySerial(s); chk.E(err) {
				if err = d.DeleteEventBySerial(c, s, oldEv); chk.E(err) {
					continue
				}
				// Only replace if the new event is newer or same timestamp
				if ev.CreatedAt < oldEv.CreatedAt {
					// log.I.F(
					// 	"SaveEvent: rejecting older addressable event ID=%s (created_at=%d) - existing event ID=%s (created_at=%d)",
					// 	hex.Enc(ev.ID), ev.CreatedAt, hex.Enc(oldEv.ID),
					// 	oldEv.CreatedAt,
					// )
					shouldReplace = false
					break
				}
			}
			if shouldReplace {
				for _, s := range sers {
					var oldEv *event.E
					if oldEv, err = d.FetchEventBySerial(s); chk.E(err) {
						continue
					}
					// log.I.F(
					// 	"SaveEvent: replacing older addressable event ID=%s (created_at=%d) with newer event ID=%s (created_at=%d)",
					// 	hex.Enc(oldEv.ID), oldEv.CreatedAt, hex.Enc(ev.ID),
					// 	ev.CreatedAt,
					// )
					if err = d.DeleteEventBySerial(
						c, s, oldEv,
					); chk.E(err) {
						continue
					}
				}
			} else {
				// Don't save the older event - return an error
				err = errors.New("blocked: event is older than existing addressable event")
				return
			}
		}
	}
@@ -230,10 +218,10 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
			return
		},
	)
-	// log.T.F(
-	// 	"total data written: %d bytes keys %d bytes values for event ID %s", kc,
-	// 	vc, hex.Enc(ev.ID),
-	// )
+	log.T.F(
+		"total data written: %d bytes keys %d bytes values for event ID %s", kc,
+		vc, hex.Enc(ev.ID),
+	)
	// log.T.C(
	// 	func() string {
	// 		return fmt.Sprintf("event:\n%s\n", ev.Serialize())
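For orientation: replaceable kinds (for example kind 0 metadata) occupy one slot per author and kind, while parameterized replaceable ("addressable") kinds such as 30023 occupy one slot per author, kind, and `d` identifier, which is why the replacement check only adds a `d`-tag filter in the second case. A sketch of two addressable events that would not replace each other; the field values are illustrative and the events are left unsigned:

```
a1 := event.New()
a1.Kind = 30023 // NIP-23 long-form article; literal used for illustration
a1.Tags = tag.NewS()
*a1.Tags = append(*a1.Tags, tag.NewFromAny("d", "my-article"))

a2 := event.New()
a2.Kind = 30023
a2.Tags = tag.NewS()
*a2.Tags = append(*a2.Tags, tag.NewFromAny("d", "another-article"))
// Saving a2 leaves a1 intact: the 'd' identifiers differ, so the
// author+kind+d filter used by WouldReplaceEvent finds no conflict.
```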
@@ -188,3 +188,30 @@ func (d *D) GetPaymentHistory(pubkey []byte) ([]Payment, error) {

	return payments, err
}

// IsFirstTimeUser checks if a user is logging in for the first time and marks them as seen
func (d *D) IsFirstTimeUser(pubkey []byte) (bool, error) {
	key := fmt.Sprintf("firstlogin:%s", hex.EncodeToString(pubkey))

	isFirstTime := false
	err := d.DB.Update(
		func(txn *badger.Txn) error {
			_, err := txn.Get([]byte(key))
			if errors.Is(err, badger.ErrKeyNotFound) {
				// First time - record the login
				isFirstTime = true
				now := time.Now()
				data, err := json.Marshal(map[string]interface{}{
					"first_login": now,
				})
				if err != nil {
					return err
				}
				return txn.Set([]byte(key), data)
			}
			return err // Return any other error as-is
		},
	)

	return isFirstTime, err
}
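A plausible consumer, sketched: greeting a pubkey exactly once on first login. The welcome hook is hypothetical; only `IsFirstTimeUser` comes from this diff. Because its check-and-mark runs as a single read-modify-write inside one Badger update transaction, the greeting should fire at most once per pubkey even when logins race.

```
func onLogin(db *D, pubkey []byte, welcome func(pk []byte)) error {
	first, err := db.IsFirstTimeUser(pubkey)
	if err != nil {
		return err
	}
	if first {
		welcome(pubkey) // e.g. send a welcome DM or apply a free-tier grant
	}
	return nil
}
```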