Compare commits
16 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
354a2f1cda
|
|||
|
0123c2d6f5
|
|||
|
f092d817c9
|
|||
|
c7eb532443
|
|||
|
e56b3f0083
|
|||
|
|
9064b3ab5f | ||
|
3486d3d4ab
|
|||
|
0ba555c6a8
|
|||
|
54f65d8740
|
|||
|
2ff8b47410
|
|||
|
ba2d35012c
|
|||
|
b70f03bce0
|
|||
|
8954846864
|
|||
|
5e6c0b80aa
|
|||
|
80ab3caa5f
|
|||
|
62f244d114
|
10
.github/workflows/go.yml
vendored
10
.github/workflows/go.yml
vendored
@@ -75,11 +75,11 @@ jobs:
|
||||
mkdir -p release-binaries
|
||||
|
||||
# Build for different platforms
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=linux GOARCH=amd64 CGO_ENABLED=1 go build -o release-binaries/orly-${VERSION}-linux-amd64 .
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=linux GOARCH=arm64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-linux-arm64 .
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-darwin-amd64 .
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=darwin GOARCH=arm64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-darwin-arm64 .
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-windows-amd64.exe .
|
||||
GOEXPERIMENT=greenteagc,jsonv2 GOOS=linux GOARCH=amd64 CGO_ENABLED=1 go build -ldflags "-s -w" -o release-binaries/orly-${VERSION}-linux-amd64 .
|
||||
# GOEXPERIMENT=greenteagc,jsonv2 GOOS=linux GOARCH=arm64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-linux-arm64 .
|
||||
# GOEXPERIMENT=greenteagc,jsonv2 GOOS=darwin GOARCH=amd64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-darwin-amd64 .
|
||||
# GOEXPERIMENT=greenteagc,jsonv2 GOOS=darwin GOARCH=arm64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-darwin-arm64 .
|
||||
# GOEXPERIMENT=greenteagc,jsonv2 GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o release-binaries/orly-${VERSION}-windows-amd64.exe .
|
||||
|
||||
# Note: Only building orly binary as requested
|
||||
# Other cmd utilities (aggregator, benchmark, convert, policytest, stresstest) are development tools
|
||||
|
||||
@@ -37,7 +37,6 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
|
||||
}
|
||||
}()
|
||||
|
||||
log.I.F("HandleEvent: continuing with event processing...")
|
||||
if len(msg) > 0 {
|
||||
log.I.F("extra '%s'", msg)
|
||||
}
|
||||
@@ -176,6 +175,18 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
|
||||
}
|
||||
return
|
||||
}
|
||||
// validate timestamp - reject events too far in the future (more than 1 hour)
|
||||
now := time.Now().Unix()
|
||||
if env.E.CreatedAt > now+3600 {
|
||||
if err = Ok.Invalid(
|
||||
l, env,
|
||||
"timestamp too far in the future",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// verify the signature
|
||||
var ok bool
|
||||
if ok, err = env.Verify(); chk.T(err) {
|
||||
|
||||
@@ -283,13 +283,13 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
|
||||
if !authorized {
|
||||
continue // not authorized to see this private event
|
||||
}
|
||||
|
||||
tmp = append(tmp, ev)
|
||||
continue
|
||||
// Event has private tag and user is authorized - continue to privileged check
|
||||
}
|
||||
|
||||
if l.Config.ACLMode != "none" &&
|
||||
kind.IsPrivileged(ev.Kind) && accessLevel != "admin" { // admins can see all events
|
||||
// Always filter privileged events based on kind, regardless of ACLMode
|
||||
// Privileged events should only be sent to users who are authenticated and
|
||||
// are either the event author or listed in p tags
|
||||
if kind.IsPrivileged(ev.Kind) && accessLevel != "admin" { // admins can see all events
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
@@ -357,6 +357,57 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
|
||||
)
|
||||
}
|
||||
} else {
|
||||
// Check if policy defines this event as privileged (even if not in hardcoded list)
|
||||
// Policy check will handle this later, but we can skip it here if not authenticated
|
||||
// to avoid unnecessary processing
|
||||
if l.policyManager != nil && l.policyManager.Manager != nil && l.policyManager.Manager.IsEnabled() {
|
||||
rule, hasRule := l.policyManager.Rules[int(ev.Kind)]
|
||||
if hasRule && rule.Privileged && accessLevel != "admin" {
|
||||
pk := l.authedPubkey.Load()
|
||||
if pk == nil {
|
||||
// Not authenticated - cannot see policy-privileged events
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"policy-privileged event %s denied - not authenticated",
|
||||
ev.ID,
|
||||
)
|
||||
},
|
||||
)
|
||||
continue
|
||||
}
|
||||
// Policy check will verify authorization later, but we need to check
|
||||
// if user is party to the event here
|
||||
authorized := false
|
||||
if utils.FastEqual(ev.Pubkey, pk) {
|
||||
authorized = true
|
||||
} else {
|
||||
// Check p tags
|
||||
pTags := ev.Tags.GetAll([]byte("p"))
|
||||
for _, pTag := range pTags {
|
||||
var pt []byte
|
||||
if pt, err = hexenc.Dec(string(pTag.Value())); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
if utils.FastEqual(pt, pk) {
|
||||
authorized = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if !authorized {
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"policy-privileged event %s does not contain the logged in pubkey %0x",
|
||||
ev.ID, pk,
|
||||
)
|
||||
},
|
||||
)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
tmp = append(tmp, ev)
|
||||
}
|
||||
}
|
||||
@@ -384,27 +435,28 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
|
||||
}
|
||||
|
||||
// Deduplicate events (in case chunk processing returned duplicates)
|
||||
if len(allEvents) > 0 {
|
||||
// Use events (already filtered for privileged/policy) instead of allEvents
|
||||
if len(events) > 0 {
|
||||
seen := make(map[string]struct{})
|
||||
var deduplicatedEvents event.S
|
||||
originalCount := len(allEvents)
|
||||
for _, ev := range allEvents {
|
||||
originalCount := len(events)
|
||||
for _, ev := range events {
|
||||
eventID := hexenc.Enc(ev.ID)
|
||||
if _, exists := seen[eventID]; !exists {
|
||||
seen[eventID] = struct{}{}
|
||||
deduplicatedEvents = append(deduplicatedEvents, ev)
|
||||
}
|
||||
}
|
||||
allEvents = deduplicatedEvents
|
||||
if originalCount != len(allEvents) {
|
||||
log.T.F("REQ %s: deduplicated %d events to %d unique events", env.Subscription, originalCount, len(allEvents))
|
||||
events = deduplicatedEvents
|
||||
if originalCount != len(events) {
|
||||
log.T.F("REQ %s: deduplicated %d events to %d unique events", env.Subscription, originalCount, len(events))
|
||||
}
|
||||
}
|
||||
|
||||
// Apply managed ACL filtering for read access if managed ACL is active
|
||||
if acl.Registry.Active.Load() == "managed" {
|
||||
var aclFilteredEvents event.S
|
||||
for _, ev := range allEvents {
|
||||
for _, ev := range events {
|
||||
// Check if event is banned
|
||||
eventID := hex.EncodeToString(ev.ID)
|
||||
if banned, err := l.getManagedACL().IsEventBanned(eventID); err == nil && banned {
|
||||
@@ -430,13 +482,13 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
|
||||
|
||||
aclFilteredEvents = append(aclFilteredEvents, ev)
|
||||
}
|
||||
allEvents = aclFilteredEvents
|
||||
events = aclFilteredEvents
|
||||
}
|
||||
|
||||
// Apply private tag filtering - only show events with "private" tags to authorized users
|
||||
var privateFilteredEvents event.S
|
||||
authedPubkey := l.authedPubkey.Load()
|
||||
for _, ev := range allEvents {
|
||||
for _, ev := range events {
|
||||
// Check if event has private tags
|
||||
hasPrivateTag := false
|
||||
var privatePubkey []byte
|
||||
@@ -469,10 +521,10 @@ func (l *Listener) HandleReq(msg []byte) (err error) {
|
||||
log.D.F("private tag: filtering out event %s from unauthorized user", hexenc.Enc(ev.ID))
|
||||
}
|
||||
}
|
||||
allEvents = privateFilteredEvents
|
||||
events = privateFilteredEvents
|
||||
|
||||
seen := make(map[string]struct{})
|
||||
for _, ev := range allEvents {
|
||||
for _, ev := range events {
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
|
||||
@@ -71,6 +71,10 @@ whitelist:
|
||||
// Set read limit immediately after connection is established
|
||||
conn.SetReadLimit(DefaultMaxMessageSize)
|
||||
log.D.F("set read limit to %d bytes (%d MB) for %s", DefaultMaxMessageSize, DefaultMaxMessageSize/units.Mb, remote)
|
||||
|
||||
// Set initial read deadline - pong handler will extend it when pongs are received
|
||||
conn.SetReadDeadline(time.Now().Add(DefaultPongWait))
|
||||
|
||||
defer conn.Close()
|
||||
listener := &Listener{
|
||||
ctx: ctx,
|
||||
@@ -79,6 +83,16 @@ whitelist:
|
||||
remote: remote,
|
||||
req: r,
|
||||
startTime: time.Now(),
|
||||
writeChan: make(chan WriteRequest, 100), // Buffered channel for writes
|
||||
writeDone: make(chan struct{}),
|
||||
}
|
||||
|
||||
// Start write worker goroutine
|
||||
go listener.writeWorker()
|
||||
|
||||
// Register write channel with publisher
|
||||
if socketPub := listener.publishers.GetSocketPublisher(); socketPub != nil {
|
||||
socketPub.SetWriteChan(conn, listener.writeChan)
|
||||
}
|
||||
|
||||
// Check for blacklisted IPs
|
||||
@@ -100,18 +114,20 @@ whitelist:
|
||||
log.D.F("AUTH challenge sent successfully to %s", remote)
|
||||
}
|
||||
ticker := time.NewTicker(DefaultPingWait)
|
||||
// Set pong handler
|
||||
// Set pong handler - extends read deadline when pongs are received
|
||||
conn.SetPongHandler(func(string) error {
|
||||
conn.SetReadDeadline(time.Now().Add(DefaultPongWait))
|
||||
return nil
|
||||
})
|
||||
// Set ping handler
|
||||
conn.SetPingHandler(func(string) error {
|
||||
// Set ping handler - extends read deadline when pings are received
|
||||
// Send pong through write channel
|
||||
conn.SetPingHandler(func(msg string) error {
|
||||
conn.SetReadDeadline(time.Now().Add(DefaultPongWait))
|
||||
return conn.WriteControl(websocket.PongMessage, []byte{}, time.Now().Add(DefaultWriteTimeout))
|
||||
deadline := time.Now().Add(DefaultWriteTimeout)
|
||||
return listener.WriteControl(websocket.PongMessage, []byte{}, deadline)
|
||||
})
|
||||
// Don't pass cancel to Pinger - it should not be able to cancel the connection context
|
||||
go s.Pinger(ctx, conn, ticker)
|
||||
go s.Pinger(ctx, listener, ticker)
|
||||
defer func() {
|
||||
log.D.F("closing websocket connection from %s", remote)
|
||||
|
||||
@@ -119,6 +135,11 @@ whitelist:
|
||||
cancel()
|
||||
ticker.Stop()
|
||||
|
||||
// Close write channel to signal worker to exit
|
||||
close(listener.writeChan)
|
||||
// Wait for write worker to finish
|
||||
<-listener.writeDone
|
||||
|
||||
// Cancel all subscriptions for this connection
|
||||
log.D.F("cancelling subscriptions for %s", remote)
|
||||
listener.publishers.Receive(&W{
|
||||
@@ -159,14 +180,14 @@ whitelist:
|
||||
var msg []byte
|
||||
log.T.F("waiting for message from %s", remote)
|
||||
|
||||
// Set read deadline for context cancellation
|
||||
deadline := time.Now().Add(DefaultPongWait)
|
||||
// Don't set read deadline here - it's set initially and extended by pong handler
|
||||
// This prevents premature timeouts on idle connections with active subscriptions
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
conn.SetReadDeadline(deadline)
|
||||
|
||||
// Block waiting for message; rely on pings and context cancellation to detect dead peers
|
||||
// The read deadline is managed by the pong handler which extends it when pongs are received
|
||||
typ, msg, err = conn.ReadMessage()
|
||||
|
||||
if err != nil {
|
||||
@@ -187,6 +208,12 @@ whitelist:
|
||||
log.T.F("connection from %s closed: %v", remote, err)
|
||||
return
|
||||
}
|
||||
// Handle timeout errors specifically - these can occur on idle connections
|
||||
// but pongs should extend the deadline, so a timeout usually means dead connection
|
||||
if strings.Contains(err.Error(), "timeout") || strings.Contains(err.Error(), "deadline exceeded") {
|
||||
log.T.F("connection from %s read timeout (likely dead connection): %v", remote, err)
|
||||
return
|
||||
}
|
||||
// Handle message too big errors specifically
|
||||
if strings.Contains(err.Error(), "message too large") ||
|
||||
strings.Contains(err.Error(), "read limited at") {
|
||||
@@ -212,17 +239,44 @@ whitelist:
|
||||
}
|
||||
if typ == websocket.PingMessage {
|
||||
log.D.F("received PING from %s, sending PONG", remote)
|
||||
// Create a write context with timeout for pong response
|
||||
// Send pong through write channel
|
||||
deadline := time.Now().Add(DefaultWriteTimeout)
|
||||
conn.SetWriteDeadline(deadline)
|
||||
pongStart := time.Now()
|
||||
if err = conn.WriteControl(websocket.PongMessage, msg, deadline); chk.E(err) {
|
||||
if err = listener.WriteControl(websocket.PongMessage, msg, deadline); err != nil {
|
||||
pongDuration := time.Since(pongStart)
|
||||
log.E.F(
|
||||
"failed to send PONG to %s after %v: %v", remote,
|
||||
pongDuration, err,
|
||||
)
|
||||
return
|
||||
|
||||
// Check if this is a timeout vs a connection error
|
||||
isTimeout := strings.Contains(err.Error(), "timeout") || strings.Contains(err.Error(), "deadline exceeded")
|
||||
isConnectionError := strings.Contains(err.Error(), "use of closed network connection") ||
|
||||
strings.Contains(err.Error(), "broken pipe") ||
|
||||
strings.Contains(err.Error(), "connection reset") ||
|
||||
websocket.IsCloseError(err, websocket.CloseAbnormalClosure,
|
||||
websocket.CloseGoingAway,
|
||||
websocket.CloseNoStatusReceived)
|
||||
|
||||
if isConnectionError {
|
||||
log.E.F(
|
||||
"failed to send PONG to %s after %v (connection error): %v", remote,
|
||||
pongDuration, err,
|
||||
)
|
||||
return
|
||||
} else if isTimeout {
|
||||
// Timeout on pong - log but don't close immediately
|
||||
// The read deadline will catch dead connections
|
||||
log.W.F(
|
||||
"failed to send PONG to %s after %v (timeout, but connection may still be alive): %v", remote,
|
||||
pongDuration, err,
|
||||
)
|
||||
// Continue - don't close connection on pong timeout
|
||||
} else {
|
||||
// Unknown error - log and continue
|
||||
log.E.F(
|
||||
"failed to send PONG to %s after %v (unknown error): %v", remote,
|
||||
pongDuration, err,
|
||||
)
|
||||
// Continue - don't close on unknown errors
|
||||
}
|
||||
continue
|
||||
}
|
||||
pongDuration := time.Since(pongStart)
|
||||
log.D.F("sent PONG to %s successfully in %v", remote, pongDuration)
|
||||
@@ -241,7 +295,7 @@ whitelist:
|
||||
}
|
||||
|
||||
func (s *Server) Pinger(
|
||||
ctx context.Context, conn *websocket.Conn, ticker *time.Ticker,
|
||||
ctx context.Context, listener *Listener, ticker *time.Ticker,
|
||||
) {
|
||||
defer func() {
|
||||
log.D.F("pinger shutting down")
|
||||
@@ -257,19 +311,46 @@ func (s *Server) Pinger(
|
||||
pingCount++
|
||||
log.D.F("sending PING #%d", pingCount)
|
||||
|
||||
// Set write deadline for ping operation
|
||||
// Send ping through write channel
|
||||
deadline := time.Now().Add(DefaultWriteTimeout)
|
||||
conn.SetWriteDeadline(deadline)
|
||||
pingStart := time.Now()
|
||||
|
||||
if err = conn.WriteControl(websocket.PingMessage, []byte{}, deadline); err != nil {
|
||||
if err = listener.WriteControl(websocket.PingMessage, []byte{}, deadline); err != nil {
|
||||
pingDuration := time.Since(pingStart)
|
||||
log.E.F(
|
||||
"PING #%d FAILED after %v: %v", pingCount, pingDuration,
|
||||
err,
|
||||
)
|
||||
chk.E(err)
|
||||
return
|
||||
|
||||
// Check if this is a timeout vs a connection error
|
||||
isTimeout := strings.Contains(err.Error(), "timeout") || strings.Contains(err.Error(), "deadline exceeded")
|
||||
isConnectionError := strings.Contains(err.Error(), "use of closed network connection") ||
|
||||
strings.Contains(err.Error(), "broken pipe") ||
|
||||
strings.Contains(err.Error(), "connection reset") ||
|
||||
websocket.IsCloseError(err, websocket.CloseAbnormalClosure,
|
||||
websocket.CloseGoingAway,
|
||||
websocket.CloseNoStatusReceived)
|
||||
|
||||
if isConnectionError {
|
||||
log.E.F(
|
||||
"PING #%d FAILED after %v (connection error): %v", pingCount, pingDuration,
|
||||
err,
|
||||
)
|
||||
chk.E(err)
|
||||
return
|
||||
} else if isTimeout {
|
||||
// Timeout on ping - log but don't stop pinger immediately
|
||||
// The read deadline will catch dead connections
|
||||
log.W.F(
|
||||
"PING #%d timeout after %v (connection may still be alive): %v", pingCount, pingDuration,
|
||||
err,
|
||||
)
|
||||
// Continue - don't stop pinger on timeout
|
||||
} else {
|
||||
// Unknown error - log and continue
|
||||
log.E.F(
|
||||
"PING #%d FAILED after %v (unknown error): %v", pingCount, pingDuration,
|
||||
err,
|
||||
)
|
||||
// Continue - don't stop pinger on unknown errors
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
pingDuration := time.Since(pingStart)
|
||||
|
||||
143
app/listener.go
143
app/listener.go
@@ -7,16 +7,20 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/errorf"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/acl"
|
||||
"next.orly.dev/pkg/database"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/filter"
|
||||
"next.orly.dev/pkg/protocol/publish"
|
||||
"next.orly.dev/pkg/utils"
|
||||
"next.orly.dev/pkg/utils/atomic"
|
||||
)
|
||||
|
||||
// WriteRequest represents a write operation to be performed by the write worker
|
||||
type WriteRequest = publish.WriteRequest
|
||||
|
||||
type Listener struct {
|
||||
*Server
|
||||
conn *websocket.Conn
|
||||
@@ -28,6 +32,8 @@ type Listener struct {
|
||||
startTime time.Time
|
||||
isBlacklisted bool // Marker to identify blacklisted IPs
|
||||
blacklistTimeout time.Time // When to timeout blacklisted connections
|
||||
writeChan chan WriteRequest // Channel for write requests
|
||||
writeDone chan struct{} // Closed when write worker exits
|
||||
// Diagnostics: per-connection counters
|
||||
msgCount int
|
||||
reqCount int
|
||||
@@ -40,75 +46,80 @@ func (l *Listener) Ctx() context.Context {
|
||||
return l.ctx
|
||||
}
|
||||
|
||||
// writeWorker is the single goroutine that handles all writes to the websocket connection.
|
||||
// This serializes all writes to prevent concurrent write panics.
|
||||
func (l *Listener) writeWorker() {
|
||||
defer close(l.writeDone)
|
||||
for {
|
||||
select {
|
||||
case <-l.ctx.Done():
|
||||
return
|
||||
case req, ok := <-l.writeChan:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
deadline := req.Deadline
|
||||
if deadline.IsZero() {
|
||||
deadline = time.Now().Add(DefaultWriteTimeout)
|
||||
}
|
||||
l.conn.SetWriteDeadline(deadline)
|
||||
writeStart := time.Now()
|
||||
var err error
|
||||
if req.IsControl {
|
||||
err = l.conn.WriteControl(req.MsgType, req.Data, deadline)
|
||||
} else {
|
||||
err = l.conn.WriteMessage(req.MsgType, req.Data)
|
||||
}
|
||||
if err != nil {
|
||||
writeDuration := time.Since(writeStart)
|
||||
log.E.F("ws->%s write worker FAILED: len=%d duration=%v error=%v",
|
||||
l.remote, len(req.Data), writeDuration, err)
|
||||
// Check for connection errors - if so, stop the worker
|
||||
isConnectionError := strings.Contains(err.Error(), "use of closed network connection") ||
|
||||
strings.Contains(err.Error(), "broken pipe") ||
|
||||
strings.Contains(err.Error(), "connection reset") ||
|
||||
websocket.IsCloseError(err, websocket.CloseAbnormalClosure,
|
||||
websocket.CloseGoingAway,
|
||||
websocket.CloseNoStatusReceived)
|
||||
if isConnectionError {
|
||||
return
|
||||
}
|
||||
// Continue for other errors (timeouts, etc.)
|
||||
} else {
|
||||
writeDuration := time.Since(writeStart)
|
||||
if writeDuration > time.Millisecond*100 {
|
||||
log.D.F("ws->%s write worker SLOW: len=%d duration=%v",
|
||||
l.remote, len(req.Data), writeDuration)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Listener) Write(p []byte) (n int, err error) {
|
||||
start := time.Now()
|
||||
msgLen := len(p)
|
||||
|
||||
// Log message attempt with content preview (first 200 chars for diagnostics)
|
||||
preview := string(p)
|
||||
if len(preview) > 200 {
|
||||
preview = preview[:200] + "..."
|
||||
// Send write request to channel - non-blocking with timeout
|
||||
select {
|
||||
case <-l.ctx.Done():
|
||||
return 0, l.ctx.Err()
|
||||
case l.writeChan <- WriteRequest{Data: p, MsgType: websocket.TextMessage, IsControl: false}:
|
||||
return len(p), nil
|
||||
case <-time.After(DefaultWriteTimeout):
|
||||
log.E.F("ws->%s write channel timeout", l.remote)
|
||||
return 0, errorf.E("write channel timeout")
|
||||
}
|
||||
log.T.F(
|
||||
"ws->%s attempting write: len=%d preview=%q", l.remote, msgLen, preview,
|
||||
)
|
||||
}
|
||||
|
||||
// Use a separate context with timeout for writes to prevent race conditions
|
||||
// where the main connection context gets cancelled while writing events
|
||||
deadline := time.Now().Add(DefaultWriteTimeout)
|
||||
l.conn.SetWriteDeadline(deadline)
|
||||
|
||||
// Attempt the write operation
|
||||
writeStart := time.Now()
|
||||
if err = l.conn.WriteMessage(websocket.TextMessage, p); err != nil {
|
||||
writeDuration := time.Since(writeStart)
|
||||
totalDuration := time.Since(start)
|
||||
|
||||
// Log detailed failure information
|
||||
log.E.F(
|
||||
"ws->%s WRITE FAILED: len=%d duration=%v write_duration=%v error=%v preview=%q",
|
||||
l.remote, msgLen, totalDuration, writeDuration, err, preview,
|
||||
)
|
||||
|
||||
// Check if this is a context timeout
|
||||
if strings.Contains(err.Error(), "timeout") || strings.Contains(err.Error(), "deadline") {
|
||||
log.E.F(
|
||||
"ws->%s write timeout after %v (limit=%v)", l.remote,
|
||||
writeDuration, DefaultWriteTimeout,
|
||||
)
|
||||
}
|
||||
|
||||
// Check connection state
|
||||
if l.conn != nil {
|
||||
log.T.F(
|
||||
"ws->%s connection state during failure: remote_addr=%v",
|
||||
l.remote, l.req.RemoteAddr,
|
||||
)
|
||||
}
|
||||
|
||||
chk.E(err) // Still call the original error handler
|
||||
return
|
||||
// WriteControl sends a control message through the write channel
|
||||
func (l *Listener) WriteControl(messageType int, data []byte, deadline time.Time) (err error) {
|
||||
select {
|
||||
case <-l.ctx.Done():
|
||||
return l.ctx.Err()
|
||||
case l.writeChan <- WriteRequest{Data: data, MsgType: messageType, IsControl: true, Deadline: deadline}:
|
||||
return nil
|
||||
case <-time.After(DefaultWriteTimeout):
|
||||
log.E.F("ws->%s writeControl channel timeout", l.remote)
|
||||
return errorf.E("writeControl channel timeout")
|
||||
}
|
||||
|
||||
// Log successful write with timing
|
||||
writeDuration := time.Since(writeStart)
|
||||
totalDuration := time.Since(start)
|
||||
n = msgLen
|
||||
|
||||
log.T.F(
|
||||
"ws->%s WRITE SUCCESS: len=%d duration=%v write_duration=%v",
|
||||
l.remote, n, totalDuration, writeDuration,
|
||||
)
|
||||
|
||||
// Log slow writes for performance diagnostics
|
||||
if writeDuration > time.Millisecond*100 {
|
||||
log.T.F(
|
||||
"ws->%s SLOW WRITE detected: %v (>100ms) len=%d", l.remote,
|
||||
writeDuration, n,
|
||||
)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// getManagedACL returns the managed ACL instance if available
|
||||
|
||||
@@ -3,7 +3,6 @@ package app
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -18,6 +17,7 @@ import (
|
||||
"next.orly.dev/pkg/encoders/kind"
|
||||
"next.orly.dev/pkg/interfaces/publisher"
|
||||
"next.orly.dev/pkg/interfaces/typer"
|
||||
"next.orly.dev/pkg/protocol/publish"
|
||||
"next.orly.dev/pkg/utils"
|
||||
)
|
||||
|
||||
@@ -33,6 +33,9 @@ type Subscription struct {
|
||||
// connections.
|
||||
type Map map[*websocket.Conn]map[string]Subscription
|
||||
|
||||
// WriteChanMap maps websocket connections to their write channels
|
||||
type WriteChanMap map[*websocket.Conn]chan<- publish.WriteRequest
|
||||
|
||||
type W struct {
|
||||
*websocket.Conn
|
||||
|
||||
@@ -69,19 +72,37 @@ type P struct {
|
||||
Mx sync.RWMutex
|
||||
// Map is the map of subscribers and subscriptions from the websocket api.
|
||||
Map
|
||||
// WriteChans maps websocket connections to their write channels
|
||||
WriteChans WriteChanMap
|
||||
}
|
||||
|
||||
var _ publisher.I = &P{}
|
||||
|
||||
func NewPublisher(c context.Context) (publisher *P) {
|
||||
return &P{
|
||||
c: c,
|
||||
Map: make(Map),
|
||||
c: c,
|
||||
Map: make(Map),
|
||||
WriteChans: make(WriteChanMap, 100),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *P) Type() (typeName string) { return Type }
|
||||
|
||||
// SetWriteChan stores the write channel for a websocket connection
|
||||
func (p *P) SetWriteChan(conn *websocket.Conn, writeChan chan<- publish.WriteRequest) {
|
||||
p.Mx.Lock()
|
||||
defer p.Mx.Unlock()
|
||||
p.WriteChans[conn] = writeChan
|
||||
}
|
||||
|
||||
// GetWriteChan returns the write channel for a websocket connection
|
||||
func (p *P) GetWriteChan(conn *websocket.Conn) (chan<- publish.WriteRequest, bool) {
|
||||
p.Mx.RLock()
|
||||
defer p.Mx.RUnlock()
|
||||
ch, ok := p.WriteChans[conn]
|
||||
return ch, ok
|
||||
}
|
||||
|
||||
// Receive handles incoming messages to manage websocket listener subscriptions
|
||||
// and associated filters.
|
||||
//
|
||||
@@ -269,42 +290,40 @@ func (p *P) Deliver(ev *event.E) {
|
||||
log.D.F("attempting delivery of event %s (kind=%d, len=%d) to subscription %s @ %s",
|
||||
hex.Enc(ev.ID), ev.Kind, len(msgData), d.id, d.sub.remote)
|
||||
|
||||
// Use a separate context with timeout for writes to prevent race conditions
|
||||
// where the publisher context gets cancelled while writing events
|
||||
deadline := time.Now().Add(DefaultWriteTimeout)
|
||||
d.w.SetWriteDeadline(deadline)
|
||||
// Get write channel for this connection
|
||||
p.Mx.RLock()
|
||||
writeChan, hasChan := p.GetWriteChan(d.w)
|
||||
stillSubscribed := p.Map[d.w] != nil
|
||||
p.Mx.RUnlock()
|
||||
|
||||
deliveryStart := time.Now()
|
||||
if err = d.w.WriteMessage(websocket.TextMessage, msgData); err != nil {
|
||||
deliveryDuration := time.Since(deliveryStart)
|
||||
|
||||
// Log detailed failure information
|
||||
log.E.F("subscription delivery FAILED: event=%s to=%s sub=%s duration=%v error=%v",
|
||||
hex.Enc(ev.ID), d.sub.remote, d.id, deliveryDuration, err)
|
||||
|
||||
// Check for timeout specifically
|
||||
if strings.Contains(err.Error(), "timeout") || strings.Contains(err.Error(), "deadline") {
|
||||
log.E.F("subscription delivery TIMEOUT: event=%s to=%s after %v (limit=%v)",
|
||||
hex.Enc(ev.ID), d.sub.remote, deliveryDuration, DefaultWriteTimeout)
|
||||
}
|
||||
|
||||
// Log connection cleanup
|
||||
log.D.F("removing failed subscriber connection: %s", d.sub.remote)
|
||||
|
||||
// On error, remove the subscriber connection safely
|
||||
p.removeSubscriber(d.w)
|
||||
_ = d.w.Close()
|
||||
if !stillSubscribed {
|
||||
log.D.F("skipping delivery to %s - connection no longer subscribed", d.sub.remote)
|
||||
continue
|
||||
}
|
||||
|
||||
deliveryDuration := time.Since(deliveryStart)
|
||||
log.D.F("subscription delivery SUCCESS: event=%s to=%s sub=%s duration=%v len=%d",
|
||||
hex.Enc(ev.ID), d.sub.remote, d.id, deliveryDuration, len(msgData))
|
||||
if !hasChan {
|
||||
log.D.F("skipping delivery to %s - no write channel available", d.sub.remote)
|
||||
continue
|
||||
}
|
||||
|
||||
// Log slow deliveries for performance monitoring
|
||||
if deliveryDuration > time.Millisecond*50 {
|
||||
log.D.F("SLOW subscription delivery: event=%s to=%s duration=%v (>50ms)",
|
||||
hex.Enc(ev.ID), d.sub.remote, deliveryDuration)
|
||||
// Send to write channel - non-blocking with timeout
|
||||
select {
|
||||
case <-p.c.Done():
|
||||
continue
|
||||
case writeChan <- publish.WriteRequest{Data: msgData, MsgType: websocket.TextMessage, IsControl: false}:
|
||||
log.D.F("subscription delivery QUEUED: event=%s to=%s sub=%s len=%d",
|
||||
hex.Enc(ev.ID), d.sub.remote, d.id, len(msgData))
|
||||
case <-time.After(DefaultWriteTimeout):
|
||||
log.E.F("subscription delivery TIMEOUT: event=%s to=%s sub=%s (write channel full)",
|
||||
hex.Enc(ev.ID), d.sub.remote, d.id)
|
||||
// Check if connection is still valid
|
||||
p.Mx.RLock()
|
||||
stillSubscribed = p.Map[d.w] != nil
|
||||
p.Mx.RUnlock()
|
||||
if !stillSubscribed {
|
||||
log.D.F("removing failed subscriber connection due to channel timeout: %s", d.sub.remote)
|
||||
p.removeSubscriber(d.w)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -321,6 +340,7 @@ func (p *P) removeSubscriberId(ws *websocket.Conn, id string) {
|
||||
// Check the actual map after deletion, not the original reference
|
||||
if len(p.Map[ws]) == 0 {
|
||||
delete(p.Map, ws)
|
||||
delete(p.WriteChans, ws)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -331,6 +351,7 @@ func (p *P) removeSubscriber(ws *websocket.Conn) {
|
||||
defer p.Mx.Unlock()
|
||||
clear(p.Map[ws])
|
||||
delete(p.Map, ws)
|
||||
delete(p.WriteChans, ws)
|
||||
}
|
||||
|
||||
// canSeePrivateEvent checks if the authenticated user can see an event with a private tag
|
||||
|
||||
319
cmd/policyfiltertest/main.go
Normal file
319
cmd/policyfiltertest/main.go
Normal file
@@ -0,0 +1,319 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/crypto/p256k"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/filter"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/encoders/kind"
|
||||
"next.orly.dev/pkg/encoders/tag"
|
||||
"next.orly.dev/pkg/protocol/ws"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var err error
|
||||
url := flag.String("url", "ws://127.0.0.1:34568", "relay websocket URL")
|
||||
allowedPubkeyHex := flag.String("allowed-pubkey", "", "hex-encoded allowed pubkey")
|
||||
allowedSecHex := flag.String("allowed-sec", "", "hex-encoded allowed secret key")
|
||||
unauthorizedPubkeyHex := flag.String("unauthorized-pubkey", "", "hex-encoded unauthorized pubkey")
|
||||
unauthorizedSecHex := flag.String("unauthorized-sec", "", "hex-encoded unauthorized secret key")
|
||||
timeout := flag.Duration("timeout", 10*time.Second, "operation timeout")
|
||||
flag.Parse()
|
||||
|
||||
if *allowedPubkeyHex == "" || *allowedSecHex == "" {
|
||||
log.E.F("required flags: -allowed-pubkey and -allowed-sec")
|
||||
os.Exit(1)
|
||||
}
|
||||
if *unauthorizedPubkeyHex == "" || *unauthorizedSecHex == "" {
|
||||
log.E.F("required flags: -unauthorized-pubkey and -unauthorized-sec")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Decode keys
|
||||
allowedSecBytes, err := hex.Dec(*allowedSecHex)
|
||||
if err != nil {
|
||||
log.E.F("failed to decode allowed secret key: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
allowedSigner := &p256k.Signer{}
|
||||
if err = allowedSigner.InitSec(allowedSecBytes); chk.E(err) {
|
||||
log.E.F("failed to initialize allowed signer: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
unauthorizedSecBytes, err := hex.Dec(*unauthorizedSecHex)
|
||||
if err != nil {
|
||||
log.E.F("failed to decode unauthorized secret key: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
unauthorizedSigner := &p256k.Signer{}
|
||||
if err = unauthorizedSigner.InitSec(unauthorizedSecBytes); chk.E(err) {
|
||||
log.E.F("failed to initialize unauthorized signer: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), *timeout)
|
||||
defer cancel()
|
||||
|
||||
// Test 1: Authenticated as allowed pubkey - should work
|
||||
fmt.Println("Test 1: Publishing event 30520 with allowed pubkey (authenticated)...")
|
||||
if err := testWriteEvent(ctx, *url, 30520, allowedSigner, allowedSigner); err != nil {
|
||||
fmt.Printf("❌ FAILED: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println("✅ PASSED: Event published successfully")
|
||||
|
||||
// Test 2: Authenticated as allowed pubkey, then read event 10306 - should work
|
||||
// First publish an event, then read it
|
||||
fmt.Println("\nTest 2: Publishing and reading event 10306 with allowed pubkey (authenticated)...")
|
||||
if err := testWriteEvent(ctx, *url, 10306, allowedSigner, allowedSigner); err != nil {
|
||||
fmt.Printf("❌ FAILED to publish: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := testReadEvent(ctx, *url, 10306, allowedSigner); err != nil {
|
||||
fmt.Printf("❌ FAILED to read: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println("✅ PASSED: Event readable by allowed user")
|
||||
|
||||
// Test 3: Unauthenticated request - should be blocked
|
||||
fmt.Println("\nTest 3: Publishing event 30520 without authentication...")
|
||||
if err := testWriteEventUnauthenticated(ctx, *url, 30520, allowedSigner); err != nil {
|
||||
fmt.Printf("✅ PASSED: Event correctly blocked (expected): %v\n", err)
|
||||
} else {
|
||||
fmt.Println("❌ FAILED: Event was allowed when it should have been blocked")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Test 4: Authenticated as unauthorized pubkey - should be blocked
|
||||
fmt.Println("\nTest 4: Publishing event 30520 with unauthorized pubkey...")
|
||||
if err := testWriteEvent(ctx, *url, 30520, unauthorizedSigner, unauthorizedSigner); err != nil {
|
||||
fmt.Printf("✅ PASSED: Event correctly blocked (expected): %v\n", err)
|
||||
} else {
|
||||
fmt.Println("❌ FAILED: Event was allowed when it should have been blocked")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Test 5: Read event 10306 without authentication - should be blocked
|
||||
// Event was published in test 2, so it exists in the database
|
||||
fmt.Println("\nTest 5: Reading event 10306 without authentication (should be blocked)...")
|
||||
// Wait a bit to ensure event is stored
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
// If no error is returned, that means no events were received (which is correct)
|
||||
// If an error is returned, it means an event was received (which is wrong)
|
||||
if err := testReadEventUnauthenticated(ctx, *url, 10306); err != nil {
|
||||
// If we got an error about receiving an event, that's a failure
|
||||
if strings.Contains(err.Error(), "unexpected event received") {
|
||||
fmt.Printf("❌ FAILED: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
// Other errors (like connection errors) are also failures
|
||||
fmt.Printf("❌ FAILED: Unexpected error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println("✅ PASSED: No events received (correctly filtered by policy)")
|
||||
|
||||
// Test 6: Read event 10306 with unauthorized pubkey - should be blocked
|
||||
fmt.Println("\nTest 6: Reading event 10306 with unauthorized pubkey (should be blocked)...")
|
||||
// If no error is returned, that means no events were received (which is correct)
|
||||
// If an error is returned about receiving an event, that's a failure
|
||||
if err := testReadEvent(ctx, *url, 10306, unauthorizedSigner); err != nil {
|
||||
// Connection/subscription errors are failures
|
||||
fmt.Printf("❌ FAILED: Unexpected error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println("✅ PASSED: No events received (correctly filtered by policy)")
|
||||
|
||||
fmt.Println("\n✅ All tests passed!")
|
||||
}
|
||||
|
||||
func testWriteEvent(ctx context.Context, url string, kindNum uint16, eventSigner, authSigner *p256k.Signer) error {
|
||||
rl, err := ws.RelayConnect(ctx, url)
|
||||
if err != nil {
|
||||
return fmt.Errorf("connect error: %w", err)
|
||||
}
|
||||
defer rl.Close()
|
||||
|
||||
// Send a REQ first to trigger AUTH challenge (when AuthToWrite is enabled)
|
||||
// This is needed because challenges are sent on REQ, not on connect
|
||||
limit := uint(1)
|
||||
ff := filter.NewS(&filter.F{
|
||||
Kinds: kind.NewS(kind.New(kindNum)),
|
||||
Limit: &limit,
|
||||
})
|
||||
sub, err := rl.Subscribe(ctx, ff)
|
||||
if err != nil {
|
||||
return fmt.Errorf("subscription error (may be expected): %w", err)
|
||||
}
|
||||
// Wait a bit for challenge to arrive
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
sub.Unsub()
|
||||
|
||||
// Authenticate
|
||||
if err = rl.Auth(ctx, authSigner); err != nil {
|
||||
return fmt.Errorf("auth error: %w", err)
|
||||
}
|
||||
|
||||
// Create and sign event
|
||||
ev := &event.E{
|
||||
CreatedAt: time.Now().Unix(),
|
||||
Kind: kind.K{K: kindNum}.K,
|
||||
Tags: tag.NewS(),
|
||||
Content: []byte(fmt.Sprintf("test event kind %d", kindNum)),
|
||||
}
|
||||
// Add p tag for privileged check
|
||||
pTag := tag.NewFromAny("p", hex.Enc(authSigner.Pub()))
|
||||
ev.Tags.Append(pTag)
|
||||
|
||||
// Add d tag for addressable events (kinds 30000-39999)
|
||||
if kindNum >= 30000 && kindNum < 40000 {
|
||||
dTag := tag.NewFromAny("d", "test")
|
||||
ev.Tags.Append(dTag)
|
||||
}
|
||||
|
||||
if err = ev.Sign(eventSigner); err != nil {
|
||||
return fmt.Errorf("sign error: %w", err)
|
||||
}
|
||||
|
||||
// Publish
|
||||
if err = rl.Publish(ctx, ev); err != nil {
|
||||
return fmt.Errorf("publish error: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testWriteEventUnauthenticated(ctx context.Context, url string, kindNum uint16, eventSigner *p256k.Signer) error {
|
||||
rl, err := ws.RelayConnect(ctx, url)
|
||||
if err != nil {
|
||||
return fmt.Errorf("connect error: %w", err)
|
||||
}
|
||||
defer rl.Close()
|
||||
|
||||
// Do NOT authenticate
|
||||
|
||||
// Create and sign event
|
||||
ev := &event.E{
|
||||
CreatedAt: time.Now().Unix(),
|
||||
Kind: kind.K{K: kindNum}.K,
|
||||
Tags: tag.NewS(),
|
||||
Content: []byte(fmt.Sprintf("test event kind %d (unauthenticated)", kindNum)),
|
||||
}
|
||||
|
||||
// Add d tag for addressable events (kinds 30000-39999)
|
||||
if kindNum >= 30000 && kindNum < 40000 {
|
||||
dTag := tag.NewFromAny("d", "test")
|
||||
ev.Tags.Append(dTag)
|
||||
}
|
||||
|
||||
if err = ev.Sign(eventSigner); err != nil {
|
||||
return fmt.Errorf("sign error: %w", err)
|
||||
}
|
||||
|
||||
// Publish (should fail)
|
||||
if err = rl.Publish(ctx, ev); err != nil {
|
||||
return fmt.Errorf("publish error (expected): %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testReadEvent(ctx context.Context, url string, kindNum uint16, authSigner *p256k.Signer) error {
|
||||
rl, err := ws.RelayConnect(ctx, url)
|
||||
if err != nil {
|
||||
return fmt.Errorf("connect error: %w", err)
|
||||
}
|
||||
defer rl.Close()
|
||||
|
||||
// Send a REQ first to trigger AUTH challenge (when AuthToWrite is enabled)
|
||||
// Then authenticate
|
||||
ff := filter.NewS(&filter.F{
|
||||
Kinds: kind.NewS(kind.New(kindNum)),
|
||||
})
|
||||
sub, err := rl.Subscribe(ctx, ff)
|
||||
if err != nil {
|
||||
return fmt.Errorf("subscription error: %w", err)
|
||||
}
|
||||
// Wait a bit for challenge to arrive
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
|
||||
// Authenticate
|
||||
if err = rl.Auth(ctx, authSigner); err != nil {
|
||||
sub.Unsub()
|
||||
return fmt.Errorf("auth error: %w", err)
|
||||
}
|
||||
|
||||
// Wait for events or timeout
|
||||
// If we receive any events, return nil (success)
|
||||
// If we don't receive events, also return nil (no events found, which may be expected)
|
||||
select {
|
||||
case ev := <-sub.Events:
|
||||
if ev != nil {
|
||||
sub.Unsub()
|
||||
return nil // Event received
|
||||
}
|
||||
case <-sub.EndOfStoredEvents:
|
||||
// EOSE received, no more events
|
||||
sub.Unsub()
|
||||
return nil
|
||||
case <-time.After(5 * time.Second):
|
||||
// No events received - this might be OK if no events exist or they're filtered
|
||||
sub.Unsub()
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
sub.Unsub()
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func testReadEventUnauthenticated(ctx context.Context, url string, kindNum uint16) error {
|
||||
rl, err := ws.RelayConnect(ctx, url)
|
||||
if err != nil {
|
||||
return fmt.Errorf("connect error: %w", err)
|
||||
}
|
||||
defer rl.Close()
|
||||
|
||||
// Do NOT authenticate
|
||||
|
||||
// Subscribe to events
|
||||
ff := filter.NewS(&filter.F{
|
||||
Kinds: kind.NewS(kind.New(kindNum)),
|
||||
})
|
||||
|
||||
sub, err := rl.Subscribe(ctx, ff)
|
||||
if err != nil {
|
||||
return fmt.Errorf("subscription error (may be expected): %w", err)
|
||||
}
|
||||
defer sub.Unsub()
|
||||
|
||||
// Wait for events or timeout
|
||||
// If we receive any events, that's a failure (should be blocked)
|
||||
select {
|
||||
case ev := <-sub.Events:
|
||||
if ev != nil {
|
||||
return fmt.Errorf("unexpected event received: should have been blocked by policy (event ID: %s)", hex.Enc(ev.ID))
|
||||
}
|
||||
case <-sub.EndOfStoredEvents:
|
||||
// EOSE received, no events (this is expected for unauthenticated privileged events)
|
||||
return nil
|
||||
case <-time.After(5 * time.Second):
|
||||
// No events received - this is expected for unauthenticated requests
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
71
cmd/relay-tester/README.md
Normal file
71
cmd/relay-tester/README.md
Normal file
@@ -0,0 +1,71 @@
|
||||
# relay-tester
|
||||
|
||||
A command-line tool for testing Nostr relay implementations against the NIP-01 specification and related NIPs.
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
relay-tester -url <relay-url> [options]
|
||||
```
|
||||
|
||||
## Options
|
||||
|
||||
- `-url` (required): Relay websocket URL (e.g., `ws://127.0.0.1:3334` or `wss://relay.example.com`)
|
||||
- `-test <name>`: Run a specific test by name (default: run all tests)
|
||||
- `-json`: Output results in JSON format
|
||||
- `-v`: Verbose output (shows additional info for each test)
|
||||
- `-list`: List all available tests and exit
|
||||
|
||||
## Examples
|
||||
|
||||
### Run all tests against a local relay:
|
||||
```bash
|
||||
relay-tester -url ws://127.0.0.1:3334
|
||||
```
|
||||
|
||||
### Run all tests with verbose output:
|
||||
```bash
|
||||
relay-tester -url ws://127.0.0.1:3334 -v
|
||||
```
|
||||
|
||||
### Run a specific test:
|
||||
```bash
|
||||
relay-tester -url ws://127.0.0.1:3334 -test "Publishes basic event"
|
||||
```
|
||||
|
||||
### Output results as JSON:
|
||||
```bash
|
||||
relay-tester -url ws://127.0.0.1:3334 -json
|
||||
```
|
||||
|
||||
### List all available tests:
|
||||
```bash
|
||||
relay-tester -list
|
||||
```
|
||||
|
||||
## Exit Codes
|
||||
|
||||
- `0`: All required tests passed
|
||||
- `1`: One or more required tests failed, or an error occurred
|
||||
|
||||
## Test Categories
|
||||
|
||||
The relay-tester runs tests covering:
|
||||
|
||||
- **Basic Event Operations**: Publishing, finding by ID/author/kind/tags
|
||||
- **Filtering**: Time ranges, limits, multiple filters, scrape queries
|
||||
- **Replaceable Events**: Metadata and contact list replacement
|
||||
- **Parameterized Replaceable Events**: Addressable events with `d` tags
|
||||
- **Event Deletion**: Deletion events (NIP-09)
|
||||
- **Ephemeral Events**: Event handling for ephemeral kinds
|
||||
- **EOSE Handling**: End of stored events signaling
|
||||
- **Event Validation**: Signature verification, ID hash verification
|
||||
- **JSON Compliance**: NIP-01 JSON escape sequences
|
||||
|
||||
## Notes
|
||||
|
||||
- Tests are run in dependency order (some tests depend on others)
|
||||
- Required tests must pass for the relay to be considered compliant
|
||||
- Optional tests may fail without affecting overall compliance
|
||||
- The tool connects to the relay using WebSocket and runs tests sequentially
|
||||
|
||||
160
cmd/relay-tester/main.go
Normal file
160
cmd/relay-tester/main.go
Normal file
@@ -0,0 +1,160 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"lol.mleku.dev/log"
|
||||
relaytester "next.orly.dev/relay-tester"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var (
|
||||
relayURL = flag.String("url", "", "relay websocket URL (required, e.g., ws://127.0.0.1:3334)")
|
||||
testName = flag.String("test", "", "run specific test by name (default: run all tests)")
|
||||
jsonOut = flag.Bool("json", false, "output results in JSON format")
|
||||
verbose = flag.Bool("v", false, "verbose output")
|
||||
listTests = flag.Bool("list", false, "list all available tests and exit")
|
||||
)
|
||||
flag.Parse()
|
||||
|
||||
if *listTests {
|
||||
listAllTests()
|
||||
return
|
||||
}
|
||||
|
||||
if *relayURL == "" {
|
||||
log.E.F("required flag: -url (relay websocket URL)")
|
||||
flag.Usage()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Validate URL format
|
||||
if !strings.HasPrefix(*relayURL, "ws://") && !strings.HasPrefix(*relayURL, "wss://") {
|
||||
log.E.F("URL must start with ws:// or wss://")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Create test suite
|
||||
if *verbose {
|
||||
log.I.F("Creating test suite for %s...", *relayURL)
|
||||
}
|
||||
suite, err := relaytester.NewTestSuite(*relayURL)
|
||||
if err != nil {
|
||||
log.E.F("failed to create test suite: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Run tests
|
||||
var results []relaytester.TestResult
|
||||
if *testName != "" {
|
||||
if *verbose {
|
||||
log.I.F("Running test: %s", *testName)
|
||||
}
|
||||
result, err := suite.RunTest(*testName)
|
||||
if err != nil {
|
||||
log.E.F("failed to run test %s: %v", *testName, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
results = []relaytester.TestResult{result}
|
||||
} else {
|
||||
if *verbose {
|
||||
log.I.F("Running all tests...")
|
||||
}
|
||||
if results, err = suite.Run(); err != nil {
|
||||
log.E.F("failed to run tests: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Output results
|
||||
if *jsonOut {
|
||||
jsonOutput, err := relaytester.FormatJSON(results)
|
||||
if err != nil {
|
||||
log.E.F("failed to format JSON: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Println(jsonOutput)
|
||||
} else {
|
||||
outputResults(results, *verbose)
|
||||
}
|
||||
|
||||
// Check exit code
|
||||
hasRequiredFailures := false
|
||||
for _, result := range results {
|
||||
if result.Required && !result.Pass {
|
||||
hasRequiredFailures = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if hasRequiredFailures {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func outputResults(results []relaytester.TestResult, verbose bool) {
|
||||
passed := 0
|
||||
failed := 0
|
||||
requiredFailed := 0
|
||||
|
||||
for _, result := range results {
|
||||
if result.Pass {
|
||||
passed++
|
||||
if verbose {
|
||||
fmt.Printf("PASS: %s", result.Name)
|
||||
if result.Info != "" {
|
||||
fmt.Printf(" - %s", result.Info)
|
||||
}
|
||||
fmt.Println()
|
||||
} else {
|
||||
fmt.Printf("PASS: %s\n", result.Name)
|
||||
}
|
||||
} else {
|
||||
failed++
|
||||
if result.Required {
|
||||
requiredFailed++
|
||||
fmt.Printf("FAIL (required): %s", result.Name)
|
||||
} else {
|
||||
fmt.Printf("FAIL (optional): %s", result.Name)
|
||||
}
|
||||
if result.Info != "" {
|
||||
fmt.Printf(" - %s", result.Info)
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println()
|
||||
fmt.Println("Test Summary:")
|
||||
fmt.Printf(" Total: %d\n", len(results))
|
||||
fmt.Printf(" Passed: %d\n", passed)
|
||||
fmt.Printf(" Failed: %d\n", failed)
|
||||
fmt.Printf(" Required Failed: %d\n", requiredFailed)
|
||||
}
|
||||
|
||||
func listAllTests() {
|
||||
// Create a dummy test suite to get the list of tests
|
||||
suite, err := relaytester.NewTestSuite("ws://127.0.0.1:0")
|
||||
if err != nil {
|
||||
log.E.F("failed to create test suite: %v", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Println("Available tests:")
|
||||
fmt.Println()
|
||||
|
||||
testNames := suite.ListTests()
|
||||
testInfo := suite.GetTestNames()
|
||||
|
||||
for _, name := range testNames {
|
||||
required := ""
|
||||
if testInfo[name] {
|
||||
required = " (required)"
|
||||
}
|
||||
fmt.Printf(" - %s%s\n", name, required)
|
||||
}
|
||||
}
|
||||
|
||||
8
go.mod
8
go.mod
@@ -20,13 +20,18 @@ require (
|
||||
golang.org/x/lint v0.0.0-20241112194109-818c5a804067
|
||||
golang.org/x/net v0.46.0
|
||||
honnef.co/go/tools v0.6.1
|
||||
lol.mleku.dev v1.0.4
|
||||
lol.mleku.dev v1.0.5
|
||||
lukechampine.com/frand v1.5.1
|
||||
p256k1.mleku.dev v1.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.6 // indirect
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect
|
||||
github.com/dgraph-io/ristretto/v2 v2.3.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/felixge/fgprof v0.9.5 // indirect
|
||||
@@ -35,6 +40,7 @@ require (
|
||||
github.com/google/flatbuffers v25.9.23+incompatible // indirect
|
||||
github.com/google/pprof v0.0.0-20251007162407-5df77e3f7d1d // indirect
|
||||
github.com/klauspost/compress v1.18.1 // indirect
|
||||
github.com/minio/sha256-simd v1.0.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/templexxx/cpu v0.1.1 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
|
||||
16
go.sum
16
go.sum
@@ -2,6 +2,10 @@ github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
|
||||
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.6 h1:IzlsEr9olcSRKB/n7c4351F3xHKxS2lma+1UFGCYd4E=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.6/go.mod h1:m22FrOAiuxl/tht9wIqAoGHcbnCCaPWyauO8y2LGGtQ=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
|
||||
@@ -16,6 +20,10 @@ github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc=
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs=
|
||||
github.com/dgraph-io/badger/v4 v4.8.0 h1:JYph1ChBijCw8SLeybvPINizbDKWZ5n/GYbz2yhN/bs=
|
||||
github.com/dgraph-io/badger/v4 v4.8.0/go.mod h1:U6on6e8k/RTbUWxqKR0MvugJuVmkxSNc79ap4917h4w=
|
||||
github.com/dgraph-io/ristretto/v2 v2.3.0 h1:qTQ38m7oIyd4GAed/QkUZyPFNMnvVWyazGXRwvOt5zk=
|
||||
@@ -60,6 +68,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
|
||||
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
|
||||
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
|
||||
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
|
||||
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
|
||||
@@ -138,7 +148,9 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
|
||||
honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
|
||||
lol.mleku.dev v1.0.4 h1:SOngs7erj8J3nXz673kYFgXQHFO+jkCI1E2iOlpyzV8=
|
||||
lol.mleku.dev v1.0.4/go.mod h1:DQ0WnmkntA9dPLCXgvtIgYt5G0HSqx3wSTLolHgWeLA=
|
||||
lol.mleku.dev v1.0.5 h1:irwfwz+Scv74G/2OXmv05YFKOzUNOVZ735EAkYgjgM8=
|
||||
lol.mleku.dev v1.0.5/go.mod h1:JlsqP0CZDLKRyd85XGcy79+ydSRqmFkrPzYFMYxQ+zs=
|
||||
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
|
||||
lukechampine.com/frand v1.5.1/go.mod h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q=
|
||||
p256k1.mleku.dev v1.0.1 h1:4ZQ+2xNfKpL6+e9urKP6f/QdHKKUNIEsqvFwogpluZw=
|
||||
p256k1.mleku.dev v1.0.1/go.mod h1:gY2ybEebhiSgSDlJ8ERgAe833dn2EDqs7aBsvwpgu0s=
|
||||
|
||||
@@ -4,22 +4,18 @@ package p256k
|
||||
|
||||
import (
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/crypto/p256k/btcec"
|
||||
p256k1signer "p256k1.mleku.dev/signer"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.T.Ln("using btcec signature library")
|
||||
log.T.Ln("using p256k1.mleku.dev/signer (pure Go/Btcec)")
|
||||
}
|
||||
|
||||
// BTCECSigner is always available but enabling it disables the use of
|
||||
// github.com/bitcoin-core/secp256k1 CGO signature implementation and points it at the btec
|
||||
// version.
|
||||
// Signer is an alias for the BtcecSigner type from p256k1.mleku.dev/signer (btcec version).
|
||||
// This is used when CGO is not available.
|
||||
type Signer = p256k1signer.BtcecSigner
|
||||
|
||||
type Signer = btcec.Signer
|
||||
type Keygen = btcec.Keygen
|
||||
// Keygen is an alias for the P256K1Gen type from p256k1.mleku.dev/signer (btcec version).
|
||||
type Keygen = p256k1signer.P256K1Gen
|
||||
|
||||
func NewKeygen() (k *Keygen) { return new(Keygen) }
|
||||
|
||||
var NewSecFromHex = btcec.NewSecFromHex[string]
|
||||
var NewPubFromHex = btcec.NewPubFromHex[string]
|
||||
var HexToBin = btcec.HexToBin
|
||||
var NewKeygen = p256k1signer.NewP256K1Gen
|
||||
@@ -1,6 +1,9 @@
|
||||
// Package p256k is a signer interface that (by default) uses the
|
||||
// bitcoin/libsecp256k1 library for fast signature creation and verification of
|
||||
// the BIP-340 nostr X-only signatures and public keys, and ECDH.
|
||||
// Package p256k provides a signer interface that uses p256k1.mleku.dev library for
|
||||
// fast signature creation and verification of BIP-340 nostr X-only signatures and
|
||||
// public keys, and ECDH.
|
||||
//
|
||||
// Currently the ECDH is only implemented with the btcec library.
|
||||
// The package provides type aliases to p256k1.mleku.dev/signer:
|
||||
// - cgo: Uses the CGO-optimized version from p256k1.mleku.dev
|
||||
// - btcec: Uses the btcec version from p256k1.mleku.dev
|
||||
// - default: Uses the pure Go version from p256k1.mleku.dev
|
||||
package p256k
|
||||
|
||||
41
pkg/crypto/p256k/helpers-btcec.go
Normal file
41
pkg/crypto/p256k/helpers-btcec.go
Normal file
@@ -0,0 +1,41 @@
|
||||
//go:build !cgo
|
||||
|
||||
package p256k
|
||||
|
||||
import (
|
||||
"lol.mleku.dev/chk"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/interfaces/signer"
|
||||
p256k1signer "p256k1.mleku.dev/signer"
|
||||
)
|
||||
|
||||
func NewSecFromHex[V []byte | string](skh V) (sign signer.I, err error) {
|
||||
sk := make([]byte, len(skh)/2)
|
||||
if _, err = hex.DecBytes(sk, []byte(skh)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sign = p256k1signer.NewBtcecSigner()
|
||||
if err = sign.InitSec(sk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func NewPubFromHex[V []byte | string](pkh V) (sign signer.I, err error) {
|
||||
pk := make([]byte, len(pkh)/2)
|
||||
if _, err = hex.DecBytes(pk, []byte(pkh)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sign = p256k1signer.NewBtcecSigner()
|
||||
if err = sign.InitPub(pk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func HexToBin(hexStr string) (b []byte, err error) {
|
||||
if b, err = hex.DecAppend(b, []byte(hexStr)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"lol.mleku.dev/chk"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/interfaces/signer"
|
||||
p256k1signer "p256k1.mleku.dev/signer"
|
||||
)
|
||||
|
||||
func NewSecFromHex[V []byte | string](skh V) (sign signer.I, err error) {
|
||||
@@ -13,7 +14,7 @@ func NewSecFromHex[V []byte | string](skh V) (sign signer.I, err error) {
|
||||
if _, err = hex.DecBytes(sk, []byte(skh)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sign = &Signer{}
|
||||
sign = p256k1signer.NewP256K1Signer()
|
||||
if err = sign.InitSec(sk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
@@ -25,7 +26,7 @@ func NewPubFromHex[V []byte | string](pkh V) (sign signer.I, err error) {
|
||||
if _, err = hex.DecBytes(pk, []byte(pkh)); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sign = &Signer{}
|
||||
sign = p256k1signer.NewP256K1Signer()
|
||||
if err = sign.InitPub(pk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -2,139 +2,19 @@
|
||||
|
||||
package p256k
|
||||
|
||||
import "C"
|
||||
import (
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/errorf"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/crypto/ec"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/interfaces/signer"
|
||||
p256k1signer "p256k1.mleku.dev/signer"
|
||||
)
|
||||
|
||||
func init() {
|
||||
log.T.Ln("using bitcoin/secp256k1 signature library")
|
||||
log.T.Ln("using p256k1.mleku.dev/signer (CGO)")
|
||||
}
|
||||
|
||||
// Signer implements the signer.I interface.
|
||||
//
|
||||
// Either the Sec or Pub must be populated, the former is for generating
|
||||
// signatures, the latter is for verifying them.
|
||||
//
|
||||
// When using this library only for verification, a constructor that converts
|
||||
// from bytes to PubKey is needed prior to calling Verify.
|
||||
type Signer struct {
|
||||
// SecretKey is the secret key.
|
||||
SecretKey *SecKey
|
||||
// PublicKey is the public key.
|
||||
PublicKey *PubKey
|
||||
// BTCECSec is needed for ECDH as currently the CGO bindings don't include it
|
||||
BTCECSec *btcec.SecretKey
|
||||
skb, pkb []byte
|
||||
}
|
||||
// Signer is an alias for the P256K1Signer type from p256k1.mleku.dev/signer (cgo version).
|
||||
type Signer = p256k1signer.P256K1Signer
|
||||
|
||||
var _ signer.I = &Signer{}
|
||||
// Keygen is an alias for the P256K1Gen type from p256k1.mleku.dev/signer (cgo version).
|
||||
type Keygen = p256k1signer.P256K1Gen
|
||||
|
||||
// Generate a new Signer key pair using the CGO bindings to libsecp256k1
|
||||
func (s *Signer) Generate() (err error) {
|
||||
var cs *Sec
|
||||
var cx *XPublicKey
|
||||
if s.skb, s.pkb, cs, cx, err = Generate(); chk.E(err) {
|
||||
return
|
||||
}
|
||||
s.SecretKey = &cs.Key
|
||||
s.PublicKey = cx.Key
|
||||
s.BTCECSec, _ = btcec.PrivKeyFromBytes(s.skb)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Signer) InitSec(skb []byte) (err error) {
|
||||
var cs *Sec
|
||||
var cx *XPublicKey
|
||||
// var cp *PublicKey
|
||||
if s.pkb, cs, cx, err = FromSecretBytes(skb); chk.E(err) {
|
||||
if err.Error() != "provided secret generates a public key with odd Y coordinate, fixed version returned" {
|
||||
log.E.Ln(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
s.skb = skb
|
||||
s.SecretKey = &cs.Key
|
||||
s.PublicKey = cx.Key
|
||||
// s.ECPublicKey = cp.Key
|
||||
// needed for ecdh
|
||||
s.BTCECSec, _ = btcec.PrivKeyFromBytes(s.skb)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Signer) InitPub(pub []byte) (err error) {
|
||||
var up *Pub
|
||||
if up, err = PubFromBytes(pub); chk.E(err) {
|
||||
return
|
||||
}
|
||||
s.PublicKey = &up.Key
|
||||
s.pkb = up.PubB()
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Signer) Sec() (b []byte) {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return s.skb
|
||||
}
|
||||
func (s *Signer) Pub() (b []byte) {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
return s.pkb
|
||||
}
|
||||
|
||||
// func (s *Signer) ECPub() (b []byte) { return s.pkb }
|
||||
|
||||
func (s *Signer) Sign(msg []byte) (sig []byte, err error) {
|
||||
if s.SecretKey == nil {
|
||||
err = errorf.E("p256k: I secret not initialized")
|
||||
return
|
||||
}
|
||||
u := ToUchar(msg)
|
||||
if sig, err = Sign(u, s.SecretKey); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Signer) Verify(msg, sig []byte) (valid bool, err error) {
|
||||
if s.PublicKey == nil {
|
||||
err = errorf.E("p256k: Pubkey not initialized")
|
||||
return
|
||||
}
|
||||
var uMsg, uSig *Uchar
|
||||
if uMsg, err = Msg(msg); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if uSig, err = Sig(sig); chk.E(err) {
|
||||
return
|
||||
}
|
||||
valid = Verify(uMsg, uSig, s.PublicKey)
|
||||
if !valid {
|
||||
err = errorf.E("p256k: invalid signature")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Signer) ECDH(pubkeyBytes []byte) (secret []byte, err error) {
|
||||
var pub *secp256k1.PublicKey
|
||||
if pub, err = secp256k1.ParsePubKey(
|
||||
append(
|
||||
[]byte{0x02},
|
||||
pubkeyBytes...,
|
||||
),
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
secret = btcec.GenerateSharedSecret(s.BTCECSec, pub)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Signer) Zero() { Zero(s.SecretKey) }
|
||||
var NewKeygen = p256k1signer.NewP256K1Gen
|
||||
@@ -1,426 +0,0 @@
|
||||
//go:build cgo
|
||||
|
||||
package p256k
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"unsafe"
|
||||
|
||||
"lol.mleku.dev/chk"
|
||||
"lol.mleku.dev/errorf"
|
||||
"lol.mleku.dev/log"
|
||||
"next.orly.dev/pkg/crypto/ec/schnorr"
|
||||
"next.orly.dev/pkg/crypto/ec/secp256k1"
|
||||
"next.orly.dev/pkg/crypto/sha256"
|
||||
)
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lsecp256k1
|
||||
#include <secp256k1.h>
|
||||
#include <secp256k1_schnorrsig.h>
|
||||
#include <secp256k1_extrakeys.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
type (
|
||||
Context = C.secp256k1_context
|
||||
Uchar = C.uchar
|
||||
Cint = C.int
|
||||
SecKey = C.secp256k1_keypair
|
||||
PubKey = C.secp256k1_xonly_pubkey
|
||||
ECPubKey = C.secp256k1_pubkey
|
||||
)
|
||||
|
||||
var (
|
||||
ctx *Context
|
||||
)
|
||||
|
||||
func CreateContext() *Context {
|
||||
return C.secp256k1_context_create(
|
||||
C.SECP256K1_CONTEXT_SIGN |
|
||||
C.SECP256K1_CONTEXT_VERIFY,
|
||||
)
|
||||
}
|
||||
|
||||
func GetRandom() (u *Uchar) {
|
||||
rnd := make([]byte, 32)
|
||||
_, _ = rand.Read(rnd)
|
||||
return ToUchar(rnd)
|
||||
}
|
||||
|
||||
func AssertLen(b []byte, length int, name string) (err error) {
|
||||
if len(b) != length {
|
||||
err = errorf.E("%s should be %d bytes, got %d", name, length, len(b))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func RandomizeContext(ctx *C.secp256k1_context) {
|
||||
C.secp256k1_context_randomize(ctx, GetRandom())
|
||||
return
|
||||
}
|
||||
|
||||
func CreateRandomContext() (c *Context) {
|
||||
c = CreateContext()
|
||||
RandomizeContext(c)
|
||||
return
|
||||
}
|
||||
|
||||
func init() {
|
||||
if ctx = CreateContext(); ctx == nil {
|
||||
panic("failed to create secp256k1 context")
|
||||
}
|
||||
}
|
||||
|
||||
// ToUchar returns a C uchar pointer aliasing the first byte of b. The caller
// must keep b alive across any C call using the pointer; panics if b is
// empty (indexes &b[0]).
func ToUchar(b []byte) (u *Uchar) { return (*Uchar)(unsafe.Pointer(&b[0])) }
|
||||
|
||||
// Sec wraps a libsecp256k1 keypair, which carries the secret key together
// with its precomputed public key.
type Sec struct {
	Key SecKey
}
|
||||
|
||||
func GenSec() (sec *Sec, err error) {
|
||||
if _, _, sec, _, err = Generate(); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SecFromBytes builds a keypair from a 32-byte secret key. The C library
// rejects out-of-range secret keys, which surfaces here as a parse error.
func SecFromBytes(sk []byte) (sec *Sec, err error) {
	sec = new(Sec)
	if C.secp256k1_keypair_create(ctx, &sec.Key, ToUchar(sk)) != 1 {
		err = errorf.E("failed to parse private key")
		return
	}
	return
}
|
||||
|
||||
// Sec returns a pointer to the underlying C keypair.
func (s *Sec) Sec() *SecKey { return &s.Key }
|
||||
|
||||
// Pub derives the BIP-340 x-only public key from the keypair. The parity
// output of the C call is discarded (nil).
func (s *Sec) Pub() (p *Pub, err error) {
	p = new(Pub)
	if C.secp256k1_keypair_xonly_pub(ctx, &p.Key, nil, s.Sec()) != 1 {
		err = errorf.E("pubkey derivation failed")
		return
	}
	return
}
|
||||
|
||||
// type PublicKey struct {
|
||||
// Key *C.secp256k1_pubkey
|
||||
// }
|
||||
//
|
||||
// func NewPublicKey() *PublicKey {
|
||||
// return &PublicKey{
|
||||
// Key: &C.secp256k1_pubkey{},
|
||||
// }
|
||||
// }
|
||||
|
||||
// XPublicKey wraps a heap-allocated libsecp256k1 x-only public key.
type XPublicKey struct {
	Key *C.secp256k1_xonly_pubkey
}
|
||||
|
||||
func NewXPublicKey() *XPublicKey {
|
||||
return &XPublicKey{
|
||||
Key: &C.secp256k1_xonly_pubkey{},
|
||||
}
|
||||
}
|
||||
|
||||
// FromSecretBytes parses and processes what should be a secret key. If it is a correct key within the curve order, but
|
||||
// with a public key having an odd Y coordinate, it returns an error with the fixed key.
|
||||
func FromSecretBytes(skb []byte) (
|
||||
pkb []byte,
|
||||
sec *Sec,
|
||||
pub *XPublicKey,
|
||||
// ecPub *PublicKey,
|
||||
err error,
|
||||
) {
|
||||
xpkb := make([]byte, schnorr.PubKeyBytesLen)
|
||||
// clen := C.size_t(secp256k1.PubKeyBytesLenCompressed - 1)
|
||||
pkb = make([]byte, schnorr.PubKeyBytesLen)
|
||||
var parity Cint
|
||||
// ecPub = NewPublicKey()
|
||||
pub = NewXPublicKey()
|
||||
sec = &Sec{}
|
||||
uskb := ToUchar(skb)
|
||||
res := C.secp256k1_keypair_create(ctx, &sec.Key, uskb)
|
||||
if res != 1 {
|
||||
err = errorf.E("failed to create secp256k1 keypair")
|
||||
return
|
||||
}
|
||||
// C.secp256k1_keypair_pub(ctx, ecPub.Key, &sec.Key)
|
||||
// C.secp256k1_ec_pubkey_serialize(ctx, ToUchar(ecpkb), &clen, ecPub.Key,
|
||||
// C.SECP256K1_EC_COMPRESSED)
|
||||
// if ecpkb[0] != 2 {
|
||||
// log.W.ToSliceOfBytes("odd pubkey from %0x -> %0x", skb, ecpkb)
|
||||
// Negate(skb)
|
||||
// uskb = ToUchar(skb)
|
||||
// res = C.secp256k1_keypair_create(ctx, &sec.Key, uskb)
|
||||
// if res != 1 {
|
||||
// err = errorf.E("failed to create secp256k1 keypair")
|
||||
// return
|
||||
// }
|
||||
// C.secp256k1_keypair_pub(ctx, ecPub.Key, &sec.Key)
|
||||
// C.secp256k1_ec_pubkey_serialize(ctx, ToUchar(ecpkb), &clen, ecPub.Key, C.SECP256K1_EC_COMPRESSED)
|
||||
// C.secp256k1_keypair_xonly_pub(ctx, pub.Key, &parity, &sec.Key)
|
||||
// err = errors.New("provided secret generates a public key with odd Y coordinate, fixed version returned")
|
||||
// }
|
||||
C.secp256k1_keypair_xonly_pub(ctx, pub.Key, &parity, &sec.Key)
|
||||
C.secp256k1_xonly_pubkey_serialize(ctx, ToUchar(xpkb), pub.Key)
|
||||
pkb = xpkb
|
||||
// log.I.S(sec, pub, skb, pkb)
|
||||
return
|
||||
}
|
||||
|
||||
// Generate gathers entropy to generate a full set of bytes and CGO values of it and derived from it to perform
|
||||
// signature and ECDH operations.
|
||||
func Generate() (
|
||||
skb, pkb []byte,
|
||||
sec *Sec,
|
||||
pub *XPublicKey,
|
||||
err error,
|
||||
) {
|
||||
skb = make([]byte, secp256k1.SecKeyBytesLen)
|
||||
pkb = make([]byte, schnorr.PubKeyBytesLen)
|
||||
upkb := ToUchar(pkb)
|
||||
var parity Cint
|
||||
pub = NewXPublicKey()
|
||||
sec = &Sec{}
|
||||
for {
|
||||
if _, err = rand.Read(skb); chk.E(err) {
|
||||
return
|
||||
}
|
||||
uskb := ToUchar(skb)
|
||||
if res := C.secp256k1_keypair_create(ctx, &sec.Key, uskb); res != 1 {
|
||||
err = errorf.E("failed to create secp256k1 keypair")
|
||||
continue
|
||||
}
|
||||
C.secp256k1_keypair_xonly_pub(ctx, pub.Key, &parity, &sec.Key)
|
||||
C.secp256k1_xonly_pubkey_serialize(ctx, upkb, pub.Key)
|
||||
break
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Negate inverts a secret key so an odd prefix bit becomes even and vice versa.
|
||||
func Negate(uskb []byte) { C.secp256k1_ec_seckey_negate(ctx, ToUchar(uskb)) }
|
||||
|
||||
// ECPub wraps a standard (full, not x-only) libsecp256k1 public key, used
// for ECDH operations.
type ECPub struct {
	Key ECPubKey
}
|
||||
|
||||
// ECPubFromSchnorrBytes converts a BIP-340 public key to its even standard 33 byte encoding.
|
||||
//
|
||||
// This function is for the purpose of getting a key to do ECDH from an x-only key.
|
||||
func ECPubFromSchnorrBytes(xkb []byte) (pub *ECPub, err error) {
|
||||
if err = AssertLen(xkb, schnorr.PubKeyBytesLen, "pubkey"); chk.E(err) {
|
||||
return
|
||||
}
|
||||
pub = &ECPub{}
|
||||
p := append([]byte{0}, xkb...)
|
||||
if C.secp256k1_ec_pubkey_parse(
|
||||
ctx, &pub.Key, ToUchar(p),
|
||||
secp256k1.PubKeyBytesLenCompressed,
|
||||
) != 1 {
|
||||
err = errorf.E("failed to parse pubkey from %0x", p)
|
||||
log.I.S(pub)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// // ECPubFromBytes parses a pubkey from 33 bytes to the bitcoin-core/secp256k1 struct.
|
||||
// func ECPubFromBytes(pkb []byte) (pub *ECPub, err error) {
|
||||
// if err = AssertLen(pkb, secp256k1.PubKeyBytesLenCompressed, "pubkey"); chk.E(err) {
|
||||
// return
|
||||
// }
|
||||
// pub = &ECPub{}
|
||||
// if C.secp256k1_ec_pubkey_parse(ctx, &pub.Key, ToUchar(pkb),
|
||||
// secp256k1.PubKeyBytesLenCompressed) != 1 {
|
||||
// err = errorf.E("failed to parse pubkey from %0x", pkb)
|
||||
// log.I.S(pub)
|
||||
// return
|
||||
// }
|
||||
// return
|
||||
// }
|
||||
|
||||
// Pub is a schnorr BIP-340 public key (x-only, 32 bytes when serialized).
type Pub struct {
	Key PubKey
}
|
||||
|
||||
// PubFromBytes creates a public key from raw bytes.
|
||||
func PubFromBytes(pk []byte) (pub *Pub, err error) {
|
||||
if err = AssertLen(pk, schnorr.PubKeyBytesLen, "pubkey"); chk.E(err) {
|
||||
return
|
||||
}
|
||||
pub = new(Pub)
|
||||
if C.secp256k1_xonly_pubkey_parse(ctx, &pub.Key, ToUchar(pk)) != 1 {
|
||||
err = errorf.E("failed to parse pubkey from %0x", pk)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// PubB returns the contained public key as 32 raw bytes.
//
// The C serialize return code is not checked here; use ToBytes for the
// error-reporting variant.
func (p *Pub) PubB() (b []byte) {
	b = make([]byte, schnorr.PubKeyBytesLen)
	C.secp256k1_xonly_pubkey_serialize(ctx, ToUchar(b), &p.Key)
	return
}
|
||||
|
||||
// Pub returns a pointer to the underlying C x-only public key.
func (p *Pub) Pub() *PubKey { return &p.Key }
|
||||
|
||||
// ToBytes returns the contained public key as bytes.
|
||||
func (p *Pub) ToBytes() (b []byte, err error) {
|
||||
b = make([]byte, schnorr.PubKeyBytesLen)
|
||||
if C.secp256k1_xonly_pubkey_serialize(ctx, ToUchar(b), p.Pub()) != 1 {
|
||||
err = errorf.E("pubkey serialize failed")
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Sign a message and return a schnorr BIP-340 64 byte signature.
//
// A freshly randomized context is created for every call, with 32 bytes of
// auxiliary randomness passed to the signer. NOTE(review): allocating a new
// C context per signature is relatively expensive — presumably intentional
// side-channel hardening; confirm.
func Sign(msg *Uchar, sk *SecKey) (sig []byte, err error) {
	sig = make([]byte, schnorr.SignatureSize)
	c := CreateRandomContext()
	if C.secp256k1_schnorrsig_sign32(
		c, ToUchar(sig), msg, sk,
		GetRandom(),
	) != 1 {
		err = errorf.E("failed to sign message")
		return
	}
	return
}
|
||||
|
||||
// SignFromBytes Signs a message using a provided secret key and message as raw bytes.
|
||||
func SignFromBytes(msg, sk []byte) (sig []byte, err error) {
|
||||
var umsg *Uchar
|
||||
if umsg, err = Msg(msg); chk.E(err) {
|
||||
return
|
||||
}
|
||||
var sec *Sec
|
||||
if sec, err = SecFromBytes(sk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return Sign(umsg, sec.Sec())
|
||||
}
|
||||
|
||||
// Msg checks that a message hash is correct, and converts it for use with a Signer.
|
||||
func Msg(b []byte) (id *Uchar, err error) {
|
||||
if err = AssertLen(b, sha256.Size, "id"); chk.E(err) {
|
||||
return
|
||||
}
|
||||
id = ToUchar(b)
|
||||
return
|
||||
}
|
||||
|
||||
// Sig checks that a signature bytes is correct, and converts it for use with a Signer.
|
||||
func Sig(b []byte) (sig *Uchar, err error) {
|
||||
if err = AssertLen(b, schnorr.SignatureSize, "sig"); chk.E(err) {
|
||||
return
|
||||
}
|
||||
sig = ToUchar(b)
|
||||
return
|
||||
}
|
||||
|
||||
// Verify a message signature matches the provided PubKey.
//
// The message length is fixed at 32 bytes (a SHA-256 hash).
func Verify(msg, sig *Uchar, pk *PubKey) (valid bool) {
	return C.secp256k1_schnorrsig_verify(ctx, sig, msg, 32, pk) == 1
}
|
||||
|
||||
// VerifyFromBytes a signature from the raw bytes of the message hash, signature and public key
|
||||
func VerifyFromBytes(msg, sig, pk []byte) (err error) {
|
||||
var umsg, usig *Uchar
|
||||
if umsg, err = Msg(msg); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if usig, err = Sig(sig); chk.E(err) {
|
||||
return
|
||||
}
|
||||
var pub *Pub
|
||||
if pub, err = PubFromBytes(pk); chk.E(err) {
|
||||
return
|
||||
}
|
||||
valid := Verify(umsg, usig, pub.Pub())
|
||||
if !valid {
|
||||
err = errorf.E("failed to verify signature")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Zero wipes the memory of a SecKey by overwriting it three times with random
// data and then zeroing it.
//
// The keypair is treated as a raw 96-byte buffer — NOTE(review): this
// assumes sizeof(secp256k1_keypair) == 96; confirm against the linked
// libsecp256k1 build. Each pass fills the buffer with entropy (rand.Read
// error deliberately ignored — best-effort scrubbing) and then rewrites the
// first half with the negated mirror of the second half.
func Zero(sk *SecKey) {
	b := (*[96]byte)(unsafe.Pointer(sk))[:96]
	for range 3 {
		rand.Read(b)
		// reverse the order and negate
		lb := len(b)
		l := lb / 2
		for j := range l {
			b[j] = ^b[lb-1-j]
		}
	}
	// Final pass: leave the key material all-zero.
	for i := range b {
		b[i] = 0
	}
}
|
||||
|
||||
// Keygen is an implementation of a key miner designed to be used for vanity
// key generation with X-only BIP-340 keys.
type Keygen struct {
	// Preallocated secret-key and compressed-pubkey buffers, reused across
	// Generate calls to avoid per-iteration allocations.
	secBytes, comprPubBytes []byte
	// C pointers aliasing the buffers above.
	secUchar, cmprPubUchar *Uchar
	// sec is the reusable keypair filled by each Generate call.
	sec *Sec
	// ecpub *PublicKey
	// cmprLen holds the compressed pubkey length as a C size_t.
	cmprLen C.size_t
}
|
||||
|
||||
// NewKeygen allocates the required buffers for deriving a key. This should only be done once to avoid garbage and make
// the key mining as fast as possible.
//
// This allocates everything and creates proper CGO variables needed for the generate function so they only need to be
// allocated once per thread.
func NewKeygen() (k *Keygen) {
	k = new(Keygen)
	k.cmprLen = C.size_t(secp256k1.PubKeyBytesLenCompressed)
	k.secBytes = make([]byte, secp256k1.SecKeyBytesLen)
	k.comprPubBytes = make([]byte, secp256k1.PubKeyBytesLenCompressed)
	// C pointers alias the Go buffers so Generate can pass them to the C
	// library without re-converting each iteration.
	k.secUchar = ToUchar(k.secBytes)
	k.cmprPubUchar = ToUchar(k.comprPubBytes)
	k.sec = &Sec{}
	// k.ecpub = NewPublicKey()
	return
}
|
||||
|
||||
// Generate takes a pair of buffers for the secret and ec pubkey bytes and gathers new entropy and returns a valid
|
||||
// secret key and the compressed pubkey bytes for the partial collision search.
|
||||
//
|
||||
// The first byte of pubBytes must be sliced off before deriving the hex/Bech32 forms of the nostr public key.
|
||||
func (k *Keygen) Generate() (
|
||||
sec *Sec,
|
||||
pub *XPublicKey,
|
||||
pubBytes []byte,
|
||||
err error,
|
||||
) {
|
||||
if _, err = rand.Read(k.secBytes); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if res := C.secp256k1_keypair_create(
|
||||
ctx, &k.sec.Key, k.secUchar,
|
||||
); res != 1 {
|
||||
err = errorf.E("failed to create secp256k1 keypair")
|
||||
return
|
||||
}
|
||||
var parity Cint
|
||||
C.secp256k1_keypair_xonly_pub(ctx, pub.Key, &parity, &sec.Key)
|
||||
// C.secp256k1_keypair_pub(ctx, k.ecpub.Key, &k.sec.Key)
|
||||
// C.secp256k1_ec_pubkey_serialize(ctx, k.cmprPubUchar, &k.cmprLen, k.ecpub.Key,
|
||||
// C.SECP256K1_EC_COMPRESSED)
|
||||
// pubBytes = k.comprPubBytes
|
||||
C.secp256k1_xonly_pubkey_serialize(ctx, ToUchar(pubBytes), pub.Key)
|
||||
// pubBytes =
|
||||
return
|
||||
}
|
||||
@@ -8,20 +8,27 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"lol.mleku.dev/chk"
|
||||
"next.orly.dev/pkg/crypto/p256k"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/encoders/tag"
|
||||
)
|
||||
|
||||
// Helper function to create test event
|
||||
func createTestEventBench(id, pubkey, content string, kind uint16) *event.E {
|
||||
return &event.E{
|
||||
ID: []byte(id),
|
||||
Kind: kind,
|
||||
Pubkey: []byte(pubkey),
|
||||
Content: []byte(content),
|
||||
Tags: &tag.S{},
|
||||
CreatedAt: time.Now().Unix(),
|
||||
// Helper function to create test event for benchmarks (reuses signer)
|
||||
func createTestEventBench(b *testing.B, signer *p256k.Signer, content string, kind uint16) *event.E {
|
||||
ev := event.New()
|
||||
ev.CreatedAt = time.Now().Unix()
|
||||
ev.Kind = kind
|
||||
ev.Content = []byte(content)
|
||||
ev.Tags = tag.NewS()
|
||||
|
||||
// Sign the event properly
|
||||
if err := ev.Sign(signer); chk.E(err) {
|
||||
b.Fatalf("Failed to sign test event: %v", err)
|
||||
}
|
||||
|
||||
return ev
|
||||
}
|
||||
|
||||
func BenchmarkCheckKindsPolicy(b *testing.B) {
|
||||
@@ -38,12 +45,13 @@ func BenchmarkCheckKindsPolicy(b *testing.B) {
|
||||
}
|
||||
|
||||
func BenchmarkCheckRulePolicy(b *testing.B) {
|
||||
// Create test event
|
||||
testEvent := createTestEventBench("test-event-id", "test-pubkey", "test content", 1)
|
||||
// Generate keypair once for all events
|
||||
signer, pubkey := generateTestKeypairB(b)
|
||||
testEvent := createTestEventBench(b, signer, "test content", 1)
|
||||
|
||||
rule := Rule{
|
||||
Description: "test rule",
|
||||
WriteAllow: []string{"test-pubkey"},
|
||||
WriteAllow: []string{hex.Enc(pubkey)},
|
||||
SizeLimit: int64Ptr(10000),
|
||||
ContentLimit: int64Ptr(1000),
|
||||
MustHaveTags: []string{"p"},
|
||||
@@ -53,13 +61,14 @@ func BenchmarkCheckRulePolicy(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
policy.checkRulePolicy("write", testEvent, rule, []byte("test-pubkey"))
|
||||
policy.checkRulePolicy("write", testEvent, rule, pubkey)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCheckPolicy(b *testing.B) {
|
||||
// Create test event
|
||||
testEvent := createTestEventBench("test-event-id", "test-pubkey", "test content", 1)
|
||||
// Generate keypair once for all events
|
||||
signer, pubkey := generateTestKeypairB(b)
|
||||
testEvent := createTestEventBench(b, signer, "test content", 1)
|
||||
|
||||
policy := &P{
|
||||
Kind: Kinds{
|
||||
@@ -68,14 +77,14 @@ func BenchmarkCheckPolicy(b *testing.B) {
|
||||
Rules: map[int]Rule{
|
||||
1: {
|
||||
Description: "test rule",
|
||||
WriteAllow: []string{"test-pubkey"},
|
||||
WriteAllow: []string{hex.Enc(pubkey)},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
policy.CheckPolicy("write", testEvent, []byte("test-pubkey"), "127.0.0.1")
|
||||
policy.CheckPolicy("write", testEvent, pubkey, "127.0.0.1")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -114,8 +123,9 @@ done
|
||||
// Give the script time to start
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Create test event
|
||||
testEvent := createTestEventBench("test-event-id", "test-pubkey", "test content", 1)
|
||||
// Generate keypair once for all events
|
||||
signer, pubkey := generateTestKeypairB(b)
|
||||
testEvent := createTestEventBench(b, signer, "test content", 1)
|
||||
|
||||
policy := &P{
|
||||
Manager: manager,
|
||||
@@ -130,7 +140,7 @@ done
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
policy.CheckPolicy("write", testEvent, []byte("test-pubkey"), "127.0.0.1")
|
||||
policy.CheckPolicy("write", testEvent, pubkey, "127.0.0.1")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -190,16 +200,19 @@ func BenchmarkCheckPolicyMultipleKinds(b *testing.B) {
|
||||
Rules: rules,
|
||||
}
|
||||
|
||||
// Generate keypair once for all events
|
||||
signer, pubkey := generateTestKeypairB(b)
|
||||
|
||||
// Create test events with different kinds
|
||||
events := make([]*event.E, 100)
|
||||
for i := 0; i < 100; i++ {
|
||||
events[i] = createTestEvent("test-event-id", "test-pubkey", "test content", uint16(i+1))
|
||||
events[i] = createTestEventBench(b, signer, "test content", uint16(i+1))
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
event := events[i%100]
|
||||
policy.CheckPolicy("write", event, []byte("test-pubkey"), "127.0.0.1")
|
||||
policy.CheckPolicy("write", event, pubkey, "127.0.0.1")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -217,11 +230,13 @@ func BenchmarkCheckPolicyLargeWhitelist(b *testing.B) {
|
||||
Rules: map[int]Rule{},
|
||||
}
|
||||
|
||||
testEvent := createTestEvent("test-event-id", "test-pubkey", "test content", 500) // Kind in the middle of the whitelist
|
||||
// Generate keypair once for all events
|
||||
signer, pubkey := generateTestKeypairB(b)
|
||||
testEvent := createTestEventBench(b, signer, "test content", 500) // Kind in the middle of the whitelist
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
policy.CheckPolicy("write", testEvent, []byte("test-pubkey"), "127.0.0.1")
|
||||
policy.CheckPolicy("write", testEvent, pubkey, "127.0.0.1")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -239,22 +254,25 @@ func BenchmarkCheckPolicyLargeBlacklist(b *testing.B) {
|
||||
Rules: map[int]Rule{},
|
||||
}
|
||||
|
||||
testEvent := createTestEvent("test-event-id", "test-pubkey", "test content", 1500) // Kind not in blacklist
|
||||
// Generate keypair once for all events
|
||||
signer, pubkey := generateTestKeypairB(b)
|
||||
testEvent := createTestEventBench(b, signer, "test content", 1500) // Kind not in blacklist
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
policy.CheckPolicy("write", testEvent, []byte("test-pubkey"), "127.0.0.1")
|
||||
policy.CheckPolicy("write", testEvent, pubkey, "127.0.0.1")
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCheckPolicyComplexRule(b *testing.B) {
|
||||
// Create test event with many tags
|
||||
testEvent := createTestEventBench("test-event-id", "test-pubkey", "test content", 1)
|
||||
// Generate keypair once for all events
|
||||
signer, pubkey := generateTestKeypairB(b)
|
||||
testEvent := createTestEventBench(b, signer, "test content", 1)
|
||||
|
||||
// Add many tags
|
||||
for i := 0; i < 100; i++ {
|
||||
tagItem1 := tag.New()
|
||||
tagItem1.T = append(tagItem1.T, []byte("p"), []byte("test-pubkey"))
|
||||
tagItem1.T = append(tagItem1.T, []byte("p"), []byte(hex.Enc(pubkey)))
|
||||
*testEvent.Tags = append(*testEvent.Tags, tagItem1)
|
||||
|
||||
tagItem2 := tag.New()
|
||||
@@ -264,7 +282,7 @@ func BenchmarkCheckPolicyComplexRule(b *testing.B) {
|
||||
|
||||
rule := Rule{
|
||||
Description: "complex rule",
|
||||
WriteAllow: []string{"test-pubkey"},
|
||||
WriteAllow: []string{hex.Enc(pubkey)},
|
||||
SizeLimit: int64Ptr(100000),
|
||||
ContentLimit: int64Ptr(10000),
|
||||
MustHaveTags: []string{"p", "e"},
|
||||
@@ -275,7 +293,7 @@ func BenchmarkCheckPolicyComplexRule(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
policy.checkRulePolicy("write", testEvent, rule, []byte("test-pubkey"))
|
||||
policy.checkRulePolicy("write", testEvent, rule, pubkey)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -294,11 +312,12 @@ func BenchmarkCheckPolicyLargeEvent(b *testing.B) {
|
||||
},
|
||||
}
|
||||
|
||||
// Create test event with large content
|
||||
testEvent := createTestEvent("test-event-id", "test-pubkey", largeContent, 1)
|
||||
// Generate keypair once for all events
|
||||
signer, pubkey := generateTestKeypairB(b)
|
||||
testEvent := createTestEventBench(b, signer, largeContent, 1)
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
policy.CheckPolicy("write", testEvent, []byte("test-pubkey"), "127.0.0.1")
|
||||
policy.CheckPolicy("write", testEvent, pubkey, "127.0.0.1")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -131,11 +131,13 @@ type PolicyManager struct {
|
||||
currentCancel context.CancelFunc
|
||||
mutex sync.RWMutex
|
||||
isRunning bool
|
||||
isStarting bool
|
||||
enabled bool
|
||||
stdin io.WriteCloser
|
||||
stdout io.ReadCloser
|
||||
stderr io.ReadCloser
|
||||
responseChan chan PolicyResponse
|
||||
startupChan chan error
|
||||
}
|
||||
|
||||
// P represents a complete policy configuration for a Nostr relay.
|
||||
@@ -203,6 +205,7 @@ func NewWithManager(ctx context.Context, appName string, enabled bool) *P {
|
||||
scriptPath: scriptPath,
|
||||
enabled: enabled,
|
||||
responseChan: make(chan PolicyResponse, 100), // Buffered channel for responses
|
||||
startupChan: make(chan error, 1), // Channel for startup completion
|
||||
}
|
||||
|
||||
// Load policy configuration from JSON file
|
||||
@@ -279,8 +282,21 @@ func (p *P) CheckPolicy(access string, ev *event.E, loggedInPubkey []byte, ipAdd
|
||||
}
|
||||
|
||||
// Check if script is present and enabled
|
||||
if rule.Script != "" && p.Manager != nil && p.Manager.IsEnabled() {
|
||||
return p.checkScriptPolicy(access, ev, rule.Script, loggedInPubkey, ipAddress)
|
||||
if rule.Script != "" && p.Manager != nil {
|
||||
if p.Manager.IsEnabled() {
|
||||
// Check if script file exists before trying to use it
|
||||
if _, err := os.Stat(p.Manager.GetScriptPath()); err == nil {
|
||||
// Script exists, try to use it
|
||||
allowed, err := p.checkScriptPolicy(access, ev, rule.Script, loggedInPubkey, ipAddress)
|
||||
if err == nil {
|
||||
// Script ran successfully, return its decision
|
||||
return allowed, nil
|
||||
}
|
||||
// Script failed, fall through to apply other criteria
|
||||
log.W.F("policy script check failed for kind %d: %v, applying other criteria", ev.Kind, err)
|
||||
}
|
||||
// Script doesn't exist or failed, fall through to apply other criteria
|
||||
}
|
||||
}
|
||||
|
||||
// Apply rule-based filtering
|
||||
@@ -452,12 +468,31 @@ func (p *P) checkRulePolicy(access string, ev *event.E, rule Rule, loggedInPubke
|
||||
|
||||
// checkScriptPolicy runs the policy script to determine if event should be allowed
|
||||
func (p *P) checkScriptPolicy(access string, ev *event.E, scriptPath string, loggedInPubkey []byte, ipAddress string) (allowed bool, err error) {
|
||||
if p.Manager == nil || !p.Manager.IsRunning() {
|
||||
// If script is not running, fall back to default policy
|
||||
log.W.F("policy rule for kind %d is inactive (script not running), falling back to default policy (%s)", ev.Kind, p.DefaultPolicy)
|
||||
if p.Manager == nil {
|
||||
return false, fmt.Errorf("policy manager is not initialized")
|
||||
}
|
||||
|
||||
// If policy is disabled, fall back to default policy immediately
|
||||
if !p.Manager.IsEnabled() {
|
||||
log.W.F("policy rule for kind %d is inactive (policy disabled), falling back to default policy (%s)", ev.Kind, p.DefaultPolicy)
|
||||
return p.getDefaultPolicyAction(), nil
|
||||
}
|
||||
|
||||
// Policy is enabled, check if it's running
|
||||
if !p.Manager.IsRunning() {
|
||||
// Check if script file exists
|
||||
if _, err := os.Stat(p.Manager.GetScriptPath()); os.IsNotExist(err) {
|
||||
// Script doesn't exist, return error so caller can fall back to other criteria
|
||||
return false, fmt.Errorf("policy script does not exist at %s", p.Manager.GetScriptPath())
|
||||
}
|
||||
|
||||
// Try to start the policy and wait for it
|
||||
if err := p.Manager.ensureRunning(); err != nil {
|
||||
// Startup failed, return error so caller can fall back to other criteria
|
||||
return false, fmt.Errorf("failed to start policy script: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create policy event with additional context
|
||||
policyEvent := &PolicyEvent{
|
||||
E: ev,
|
||||
@@ -535,6 +570,91 @@ func (pm *PolicyManager) startPolicyIfExists() {
|
||||
}
|
||||
}
|
||||
|
||||
// ensureRunning ensures the policy is running, starting it if necessary.
|
||||
// It waits for startup to complete with a timeout and returns an error if startup fails.
|
||||
func (pm *PolicyManager) ensureRunning() error {
|
||||
pm.mutex.Lock()
|
||||
// Check if already running
|
||||
if pm.isRunning {
|
||||
pm.mutex.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if already starting
|
||||
if pm.isStarting {
|
||||
pm.mutex.Unlock()
|
||||
// Wait for startup to complete
|
||||
select {
|
||||
case err := <-pm.startupChan:
|
||||
if err != nil {
|
||||
return fmt.Errorf("policy startup failed: %v", err)
|
||||
}
|
||||
// Double-check it's actually running after receiving signal
|
||||
pm.mutex.RLock()
|
||||
running := pm.isRunning
|
||||
pm.mutex.RUnlock()
|
||||
if !running {
|
||||
return fmt.Errorf("policy startup completed but process is not running")
|
||||
}
|
||||
return nil
|
||||
case <-time.After(10 * time.Second):
|
||||
return fmt.Errorf("policy startup timeout")
|
||||
case <-pm.ctx.Done():
|
||||
return fmt.Errorf("policy context cancelled")
|
||||
}
|
||||
}
|
||||
|
||||
// Mark as starting
|
||||
pm.isStarting = true
|
||||
pm.mutex.Unlock()
|
||||
|
||||
// Start the policy in a goroutine
|
||||
go func() {
|
||||
err := pm.StartPolicy()
|
||||
pm.mutex.Lock()
|
||||
pm.isStarting = false
|
||||
pm.mutex.Unlock()
|
||||
// Signal startup completion (non-blocking)
|
||||
// Drain any stale value first, then send
|
||||
select {
|
||||
case <-pm.startupChan:
|
||||
default:
|
||||
}
|
||||
select {
|
||||
case pm.startupChan <- err:
|
||||
default:
|
||||
// Channel should be empty now, but if it's full, try again
|
||||
pm.startupChan <- err
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for startup to complete
|
||||
select {
|
||||
case err := <-pm.startupChan:
|
||||
if err != nil {
|
||||
return fmt.Errorf("policy startup failed: %v", err)
|
||||
}
|
||||
// Double-check it's actually running after receiving signal
|
||||
pm.mutex.RLock()
|
||||
running := pm.isRunning
|
||||
pm.mutex.RUnlock()
|
||||
if !running {
|
||||
return fmt.Errorf("policy startup completed but process is not running")
|
||||
}
|
||||
return nil
|
||||
case <-time.After(10 * time.Second):
|
||||
pm.mutex.Lock()
|
||||
pm.isStarting = false
|
||||
pm.mutex.Unlock()
|
||||
return fmt.Errorf("policy startup timeout")
|
||||
case <-pm.ctx.Done():
|
||||
pm.mutex.Lock()
|
||||
pm.isStarting = false
|
||||
pm.mutex.Unlock()
|
||||
return fmt.Errorf("policy context cancelled")
|
||||
}
|
||||
}
|
||||
|
||||
// StartPolicy starts the policy script process.
|
||||
// Returns an error if the script doesn't exist, can't be executed, or is already running.
|
||||
func (pm *PolicyManager) StartPolicy() error {
|
||||
@@ -800,6 +920,11 @@ func (pm *PolicyManager) IsRunning() bool {
|
||||
return pm.isRunning
|
||||
}
|
||||
|
||||
// GetScriptPath returns the path to the policy script.
|
||||
func (pm *PolicyManager) GetScriptPath() string {
|
||||
return pm.scriptPath
|
||||
}
|
||||
|
||||
// Shutdown gracefully shuts down the policy manager.
|
||||
// It cancels the context and stops any running policy script.
|
||||
func (pm *PolicyManager) Shutdown() {
|
||||
|
||||
516
pkg/policy/policy_integration_test.go
Normal file
516
pkg/policy/policy_integration_test.go
Normal file
@@ -0,0 +1,516 @@
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"lol.mleku.dev/chk"
|
||||
"next.orly.dev/pkg/crypto/p256k"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/encoders/kind"
|
||||
"next.orly.dev/pkg/encoders/tag"
|
||||
)
|
||||
|
||||
// TestPolicyIntegration runs the relay with policy enabled and tests event filtering
|
||||
func TestPolicyIntegration(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test")
|
||||
}
|
||||
|
||||
// Generate test keys
|
||||
allowedSigner := &p256k.Signer{}
|
||||
if err := allowedSigner.Generate(); chk.E(err) {
|
||||
t.Fatalf("Failed to generate allowed signer: %v", err)
|
||||
}
|
||||
allowedPubkeyHex := hex.Enc(allowedSigner.Pub())
|
||||
|
||||
unauthorizedSigner := &p256k.Signer{}
|
||||
if err := unauthorizedSigner.Generate(); chk.E(err) {
|
||||
t.Fatalf("Failed to generate unauthorized signer: %v", err)
|
||||
}
|
||||
|
||||
// Create temporary directory for policy config
|
||||
tempDir := t.TempDir()
|
||||
configDir := filepath.Join(tempDir, "ORLY_TEST")
|
||||
if err := os.MkdirAll(configDir, 0755); chk.E(err) {
|
||||
t.Fatalf("Failed to create config directory: %v", err)
|
||||
}
|
||||
|
||||
// Create policy JSON with generated keys
|
||||
policyJSON := map[string]interface{}{
|
||||
"kind": map[string]interface{}{
|
||||
"whitelist": []int{4678, 10306, 30520, 30919},
|
||||
},
|
||||
"rules": map[string]interface{}{
|
||||
"4678": map[string]interface{}{
|
||||
"description": "Zenotp message events",
|
||||
"script": filepath.Join(configDir, "validate4678.js"), // Won't exist, should fall back to default
|
||||
"privileged": true,
|
||||
},
|
||||
"10306": map[string]interface{}{
|
||||
"description": "End user whitelist changes",
|
||||
"read_allow": []string{allowedPubkeyHex},
|
||||
"privileged": true,
|
||||
},
|
||||
"30520": map[string]interface{}{
|
||||
"description": "Zenotp events",
|
||||
"write_allow": []string{allowedPubkeyHex},
|
||||
"privileged": true,
|
||||
},
|
||||
"30919": map[string]interface{}{
|
||||
"description": "Zenotp events",
|
||||
"write_allow": []string{allowedPubkeyHex},
|
||||
"privileged": true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
policyJSONBytes, err := json.MarshalIndent(policyJSON, "", " ")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal policy JSON: %v", err)
|
||||
}
|
||||
|
||||
policyPath := filepath.Join(configDir, "policy.json")
|
||||
if err := os.WriteFile(policyPath, policyJSONBytes, 0644); chk.E(err) {
|
||||
t.Fatalf("Failed to write policy file: %v", err)
|
||||
}
|
||||
|
||||
// Create events with proper signatures
|
||||
// Event 1: Kind 30520 with allowed pubkey (should be allowed)
|
||||
event30520Allowed := event.New()
|
||||
event30520Allowed.CreatedAt = time.Now().Unix()
|
||||
event30520Allowed.Kind = kind.K{K: 30520}.K
|
||||
event30520Allowed.Content = []byte("test event 30520")
|
||||
event30520Allowed.Tags = tag.NewS()
|
||||
addPTag(event30520Allowed, allowedSigner.Pub()) // Add p tag for privileged check
|
||||
if err := event30520Allowed.Sign(allowedSigner); chk.E(err) {
|
||||
t.Fatalf("Failed to sign event30520Allowed: %v", err)
|
||||
}
|
||||
|
||||
// Event 2: Kind 30520 with unauthorized pubkey (should be denied)
|
||||
event30520Unauthorized := event.New()
|
||||
event30520Unauthorized.CreatedAt = time.Now().Unix()
|
||||
event30520Unauthorized.Kind = kind.K{K: 30520}.K
|
||||
event30520Unauthorized.Content = []byte("test event 30520 unauthorized")
|
||||
event30520Unauthorized.Tags = tag.NewS()
|
||||
if err := event30520Unauthorized.Sign(unauthorizedSigner); chk.E(err) {
|
||||
t.Fatalf("Failed to sign event30520Unauthorized: %v", err)
|
||||
}
|
||||
|
||||
// Event 3: Kind 10306 with allowed pubkey (should be readable by allowed user)
|
||||
event10306Allowed := event.New()
|
||||
event10306Allowed.CreatedAt = time.Now().Unix()
|
||||
event10306Allowed.Kind = kind.K{K: 10306}.K
|
||||
event10306Allowed.Content = []byte("test event 10306")
|
||||
event10306Allowed.Tags = tag.NewS()
|
||||
addPTag(event10306Allowed, allowedSigner.Pub()) // Add p tag for privileged check
|
||||
if err := event10306Allowed.Sign(allowedSigner); chk.E(err) {
|
||||
t.Fatalf("Failed to sign event10306Allowed: %v", err)
|
||||
}
|
||||
|
||||
// Event 4: Kind 4678 with allowed pubkey (script-based, should fall back to default)
|
||||
event4678Allowed := event.New()
|
||||
event4678Allowed.CreatedAt = time.Now().Unix()
|
||||
event4678Allowed.Kind = kind.K{K: 4678}.K
|
||||
event4678Allowed.Content = []byte("test event 4678")
|
||||
event4678Allowed.Tags = tag.NewS()
|
||||
addPTag(event4678Allowed, allowedSigner.Pub()) // Add p tag for privileged check
|
||||
if err := event4678Allowed.Sign(allowedSigner); chk.E(err) {
|
||||
t.Fatalf("Failed to sign event4678Allowed: %v", err)
|
||||
}
|
||||
|
||||
// Test policy loading
|
||||
policy, err := New(policyJSONBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create policy: %v", err)
|
||||
}
|
||||
|
||||
// Verify policy loaded correctly
|
||||
if len(policy.Rules) != 4 {
|
||||
t.Errorf("Expected 4 rules, got %d", len(policy.Rules))
|
||||
}
|
||||
|
||||
// Test policy checks directly
|
||||
t.Run("policy checks", func(t *testing.T) {
|
||||
// Test 1: Event 30520 with allowed pubkey should be allowed
|
||||
allowed, err := policy.CheckPolicy("write", event30520Allowed, allowedSigner.Pub(), "127.0.0.1")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if !allowed {
|
||||
t.Error("Expected event30520Allowed to be allowed")
|
||||
}
|
||||
|
||||
// Test 2: Event 30520 with unauthorized pubkey should be denied
|
||||
allowed, err = policy.CheckPolicy("write", event30520Unauthorized, unauthorizedSigner.Pub(), "127.0.0.1")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if allowed {
|
||||
t.Error("Expected event30520Unauthorized to be denied")
|
||||
}
|
||||
|
||||
// Test 3: Event 10306 should be readable by allowed user
|
||||
allowed, err = policy.CheckPolicy("read", event10306Allowed, allowedSigner.Pub(), "127.0.0.1")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if !allowed {
|
||||
t.Error("Expected event10306Allowed to be readable by allowed user")
|
||||
}
|
||||
|
||||
// Test 4: Event 10306 should NOT be readable by unauthorized user
|
||||
allowed, err = policy.CheckPolicy("read", event10306Allowed, unauthorizedSigner.Pub(), "127.0.0.1")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if allowed {
|
||||
t.Error("Expected event10306Allowed to be denied for unauthorized user")
|
||||
}
|
||||
|
||||
// Test 5: Event 10306 should NOT be readable without authentication
|
||||
allowed, err = policy.CheckPolicy("read", event10306Allowed, nil, "127.0.0.1")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if allowed {
|
||||
t.Error("Expected event10306Allowed to be denied without authentication (privileged)")
|
||||
}
|
||||
|
||||
// Test 6: Event 30520 should NOT be writable without authentication
|
||||
allowed, err = policy.CheckPolicy("write", event30520Allowed, nil, "127.0.0.1")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if allowed {
|
||||
t.Error("Expected event30520Allowed to be denied without authentication (privileged)")
|
||||
}
|
||||
|
||||
// Test 7: Event 4678 should fall back to default policy (allow) when script not running
|
||||
allowed, err = policy.CheckPolicy("write", event4678Allowed, allowedSigner.Pub(), "127.0.0.1")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if !allowed {
|
||||
t.Error("Expected event4678Allowed to be allowed when script not running (falls back to default)")
|
||||
}
|
||||
|
||||
// Test 8: Event 4678 should be denied without authentication (privileged check)
|
||||
allowed, err = policy.CheckPolicy("write", event4678Allowed, nil, "127.0.0.1")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if allowed {
|
||||
t.Error("Expected event4678Allowed to be denied without authentication (privileged)")
|
||||
}
|
||||
})
|
||||
|
||||
// Test with relay simulation (checking log output)
|
||||
t.Run("relay simulation", func(t *testing.T) {
|
||||
// Note: We can't easily capture log output in tests, so we just verify
|
||||
// that policy checks work correctly
|
||||
|
||||
// Simulate policy checks that would happen in relay
|
||||
// First, publish events (simulate write checks)
|
||||
checks := []struct {
|
||||
name string
|
||||
event *event.E
|
||||
loggedInPubkey []byte
|
||||
access string
|
||||
shouldAllow bool
|
||||
shouldLog string // Expected log message substring, empty means no specific log expected
|
||||
}{
|
||||
{
|
||||
name: "write 30520 with allowed pubkey",
|
||||
event: event30520Allowed,
|
||||
loggedInPubkey: allowedSigner.Pub(),
|
||||
access: "write",
|
||||
shouldAllow: true,
|
||||
},
|
||||
{
|
||||
name: "write 30520 with unauthorized pubkey",
|
||||
event: event30520Unauthorized,
|
||||
loggedInPubkey: unauthorizedSigner.Pub(),
|
||||
access: "write",
|
||||
shouldAllow: false,
|
||||
},
|
||||
{
|
||||
name: "read 10306 with allowed pubkey",
|
||||
event: event10306Allowed,
|
||||
loggedInPubkey: allowedSigner.Pub(),
|
||||
access: "read",
|
||||
shouldAllow: true,
|
||||
},
|
||||
{
|
||||
name: "read 10306 with unauthorized pubkey",
|
||||
event: event10306Allowed,
|
||||
loggedInPubkey: unauthorizedSigner.Pub(),
|
||||
access: "read",
|
||||
shouldAllow: false,
|
||||
},
|
||||
{
|
||||
name: "read 10306 without authentication",
|
||||
event: event10306Allowed,
|
||||
loggedInPubkey: nil,
|
||||
access: "read",
|
||||
shouldAllow: false,
|
||||
},
|
||||
{
|
||||
name: "write 30520 without authentication",
|
||||
event: event30520Allowed,
|
||||
loggedInPubkey: nil,
|
||||
access: "write",
|
||||
shouldAllow: false,
|
||||
},
|
||||
{
|
||||
name: "write 4678 with allowed pubkey",
|
||||
event: event4678Allowed,
|
||||
loggedInPubkey: allowedSigner.Pub(),
|
||||
access: "write",
|
||||
shouldAllow: true,
|
||||
shouldLog: "", // Should not log "policy rule is inactive" if script is not configured
|
||||
},
|
||||
}
|
||||
|
||||
for _, check := range checks {
|
||||
t.Run(check.name, func(t *testing.T) {
|
||||
allowed, err := policy.CheckPolicy(check.access, check.event, check.loggedInPubkey, "127.0.0.1")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
if allowed != check.shouldAllow {
|
||||
t.Errorf("Expected allowed=%v, got %v", check.shouldAllow, allowed)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
// Test event IDs are regenerated correctly after signing
|
||||
t.Run("event ID regeneration", func(t *testing.T) {
|
||||
// Create a new event, sign it, then verify ID is correct
|
||||
testEvent := event.New()
|
||||
testEvent.CreatedAt = time.Now().Unix()
|
||||
testEvent.Kind = kind.K{K: 30520}.K
|
||||
testEvent.Content = []byte("test content")
|
||||
testEvent.Tags = tag.NewS()
|
||||
|
||||
// Sign the event
|
||||
if err := testEvent.Sign(allowedSigner); chk.E(err) {
|
||||
t.Fatalf("Failed to sign test event: %v", err)
|
||||
}
|
||||
|
||||
// Verify event ID is correct (should be SHA256 of serialized event)
|
||||
if len(testEvent.ID) != 32 {
|
||||
t.Errorf("Expected event ID to be 32 bytes, got %d", len(testEvent.ID))
|
||||
}
|
||||
|
||||
// Verify signature is correct
|
||||
if len(testEvent.Sig) != 64 {
|
||||
t.Errorf("Expected event signature to be 64 bytes, got %d", len(testEvent.Sig))
|
||||
}
|
||||
|
||||
// Verify signature validates using event's Verify method
|
||||
valid, err := testEvent.Verify()
|
||||
if err != nil {
|
||||
t.Errorf("Failed to verify signature: %v", err)
|
||||
}
|
||||
if !valid {
|
||||
t.Error("Event signature verification failed")
|
||||
}
|
||||
})
|
||||
|
||||
// Test WebSocket client simulation (for future integration)
|
||||
t.Run("websocket client simulation", func(t *testing.T) {
|
||||
// This test simulates what would happen if we connected via WebSocket
|
||||
// For now, we'll just verify the events can be serialized correctly
|
||||
|
||||
events := []*event.E{
|
||||
event30520Allowed,
|
||||
event30520Unauthorized,
|
||||
event10306Allowed,
|
||||
event4678Allowed,
|
||||
}
|
||||
|
||||
for i, ev := range events {
|
||||
t.Run(fmt.Sprintf("event_%d", i), func(t *testing.T) {
|
||||
// Serialize event
|
||||
serialized := ev.Serialize()
|
||||
if len(serialized) == 0 {
|
||||
t.Error("Event serialization returned empty")
|
||||
}
|
||||
|
||||
// Verify event can be parsed back (simplified check)
|
||||
if len(ev.ID) != 32 {
|
||||
t.Errorf("Event ID length incorrect: %d", len(ev.ID))
|
||||
}
|
||||
if len(ev.Pubkey) != 32 {
|
||||
t.Errorf("Event pubkey length incorrect: %d", len(ev.Pubkey))
|
||||
}
|
||||
if len(ev.Sig) != 64 {
|
||||
t.Errorf("Event signature length incorrect: %d", len(ev.Sig))
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// TestPolicyWithRelay creates a comprehensive test that simulates relay behavior
|
||||
func TestPolicyWithRelay(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping integration test")
|
||||
}
|
||||
|
||||
// Generate keys
|
||||
allowedSigner := &p256k.Signer{}
|
||||
if err := allowedSigner.Generate(); chk.E(err) {
|
||||
t.Fatalf("Failed to generate allowed signer: %v", err)
|
||||
}
|
||||
allowedPubkeyHex := hex.Enc(allowedSigner.Pub())
|
||||
|
||||
unauthorizedSigner := &p256k.Signer{}
|
||||
if err := unauthorizedSigner.Generate(); chk.E(err) {
|
||||
t.Fatalf("Failed to generate unauthorized signer: %v", err)
|
||||
}
|
||||
|
||||
// Create policy JSON
|
||||
policyJSON := map[string]interface{}{
|
||||
"kind": map[string]interface{}{
|
||||
"whitelist": []int{4678, 10306, 30520, 30919},
|
||||
},
|
||||
"rules": map[string]interface{}{
|
||||
"10306": map[string]interface{}{
|
||||
"description": "End user whitelist changes",
|
||||
"read_allow": []string{allowedPubkeyHex},
|
||||
"privileged": true,
|
||||
},
|
||||
"30520": map[string]interface{}{
|
||||
"description": "Zenotp events",
|
||||
"write_allow": []string{allowedPubkeyHex},
|
||||
"privileged": true,
|
||||
},
|
||||
"30919": map[string]interface{}{
|
||||
"description": "Zenotp events",
|
||||
"write_allow": []string{allowedPubkeyHex},
|
||||
"privileged": true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
policyJSONBytes, err := json.Marshal(policyJSON)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal policy JSON: %v", err)
|
||||
}
|
||||
|
||||
policy, err := New(policyJSONBytes)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create policy: %v", err)
|
||||
}
|
||||
|
||||
// Create test event (kind 30520) with allowed pubkey
|
||||
testEvent := event.New()
|
||||
testEvent.CreatedAt = time.Now().Unix()
|
||||
testEvent.Kind = kind.K{K: 30520}.K
|
||||
testEvent.Content = []byte("test content")
|
||||
testEvent.Tags = tag.NewS()
|
||||
addPTag(testEvent, allowedSigner.Pub())
|
||||
if err := testEvent.Sign(allowedSigner); chk.E(err) {
|
||||
t.Fatalf("Failed to sign test event: %v", err)
|
||||
}
|
||||
|
||||
// Test scenarios
|
||||
scenarios := []struct {
|
||||
name string
|
||||
loggedInPubkey []byte
|
||||
expectedResult bool
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "authenticated as allowed pubkey",
|
||||
loggedInPubkey: allowedSigner.Pub(),
|
||||
expectedResult: true,
|
||||
description: "Should allow when authenticated as allowed pubkey",
|
||||
},
|
||||
{
|
||||
name: "unauthenticated",
|
||||
loggedInPubkey: nil,
|
||||
expectedResult: false,
|
||||
description: "Should deny when not authenticated (privileged check)",
|
||||
},
|
||||
{
|
||||
name: "authenticated as different pubkey",
|
||||
loggedInPubkey: unauthorizedSigner.Pub(),
|
||||
expectedResult: false,
|
||||
description: "Should deny when authenticated as different pubkey",
|
||||
},
|
||||
}
|
||||
|
||||
for _, scenario := range scenarios {
|
||||
t.Run(scenario.name, func(t *testing.T) {
|
||||
allowed, err := policy.CheckPolicy("write", testEvent, scenario.loggedInPubkey, "127.0.0.1")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
if allowed != scenario.expectedResult {
|
||||
t.Errorf("%s: Expected allowed=%v, got %v", scenario.description, scenario.expectedResult, allowed)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Test read access for kind 10306
|
||||
readEvent := event.New()
|
||||
readEvent.CreatedAt = time.Now().Unix()
|
||||
readEvent.Kind = kind.K{K: 10306}.K
|
||||
readEvent.Content = []byte("test read event")
|
||||
readEvent.Tags = tag.NewS()
|
||||
addPTag(readEvent, allowedSigner.Pub())
|
||||
if err := readEvent.Sign(allowedSigner); chk.E(err) {
|
||||
t.Fatalf("Failed to sign read event: %v", err)
|
||||
}
|
||||
|
||||
readScenarios := []struct {
|
||||
name string
|
||||
loggedInPubkey []byte
|
||||
expectedResult bool
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "read authenticated as allowed pubkey",
|
||||
loggedInPubkey: allowedSigner.Pub(),
|
||||
expectedResult: true,
|
||||
description: "Should allow read when authenticated as allowed pubkey",
|
||||
},
|
||||
{
|
||||
name: "read unauthenticated",
|
||||
loggedInPubkey: nil,
|
||||
expectedResult: false,
|
||||
description: "Should deny read when not authenticated (privileged check)",
|
||||
},
|
||||
{
|
||||
name: "read authenticated as different pubkey",
|
||||
loggedInPubkey: unauthorizedSigner.Pub(),
|
||||
expectedResult: false,
|
||||
description: "Should deny read when authenticated as different pubkey",
|
||||
},
|
||||
}
|
||||
|
||||
for _, scenario := range readScenarios {
|
||||
t.Run(scenario.name, func(t *testing.T) {
|
||||
allowed, err := policy.CheckPolicy("read", readEvent, scenario.loggedInPubkey, "127.0.0.1")
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
if allowed != scenario.expectedResult {
|
||||
t.Errorf("%s: Expected allowed=%v, got %v", scenario.description, scenario.expectedResult, allowed)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,11 +1,28 @@
|
||||
package publish
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/interfaces/publisher"
|
||||
"next.orly.dev/pkg/interfaces/typer"
|
||||
)
|
||||
|
||||
// WriteRequest represents a write operation to be performed by the write worker.
type WriteRequest struct {
	Data      []byte    // payload bytes to write to the socket
	MsgType   int       // websocket message type for the write
	IsControl bool      // true when this is a control frame rather than data
	Deadline  time.Time // write deadline; zero value means no deadline set here
}
|
||||
|
||||
// WriteChanSetter defines the interface for setting write channels
|
||||
type WriteChanSetter interface {
|
||||
SetWriteChan(*websocket.Conn, chan<- WriteRequest)
|
||||
GetWriteChan(*websocket.Conn) (chan<- WriteRequest, bool)
|
||||
}
|
||||
|
||||
// S is the control structure for the subscription management scheme.
|
||||
type S struct {
|
||||
publisher.Publishers
|
||||
@@ -36,3 +53,15 @@ func (s *S) Receive(msg typer.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetSocketPublisher returns the socketapi publisher instance
|
||||
func (s *S) GetSocketPublisher() WriteChanSetter {
|
||||
for _, p := range s.Publishers {
|
||||
if p.Type() == "socketapi" {
|
||||
if socketPub, ok := p.(WriteChanSetter); ok {
|
||||
return socketPub
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
200
pkg/run/run.go
Normal file
200
pkg/run/run.go
Normal file
@@ -0,0 +1,200 @@
|
||||
package run
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/adrg/xdg"
|
||||
"lol.mleku.dev/chk"
|
||||
lol "lol.mleku.dev"
|
||||
"next.orly.dev/app"
|
||||
"next.orly.dev/app/config"
|
||||
"next.orly.dev/pkg/acl"
|
||||
"next.orly.dev/pkg/database"
|
||||
)
|
||||
|
||||
// Options configures relay startup behavior.
type Options struct {
	// CleanupDataDir controls whether the data directory is deleted on Stop().
	// Defaults to true. Set to false to preserve the data directory.
	CleanupDataDir *bool

	// StdoutWriter is an optional writer to receive stdout logs.
	// If nil, stdout will be captured to a buffer accessible via Relay.Stdout().
	StdoutWriter io.Writer

	// StderrWriter is an optional writer to receive stderr logs.
	// If nil, stderr will be captured to a buffer accessible via Relay.Stderr().
	StderrWriter io.Writer
}
|
||||
|
||||
// Relay represents a running relay instance that can be started and stopped.
|
||||
type Relay struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
db *database.D
|
||||
quit chan struct{}
|
||||
dataDir string
|
||||
cleanupDataDir bool
|
||||
|
||||
// Log capture
|
||||
stdoutBuf *bytes.Buffer
|
||||
stderrBuf *bytes.Buffer
|
||||
stdoutWriter io.Writer
|
||||
stderrWriter io.Writer
|
||||
logMu sync.RWMutex
|
||||
}
|
||||
|
||||
// Start initializes and starts a relay with the given configuration.
|
||||
// It bypasses the configuration loading step and uses the provided config directly.
|
||||
//
|
||||
// Parameters:
|
||||
// - cfg: The configuration to use for the relay
|
||||
// - opts: Optional configuration for relay behavior. If nil, defaults are used.
|
||||
//
|
||||
// Returns:
|
||||
// - relay: A Relay instance that can be used to stop the relay
|
||||
// - err: An error if initialization or startup fails
|
||||
func Start(cfg *config.C, opts *Options) (relay *Relay, err error) {
|
||||
relay = &Relay{
|
||||
cleanupDataDir: true,
|
||||
}
|
||||
|
||||
// Apply options
|
||||
var userStdoutWriter, userStderrWriter io.Writer
|
||||
if opts != nil {
|
||||
if opts.CleanupDataDir != nil {
|
||||
relay.cleanupDataDir = *opts.CleanupDataDir
|
||||
}
|
||||
userStdoutWriter = opts.StdoutWriter
|
||||
userStderrWriter = opts.StderrWriter
|
||||
}
|
||||
|
||||
// Set up log capture buffers
|
||||
relay.stdoutBuf = &bytes.Buffer{}
|
||||
relay.stderrBuf = &bytes.Buffer{}
|
||||
|
||||
// Build writers list for stdout
|
||||
stdoutWriters := []io.Writer{relay.stdoutBuf}
|
||||
if userStdoutWriter != nil {
|
||||
stdoutWriters = append(stdoutWriters, userStdoutWriter)
|
||||
}
|
||||
stdoutWriters = append(stdoutWriters, os.Stdout)
|
||||
relay.stdoutWriter = io.MultiWriter(stdoutWriters...)
|
||||
|
||||
// Build writers list for stderr
|
||||
stderrWriters := []io.Writer{relay.stderrBuf}
|
||||
if userStderrWriter != nil {
|
||||
stderrWriters = append(stderrWriters, userStderrWriter)
|
||||
}
|
||||
stderrWriters = append(stderrWriters, os.Stderr)
|
||||
relay.stderrWriter = io.MultiWriter(stderrWriters...)
|
||||
|
||||
// Set up logging - write to appropriate destination and capture
|
||||
if cfg.LogToStdout {
|
||||
lol.Writer = relay.stdoutWriter
|
||||
} else {
|
||||
lol.Writer = relay.stderrWriter
|
||||
}
|
||||
lol.SetLogLevel(cfg.LogLevel)
|
||||
|
||||
// Expand DataDir if needed
|
||||
if cfg.DataDir == "" || strings.Contains(cfg.DataDir, "~") {
|
||||
cfg.DataDir = filepath.Join(xdg.DataHome, cfg.AppName)
|
||||
}
|
||||
relay.dataDir = cfg.DataDir
|
||||
|
||||
// Create context
|
||||
relay.ctx, relay.cancel = context.WithCancel(context.Background())
|
||||
|
||||
// Initialize database
|
||||
if relay.db, err = database.New(
|
||||
relay.ctx, relay.cancel, cfg.DataDir, cfg.DBLogLevel,
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
// Configure ACL
|
||||
acl.Registry.Active.Store(cfg.ACLMode)
|
||||
if err = acl.Registry.Configure(cfg, relay.db, relay.ctx); chk.E(err) {
|
||||
return
|
||||
}
|
||||
acl.Registry.Syncer()
|
||||
|
||||
// Start the relay
|
||||
relay.quit = app.Run(relay.ctx, cfg, relay.db)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Stop gracefully stops the relay by canceling the context and closing the database.
|
||||
// If CleanupDataDir is enabled (default), it also removes the data directory.
|
||||
//
|
||||
// Returns:
|
||||
// - err: An error if shutdown fails
|
||||
func (r *Relay) Stop() (err error) {
|
||||
if r.cancel != nil {
|
||||
r.cancel()
|
||||
}
|
||||
if r.quit != nil {
|
||||
<-r.quit
|
||||
}
|
||||
if r.db != nil {
|
||||
err = r.db.Close()
|
||||
}
|
||||
// Clean up data directory if enabled
|
||||
if r.cleanupDataDir && r.dataDir != "" {
|
||||
if rmErr := os.RemoveAll(r.dataDir); rmErr != nil {
|
||||
if err == nil {
|
||||
err = rmErr
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Stdout returns the complete stdout log buffer contents.
|
||||
func (r *Relay) Stdout() string {
|
||||
r.logMu.RLock()
|
||||
defer r.logMu.RUnlock()
|
||||
if r.stdoutBuf == nil {
|
||||
return ""
|
||||
}
|
||||
return r.stdoutBuf.String()
|
||||
}
|
||||
|
||||
// Stderr returns the complete stderr log buffer contents.
|
||||
func (r *Relay) Stderr() string {
|
||||
r.logMu.RLock()
|
||||
defer r.logMu.RUnlock()
|
||||
if r.stderrBuf == nil {
|
||||
return ""
|
||||
}
|
||||
return r.stderrBuf.String()
|
||||
}
|
||||
|
||||
// StdoutBytes returns the complete stdout log buffer as bytes.
|
||||
func (r *Relay) StdoutBytes() []byte {
|
||||
r.logMu.RLock()
|
||||
defer r.logMu.RUnlock()
|
||||
if r.stdoutBuf == nil {
|
||||
return nil
|
||||
}
|
||||
return r.stdoutBuf.Bytes()
|
||||
}
|
||||
|
||||
// StderrBytes returns the complete stderr log buffer as bytes.
|
||||
func (r *Relay) StderrBytes() []byte {
|
||||
r.logMu.RLock()
|
||||
defer r.logMu.RUnlock()
|
||||
if r.stderrBuf == nil {
|
||||
return nil
|
||||
}
|
||||
return r.stderrBuf.Bytes()
|
||||
}
|
||||
|
||||
@@ -1 +1 @@
|
||||
v0.20.0
|
||||
v0.23.0
|
||||
326
relay-tester/client.go
Normal file
326
relay-tester/client.go
Normal file
@@ -0,0 +1,326 @@
|
||||
package relaytester
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
"lol.mleku.dev/errorf"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
)
|
||||
|
||||
// Client wraps a WebSocket connection to a relay for testing.
|
||||
type Client struct {
|
||||
conn *websocket.Conn
|
||||
url string
|
||||
mu sync.Mutex
|
||||
subs map[string]chan []byte
|
||||
complete map[string]bool // Track if subscription is complete (e.g., by ID)
|
||||
okCh chan []byte // Channel for OK messages
|
||||
countCh chan []byte // Channel for COUNT messages
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
// NewClient creates a new test client connected to the relay.
|
||||
func NewClient(url string) (c *Client, err error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
var conn *websocket.Conn
|
||||
dialer := websocket.Dialer{
|
||||
HandshakeTimeout: 5 * time.Second,
|
||||
}
|
||||
if conn, _, err = dialer.Dial(url, nil); err != nil {
|
||||
cancel()
|
||||
return
|
||||
}
|
||||
c = &Client{
|
||||
conn: conn,
|
||||
url: url,
|
||||
subs: make(map[string]chan []byte),
|
||||
complete: make(map[string]bool),
|
||||
okCh: make(chan []byte, 100),
|
||||
countCh: make(chan []byte, 100),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
go c.readLoop()
|
||||
return
|
||||
}
|
||||
|
||||
// Close closes the client connection.
|
||||
func (c *Client) Close() error {
|
||||
c.cancel()
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
// URL returns the relay URL.
|
||||
func (c *Client) URL() string {
|
||||
return c.url
|
||||
}
|
||||
|
||||
// Send sends a JSON message to the relay.
|
||||
func (c *Client) Send(msg interface{}) (err error) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
var data []byte
|
||||
if data, err = json.Marshal(msg); err != nil {
|
||||
return errorf.E("failed to marshal message: %w", err)
|
||||
}
|
||||
if err = c.conn.WriteMessage(websocket.TextMessage, data); err != nil {
|
||||
return errorf.E("failed to write message: %w", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// readLoop reads messages from the relay and routes them to subscriptions.
|
||||
func (c *Client) readLoop() {
|
||||
defer c.conn.Close()
|
||||
for {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
_, msg, err := c.conn.ReadMessage()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var raw []interface{}
|
||||
if err = json.Unmarshal(msg, &raw); err != nil {
|
||||
continue
|
||||
}
|
||||
if len(raw) < 2 {
|
||||
continue
|
||||
}
|
||||
typ, ok := raw[0].(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
c.mu.Lock()
|
||||
switch typ {
|
||||
case "EVENT":
|
||||
if len(raw) >= 2 {
|
||||
if subID, ok := raw[1].(string); ok {
|
||||
if ch, exists := c.subs[subID]; exists {
|
||||
select {
|
||||
case ch <- msg:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case "EOSE":
|
||||
if len(raw) >= 2 {
|
||||
if subID, ok := raw[1].(string); ok {
|
||||
if ch, exists := c.subs[subID]; exists {
|
||||
// Send EOSE message to channel
|
||||
select {
|
||||
case ch <- msg:
|
||||
default:
|
||||
}
|
||||
// For complete subscriptions (by ID), close the channel after EOSE
|
||||
if c.complete[subID] {
|
||||
close(ch)
|
||||
delete(c.subs, subID)
|
||||
delete(c.complete, subID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case "OK":
|
||||
// Route OK messages to okCh for WaitForOK
|
||||
select {
|
||||
case c.okCh <- msg:
|
||||
default:
|
||||
}
|
||||
case "COUNT":
|
||||
// Route COUNT messages to countCh for Count
|
||||
select {
|
||||
case c.countCh <- msg:
|
||||
default:
|
||||
}
|
||||
case "NOTICE":
|
||||
// Notice messages are logged
|
||||
case "CLOSED":
|
||||
// Closed messages indicate subscription ended
|
||||
case "AUTH":
|
||||
// Auth challenge messages
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// Subscribe creates a subscription and returns a channel for events.
|
||||
func (c *Client) Subscribe(subID string, filters []interface{}) (ch chan []byte, err error) {
|
||||
req := []interface{}{"REQ", subID}
|
||||
req = append(req, filters...)
|
||||
if err = c.Send(req); err != nil {
|
||||
return
|
||||
}
|
||||
c.mu.Lock()
|
||||
ch = make(chan []byte, 100)
|
||||
c.subs[subID] = ch
|
||||
// Check if subscription is complete (has 'ids' filter)
|
||||
isComplete := false
|
||||
for _, f := range filters {
|
||||
if fMap, ok := f.(map[string]interface{}); ok {
|
||||
if ids, exists := fMap["ids"]; exists {
|
||||
if idList, ok := ids.([]string); ok && len(idList) > 0 {
|
||||
isComplete = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
c.complete[subID] = isComplete
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Unsubscribe closes a subscription.
|
||||
func (c *Client) Unsubscribe(subID string) error {
|
||||
c.mu.Lock()
|
||||
if ch, exists := c.subs[subID]; exists {
|
||||
// Channel might already be closed by EOSE, so use recover to handle gracefully
|
||||
func() {
|
||||
defer func() {
|
||||
if recover() != nil {
|
||||
// Channel was already closed, ignore
|
||||
}
|
||||
}()
|
||||
close(ch)
|
||||
}()
|
||||
delete(c.subs, subID)
|
||||
delete(c.complete, subID)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
return c.Send([]interface{}{"CLOSE", subID})
|
||||
}
|
||||
|
||||
// Publish sends an EVENT message to the relay.
|
||||
func (c *Client) Publish(ev *event.E) (err error) {
|
||||
evJSON := ev.Serialize()
|
||||
var evMap map[string]interface{}
|
||||
if err = json.Unmarshal(evJSON, &evMap); err != nil {
|
||||
return errorf.E("failed to unmarshal event: %w", err)
|
||||
}
|
||||
return c.Send([]interface{}{"EVENT", evMap})
|
||||
}
|
||||
|
||||
// WaitForOK waits for an OK response for the given event ID.
|
||||
func (c *Client) WaitForOK(eventID []byte, timeout time.Duration) (accepted bool, reason string, err error) {
|
||||
ctx, cancel := context.WithTimeout(c.ctx, timeout)
|
||||
defer cancel()
|
||||
idStr := hex.Enc(eventID)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return false, "", errorf.E("timeout waiting for OK response")
|
||||
case msg := <-c.okCh:
|
||||
var raw []interface{}
|
||||
if err = json.Unmarshal(msg, &raw); err != nil {
|
||||
continue
|
||||
}
|
||||
if len(raw) < 3 {
|
||||
continue
|
||||
}
|
||||
if id, ok := raw[1].(string); ok && id == idStr {
|
||||
accepted, _ = raw[2].(bool)
|
||||
if len(raw) > 3 {
|
||||
reason, _ = raw[3].(string)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Count sends a COUNT request and returns the count.
|
||||
func (c *Client) Count(filters []interface{}) (count int64, err error) {
|
||||
req := []interface{}{"COUNT", "count-sub"}
|
||||
req = append(req, filters...)
|
||||
if err = c.Send(req); err != nil {
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, errorf.E("timeout waiting for COUNT response")
|
||||
case msg := <-c.countCh:
|
||||
var raw []interface{}
|
||||
if err = json.Unmarshal(msg, &raw); err != nil {
|
||||
continue
|
||||
}
|
||||
if len(raw) >= 3 {
|
||||
if subID, ok := raw[1].(string); ok && subID == "count-sub" {
|
||||
// COUNT response format: ["COUNT", "subscription-id", count, approximate?]
|
||||
if cnt, ok := raw[2].(float64); ok {
|
||||
return int64(cnt), nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Auth sends an AUTH message with the signed event.
|
||||
func (c *Client) Auth(ev *event.E) error {
|
||||
evJSON := ev.Serialize()
|
||||
var evMap map[string]interface{}
|
||||
if err := json.Unmarshal(evJSON, &evMap); err != nil {
|
||||
return errorf.E("failed to unmarshal event: %w", err)
|
||||
}
|
||||
return c.Send([]interface{}{"AUTH", evMap})
|
||||
}
|
||||
|
||||
// GetEvents collects all events from a subscription until EOSE.
|
||||
func (c *Client) GetEvents(subID string, filters []interface{}, timeout time.Duration) (events []*event.E, err error) {
|
||||
ch, err := c.Subscribe(subID, filters)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer c.Unsubscribe(subID)
|
||||
ctx, cancel := context.WithTimeout(c.ctx, timeout)
|
||||
defer cancel()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return events, nil
|
||||
case msg, ok := <-ch:
|
||||
if !ok {
|
||||
return events, nil
|
||||
}
|
||||
var raw []interface{}
|
||||
if err = json.Unmarshal(msg, &raw); err != nil {
|
||||
continue
|
||||
}
|
||||
if len(raw) < 2 {
|
||||
continue
|
||||
}
|
||||
typ, ok := raw[0].(string)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
switch typ {
|
||||
case "EVENT":
|
||||
if len(raw) >= 3 {
|
||||
if evData, ok := raw[2].(map[string]interface{}); ok {
|
||||
evJSON, _ := json.Marshal(evData)
|
||||
ev := event.New()
|
||||
if _, err = ev.Unmarshal(evJSON); err == nil {
|
||||
events = append(events, ev)
|
||||
}
|
||||
}
|
||||
}
|
||||
case "EOSE":
|
||||
// End of stored events - return what we have
|
||||
return events, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
131
relay-tester/keys.go
Normal file
131
relay-tester/keys.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package relaytester
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"lol.mleku.dev/chk"
|
||||
"next.orly.dev/pkg/crypto/p256k"
|
||||
"next.orly.dev/pkg/encoders/bech32encoding"
|
||||
"next.orly.dev/pkg/encoders/event"
|
||||
"next.orly.dev/pkg/encoders/hex"
|
||||
"next.orly.dev/pkg/encoders/kind"
|
||||
"next.orly.dev/pkg/encoders/tag"
|
||||
)
|
||||
|
||||
// KeyPair represents a test keypair.
|
||||
type KeyPair struct {
|
||||
Secret *p256k.Signer
|
||||
Pubkey []byte
|
||||
Nsec string
|
||||
Npub string
|
||||
}
|
||||
|
||||
// GenerateKeyPair generates a new keypair for testing.
|
||||
func GenerateKeyPair() (kp *KeyPair, err error) {
|
||||
kp = &KeyPair{}
|
||||
kp.Secret = &p256k.Signer{}
|
||||
if err = kp.Secret.Generate(); chk.E(err) {
|
||||
return
|
||||
}
|
||||
kp.Pubkey = kp.Secret.Pub()
|
||||
nsecBytes, err := bech32encoding.BinToNsec(kp.Secret.Sec())
|
||||
if chk.E(err) {
|
||||
return
|
||||
}
|
||||
kp.Nsec = string(nsecBytes)
|
||||
npubBytes, err := bech32encoding.BinToNpub(kp.Pubkey)
|
||||
if chk.E(err) {
|
||||
return
|
||||
}
|
||||
kp.Npub = string(npubBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// CreateEvent creates a signed event with the given parameters.
|
||||
func CreateEvent(signer *p256k.Signer, kindNum uint16, content string, tags *tag.S) (ev *event.E, err error) {
|
||||
ev = event.New()
|
||||
ev.CreatedAt = time.Now().Unix()
|
||||
ev.Kind = kindNum
|
||||
ev.Content = []byte(content)
|
||||
if tags != nil {
|
||||
ev.Tags = tags
|
||||
} else {
|
||||
ev.Tags = tag.NewS()
|
||||
}
|
||||
if err = ev.Sign(signer); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CreateEventWithTags creates an event with specific tags.
|
||||
func CreateEventWithTags(signer *p256k.Signer, kindNum uint16, content string, tagPairs [][]string) (ev *event.E, err error) {
|
||||
tags := tag.NewS()
|
||||
for _, pair := range tagPairs {
|
||||
if len(pair) >= 2 {
|
||||
// Build tag fields as []byte variadic arguments
|
||||
tagFields := make([][]byte, len(pair))
|
||||
tagFields[0] = []byte(pair[0])
|
||||
for i := 1; i < len(pair); i++ {
|
||||
tagFields[i] = []byte(pair[i])
|
||||
}
|
||||
tags.Append(tag.NewFromBytesSlice(tagFields...))
|
||||
}
|
||||
}
|
||||
return CreateEvent(signer, kindNum, content, tags)
|
||||
}
|
||||
|
||||
// CreateReplaceableEvent creates a replaceable event (kind 0-3, 10000-19999).
// It is a thin wrapper over CreateEvent with no tags; the replaceable
// semantics come purely from the kind number the relay sees.
func CreateReplaceableEvent(signer *p256k.Signer, kindNum uint16, content string) (ev *event.E, err error) {
	return CreateEvent(signer, kindNum, content, nil)
}
|
||||
|
||||
// CreateEphemeralEvent creates an ephemeral event (kind 20000-29999).
// Identical to CreateEvent with no tags; ephemeral handling is decided
// by the relay from the kind range.
func CreateEphemeralEvent(signer *p256k.Signer, kindNum uint16, content string) (ev *event.E, err error) {
	return CreateEvent(signer, kindNum, content, nil)
}
|
||||
|
||||
// CreateDeleteEvent creates a deletion event (kind 5).
|
||||
func CreateDeleteEvent(signer *p256k.Signer, eventIDs [][]byte, reason string) (ev *event.E, err error) {
|
||||
tags := tag.NewS()
|
||||
for _, id := range eventIDs {
|
||||
// e tags must contain hex-encoded event IDs
|
||||
tags.Append(tag.NewFromBytesSlice([]byte("e"), []byte(hex.Enc(id))))
|
||||
}
|
||||
if reason != "" {
|
||||
tags.Append(tag.NewFromBytesSlice([]byte("content"), []byte(reason)))
|
||||
}
|
||||
return CreateEvent(signer, kind.EventDeletion.K, reason, tags)
|
||||
}
|
||||
|
||||
// CreateParameterizedReplaceableEvent creates a parameterized replaceable
// event (kind 30000-39999). The dTag value becomes the event's "d" tag,
// which together with pubkey and kind forms the replacement address.
func CreateParameterizedReplaceableEvent(signer *p256k.Signer, kindNum uint16, content string, dTag string) (ev *event.E, err error) {
	tags := tag.NewS()
	tags.Append(tag.NewFromBytesSlice([]byte("d"), []byte(dTag)))
	return CreateEvent(signer, kindNum, content, tags)
}
|
||||
|
||||
// RandomID generates a random 32-byte ID using the crypto/rand source.
func RandomID() (id []byte, err error) {
	buf := make([]byte, 32)
	// rand.Read fills the whole buffer or reports an error.
	if _, readErr := rand.Read(buf); readErr != nil {
		return nil, fmt.Errorf("failed to generate random ID: %w", readErr)
	}
	return buf, nil
}
|
||||
|
||||
// MustHex decodes a hex string or panics. Intended for test fixtures and
// constants where invalid hex is a programmer error.
func MustHex(s string) []byte {
	b, err := hex.Dec(s)
	if err != nil {
		panic(fmt.Sprintf("invalid hex: %s", s))
	}
	return b
}
|
||||
|
||||
// HexID returns the hex-encoded event ID of ev.
func HexID(ev *event.E) string {
	return hex.Enc(ev.ID)
}
|
||||
449
relay-tester/test.go
Normal file
449
relay-tester/test.go
Normal file
@@ -0,0 +1,449 @@
|
||||
package relaytester
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"lol.mleku.dev/errorf"
|
||||
)
|
||||
|
||||
// TestResult represents the result of a single test case, serializable
// to JSON for machine-readable output.
type TestResult struct {
	Name     string `json:"test"`           // test case name
	Pass     bool   `json:"pass"`           // true if the test succeeded
	Required bool   `json:"required"`       // whether failure should fail the whole run
	Info     string `json:"info,omitempty"` // human-readable detail, usually the failure reason
}
|
||||
|
||||
// TestFunc is a function that runs a test case against a connected client,
// using two pre-generated keypairs for signing test events.
type TestFunc func(client *Client, key1, key2 *KeyPair) (result TestResult)
|
||||
|
||||
// TestCase represents a test case with dependencies.
type TestCase struct {
	Name         string   // unique test name; also used as dependency key
	Required     bool     // failure of a required test fails the suite
	Func         TestFunc // the test implementation
	Dependencies []string // Names of tests that must run before this one
}
|
||||
|
||||
// TestSuite runs all tests against a relay.
type TestSuite struct {
	relayURL string                 // websocket URL of the relay under test
	key1     *KeyPair               // first test keypair
	key2     *KeyPair               // second test keypair
	tests    map[string]*TestCase   // registered tests by name
	results  map[string]TestResult  // results of tests that have run
	order    []string               // dependency-sorted execution order (see topologicalSort)
}
|
||||
|
||||
// NewTestSuite creates a new test suite.
|
||||
func NewTestSuite(relayURL string) (suite *TestSuite, err error) {
|
||||
suite = &TestSuite{
|
||||
relayURL: relayURL,
|
||||
tests: make(map[string]*TestCase),
|
||||
results: make(map[string]TestResult),
|
||||
}
|
||||
if suite.key1, err = GenerateKeyPair(); err != nil {
|
||||
return
|
||||
}
|
||||
if suite.key2, err = GenerateKeyPair(); err != nil {
|
||||
return
|
||||
}
|
||||
suite.registerTests()
|
||||
return
|
||||
}
|
||||
|
||||
// AddTest adds a test case to the suite. A test with the same name as an
// existing one silently replaces it (map assignment).
func (s *TestSuite) AddTest(tc *TestCase) {
	s.tests[tc.Name] = tc
}
|
||||
|
||||
// registerTests registers all built-in test cases and then computes the
// dependency-respecting execution order. Most tests depend on
// "Publishes basic event" since they need stored events to query.
func (s *TestSuite) registerTests() {
	allTests := []*TestCase{
		{
			Name:     "Publishes basic event",
			Required: true,
			Func:     testPublishBasicEvent,
		},
		{
			Name:         "Finds event by ID",
			Required:     true,
			Func:         testFindByID,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Finds event by author",
			Required:     true,
			Func:         testFindByAuthor,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Finds event by kind",
			Required:     true,
			Func:         testFindByKind,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Finds event by tags",
			Required:     true,
			Func:         testFindByTags,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Finds by multiple tags",
			Required:     true,
			Func:         testFindByMultipleTags,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Finds by time range",
			Required:     true,
			Func:         testFindByTimeRange,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:     "Rejects invalid signature",
			Required: true,
			Func:     testRejectInvalidSignature,
		},
		{
			Name:     "Rejects future event",
			Required: true,
			Func:     testRejectFutureEvent,
		},
		{
			Name:     "Rejects expired event",
			Required: false,
			Func:     testRejectExpiredEvent,
		},
		{
			Name:     "Handles replaceable events",
			Required: true,
			Func:     testReplaceableEvents,
		},
		{
			Name:     "Handles ephemeral events",
			Required: false,
			Func:     testEphemeralEvents,
		},
		{
			Name:     "Handles parameterized replaceable events",
			Required: true,
			Func:     testParameterizedReplaceableEvents,
		},
		{
			Name:         "Handles deletion events",
			Required:     true,
			Func:         testDeletionEvents,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Handles COUNT request",
			Required:     true,
			Func:         testCountRequest,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Handles limit parameter",
			Required:     true,
			Func:         testLimitParameter,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Handles multiple filters",
			Required:     true,
			Func:         testMultipleFilters,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:     "Handles subscription close",
			Required: true,
			Func:     testSubscriptionClose,
		},
		// Filter tests
		{
			Name:         "Since and until filters are inclusive",
			Required:     true,
			Func:         testSinceUntilAreInclusive,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:     "Limit zero works",
			Required: true,
			Func:     testLimitZero,
		},
		// Find tests
		{
			Name:         "Events are ordered from newest to oldest",
			Required:     true,
			Func:         testEventsOrderedFromNewestToOldest,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Newest events are returned when filter is limited",
			Required:     true,
			Func:         testNewestEventsWhenLimited,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Finds by pubkey and kind",
			Required:     true,
			Func:         testFindByPubkeyAndKind,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Finds by pubkey and tags",
			Required:     true,
			Func:         testFindByPubkeyAndTags,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Finds by kind and tags",
			Required:     true,
			Func:         testFindByKindAndTags,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Finds by scrape",
			Required:     true,
			Func:         testFindByScrape,
			Dependencies: []string{"Publishes basic event"},
		},
		// Replaceable event tests
		{
			Name:         "Replaces metadata",
			Required:     true,
			Func:         testReplacesMetadata,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Replaces contact list",
			Required:     true,
			Func:         testReplacesContactList,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Replaced events are still available by ID",
			Required:     false,
			Func:         testReplacedEventsStillAvailableByID,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Replaceable events replace older ones",
			Required:     true,
			Func:         testReplaceableEventRemovesPrevious,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Replaceable events rejected if a newer one exists",
			Required:     true,
			Func:         testReplaceableEventRejectedIfFuture,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Addressable events replace older ones",
			Required:     true,
			Func:         testAddressableEventRemovesPrevious,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Addressable events rejected if a newer one exists",
			Required:     true,
			Func:         testAddressableEventRejectedIfFuture,
			Dependencies: []string{"Publishes basic event"},
		},
		// Deletion tests
		{
			Name:         "Deletes by a-tag address",
			Required:     true,
			Func:         testDeleteByAddr,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Delete by a-tag deletes older but not newer",
			Required:     true,
			Func:         testDeleteByAddrOnlyDeletesOlder,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Delete by a-tag is bound by a-tag",
			Required:     true,
			Func:         testDeleteByAddrIsBoundByTag,
			Dependencies: []string{"Publishes basic event"},
		},
		// Ephemeral tests
		{
			Name:         "Ephemeral subscriptions work",
			Required:     false,
			Func:         testEphemeralSubscriptionsWork,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Persists ephemeral events",
			Required:     false,
			Func:         testPersistsEphemeralEvents,
			Dependencies: []string{"Publishes basic event"},
		},
		// EOSE tests
		{
			Name:     "Supports EOSE",
			Required: true,
			Func:     testSupportsEose,
		},
		{
			Name:     "Subscription receives event after ping period",
			Required: true,
			Func:     testSubscriptionReceivesEventAfterPingPeriod,
		},
		{
			Name:     "Closes complete subscriptions after EOSE",
			Required: false,
			Func:     testClosesCompleteSubscriptionsAfterEose,
		},
		{
			Name:     "Keeps open incomplete subscriptions after EOSE",
			Required: true,
			Func:     testKeepsOpenIncompleteSubscriptionsAfterEose,
		},
		// JSON tests
		{
			Name:         "Accepts events with empty tags",
			Required:     false,
			Func:         testAcceptsEventsWithEmptyTags,
			Dependencies: []string{"Publishes basic event"},
		},
		{
			Name:         "Accepts NIP-01 JSON escape sequences",
			Required:     true,
			Func:         testAcceptsNip1JsonEscapeSequences,
			Dependencies: []string{"Publishes basic event"},
		},
		// Registration tests
		{
			Name:     "Sends OK after EVENT",
			Required: true,
			Func:     testSendsOkAfterEvent,
		},
		{
			Name:     "Verifies event signatures",
			Required: true,
			Func:     testVerifiesSignatures,
		},
		{
			Name:     "Verifies event ID hashes",
			Required: true,
			Func:     testVerifiesIdHashes,
		},
	}
	for _, tc := range allTests {
		s.AddTest(tc)
	}
	// Compute s.order so dependencies always run before their dependents.
	s.topologicalSort()
}
|
||||
|
||||
// topologicalSort orders tests based on dependencies, using a recursive
// depth-first search with post-order append: each test is added to s.order
// only after all of its dependencies have been added.
//
// NOTE(review): the temp map detects cycles, but a cycle is broken silently
// (the visit simply returns) rather than reported — a cyclic dependency
// would still produce an order, just not a fully valid one.
func (s *TestSuite) topologicalSort() {
	visited := make(map[string]bool) // permanently done nodes
	temp := make(map[string]bool)    // nodes on the current DFS path (cycle guard)
	var visit func(name string)
	visit = func(name string) {
		if temp[name] {
			// Already on the current path: dependency cycle; bail out.
			return
		}
		if visited[name] {
			return
		}
		temp[name] = true
		// Dependencies naming unregistered tests are skipped here.
		if tc, exists := s.tests[name]; exists {
			for _, dep := range tc.Dependencies {
				visit(dep)
			}
		}
		temp[name] = false
		visited[name] = true
		s.order = append(s.order, name)
	}
	// Map iteration order is random, so the relative order of independent
	// tests varies between runs; dependency order is always respected.
	for name := range s.tests {
		if !visited[name] {
			visit(name)
		}
	}
}
|
||||
|
||||
// Run runs all tests in the suite over a single client connection, in the
// dependency-sorted order computed by topologicalSort, recording each
// result in s.results and returning them in execution order.
//
// NOTE(review): unlike RunTest, Run does not check whether a test's
// dependencies passed before running it — dependents of a failed test
// still execute.
func (s *TestSuite) Run() (results []TestResult, err error) {
	client, err := NewClient(s.relayURL)
	if err != nil {
		return nil, errorf.E("failed to connect to relay: %w", err)
	}
	defer client.Close()
	for _, name := range s.order {
		tc := s.tests[name]
		if tc == nil {
			continue
		}
		result := tc.Func(client, s.key1, s.key2)
		// Name/Required are stamped here so TestFuncs don't have to set them.
		result.Name = name
		result.Required = tc.Required
		s.results[name] = result
		results = append(results, result)
		time.Sleep(100 * time.Millisecond) // Small delay between tests
	}
	return
}
|
||||
|
||||
// RunTest runs a specific test by name on a fresh client connection.
// It fails fast if the test is unknown, or if any declared dependency has
// not been run yet or did not pass (dependency results are read from
// s.results, so prerequisites must have been executed earlier via Run or
// RunTest on the same suite).
func (s *TestSuite) RunTest(testName string) (result TestResult, err error) {
	tc, exists := s.tests[testName]
	if !exists {
		return result, errorf.E("test %s not found", testName)
	}
	// Check dependencies
	for _, dep := range tc.Dependencies {
		if _, exists := s.results[dep]; !exists {
			return result, errorf.E("test %s depends on %s which has not been run", testName, dep)
		}
		if !s.results[dep].Pass {
			return result, errorf.E("test %s depends on %s which failed", testName, dep)
		}
	}
	client, err := NewClient(s.relayURL)
	if err != nil {
		return result, errorf.E("failed to connect to relay: %w", err)
	}
	defer client.Close()
	result = tc.Func(client, s.key1, s.key2)
	result.Name = testName
	result.Required = tc.Required
	s.results[testName] = result
	return
}
|
||||
|
||||
// GetResults returns all test results recorded so far.
// NOTE(review): this exposes the suite's internal map — mutations by the
// caller will be visible to the suite.
func (s *TestSuite) GetResults() map[string]TestResult {
	return s.results
}
|
||||
|
||||
// ListTests returns a list of all test names in execution order
// (the slice is the suite's internal order slice, not a copy).
func (s *TestSuite) ListTests() []string {
	return s.order
}
|
||||
|
||||
// GetTestNames returns all registered test names as a map (name -> required).
|
||||
func (s *TestSuite) GetTestNames() map[string]bool {
|
||||
result := make(map[string]bool)
|
||||
for name, tc := range s.tests {
|
||||
result[name] = tc.Required
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// FormatJSON formats results as JSON.
|
||||
func FormatJSON(results []TestResult) (output string, err error) {
|
||||
var data []byte
|
||||
if data, err = json.Marshal(results); err != nil {
|
||||
return
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
1949
relay-tester/tests.go
Normal file
1949
relay-tester/tests.go
Normal file
File diff suppressed because it is too large
Load Diff
245
relay_test.go
Normal file
245
relay_test.go
Normal file
@@ -0,0 +1,245 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
lol "lol.mleku.dev"
|
||||
"next.orly.dev/app/config"
|
||||
"next.orly.dev/pkg/run"
|
||||
relaytester "next.orly.dev/relay-tester"
|
||||
)
|
||||
|
||||
// Test configuration, populated from custom command-line flags parsed
// manually in TestMain (see -relay-url, -test-name, -json, -keep-data,
// -port, -data-dir).
var (
	testRelayURL string // external relay to test; empty means start a local one
	testName     string // run only this named test when non-empty
	testJSON     bool   // emit results as JSON instead of t.Logf output
	keepDataDir  bool   // keep the relay's data directory after the run
	relayPort    int    // fixed port for the local relay; 0 picks a free one
	relayDataDir string // data directory for the local relay; empty uses a temp dir
)
|
||||
|
||||
// TestRelay runs the relay-tester suite against either an external relay
// (-relay-url) or a locally started instance. Required-test failures are
// reported via t.Errorf after all tests have run.
func TestRelay(t *testing.T) {
	var err error
	var relay *run.Relay
	var relayURL string

	// Determine relay URL
	if testRelayURL != "" {
		relayURL = testRelayURL
	} else {
		// Start local relay for testing
		var port int
		if relay, port, err = startTestRelay(); err != nil {
			t.Fatalf("Failed to start test relay: %v", err)
		}
		defer func() {
			if stopErr := relay.Stop(); stopErr != nil {
				t.Logf("Error stopping relay: %v", stopErr)
			}
		}()
		relayURL = fmt.Sprintf("ws://127.0.0.1:%d", port)
		t.Logf("Waiting for relay to be ready at %s...", relayURL)
		// Wait for relay to be ready - try connecting to verify it's up
		if err = waitForRelay(relayURL, 10*time.Second); err != nil {
			t.Fatalf("Relay not ready after timeout: %v", err)
		}
		t.Logf("Relay is ready at %s", relayURL)
	}

	// Create test suite
	t.Logf("Creating test suite for %s...", relayURL)
	suite, err := relaytester.NewTestSuite(relayURL)
	if err != nil {
		t.Fatalf("Failed to create test suite: %v", err)
	}
	t.Logf("Test suite created, running tests...")

	// Run tests
	var results []relaytester.TestResult
	if testName != "" {
		// Run specific test
		result, err := suite.RunTest(testName)
		if err != nil {
			t.Fatalf("Failed to run test %s: %v", testName, err)
		}
		results = []relaytester.TestResult{result}
	} else {
		// Run all tests
		if results, err = suite.Run(); err != nil {
			t.Fatalf("Failed to run tests: %v", err)
		}
	}

	// Output results
	if testJSON {
		jsonOutput, err := relaytester.FormatJSON(results)
		if err != nil {
			t.Fatalf("Failed to format JSON: %v", err)
		}
		fmt.Println(jsonOutput)
	} else {
		outputResults(results, t)
	}

	// Check if any required tests failed
	for _, result := range results {
		if result.Required && !result.Pass {
			t.Errorf("Required test '%s' failed: %s", result.Name, result.Info)
		}
	}
}
|
||||
|
||||
// startTestRelay configures and starts a local relay instance for testing,
// returning the running relay and the port it listens on. The data
// directory is cleaned up on relay stop unless -keep-data was given.
func startTestRelay() (relay *run.Relay, port int, err error) {
	cfg := &config.C{
		AppName:             "ORLY-TEST",
		DataDir:             relayDataDir,
		Listen:              "127.0.0.1",
		Port:                0, // Always use random port, unless overridden via -port flag
		HealthPort:          0,
		EnableShutdown:      false,
		LogLevel:            "warn",
		DBLogLevel:          "warn",
		DBBlockCacheMB:      512,
		DBIndexCacheMB:      256,
		LogToStdout:         false,
		PprofHTTP:           false,
		ACLMode:             "none",
		AuthRequired:        false,
		AuthToWrite:         false,
		SubscriptionEnabled: false,
		MonthlyPriceSats:    6000,
		FollowListFrequency: time.Hour,
		WebDisableEmbedded:  false,
		SprocketEnabled:     false,
		SpiderMode:          "none",
		PolicyEnabled:       false,
	}

	// Use explicitly set port if provided via flag, otherwise find an available port
	if relayPort > 0 {
		cfg.Port = relayPort
	} else {
		// NOTE(review): bind-then-close to discover a free port is racy —
		// another process could grab it before the relay binds. Acceptable
		// for tests on a loopback interface.
		var listener net.Listener
		if listener, err = net.Listen("tcp", "127.0.0.1:0"); err != nil {
			return nil, 0, fmt.Errorf("failed to find available port: %w", err)
		}
		addr := listener.Addr().(*net.TCPAddr)
		cfg.Port = addr.Port
		listener.Close()
	}

	// Set default data dir if not specified
	if cfg.DataDir == "" {
		tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("orly-test-%d", time.Now().UnixNano()))
		cfg.DataDir = tmpDir
	}

	// Set up logging
	lol.SetLogLevel(cfg.LogLevel)

	// Create options
	cleanup := !keepDataDir
	opts := &run.Options{
		CleanupDataDir: &cleanup,
	}

	// Start relay
	if relay, err = run.Start(cfg, opts); err != nil {
		return nil, 0, fmt.Errorf("failed to start relay: %w", err)
	}

	return relay, cfg.Port, nil
}
|
||||
|
||||
// waitForRelay waits for the relay to be ready by attempting to connect
|
||||
func waitForRelay(url string, timeout time.Duration) error {
|
||||
// Extract host:port from ws:// URL
|
||||
addr := url
|
||||
if len(url) > 7 && url[:5] == "ws://" {
|
||||
addr = url[5:]
|
||||
}
|
||||
deadline := time.Now().Add(timeout)
|
||||
attempts := 0
|
||||
for time.Now().Before(deadline) {
|
||||
conn, err := net.DialTimeout("tcp", addr, 500*time.Millisecond)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
return nil
|
||||
}
|
||||
attempts++
|
||||
if attempts%10 == 0 {
|
||||
// Log every 10th attempt (every second)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
return fmt.Errorf("timeout waiting for relay at %s after %d attempts", url, attempts)
|
||||
}
|
||||
|
||||
// outputResults logs each result and a summary via the testing API.
// Required failures are reported with t.Errorf (failing the test);
// optional failures are only logged.
func outputResults(results []relaytester.TestResult, t *testing.T) {
	passed := 0
	failed := 0
	requiredFailed := 0

	for _, result := range results {
		if result.Pass {
			passed++
			t.Logf("PASS: %s", result.Name)
		} else {
			failed++
			if result.Required {
				requiredFailed++
				t.Errorf("FAIL (required): %s - %s", result.Name, result.Info)
			} else {
				t.Logf("FAIL (optional): %s - %s", result.Name, result.Info)
			}
		}
	}

	t.Logf("\nTest Summary:")
	t.Logf("  Total: %d", len(results))
	t.Logf("  Passed: %d", passed)
	t.Logf("  Failed: %d", failed)
	t.Logf("  Required Failed: %d", requiredFailed)
}
|
||||
|
||||
// TestMain allows custom test setup/teardown. It hand-parses the custom
// flags (-relay-url, -test-name, -json, -keep-data, -port, -data-dir)
// from os.Args instead of using the flag package, to avoid conflicts with
// Go's built-in test flags. A flag given without its value is silently
// ignored; unknown arguments are skipped.
func TestMain(m *testing.M) {
	// Manually parse our custom flags to avoid conflicts with Go's test flags
	for i := 1; i < len(os.Args); i++ {
		arg := os.Args[i]
		switch arg {
		case "-relay-url":
			if i+1 < len(os.Args) {
				testRelayURL = os.Args[i+1]
				i++
			}
		case "-test-name":
			if i+1 < len(os.Args) {
				testName = os.Args[i+1]
				i++
			}
		case "-json":
			testJSON = true
		case "-keep-data":
			keepDataDir = true
		case "-port":
			if i+1 < len(os.Args) {
				// Parse errors are ignored here; relayPort then stays 0 and
				// a random port is chosen by startTestRelay.
				fmt.Sscanf(os.Args[i+1], "%d", &relayPort)
				i++
			}
		case "-data-dir":
			if i+1 < len(os.Args) {
				relayDataDir = os.Args[i+1]
				i++
			}
		}
	}

	code := m.Run()
	os.Exit(code)
}
|
||||
@@ -71,6 +71,9 @@ check_go_installation() {
|
||||
install_go() {
|
||||
log_info "Installing Go $GO_VERSION..."
|
||||
|
||||
# Save original directory
|
||||
local original_dir=$(pwd)
|
||||
|
||||
# Determine architecture
|
||||
local arch=$(uname -m)
|
||||
case $arch in
|
||||
@@ -100,13 +103,17 @@ install_go() {
|
||||
rm -rf "$GOROOT"
|
||||
fi
|
||||
|
||||
# Extract Go
|
||||
log_info "Extracting Go to $GOROOT..."
|
||||
tar -xf "$go_archive"
|
||||
|
||||
# Extract Go to a temporary location first, then move to final destination
|
||||
log_info "Extracting Go..."
|
||||
tar -xf "$go_archive" -C /tmp
|
||||
mv /tmp/go "$GOROOT"
|
||||
|
||||
# Clean up
|
||||
rm -f "$go_archive"
|
||||
|
||||
# Return to original directory
|
||||
cd "$original_dir"
|
||||
|
||||
log_success "Go $GO_VERSION installed successfully"
|
||||
}
|
||||
|
||||
@@ -167,7 +174,10 @@ build_application() {
|
||||
log_info "Updating embedded web assets..."
|
||||
./scripts/update-embedded-web.sh
|
||||
|
||||
# The update-embedded-web.sh script should have built the binary
|
||||
# Build the binary in the current directory
|
||||
log_info "Building binary in current directory..."
|
||||
CGO_ENABLED=1 go build -o "$BINARY_NAME"
|
||||
|
||||
if [[ -f "./$BINARY_NAME" ]]; then
|
||||
log_success "ORLY relay built successfully"
|
||||
else
|
||||
|
||||
198
scripts/run-policy-filter-test.sh
Executable file
198
scripts/run-policy-filter-test.sh
Executable file
@@ -0,0 +1,198 @@
|
||||
#!/bin/bash
set -euo pipefail

# Policy Filter Integration Test
# This script runs the relay with the example policy and tests event filtering

# Config
PORT=${PORT:-34568}
URL=${URL:-ws://127.0.0.1:${PORT}}
LOG=/tmp/orly-policy-filter.out
PID=/tmp/orly-policy-filter.pid
DATADIR=$(mktemp -d)
CONFIG_DIR="$HOME/.config/ORLY_POLICY_TEST"

# cleanup stops the relay (if running) and removes the temp data/config dirs.
cleanup() {
    trap - EXIT
    if [[ -f "$PID" ]]; then
        kill -INT "$(cat "$PID")" 2>/dev/null || true
        rm -f "$PID"
    fi
    rm -rf "$DATADIR"
    rm -rf "$CONFIG_DIR"
}
trap cleanup EXIT

echo "🧪 Policy Filter Integration Test"
echo "=================================="

# Create config directory
mkdir -p "$CONFIG_DIR"

# Generate keys using Go helper
echo "🔑 Generating test keys..."
KEYGEN_TMP=$(mktemp)
cat > "$KEYGEN_TMP.go" <<'EOF'
package main

import (
	"encoding/json"
	"fmt"
	"next.orly.dev/pkg/crypto/p256k"
	"next.orly.dev/pkg/encoders/hex"
)

func main() {
	// Generate allowed signer
	allowedSigner := &p256k.Signer{}
	if err := allowedSigner.Generate(); err != nil {
		panic(err)
	}
	allowedPubkeyHex := hex.Enc(allowedSigner.Pub())
	allowedSecHex := hex.Enc(allowedSigner.Sec())

	// Generate unauthorized signer
	unauthorizedSigner := &p256k.Signer{}
	if err := unauthorizedSigner.Generate(); err != nil {
		panic(err)
	}
	unauthorizedPubkeyHex := hex.Enc(unauthorizedSigner.Pub())
	unauthorizedSecHex := hex.Enc(unauthorizedSigner.Sec())

	result := map[string]string{
		"allowedPubkey":      allowedPubkeyHex,
		"allowedSec":         allowedSecHex,
		"unauthorizedPubkey": unauthorizedPubkeyHex,
		"unauthorizedSec":    unauthorizedSecHex,
	}

	jsonBytes, _ := json.Marshal(result)
	fmt.Println(string(jsonBytes))
}
EOF

# Run from the project root directory
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
cd "$PROJECT_ROOT"
KEYS=$(go run -tags=cgo "$KEYGEN_TMP.go" 2>&1 | grep -E '^\{.*\}$' || true)
# Fix: mktemp created "$KEYGEN_TMP" itself as an empty file; remove it along
# with the generated .go file so no temp files are leaked.
rm -f "$KEYGEN_TMP.go" "$KEYGEN_TMP"
cd - > /dev/null

ALLOWED_PUBKEY=$(echo "$KEYS" | jq -r '.allowedPubkey')
ALLOWED_SEC=$(echo "$KEYS" | jq -r '.allowedSec')
UNAUTHORIZED_PUBKEY=$(echo "$KEYS" | jq -r '.unauthorizedPubkey')
UNAUTHORIZED_SEC=$(echo "$KEYS" | jq -r '.unauthorizedSec')

echo "✅ Generated keys:"
echo "  Allowed pubkey: $ALLOWED_PUBKEY"
echo "  Unauthorized pubkey: $UNAUTHORIZED_PUBKEY"

# Create policy JSON with generated keys
echo "📝 Creating policy.json..."
cat > "$CONFIG_DIR/policy.json" <<EOF
{
  "kind": {
    "whitelist": [4678, 10306, 30520, 30919]
  },
  "rules": {
    "4678": {
      "description": "Zenotp message events",
      "script": "$CONFIG_DIR/validate4678.js",
      "privileged": true
    },
    "10306": {
      "description": "End user whitelist changes",
      "read_allow": [
        "$ALLOWED_PUBKEY"
      ],
      "privileged": true
    },
    "30520": {
      "description": "Zenotp events",
      "write_allow": [
        "$ALLOWED_PUBKEY"
      ],
      "privileged": true
    },
    "30919": {
      "description": "Zenotp events",
      "write_allow": [
        "$ALLOWED_PUBKEY"
      ],
      "privileged": true
    }
  }
}
EOF

echo "✅ Policy file created at: $CONFIG_DIR/policy.json"

# Build relay and test client
# NOTE(review): the build assumes the current working directory is the
# project root (we cd'd back above) — invoke this script from the repo root.
echo "🔨 Building relay..."
go build -o orly .

# Start relay
echo "🚀 Starting relay on ${URL} with policy enabled..."
ORLY_APP_NAME="ORLY_POLICY_TEST" \
ORLY_DATA_DIR="$DATADIR" \
ORLY_PORT=${PORT} \
ORLY_POLICY_ENABLED=true \
ORLY_ACL_MODE=none \
ORLY_AUTH_TO_WRITE=true \
ORLY_LOG_LEVEL=info \
./orly >"$LOG" 2>&1 & echo $! >"$PID"

# Wait for relay to start
sleep 3
if ! ps -p "$(cat "$PID")" >/dev/null 2>&1; then
    echo "❌ Relay failed to start; logs:" >&2
    sed -n '1,200p' "$LOG" >&2
    exit 1
fi

echo "✅ Relay started (PID: $(cat "$PID"))"

# Build test client
echo "🔨 Building test client..."
go build -o cmd/policyfiltertest/policyfiltertest ./cmd/policyfiltertest

# Export keys for test client
export ALLOWED_PUBKEY
export ALLOWED_SEC
export UNAUTHORIZED_PUBKEY
export UNAUTHORIZED_SEC

# Run tests
echo "🧪 Running policy filter tests..."
set +e
cmd/policyfiltertest/policyfiltertest -url "${URL}" -allowed-pubkey "$ALLOWED_PUBKEY" -allowed-sec "$ALLOWED_SEC" -unauthorized-pubkey "$UNAUTHORIZED_PUBKEY" -unauthorized-sec "$UNAUTHORIZED_SEC"
TEST_RESULT=$?
set -e

# Check logs for "policy rule is inactive" messages
echo "📋 Checking logs for policy rule inactivity..."
if grep -q "policy rule is inactive" "$LOG"; then
    echo "⚠️ WARNING: Found 'policy rule is inactive' messages in logs"
    grep "policy rule is inactive" "$LOG" | head -5
else
    echo "✅ No 'policy rule is inactive' messages found (good)"
fi

# Check logs for policy filtered events
echo "📋 Checking logs for policy filtered events..."
if grep -q "policy filtered out event" "$LOG"; then
    echo "✅ Found policy filtered events (expected):"
    grep "policy filtered out event" "$LOG" | head -5
fi

if [ $TEST_RESULT -eq 0 ]; then
    echo "✅ All tests passed!"
    exit 0
else
    echo "❌ Tests failed with exit code $TEST_RESULT"
    echo "📋 Last 50 lines of relay log:"
    tail -50 "$LOG"
    exit $TEST_RESULT
fi
||||
|
||||
Submodule scripts/secp256k1 deleted from 0cdc758a56
0
scripts/sprocket/SPROCKET_TEST_README.md
Normal file → Executable file
0
scripts/sprocket/SPROCKET_TEST_README.md
Normal file → Executable file
0
scripts/sprocket/test-sprocket-complete.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-complete.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-demo.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-demo.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-example.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-example.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-final.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-final.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-manual.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-manual.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-simple.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-simple.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-working.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket-working.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket.py
Normal file → Executable file
0
scripts/sprocket/test-sprocket.py
Normal file → Executable file
0
scripts/sprocket/test-sprocket.sh
Normal file → Executable file
0
scripts/sprocket/test-sprocket.sh
Normal file → Executable file
@@ -1,14 +1,40 @@
|
||||
#!/usr/bin/env bash
|
||||
set -e
|
||||
|
||||
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
|
||||
apt -y install build-essential autoconf libtool git wget
|
||||
cd $SCRIPT_DIR
|
||||
|
||||
# Update package lists
|
||||
apt-get update
|
||||
|
||||
# Try to install from package manager first (much faster)
|
||||
echo "Attempting to install secp256k1 from package manager..."
|
||||
if apt-get install -y libsecp256k1-dev >/dev/null 2>&1; then
|
||||
echo "✓ Installed secp256k1 from package manager"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Fall back to building from source if package not available
|
||||
echo "Package not available in repository, building from source..."
|
||||
|
||||
# Install build dependencies
|
||||
apt-get install -y build-essential autoconf automake libtool git wget pkg-config
|
||||
|
||||
cd "$SCRIPT_DIR"
|
||||
rm -rf secp256k1
|
||||
|
||||
# Clone and setup secp256k1
|
||||
git clone https://github.com/bitcoin-core/secp256k1.git
|
||||
cd secp256k1
|
||||
git checkout v0.6.0
|
||||
|
||||
# Initialize and update submodules
|
||||
git submodule init
|
||||
git submodule update
|
||||
|
||||
# Build and install
|
||||
./autogen.sh
|
||||
./configure --enable-module-schnorrsig --enable-module-ecdh --prefix=/usr
|
||||
make -j1
|
||||
sudo make install
|
||||
make -j$(nproc)
|
||||
make install
|
||||
|
||||
cd "$SCRIPT_DIR"
|
||||
|
||||
Reference in New Issue
Block a user