Compare commits

31 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 5237fb1a1f |  |
|  | 6901950059 |  |
|  | 251fc17933 |  |
|  | fdb9e18b03 |  |
|  | 67552edf04 |  |
|  | f25b760d84 |  |
|  | bfa38822e0 |  |
|  | eac5e05e77 |  |
|  | b72f2dd51e |  |
|  | cc32703be0 |  |
|  | 994d26bb09 |  |
|  | ea2d833e66 |  |
|  | af04f89df8 |  |
|  | fab2f104ff |  |
|  | 06940efcec |  |
|  | 0ba36a3f67 |  |
|  | d4bee83992 |  |
|  | aabb536d13 |  |
|  | 498073460c |  |
|  | 11d378bfc3 |  |
|  | 9b7e8d28de |  |
|  | c16ee76638 |  |
|  | 132fdc9f36 |  |
|  | 4f1d48c247 |  |
|  | 651791aec1 |  |
|  | 53d649c64e |  |
|  | 4dafab3fd6 |  |
|  | f2475c48b7 |  |
|  | b5448f4153 |  |
|  | 11d318d4e3 |  |
|  | 53e8e160dd |  |
.gitignore (vendored, 2 changed lines)

@@ -85,6 +85,7 @@ node_modules/**
!.name
!.gitignore
!version
!out.jsonl
# ...even if they are in subdirectories
!*/
/blocklist.json
@@ -102,3 +103,4 @@ pkg/database/testrealy
/.idea/codeStyles/codeStyleConfig.xml
/.idea/material_theme_project_new.xml
/.idea/orly.iml
/.idea/go.imports.xml
main.go (46 changed lines)

@@ -1,26 +1,26 @@
// Package main is a nostr relay with a simple follow/mute list authentication
// scheme and the new HTTP REST based protocol. Configuration is via environment
// scheme and the new HTTP REST-based protocol. Configuration is via environment
// variables or an optional .env file.
package main

import (
"fmt"
"net/http"
_ "net/http/pprof"
"os"

"github.com/pkg/profile"
_ "net/http/pprof"
app2 "orly.dev/pkg/app"
"orly.dev/pkg/app/config"
"orly.dev/pkg/app/relay"
"orly.dev/pkg/app/relay/options"
"orly.dev/pkg/database"
"orly.dev/pkg/protocol/openapi"
"orly.dev/pkg/protocol/servemux"
"orly.dev/pkg/utils/chk"
"orly.dev/pkg/utils/context"
"orly.dev/pkg/utils/interrupt"
"orly.dev/pkg/utils/log"
"orly.dev/pkg/utils/lol"
"orly.dev/pkg/version"
"os"
)

func main() {
@@ -43,15 +43,24 @@ func main() {
os.Exit(0)
}
lol.SetLogLevel(cfg.LogLevel)
if cfg.Pprof {
defer profile.Start(profile.MemProfile).Stop()
go func() {
chk.E(http.ListenAndServe("127.0.0.1:6060", nil))
}()
if cfg.Pprof != "" {
switch cfg.Pprof {
case "cpu":
prof := profile.Start(profile.CPUProfile)
defer prof.Stop()
case "memory":
prof := profile.Start(profile.MemProfile)
defer prof.Stop()
case "allocation":
prof := profile.Start(profile.MemProfileAllocs)
defer prof.Stop()
}
}
c, cancel := context.Cancel(context.Bg())
storage, err := database.New(c, cancel, cfg.DataDir, cfg.DbLogLevel)
if chk.E(err) {
var storage *database.D
if storage, err = database.New(
c, cancel, cfg.DataDir, cfg.DbLogLevel,
); chk.E(err) {
os.Exit(1)
}
r := &app2.Relay{C: cfg, Store: storage}
@@ -66,9 +75,20 @@ func main() {
C: cfg,
}
var opts []options.O
if server, err = relay.NewServer(serverParams, opts...); chk.E(err) {
serveMux := servemux.NewServeMux()
if server, err = relay.NewServer(
serverParams, serveMux, opts...,
); chk.E(err) {
os.Exit(1)
}
openapi.New(
server,
cfg.AppName,
version.V,
version.Description,
"/api",
serveMux,
)
if err != nil {
log.F.F("failed to create server: %v", err)
}
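The profiling change above replaces the boolean ORLY_PPROF flag with a mode string that selects a github.com/pkg/profile mode. Below is a minimal standalone sketch of the same pattern; the `-mode` flag and the throwaway program around it are illustrative only, while the three mode names mirror the switch added in main.go.

```go
// Minimal sketch: pick a pkg/profile mode from a string, as main.go now does.
// The "mode" flag and this standalone program are assumptions for illustration.
package main

import (
	"flag"
	"log"

	"github.com/pkg/profile"
)

func main() {
	mode := flag.String("mode", "", "cpu, memory or allocation")
	flag.Parse()
	switch *mode {
	case "cpu":
		defer profile.Start(profile.CPUProfile).Stop()
	case "memory":
		defer profile.Start(profile.MemProfile).Stop()
	case "allocation":
		defer profile.Start(profile.MemProfileAllocs).Stop()
	case "":
		// profiling disabled
	default:
		log.Printf("unknown profile mode %q", *mode)
	}
	// ... application work would run here ...
}
```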
@@ -34,7 +34,7 @@ type C struct {
Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
DbLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
Pprof bool `env:"ORLY_PPROF" default:"false" usage:"enable pprof on 127.0.0.1:6060"`
Pprof string `env:"ORLY_PPROF" usage:"enable pprof on 127.0.0.1:6060" enum:"cpu,memory,allocation"`
AuthRequired bool `env:"ORLY_AUTH_REQUIRED" default:"false" usage:"require authentication for all requests"`
PublicReadable bool `env:"ORLY_PUBLIC_READABLE" default:"true" usage:"allow public read access to regardless of whether the client is authed"`
SpiderSeeds []string `env:"ORLY_SPIDER_SEEDS" usage:"seeds to use for the spider (relays that are looked up initially to find owner relay lists) (comma separated)" default:"wss://relay.nostr.band/,wss://relay.damus.io/,wss://nostr.wine/,wss://nostr.land/,wss://theforest.nostr1.com/"`
@@ -73,6 +73,9 @@ func New() (cfg *C, err error) {
if cfg.State == "" || strings.Contains(cfg.State, "~") {
cfg.State = filepath.Join(xdg.StateHome, cfg.AppName)
}
if len(cfg.Owners) > 0 {
cfg.AuthRequired = true
}
envPath := filepath.Join(cfg.Config, ".env")
if apputil.FileExists(envPath) {
var e env2.Env
@@ -41,8 +41,13 @@ func (s *Server) AcceptEvent(
c context.T, ev *event.E, hr *http.Request, authedPubkey []byte,
remote string,
) (accept bool, notice string, afterSave func()) {
if !s.AuthRequired() {
accept = true
return
}
// if auth is required and the user is not authed, reject
if s.AuthRequired() && len(authedPubkey) == 0 {
notice = "client isn't authed"
return
}
// check if the authed user is on the lists
@@ -53,6 +58,14 @@ func (s *Server) AcceptEvent(
break
}
}
// todo: check if event author is on owners' mute lists or block list
if !accept {
return
}
for _, u := range s.OwnersMuted() {
if bytes.Equal(u, authedPubkey) {
notice = "event author is banned from this relay"
return
}
}
return
}
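For readers following the acceptance flow, here is a hedged sketch of the same decision order reduced to plain types. Only the ordering (open relay accepts everything, otherwise require auth, then reject muted authors) comes from the diff; the function name, its inputs, and the "not allowed to write" notice are illustrative stand-ins, since the allow-list check itself sits in context lines not shown here.

```go
// Hedged model of the acceptance policy above, using plain slices instead of
// the Server's list accessors. Everything named here is an assumption except
// the decision order.
package policy

import "bytes"

func Accept(authRequired bool, authedPubkey []byte, allowed, muted [][]byte) (ok bool, notice string) {
	if !authRequired {
		return true, ""
	}
	if len(authedPubkey) == 0 {
		return false, "client isn't authed"
	}
	for _, pk := range allowed {
		if bytes.Equal(pk, authedPubkey) {
			ok = true
			break
		}
	}
	if !ok {
		return false, "pubkey is not allowed to write"
	}
	for _, pk := range muted {
		if bytes.Equal(pk, authedPubkey) {
			return false, "event author is banned from this relay"
		}
	}
	return true, ""
}
```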
@@ -3,16 +3,20 @@ package relay
import (
"errors"
"net/http"
"regexp"
"strings"

"orly.dev/pkg/encoders/event"
"orly.dev/pkg/interfaces/relay"
"orly.dev/pkg/interfaces/store"
"orly.dev/pkg/protocol/socketapi"
"orly.dev/pkg/utils/context"
"orly.dev/pkg/utils/normalize"
)

var (
NIP20prefixmatcher = regexp.MustCompile(`^\w+: `)
)

// AddEvent processes an incoming event, saves it if valid, and delivers it to
// subscribers.
//
@@ -50,9 +54,7 @@ import (
// - Returns a boolean indicating whether the event was accepted and any
// relevant message.
func (s *Server) AddEvent(
c context.T, rl relay.I, ev *event.E,
hr *http.Request, origin string,
authedPubkey []byte,
c context.T, rl relay.I, ev *event.E, hr *http.Request, origin string,
) (accepted bool, message []byte) {

if ev == nil {
@@ -65,9 +67,12 @@ func (s *Server) AddEvent(
return false, []byte(saveErr.Error())
}
errmsg := saveErr.Error()
if socketapi.NIP20prefixmatcher.MatchString(errmsg) {
if NIP20prefixmatcher.MatchString(errmsg) {
if strings.Contains(errmsg, "tombstone") {
return false, normalize.Error.F("event was deleted, not storing it again")
return false, normalize.Error.F(
"%s event was deleted, not storing it again",
origin,
)
}
if strings.HasPrefix(errmsg, string(normalize.Blocked)) {
return false, []byte(errmsg)
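The NIP20prefixmatcher regexp that moves into this package matches NIP-20 style machine-readable error strings, i.e. messages that start with a word followed by ": " such as "blocked: ..." or "invalid: ...". A small self-contained check, with sample messages invented for illustration:

```go
// Illustration of what `^\w+: ` matches; the sample strings are made up.
package main

import (
	"fmt"
	"regexp"
)

var nip20Prefix = regexp.MustCompile(`^\w+: `)

func main() {
	for _, msg := range []string{
		"blocked: event author is banned from this relay",
		"invalid: bad signature",
		"some plain storage error",
	} {
		fmt.Printf("%-55q matches=%v\n", msg, nip20Prefix.MatchString(msg))
	}
}
```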
pkg/app/relay/admin-auth.go (new file, 39 lines)

@@ -0,0 +1,39 @@
package relay

import (
"bytes"
"net/http"
"orly.dev/pkg/protocol/httpauth"
"orly.dev/pkg/utils/chk"
"orly.dev/pkg/utils/log"
"time"
)

func (s *Server) AdminAuth(
r *http.Request, remote string,
tolerance ...time.Duration,
) (authed bool, pubkey []byte) {
var valid bool
var err error
var tolerate time.Duration
if len(tolerance) > 0 {
tolerate = tolerance[0]
}
if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
return
}
if !valid {
log.E.F(
"invalid auth %s from %s",
r.Header.Get("Authorization"), remote,
)
return
}
for _, pk := range s.ownersPubkeys {
if bytes.Equal(pk, pubkey) {
authed = true
return
}
}
return
}
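A hedged sketch of how this new AdminAuth helper might be wired in front of an admin-only HTTP endpoint. The interface, route handling, response code, and the one-minute tolerance are all assumptions for illustration; only the method signature comes from the file above.

```go
// Hypothetical wiring for an admin-only endpoint. The adminAuther interface is
// an illustrative stand-in so the sketch compiles without the relay package;
// (*relay.Server).AdminAuth matches it by signature.
package adminhttp

import (
	"net/http"
	"time"
)

type adminAuther interface {
	AdminAuth(r *http.Request, remote string, tolerance ...time.Duration) (authed bool, pubkey []byte)
}

// AdminOnly rejects requests whose HTTP auth does not resolve to an owner pubkey.
func AdminOnly(s adminAuther, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// Allow a minute of clock skew on the auth timestamp (assumed value).
		authed, _ := s.AdminAuth(r, r.RemoteAddr, time.Minute)
		if !authed {
			http.Error(w, "admin authentication required", http.StatusUnauthorized)
			return
		}
		next(w, r)
	}
}
```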
@@ -7,7 +7,6 @@ import (

"orly.dev/pkg/utils/chk"
"orly.dev/pkg/utils/log"
"orly.dev/pkg/utils/lol"
)

// ServiceURL constructs the service URL based on the incoming HTTP request. It
@@ -34,8 +33,6 @@ import (
//
// - Returns the constructed URL string.
func (s *Server) ServiceURL(req *http.Request) (st string) {
lol.Tracer("ServiceURL")
defer func() { lol.Tracer("end ServiceURL", st) }()
if !s.AuthRequired() {
log.T.F("auth not required")
return

@@ -55,7 +55,8 @@ func (s *Server) HandleRelayInfo(w http.ResponseWriter, r *http.Request) {
Nips: supportedNIPs, Software: version.URL,
Version: version.V,
Limitation: relayinfo.Limits{
AuthRequired: s.C.AuthRequired,
AuthRequired: s.C.AuthRequired,
RestrictedWrites: s.C.AuthRequired,
},
Icon: "https://cdn.satellite.earth/ac9778868fbf23b63c47c769a74e163377e6ea94d3f0f31711931663d035c4f6.png",
}
pkg/app/relay/owners-followed-auth.go (new file, 39 lines)

@@ -0,0 +1,39 @@
package relay

import (
"bytes"
"net/http"
"orly.dev/pkg/protocol/httpauth"
"orly.dev/pkg/utils/chk"
"orly.dev/pkg/utils/log"
"time"
)

func (s *Server) OwnersFollowedAuth(
r *http.Request, remote string,
tolerance ...time.Duration,
) (authed bool, pubkey []byte) {
var valid bool
var err error
var tolerate time.Duration
if len(tolerance) > 0 {
tolerate = tolerance[0]
}
if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
return
}
if !valid {
log.E.F(
"invalid auth %s from %s",
r.Header.Get("Authorization"), remote,
)
return
}
for _, pk := range s.ownersFollowed {
if bytes.Equal(pk, pubkey) {
authed = true
return
}
}
return
}
pkg/app/relay/owners-pubkeys.go (new file, 6 lines)

@@ -0,0 +1,6 @@
package relay

func (s *Server) OwnersPubkeys() (pks [][]byte) {
pks = s.ownersPubkeys
return
}
@@ -60,7 +60,7 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
log.T.F("found %d possible duplicate events", len(evs))
for _, ev := range evs {
del := true
if bytes.Equal(ev.Id, evt.Id) {
if bytes.Equal(ev.ID, evt.ID) {
continue
}
log.I.F(
@@ -88,7 +88,7 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
}
if isFollowed {
if _, _, err = sto.SaveEvent(
c, evt,
c, evt, false,
); err != nil && !errors.Is(
err, store.ErrDupEvent,
) {
@@ -110,7 +110,7 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
for _, pk := range owners {
if bytes.Equal(evt.Pubkey, pk) {
if _, _, err = sto.SaveEvent(
c, evt,
c, evt, false,
); err != nil && !errors.Is(
err, store.ErrDupEvent,
) {
@@ -222,7 +222,7 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
}
}
}
if _, _, err = sto.SaveEvent(c, evt); err != nil && !errors.Is(
if _, _, err = sto.SaveEvent(c, evt, false); err != nil && !errors.Is(
err, store.ErrDupEvent,
) {
return
@@ -37,6 +37,7 @@ type Server struct {
listeners *publish.S
*config.C
*Lists
Mux *servemux.S
}

// ServerParams represents the configuration parameters for initializing a
@@ -48,6 +49,7 @@ type ServerParams struct {
Rl relay.I
DbPath string
MaxLimit int
Mux *servemux.S
*config.C
}

@@ -78,7 +80,9 @@ type ServerParams struct {
// - Sets up a ServeMux for handling HTTP requests.
//
// - Initializes the relay, starting its operation in a separate goroutine.
func NewServer(sp *ServerParams, opts ...options.O) (s *Server, err error) {
func NewServer(
sp *ServerParams, serveMux *servemux.S, opts ...options.O,
) (s *Server, err error) {
op := options.Default()
for _, opt := range opts {
opt(op)
@@ -88,7 +92,6 @@ func NewServer(sp *ServerParams, opts ...options.O) (s *Server, err error) {
return nil, fmt.Errorf("storage init: %w", err)
}
}
serveMux := servemux.NewServeMux()
s = &Server{
Ctx: sp.Ctx,
Cancel: sp.Cancel,
@@ -209,8 +212,8 @@ func (s *Server) Start(
}()
addr := net.JoinHostPort(host, strconv.Itoa(port))
log.I.F("starting relay listener at %s", addr)
ln, err := net.Listen("tcp", addr)
if err != nil {
var ln net.Listener
if ln, err = net.Listen("tcp", addr); err != nil {
return err
}
s.httpServer = &http.Server{
@@ -1,136 +1,238 @@
package relay

import (
"fmt"
"orly.dev/pkg/crypto/ec/schnorr"
"orly.dev/pkg/database/indexes/types"
"orly.dev/pkg/encoders/event"
"orly.dev/pkg/encoders/filter"
"orly.dev/pkg/encoders/hex"
"orly.dev/pkg/encoders/kind"
"orly.dev/pkg/encoders/kinds"
"orly.dev/pkg/encoders/tag"
"orly.dev/pkg/protocol/ws"
"orly.dev/pkg/utils/chk"
"orly.dev/pkg/utils/context"
"orly.dev/pkg/utils/errorf"
"orly.dev/pkg/utils/log"
"sort"
"sync"
"orly.dev/pkg/utils/lol"
"runtime/debug"
)

// IdPkTs is a map of event IDs to their id, pubkey, kind, and timestamp
// This is used to reduce memory usage by storing only the essential information
// instead of the full events
type IdPkTs struct {
Id []byte
Pubkey []byte
Kind uint16
Timestamp int64
}

func (s *Server) SpiderFetch(
k *kind.T, noFetch bool, pubkeys ...[]byte,
k *kinds.T, noFetch, noExtract bool, pubkeys ...[]byte,
) (pks [][]byte, err error) {
// Map to store id, pubkey, kind, and timestamp for each event
// Key is a combination of pubkey and kind for deduplication
pkKindMap := make(map[string]*IdPkTs)
// Map to collect pubkeys from p tags
pkMap := make(map[string]struct{})

// first search the local database
pkList := tag.New(pubkeys...)
f := &filter.F{
Kinds: kinds.New(k),
Kinds: k,
Authors: pkList,
}
var evs event.S
if evs, err = s.Storage().QueryEvents(s.Ctx, f); chk.E(err) {

var kindsList string
for i, kk := range k.K {
if i > 0 {
kindsList += ","
}
kindsList += kk.Name()
}

// Query local database
var localEvents event.S
if localEvents, err = s.Storage().QueryEvents(s.Ctx, f); chk.E(err) {
// none were found, so we need to scan the spiders
err = nil
}
if len(evs) < len(pubkeys) && !noFetch {

// Process local events
for _, ev := range localEvents {
// Create a key based on pubkey and kind for deduplication
pkKindKey := string(ev.Pubkey) + string(ev.Kind.Marshal(nil))

// Check if we already have an event with this pubkey and kind
existing, exists := pkKindMap[pkKindKey]

// If it doesn't exist or the new event is newer, store it
if !exists || ev.CreatedAtInt64() > existing.Timestamp {
pkKindMap[pkKindKey] = &IdPkTs{
Id: ev.ID,
Pubkey: ev.Pubkey,
Kind: ev.Kind.ToU16(),
Timestamp: ev.CreatedAtInt64(),
}

// Extract p tags if not in noExtract mode
if !noExtract {
t := ev.Tags.GetAll(tag.New("p"))
for _, tt := range t.ToSliceOfTags() {
pkh := tt.Value()
if len(pkh) != 2*schnorr.PubKeyBytesLen {
continue
}
pk := make([]byte, schnorr.PubKeyBytesLen)
if _, err = hex.DecBytes(pk, pkh); err != nil {
err = nil
continue
}
pkMap[string(pk)] = struct{}{}
}
}
}

// Nil the event to free memory
ev = nil
}

log.I.F("%d events found of type %s", len(pkKindMap), kindsList)

if !noFetch {
// we need to search the spider seeds.
// Break up pubkeys into batches of 512
for i := 0; i < len(pubkeys); i += 512 {
end := i + 512
// Break up pubkeys into batches of 128
for i := 0; i < len(pubkeys); i += 128 {
end := i + 128
if end > len(pubkeys) {
end = len(pubkeys)
}
batchPubkeys := pubkeys[i:end]
log.I.F(
"processing batch %d to %d of %d for kind %s",
i, end, len(pubkeys), k.Name(),
i, end, len(pubkeys), kindsList,
)
batchPkList := tag.New(batchPubkeys...)
lim := uint(batchPkList.Len())
batchFilter := &filter.F{
Kinds: kinds.New(k),
Kinds: k,
Authors: batchPkList,
Limit: &lim,
}

var mx sync.Mutex
var wg sync.WaitGroup

for _, seed := range s.C.SpiderSeeds {
wg.Add(1)
go func() {
defer wg.Done()
select {
case <-s.Ctx.Done():
return
default:
select {
case <-s.Ctx.Done():
return
default:
}
var evss event.S
var cli *ws.Client
if cli, err = ws.RelayConnect(
context.Bg(), seed, ws.WithSignatureChecker(
func(e *event.E) bool {
return true
},
),
); chk.E(err) {
err = nil
return
}
if evss, err = cli.QuerySync(
context.Bg(), batchFilter,
); chk.E(err) {
err = nil
return
}

// Process each event immediately
for i, ev := range evss {
// Create a key based on pubkey and kind for deduplication
pkKindKey := string(ev.Pubkey) + string(ev.Kind.Marshal(nil))

// Check if we already have an event with this pubkey and kind
existing, exists := pkKindMap[pkKindKey]

// If it doesn't exist or the new event is newer, store it and save to database
if !exists || ev.CreatedAtInt64() > existing.Timestamp {
var ser *types.Uint40
if ser, err = s.Storage().GetSerialById(ev.ID); err == nil && ser != nil {
err = errorf.E("event already exists: %0x", ev.ID)
return
} else {
// verify the signature
var valid bool
if valid, err = ev.Verify(); chk.E(err) || !valid {
continue
}
log.I.F("event %0x is valid", ev.ID)
}

// Save the event to the database
if _, _, err = s.Storage().SaveEvent(
s.Ctx, ev, true, // already verified
); chk.E(err) {
err = nil
continue
}
if lol.Level.Load() == lol.Trace {
log.T.C(
func() string {
return fmt.Sprintf(
"saved event:\n%s", ev.Marshal(nil),
)
},
)
} else {
log.I.F("saved event: %0x", ev.ID)
}

// Store the essential information
pkKindMap[pkKindKey] = &IdPkTs{
Id: ev.ID,
Pubkey: ev.Pubkey,
Kind: ev.Kind.ToU16(),
Timestamp: ev.CreatedAtInt64(),
}

// Extract p tags if not in noExtract mode
if !noExtract {
t := ev.Tags.GetAll(tag.New("p"))
for _, tt := range t.ToSliceOfTags() {
pkh := tt.Value()
if len(pkh) != 2*schnorr.PubKeyBytesLen {
continue
}
pk := make([]byte, schnorr.PubKeyBytesLen)
if _, err = hex.DecBytes(pk, pkh); err != nil {
err = nil
continue
}
pkMap[string(pk)] = struct{}{}
}
}
}
var evss event.S
var cli *ws.Client
if cli, err = ws.RelayConnect(
context.Bg(), seed,
); chk.E(err) {
err = nil
return
}
if evss, err = cli.QuerySync(
context.Bg(), batchFilter,
); chk.E(err) {
err = nil
return
}
mx.Lock()
for _, ev := range evss {
evs = append(evs, ev)
}
mx.Unlock()
}()
}
wg.Wait()
}
// save the events to the database
for _, ev := range evs {
if _, _, err = s.Storage().SaveEvent(s.Ctx, ev); chk.E(err) {
err = nil
continue

// Nil the event in the slice to free memory
evss[i] = nil
}
}
}
}
if !k.Equal(kind.FollowList) {
chk.E(s.Storage().Sync())
debug.FreeOSMemory()

// If we're in noExtract mode, just return
if noExtract {
return
}
// deduplicate and take the newest
var tmp event.S
evMap := make(map[string]event.S)
for _, ev := range evs {
evMap[ev.PubKeyString()] = append(evMap[ev.PubKeyString()], ev)
}
for _, evm := range evMap {
if len(evm) < 1 {
continue
}
if len(evm) > 1 {
sort.Sort(evm)
}
tmp = append(tmp, evm[0])
}
evs = tmp
// we have all we're going to get now
pkMap := make(map[string]struct{})
for _, ev := range evs {
t := ev.Tags.GetAll(tag.New("p"))
for _, tt := range t.ToSliceOfTags() {
pkh := tt.Value()
if len(pkh) != 2*schnorr.PubKeyBytesLen {
continue
}
pk := make([]byte, schnorr.PubKeyBytesLen)
if _, err = hex.DecBytes(pk, pkh); chk.E(err) {
err = nil
continue
}
pkMap[string(pk)] = struct{}{}
}
}

// Convert the collected pubkeys to the return format
for pk := range pkMap {
pks = append(pks, []byte(pk))
}

log.I.F("found %d pks", len(pks))
return
}
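The rewritten SpiderFetch above replaces the old full-event slice with a small map keyed by pubkey plus kind, so only the newest replaceable event per author is tracked and the full events can be released early. A reduced, self-contained sketch of that bookkeeping follows; the type names mirror the diff, the key encoding and everything else are simplified assumptions.

```go
// Minimal model of the pkKindMap bookkeeping used by SpiderFetch: keep only
// the newest entry per (pubkey, kind) and retain just id/pubkey/kind/timestamp.
package dedup

import "encoding/binary"

type IdPkTs struct {
	Id        []byte
	Pubkey    []byte
	Kind      uint16
	Timestamp int64
}

type Newest struct {
	m map[string]*IdPkTs
}

func New() *Newest { return &Newest{m: make(map[string]*IdPkTs)} }

// key concatenates the pubkey bytes with a fixed-width kind encoding,
// standing in for string(ev.Pubkey)+string(ev.Kind.Marshal(nil)).
func key(pubkey []byte, kind uint16) string {
	var k [2]byte
	binary.BigEndian.PutUint16(k[:], kind)
	return string(pubkey) + string(k[:])
}

// Add records the entry only if it is newer than what is already held for the
// same (pubkey, kind); it reports whether the entry was stored.
func (n *Newest) Add(id, pubkey []byte, kind uint16, ts int64) bool {
	k := key(pubkey, kind)
	if existing, ok := n.m[k]; ok && ts <= existing.Timestamp {
		return false
	}
	n.m[k] = &IdPkTs{Id: id, Pubkey: pubkey, Kind: kind, Timestamp: ts}
	return true
}

func (n *Newest) Len() int { return len(n.m) }
```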
@@ -6,6 +6,7 @@ import (
"orly.dev/pkg/encoders/bech32encoding"
"orly.dev/pkg/encoders/hex"
"orly.dev/pkg/encoders/kind"
"orly.dev/pkg/encoders/kinds"
"orly.dev/pkg/utils/chk"
"orly.dev/pkg/utils/log"
)
@@ -52,21 +53,22 @@ func (s *Server) Spider(noFetch ...bool) (err error) {
log.I.F("getting ownersFollowed")
var ownersFollowed [][]byte
if ownersFollowed, err = s.SpiderFetch(
kind.FollowList, dontFetch, ownersPubkeys...,
kinds.New(kind.FollowList), dontFetch, false, ownersPubkeys...,
); chk.E(err) {
return
}
// log.I.S(ownersFollowed)
log.I.F("getting followedFollows")
var followedFollows [][]byte
if followedFollows, err = s.SpiderFetch(
kind.FollowList, dontFetch, ownersFollowed...,
kinds.New(kind.FollowList), dontFetch, false, ownersFollowed...,
); chk.E(err) {
return
}
log.I.F("getting ownersMuted")
var ownersMuted [][]byte
if ownersMuted, err = s.SpiderFetch(
kind.MuteList, dontFetch, ownersPubkeys...,
kinds.New(kind.MuteList), dontFetch, false, ownersPubkeys...,
); chk.E(err) {
return
}
@@ -74,22 +76,17 @@ func (s *Server) Spider(noFetch ...bool) (err error) {
// list
filteredFollows := make([][]byte, 0, len(followedFollows))
for _, follow := range followedFollows {
found := false
for _, owner := range ownersFollowed {
if bytes.Equal(follow, owner) {
found = true
break
}
}
for _, owner := range ownersMuted {
if bytes.Equal(follow, owner) {
found = true
break
}
}
if !found {
filteredFollows = append(filteredFollows, follow)
}
filteredFollows = append(filteredFollows, follow)
}
followedFollows = filteredFollows
own := "owner"
@@ -115,7 +112,7 @@ func (s *Server) Spider(noFetch ...bool) (err error) {
len(followedFollows), folfol,
len(ownersMuted), mut,
)
// add the owners
// add the owners to the ownersFollowed
ownersFollowed = append(ownersFollowed, ownersPubkeys...)
s.SetOwnersPubkeys(ownersPubkeys)
s.SetOwnersFollowed(ownersFollowed)
@@ -125,9 +122,12 @@ func (s *Server) Spider(noFetch ...bool) (err error) {
if !dontFetch {
go func() {
everyone := append(ownersFollowed, followedFollows...)
s.SpiderFetch(kind.ProfileMetadata, false, everyone...)
s.SpiderFetch(kind.RelayListMetadata, false, everyone...)
s.SpiderFetch(kind.DMRelaysList, false, everyone...)
s.SpiderFetch(
kinds.New(
kind.ProfileMetadata, kind.RelayListMetadata,
kind.DMRelaysList,
), false, true, everyone...,
)
}()
}
}()
@@ -7,6 +7,7 @@ import (
"orly.dev/pkg/encoders/eventid"
"orly.dev/pkg/encoders/filter"
"orly.dev/pkg/interfaces/store"
"orly.dev/pkg/protocol/servemux"
"orly.dev/pkg/utils/context"
"orly.dev/pkg/utils/units"
"testing"
@@ -14,6 +15,7 @@ import (

func startTestRelay(c context.T, t *testing.T, tr *testRelay) *Server {
t.Helper()
serveMux := servemux.NewServeMux()
srv, _ := NewServer(
&ServerParams{
Ctx: c,
@@ -21,6 +23,7 @@ func startTestRelay(c context.T, t *testing.T, tr *testRelay) *Server {
Rl: tr,
MaxLimit: 500 * units.Kb,
},
serveMux,
)
started := make(chan bool)
go srv.Start("127.0.0.1", 0, started)
pkg/app/relay/user-auth.go (new file, 39 lines)

@@ -0,0 +1,39 @@
package relay

import (
"bytes"
"net/http"
"orly.dev/pkg/protocol/httpauth"
"orly.dev/pkg/utils/chk"
"orly.dev/pkg/utils/log"
"time"
)

func (s *Server) UserAuth(
r *http.Request, remote string,
tolerance ...time.Duration,
) (authed bool, pubkey []byte) {
var valid bool
var err error
var tolerate time.Duration
if len(tolerance) > 0 {
tolerate = tolerance[0]
}
if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
return
}
if !valid {
log.E.F(
"invalid auth %s from %s",
r.Header.Get("Authorization"), remote,
)
return
}
for _, pk := range append(s.ownersFollowed, s.followedFollows...) {
if bytes.Equal(pk, pubkey) {
authed = true
return
}
}
return
}
@@ -61,7 +61,7 @@ func TestSignerVerify(t *testing.T) {
continue
}
if valid, err = signer.Verify(id, ev.Sig); chk.E(err) {
t.Errorf("failed to verify: %s\n%0x", err, ev.Id)
t.Errorf("failed to verify: %s\n%0x", err, ev.ID)
continue
}
if !valid {

@@ -2,7 +2,6 @@ package database

import (
"github.com/dgraph-io/badger/v4"
"io"
"orly.dev/pkg/encoders/eventidserial"
"orly.dev/pkg/utils/apputil"
"orly.dev/pkg/utils/chk"
@@ -75,16 +74,6 @@ func (d *D) Wipe() (err error) {
panic("implement me")
}

func (d *D) Import(r io.Reader) {
// TODO implement me
panic("implement me")
}

func (d *D) Export(c context.T, w io.Writer, pubkeys ...[]byte) {
// TODO implement me
panic("implement me")
}

func (d *D) SetLogLevel(level string) {
d.Logger.SetLogLevel(lol.GetLogLevel(level))
}
@@ -106,6 +95,7 @@ func (d *D) Init(path string) (err error) {

// Sync flushes the database buffers to disk.
func (d *D) Sync() (err error) {
d.DB.RunValueLogGC(0.5)
return d.DB.Sync()
}

@@ -23,7 +23,7 @@ func (d *D) DeleteEvent(c context.T, eid *eventid.T) (err error) {
return
}
if ser == nil {
// Event not found, nothing to delete
// Event wasn't found, nothing to delete
return
}
// Fetch the event to get its data
@@ -33,7 +33,7 @@ func (d *D) DeleteEvent(c context.T, eid *eventid.T) (err error) {
return
}
if ev == nil {
// Event not found, nothing to delete
// Event wasn't found, nothing to delete
return
}
// Get all indexes for the event
pkg/database/export.go (new file, 108 lines)

@@ -0,0 +1,108 @@
package database

import (
"bytes"
"github.com/dgraph-io/badger/v4"
"io"
"orly.dev/pkg/database/indexes"
"orly.dev/pkg/database/indexes/types"
"orly.dev/pkg/encoders/codecbuf"
"orly.dev/pkg/encoders/event"
"orly.dev/pkg/utils/chk"
"orly.dev/pkg/utils/context"
"orly.dev/pkg/utils/units"
)

// Export the complete database of stored events to an io.Writer in line structured minified
// JSON.
func (d *D) Export(c context.T, w io.Writer, pubkeys ...[]byte) {
var err error
evB := make([]byte, 0, units.Mb)
evBuf := bytes.NewBuffer(evB)
if len(pubkeys) == 0 {
if err = d.View(
func(txn *badger.Txn) (err error) {
buf := codecbuf.Get()
defer codecbuf.Put(buf)
if err = indexes.EventEnc(nil).MarshalWrite(buf); chk.E(err) {
return
}
it := txn.NewIterator(badger.IteratorOptions{Prefix: buf.Bytes()})
defer it.Close()
for it.Rewind(); it.Valid(); it.Next() {
item := it.Item()
if err = item.Value(
func(val []byte) (err error) {
evBuf.Write(val)
return
},
); chk.E(err) {
continue
}
ev := event.New()
if err = ev.UnmarshalBinary(evBuf); chk.E(err) {
continue
}
// Serialize the event to JSON and write it to the output
if _, err = w.Write(ev.Serialize()); chk.E(err) {
return
}
if _, err = w.Write([]byte{'\n'}); chk.E(err) {
return
}
evBuf.Reset()
}
return
},
); err != nil {
return
}
} else {
for _, pubkey := range pubkeys {
if err = d.View(
func(txn *badger.Txn) (err error) {
pkBuf := codecbuf.Get()
defer codecbuf.Put(pkBuf)
ph := &types.PubHash{}
if err = ph.FromPubkey(pubkey); chk.E(err) {
return
}
if err = indexes.PubkeyEnc(
ph, nil, nil,
).MarshalWrite(pkBuf); chk.E(err) {
return
}
it := txn.NewIterator(badger.IteratorOptions{Prefix: pkBuf.Bytes()})
defer it.Close()
for it.Rewind(); it.Valid(); it.Next() {
item := it.Item()
if err = item.Value(
func(val []byte) (err error) {
evBuf.Write(val)
return
},
); chk.E(err) {
continue
}
ev := event.New()
if err = ev.UnmarshalBinary(evBuf); chk.E(err) {
continue
}
// Serialize the event to JSON and write it to the output
if _, err = w.Write(ev.Serialize()); chk.E(err) {
continue
}
if _, err = w.Write([]byte{'\n'}); chk.E(err) {
continue
}
evBuf.Reset()
}
return
},
); err != nil {
return
}
}
}
return
}
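A hedged usage sketch for the new Export method: streaming the whole event store to a JSONL file. The data directory, log level, and output path are made up; the calls themselves (context.Cancel, database.New, db.Close, db.Export) mirror ones visible elsewhere in this diff.

```go
// Hypothetical caller for (*database.D).Export; paths and settings are assumptions.
package main

import (
	"os"

	"orly.dev/pkg/database"
	"orly.dev/pkg/utils/chk"
	"orly.dev/pkg/utils/context"
)

func main() {
	c, cancel := context.Cancel(context.Bg())
	defer cancel()
	db, err := database.New(c, cancel, "/tmp/orly-data", "info")
	if chk.E(err) {
		os.Exit(1)
	}
	defer db.Close()
	f, err := os.Create("/tmp/orly-export.jsonl")
	if chk.E(err) {
		os.Exit(1)
	}
	defer f.Close()
	// No pubkeys given, so the whole event store is written, one event per line.
	db.Export(c, f)
}
```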
111
pkg/database/export_test.go
Normal file
111
pkg/database/export_test.go
Normal file
@@ -0,0 +1,111 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/event/examples"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
)
|
||||
|
||||
// TestExport tests the Export function by:
|
||||
// 1. Creating a new database with events from examples.Cache
|
||||
// 2. Checking that all event IDs in the cache are found in the export
|
||||
// 3. Verifying this also works when only a few pubkeys are requested
|
||||
func TestExport(t *testing.T) {
|
||||
// Create a temporary directory for the database
|
||||
tempDir, err := os.MkdirTemp("", "test-db-*")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temporary directory: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir) // Clean up after the test
|
||||
|
||||
// Create a context and cancel function for the database
|
||||
ctx, cancel := context.Cancel(context.Bg())
|
||||
defer cancel()
|
||||
|
||||
// Initialize the database
|
||||
db, err := New(ctx, cancel, tempDir, "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create a scanner to read events from examples.Cache
|
||||
scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
|
||||
scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
|
||||
|
||||
// Maps to store event IDs and their associated pubkeys
|
||||
eventIDs := make(map[string]bool)
|
||||
pubkeyToEventIDs := make(map[string][]string)
|
||||
|
||||
// Process each event
|
||||
for scanner.Scan() {
|
||||
chk.E(scanner.Err())
|
||||
b := scanner.Bytes()
|
||||
ev := event.New()
|
||||
|
||||
// Unmarshal the event
|
||||
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Store the event ID
|
||||
eventID := ev.IdString()
|
||||
eventIDs[eventID] = true
|
||||
|
||||
// Store the event ID by pubkey
|
||||
pubkey := ev.PubKeyString()
|
||||
pubkeyToEventIDs[pubkey] = append(pubkeyToEventIDs[pubkey], eventID)
|
||||
}
|
||||
|
||||
// Check for scanner errors
|
||||
if err = scanner.Err(); err != nil {
|
||||
t.Fatalf("Scanner error: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Saved %d events to the database", len(eventIDs))
|
||||
|
||||
// Test 1: Export all events and verify all IDs are in the export
|
||||
var exportBuffer bytes.Buffer
|
||||
db.Export(ctx, &exportBuffer)
|
||||
|
||||
// Parse the exported events and check that all IDs are present
|
||||
exportedIDs := make(map[string]bool)
|
||||
exportScanner := bufio.NewScanner(&exportBuffer)
|
||||
exportScanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
|
||||
exportCount := 0
|
||||
for exportScanner.Scan() {
|
||||
b := exportScanner.Bytes()
|
||||
ev := event.New()
|
||||
if _, err = ev.Unmarshal(b); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
exportedIDs[ev.IdString()] = true
|
||||
exportCount++
|
||||
}
|
||||
// Check for scanner errors
|
||||
if err = exportScanner.Err(); err != nil {
|
||||
t.Fatalf("Scanner error: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Found %d events in the export", exportCount)
|
||||
|
||||
// Check that all original event IDs are in the export
|
||||
for id := range eventIDs {
|
||||
if !exportedIDs[id] {
|
||||
t.Errorf("Event ID %s not found in export", id)
|
||||
}
|
||||
}
|
||||
|
||||
t.Logf("All %d event IDs found in export", len(eventIDs))
|
||||
}
|
||||
@@ -56,7 +56,7 @@ func TestFetchEventBySerial(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -78,7 +78,7 @@ func TestFetchEventBySerial(t *testing.T) {
|
||||
var sers types.Uint40s
|
||||
sers, err = db.QueryForSerials(
|
||||
ctx, &filter.F{
|
||||
Ids: tag.New(testEvent.Id),
|
||||
Ids: tag.New(testEvent.ID),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
@@ -102,10 +102,10 @@ func TestFetchEventBySerial(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify the fetched event has the same ID as the original event
|
||||
if !bytes.Equal(fetchedEvent.Id, testEvent.Id) {
|
||||
if !bytes.Equal(fetchedEvent.ID, testEvent.ID) {
|
||||
t.Fatalf(
|
||||
"Fetched event ID doesn't match original event ID. Got %x, expected %x",
|
||||
fetchedEvent.Id, testEvent.Id,
|
||||
fetchedEvent.ID, testEvent.ID,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -39,9 +39,9 @@ func GetIndexesForEvent(ev *event.E, serial uint64) (
|
||||
if err = ser.Set(serial); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// Id index
|
||||
// ID index
|
||||
idHash := new(IdHash)
|
||||
if err = idHash.FromId(ev.Id); chk.E(err) {
|
||||
if err = idHash.FromId(ev.ID); chk.E(err) {
|
||||
return
|
||||
}
|
||||
idIndex := indexes.IdEnc(idHash, ser)
|
||||
@@ -50,7 +50,7 @@ func GetIndexesForEvent(ev *event.E, serial uint64) (
|
||||
}
|
||||
// FullIdPubkey index
|
||||
fullID := new(Id)
|
||||
if err = fullID.FromId(ev.Id); chk.E(err) {
|
||||
if err = fullID.FromId(ev.ID); chk.E(err) {
|
||||
return
|
||||
}
|
||||
pubHash := new(PubHash)
|
||||
|
||||
@@ -60,7 +60,7 @@ func testBasicEvent(t *testing.T) {
|
||||
for i := range id {
|
||||
id[i] = byte(i)
|
||||
}
|
||||
ev.Id = id
|
||||
ev.ID = id
|
||||
|
||||
// Set Pubkey
|
||||
pubkey := make([]byte, 32)
|
||||
@@ -92,7 +92,7 @@ func testBasicEvent(t *testing.T) {
|
||||
|
||||
// Create and verify the expected indexes
|
||||
|
||||
// 1. Id index
|
||||
// 1. ID index
|
||||
ser := new(types2.Uint40)
|
||||
err = ser.Set(serial)
|
||||
if chk.E(err) {
|
||||
@@ -100,7 +100,7 @@ func testBasicEvent(t *testing.T) {
|
||||
}
|
||||
|
||||
idHash := new(types2.IdHash)
|
||||
err = idHash.FromId(ev.Id)
|
||||
err = idHash.FromId(ev.ID)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("Failed to create IdHash: %v", err)
|
||||
}
|
||||
@@ -109,9 +109,9 @@ func testBasicEvent(t *testing.T) {
|
||||
|
||||
// 2. FullIdPubkey index
|
||||
fullID := new(types2.Id)
|
||||
err = fullID.FromId(ev.Id)
|
||||
err = fullID.FromId(ev.ID)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("Failed to create Id: %v", err)
|
||||
t.Fatalf("Failed to create ID: %v", err)
|
||||
}
|
||||
|
||||
pubHash := new(types2.PubHash)
|
||||
@@ -156,7 +156,7 @@ func testEventWithTags(t *testing.T) {
|
||||
for i := range id {
|
||||
id[i] = byte(i)
|
||||
}
|
||||
ev.Id = id
|
||||
ev.ID = id
|
||||
|
||||
// Set Pubkey
|
||||
pubkey := make([]byte, 32)
|
||||
@@ -210,7 +210,7 @@ func testEventWithTags(t *testing.T) {
|
||||
}
|
||||
|
||||
idHash := new(types2.IdHash)
|
||||
err = idHash.FromId(ev.Id)
|
||||
err = idHash.FromId(ev.ID)
|
||||
if chk.E(err) {
|
||||
t.Fatalf("Failed to create IdHash: %v", err)
|
||||
}
|
||||
@@ -268,7 +268,7 @@ func testErrorHandling(t *testing.T) {
|
||||
for i := range id {
|
||||
id[i] = byte(i)
|
||||
}
|
||||
ev.Id = id
|
||||
ev.ID = id
|
||||
|
||||
// Set Pubkey
|
||||
pubkey := make([]byte, 32)
|
||||
|
||||
@@ -76,7 +76,7 @@ func CreatePubHashFromData(data []byte) (p *types2.PubHash, err error) {
|
||||
// complete set of combinations of all fields in the event, thus there is no
|
||||
// need to decode events until they are to be delivered.
|
||||
func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
|
||||
// Id eid
|
||||
// ID eid
|
||||
//
|
||||
// If there is any Ids in the filter, none of the other fields matter. It
|
||||
// should be an error, but convention just ignores it.
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
|
||||
// TestGetIndexesFromFilter tests the GetIndexesFromFilter function
|
||||
func TestGetIndexesFromFilter(t *testing.T) {
|
||||
t.Run("Id", testIdFilter)
|
||||
t.Run("ID", testIdFilter)
|
||||
t.Run("Pubkey", testPubkeyFilter)
|
||||
t.Run("CreatedAt", testCreatedAtFilter)
|
||||
t.Run("CreatedAtUntil", testCreatedAtUntilFilter)
|
||||
@@ -77,9 +77,9 @@ func verifyIndex(
|
||||
}
|
||||
}
|
||||
|
||||
// Test Id filter
|
||||
// Test ID filter
|
||||
func testIdFilter(t *testing.T) {
|
||||
// Create a filter with an Id
|
||||
// Create a filter with an ID
|
||||
f := filter.New()
|
||||
id := make([]byte, sha256.Size)
|
||||
for i := range id {
|
||||
@@ -102,7 +102,7 @@ func testIdFilter(t *testing.T) {
|
||||
expectedIdx := indexes.IdEnc(idHash, nil)
|
||||
|
||||
// Verify the generated index
|
||||
// For Id filter, both start and end indexes are the same
|
||||
// For ID filter, both start and end indexes are the same
|
||||
verifyIndex(t, idxs, expectedIdx, expectedIdx)
|
||||
}
|
||||
|
||||
|
||||
@@ -53,7 +53,7 @@ func TestGetSerialById(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -71,7 +71,7 @@ func TestGetSerialById(t *testing.T) {
|
||||
testEvent := events[3] // Using the same event as in QueryForIds test
|
||||
|
||||
// Get the serial by ID
|
||||
serial, err := db.GetSerialById(testEvent.Id)
|
||||
serial, err := db.GetSerialById(testEvent.ID)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get serial by ID: %v", err)
|
||||
}
|
||||
@@ -82,10 +82,10 @@ func TestGetSerialById(t *testing.T) {
|
||||
}
|
||||
|
||||
// Test with a non-existent ID
|
||||
nonExistentId := make([]byte, len(testEvent.Id))
|
||||
nonExistentId := make([]byte, len(testEvent.ID))
|
||||
// Ensure it's different from any real ID
|
||||
for i := range nonExistentId {
|
||||
nonExistentId[i] = ^testEvent.Id[i]
|
||||
nonExistentId[i] = ^testEvent.ID[i]
|
||||
}
|
||||
|
||||
serial, err = db.GetSerialById(nonExistentId)
|
||||
|
||||
@@ -60,12 +60,12 @@ func TestGetSerialsByRange(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
// Get the serial for this event
|
||||
serial, err := db.GetSerialById(ev.Id)
|
||||
serial, err := db.GetSerialById(ev.ID)
|
||||
if err != nil {
|
||||
t.Fatalf(
|
||||
"Failed to get serial for event #%d: %v", eventCount+1, err,
|
||||
@@ -73,7 +73,7 @@ func TestGetSerialsByRange(t *testing.T) {
|
||||
}
|
||||
|
||||
if serial != nil {
|
||||
eventSerials[string(ev.Id)] = serial
|
||||
eventSerials[string(ev.ID)] = serial
|
||||
}
|
||||
|
||||
eventCount++
|
||||
|
||||
pkg/database/import.go (new file, 82 lines)

@@ -0,0 +1,82 @@
package database

import (
"bufio"
"io"
"orly.dev/pkg/encoders/event"
"orly.dev/pkg/utils/chk"
"orly.dev/pkg/utils/log"
"os"
"runtime/debug"
)

const maxLen = 500000000

// Import a collection of events in line structured minified JSON format (JSONL).
func (d *D) Import(rr io.Reader) {
// store to disk so we can return fast
tmpPath := os.TempDir() + string(os.PathSeparator) + "orly"
os.MkdirAll(tmpPath, 0700)
tmp, err := os.CreateTemp(tmpPath, "")
if chk.E(err) {
return
}
log.I.F("buffering upload to %s", tmp.Name())
if _, err = io.Copy(tmp, rr); chk.E(err) {
return
}
if _, err = tmp.Seek(0, 0); chk.E(err) {
return
}

go func() {
var err error
// Create a scanner to read the buffer line by line
scan := bufio.NewScanner(tmp)
scanBuf := make([]byte, maxLen)
scan.Buffer(scanBuf, maxLen)

var count, total int
for scan.Scan() {
select {
case <-d.ctx.Done():
log.I.F("context closed")
return
default:
}

b := scan.Bytes()
total += len(b) + 1
if len(b) < 1 {
continue
}

ev := &event.E{}
if _, err = ev.Unmarshal(b); err != nil {
continue
}

if _, _, err = d.SaveEvent(d.ctx, ev, false); err != nil {
continue
}

b = nil
ev = nil
count++
if count%100 == 0 {
log.I.F("received %d events", count)
debug.FreeOSMemory()
}
}

log.I.F("read %d bytes and saved %d events", total, count)
err = scan.Err()
if chk.E(err) {
}

// Help garbage collection
tmp = nil
}()

return
}
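And a matching hedged sketch for Import, which buffers the reader to a temp file and then ingests it line by line in a background goroutine. Only the method itself comes from the file above; the paths and the crude wait at the end are illustrative assumptions.

```go
// Hypothetical caller for (*database.D).Import; paths are made up.
package main

import (
	"os"
	"time"

	"orly.dev/pkg/database"
	"orly.dev/pkg/utils/chk"
	"orly.dev/pkg/utils/context"
)

func main() {
	c, cancel := context.Cancel(context.Bg())
	defer cancel()
	db, err := database.New(c, cancel, "/tmp/orly-data", "info")
	if chk.E(err) {
		os.Exit(1)
	}
	defer db.Close()
	f, err := os.Open("/tmp/orly-export.jsonl")
	if chk.E(err) {
		os.Exit(1)
	}
	defer f.Close()
	// Import copies the reader to a temp file, then saves events in a goroutine.
	db.Import(f)
	// Give the background save some time before exiting (crude, illustration only).
	time.Sleep(5 * time.Second)
}
```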
@@ -186,7 +186,7 @@ func EventDec(ser *types.Uint40) (enc *T) { return New(NewPrefix(), ser) }
|
||||
// Id contains a truncated 8-byte hash of an event index. This is the secondary
|
||||
// key of an event, the primary key is the serial found in the Event.
|
||||
//
|
||||
// 3 prefix|8 Id hash|5 serial
|
||||
// 3 prefix|8 ID hash|5 serial
|
||||
var Id = next()
|
||||
|
||||
func IdVars() (id *types.IdHash, ser *types.Uint40) {
|
||||
@@ -202,7 +202,7 @@ func IdDec(id *types.IdHash, ser *types.Uint40) (enc *T) {
|
||||
// FullIdPubkey is an index designed to enable sorting and filtering of
|
||||
// results found via other indexes, without having to decode the event.
|
||||
//
|
||||
// 3 prefix|5 serial|32 Id|8 pubkey hash|8 timestamp
|
||||
// 3 prefix|5 serial|32 ID|8 pubkey hash|8 timestamp
|
||||
var FullIdPubkey = next()
|
||||
|
||||
func FullIdPubkeyVars() (
|
||||
|
||||
@@ -83,7 +83,7 @@ func TestPrefixFunction(t *testing.T) {
|
||||
expected I
|
||||
}{
|
||||
{"Event", Event, EventPrefix},
|
||||
{"Id", Id, IdPrefix},
|
||||
{"ID", Id, IdPrefix},
|
||||
{"FullIdPubkey", FullIdPubkey, FullIdPubkeyPrefix},
|
||||
{"Pubkey", Pubkey, PubkeyPrefix},
|
||||
{"CreatedAt", CreatedAt, CreatedAtPrefix},
|
||||
@@ -122,7 +122,7 @@ func TestIdentify(t *testing.T) {
|
||||
expected int
|
||||
}{
|
||||
{"Event", EventPrefix, Event},
|
||||
{"Id", IdPrefix, Id},
|
||||
{"ID", IdPrefix, Id},
|
||||
{"FullIdPubkey", FullIdPubkeyPrefix, FullIdPubkey},
|
||||
{"Pubkey", PubkeyPrefix, Pubkey},
|
||||
{"CreatedAt", CreatedAtPrefix, CreatedAt},
|
||||
|
||||
@@ -15,7 +15,7 @@ type Id struct {
|
||||
func (fi *Id) FromId(id []byte) (err error) {
|
||||
if len(id) != IdLen {
|
||||
err = errorf.E(
|
||||
"fullid.FromId: invalid Id length, got %d require %d", len(id),
|
||||
"fullid.FromId: invalid ID length, got %d require %d", len(id),
|
||||
IdLen,
|
||||
)
|
||||
return
|
||||
|
||||
@@ -43,7 +43,7 @@ func TestFromId(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIdMarshalWriteUnmarshalRead(t *testing.T) {
|
||||
// Create a Id with a known value
|
||||
// Create a ID with a known value
|
||||
fi1 := &Id{}
|
||||
validId := make([]byte, sha256.Size)
|
||||
for i := 0; i < sha256.Size; i++ {
|
||||
@@ -80,7 +80,7 @@ func TestIdMarshalWriteUnmarshalRead(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestIdUnmarshalReadWithCorruptedData(t *testing.T) {
|
||||
// Create a Id with a known value
|
||||
// Create a ID with a known value
|
||||
fi1 := &Id{}
|
||||
validId := make([]byte, sha256.Size)
|
||||
for i := 0; i < sha256.Size; i++ {
|
||||
@@ -91,7 +91,7 @@ func TestIdUnmarshalReadWithCorruptedData(t *testing.T) {
|
||||
t.Fatalf("FromId failed: %v", err)
|
||||
}
|
||||
|
||||
// Create a second Id with a different value
|
||||
// Create a second ID with a different value
|
||||
fi2 := &Id{}
|
||||
differentId := make([]byte, sha256.Size)
|
||||
for i := 0; i < sha256.Size; i++ {
|
||||
|
||||
@@ -23,7 +23,7 @@ func (i *IdHash) Set(idh []byte) {
|
||||
func (i *IdHash) FromId(id []byte) (err error) {
|
||||
if len(id) != sha256.Size {
|
||||
err = errorf.E(
|
||||
"FromId: invalid Id length, got %d require %d", len(id),
|
||||
"FromId: invalid ID length, got %d require %d", len(id),
|
||||
sha256.Size,
|
||||
)
|
||||
return
|
||||
@@ -43,7 +43,7 @@ func (i *IdHash) FromIdBase64(idb64 string) (err error) {
|
||||
// Check if the decoded ID has the correct length
|
||||
if len(decoded) != sha256.Size {
|
||||
err = errorf.E(
|
||||
"FromIdBase64: invalid Id length, got %d require %d", len(decoded),
|
||||
"FromIdBase64: invalid ID length, got %d require %d", len(decoded),
|
||||
sha256.Size,
|
||||
)
|
||||
return
|
||||
@@ -62,7 +62,7 @@ func (i *IdHash) FromIdHex(idh string) (err error) {
|
||||
}
|
||||
if len(id) != sha256.Size {
|
||||
err = errorf.E(
|
||||
"FromIdHex: invalid Id length, got %d require %d", len(id),
|
||||
"FromIdHex: invalid ID length, got %d require %d", len(id),
|
||||
sha256.Size,
|
||||
)
|
||||
return
|
||||
|
||||
@@ -45,7 +45,7 @@ func TestMultipleParameterizedReplaceableEvents(t *testing.T) {
|
||||
baseEvent.Sign(sign)
|
||||
|
||||
// Save the base parameterized replaceable event
|
||||
if _, _, err := db.SaveEvent(ctx, baseEvent); err != nil {
|
||||
if _, _, err := db.SaveEvent(ctx, baseEvent, false); err != nil {
|
||||
t.Fatalf("Failed to save base parameterized replaceable event: %v", err)
|
||||
}
|
||||
|
||||
@@ -63,7 +63,7 @@ func TestMultipleParameterizedReplaceableEvents(t *testing.T) {
|
||||
newerEvent.Sign(sign)
|
||||
|
||||
// Save the newer parameterized replaceable event
|
||||
if _, _, err := db.SaveEvent(ctx, newerEvent); err != nil {
|
||||
if _, _, err := db.SaveEvent(ctx, newerEvent, false); err != nil {
|
||||
t.Fatalf(
|
||||
"Failed to save newer parameterized replaceable event: %v", err,
|
||||
)
|
||||
@@ -83,7 +83,7 @@ func TestMultipleParameterizedReplaceableEvents(t *testing.T) {
|
||||
newestEvent.Sign(sign)
|
||||
|
||||
// Save the newest parameterized replaceable event
|
||||
if _, _, err := db.SaveEvent(ctx, newestEvent); err != nil {
|
||||
if _, _, err := db.SaveEvent(ctx, newestEvent, false); err != nil {
|
||||
t.Fatalf(
|
||||
"Failed to save newest parameterized replaceable event: %v", err,
|
||||
)
|
||||
@@ -127,10 +127,10 @@ func TestMultipleParameterizedReplaceableEvents(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify it's the newest event
|
||||
if !bytes.Equal(evs[0].Id, newestEvent.Id) {
|
||||
if !bytes.Equal(evs[0].ID, newestEvent.ID) {
|
||||
t.Fatalf(
|
||||
"Event ID doesn't match the newest event. Got %x, expected %x",
|
||||
evs[0].Id, newestEvent.Id,
|
||||
evs[0].ID, newestEvent.ID,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -145,7 +145,7 @@ func TestMultipleParameterizedReplaceableEvents(t *testing.T) {
|
||||
// Query for the base event by ID
|
||||
evs, err = db.QueryEvents(
|
||||
ctx, &filter.F{
|
||||
Ids: tag.New(baseEvent.Id),
|
||||
Ids: tag.New(baseEvent.ID),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
@@ -161,10 +161,10 @@ func TestMultipleParameterizedReplaceableEvents(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify it's the base event
|
||||
if !bytes.Equal(evs[0].Id, baseEvent.Id) {
|
||||
if !bytes.Equal(evs[0].ID, baseEvent.ID) {
|
||||
t.Fatalf(
|
||||
"Event ID doesn't match when querying for base event by ID. Got %x, expected %x",
|
||||
evs[0].Id, baseEvent.Id,
|
||||
evs[0].ID, baseEvent.ID,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
|
||||
}
|
||||
// fetch the events
|
||||
var ev *event.E
|
||||
if ev, err = d.FetchEventBySerial(ser); chk.E(err) {
|
||||
if ev, err = d.FetchEventBySerial(ser); err != nil {
|
||||
continue
|
||||
}
|
||||
evs = append(evs, ev)
|
||||
@@ -218,7 +218,7 @@ func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
|
||||
isIdInFilter := false
|
||||
if f.Ids != nil && f.Ids.Len() > 0 {
|
||||
for i := 0; i < f.Ids.Len(); i++ {
|
||||
if bytes.Equal(ev.Id, f.Ids.B(i)) {
|
||||
if bytes.Equal(ev.ID, f.Ids.B(i)) {
|
||||
isIdInFilter = true
|
||||
break
|
||||
}
|
||||
|
||||
@@ -62,7 +62,7 @@ func setupTestDB(t *testing.T) (
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -90,7 +90,7 @@ func TestQueryEventsByID(t *testing.T) {
|
||||
|
||||
evs, err := db.QueryEvents(
|
||||
ctx, &filter.F{
|
||||
Ids: tag.New(testEvent.Id),
|
||||
Ids: tag.New(testEvent.ID),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
@@ -103,10 +103,10 @@ func TestQueryEventsByID(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify it's the correct event
|
||||
if !bytes.Equal(evs[0].Id, testEvent.Id) {
|
||||
if !bytes.Equal(evs[0].ID, testEvent.ID) {
|
||||
t.Fatalf(
|
||||
"Event ID doesn't match. Got %x, expected %x", evs[0].Id,
|
||||
testEvent.Id,
|
||||
"Event ID doesn't match. Got %x, expected %x", evs[0].ID,
|
||||
testEvent.ID,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -202,7 +202,7 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
||||
replaceableEvent.Tags = tags.New()
|
||||
replaceableEvent.Sign(sign)
|
||||
// Save the replaceable event
|
||||
if _, _, err := db.SaveEvent(ctx, replaceableEvent); err != nil {
|
||||
if _, _, err := db.SaveEvent(ctx, replaceableEvent, false); err != nil {
|
||||
t.Fatalf("Failed to save replaceable event: %v", err)
|
||||
}
|
||||
|
||||
@@ -216,14 +216,14 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
||||
newerEvent.Tags = tags.New()
|
||||
newerEvent.Sign(sign)
|
||||
// Save the newer event
|
||||
if _, _, err := db.SaveEvent(ctx, newerEvent); err != nil {
|
||||
if _, _, err := db.SaveEvent(ctx, newerEvent, false); err != nil {
|
||||
t.Fatalf("Failed to save newer event: %v", err)
|
||||
}
|
||||
|
||||
// Query for the original event by ID
|
||||
evs, err := db.QueryEvents(
|
||||
ctx, &filter.F{
|
||||
Ids: tag.New(replaceableEvent.Id),
|
||||
Ids: tag.New(replaceableEvent.ID),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
@@ -239,10 +239,10 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify it's the original event
|
||||
if !bytes.Equal(evs[0].Id, replaceableEvent.Id) {
|
||||
if !bytes.Equal(evs[0].ID, replaceableEvent.ID) {
|
||||
t.Fatalf(
|
||||
"Event ID doesn't match when querying for replaced event. Got %x, expected %x",
|
||||
evs[0].Id, replaceableEvent.Id,
|
||||
evs[0].ID, replaceableEvent.ID,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -269,10 +269,10 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify it's the newer event
|
||||
if !bytes.Equal(evs[0].Id, newerEvent.Id) {
|
||||
if !bytes.Equal(evs[0].ID, newerEvent.ID) {
|
||||
t.Fatalf(
|
||||
"Event ID doesn't match when querying for replaceable events. Got %x, expected %x",
|
||||
evs[0].Id, newerEvent.Id,
|
||||
evs[0].ID, newerEvent.ID,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -289,11 +289,11 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
||||
|
||||
// Add an e-tag referencing the replaceable event
|
||||
deletionEvent.Tags = deletionEvent.Tags.AppendTags(
|
||||
tag.New([]byte{'e'}, []byte(hex.Enc(replaceableEvent.Id))),
|
||||
tag.New([]byte{'e'}, []byte(hex.Enc(replaceableEvent.ID))),
|
||||
)
|
||||
|
||||
// Save the deletion event
|
||||
if _, _, err = db.SaveEvent(ctx, deletionEvent); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, deletionEvent, false); err != nil {
|
||||
t.Fatalf("Failed to save deletion event: %v", err)
|
||||
}
|
||||
|
||||
@@ -319,17 +319,17 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify it's still the newer event
|
||||
if !bytes.Equal(evs[0].Id, newerEvent.Id) {
|
||||
if !bytes.Equal(evs[0].ID, newerEvent.ID) {
|
||||
t.Fatalf(
|
||||
"Event ID doesn't match after deletion. Got %x, expected %x",
|
||||
evs[0].Id, newerEvent.Id,
|
||||
evs[0].ID, newerEvent.ID,
|
||||
)
|
||||
}
|
||||
|
||||
// Query for the original event by ID
|
||||
evs, err = db.QueryEvents(
|
||||
ctx, &filter.F{
|
||||
Ids: tag.New(replaceableEvent.Id),
|
||||
Ids: tag.New(replaceableEvent.ID),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
@@ -345,10 +345,10 @@ func TestReplaceableEventsAndDeletion(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify it's the original event
|
||||
if !bytes.Equal(evs[0].Id, replaceableEvent.Id) {
|
||||
if !bytes.Equal(evs[0].ID, replaceableEvent.ID) {
|
||||
t.Fatalf(
|
||||
"Event ID doesn't match when querying for deleted event by ID. Got %x, expected %x",
|
||||
evs[0].Id, replaceableEvent.Id,
|
||||
evs[0].ID, replaceableEvent.ID,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -379,7 +379,7 @@ func TestParameterizedReplaceableEventsAndDeletion(t *testing.T) {
|
||||
paramEvent.Sign(sign)
|
||||
|
||||
// Save the parameterized replaceable event
|
||||
if _, _, err := db.SaveEvent(ctx, paramEvent); err != nil {
|
||||
if _, _, err := db.SaveEvent(ctx, paramEvent, false); err != nil {
|
||||
t.Fatalf("Failed to save parameterized replaceable event: %v", err)
|
||||
}
|
||||
|
||||
@@ -405,7 +405,7 @@ func TestParameterizedReplaceableEventsAndDeletion(t *testing.T) {
|
||||
paramDeletionEvent.Sign(sign)
|
||||
|
||||
// Save the parameterized deletion event
|
||||
if _, _, err := db.SaveEvent(ctx, paramDeletionEvent); err != nil {
|
||||
if _, _, err := db.SaveEvent(ctx, paramDeletionEvent, false); err != nil {
|
||||
t.Fatalf("Failed to save parameterized deletion event: %v", err)
|
||||
}
|
||||
|
||||
@@ -433,12 +433,12 @@ func TestParameterizedReplaceableEventsAndDeletion(t *testing.T) {
|
||||
paramDeletionEvent2.Tags = tags.New()
|
||||
// Add an e-tag referencing the parameterized replaceable event
|
||||
paramDeletionEvent2.Tags = paramDeletionEvent2.Tags.AppendTags(
|
||||
tag.New([]byte{'e'}, []byte(hex.Enc(paramEvent.Id))),
|
||||
tag.New([]byte{'e'}, []byte(hex.Enc(paramEvent.ID))),
|
||||
)
|
||||
paramDeletionEvent2.Sign(sign)
|
||||
|
||||
// Save the parameterized deletion event with e-tag
|
||||
if _, _, err := db.SaveEvent(ctx, paramDeletionEvent2); err != nil {
|
||||
if _, _, err := db.SaveEvent(ctx, paramDeletionEvent2, false); err != nil {
|
||||
t.Fatalf(
|
||||
"Failed to save parameterized deletion event with e-tag: %v", err,
|
||||
)
|
||||
@@ -483,7 +483,7 @@ func TestParameterizedReplaceableEventsAndDeletion(t *testing.T) {
|
||||
// Query for the parameterized event by ID
|
||||
evs, err = db.QueryEvents(
|
||||
ctx, &filter.F{
|
||||
Ids: tag.New(paramEvent.Id),
|
||||
Ids: tag.New(paramEvent.ID),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
@@ -501,10 +501,10 @@ func TestParameterizedReplaceableEventsAndDeletion(t *testing.T) {
|
||||
}
|
||||
|
||||
// Verify it's the correct event
|
||||
if !bytes.Equal(evs[0].Id, paramEvent.Id) {
|
||||
if !bytes.Equal(evs[0].ID, paramEvent.ID) {
|
||||
t.Fatalf(
|
||||
"Event ID doesn't match when querying for deleted parameterized event by ID. Got %x, expected %x",
|
||||
evs[0].Id, paramEvent.Id,
|
||||
evs[0].ID, paramEvent.ID,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,7 +57,7 @@ func TestQueryForAuthorsTags(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -131,7 +131,7 @@ func TestQueryForAuthorsTags(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
|
||||
if !bytes.Equal(ev.Pubkey, testEvent.Pubkey) {
|
||||
|
||||
@@ -56,7 +56,7 @@ func TestQueryForCreatedAt(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -105,7 +105,7 @@ func TestQueryForCreatedAt(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
@@ -143,7 +143,7 @@ func TestQueryForCreatedAt(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
@@ -181,7 +181,7 @@ func TestQueryForCreatedAt(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
|
||||
@@ -43,6 +43,9 @@ func (d *D) QueryForIds(c context.T, f *filter.F) (
|
||||
if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if fidpk == nil {
|
||||
continue
|
||||
}
|
||||
tagIdPkTs = append(tagIdPkTs, *fidpk)
|
||||
}
|
||||
} else {
|
||||
@@ -59,6 +62,9 @@ func (d *D) QueryForIds(c context.T, f *filter.F) (
|
||||
if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if fidpk == nil {
|
||||
continue
|
||||
}
|
||||
temp = append(temp, *fidpk)
|
||||
}
|
||||
var intersecting []store.IdPkTs
|
||||
@@ -93,6 +99,9 @@ func (d *D) QueryForIds(c context.T, f *filter.F) (
|
||||
if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if fidpk == nil {
|
||||
continue
|
||||
}
|
||||
idPkTs = append(idPkTs, *fidpk)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -60,7 +60,7 @@ func TestQueryForIds(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -86,34 +86,34 @@ func TestQueryForIds(t *testing.T) {
|
||||
len(idTsPk),
|
||||
)
|
||||
}
|
||||
if !bytes.Equal(idTsPk[0].Id, events[5474].Id) {
|
||||
if !bytes.Equal(idTsPk[0].Id, events[5474].ID) {
|
||||
t.Fatalf(
|
||||
"failed to get expected event, got %0x, expected %0x", idTsPk[0].Id,
|
||||
events[5474].Id,
|
||||
events[5474].ID,
|
||||
)
|
||||
}
|
||||
if !bytes.Equal(idTsPk[1].Id, events[272].Id) {
|
||||
if !bytes.Equal(idTsPk[1].Id, events[272].ID) {
|
||||
t.Fatalf(
|
||||
"failed to get expected event, got %0x, expected %0x", idTsPk[1].Id,
|
||||
events[272].Id,
|
||||
events[272].ID,
|
||||
)
|
||||
}
|
||||
if !bytes.Equal(idTsPk[2].Id, events[1].Id) {
|
||||
if !bytes.Equal(idTsPk[2].Id, events[1].ID) {
|
||||
t.Fatalf(
|
||||
"failed to get expected event, got %0x, expected %0x", idTsPk[2].Id,
|
||||
events[1].Id,
|
||||
events[1].ID,
|
||||
)
|
||||
}
|
||||
if !bytes.Equal(idTsPk[3].Id, events[80].Id) {
|
||||
if !bytes.Equal(idTsPk[3].Id, events[80].ID) {
|
||||
t.Fatalf(
|
||||
"failed to get expected event, got %0x, expected %0x", idTsPk[3].Id,
|
||||
events[80].Id,
|
||||
events[80].ID,
|
||||
)
|
||||
}
|
||||
if !bytes.Equal(idTsPk[4].Id, events[123].Id) {
|
||||
if !bytes.Equal(idTsPk[4].Id, events[123].ID) {
|
||||
t.Fatalf(
|
||||
"failed to get expected event, got %0x, expected %0x", idTsPk[4].Id,
|
||||
events[123].Id,
|
||||
events[123].ID,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -141,7 +141,7 @@ func TestQueryForIds(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
if ev.Kind.K != testKind.K {
|
||||
t.Fatalf(
|
||||
@@ -207,7 +207,7 @@ func TestQueryForIds(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
|
||||
// Check if the event has the tag we're looking for
|
||||
@@ -258,7 +258,7 @@ func TestQueryForIds(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
if ev.Kind.K != testKind.K {
|
||||
t.Fatalf(
|
||||
@@ -305,7 +305,7 @@ func TestQueryForIds(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
if ev.Kind.K != testEvent.Kind.K {
|
||||
t.Fatalf(
|
||||
@@ -366,7 +366,7 @@ func TestQueryForIds(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
if ev.Kind.K != testEvent.Kind.K {
|
||||
t.Fatalf(
|
||||
@@ -433,7 +433,7 @@ func TestQueryForIds(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
|
||||
if !bytes.Equal(ev.Pubkey, testEvent.Pubkey) {
|
||||
@@ -506,7 +506,7 @@ func TestQueryForIds(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
|
||||
@@ -58,7 +58,7 @@ func TestQueryForKindsAuthorsTags(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -136,7 +136,7 @@ func TestQueryForKindsAuthorsTags(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
if ev.Kind.K != testKind.K {
|
||||
t.Fatalf(
|
||||
|
||||
@@ -58,7 +58,7 @@ func TestQueryForKindsAuthors(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -102,7 +102,7 @@ func TestQueryForKindsAuthors(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
if ev.Kind.K != testKind.K {
|
||||
t.Fatalf(
|
||||
|
||||
@@ -58,7 +58,7 @@ func TestQueryForKindsTags(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -132,7 +132,7 @@ func TestQueryForKindsTags(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
if ev.Kind.K != testKind.K {
|
||||
t.Fatalf(
|
||||
|
||||
@@ -57,7 +57,7 @@ func TestQueryForKinds(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -96,7 +96,7 @@ func TestQueryForKinds(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
if ev.Kind.K != testKind.K {
|
||||
t.Fatalf(
|
||||
|
||||
@@ -31,6 +31,9 @@ func (d *D) QueryForSerials(c context.T, f *filter.F) (
|
||||
if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if fidpk == nil {
|
||||
continue
|
||||
}
|
||||
idPkTs = append(idPkTs, *fidpk)
|
||||
// sort by timestamp
|
||||
sort.Slice(
|
||||
|
||||
@@ -60,12 +60,12 @@ func TestQueryForSerials(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
// Get the serial for this event
|
||||
serial, err := db.GetSerialById(ev.Id)
|
||||
serial, err := db.GetSerialById(ev.ID)
|
||||
if err != nil {
|
||||
t.Fatalf(
|
||||
"Failed to get serial for event #%d: %v", eventCount+1, err,
|
||||
@@ -73,7 +73,7 @@ func TestQueryForSerials(t *testing.T) {
|
||||
}
|
||||
|
||||
if serial != nil {
|
||||
eventSerials[string(ev.Id)] = serial
|
||||
eventSerials[string(ev.ID)] = serial
|
||||
}
|
||||
|
||||
eventCount++
|
||||
@@ -91,7 +91,7 @@ func TestQueryForSerials(t *testing.T) {
|
||||
|
||||
serials, err := db.QueryForSerials(
|
||||
ctx, &filter.F{
|
||||
Ids: tag.New(testEvent.Id),
|
||||
Ids: tag.New(testEvent.ID),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
@@ -110,10 +110,10 @@ func TestQueryForSerials(t *testing.T) {
|
||||
t.Fatalf("Failed to fetch event for serial: %v", err)
|
||||
}
|
||||
|
||||
if !bytes.Equal(ev.Id, testEvent.Id) {
|
||||
if !bytes.Equal(ev.ID, testEvent.ID) {
|
||||
t.Fatalf(
|
||||
"Event ID doesn't match. Got %x, expected %x",
|
||||
ev.Id, testEvent.Id,
|
||||
ev.ID, testEvent.ID,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -57,7 +57,7 @@ func TestQueryForTags(t *testing.T) {
|
||||
events = append(events, ev)
|
||||
|
||||
// Save the event to the database
|
||||
if _, _, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if _, _, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
|
||||
@@ -126,7 +126,7 @@ func TestQueryForTags(t *testing.T) {
|
||||
// Find the event with this ID
|
||||
var found bool
|
||||
for _, ev := range events {
|
||||
if bytes.Equal(result.Id, ev.Id) {
|
||||
if bytes.Equal(result.Id, ev.ID) {
|
||||
found = true
|
||||
|
||||
// Check if the event has the tag we're looking for
|
||||
|
||||
@@ -20,11 +20,17 @@ import (
)

// SaveEvent saves an event to the database, generating all the necessary indexes.
func (d *D) SaveEvent(c context.T, ev *event.E) (kc, vc int, err error) {
// Get a buffer from the pool
buf := new(bytes.Buffer)
// Marshal the event to binary
ev.MarshalBinary(buf)
func (d *D) SaveEvent(c context.T, ev *event.E, noVerify bool) (
kc, vc int, err error,
) {
if !noVerify {
// check if the event already exists
var ser *types.Uint40
if ser, err = d.GetSerialById(ev.ID); err == nil && ser != nil {
err = errorf.E("event already exists: %0x", ev.ID)
return
}
}

// check if an existing delete event references this event submission
if ev.Kind.IsParameterizedReplaceable() {
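The new noVerify parameter skips only the duplicate-ID lookup shown above; the deletion checks that follow still run. A minimal sketch of how a caller might choose the flag, assuming it sits in the same package as SaveEvent (the helper name and the trusted parameter are illustrative, not part of this change):

// saveIncoming is a hypothetical wrapper, not part of this diff. Events
// arriving over the network keep the duplicate check (noVerify=false), while
// events replayed from a trusted local dump skip it (noVerify=true).
func saveIncoming(c context.T, d *D, ev *event.E, trusted bool) (err error) {
	if _, _, err = d.SaveEvent(c, ev, trusted); chk.E(err) {
		return
	}
	return
}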
@@ -59,13 +65,16 @@ func (d *D) SaveEvent(c context.T, ev *event.E) (kc, vc int, err error) {
|
||||
// stable value but refers to any event from the author, of the
|
||||
// kind, with the identifier. so we need to fetch the full ID index
|
||||
// to get the timestamp and ensure that the event post-dates it.
|
||||
// otherwise it should be rejected.
|
||||
// otherwise, it should be rejected.
|
||||
var idPkTss []*store.IdPkTs
|
||||
for _, ser := range sers {
|
||||
var fidpk *store.IdPkTs
|
||||
if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if fidpk == nil {
|
||||
continue
|
||||
}
|
||||
idPkTss = append(idPkTss, fidpk)
|
||||
}
|
||||
// sort by timestamp, so the first is the newest
|
||||
@@ -77,7 +86,7 @@ func (d *D) SaveEvent(c context.T, ev *event.E) (kc, vc int, err error) {
|
||||
if ev.CreatedAt.I64() < idPkTss[0].Ts {
|
||||
err = errorf.E(
|
||||
"blocked: %0x was deleted by address %s because it is older than the delete: event: %d delete: %d",
|
||||
ev.Id, at, ev.CreatedAt.I64(), idPkTss[0].Ts,
|
||||
ev.ID, at, ev.CreatedAt.I64(), idPkTss[0].Ts,
|
||||
)
|
||||
return
|
||||
}
|
||||
@@ -85,17 +94,15 @@ func (d *D) SaveEvent(c context.T, ev *event.E) (kc, vc int, err error) {
}
} else {
var idxs []Range
// log.I.S(ev.Pubkey)
if idxs, err = GetIndexesFromFilter(
&filter.F{
Authors: tag.New(ev.Pubkey),
Kinds: kinds.New(kind.Deletion),
Tags: tags.New(tag.New([]byte("#e"), ev.Id)),
Tags: tags.New(tag.New([]byte("#e"), ev.ID)),
},
); chk.E(err) {
return
}
// log.I.S(idxs)
var sers types.Uint40s
for _, idx := range idxs {
var s types.Uint40s
@@ -108,7 +115,7 @@ func (d *D) SaveEvent(c context.T, ev *event.E) (kc, vc int, err error) {
// really there can only be one of these; the chances of an idhash
// collision are basically zero in practice, at least, one in a
// billion or more anyway, more than a human is going to create.
err = errorf.E("blocked: %0x was deleted by event Id", ev.Id)
err = errorf.E("blocked: %0x was deleted by event ID", ev.ID)
return
}
}

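For context, the same filter shape can be issued from the query side. A hedged sketch of checking whether an author has published a deletion that references a given event ID; the db, ctx, author, and eventID variables are assumed to be in scope, and the field names mirror the filter built in the hunk above:

// Hypothetical query mirroring the relay-side deletion check above.
evs, err := db.QueryEvents(
	ctx, &filter.F{
		Authors: tag.New(author),
		Kinds:   kinds.New(kind.Deletion),
		Tags:    tags.New(tag.New([]byte("#e"), eventID)),
	},
)
if err == nil && len(evs) > 0 {
	// at least one deletion event from author references eventID
}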
@@ -64,7 +64,7 @@ func TestSaveEvents(t *testing.T) {
|
||||
|
||||
// Save the event to the database
|
||||
var k, v int
|
||||
if k, v, err = db.SaveEvent(ctx, ev); err != nil {
|
||||
if k, v, err = db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event #%d: %v", eventCount+1, err)
|
||||
}
|
||||
kc += k
|
||||
@@ -125,7 +125,7 @@ func TestDeletionEventWithETagRejection(t *testing.T) {
|
||||
regularEvent.Sign(sign)
|
||||
|
||||
// Save the regular event
|
||||
if _, _, err := db.SaveEvent(ctx, regularEvent); err != nil {
|
||||
if _, _, err := db.SaveEvent(ctx, regularEvent, false); err != nil {
|
||||
t.Fatalf("Failed to save regular event: %v", err)
|
||||
}
|
||||
|
||||
@@ -140,13 +140,13 @@ func TestDeletionEventWithETagRejection(t *testing.T) {
|
||||
|
||||
// Add an e-tag referencing the regular event
|
||||
deletionEvent.Tags = deletionEvent.Tags.AppendTags(
|
||||
tag.New([]byte{'e'}, []byte(hex.Enc(regularEvent.Id))),
|
||||
tag.New([]byte{'e'}, []byte(hex.Enc(regularEvent.ID))),
|
||||
)
|
||||
|
||||
deletionEvent.Sign(sign)
|
||||
|
||||
// Try to save the deletion event, it should be rejected
|
||||
_, _, err = db.SaveEvent(ctx, deletionEvent)
|
||||
_, _, err = db.SaveEvent(ctx, deletionEvent, false)
|
||||
if err == nil {
|
||||
t.Fatal("Expected deletion event with e-tag to be rejected, but it was accepted")
|
||||
}
|
||||
@@ -154,6 +154,66 @@ func TestDeletionEventWithETagRejection(t *testing.T) {
|
||||
// Verify the error message
|
||||
expectedError := "deletion events referencing other events with 'e' tag are not allowed"
|
||||
if err.Error() != expectedError {
|
||||
t.Fatalf("Expected error message '%s', got '%s'", expectedError, err.Error())
|
||||
t.Fatalf(
|
||||
"Expected error message '%s', got '%s'", expectedError, err.Error(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// TestSaveExistingEvent tests that attempting to save an event that already exists
|
||||
// returns an error.
|
||||
func TestSaveExistingEvent(t *testing.T) {
|
||||
// Create a temporary directory for the database
|
||||
tempDir, err := os.MkdirTemp("", "test-db-*")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create temporary directory: %v", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir) // Clean up after the test
|
||||
|
||||
// Create a context and cancel function for the database
|
||||
ctx, cancel := context.Cancel(context.Bg())
|
||||
defer cancel()
|
||||
|
||||
// Initialize the database
|
||||
db, err := New(ctx, cancel, tempDir, "info")
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create database: %v", err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
// Create a signer
|
||||
sign := new(p256k.Signer)
|
||||
if err := sign.Generate(); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Create an event
|
||||
ev := event.New()
|
||||
ev.Kind = kind.TextNote // Kind 1 is a text note
|
||||
ev.Pubkey = sign.Pub()
|
||||
ev.CreatedAt = new(timestamp.T)
|
||||
ev.CreatedAt.V = timestamp.Now().V
|
||||
ev.Content = []byte("Test event")
|
||||
ev.Tags = tags.New()
|
||||
ev.Sign(sign)
|
||||
|
||||
// Save the event for the first time
|
||||
if _, _, err := db.SaveEvent(ctx, ev, false); err != nil {
|
||||
t.Fatalf("Failed to save event: %v", err)
|
||||
}
|
||||
|
||||
// Try to save the same event again, it should be rejected
|
||||
_, _, err = db.SaveEvent(ctx, ev, false)
|
||||
if err == nil {
|
||||
t.Fatal("Expected error when saving an existing event, but got nil")
|
||||
}
|
||||
|
||||
// Verify the error message
|
||||
expectedErrorPrefix := "event already exists: "
|
||||
if !bytes.HasPrefix([]byte(err.Error()), []byte(expectedErrorPrefix)) {
|
||||
t.Fatalf(
|
||||
"Expected error message to start with '%s', got '%s'",
|
||||
expectedErrorPrefix, err.Error(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -232,7 +232,7 @@ func EncodeEvent(
|
||||
return bech32.Encode(NeventHRP, bits5)
|
||||
}
|
||||
|
||||
// EncodeEntity encodes a pubkey, kind, event Id, and relay hints.
|
||||
// EncodeEntity encodes a pubkey, kind, event ID, and relay hints.
|
||||
func EncodeEntity(pk []byte, k *kind.T, id []byte, relays [][]byte) (
|
||||
s []byte, err error,
|
||||
) {
|
||||
|
||||
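A hedged usage sketch for EncodeEntity as declared above; the 32-byte values and relay URL are placeholders, and the error check uses the chk helper seen elsewhere in this codebase:

// All inputs are dummy placeholder values, for illustration only.
pk := bytes.Repeat([]byte{0x02}, 32) // 32-byte pubkey
id := bytes.Repeat([]byte{0x01}, 32) // 32-byte event ID
relays := [][]byte{[]byte("wss://relay.example.com")}
if s, err := EncodeEntity(pk, kind.TextNote, id, relays); !chk.E(err) {
	_ = s // s holds the bech32-encoded entity
}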
@@ -13,7 +13,7 @@ type Profile struct {
|
||||
Relays [][]byte `json:"relays,omitempty"`
|
||||
}
|
||||
|
||||
// Event pointer is the combination of an event Id, relay hints, author, pubkey,
|
||||
// Event pointer is the combination of an event ID, relay hints, author, pubkey,
|
||||
// and kind.
|
||||
type Event struct {
|
||||
ID *eventid.T `json:"id"`
|
||||
|
||||
@@ -35,7 +35,20 @@ func NewChallengeWith[V string | []byte](challenge V) *Challenge {
|
||||
// Label returns the label of a authenvelope.Challenge.
|
||||
func (en *Challenge) Label() string { return L }
|
||||
|
||||
// Write the authenvelope.Challenge to a provided io.Writer.
|
||||
// Write encodes and writes the Challenge instance to the provided writer.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - w (io.Writer): The destination where the encoded data will be written.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - err (error): An error if writing to the writer fails.
|
||||
//
|
||||
// # Expected behaviour
|
||||
//
|
||||
// Encodes the Challenge instance into a byte slice using Marshal, logs the
|
||||
// encoded challenge, and writes it to the provided io.Writer.
|
||||
func (en *Challenge) Write(w io.Writer) (err error) {
|
||||
var b []byte
|
||||
b = en.Marshal(b)
|
||||
@@ -44,8 +57,26 @@ func (en *Challenge) Write(w io.Writer) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// Marshal a authenvelope.Challenge to minified JSON, appending to a provided destination
|
||||
// slice. Note that this ensures correct string escaping on the challenge field.
|
||||
// Marshal encodes the Challenge instance into a byte slice, formatting it as
|
||||
// a JSON-like structure with a specific label and escaping rules applied to
|
||||
// its content.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - dst ([]byte): The destination buffer where the encoded data will be written.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - b ([]byte): The byte slice containing the encoded Challenge data.
|
||||
//
|
||||
// # Expected behaviour
|
||||
//
|
||||
// - Prepares the destination buffer and applies a label to it.
|
||||
//
|
||||
// - Escapes the challenge content according to Nostr-specific rules before
|
||||
// appending it to the output.
|
||||
//
|
||||
// - Returns the resulting byte slice with the complete encoded structure.
|
||||
func (en *Challenge) Marshal(dst []byte) (b []byte) {
|
||||
b = dst
|
||||
var err error
|
||||
@@ -63,9 +94,24 @@ func (en *Challenge) Marshal(dst []byte) (b []byte) {
|
||||
return
|
||||
}
|
||||
|
||||
// Unmarshal a authenvelope.Challenge from minified JSON, returning the remainder after the
|
||||
// end of the envelope. Note that this ensures the challenge string was
|
||||
// correctly escaped by NIP-01 escaping rules.
|
||||
// Unmarshal parses the provided byte slice and extracts the challenge value,
|
||||
// leaving any remaining bytes after parsing.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - b ([]byte): The byte slice containing the encoded challenge data.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - r ([]byte): Any remaining bytes after parsing the challenge.
|
||||
//
|
||||
// - err (error): An error if parsing fails.
|
||||
//
|
||||
// # Expected behaviour
|
||||
//
|
||||
// - Extracts the quoted challenge string from the input byte slice.
|
||||
//
|
||||
// - Trims any trailing characters following the closing quote.
|
||||
func (en *Challenge) Unmarshal(b []byte) (r []byte, err error) {
|
||||
r = b
|
||||
if en.Challenge, r, err = text2.UnmarshalQuoted(r); chk.E(err) {
|
||||
@@ -80,8 +126,26 @@ func (en *Challenge) Unmarshal(b []byte) (r []byte, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// ParseChallenge reads a authenvelope.Challenge encoded in minified JSON and unpacks it to
|
||||
// the runtime format.
|
||||
// ParseChallenge parses the provided byte slice into a new Challenge instance,
|
||||
// extracting the challenge value and returning any remaining bytes after parsing.
|
||||
//
|
||||
// # Parameters
|
||||
//
|
||||
// - b ([]byte): The byte slice containing the encoded challenge data.
|
||||
//
|
||||
// # Return Values
|
||||
//
|
||||
// - t (*Challenge): A pointer to the newly created and populated Challenge
|
||||
// instance.
|
||||
//
|
||||
// - rem ([]byte): Any remaining bytes in the input slice after parsing.
|
||||
//
|
||||
// - err (error): An error if parsing fails.
|
||||
//
|
||||
// # Expected behaviour
|
||||
//
|
||||
// Parses the byte slice into a new Challenge instance using Unmarshal,
|
||||
// returning any remaining bytes and an error if parsing fails.
|
||||
func ParseChallenge(b []byte) (t *Challenge, rem []byte, err error) {
|
||||
t = NewChallenge()
|
||||
if rem, err = t.Unmarshal(b); chk.E(err) {
|
||||
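A minimal usage sketch for the documented Write/Marshal methods above, assumed to sit alongside the authenvelope package; the challenge string is arbitrary and the bytes import is implied:

// Build a Challenge and write its wire form to any io.Writer.
en := NewChallengeWith("s3cr3t-challenge")
var buf bytes.Buffer
if err := en.Write(&buf); chk.E(err) {
	return
}
_ = buf.Bytes() // minified JSON envelope carrying the escaped challenge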
@@ -107,7 +171,7 @@ func NewResponseWith(event *event.E) *Response { return &Response{Event: event}
|
||||
// Label returns the label of a auth Response envelope.
|
||||
func (en *Response) Label() string { return L }
|
||||
|
||||
func (en *Response) Id() []byte { return en.Event.Id }
|
||||
func (en *Response) Id() []byte { return en.Event.ID }
|
||||
|
||||
// Write the Response to a provided io.Writer.
|
||||
func (en *Response) Write(w io.Writer) (err error) {
|
||||
|
||||
@@ -30,7 +30,7 @@ func New() *T {
|
||||
return &T{Subscription: subscription.NewStd()}
|
||||
}
|
||||
|
||||
// NewFrom creates a new closedenvelope.T populated with subscription Id and Reason.
|
||||
// NewFrom creates a new closedenvelope.T populated with subscription ID and Reason.
|
||||
func NewFrom(id *subscription.Id, msg []byte) *T {
|
||||
return &T{
|
||||
Subscription: id, Reason: msg,
|
||||
|
||||
@@ -52,7 +52,7 @@ func TestMarshalUnmarshal(t *testing.T) {
|
||||
if rem, err = req2.Unmarshal(rb); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// log.I.Ln(req2.Id)
|
||||
// log.I.Ln(req2.ID)
|
||||
if len(rem) > 0 {
|
||||
t.Fatalf(
|
||||
"unmarshal failed, remainder\n%d %s",
|
||||
|
||||
@@ -24,7 +24,7 @@ var _ codec.Envelope = (*T)(nil)
|
||||
// New creates an empty new standard formatted closeenvelope.T.
|
||||
func New() *T { return &T{ID: subscription.NewStd()} }
|
||||
|
||||
// NewFrom creates a new closeenvelope.T populated with subscription Id.
|
||||
// NewFrom creates a new closeenvelope.T populated with subscription ID.
|
||||
func NewFrom(id *subscription.Id) *T { return &T{ID: id} }
|
||||
|
||||
// Label returns the label of a closeenvelope.T.
|
||||
|
||||
@@ -34,7 +34,7 @@ func TestMarshalUnmarshal(t *testing.T) {
|
||||
if rem, err = req2.Unmarshal(rb); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// log.I.Ln(req2.Id)
|
||||
// log.I.Ln(req2.ID)
|
||||
if len(rem) > 0 {
|
||||
t.Fatalf(
|
||||
"unmarshal failed, remainder\n%d %s",
|
||||
|
||||
@@ -177,7 +177,7 @@ func (en *Response) Unmarshal(b []byte) (r []byte, err error) {
|
||||
r = b
|
||||
var inID, inCount bool
|
||||
for ; len(r) > 0; r = r[1:] {
|
||||
// first we should be finding a subscription Id
|
||||
// first we should be finding a subscription ID
|
||||
if !inID && r[0] == '"' {
|
||||
r = r[1:]
|
||||
// so we don't do this twice
|
||||
|
||||
@@ -20,7 +20,7 @@ func TestMarshalUnmarshal(t *testing.T) {
|
||||
}
|
||||
req := NewFrom(s)
|
||||
rb = req.Marshal(rb)
|
||||
// log.I.Ln(req.Id)
|
||||
// log.I.Ln(req.ID)
|
||||
rb1 = rb1[:len(rb)]
|
||||
copy(rb1, rb)
|
||||
var rem []byte
|
||||
@@ -35,7 +35,7 @@ func TestMarshalUnmarshal(t *testing.T) {
|
||||
if rem, err = req2.Unmarshal(rb); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// log.I.Ln(req2.Id)
|
||||
// log.I.Ln(req2.ID)
|
||||
if len(rem) > 0 {
|
||||
t.Fatalf(
|
||||
"unmarshal failed, remainder\n%d %s",
|
||||
|
||||
@@ -31,7 +31,7 @@ func NewSubmissionWith(ev *event.E) *Submission { return &Submission{E: ev} }
|
||||
// Label returns the label of a event eventenvelope.Submission envelope.
|
||||
func (en *Submission) Label() string { return L }
|
||||
|
||||
func (en *Submission) Id() []byte { return en.E.Id }
|
||||
func (en *Submission) Id() []byte { return en.E.ID }
|
||||
|
||||
// Write the Submission to a provided io.Writer.
|
||||
func (en *Submission) Write(w io.Writer) (err error) {
|
||||
@@ -104,7 +104,7 @@ func NewResultWith[V string | []byte](s V, ev *event.E) (
|
||||
return &Result{subscription.MustNew(s), ev}, nil
|
||||
}
|
||||
|
||||
func (en *Result) Id() []byte { return en.Event.Id }
|
||||
func (en *Result) Id() []byte { return en.Event.ID }
|
||||
|
||||
// Label returns the label of a event eventenvelope.Result envelope.
|
||||
func (en *Result) Label() string { return L }
|
||||
@@ -145,7 +145,7 @@ func (en *Result) Unmarshal(b []byte) (r []byte, err error) {
|
||||
return
|
||||
}
|
||||
en.Event = event.New()
|
||||
if r, err = en.Event.Unmarshal(r); chk.E(err) {
|
||||
if r, err = en.Event.Unmarshal(r); err != nil {
|
||||
return
|
||||
}
|
||||
if r, err = envelopes.SkipToTheEnd(r); chk.E(err) {
|
||||
@@ -158,7 +158,7 @@ func (en *Result) Unmarshal(b []byte) (r []byte, err error) {
|
||||
// envelope into it.
|
||||
func ParseResult(b []byte) (t *Result, rem []byte, err error) {
|
||||
t = NewResult()
|
||||
if rem, err = t.Unmarshal(b); chk.T(err) {
|
||||
if rem, err = t.Unmarshal(b); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
|
||||
@@ -30,7 +30,7 @@ func TestMarshalUnmarshal(t *testing.T) {
|
||||
if rem, err = req2.Unmarshal(rb); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// log.I.Ln(req2.Id)
|
||||
// log.I.Ln(req2.ID)
|
||||
if len(rem) > 0 {
|
||||
t.Fatalf(
|
||||
"unmarshal failed, remainder\n%d %s",
|
||||
|
||||
@@ -40,7 +40,7 @@ func NewFrom[V string | []byte](eid V, ok bool, msg ...V) *T {
|
||||
}
|
||||
if len(eid) != sha256.Size {
|
||||
log.W.F(
|
||||
"event Id unexpected length, expect %d got %d",
|
||||
"event ID unexpected length, expect %d got %d",
|
||||
len(eid), sha256.Size,
|
||||
)
|
||||
}
|
||||
@@ -96,7 +96,7 @@ func (en *T) Unmarshal(b []byte) (r []byte, err error) {
|
||||
}
|
||||
if len(idHex) != sha256.Size {
|
||||
err = errorf.E(
|
||||
"invalid size for Id, require %d got %d",
|
||||
"invalid size for ID, require %d got %d",
|
||||
len(idHex), sha256.Size,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ import (

// MarshalBinary writes a binary encoding of an event.
//
// [ 32 bytes Id ]
// [ 32 bytes ID ]
// [ 32 bytes Pubkey ]
// [ varint CreatedAt ]
// [ 2 bytes Kind ]
@@ -27,7 +27,7 @@ import (
// [ varint Content length ]
// [ 64 bytes Sig ]
func (ev *E) MarshalBinary(w io.Writer) {
_, _ = w.Write(ev.Id)
_, _ = w.Write(ev.ID)
_, _ = w.Write(ev.Pubkey)
varint.Encode(w, uint64(ev.CreatedAt.V))
varint.Encode(w, uint64(ev.Kind.K))
@@ -46,8 +46,8 @@ func (ev *E) MarshalBinary(w io.Writer) {
}

func (ev *E) UnmarshalBinary(r io.Reader) (err error) {
ev.Id = make([]byte, 32)
if _, err = r.Read(ev.Id); chk.E(err) {
ev.ID = make([]byte, 32)
if _, err = r.Read(ev.ID); chk.E(err) {
return
}
ev.Pubkey = make([]byte, 32)

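The test that follows exercises this codec against the example corpus; a trimmed round-trip sketch of the same idea, assuming it sits in package event:

// roundTripBinary is a hypothetical helper: encode src with MarshalBinary,
// then decode the same bytes back into a fresh event with UnmarshalBinary.
func roundTripBinary(src *E) (dst *E, err error) {
	var buf bytes.Buffer
	src.MarshalBinary(&buf) // fixed-width ID/Pubkey, then varint-prefixed fields
	dst = New()
	if err = dst.UnmarshalBinary(&buf); chk.E(err) {
		return
	}
	return
}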
@@ -15,36 +15,53 @@ func TestTMarshalBinary_UnmarshalBinary(t *testing.T) {
|
||||
scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
|
||||
var rem, out []byte
|
||||
var err error
|
||||
buf := codecbuf.Get()
|
||||
ea, eb := New(), New()
|
||||
now := time.Now()
|
||||
var counter int
|
||||
for scanner.Scan() {
|
||||
// Create new event objects and buffer for each iteration
|
||||
buf := codecbuf.Get()
|
||||
ea, eb := New(), New()
|
||||
|
||||
chk.E(scanner.Err())
|
||||
b := scanner.Bytes()
|
||||
// log.I.F("%s", b)
|
||||
c := make([]byte, 0, len(b))
|
||||
c = append(c, b...)
|
||||
if rem, err = ea.Unmarshal(c); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// log.I.F("len %d\n%s\n", len(b), ea.SerializeIndented())
|
||||
if len(rem) != 0 {
|
||||
t.Fatalf(
|
||||
"some of input remaining after marshal/unmarshal: '%s'",
|
||||
rem,
|
||||
)
|
||||
}
|
||||
// Reset buffer before marshaling
|
||||
buf.Reset()
|
||||
ea.MarshalBinary(buf)
|
||||
// log.I.S(buf.Bytes())
|
||||
|
||||
// Create a new buffer for unmarshaling
|
||||
buf2 := bytes.NewBuffer(buf.Bytes())
|
||||
if err = eb.UnmarshalBinary(buf2); chk.E(err) {
|
||||
codecbuf.Put(buf)
|
||||
t.Fatal(err)
|
||||
}
|
||||
// log.I.F("len %d\n%s\n", len(b), eb.SerializeIndented())
|
||||
|
||||
// Marshal unmarshaled binary event back to JSON
|
||||
unmarshaledJSON := eb.Serialize()
|
||||
|
||||
// Compare the two JSON representations
|
||||
if !bytes.Equal(b, unmarshaledJSON) {
|
||||
t.Fatalf(
|
||||
"JSON representations don't match after binary marshaling/unmarshaling:\nOriginal: %s\nUnmarshaled: %s",
|
||||
b, unmarshaledJSON,
|
||||
)
|
||||
}
|
||||
|
||||
// Return buffer to pool
|
||||
codecbuf.Put(buf)
|
||||
|
||||
counter++
|
||||
out = out[:0]
|
||||
// break
|
||||
}
|
||||
chk.E(scanner.Err())
|
||||
t.Logf(
|
||||
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
)
|
||||
|
||||
// ToCanonical converts the event to the canonical encoding used to derive the
|
||||
// event Id.
|
||||
// event ID.
|
||||
func (ev *E) ToCanonical(dst []byte) (b []byte) {
|
||||
b = dst
|
||||
b = append(b, "[0,\""...)
|
||||
@@ -88,8 +88,8 @@ func (ev *E) FromCanonical(b []byte) (rem []byte, err error) {
|
||||
return
|
||||
}
|
||||
}
|
||||
// create the event, use the Id hash to populate the Id
|
||||
ev.Id = id
|
||||
// create the event, use the ID hash to populate the ID
|
||||
ev.ID = id
|
||||
// unwrap the pubkey
|
||||
if v, ok := x[1].(*json.Hex); !ok {
|
||||
err = errorf.E(
|
||||
|
||||
@@ -114,7 +114,7 @@ func main() {
|
||||
}
|
||||
can := ev.ToCanonical(nil)
|
||||
eh := event.Hash(can)
|
||||
eq := bytes.Equal(ev.Id, eh)
|
||||
eq := bytes.Equal(ev.ID, eh)
|
||||
if !eq {
|
||||
_, err = fmt.Fprintf(ids, "%s\n", ev.Serialize())
|
||||
if chk.E(err) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Package event provides a codec for nostr events, for the wire format (with Id
|
||||
// and signature), for the canonical form, that is hashed to generate the Id,
|
||||
// Package event provides a codec for nostr events, for the wire format (with ID
|
||||
// and signature), for the canonical form, that is hashed to generate the ID,
|
||||
// and a fast binary form that uses io.Reader/io.Writer.
|
||||
package event
|
||||
|
||||
@@ -24,8 +24,8 @@ import (
// defines its JSON string-based format.
type E struct {

// Id is the SHA256 hash of the canonical encoding of the event in binary format
Id []byte
// ID is the SHA256 hash of the canonical encoding of the event in binary format
ID []byte

// Pubkey is the public key of the event creator in binary format
Pubkey []byte
@@ -38,14 +38,14 @@ type E struct {
Kind *kind.T

// Tags are a list of tags, which are a list of strings usually structured
// as a 3 layer scheme indicating specific features of an event.
// as a 3-layer scheme indicating specific features of an event.
Tags *tags.T

// Content is an arbitrary string that can contain anything, but usually
// conforming to a specification relating to the Kind and the Tags.
Content []byte

// Sig is the signature on the Id hash that validates as coming from the
// Sig is the signature on the ID hash that validates as coming from the
// Pubkey in binary format.
Sig []byte
}
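Since the ID field is defined as the SHA256 of the canonical encoding, it can be checked directly; a short sketch assuming package event (the helper name is illustrative, mirroring the check done by the verification tool later in this diff):

// hasValidId reports whether ev.ID matches the hash of its canonical form.
func hasValidId(ev *E) bool {
	can := ev.ToCanonical(nil)
	return bytes.Equal(ev.ID, Hash(can))
}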
@@ -77,15 +77,17 @@ func (ev *E) SerializeIndented() (b []byte) {
return ev.MarshalWithWhitespace(nil, true)
}

// EventId returns the event.E Id as an eventid.T.
// EventId returns the event.E ID as an eventid.T.
func (ev *E) EventId() (eid *eventid.T) {
return eventid.NewWith(ev.Id)
return eventid.NewWith(ev.ID)
}

// stringy/numbery functions for other libraries that expect plain string and number types

// IdString returns the event Id as a hex-encoded string.
func (ev *E) IdString() (s string) { return hex.Enc(ev.Id) }
// IdString returns the event ID as a hex-encoded string.
func (ev *E) IdString() (s string) { return hex.Enc(ev.ID) }

func (ev *E) Id() []byte { return ev.ID }

// CreatedAtInt64 returns the created_at timestamp as a standard int64.
func (ev *E) CreatedAtInt64() (i int64) { return ev.CreatedAt.I64() }
@@ -108,9 +110,9 @@ func (ev *E) ContentString() (s string) { return string(ev.Content) }
|
||||
// J is an event.E encoded in more basic types than used in this library.
|
||||
type J struct {
|
||||
Id string `json:"id"`
|
||||
Pubkey string `json:"pubkey"`
|
||||
CreatedAt unix.Time `json:"created_at"`
|
||||
Kind int32 `json:"kind"`
|
||||
Pubkey string `json:"pubkey"`
|
||||
CreatedAt unix.Time `json:"created_at"`
|
||||
Kind int32 `json:"kind"`
|
||||
Tags [][]string `json:"tags"`
|
||||
Content string `json:"content"`
|
||||
Sig string `json:"sig"`
|
||||
@@ -129,9 +131,9 @@ func (ev *E) ToEventJ() (j *J) {
|
||||
return
|
||||
}
|
||||
|
||||
// IdFromString decodes an event ID and loads it into an event.E Id.
|
||||
// IdFromString decodes an event ID and loads it into an event.E ID.
|
||||
func (ev *E) IdFromString(s string) (err error) {
|
||||
ev.Id, err = hex.Dec(s)
|
||||
ev.ID, err = hex.Dec(s)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
11596 pkg/encoders/event/examples/out.jsonl Normal file
File diff suppressed because one or more lines are too long
@@ -2,6 +2,7 @@ package event
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/minio/sha256-simd"
|
||||
"io"
|
||||
"orly.dev/pkg/crypto/ec/schnorr"
|
||||
@@ -35,7 +36,7 @@ func (ev *E) Marshal(dst []byte) (b []byte) {
|
||||
func (ev *E) MarshalWithWhitespace(dst []byte, on bool) (b []byte) {
|
||||
// open parentheses
|
||||
dst = append(dst, '{')
|
||||
// Id
|
||||
// ID
|
||||
if on {
|
||||
dst = append(dst, '\n', '\t')
|
||||
}
|
||||
@@ -43,7 +44,7 @@ func (ev *E) MarshalWithWhitespace(dst []byte, on bool) (b []byte) {
|
||||
if on {
|
||||
dst = append(dst, ' ')
|
||||
}
|
||||
dst = text2.AppendQuote(dst, ev.Id, hex.EncAppend)
|
||||
dst = text2.AppendQuote(dst, ev.ID, hex.EncAppend)
|
||||
dst = append(dst, ',')
|
||||
// Pubkey
|
||||
if on {
|
||||
@@ -187,12 +188,12 @@ InVal:
|
||||
}
|
||||
if len(id) != sha256.Size {
|
||||
err = errorf.E(
|
||||
"invalid Id, require %d got %d", sha256.Size,
|
||||
"invalid ID, require %d got %d", sha256.Size,
|
||||
len(id),
|
||||
)
|
||||
return
|
||||
}
|
||||
ev.Id = id
|
||||
ev.ID = id
|
||||
goto BetweenKV
|
||||
case jPubkey[0]:
|
||||
if !bytes.Equal(jPubkey, key) {
|
||||
@@ -300,7 +301,7 @@ AfterClose:
|
||||
}
|
||||
return
|
||||
invalid:
|
||||
err = errorf.E(
|
||||
err = fmt.Errorf(
|
||||
"invalid key,\n'%s'\n'%s'\n'%s'", string(b), string(b[:len(r)]),
|
||||
string(r),
|
||||
)
|
||||
|
||||
456 pkg/encoders/event/json_tags_test.go Normal file
@@ -0,0 +1,456 @@
|
||||
package event
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/encoders/tag"
|
||||
"orly.dev/pkg/encoders/tags"
|
||||
text2 "orly.dev/pkg/encoders/text"
|
||||
"orly.dev/pkg/encoders/timestamp"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// compareTags compares two tags and reports any differences
|
||||
func compareTags(t *testing.T, expected, actual *tags.T, context string) {
|
||||
if expected == nil && actual == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if expected == nil || actual == nil {
|
||||
t.Errorf("%s: One of the tags is nil", context)
|
||||
return
|
||||
}
|
||||
|
||||
expectedSlice := expected.ToStringsSlice()
|
||||
actualSlice := actual.ToStringsSlice()
|
||||
|
||||
if len(expectedSlice) != len(actualSlice) {
|
||||
t.Errorf(
|
||||
"%s: Tags length mismatch: expected %d, got %d", context,
|
||||
len(expectedSlice), len(actualSlice),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
for i, expectedTag := range expectedSlice {
|
||||
actualTag := actualSlice[i]
|
||||
|
||||
if len(expectedTag) != len(actualTag) {
|
||||
t.Errorf(
|
||||
"%s: Tag[%d] length mismatch: expected %d, got %d", context, i,
|
||||
len(expectedTag), len(actualTag),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
for j, expectedElem := range expectedTag {
|
||||
if expectedElem != actualTag[j] {
|
||||
t.Errorf(
|
||||
"%s: Tag[%d][%d] mismatch: expected '%s', got '%s'",
|
||||
context, i, j, expectedElem, actualTag[j],
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestUnmarshalEscapedJSONInTags tests that the Unmarshal function correctly handles
|
||||
// tags with fields containing escaped JSON that has been escaped using NostrEscape.
|
||||
func TestUnmarshalEscapedJSONInTags(t *testing.T) {
|
||||
// Test 1: Tag with a field containing escaped JSON
|
||||
t.Run(
|
||||
"SimpleEscapedJSON", func(t *testing.T) {
|
||||
// Create a tag with a field containing JSON that needs escaping
|
||||
jsonContent := `{"key":"value","nested":{"array":[1,2,3]}}`
|
||||
|
||||
// Create the event with the tag containing JSON
|
||||
originalEvent := &E{
|
||||
ID: bytes.Repeat([]byte{0x01}, 32),
|
||||
Pubkey: bytes.Repeat([]byte{0x02}, 32),
|
||||
CreatedAt: timestamp.FromUnix(1609459200),
|
||||
Kind: kind.TextNote,
|
||||
Tags: tags.New(),
|
||||
Content: []byte("Event with JSON in tag"),
|
||||
Sig: bytes.Repeat([]byte{0x03}, 64),
|
||||
}
|
||||
|
||||
// Add a tag with JSON content
|
||||
jsonTag := tag.New("j", jsonContent)
|
||||
originalEvent.Tags.AppendTags(jsonTag)
|
||||
|
||||
// Marshal the event
|
||||
marshaled := originalEvent.Marshal(nil)
|
||||
|
||||
// Unmarshal back into a new event
|
||||
unmarshaledEvent := &E{}
|
||||
_, err := unmarshaledEvent.Unmarshal(marshaled)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to unmarshal event with JSON in tag: %v", err)
|
||||
}
|
||||
|
||||
// Verify the tag was correctly unmarshaled
|
||||
if unmarshaledEvent.Tags.Len() != 1 {
|
||||
t.Fatalf("Expected 1 tag, got %d", unmarshaledEvent.Tags.Len())
|
||||
}
|
||||
|
||||
unmarshaledTag := unmarshaledEvent.Tags.GetTagElement(0)
|
||||
if unmarshaledTag.Len() != 2 {
|
||||
t.Fatalf(
|
||||
"Expected tag with 2 elements, got %d", unmarshaledTag.Len(),
|
||||
)
|
||||
}
|
||||
|
||||
if string(unmarshaledTag.B(0)) != "j" {
|
||||
t.Errorf("Expected tag key 'j', got '%s'", unmarshaledTag.B(0))
|
||||
}
|
||||
|
||||
if string(unmarshaledTag.B(1)) != jsonContent {
|
||||
t.Errorf(
|
||||
"Expected tag value '%s', got '%s'", jsonContent,
|
||||
unmarshaledTag.B(1),
|
||||
)
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
// Test 2: Tag with a field containing escaped JSON with special characters
|
||||
t.Run(
|
||||
"EscapedJSONWithSpecialChars", func(t *testing.T) {
|
||||
// JSON with characters that need escaping: quotes, backslashes, control chars
|
||||
jsonContent := `{"text":"This has \"quotes\" and \\ backslashes","newlines":"\n\r\t"}`
|
||||
|
||||
// Create the event with the tag containing JSON with special chars
|
||||
originalEvent := &E{
|
||||
ID: bytes.Repeat([]byte{0x01}, 32),
|
||||
Pubkey: bytes.Repeat([]byte{0x02}, 32),
|
||||
CreatedAt: timestamp.FromUnix(1609459200),
|
||||
Kind: kind.TextNote,
|
||||
Tags: tags.New(),
|
||||
Content: []byte("Event with JSON containing special chars in tag"),
|
||||
Sig: bytes.Repeat([]byte{0x03}, 64),
|
||||
}
|
||||
|
||||
// Add a tag with JSON content containing special chars
|
||||
jsonTag := tag.New("j", jsonContent)
|
||||
originalEvent.Tags.AppendTags(jsonTag)
|
||||
|
||||
// Marshal the event
|
||||
marshaled := originalEvent.Marshal(nil)
|
||||
|
||||
// Unmarshal back into a new event
|
||||
unmarshaledEvent := &E{}
|
||||
_, err := unmarshaledEvent.Unmarshal(marshaled)
|
||||
if err != nil {
|
||||
t.Fatalf(
|
||||
"Failed to unmarshal event with JSON containing special chars: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
// Verify the tag was correctly unmarshaled
|
||||
unmarshaledTag := unmarshaledEvent.Tags.GetTagElement(0)
|
||||
if string(unmarshaledTag.B(1)) != jsonContent {
|
||||
t.Errorf(
|
||||
"Expected tag value '%s', got '%s'", jsonContent,
|
||||
unmarshaledTag.B(1),
|
||||
)
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
// Test 3: Tag with nested JSON that contains already escaped content
|
||||
t.Run(
|
||||
"NestedEscapedJSON", func(t *testing.T) {
|
||||
// JSON with already escaped content
|
||||
jsonContent := `{"escaped":"This JSON contains \\\"already escaped\\\" content"}`
|
||||
|
||||
// Create the event with the tag containing nested escaped JSON
|
||||
originalEvent := &E{
|
||||
ID: bytes.Repeat([]byte{0x01}, 32),
|
||||
Pubkey: bytes.Repeat([]byte{0x02}, 32),
|
||||
CreatedAt: timestamp.FromUnix(1609459200),
|
||||
Kind: kind.TextNote,
|
||||
Tags: tags.New(),
|
||||
Content: []byte("Event with nested escaped JSON in tag"),
|
||||
Sig: bytes.Repeat([]byte{0x03}, 64),
|
||||
}
|
||||
|
||||
// Add a tag with nested escaped JSON content
|
||||
jsonTag := tag.New("j", jsonContent)
|
||||
originalEvent.Tags.AppendTags(jsonTag)
|
||||
|
||||
// Marshal the event
|
||||
marshaled := originalEvent.Marshal(nil)
|
||||
|
||||
// Unmarshal back into a new event
|
||||
unmarshaledEvent := &E{}
|
||||
_, err := unmarshaledEvent.Unmarshal(marshaled)
|
||||
if err != nil {
|
||||
t.Fatalf(
|
||||
"Failed to unmarshal event with nested escaped JSON: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
// Verify the tag was correctly unmarshaled
|
||||
unmarshaledTag := unmarshaledEvent.Tags.GetTagElement(0)
|
||||
if string(unmarshaledTag.B(1)) != jsonContent {
|
||||
t.Errorf(
|
||||
"Expected tag value '%s', got '%s'", jsonContent,
|
||||
unmarshaledTag.B(1),
|
||||
)
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
// Test 4: Tag with JSON that has been explicitly escaped using NostrEscape
|
||||
t.Run(
|
||||
"ExplicitlyEscapedJSON", func(t *testing.T) {
|
||||
// Original JSON with characters that need escaping
|
||||
originalJSON := []byte(`{"key":"value with "quotes"","nested":{"array":[1,2,3],"special":"\n\r\t"}}`)
|
||||
|
||||
// Explicitly escape the JSON using NostrEscape
|
||||
escapedJSON := make([]byte, 0, len(originalJSON)*2)
|
||||
escapedJSON = text2.NostrEscape(escapedJSON, originalJSON)
|
||||
|
||||
// Create the event with the tag containing explicitly escaped JSON
|
||||
originalEvent := &E{
|
||||
ID: bytes.Repeat([]byte{0x01}, 32),
|
||||
Pubkey: bytes.Repeat([]byte{0x02}, 32),
|
||||
CreatedAt: timestamp.FromUnix(1609459200),
|
||||
Kind: kind.TextNote,
|
||||
Tags: tags.New(),
|
||||
Content: []byte("Event with explicitly escaped JSON in tag"),
|
||||
Sig: bytes.Repeat([]byte{0x03}, 64),
|
||||
}
|
||||
|
||||
// Add a tag with the explicitly escaped JSON content
|
||||
jsonTag := tag.New("j", string(escapedJSON))
|
||||
originalEvent.Tags.AppendTags(jsonTag)
|
||||
|
||||
// Marshal the event
|
||||
marshaled := originalEvent.Marshal(nil)
|
||||
|
||||
// Unmarshal back into a new event
|
||||
unmarshaledEvent := &E{}
|
||||
_, err := unmarshaledEvent.Unmarshal(marshaled)
|
||||
if err != nil {
|
||||
t.Fatalf(
|
||||
"Failed to unmarshal event with explicitly escaped JSON: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
// Verify the tag was correctly unmarshaled
|
||||
unmarshaledTag := unmarshaledEvent.Tags.GetTagElement(0)
|
||||
if string(unmarshaledTag.B(1)) != string(escapedJSON) {
|
||||
t.Errorf(
|
||||
"Expected tag value '%s', got '%s'", string(escapedJSON),
|
||||
unmarshaledTag.B(1),
|
||||
)
|
||||
}
|
||||
|
||||
// Unescape the unmarshaled JSON to verify it matches the original
|
||||
unescapedJSON := make([]byte, len(unmarshaledTag.B(1)))
|
||||
copy(unescapedJSON, unmarshaledTag.B(1))
|
||||
unescapedJSON = text2.NostrUnescape(unescapedJSON)
|
||||
|
||||
if string(unescapedJSON) != string(originalJSON) {
|
||||
t.Errorf(
|
||||
"Unescaped JSON doesn't match original. Expected '%s', got '%s'",
|
||||
string(originalJSON), string(unescapedJSON),
|
||||
)
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestUnmarshalTags(t *testing.T) {
|
||||
// Test 1: Simple event with empty tags
|
||||
t.Run(
|
||||
"EmptyTags", func(t *testing.T) {
|
||||
jsonWithEmptyTags := []byte(`{"id":"0101010101010101010101010101010101010101010101010101010101010101","pubkey":"0202020202020202020202020202020202020202020202020202020202020202","created_at":1609459200,"kind":1,"tags":[],"content":"This is a test event","sig":"03030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303"}`)
|
||||
|
||||
expected := &E{
|
||||
ID: bytes.Repeat([]byte{0x01}, 32),
|
||||
Pubkey: bytes.Repeat([]byte{0x02}, 32),
|
||||
CreatedAt: timestamp.FromUnix(1609459200),
|
||||
Kind: kind.TextNote,
|
||||
Tags: tags.New(),
|
||||
Content: []byte("This is a test event"),
|
||||
Sig: bytes.Repeat([]byte{0x03}, 64),
|
||||
}
|
||||
|
||||
actual := &E{}
|
||||
_, err := actual.Unmarshal(jsonWithEmptyTags)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to unmarshal JSON with empty tags: %v", err)
|
||||
}
|
||||
|
||||
compareTags(t, expected.Tags, actual.Tags, "EmptyTags")
|
||||
},
|
||||
)
|
||||
|
||||
// Test 2: Event with simple tags
|
||||
t.Run(
|
||||
"SimpleTags", func(t *testing.T) {
|
||||
jsonWithSimpleTags := []byte(`{"id":"0101010101010101010101010101010101010101010101010101010101010101","pubkey":"0202020202020202020202020202020202020202020202020202020202020202","created_at":1609459200,"kind":1,"tags":[["e","1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"],["p","abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"]],"content":"This is a test event","sig":"03030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303"}`)
|
||||
|
||||
expected := &E{
|
||||
ID: bytes.Repeat([]byte{0x01}, 32),
|
||||
Pubkey: bytes.Repeat([]byte{0x02}, 32),
|
||||
CreatedAt: timestamp.FromUnix(1609459200),
|
||||
Kind: kind.TextNote,
|
||||
Tags: tags.New(),
|
||||
Content: []byte("This is a test event"),
|
||||
Sig: bytes.Repeat([]byte{0x03}, 64),
|
||||
}
|
||||
|
||||
// Add tags
|
||||
eTag := tag.New(
|
||||
"e",
|
||||
"1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
|
||||
)
|
||||
pTag := tag.New(
|
||||
"p",
|
||||
"abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890",
|
||||
)
|
||||
expected.Tags.AppendTags(eTag, pTag)
|
||||
|
||||
actual := &E{}
|
||||
_, err := actual.Unmarshal(jsonWithSimpleTags)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to unmarshal JSON with simple tags: %v", err)
|
||||
}
|
||||
|
||||
            compareTags(t, expected.Tags, actual.Tags, "SimpleTags")
        },
    )

    // Test 3: Event with complex tags (more elements per tag)
    t.Run(
        "ComplexTags", func(t *testing.T) {
            jsonWithComplexTags := []byte(`{"id":"0101010101010101010101010101010101010101010101010101010101010101","pubkey":"0202020202020202020202020202020202020202020202020202020202020202","created_at":1609459200,"kind":1,"tags":[["e","1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef","wss://relay.example.com","root"],["p","abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890","wss://relay.example.com"],["t","hashtag","topic"]],"content":"This is a test event","sig":"03030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303"}`)

            expected := &E{
                ID:        bytes.Repeat([]byte{0x01}, 32),
                Pubkey:    bytes.Repeat([]byte{0x02}, 32),
                CreatedAt: timestamp.FromUnix(1609459200),
                Kind:      kind.TextNote,
                Tags:      tags.New(),
                Content:   []byte("This is a test event"),
                Sig:       bytes.Repeat([]byte{0x03}, 64),
            }

            // Add tags
            eTag := tag.New(
                "e",
                "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
                "wss://relay.example.com", "root",
            )
            pTag := tag.New(
                "p",
                "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890",
                "wss://relay.example.com",
            )
            tTag := tag.New("t", "hashtag", "topic")
            expected.Tags.AppendTags(eTag, pTag, tTag)

            actual := &E{}
            _, err := actual.Unmarshal(jsonWithComplexTags)
            if err != nil {
                t.Fatalf("Failed to unmarshal JSON with complex tags: %v", err)
            }

            compareTags(t, expected.Tags, actual.Tags, "ComplexTags")
        },
    )

    // Test 4: Test using the Unmarshal function (not the method)
    t.Run(
        "UnmarshalFunction", func(t *testing.T) {
            jsonWithTags := []byte(`{
                "id": "0101010101010101010101010101010101010101010101010101010101010101",
                "pubkey": "0202020202020202020202020202020202020202020202020202020202020202",
                "created_at": 1609459200,
                "kind": 1,
                "tags": [["e", "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"], ["p", "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"]],
                "content": "This is a test event",
                "sig": "03030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303"
            }`)

            expected := &E{
                ID:        bytes.Repeat([]byte{0x01}, 32),
                Pubkey:    bytes.Repeat([]byte{0x02}, 32),
                CreatedAt: timestamp.FromUnix(1609459200),
                Kind:      kind.TextNote,
                Tags:      tags.New(),
                Content:   []byte("This is a test event"),
                Sig:       bytes.Repeat([]byte{0x03}, 64),
            }

            // Add tags
            eTag := tag.New(
                "e",
                "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
            )
            pTag := tag.New(
                "p",
                "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890",
            )
            expected.Tags.AppendTags(eTag, pTag)

            actual := &E{}
            _, err := Unmarshal(actual, jsonWithTags)
            if err != nil {
                t.Fatalf(
                    "Failed to unmarshal JSON with tags using Unmarshal function: %v",
                    err,
                )
            }

            compareTags(t, expected.Tags, actual.Tags, "UnmarshalFunction")
        },
    )

    // Test 5: Event with nested empty tags
    t.Run(
        "NestedEmptyTags", func(t *testing.T) {
            jsonWithNestedEmptyTags := []byte(`{
                "id": "0101010101010101010101010101010101010101010101010101010101010101",
                "pubkey": "0202020202020202020202020202020202020202020202020202020202020202",
                "created_at": 1609459200,
                "kind": 1,
                "tags": [[], ["e"], ["p", ""]],
                "content": "This is a test event",
                "sig": "03030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303030303"
            }`)

            expected := &E{
                ID:        bytes.Repeat([]byte{0x01}, 32),
                Pubkey:    bytes.Repeat([]byte{0x02}, 32),
                CreatedAt: timestamp.FromUnix(1609459200),
                Kind:      kind.TextNote,
                Tags:      tags.New(),
                Content:   []byte("This is a test event"),
                Sig:       bytes.Repeat([]byte{0x03}, 64),
            }

            // Add tags
            emptyTag := tag.New[string]()
            eTag := tag.New("e")
            pTag := tag.New("p", "")
            expected.Tags.AppendTags(emptyTag, eTag, pTag)

            actual := &E{}
            _, err := actual.Unmarshal(jsonWithNestedEmptyTags)
            if err != nil {
                t.Fatalf(
                    "Failed to unmarshal JSON with nested empty tags: %v", err,
                )
            }

            compareTags(t, expected.Tags, actual.Tags, "NestedEmptyTags")
        },
    )
}
|
||||
@@ -11,10 +11,10 @@ import (
|
||||
|
||||
// compareEvents compares two events and reports any differences
|
||||
func compareEvents(t *testing.T, expected, actual *E, context string) {
|
||||
if !bytes.Equal(expected.Id, actual.Id) {
|
||||
if !bytes.Equal(expected.ID, actual.ID) {
|
||||
t.Errorf(
|
||||
"%s: Id mismatch: expected %s, got %s", context,
|
||||
hex.Enc(expected.Id), hex.Enc(actual.Id),
|
||||
"%s: ID mismatch: expected %s, got %s", context,
|
||||
hex.Enc(expected.ID), hex.Enc(actual.ID),
|
||||
)
|
||||
}
|
||||
if !bytes.Equal(expected.Pubkey, actual.Pubkey) {
|
||||
@@ -52,7 +52,7 @@ func compareEvents(t *testing.T, expected, actual *E, context string) {
|
||||
func TestMarshalUnmarshalWithWhitespace(t *testing.T) {
|
||||
// Create a sample event with predefined values
|
||||
original := &E{
|
||||
Id: bytes.Repeat([]byte{0x01}, 32), // 32 bytes of 0x01
|
||||
ID: bytes.Repeat([]byte{0x01}, 32), // 32 bytes of 0x01
|
||||
Pubkey: bytes.Repeat([]byte{0x02}, 32), // 32 bytes of 0x02
|
||||
CreatedAt: timestamp.FromUnix(1609459200), // 2021-01-01 00:00:00 UTC
|
||||
Kind: kind.TextNote, // Kind 1 (text note)
|
||||
@@ -73,7 +73,7 @@ func TestMarshalUnmarshalWithWhitespace(t *testing.T) {
|
||||
// Test 2: Manually created JSON with extra whitespace
|
||||
jsonWithExtraWhitespace := []byte(`
|
||||
{
|
||||
"id": "` + hex.Enc(original.Id) + `",
|
||||
"id": "` + hex.Enc(original.ID) + `",
|
||||
"pubkey": "` + hex.Enc(original.Pubkey) + `",
|
||||
"created_at": 1609459200,
|
||||
"kind": 1,
|
||||
@@ -93,7 +93,7 @@ func TestMarshalUnmarshalWithWhitespace(t *testing.T) {
|
||||
|
||||
// Test 3: JSON with mixed whitespace (spaces, tabs, newlines)
|
||||
jsonWithMixedWhitespace := []byte(`{
|
||||
"id" : "` + hex.Enc(original.Id) + `",
|
||||
"id" : "` + hex.Enc(original.ID) + `",
|
||||
"pubkey": "` + hex.Enc(original.Pubkey) + `",
|
||||
"created_at": 1609459200 ,
|
||||
"kind":1,
|
||||
@@ -115,7 +115,7 @@ func TestMarshalUnmarshalWithWhitespace(t *testing.T) {
|
||||
|
||||
{
|
||||
|
||||
"id" : "` + hex.Enc(original.Id) + `" ,
|
||||
"id" : "` + hex.Enc(original.ID) + `" ,
|
||||
"pubkey" : "` + hex.Enc(original.Pubkey) + `" ,
|
||||
"created_at" : 1609459200 ,
|
||||
"kind" : 1 ,
|
||||
|
||||
@@ -12,12 +12,12 @@ import (
|
||||
// Sign the event using the signer.I. Uses github.com/bitcoin-core/secp256k1 if
|
||||
// available for much faster signatures.
|
||||
//
|
||||
// Note that this only populates the Pubkey, Id and Sig. The caller must
|
||||
// Note that this only populates the Pubkey, ID and Sig. The caller must
|
||||
// set the CreatedAt timestamp as intended.
|
||||
func (ev *E) Sign(keys signer.I) (err error) {
|
||||
ev.Pubkey = keys.Pub()
|
||||
ev.Id = ev.GetIDBytes()
|
||||
if ev.Sig, err = keys.Sign(ev.Id); chk.E(err) {
|
||||
ev.ID = ev.GetIDBytes()
|
||||
if ev.Sig, err = keys.Sign(ev.ID); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
@@ -30,17 +30,17 @@ func (ev *E) Verify() (valid bool, err error) {
|
||||
if err = keys.InitPub(ev.Pubkey); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if valid, err = keys.Verify(ev.Id, ev.Sig); chk.T(err) {
|
||||
// check that this isn't because of a bogus Id
|
||||
if valid, err = keys.Verify(ev.ID, ev.Sig); chk.T(err) {
|
||||
// check that this isn't because of a bogus ID
|
||||
id := ev.GetIDBytes()
|
||||
if !bytes.Equal(id, ev.Id) {
|
||||
log.E.Ln("event Id incorrect")
|
||||
ev.Id = id
|
||||
if !bytes.Equal(id, ev.ID) {
|
||||
log.E.Ln("event ID incorrect")
|
||||
ev.ID = id
|
||||
err = nil
|
||||
if valid, err = keys.Verify(ev.Id, ev.Sig); chk.E(err) {
|
||||
if valid, err = keys.Verify(ev.ID, ev.Sig); chk.E(err) {
|
||||
return
|
||||
}
|
||||
err = errorf.W("event Id incorrect but signature is valid on correct Id")
|
||||
err = errorf.W("event ID incorrect but signature is valid on correct ID")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -36,7 +36,7 @@ func (ei *T) Set(b []byte) (err error) {
|
||||
}
|
||||
if len(b) != sha256.Size {
|
||||
err = errorf.E(
|
||||
"Id bytes incorrect size, got %d require %d",
|
||||
"ID bytes incorrect size, got %d require %d",
|
||||
len(b), sha256.Size,
|
||||
)
|
||||
return
|
||||
@@ -45,7 +45,7 @@ func (ei *T) Set(b []byte) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// NewFromBytes creates a new eventid.T from the raw event Id hash.
|
||||
// NewFromBytes creates a new eventid.T from the raw event ID hash.
|
||||
func NewFromBytes(b []byte) (ei *T, err error) {
|
||||
ei = New()
|
||||
if err = ei.Set(b); chk.E(err) {
|
||||
@@ -64,6 +64,9 @@ func (ei *T) String() string {
|
||||
|
||||
// ByteString renders an eventid.T as bytes in ASCII hex.
|
||||
func (ei *T) ByteString(src []byte) (b []byte) {
|
||||
if ei == nil {
|
||||
return
|
||||
}
|
||||
return hex.EncAppend(src, ei[:])
|
||||
}
|
||||
|
||||
@@ -104,7 +107,7 @@ func (ei *T) Unmarshal(b []byte) (rem []byte, err error) {
|
||||
b = b[1 : 2*sha256.Size+1]
|
||||
if len(b) != 2*sha256.Size {
|
||||
err = errorf.E(
|
||||
"event Id hex incorrect size, got %d require %d",
|
||||
"event ID hex incorrect size, got %d require %d",
|
||||
len(b), 2*sha256.Size,
|
||||
)
|
||||
log.E.Ln(string(b))
|
||||
@@ -123,7 +126,7 @@ func (ei *T) Unmarshal(b []byte) (rem []byte, err error) {
|
||||
func NewFromString(s string) (ei *T, err error) {
|
||||
if len(s) != 2*sha256.Size {
|
||||
return nil, errorf.E(
|
||||
"event Id hex wrong size, got %d require %d",
|
||||
"event ID hex wrong size, got %d require %d",
|
||||
len(s), 2*sha256.Size,
|
||||
)
|
||||
}
|
||||
@@ -134,7 +137,7 @@ func NewFromString(s string) (ei *T, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// Gen creates a fake pseudorandom generated event Id for tests.
|
||||
// Gen creates a fake pseudorandom generated event ID for tests.
|
||||
func Gen() (ei *T) {
|
||||
b := frand.Bytes(sha256.Size)
|
||||
ei = &T{}
|
||||
|
||||
@@ -446,7 +446,7 @@ func (f *F) Matches(ev *event.E) bool {
|
||||
// log.F.ToSliceOfBytes("nil event")
|
||||
return false
|
||||
}
|
||||
if f.Ids.Len() > 0 && !f.Ids.Contains(ev.Id) {
|
||||
if f.Ids.Len() > 0 && !f.Ids.Contains(ev.ID) {
|
||||
// log.F.ToSliceOfBytes("no ids in filter match event\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String())
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
|
||||
)
|
||||
|
||||
// S is a simplified filter that only covers the nip-01 REQ filter minus the
|
||||
// separate and superseding Id list. The search field is from a different NIP,
|
||||
// separate and superseding ID list. The search field is from a different NIP,
|
||||
// but it is a separate API for which reason it is also not here.
|
||||
type S struct {
|
||||
Kinds *kinds.T `json:"kinds,omitempty"`
|
||||
|
||||
@@ -32,7 +32,7 @@ func NewId[V string | []byte](s V) (*Id, error) {
|
||||
// remove invalid return value
|
||||
si.T = si.T[:0]
|
||||
return si, errorf.E(
|
||||
"invalid subscription Id - length %d < 1 or > 64", len(si.T),
|
||||
"invalid subscription ID - length %d < 1 or > 64", len(si.T),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -77,7 +77,7 @@ func (si *Id) Marshal(dst []byte) (b []byte) {
|
||||
ue := text.NostrEscape(nil, si.T)
|
||||
if len(ue) < 1 || len(ue) > 64 {
|
||||
log.E.F(
|
||||
"invalid subscription Id, must be between 1 and 64 "+
|
||||
"invalid subscription ID, must be between 1 and 64 "+
|
||||
"characters, got %d (possibly due to escaping)", len(ue),
|
||||
)
|
||||
return
|
||||
|
||||
@@ -4,7 +4,7 @@ package text
|
||||
//
|
||||
// This is the efficient implementation based on the NIP-01 specification:
|
||||
//
|
||||
// To prevent implementation differences from creating a different event Id for
|
||||
// To prevent implementation differences from creating a different event ID for
|
||||
// the same event, the following rules MUST be followed while serializing:
|
||||
//
|
||||
// No whitespace, line breaks or other unnecessary formatting should be included
|
||||
@@ -90,7 +90,7 @@ func NostrUnescape(dst []byte) (b []byte) {
|
||||
dst[w] = '\r'
|
||||
w++
|
||||
|
||||
// special cases for non-nip-01 specified json escapes (must be preserved for Id
|
||||
// special cases for non-nip-01 specified json escapes (must be preserved for ID
|
||||
// generation).
|
||||
case c == 'u':
|
||||
dst[w] = '\\'
|
||||
@@ -103,7 +103,7 @@ func NostrUnescape(dst []byte) (b []byte) {
|
||||
dst[w] = '/'
|
||||
w++
|
||||
|
||||
// special case for octal escapes (must be preserved for Id generation).
|
||||
// special case for octal escapes (must be preserved for ID generation).
|
||||
case c >= '0' && c <= '9':
|
||||
dst[w] = '\\'
|
||||
w++
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"orly.dev/pkg/interfaces/relay"
|
||||
"orly.dev/pkg/interfaces/store"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"time"
|
||||
)
|
||||
|
||||
type I interface {
|
||||
@@ -20,12 +21,14 @@ type I interface {
|
||||
authedPubkey []byte, remote string,
|
||||
) (allowed *filters.T, accept bool, modified bool)
|
||||
AddEvent(
|
||||
c context.T, rl relay.I, ev *event.E, hr *http.Request,
|
||||
origin string, authedPubkey []byte,
|
||||
) (
|
||||
accepted bool,
|
||||
message []byte,
|
||||
)
|
||||
c context.T, rl relay.I, ev *event.E, hr *http.Request, origin string,
|
||||
) (accepted bool, message []byte)
|
||||
AdminAuth(
|
||||
r *http.Request, remote string, tolerance ...time.Duration,
|
||||
) (authed bool, pubkey []byte)
|
||||
UserAuth(
|
||||
r *http.Request, remote string, tolerance ...time.Duration,
|
||||
) (authed bool, pubkey []byte)
|
||||
Context() context.T
|
||||
Publisher() *publish.S
|
||||
Publish(c context.T, evt *event.E) (err error)
|
||||
@@ -35,4 +38,5 @@ type I interface {
|
||||
AuthRequired() bool
|
||||
PublicReadable() bool
|
||||
ServiceURL(req *http.Request) (s string)
|
||||
OwnersPubkeys() (pks [][]byte)
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ package store
|
||||
import (
|
||||
"io"
|
||||
"orly.dev/pkg/app/config"
|
||||
"orly.dev/pkg/database/indexes/types"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/eventid"
|
||||
"orly.dev/pkg/encoders/eventidserial"
|
||||
@@ -33,6 +34,7 @@ type I interface {
|
||||
LogLeveler
|
||||
EventIdSerialer
|
||||
Initer
|
||||
SerialByIder
|
||||
}
|
||||
|
||||
type Initer interface {
|
||||
@@ -81,7 +83,7 @@ type Deleter interface {
|
||||
|
||||
type Saver interface {
|
||||
// SaveEvent is called once relay.AcceptEvent reports true.
|
||||
SaveEvent(c context.T, ev *event.E) (kc, vc int, err error)
|
||||
SaveEvent(c context.T, ev *event.E, noVerify bool) (kc, vc int, err error)
|
||||
}
|
||||
|
||||
type Importer interface {
|
||||
@@ -129,3 +131,7 @@ type EventIdSerialer interface {
|
||||
err error,
|
||||
)
|
||||
}
|
||||
|
||||
type SerialByIder interface {
|
||||
GetSerialById(id []byte) (ser *types.Uint40, err error)
|
||||
}
|
||||
|
||||
@@ -16,10 +16,10 @@ func CheckPrivilege(authedPubkey []byte, ev *event.E) (privileged bool) {
|
||||
}
|
||||
// authed users when auth is required must be present in the
|
||||
// event if it is privileged.
|
||||
authedIsAuthor := bytes.Equal(ev.Pubkey, authedPubkey)
|
||||
privileged = bytes.Equal(ev.Pubkey, authedPubkey)
|
||||
// if the authed pubkey matches the event author, it is
|
||||
// allowed.
|
||||
if !authedIsAuthor {
|
||||
if !privileged {
|
||||
// check whether one of the p (mention) tags is
|
||||
// present designating the authed pubkey, as this means
|
||||
// the author wants the designated pubkey to be able to
|
||||
@@ -29,17 +29,15 @@ func CheckPrivilege(authedPubkey []byte, ev *event.E) (privileged bool) {
|
||||
eTags := ev.Tags.GetAll(tag.New("p"))
|
||||
var hexAuthedKey []byte
|
||||
hex.EncAppend(hexAuthedKey, authedPubkey)
|
||||
var authedIsMentioned bool
|
||||
for _, e := range eTags.ToSliceOfTags() {
|
||||
if bytes.Equal(e.Value(), hexAuthedKey) {
|
||||
authedIsMentioned = true
|
||||
privileged = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !authedIsMentioned {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
privileged = true
|
||||
}
|
||||
return
|
||||
}
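Side note on the hunk above: the refactor folds the old `authedIsAuthor` / `authedIsMentioned` booleans into the single `privileged` return flag. A minimal sketch of the resulting flow, reconstructed only from the identifiers visible in this hunk (the full function, including the check that the event kind is privileged at all, is not shown here), and with the result of `hex.EncAppend` assigned so the sketch stands on its own:

```go
package auth

import (
	"bytes"

	"orly.dev/pkg/encoders/event"
	"orly.dev/pkg/encoders/hex"
	"orly.dev/pkg/encoders/tag"
)

// CheckPrivilege (sketch) reports whether the authed pubkey may receive a
// privileged event: either it authored the event, or it is mentioned in one
// of the event's "p" tags.
func CheckPrivilege(authedPubkey []byte, ev *event.E) (privileged bool) {
	// the authed pubkey matching the event author is always allowed
	privileged = bytes.Equal(ev.Pubkey, authedPubkey)
	if !privileged {
		// "p" tag values are hex-encoded pubkeys, so compare the hex form
		hexAuthedKey := hex.EncAppend(nil, authedPubkey)
		pTags := ev.Tags.GetAll(tag.New("p"))
		for _, e := range pTags.ToSliceOfTags() {
			if bytes.Equal(e.Value(), hexAuthedKey) {
				privileged = true
				break
			}
		}
	}
	return
}
```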
44 pkg/protocol/openapi/api-reference.js Normal file
File diff suppressed because one or more lines are too long
403 pkg/protocol/openapi/event.go Normal file
@@ -0,0 +1,403 @@
|
||||
package openapi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"github.com/danielgtaylor/huma/v2"
|
||||
"net/http"
|
||||
"orly.dev/pkg/app/relay/helpers"
|
||||
"orly.dev/pkg/crypto/sha256"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/eventid"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/encoders/hex"
|
||||
"orly.dev/pkg/encoders/ints"
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/encoders/tag"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"orly.dev/pkg/utils/lol"
|
||||
)
|
||||
|
||||
// EventInput is the parameters for the Event HTTP API method.
|
||||
type EventInput struct {
|
||||
Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"false"`
|
||||
Accept string `header:"Accept" default:"application/nostr+json"`
|
||||
Body string `doc:"event JSON"`
|
||||
}
|
||||
|
||||
// EventOutput is the return parameters for the HTTP API Event method.
|
||||
type EventOutput struct{ Body string }
|
||||
|
||||
// RegisterEvent is the implementation of the HTTP API Event method.
|
||||
func (x *Operations) RegisterEvent(api huma.API) {
|
||||
lol.Tracer("RegisterEvent")
|
||||
defer lol.Tracer("RegisterEvent")
|
||||
name := "Event"
|
||||
description := "Submit an event"
|
||||
path := x.path + "/event"
|
||||
scopes := []string{"user", "write"}
|
||||
method := http.MethodPost
|
||||
huma.Register(
|
||||
api, huma.Operation{
|
||||
OperationID: name,
|
||||
Summary: name,
|
||||
Path: path,
|
||||
Method: method,
|
||||
Tags: []string{"events"},
|
||||
Description: helpers.GenerateDescription(description, scopes),
|
||||
Security: []map[string][]string{{"auth": scopes}},
|
||||
}, func(ctx context.T, input *EventInput) (
|
||||
output *EventOutput, err error,
|
||||
) {
|
||||
r := ctx.Value("http-request").(*http.Request)
|
||||
remote := helpers.GetRemoteFromReq(r)
|
||||
log.T.F(
|
||||
"%s %s %s", r.URL.String(),
|
||||
remote, input.Body,
|
||||
)
|
||||
var authed bool
|
||||
var pubkey []byte
|
||||
if x.I.AuthRequired() {
|
||||
authed, pubkey = x.UserAuth(r, remote)
|
||||
if !authed {
|
||||
err = huma.Error401Unauthorized("Not Authorized")
|
||||
return
|
||||
}
|
||||
}
|
||||
ev := &event.E{}
|
||||
var rem []byte
|
||||
if rem, err = ev.Unmarshal([]byte(input.Body)); chk.T(err) {
|
||||
err = huma.Error422UnprocessableEntity(
|
||||
"Failed to parse event", err,
|
||||
)
|
||||
return
|
||||
}
|
||||
if len(rem) > 0 {
|
||||
log.D.F("remainder:\n%s", rem)
|
||||
}
|
||||
// these aliases make it so most of the following code can be copied
|
||||
// verbatim from its counterpart in socketapi.HandleEvent, with the
|
||||
// aid of a different implementation of the openapi.OK type.
|
||||
a := x
|
||||
env := ev
|
||||
c := x.Context()
|
||||
calculatedId := ev.GetIDBytes()
|
||||
if !bytes.Equal(calculatedId, ev.ID) {
|
||||
err = huma.Error422UnprocessableEntity(
|
||||
Ok.Invalid(
|
||||
a, env, "event id is computed incorrectly, "+
|
||||
"event has ID %0x, but when computed it is %0x",
|
||||
ev.ID, calculatedId,
|
||||
).Error(),
|
||||
)
|
||||
return
|
||||
}
|
||||
var ok bool
|
||||
if ok, err = env.Verify(); chk.T(err) {
|
||||
if err = Ok.Error(
|
||||
a, env, fmt.Sprintf(
|
||||
"failed to verify signature: %s",
|
||||
err.Error(),
|
||||
),
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
} else if !ok {
|
||||
if err = Ok.Invalid(
|
||||
a, env,
|
||||
"signature is invalid",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
// check that relay policy allows this event
|
||||
accept, notice, _ := x.I.AcceptEvent(c, env, r, pubkey, remote)
|
||||
if !accept {
|
||||
if err = Ok.Blocked(
|
||||
a, env, notice,
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
// check for protected tag (NIP-70)
|
||||
protectedTag := ev.Tags.GetFirst(tag.New("-"))
|
||||
if protectedTag != nil && a.AuthRequired() {
|
||||
// check that the pubkey of the event matches the authed pubkey
|
||||
if !bytes.Equal(pubkey, ev.Pubkey) {
|
||||
if err = Ok.Blocked(
|
||||
a, env,
|
||||
"protected tag may only be published by user authed to the same pubkey",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
// check and process delete
|
||||
sto := x.I.Storage()
|
||||
if ev.Kind.K == kind.Deletion.K {
|
||||
log.I.F("delete event\n%s", ev.Serialize())
|
||||
for _, t := range ev.Tags.ToSliceOfTags() {
|
||||
var res []*event.E
|
||||
if t.Len() >= 2 {
|
||||
switch {
|
||||
case bytes.Equal(t.Key(), []byte("e")):
|
||||
// Process 'e' tag (event reference)
|
||||
eventId := make([]byte, sha256.Size)
|
||||
if _, err = hex.DecBytes(
|
||||
eventId, t.Value(),
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
// Create a filter to find the referenced event
|
||||
f := filter.New()
|
||||
f.Ids = f.Ids.Append(eventId)
|
||||
|
||||
// Query for the referenced event
|
||||
var referencedEvents []*event.E
|
||||
referencedEvents, err = sto.QueryEvents(c, f)
|
||||
if chk.E(err) {
|
||||
if err = Ok.Error(
|
||||
a, env,
|
||||
"failed to query for referenced event",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// If we found the referenced event, check if the author
|
||||
// matches
|
||||
if len(referencedEvents) > 0 {
|
||||
referencedEvent := referencedEvents[0]
|
||||
|
||||
// Check if the author of the deletion event matches the
|
||||
// author of the referenced event
|
||||
if !bytes.Equal(
|
||||
referencedEvent.Pubkey, env.Pubkey,
|
||||
) {
|
||||
if err = Ok.Blocked(
|
||||
a, env,
|
||||
"blocked: cannot delete events from other authors",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Create eventid.T from the event ID bytes
|
||||
var eid *eventid.T
|
||||
if eid, err = eventid.NewFromBytes(eventId); chk.E(err) {
|
||||
if err = Ok.Error(
|
||||
a, env, "failed to create event ID",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Use DeleteEvent to actually delete the referenced
|
||||
// event
|
||||
if err = sto.DeleteEvent(c, eid); chk.E(err) {
|
||||
if err = Ok.Error(
|
||||
a, env,
|
||||
"failed to delete referenced event",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
log.I.F(
|
||||
"successfully deleted event %x", eventId,
|
||||
)
|
||||
}
|
||||
case bytes.Equal(t.Key(), []byte("a")):
|
||||
split := bytes.Split(t.Value(), []byte{':'})
|
||||
if len(split) != 3 {
|
||||
continue
|
||||
}
|
||||
var pk []byte
|
||||
if pk, err = hex.DecAppend(
|
||||
nil, split[1],
|
||||
); chk.E(err) {
|
||||
if err = Ok.Invalid(
|
||||
a, env,
|
||||
"delete event a tag pubkey value invalid: %s",
|
||||
t.Value(),
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
kin := ints.New(uint16(0))
|
||||
if _, err = kin.Unmarshal(split[0]); chk.E(err) {
|
||||
if err = Ok.Invalid(
|
||||
a, env, "delete event a tag kind value "+
|
||||
"invalid: %s",
|
||||
t.Value(),
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
kk := kind.New(kin.Uint16())
|
||||
if kk.Equal(kind.Deletion) {
|
||||
if err = Ok.Blocked(
|
||||
a, env, "delete event kind may not be "+
|
||||
"deleted",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
if !kk.IsParameterizedReplaceable() {
|
||||
if err = Ok.Error(
|
||||
a, env,
|
||||
"delete tags with a tags containing "+
|
||||
"non-parameterized-replaceable events can't be processed",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
if !bytes.Equal(pk, ev.Pubkey) {
|
||||
if err = Ok.Blocked(
|
||||
a, env,
|
||||
"can't delete other users' events (delete by a tag)",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
f := filter.New()
|
||||
f.Kinds.K = []*kind.T{kk}
|
||||
f.Authors.Append(pk)
|
||||
f.Tags.AppendTags(
|
||||
tag.New(
|
||||
[]byte{'#', 'd'}, split[2],
|
||||
),
|
||||
)
|
||||
res, err = sto.QueryEvents(c, f)
|
||||
if chk.E(err) {
|
||||
if err = Ok.Error(
|
||||
a, env, "failed to query for target event",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(res) < 1 {
|
||||
continue
|
||||
}
|
||||
var resTmp []*event.E
|
||||
for _, v := range res {
|
||||
if ev.CreatedAt.U64() >= v.CreatedAt.U64() {
|
||||
resTmp = append(resTmp, v)
|
||||
}
|
||||
}
|
||||
res = resTmp
|
||||
for _, target := range res {
|
||||
if target.Kind.K == kind.Deletion.K {
|
||||
if err = Ok.Error(
|
||||
a, env, "cannot delete delete event %s", ev.ID,
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
if target.CreatedAt.Int() > ev.CreatedAt.Int() {
|
||||
log.I.F(
|
||||
"not deleting\n%d%\nbecause delete event is older\n%d",
|
||||
target.CreatedAt.Int(), ev.CreatedAt.Int(),
|
||||
)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(target.Pubkey, env.Pubkey) {
|
||||
if err = Ok.Error(
|
||||
a, env, "only author can delete event",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Create eventid.T from the target event ID bytes
|
||||
var eid *eventid.T
|
||||
eid, err = eventid.NewFromBytes(target.EventId().Bytes())
|
||||
if chk.E(err) {
|
||||
if err = Ok.Error(
|
||||
a, env, "failed to create event ID",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Use DeleteEvent to actually delete the target event
|
||||
if err = sto.DeleteEvent(c, eid); chk.E(err) {
|
||||
if err = Ok.Error(
|
||||
a, env, "failed to delete target event",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
log.I.F(
|
||||
"successfully deleted event %x",
|
||||
target.EventId().Bytes(),
|
||||
)
|
||||
}
|
||||
res = nil
|
||||
}
|
||||
// Send a success response after processing all deletions
|
||||
if err = Ok.Ok(a, env, ""); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// Check if this event has been deleted before
|
||||
if ev.Kind.K != kind.Deletion.K {
|
||||
// Create a filter to check for deletion events that reference this
|
||||
// event ID
|
||||
f := filter.New()
|
||||
f.Kinds.K = []*kind.T{kind.Deletion}
|
||||
f.Tags.AppendTags(tag.New([]byte{'e'}, ev.ID))
|
||||
|
||||
// Query for deletion events
|
||||
var deletionEvents []*event.E
|
||||
deletionEvents, err = sto.QueryEvents(c, f)
|
||||
if err == nil && len(deletionEvents) > 0 {
|
||||
// Found deletion events for this ID, don't save it
|
||||
if err = Ok.Blocked(
|
||||
a, env,
|
||||
"the event was deleted, not storing it again",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
var reason []byte
|
||||
ok, reason = x.I.AddEvent(
|
||||
c, x.Relay(), ev, r, remote,
|
||||
)
|
||||
log.I.F("event %0x added %v %s", ev.ID, ok, reason)
|
||||
if !ok {
|
||||
if err = Ok.Error(
|
||||
a, env, err.Error(),
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
output = &EventOutput{"event accepted"}
|
||||
return
|
||||
},
|
||||
)
|
||||
}
|
||||
66 pkg/protocol/openapi/export.go Normal file
@@ -0,0 +1,66 @@
package openapi

import (
    "github.com/danielgtaylor/huma/v2"
    "net/http"
    "orly.dev/pkg/app/relay/helpers"
    "orly.dev/pkg/utils/context"
    "orly.dev/pkg/utils/log"
)

// ExportInput is the parameters for the HTTP API Export method.
type ExportInput struct {
    Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"`
}

// ExportOutput is the return value of Export. It usually will be line
// structured JSON.
type ExportOutput struct{ RawBody []byte }

// RegisterExport implements the Export HTTP API method.
func (x *Operations) RegisterExport(api huma.API) {
    name := "Export"
    description := "Export all events (only works with NIP-98 capable client, will not work with UI)"
    path := x.path + "/export"
    scopes := []string{"admin", "read"}
    method := http.MethodGet
    huma.Register(
        api, huma.Operation{
            OperationID: name,
            Summary:     name,
            Path:        path,
            Method:      method,
            Tags:        []string{"admin"},
            Description: helpers.GenerateDescription(description, scopes),
            Security:    []map[string][]string{{"auth": scopes}},
        }, func(ctx context.T, input *ExportInput) (
            resp *huma.StreamResponse, err error,
        ) {
            r := ctx.Value("http-request").(*http.Request)
            remote := helpers.GetRemoteFromReq(r)
            log.I.F("processing export from %s", remote)
            authed, pubkey := x.AdminAuth(r, remote)
            if !authed {
                err = huma.Error401Unauthorized("Not Authorized")
                return
            }
            log.I.F(
                "%s export of event data requested on admin port pubkey %0x",
                remote, pubkey,
            )
            sto := x.Storage()
            resp = &huma.StreamResponse{
                Body: func(ctx huma.Context) {
                    ctx.SetHeader("Content-Type", "application/nostr+jsonl")
                    sto.Export(x.Context(), ctx.BodyWriter())
                    if f, ok := ctx.BodyWriter().(http.Flusher); ok {
                        f.Flush()
                    } else {
                        log.W.F("error: unable to flush")
                    }
                },
            }
            return
        },
    )
}
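A hedged usage sketch for the export endpoint above: the relay address and the NIP-98 Authorization value are placeholders (token construction is outside this diff). The response streams `application/nostr+jsonl`, one event per line:

```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// Address is an assumption; use your relay's listen address.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:3334/api/export", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder: a real client must attach a NIP-98 event token here.
	req.Header.Set("Authorization", "Nostr <base64 nip-98 event>")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	out, err := os.Create("out.jsonl")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	// Copy the streamed JSONL body straight to disk.
	if _, err = io.Copy(out, res.Body); err != nil {
		log.Fatal(err)
	}
}
```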
57 pkg/protocol/openapi/huma.go Normal file
@@ -0,0 +1,57 @@
package openapi

import (
    "net/http"

    "github.com/danielgtaylor/huma/v2"
    "github.com/danielgtaylor/huma/v2/adapters/humago"

    "orly.dev/pkg/protocol/servemux"
)

// ExposeMiddleware adds the http.Request and http.ResponseWriter to the context
// for the Operations handler.
func ExposeMiddleware(ctx huma.Context, next func(huma.Context)) {
    // Unwrap the request and response objects.
    r, w := humago.Unwrap(ctx)
    ctx = huma.WithValue(ctx, "http-request", r)
    ctx = huma.WithValue(ctx, "http-response", w)
    next(ctx)
}

// NewHuma creates a new huma.API with a Scalar docs UI, and a middleware that allows methods to
// access the http.Request and http.ResponseWriter.
func NewHuma(
    router *servemux.S, name, version, description string,
) (api huma.API) {
    config := huma.DefaultConfig(name, version)
    config.Info.Description = description
    config.DocsPath = ""
    config.OpenAPIPath = "/api/openapi"
    router.HandleFunc(
        "/api", func(w http.ResponseWriter, r *http.Request) {
            w.Header().Set("Content-Type", "text/html")
            w.Write(
                []byte(`<!DOCTYPE html>
<html lang="en">
<head>
<title>realy HTTP API UI</title>
<meta charset="utf-8" />
<meta
name="viewport"
content="width=device-width, initial-scale=1" />
</head>
<body>
<script
id="api-reference"
data-url="/api/openapi.json"></script>
<script src="https://cdn.jsdelivr.net/npm/@scalar/api-reference"></script>
</body>
</html>`),
            )
        },
    )
    api = humago.New(router, config)
    api.UseMiddleware(ExposeMiddleware)
    return
}
66 pkg/protocol/openapi/import.go Normal file
@@ -0,0 +1,66 @@
package openapi

import (
    "fmt"
    "github.com/danielgtaylor/huma/v2"
    "io"
    "net/http"
    "orly.dev/pkg/app/relay/helpers"
    "orly.dev/pkg/utils/context"
    "orly.dev/pkg/utils/log"
    "orly.dev/pkg/utils/lol"
    "time"
)

// ImportInput is the parameters of an import operation, authentication and the
// stream of line structured JSON events.
type ImportInput struct {
    Auth string `header:"Authorization" doc:"nostr nip-98 token for authentication" required:"true"`
}

// ImportOutput is nothing, basically; a 204 or 200 status is expected.
type ImportOutput struct{}

// RegisterImport is the implementation of the Import operation.
func (x *Operations) RegisterImport(api huma.API) {
    name := "Import"
    description := "Import events from line structured JSON (jsonl)"
    path := x.path + "/import"
    scopes := []string{"admin", "write"}
    method := http.MethodPost
    huma.Register(
        api, huma.Operation{
            OperationID:   name,
            Summary:       name,
            Path:          path,
            Method:        method,
            Tags:          []string{"admin"},
            Description:   helpers.GenerateDescription(description, scopes),
            Security:      []map[string][]string{{"auth": scopes}},
            DefaultStatus: 204,
        }, func(ctx context.T, input *ImportInput) (
            output *ImportOutput, err error,
        ) {
            lol.Tracer("Import", input)
            defer func() { lol.Tracer("end Import", output, err) }()
            r := ctx.Value("http-request").(*http.Request)
            remote := helpers.GetRemoteFromReq(r)
            authed, pubkey := x.AdminAuth(r, remote, 10*time.Minute)
            if !authed {
                err = huma.Error401Unauthorized(
                    fmt.Sprintf("user %0x not authorized for action", pubkey),
                )
                return
            }
            sto := x.Storage()
            log.I.F(
                "import of event data requested on admin port from %s pubkey %0x",
                remote,
                pubkey,
            )
            read := io.LimitReader(r.Body, r.ContentLength)
            sto.Import(read)
            return
        },
    )
}
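Because the handler above wraps the body in `io.LimitReader(r.Body, r.ContentLength)`, an upload sent without a known Content-Length (e.g. chunked streaming) would effectively import nothing. A hedged client sketch that sets the length explicitly; the address and Authorization value are placeholders:

```go
package main

import (
	"log"
	"net/http"
	"os"
)

func main() {
	f, err := os.Open("events.jsonl")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// Address is an assumption; use your relay's listen address.
	req, err := http.NewRequest(http.MethodPost, "http://localhost:3334/api/import", f)
	if err != nil {
		log.Fatal(err)
	}
	// Content-Length must be known up front because the server reads only
	// r.ContentLength bytes from the body.
	req.ContentLength = info.Size()
	// Placeholder: a real client must attach a NIP-98 event token here.
	req.Header.Set("Authorization", "Nostr <base64 nip-98 event>")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	res.Body.Close()
	log.Printf("import status: %s", res.Status)
}
```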
124 pkg/protocol/openapi/ok.go Normal file
@@ -0,0 +1,124 @@
|
||||
package openapi
|
||||
|
||||
import (
|
||||
"github.com/danielgtaylor/huma/v2"
|
||||
"orly.dev/pkg/encoders/reason"
|
||||
"orly.dev/pkg/interfaces/eventId"
|
||||
)
|
||||
|
||||
// OK represents a function that processes events or operations, using provided
|
||||
// parameters to generate formatted messages and return errors if any issues
|
||||
// occur during processing.
|
||||
type OK func(
|
||||
a *Operations, env eventId.Ider, format string, params ...any,
|
||||
) (err error)
|
||||
|
||||
// OKs provides a collection of handler functions for managing different types
|
||||
// of operational outcomes, each corresponding to specific error or status
|
||||
// conditions such as authentication requirements, rate limiting, and invalid
|
||||
// inputs.
|
||||
type OKs struct {
|
||||
Ok OK
|
||||
AuthRequired OK
|
||||
PoW OK
|
||||
Duplicate OK
|
||||
Blocked OK
|
||||
RateLimited OK
|
||||
Invalid OK
|
||||
Error OK
|
||||
Unsupported OK
|
||||
Restricted OK
|
||||
}
|
||||
|
||||
// Ok provides a collection of handler functions for managing different types of
|
||||
// operational outcomes, each corresponding to specific error or status
|
||||
// conditions such as authentication requirements, rate limiting, and invalid
|
||||
// inputs.
|
||||
var Ok = OKs{
|
||||
Ok: func(
|
||||
a *Operations, eid eventId.Ider, format string, params ...any,
|
||||
) (err error) {
|
||||
return nil
|
||||
},
|
||||
AuthRequired: func(
|
||||
a *Operations, eid eventId.Ider, format string, params ...any,
|
||||
) (err error) {
|
||||
return huma.Error401Unauthorized(
|
||||
string(
|
||||
reason.AuthRequired.F(format, params...),
|
||||
),
|
||||
)
|
||||
},
|
||||
PoW: func(
|
||||
a *Operations, _ eventId.Ider, format string, params ...any,
|
||||
) (err error) {
|
||||
return huma.Error406NotAcceptable(
|
||||
string(
|
||||
reason.PoW.F(format, params...),
|
||||
),
|
||||
)
|
||||
},
|
||||
Duplicate: func(
|
||||
a *Operations, _ eventId.Ider, format string, params ...any,
|
||||
) (err error) {
|
||||
return huma.Error422UnprocessableEntity(
|
||||
string(
|
||||
reason.Duplicate.F(format, params...),
|
||||
),
|
||||
)
|
||||
},
|
||||
Blocked: func(
|
||||
a *Operations, _ eventId.Ider, format string, params ...any,
|
||||
) (err error) {
|
||||
return huma.Error406NotAcceptable(
|
||||
string(
|
||||
reason.Blocked.F(format, params...),
|
||||
),
|
||||
)
|
||||
},
|
||||
RateLimited: func(
|
||||
a *Operations, _ eventId.Ider, format string, params ...any,
|
||||
) (err error) {
|
||||
return huma.Error400BadRequest(
|
||||
string(
|
||||
reason.RateLimited.F(format, params...),
|
||||
),
|
||||
)
|
||||
},
|
||||
Invalid: func(
|
||||
a *Operations, _ eventId.Ider, format string, params ...any,
|
||||
) (err error) {
|
||||
return huma.Error422UnprocessableEntity(
|
||||
string(
|
||||
reason.Invalid.F(format, params...),
|
||||
),
|
||||
)
|
||||
},
|
||||
Error: func(
|
||||
a *Operations, _ eventId.Ider, format string, params ...any,
|
||||
) (err error) {
|
||||
return huma.Error500InternalServerError(
|
||||
string(
|
||||
reason.Error.F(format, params...),
|
||||
),
|
||||
)
|
||||
},
|
||||
Unsupported: func(
|
||||
a *Operations, _ eventId.Ider, format string, params ...any,
|
||||
) (err error) {
|
||||
return huma.Error400BadRequest(
|
||||
string(
|
||||
reason.Unsupported.F(format, params...),
|
||||
),
|
||||
)
|
||||
},
|
||||
Restricted: func(
|
||||
a *Operations, _ eventId.Ider, format string, params ...any,
|
||||
) (err error) {
|
||||
return huma.Error403Forbidden(
|
||||
string(
|
||||
reason.Restricted.F(format, params...),
|
||||
),
|
||||
)
|
||||
},
|
||||
}
|
||||
24 pkg/protocol/openapi/openapi.go Normal file
@@ -0,0 +1,24 @@
package openapi

import (
    "github.com/danielgtaylor/huma/v2"

    "orly.dev/pkg/interfaces/server"
    "orly.dev/pkg/protocol/servemux"
)

type Operations struct {
    server.I
    path string
    *servemux.S
}

// New creates a new openapi.Operations and registers its methods.
func New(
    s server.I, name, version, description string, path string,
    sm *servemux.S,
) {
    a := NewHuma(sm, name, version, description)
    huma.AutoRegister(a, &Operations{I: s, path: path})
    return
}
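`huma.AutoRegister` scans the methods of the value it is given and registers every method whose name begins with `Register` and accepts a `huma.API`, which is how the Event, Export and Import operations above get wired in. A purely hypothetical extra operation would follow the same pattern; `RegisterPing` below is an illustration, not part of this commit:

```go
package openapi

import (
	"net/http"

	"github.com/danielgtaylor/huma/v2"

	"orly.dev/pkg/utils/context"
)

// PingOutput is the response body of the hypothetical Ping operation.
type PingOutput struct{ Body string }

// RegisterPing is a hypothetical example; because its name starts with
// "Register" and it takes a huma.API, huma.AutoRegister would pick it up
// automatically once it is a method of Operations.
func (x *Operations) RegisterPing(api huma.API) {
	huma.Register(
		api, huma.Operation{
			OperationID: "Ping",
			Summary:     "Ping",
			Path:        x.path + "/ping",
			Method:      http.MethodGet,
			Tags:        []string{"info"},
		}, func(ctx context.T, input *struct{}) (output *PingOutput, err error) {
			// Trivial liveness check: always answer "pong".
			return &PingOutput{Body: "pong"}, nil
		},
	)
}
```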
@@ -62,7 +62,7 @@ func (a *A) HandleAuth(b []byte, srv server.I) (msg []byte) {
|
||||
return reason.Error.F("auth response does not validate")
|
||||
} else {
|
||||
if err = okenvelope.NewFrom(
|
||||
env.Event.Id, true,
|
||||
env.Event.ID, true,
|
||||
).Write(a.Listener); chk.E(err) {
|
||||
return
|
||||
}
|
||||
@@ -71,6 +71,16 @@ func (a *A) HandleAuth(b []byte, srv server.I) (msg []byte) {
|
||||
env.Event.Pubkey,
|
||||
)
|
||||
a.Listener.SetAuthedPubkey(env.Event.Pubkey)
|
||||
// ev := a.Listener.GetPendingEvent()
|
||||
// if ev != nil {
|
||||
// var accepted bool
|
||||
// if accepted, msg = a.I.AddEvent(
|
||||
// context.Bg(), srv.Relay(), ev, a.Listener.Request,
|
||||
// a.Listener.RealRemote(),
|
||||
// ); accepted {
|
||||
// log.W.F("saved event %0x", ev.Id)
|
||||
// }
|
||||
// }
|
||||
}
|
||||
}
|
||||
return
|
||||
|
||||
@@ -2,8 +2,12 @@ package socketapi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"orly.dev/pkg/crypto/sha256"
|
||||
"orly.dev/pkg/encoders/bech32encoding"
|
||||
"orly.dev/pkg/encoders/envelopes/authenvelope"
|
||||
"orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||
"orly.dev/pkg/encoders/envelopes/noticeenvelope"
|
||||
"orly.dev/pkg/encoders/envelopes/okenvelope"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/eventid"
|
||||
@@ -16,6 +20,7 @@ import (
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/context"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// HandleEvent processes an incoming event by validating its signature, verifying
|
||||
@@ -47,9 +52,11 @@ func (a *A) HandleEvent(
|
||||
c context.T, req []byte, srv server.I,
|
||||
) (msg []byte) {
|
||||
|
||||
log.T.F("handleEvent %s %s", a.RealRemote(), req)
|
||||
var err error
|
||||
var ok bool
|
||||
log.T.F(
|
||||
"handleEvent %s %s authed: %0x", a.RealRemote(), req,
|
||||
a.Listener.AuthedPubkey(),
|
||||
)
|
||||
var rem []byte
|
||||
sto := srv.Storage()
|
||||
if sto == nil {
|
||||
@@ -63,22 +70,58 @@ func (a *A) HandleEvent(
|
||||
if len(rem) > 0 {
|
||||
log.I.F("extra '%s'", rem)
|
||||
}
|
||||
if !bytes.Equal(env.GetIDBytes(), env.E.Id) {
|
||||
if a.I.AuthRequired() && !a.Listener.IsAuthed() {
|
||||
log.I.F("requesting auth from client from %s", a.Listener.RealRemote())
|
||||
a.Listener.RequestAuth()
|
||||
if err = Ok.AuthRequired(a, env.E, "auth required"); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if err = authenvelope.NewChallengeWith(a.Listener.Challenge()).
|
||||
Write(a.Listener); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// send a notice in case the client renders it to explain why auth is required
|
||||
opks := a.I.OwnersPubkeys()
|
||||
var npubList string
|
||||
for i, pk := range opks {
|
||||
var npub []byte
|
||||
if npub, err = bech32encoding.BinToNpub(pk); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
npubList += string(npub)
|
||||
if i < len(opks)-1 {
|
||||
npubList += ", "
|
||||
}
|
||||
}
|
||||
if err = noticeenvelope.NewFrom("relay whitelists write access to users within the second degree of the social graph of " + npubList).Write(a.Listener); chk.E(err) {
|
||||
err = nil
|
||||
}
|
||||
// a.Listener.SetPendingEvent(env.E)
|
||||
return
|
||||
}
|
||||
calculatedId := env.E.GetIDBytes()
|
||||
if !bytes.Equal(calculatedId, env.E.ID) {
|
||||
if err = Ok.Invalid(
|
||||
a, env, "event id is computed incorrectly",
|
||||
a, env, "event id is computed incorrectly, "+
|
||||
"event has ID %0x, but when computed it is %0x",
|
||||
env.E.ID, calculatedId,
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
var ok bool
|
||||
if ok, err = env.Verify(); chk.T(err) {
|
||||
if err = Ok.Error(
|
||||
a, env, "failed to verify signature",
|
||||
a, env, fmt.Sprintf(
|
||||
"failed to verify signature: %s",
|
||||
err.Error(),
|
||||
),
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
} else if !ok {
|
||||
if err = Ok.Error(
|
||||
if err = Ok.Invalid(
|
||||
a, env,
|
||||
"signature is invalid",
|
||||
); chk.E(err) {
|
||||
@@ -86,6 +129,36 @@ func (a *A) HandleEvent(
|
||||
}
|
||||
return
|
||||
}
|
||||
// check that relay policy allows this event
|
||||
accept, notice, _ := srv.AcceptEvent(
|
||||
c, env.E, a.Listener.Request, a.Listener.AuthedPubkey(),
|
||||
a.Listener.RealRemote(),
|
||||
)
|
||||
if !accept {
|
||||
if strings.Contains(notice, "auth") {
|
||||
if err = Ok.AuthRequired(
|
||||
a, env, notice,
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
return
|
||||
}
|
||||
// check for protected tag (NIP-70)
|
||||
protectedTag := env.E.Tags.GetFirst(tag.New("-"))
|
||||
if protectedTag != nil && a.AuthRequired() {
|
||||
// check that the pubkey of the event matches the authed pubkey
|
||||
if !bytes.Equal(a.Listener.AuthedPubkey(), env.E.Pubkey) {
|
||||
if err = Ok.Blocked(
|
||||
a, env,
|
||||
"protected tag may only be published by client authed to the same pubkey",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
// check and process delete
|
||||
if env.E.Kind.K == kind.Deletion.K {
|
||||
log.I.F("delete event\n%s", env.E.Serialize())
|
||||
for _, t := range env.Tags.ToSliceOfTags() {
|
||||
@@ -161,16 +234,6 @@ func (a *A) HandleEvent(
|
||||
if len(split) != 3 {
|
||||
continue
|
||||
}
|
||||
// Check if the deletion event is trying to delete itself
|
||||
if bytes.Equal(split[2], env.E.Id) {
|
||||
if err = Ok.Blocked(
|
||||
a, env,
|
||||
"deletion event cannot reference its own ID",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
var pk []byte
|
||||
if pk, err = hex.DecAppend(nil, split[1]); chk.E(err) {
|
||||
if err = Ok.Invalid(
|
||||
@@ -185,7 +248,8 @@ func (a *A) HandleEvent(
|
||||
kin := ints.New(uint16(0))
|
||||
if _, err = kin.Unmarshal(split[0]); chk.E(err) {
|
||||
if err = Ok.Invalid(
|
||||
a, env, "delete event a tag kind value invalid: %s",
|
||||
a, env, "delete event a tag kind value "+
|
||||
"invalid: %s",
|
||||
t.Value(),
|
||||
); chk.E(err) {
|
||||
return
|
||||
@@ -195,7 +259,8 @@ func (a *A) HandleEvent(
|
||||
kk := kind.New(kin.Uint16())
|
||||
if kk.Equal(kind.Deletion) {
|
||||
if err = Ok.Blocked(
|
||||
a, env, "delete event kind may not be deleted",
|
||||
a, env, "delete event kind may not be "+
|
||||
"deleted",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
@@ -204,7 +269,8 @@ func (a *A) HandleEvent(
|
||||
if !kk.IsParameterizedReplaceable() {
|
||||
if err = Ok.Error(
|
||||
a, env,
|
||||
"delete tags with a tags containing non-parameterized-replaceable events can't be processed",
|
||||
"delete tags with a tags containing "+
|
||||
"non-parameterized-replaceable events can't be processed",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
@@ -247,7 +313,7 @@ func (a *A) HandleEvent(
|
||||
for _, target := range res {
|
||||
if target.Kind.K == kind.Deletion.K {
|
||||
if err = Ok.Error(
|
||||
a, env, "cannot delete delete event %s", env.E.Id,
|
||||
a, env, "cannot delete delete event %s", env.E.ID,
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
@@ -297,9 +363,7 @@ func (a *A) HandleEvent(
|
||||
res = nil
|
||||
}
|
||||
// Send a success response after processing all deletions
|
||||
if err = okenvelope.NewFrom(
|
||||
env.E.Id, ok,
|
||||
).Write(a.Listener); chk.E(err) {
|
||||
if err = Ok.Ok(a, env, ""); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// Check if this event has been deleted before
|
||||
@@ -308,7 +372,7 @@ func (a *A) HandleEvent(
|
||||
// event ID
|
||||
f := filter.New()
|
||||
f.Kinds.K = []*kind.T{kind.Deletion}
|
||||
f.Tags.AppendTags(tag.New([]byte{'e'}, env.E.Id))
|
||||
f.Tags.AppendTags(tag.New([]byte{'e'}, env.E.ID))
|
||||
|
||||
// Query for deletion events
|
||||
var deletionEvents []*event.E
|
||||
@@ -316,7 +380,7 @@ func (a *A) HandleEvent(
|
||||
if err == nil && len(deletionEvents) > 0 {
|
||||
// Found deletion events for this ID, don't save it
|
||||
if err = Ok.Blocked(
|
||||
a, env, "event was deleted, not storing it again",
|
||||
a, env, "the event was deleted, not storing it again",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
@@ -325,11 +389,9 @@ func (a *A) HandleEvent(
|
||||
}
|
||||
}
|
||||
var reason []byte
|
||||
ok, reason = srv.AddEvent(
|
||||
c, rl, env.E, a.Req(), a.RealRemote(), a.Listener.AuthedPubkey(),
|
||||
)
|
||||
log.I.F("event %0x added %v, %s", env.E.Id, ok, reason)
|
||||
if err = okenvelope.NewFrom(env.E.Id, ok).Write(a.Listener); chk.E(err) {
|
||||
ok, reason = srv.AddEvent(c, rl, env.E, a.Req(), a.RealRemote())
|
||||
log.I.F("event %0x added %v %s", env.E.ID, ok, reason)
|
||||
if err = okenvelope.NewFrom(env.E.ID, ok).Write(a.Listener); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
|
||||
@@ -25,8 +25,8 @@ import (
|
||||
// Processes the message by identifying its envelope type, routes it to the
|
||||
// corresponding handler method, generates a notice for errors or unknown types,
|
||||
// logs the notice, and writes it back to the listener if required.
|
||||
func (a *A) HandleMessage(msg []byte) {
|
||||
log.T.F("received message:\n%s", string(msg))
|
||||
func (a *A) HandleMessage(msg, authedPubkey []byte) {
|
||||
log.T.F("%s received message:\n%s", a.Listener.RealRemote(), string(msg))
|
||||
var notice []byte
|
||||
var err error
|
||||
var t string
|
||||
@@ -38,10 +38,7 @@ func (a *A) HandleMessage(msg []byte) {
|
||||
case eventenvelope.L:
|
||||
notice = a.HandleEvent(a.Context(), rem, a.I)
|
||||
case reqenvelope.L:
|
||||
notice = a.HandleReq(
|
||||
a.Context(), rem,
|
||||
a.I,
|
||||
)
|
||||
notice = a.HandleReq(a.Context(), rem, a.I)
|
||||
case closeenvelope.L:
|
||||
notice = a.HandleClose(rem, a.I)
|
||||
case authenvelope.L:
|
||||
|
||||
@@ -3,11 +3,15 @@ package socketapi
|
||||
import (
|
||||
"errors"
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"orly.dev/pkg/encoders/bech32encoding"
|
||||
"orly.dev/pkg/encoders/envelopes/authenvelope"
|
||||
"orly.dev/pkg/encoders/envelopes/closedenvelope"
|
||||
"orly.dev/pkg/encoders/envelopes/eoseenvelope"
|
||||
"orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||
"orly.dev/pkg/encoders/envelopes/noticeenvelope"
|
||||
"orly.dev/pkg/encoders/envelopes/reqenvelope"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/reason"
|
||||
"orly.dev/pkg/interfaces/server"
|
||||
"orly.dev/pkg/protocol/auth"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
@@ -43,10 +47,12 @@ import (
|
||||
// through the associated subscription or writes error messages to the listener.
|
||||
// If the subscription should be cancelled due to completed query results, it
|
||||
// generates and sends a closure envelope.
|
||||
func (a *A) HandleReq(
|
||||
c context.T, req []byte, srv server.I,
|
||||
) (r []byte) {
|
||||
func (a *A) HandleReq(c context.T, req []byte, srv server.I) (r []byte) {
|
||||
var err error
|
||||
log.I.F(
|
||||
"auth required %v client authed %v", a.I.AuthRequired(),
|
||||
a.Listener.IsAuthed(),
|
||||
)
|
||||
log.I.F("REQ:\n%s", req)
|
||||
sto := srv.Storage()
|
||||
var rem []byte
|
||||
@@ -57,6 +63,39 @@ func (a *A) HandleReq(
|
||||
if len(rem) > 0 {
|
||||
log.I.F("extra '%s'", rem)
|
||||
}
|
||||
if a.I.AuthRequired() && !a.Listener.IsAuthed() {
|
||||
log.I.F("requesting auth from client from %s", a.Listener.RealRemote())
|
||||
a.Listener.RequestAuth()
|
||||
if err = authenvelope.NewChallengeWith(a.Listener.Challenge()).
|
||||
Write(a.Listener); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if err = closedenvelope.NewFrom(
|
||||
env.Subscription, reason.AuthRequired.F("auth enabled"),
|
||||
).Write(a.Listener); chk.E(err) {
|
||||
return
|
||||
}
|
||||
if !a.I.PublicReadable() {
|
||||
// send a notice in case the client renders it to explain why auth is required
|
||||
opks := a.I.OwnersPubkeys()
|
||||
var npubList string
|
||||
for i, pk := range opks {
|
||||
var npub []byte
|
||||
if npub, err = bech32encoding.BinToNpub(pk); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
npubList += string(npub)
|
||||
if i < len(opks)-1 {
|
||||
npubList += ", "
|
||||
}
|
||||
}
|
||||
if err = noticeenvelope.NewFrom("relay whitelists read access to users within the second degree of the social graph of " + npubList).Write(a.Listener); chk.E(err) {
|
||||
err = nil
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
var accept bool
|
||||
allowed, accept, _ := srv.AcceptReq(
|
||||
c, a.Request, env.Filters, a.Listener.AuthedPubkey(),
|
||||
@@ -78,8 +117,7 @@ func (a *A) HandleReq(
|
||||
continue
|
||||
}
|
||||
}
|
||||
if events, err = sto.QueryEvents(c, f); chk.E(err) {
|
||||
log.E.F("eventstore: %v", err)
|
||||
if events, err = sto.QueryEvents(c, f); err != nil {
|
||||
if errors.Is(err, badger.ErrDBClosed) {
|
||||
return
|
||||
}
|
||||
@@ -89,10 +127,11 @@ func (a *A) HandleReq(
|
||||
if srv.AuthRequired() {
|
||||
var tmp event.S
|
||||
for _, ev := range events {
|
||||
if auth.CheckPrivilege(a.Listener.AuthedPubkey(), ev) {
|
||||
if !auth.CheckPrivilege(a.Listener.AuthedPubkey(), ev) {
|
||||
log.W.F(
|
||||
"not privileged %0x ev pubkey %0x",
|
||||
a.Listener.AuthedPubkey(), ev.Pubkey,
|
||||
"not privileged: client pubkey '%0x' event pubkey '%0x' kind %s privileged: %v",
|
||||
a.Listener.AuthedPubkey(), ev.Pubkey, ev.Kind.Name(),
|
||||
ev.Kind.IsPrivileged(),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ type OK func(a *A, env eventId.Ider, format string, params ...any) (err error)
|
||||
// conditions such as authentication requirements, rate limiting, and invalid
|
||||
// inputs.
|
||||
type OKs struct {
|
||||
Ok OK
|
||||
AuthRequired OK
|
||||
PoW OK
|
||||
Duplicate OK
|
||||
@@ -32,6 +33,13 @@ type OKs struct {
|
||||
// conditions such as authentication requirements, rate limiting, and invalid
|
||||
// inputs.
|
||||
var Ok = OKs{
|
||||
Ok: func(
|
||||
a *A, env eventId.Ider, format string, params ...any,
|
||||
) (err error) {
|
||||
return okenvelope.NewFrom(
|
||||
env.Id(), true, nil,
|
||||
).Write(a.Listener)
|
||||
},
|
||||
AuthRequired: func(
|
||||
a *A, env eventId.Ider, format string, params ...any,
|
||||
) (err error) {
|
||||
|
||||
@@ -10,16 +10,11 @@ import (
|
||||
"orly.dev/pkg/protocol/ws"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/log"
|
||||
"regexp"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const Type = "socketapi"
|
||||
|
||||
var (
|
||||
NIP20prefixmatcher = regexp.MustCompile(`^\w+: `)
|
||||
)
|
||||
|
||||
// Map is a map of filters associated with a collection of ws.Listener
|
||||
// connections.
|
||||
type Map map[*ws.Listener]map[string]*filters.T
|
||||
@@ -132,22 +127,28 @@ func (p *S) Receive(msg publisher.Message) {
|
||||
// applies authentication checks if required by the server, and skips delivery
|
||||
// for unauthenticated users when events are privileged.
|
||||
func (p *S) Deliver(ev *event.E) {
|
||||
log.T.F("delivering event %0x to subscribers", ev.Id)
|
||||
log.T.F("delivering event %0x to subscribers", ev.ID)
|
||||
var err error
|
||||
p.Mx.Lock()
|
||||
defer p.Mx.Unlock()
|
||||
for w, subs := range p.Map {
|
||||
log.I.F("%v %s", subs, w.RealRemote())
|
||||
// log.I.F("%v %s", subs, w.RealRemote())
|
||||
for id, subscriber := range subs {
|
||||
log.T.F(
|
||||
"subscriber %s\n%s", w.RealRemote(),
|
||||
subscriber.Marshal(nil),
|
||||
)
|
||||
// log.T.F(
|
||||
// "subscriber %s\n%s", w.RealRemote(),
|
||||
// subscriber.Marshal(nil),
|
||||
// )
|
||||
if !subscriber.Match(ev) {
|
||||
continue
|
||||
}
|
||||
if p.Server.AuthRequired() {
|
||||
if auth.CheckPrivilege(w.AuthedPubkey(), ev) {
|
||||
if !auth.CheckPrivilege(w.AuthedPubkey(), ev) {
|
||||
log.W.F(
|
||||
"not privileged %0x ev pubkey %0x ev pubkey %0x kind %s privileged: %v",
|
||||
w.AuthedPubkey(), ev.Pubkey,
|
||||
w.AuthedPubkey(), ev.Kind.Name(),
|
||||
ev.Kind.IsPrivileged(),
|
||||
)
|
||||
continue
|
||||
}
|
||||
var res *eventenvelope.Result
|
||||
@@ -157,7 +158,7 @@ func (p *S) Deliver(ev *event.E) {
|
||||
if err = res.Write(w); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
log.T.F("dispatched event %0x to subscription %s", ev.Id, id)
|
||||
log.T.F("dispatched event %0x to subscription %s", ev.ID, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -85,7 +85,7 @@ func (a *A) Serve(w http.ResponseWriter, r *http.Request, s server.I) {
|
||||
},
|
||||
)
|
||||
if a.I.AuthRequired() {
|
||||
log.T.F("requesting auth from client from %s", a.Listener.RealRemote())
|
||||
log.I.F("requesting auth from client from %s", a.Listener.RealRemote())
|
||||
a.Listener.RequestAuth()
|
||||
if err = authenvelope.NewChallengeWith(a.Listener.Challenge()).
|
||||
Write(a.Listener); chk.E(err) {
|
||||
@@ -117,10 +117,11 @@ func (a *A) Serve(w http.ResponseWriter, r *http.Request, s server.I) {
|
||||
websocket.CloseGoingAway,
|
||||
websocket.CloseNoStatusReceived,
|
||||
websocket.CloseAbnormalClosure,
|
||||
websocket.CloseProtocolError, // invalid UTF error?
|
||||
) {
|
||||
log.W.F(
|
||||
"unexpected close error from %s: %v",
|
||||
helpers.GetRemoteFromReq(r), err,
|
||||
"unexpected close error from %s: %v\n%s",
|
||||
helpers.GetRemoteFromReq(r), err, message,
|
||||
)
|
||||
}
|
||||
return
|
||||
@@ -132,6 +133,6 @@ func (a *A) Serve(w http.ResponseWriter, r *http.Request, s server.I) {
|
||||
}
|
||||
continue
|
||||
}
|
||||
go a.HandleMessage(message)
|
||||
go a.HandleMessage(message, a.Listener.AuthedPubkey())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,7 +72,7 @@ type writeRequest struct {
|
||||
}
|
||||
|
||||
// NewRelay returns a new relay. The relay connection will be closed when the
|
||||
// context is canceled.
|
||||
// context is cancelled.
|
||||
func NewRelay(c context.T, url string, opts ...RelayOption) *Client {
|
||||
ctx, cancel := context.Cancel(c)
|
||||
r := &Client{
|
||||
@@ -234,9 +234,10 @@ func (r *Client) ConnectWithTLS(ctx context.T, tlsConfig *tls.Config) error {
|
||||
// general message reader loop
|
||||
go func() {
|
||||
buf := new(bytes.Buffer)
|
||||
var err error
|
||||
for {
|
||||
buf.Reset()
|
||||
if err := conn.ReadMessage(r.connectionContext, buf); chk.T(err) {
|
||||
if err = conn.ReadMessage(r.connectionContext, buf); err != nil {
|
||||
r.ConnectionError = err
|
||||
r.Close()
|
||||
break
|
||||
@@ -270,10 +271,12 @@ func (r *Client) ConnectWithTLS(ctx context.T, tlsConfig *tls.Config) error {
|
||||
}
|
||||
r.challenge = env.Challenge
|
||||
case eventenvelope.L:
|
||||
// log.I.F("message: %s", message)
|
||||
env := eventenvelope.NewResult()
|
||||
if env, message, err = eventenvelope.ParseResult(message); chk.E(err) {
|
||||
if env, message, err = eventenvelope.ParseResult(message); err != nil {
|
||||
continue
|
||||
}
|
||||
// log.I.F("%s", env.Event.Marshal(nil))
|
||||
if len(env.Subscription.T) == 0 {
|
||||
continue
|
||||
}
|
||||
@@ -299,7 +302,7 @@ func (r *Client) ConnectWithTLS(ctx context.T, tlsConfig *tls.Config) error {
|
||||
if ok = r.signatureChecker(env.Event); !ok {
|
||||
log.E.F(
|
||||
"{%s} bad signature on %s\n", r.URL,
|
||||
env.Event.Id,
|
||||
env.Event.ID,
|
||||
)
|
||||
continue
|
||||
}
|
||||
@@ -497,12 +500,13 @@ func (r *Client) PrepareSubscription(
|
||||
return sub
|
||||
}
|
||||
|
||||
// QuerySync is only used in tests. The realy query method is synchronous now
|
||||
// QuerySync is only used in tests. The relay query method is synchronous now
|
||||
// anyway (it ensures sort order is respected).
|
||||
func (r *Client) QuerySync(
|
||||
ctx context.T, f *filter.F,
|
||||
opts ...SubscriptionOption,
|
||||
) ([]*event.E, error) {
|
||||
// log.T.F("QuerySync:\n%s", f.Marshal(nil))
|
||||
sub, err := r.Subscribe(ctx, filters.New(f), opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -73,7 +73,7 @@ func TestPublish(t *testing.T) {
|
||||
// send back an ok nip-20 command result
|
||||
var res []byte
|
||||
if res = okenvelope.NewFrom(
|
||||
textNote.Id, true, nil,
|
||||
textNote.ID, true, nil,
|
||||
).Marshal(res); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -121,7 +121,7 @@ func TestPublishBlocked(t *testing.T) {
|
||||
// send back a not ok nip-20 command result
|
||||
var res []byte
|
||||
if res = okenvelope.NewFrom(
|
||||
textNote.Id, false,
|
||||
textNote.ID, false,
|
||||
normalize.Msg(normalize.Blocked, "no reason"),
|
||||
).Marshal(res); chk.E(err) {
|
||||
t.Fatal(err)
|
||||
@@ -129,7 +129,7 @@ func TestPublishBlocked(t *testing.T) {
|
||||
if err := websocket.Message.Send(conn, res); chk.T(err) {
|
||||
t.Errorf("websocket.JSON.Send: %v", err)
|
||||
}
|
||||
// res := []any{"OK", textNote.Id, false, "blocked"}
|
||||
// res := []any{"OK", textNote.ID, false, "blocked"}
|
||||
chk.E(websocket.JSON.Send(conn, res))
|
||||
},
|
||||
)
|
||||
@@ -256,7 +256,9 @@ func newWebsocketServer(handler func(*websocket.Conn)) (server *httptest.Server)
|
||||
// anyOriginHandshake is an alternative to default in golang.org/x/net/websocket
|
||||
// which checks for origin. nostr client sends no origin and it makes no difference
|
||||
// for the tests here anyway.
|
||||
var anyOriginHandshake = func(conf *websocket.Config, r *http.Request) (err error) {
|
||||
var anyOriginHandshake = func(
|
||||
conf *websocket.Config, r *http.Request,
|
||||
) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"github.com/gobwas/httphead"
|
||||
"github.com/gobwas/ws"
|
||||
"github.com/gobwas/ws/wsflate"
|
||||
@@ -170,7 +171,7 @@ func (cn *Connection) ReadMessage(c context.T, buf io.Writer) (err error) {
|
||||
h, err := cn.reader.NextFrame()
|
||||
if err != nil {
|
||||
cn.conn.Close()
|
||||
return errorf.E(
|
||||
return fmt.Errorf(
|
||||
"%s failed to advance frame: %s",
|
||||
cn.conn.RemoteAddr(),
|
||||
err.Error(),
|
||||
|
||||
@@ -4,6 +4,7 @@ package ws
|
||||
import (
|
||||
"net/http"
|
||||
"orly.dev/pkg/app/relay/helpers"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/protocol/auth"
|
||||
atomic2 "orly.dev/pkg/utils/atomic"
|
||||
"strings"
|
||||
@@ -22,6 +23,7 @@ type Listener struct {
|
||||
authRequested atomic2.Bool
|
||||
isAuthed atomic2.Bool
|
||||
challenge atomic2.Bytes
|
||||
pendingEvent *event.E
|
||||
}
|
||||
|
||||
// NewListener creates a new Listener for listening for inbound connections for
|
||||
@@ -33,6 +35,7 @@ func NewListener(
|
||||
ws.setRemoteFromReq(req)
|
||||
if authRequired {
|
||||
ws.SetChallenge(auth.GenerateChallenge())
|
||||
ws.SetAuthedPubkey(nil)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -115,3 +118,13 @@ func (ws *Listener) AuthRequested() (read bool) {
|
||||
func (ws *Listener) RequestAuth() {
|
||||
ws.authRequested.Store(true)
|
||||
}
|
||||
|
||||
func (ws *Listener) SetPendingEvent(ev *event.E) {
|
||||
ws.pendingEvent = ev
|
||||
}
|
||||
|
||||
func (ws *Listener) GetPendingEvent() (ev *event.E) {
|
||||
ev = ws.pendingEvent
|
||||
ws.pendingEvent = nil
|
||||
return
|
||||
}
|
||||
|
||||
@@ -73,7 +73,7 @@ func (_ WithLabel) IsSubscriptionOption() {}

var _ SubscriptionOption = (WithLabel)("")

// GetID return the Nostr subscription Id as given to the Client it is a
// GetID return the Nostr subscription ID as given to the Client it is a
// concatenation of the label and a serial number.
func (sub *Subscription) GetID() (id *subscription.Id) {
    var err error

@@ -1 +1 @@
v0.1.1
v0.2.19