can now submit new events via JWT Bearer auth

2025-03-11 13:59:52 -01:06
parent 7a1a013140
commit b417513dee
15 changed files with 674 additions and 268 deletions

View File

@@ -174,7 +174,8 @@ func Post(filePath string, ur *url.URL, bearer string) (err error) {
 		Host: ur.Host,
 	}
 	r.Header.Add("User-Agent", userAgent)
-	r.Header.Add("Authorization", "Authorization "+bearer)
+	r.Header.Add("Authorization", "Bearer "+bearer)
+	r.Header.Add("Accept", "application/nostr+json")
 	r.GetBody = func() (rc io.ReadCloser, err error) {
 		rc = payload
 		return
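
Together with the /event route registered further down, this is everything a client outside this codebase needs to submit an event over HTTP. A minimal sketch follows, using only the standard library; the relay URL and token are placeholders, not values from this commit.

package example

import (
	"bytes"
	"fmt"
	"net/http"
)

// submitEvent posts a serialized nostr event to a relay's /event endpoint using
// the Bearer scheme introduced in this commit; relayURL and jwt are placeholders.
func submitEvent(relayURL, jwt string, eventJSON []byte) (err error) {
	var req *http.Request
	if req, err = http.NewRequest("POST", relayURL+"/event", bytes.NewReader(eventJSON)); err != nil {
		return
	}
	req.Header.Set("Authorization", "Bearer "+jwt)
	req.Header.Set("Accept", "application/nostr+json")
	var resp *http.Response
	if resp, err = http.DefaultClient.Do(req); err != nil {
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		err = fmt.Errorf("relay rejected event: %s", resp.Status)
	}
	return
}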

View File

@@ -298,8 +298,7 @@ func (f *T) Unmarshal(b []byte) (r []byte, err error) {
 			goto invalid
 		}
 		var ff [][]byte
-		if ff, r, err = text.UnmarshalHexArray(r,
-			sha256.Size); chk.E(err) {
+		if ff, r, err = text.UnmarshalHexArray(r, sha256.Size); chk.E(err) {
 			return
 		}
 		f.IDs = tag.New(ff...)

View File

@@ -21,7 +21,7 @@ func TestMakeNIP98Request_ValidateNIP98Request(t *testing.T) {
 	// // }
 	// var pk []byte
 	// var valid bool
-	// if valid, pk, err = ValidateRequest(r, nil); chk.E(err) {
+	// if valid, pk, err = CheckAuth(r, nil); chk.E(err) {
 	// 	t.Fatal(err)
 	// }
 	// if !valid {

View File

@@ -11,14 +11,14 @@ import (
"realy.lol/tag" "realy.lol/tag"
) )
// ValidateRequest verifies a received http.Request has got a valid // CheckAuth verifies a received http.Request has got a valid
// authentication event or token in it, and provides the public key that should be // authentication event or token in it, and provides the public key that should be
// verified to be authorized to access the resource associated with the request. // verified to be authorized to access the resource associated with the request.
// //
// A VerifyJWTFunc should be provided in order to search the event store for a // A VerifyJWTFunc should be provided in order to search the event store for a
// kind 13004 with a JWT signer pubkey that is granted authority for the request. // kind 13004 with a JWT signer pubkey that is granted authority for the request.
func ValidateRequest(r *http.Request, vfn VerifyJWTFunc) (valid bool, pubkey []byte, err error) { func CheckAuth(r *http.Request, vfn VerifyJWTFunc) (valid bool, pubkey []byte, err error) {
log.I.F("validating nip-98") log.I.F("validating auth %v", vfn)
val := r.Header.Get(HeaderKey) val := r.Header.Get(HeaderKey)
if val == "" { if val == "" {
err = errorf.E("'%s' key missing from request header", HeaderKey) err = errorf.E("'%s' key missing from request header", HeaderKey)
@@ -113,7 +113,7 @@ func ValidateRequest(r *http.Request, vfn VerifyJWTFunc) (valid bool, pubkey []b
} }
pubkey = ev.PubKey pubkey = ev.PubKey
case strings.HasPrefix(val, JWTPrefix): case strings.HasPrefix(val, JWTPrefix):
if vfn != nil { if vfn == nil {
err = errorf.E("JWT bearer header found but no JWT verifier function provided") err = errorf.E("JWT bearer header found but no JWT verifier function provided")
return return
} }

ratel/fetchByIds.go (new file, 281 lines)
View File

@@ -0,0 +1,281 @@
package ratel
import (
"bytes"
"errors"
"sort"
"strconv"
"time"
"github.com/dgraph-io/badger/v4"
"realy.lol/context"
"realy.lol/event"
"realy.lol/eventid"
"realy.lol/hex"
"realy.lol/ratel/keys/id"
"realy.lol/ratel/keys/serial"
"realy.lol/ratel/prefixes"
"realy.lol/sha256"
"realy.lol/tag"
"realy.lol/timestamp"
)
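// idQuery describes a single ID lookup: index records the position of the ID in
// the caller's slice, searchPrefix is the Id-prefix index key derived from that
// ID, and start is the key the iterator seeks to before scanning for
// searchPrefix.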
type idQuery struct {
index int
searchPrefix []byte
start []byte
}
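// FetchByIds returns the events matching the given IDs. Replaceable and
// parameterized-replaceable kinds are collapsed so only the newest version is
// kept, events carrying an expiration tag can be queued for deletion instead of
// being delivered, and when an L2 store is configured a stub entry is returned
// with only the ID populated, which signals the caller to fetch the full event
// from the second level store. Access records for delivered events are updated
// in a background goroutine.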
func (r *T) FetchByIds(c context.T, ids [][]byte) (evs event.Ts, err error) {
var queries []idQuery
if queries, err = PrepareFetchByIdsQuery(ids); chk.E(err) {
return
}
eventKeys := make(map[string]struct{})
for _, q := range queries {
select {
case <-r.Ctx.Done():
return
case <-c.Done():
return
default:
}
// first, search for the event keys for fetching them next
err = r.View(func(txn *badger.Txn) (err error) {
// iterate only through keys and in reverse order
opts := badger.IteratorOptions{
Reverse: true,
}
it := txn.NewIterator(opts)
defer it.Close()
for it.Seek(q.start); it.ValidForPrefix(q.searchPrefix); it.Next() {
select {
case <-r.Ctx.Done():
return
case <-c.Done():
return
default:
}
item := it.Item()
k := item.KeyCopy(nil)
ser := serial.FromKey(k)
idx := prefixes.Event.Key(ser)
eventKeys[string(idx)] = struct{}{}
}
return
})
if chk.E(err) {
// this means shutdown, probably
if errors.Is(err, badger.ErrDBClosed) {
return
}
}
log.T.F("found %d event indexes from %d queries", len(eventKeys), len(queries))
}
select {
case <-r.Ctx.Done():
return
case <-c.Done():
return
default:
}
// if events were found that should be deleted, delete them
var delEvs [][]byte
defer func() {
for _, d := range delEvs {
chk.E(r.DeleteEvent(r.Ctx, eventid.NewWith(d)))
}
}()
evMap := make(map[string]*event.T)
accessed := make(map[string]struct{})
for ek := range eventKeys {
eventKey := []byte(ek)
var done bool
// retrieve the event matching the event index key
err = r.View(func(txn *badger.Txn) (err error) {
select {
case <-r.Ctx.Done():
return
case <-c.Done():
return
default:
}
opts := badger.IteratorOptions{Reverse: true}
it := txn.NewIterator(opts)
defer it.Close()
for it.Seek(eventKey); it.ValidForPrefix(eventKey); it.Next() {
item := it.Item()
if r.HasL2 && item.ValueSize() == sha256.Size {
// this is a stub entry that indicates an L2 needs to be accessed for
// it, so we populate only the event.T.ID and return the result, the
// caller will expect this as a signal to query the L2 event store.
var eventValue []byte
ev := &event.T{}
if eventValue, err = item.ValueCopy(nil); chk.E(err) {
continue
}
log.T.F("found event stub %0x must seek in L2", eventValue)
ev.ID = eventValue
select {
case <-c.Done():
return
case <-r.Ctx.Done():
log.T.Ln("backend context canceled")
return
default:
}
evMap[hex.Enc(ev.ID)] = ev
return
}
ev := &event.T{}
if err = item.Value(func(eventValue []byte) (err error) {
var rem []byte
if rem, err = r.Unmarshal(ev, eventValue); chk.E(err) {
return
}
if len(rem) > 0 {
log.T.S(rem)
}
// check for expiration timestamps and note the event ID for deletion before
// *not* delivering them
if et := ev.Tags.GetFirst(tag.New("expiration")); et != nil {
var exp uint64
if exp, err = strconv.ParseUint(string(et.Value()), 10, 64); chk.E(err) {
return
}
if int64(exp) > time.Now().Unix() {
// this needs to be deleted
delEvs = append(delEvs, ev.ID)
return
}
}
// check if this event is replaced by one we already have in the result.
if ev.Kind.IsReplaceable() {
for i, evc := range evMap {
// replaceable means there should be only the newest for the
// pubkey and kind.
if bytes.Equal(ev.PubKey, evc.PubKey) && ev.Kind.Equal(evc.Kind) {
if ev.CreatedAt.I64() > evc.CreatedAt.I64() {
// replace the event, it is newer
delete(evMap, i)
break
} else {
// we won't add it to the results slice
eventValue = eventValue[:0]
ev = nil
return
}
}
}
} else if ev.Kind.IsParameterizedReplaceable() &&
ev.Tags.GetFirst(tag.New("d")) != nil {
for i, evc := range evMap {
// parameterized replaceable means there should only be the
// newest for a pubkey, kind and the value field of the `d` tag.
if ev.Kind.Equal(evc.Kind) && bytes.Equal(ev.PubKey, evc.PubKey) &&
bytes.Equal(ev.Tags.GetFirst(tag.New("d")).Value(),
evc.Tags.GetFirst(tag.New("d")).Value()) {
if ev.CreatedAt.I64() > evc.CreatedAt.I64() {
log.T.F("event %0x,%s\n->replaces\n%0x,%s",
ev.ID, ev.Serialize(),
evc.ID, evc.Serialize(),
)
// replace the event, it is newer
delete(evMap, i)
break
} else {
// we won't add it to the results slice
eventValue = eventValue[:0]
ev = nil
return
}
}
}
}
return
}); chk.E(err) {
continue
}
if ev == nil {
continue
}
evMap[hex.Enc(ev.ID)] = ev
// add event counter key to accessed
ser := serial.FromKey(eventKey)
accessed[string(ser.Val)] = struct{}{}
}
return
})
if err != nil {
// this means shutdown, probably
if errors.Is(err, badger.ErrDBClosed) {
return
}
}
if done {
err = nil
return
}
select {
case <-r.Ctx.Done():
return
case <-c.Done():
return
default:
}
}
if len(evMap) > 0 {
for i := range evMap {
if len(evMap[i].PubKey) == 0 {
log.I.S(evMap[i])
continue
}
evs = append(evs, evMap[i])
}
sort.Sort(event.Descending(evs))
// bump the access times on all retrieved events. do this in a goroutine so the
// user's events are delivered immediately
go func() {
for ser := range accessed {
seri := serial.New([]byte(ser))
now := timestamp.Now()
err = r.Update(func(txn *badger.Txn) (err error) {
key := GetCounterKey(seri)
it := txn.NewIterator(badger.IteratorOptions{})
defer it.Close()
if it.Seek(key); it.ValidForPrefix(key) {
// update access record
if err = txn.Set(key, now.Bytes()); chk.E(err) {
return
}
}
// log.T.Ln("last access for", seri.Uint64(), now.U64())
return nil
})
}
}()
}
return
}
// PrepareFetchByIdsQuery is extracted from PrepareQueries to cover just the
// search for a provided list of event IDs given as a slice of byte slices.
func PrepareFetchByIdsQuery(ids [][]byte) (qs []idQuery, err error) {
qs = make([]idQuery, len(ids))
for i, idHex := range ids {
ih := id.New(eventid.NewWith([]byte(idHex)))
if ih == nil {
log.E.F("failed to decode event ID: %s", idHex)
// just ignore it, clients will be clients
continue
}
prf := prefixes.Id.Key(ih)
// log.T.F("id prefix to search on %0x from key %0x", prf, ih.Val)
qs[i] = idQuery{
index: i,
searchPrefix: prf,
}
}
return
}
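
For orientation, here is a hedged sketch of calling the new FetchByIds method directly, assuming an already-opened *ratel.T and IDs supplied as raw 32-byte values (which is what the /events handler passes after decoding the client's hex array with text.UnmarshalHexArray). The hex decoding helper is the repository's own DecAppend; everything else is a placeholder, not part of this commit.

package example

import (
	"fmt"

	"realy.lol/context"
	"realy.lol/event"
	"realy.lol/hex"
	"realy.lol/ratel"
)

// fetchSome is a usage sketch only; opening the ratel store is not shown in
// this commit, so db is assumed to be an already-initialised *ratel.T.
func fetchSome(db *ratel.T, hexIDs []string) (err error) {
	var ids [][]byte
	for _, h := range hexIDs {
		var raw []byte
		// hex.DecAppend is the decoder used elsewhere in this codebase
		if raw, err = hex.DecAppend(nil, []byte(h)); err != nil {
			return
		}
		ids = append(ids, raw)
	}
	var evs event.Ts
	if evs, err = db.FetchByIds(context.Bg(), ids); err != nil {
		return
	}
	for _, ev := range evs {
		// events come back sorted newest first
		fmt.Printf("%s\n", ev.Serialize())
	}
	return
}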

View File

@@ -40,6 +40,7 @@ func PrepareQueries(f *filter.T) (
 ) {
 	if f == nil {
 		err = errorf.E("filter cannot be nil")
+		return
 	}
 	switch {
 	// first if there is IDs, just search for them, this overrides all other filters

View File

@@ -27,7 +27,7 @@ func (s *Server) addEvent(c context.T, rl relay.I, ev *event.T,
 	wrap := &wrapper.Relay{I: sto}
 	advancedSaver, _ := sto.(relay.AdvancedSaver)
 	// don't allow storing event with protected marker as per nip-70 with auth enabled.
-	if s.authRequired && ev.Tags.ContainsProtectedMarker() {
+	if s.authRequired || !s.publicReadable && ev.Tags.ContainsProtectedMarker() {
 		if len(authedPubkey) == 0 || !bytes.Equal(ev.PubKey, authedPubkey) {
 			return false,
 				[]byte(fmt.Sprintf("event with relay marker tag '-' (nip-70 protected event) "+

realy/getremotefromreq.go (new file, 27 lines)
View File

@@ -0,0 +1,27 @@
package realy

import (
	"net/http"
	"strings"
)

func GetRemoteFromReq(r *http.Request) (rr string) {
	// reverse proxy should populate this field so we see the remote not the proxy
	rr = r.Header.Get("X-Forwarded-For")
	if rr != "" {
		splitted := strings.Split(rr, " ")
		if len(splitted) == 1 {
			rr = splitted[0]
		}
		if len(splitted) == 2 {
			rr = splitted[1]
		}
		// in case upstream doesn't set this or we are directly listening instead of
		// via reverse proxy or just if the header field is missing, put the
		// connection remote address into the websocket state data.
		if rr == "" {
			rr = r.RemoteAddr
		}
	}
	return
}
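
A small illustration of the helper's behaviour behind a reverse proxy (not part of the commit); the import path for this package is an assumption.

package realy_test

import (
	"fmt"
	"net/http/httptest"

	"realy.lol/realy" // assumed import path for the package above
)

func ExampleGetRemoteFromReq() {
	r := httptest.NewRequest("POST", "/event", nil)
	// a reverse proxy records the real client address in this header
	r.Header.Set("X-Forwarded-For", "203.0.113.7")
	fmt.Println(realy.GetRemoteFromReq(r))
	// Output: 203.0.113.7
}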

View File

@@ -48,7 +48,7 @@ func (s *Server) handleEvent(c context.T, ws *web.Socket, req []byte, sto store.
 	if !ws.AuthRequested() {
 		ws.RequestAuth()
 		if err = okenvelope.NewFrom(env.ID, false,
-			normalize.AuthRequired.F("auth required for request processing")).Write(ws); chk.T(err) {
+			normalize.AuthRequired.F("auth required for storing events")).Write(ws); chk.T(err) {
 		}
 		log.I.F("requesting auth from client %s", ws.RealRemote())
 		if err = authenvelope.NewChallengeWith(ws.Challenge()).Write(ws); chk.T(err) {
@@ -122,12 +122,14 @@ func (s *Server) handleEvent(c context.T, ws *web.Socket, req []byte, sto store.
 						normalize.Blocked.F("not processing or storing delete event containing delete event references")).Write(ws); chk.E(err) {
 						return
 					}
+					return
 				}
 				if !bytes.Equal(res[i].PubKey, env.T.PubKey) {
 					if err = okenvelope.NewFrom(env.ID, false,
 						normalize.Blocked.F("cannot delete other users' events (delete by e tag)")).Write(ws); chk.E(err) {
 						return
 					}
+					return
 				}
 			}
 		case bytes.Equal(t.Key(), []byte("a")):
@@ -159,12 +161,14 @@ func (s *Server) handleEvent(c context.T, ws *web.Socket, req []byte, sto store.
 					normalize.Blocked.F("delete event kind may not be deleted")).Write(ws); chk.E(err) {
 					return
 				}
+				return
 			}
 			if !kk.IsParameterizedReplaceable() {
 				if err = okenvelope.NewFrom(env.ID, false,
 					normalize.Error.F("delete tags with a tags containing non-parameterized-replaceable events cannot be processed")).Write(ws); chk.E(err) {
 					return
 				}
+				return
 			}
 			if !bytes.Equal(pk, env.T.PubKey) {
 				log.I.S(pk, env.T.PubKey, env.T)
@@ -172,6 +176,7 @@ func (s *Server) handleEvent(c context.T, ws *web.Socket, req []byte, sto store.
 					normalize.Blocked.F("cannot delete other users' events (delete by a tag)")).Write(ws); chk.E(err) {
 					return
 				}
+				return
 			}
 			f := filter.New()
 			f.Kinds.K = []*kind.T{kk}

View File

@@ -48,7 +48,7 @@ func (s *Server) auth(r *http.Request) (authed bool) {
 	var valid bool
 	var pubkey []byte
 	var err error
-	if valid, pubkey, err = httpauth.ValidateRequest(r, s.JWTVerifyFunc); chk.E(err) {
+	if valid, pubkey, err = httpauth.CheckAuth(r, s.JWTVerifyFunc); chk.E(err) {
 		return
 	}
 	if !valid {
@@ -77,6 +77,7 @@ func (s *Server) HandleHTTP(h Handler) {
 		"application/nostr+json": {
 			"/relayinfo": s.handleRelayInfo,
 			"/event": s.handleSimpleEvent,
+			"/events": s.handleEvents,
 		},
 		"": {
 			"/export": s.exportHandler,

View File

@@ -1,255 +0,0 @@
package realy
import (
"bytes"
"io"
"net/http"
"strings"
"realy.lol/context"
"realy.lol/envelopes/okenvelope"
"realy.lol/event"
"realy.lol/filter"
"realy.lol/hex"
"realy.lol/httpauth"
"realy.lol/ints"
"realy.lol/kind"
"realy.lol/normalize"
"realy.lol/relay"
"realy.lol/sha256"
"realy.lol/tag"
)
func GetRemoteFromReq(r *http.Request) (rr string) {
// reverse proxy should populate this field so we see the remote not the proxy
rr = r.Header.Get("X-Forwarded-For")
if rr != "" {
splitted := strings.Split(rr, " ")
if len(splitted) == 1 {
rr = splitted[0]
}
if len(splitted) == 2 {
rr = splitted[1]
}
// in case upstream doesn't set this or we are directly listening instead of
// via reverse proxy or just if the header field is missing, put the
// connection remote address into the websocket state data.
if rr == "" {
rr = r.RemoteAddr
}
}
return
}
func (s *Server) handleSimpleEvent(h Handler) {
log.I.F("event")
var err error
var ok bool
sto := s.relay.Storage()
var req []byte
if req, err = io.ReadAll(h.Request.Body); chk.E(err) {
return
}
advancedDeleter, _ := sto.(relay.AdvancedDeleter)
ev := event.New()
if req, err = ev.Unmarshal(req); chk.T(err) {
return
}
var valid bool
var pubkey []byte
if valid, pubkey, err = httpauth.ValidateRequest(h.Request, s.JWTVerifyFunc); chk.E(err) {
return
}
log.I.F("valid request %0x", pubkey)
if !valid {
return
}
rr := GetRemoteFromReq(h.Request)
c := context.Bg()
rw := h.ResponseWriter
accept, notice, after := s.relay.AcceptEvent(c, ev, h.Request, rr, pubkey)
if !accept {
if strings.Contains(notice, "mute") {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Blocked.F(notice)).Write(rw); chk.T(err) {
}
return
}
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Invalid.F(notice)).Write(rw); chk.T(err) {
}
return
}
if !bytes.Equal(ev.GetIDBytes(), ev.ID) {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Invalid.F("event id is computed incorrectly")).Write(rw); chk.E(err) {
return
}
return
}
if ok, err = ev.Verify(); chk.T(err) {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Error.F("failed to verify signature")).Write(rw); chk.E(err) {
return
}
} else if !ok {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Error.F("signature is invalid")).Write(rw); chk.E(err) {
return
}
return
}
storage := s.relay.Storage()
if storage == nil {
panic("no event store has been set to store event")
}
if ev.Kind.K == kind.Deletion.K {
log.I.F("delete event\n%s", ev.Serialize())
for _, t := range ev.Tags.Value() {
var res []*event.T
if t.Len() >= 2 {
switch {
case bytes.Equal(t.Key(), []byte("e")):
evId := make([]byte, sha256.Size)
if _, err = hex.DecBytes(evId, t.Value()); chk.E(err) {
continue
}
res, err = storage.QueryEvents(c, &filter.T{IDs: tag.New(evId)})
if err != nil {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Error.F("failed to query for target event")).Write(rw); chk.E(err) {
return
}
return
}
for i := range res {
if res[i].Kind.Equal(kind.Deletion) {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Blocked.F("not processing or storing delete event containing delete event references")).Write(rw); chk.E(err) {
return
}
}
if !bytes.Equal(res[i].PubKey, ev.PubKey) {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Blocked.F("cannot delete other users' events (delete by e tag)")).Write(rw); chk.E(err) {
return
}
}
}
case bytes.Equal(t.Key(), []byte("a")):
split := bytes.Split(t.Value(), []byte{':'})
if len(split) != 3 {
continue
}
var pk []byte
if pk, err = hex.DecAppend(nil, split[1]); chk.E(err) {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Invalid.F("delete event a tag pubkey value invalid: %s",
t.Value())).Write(rw); chk.E(err) {
return
}
return
}
kin := ints.New(uint16(0))
if _, err = kin.Unmarshal(split[0]); chk.E(err) {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Invalid.F("delete event a tag kind value invalid: %s",
t.Value())).Write(rw); chk.E(err) {
return
}
return
}
kk := kind.New(kin.Uint16())
if kk.Equal(kind.Deletion) {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Blocked.F("delete event kind may not be deleted")).Write(rw); chk.E(err) {
return
}
}
if !kk.IsParameterizedReplaceable() {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Error.F("delete tags with a tags containing non-parameterized-replaceable events cannot be processed")).Write(rw); chk.E(err) {
return
}
}
if !bytes.Equal(pk, ev.PubKey) {
log.I.S(pk, ev.PubKey, ev)
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Blocked.F("cannot delete other users' events (delete by a tag)")).Write(rw); chk.E(err) {
return
}
}
f := filter.New()
f.Kinds.K = []*kind.T{kk}
f.Authors.Append(pk)
f.Tags.AppendTags(tag.New([]byte{'#', 'd'}, split[2]))
res, err = storage.QueryEvents(c, f)
if err != nil {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Error.F("failed to query for target event")).Write(rw); chk.E(err) {
return
}
return
}
}
}
if len(res) < 1 {
continue
}
var resTmp []*event.T
for _, v := range res {
if ev.CreatedAt.U64() >= v.CreatedAt.U64() {
resTmp = append(resTmp, v)
}
}
res = resTmp
for _, target := range res {
if target.Kind.K == kind.Deletion.K {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Error.F("cannot delete delete event %s",
ev.ID)).Write(rw); chk.E(err) {
return
}
}
if target.CreatedAt.Int() > ev.CreatedAt.Int() {
log.I.F("not deleting\n%d%\nbecause delete event is older\n%d",
target.CreatedAt.Int(), ev.CreatedAt.Int())
continue
}
if !bytes.Equal(target.PubKey, ev.PubKey) {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Error.F("only author can delete event")).Write(rw); chk.E(err) {
return
}
return
}
if advancedDeleter != nil {
advancedDeleter.BeforeDelete(c, t.Value(), ev.PubKey)
}
if err = sto.DeleteEvent(c, target.EventID()); chk.T(err) {
if err = okenvelope.NewFrom(ev.ID, false,
normalize.Error.F(err.Error())).Write(rw); chk.E(err) {
return
}
return
}
if advancedDeleter != nil {
advancedDeleter.AfterDelete(t.Value(), ev.PubKey)
}
}
res = nil
}
if err = okenvelope.NewFrom(ev.ID, true).Write(rw); chk.E(err) {
return
}
}
var reason []byte
ok, reason = s.addEvent(c, s.relay, ev, h.Request, rr, pubkey)
if err = okenvelope.NewFrom(ev.ID, ok, reason).Write(rw); chk.E(err) {
return
}
if after != nil {
after()
}
return
}

View File

@@ -0,0 +1,127 @@
package realy
import (
"bytes"
"io"
"net/http"
"sort"
"realy.lol/context"
"realy.lol/ec/schnorr"
"realy.lol/event"
"realy.lol/httpauth"
"realy.lol/relay"
"realy.lol/sha256"
"realy.lol/store"
"realy.lol/tag"
"realy.lol/text"
)
// handleEvents is the HTTP endpoint that accepts a JSON array of hex-encoded
// event IDs, e.g.
//
// ["<hex>",...]
//
// and returns either line-structured JSON (JSONL) with one event per line of
// the results, or an OK,false,"reason:..." message.
//
// The relay does not tell the client when events have been excluded for lack of
// the required authentication on privileged kinds. The /api endpoint lists the
// event kinds that require auth; if a result is of such a kind and the request
// was not authenticated, or the authenticated pubkey does not match a pubkey in
// the event, the event is simply omitted from the results.
func (s *Server) handleEvents(h Handler) {
log.I.F("events")
var fetcher store.FetchByIds
var ok bool
var err error
if fetcher, ok = s.relay.(store.FetchByIds); ok {
var pubkey []byte
if _, pubkey, err = httpauth.CheckAuth(h.Request, s.JWTVerifyFunc); chk.E(err) {
return
}
// if auth is enabled, and either required or not set to public readable, and
// the client did not auth with an Authorization header, send an HTTP
// Unauthorized status response.
var auther relay.Authenticator
if auther, ok = s.relay.(relay.Authenticator); ok &&
auther.AuthEnabled() &&
(s.authRequired || !s.publicReadable) &&
len(pubkey) < 1 {
http.Error(h.ResponseWriter,
"Authentication required for method", http.StatusUnauthorized)
return
}
var req []byte
if req, err = io.ReadAll(h.Request.Body); chk.E(err) {
return
}
// unmarshal the request
var t [][]byte
if t, req, err = text.UnmarshalHexArray(req, sha256.Size); chk.E(err) {
return
}
if len(req) > 0 {
log.I.S("extra bytes after hex array:\n%s", req)
}
var evs event.Ts
if evs, err = fetcher.FetchByIds(context.Bg(), t); chk.E(err) {
return
}
// when auth is enabled but the relay is publicly readable, filter out
// privileged kinds that the authed pubkey (if any) is not entitled to.
if auther, ok = s.relay.(relay.Authenticator); ok &&
auther.AuthEnabled() && s.publicReadable {
var evTmp event.Ts
if len(pubkey) < 1 {
// if not authed, remove all privileged event kinds (the user must auth to see
// their own DMs)
for _, ev := range evs {
if !ev.Kind.IsPrivileged() {
evTmp = append(evTmp, ev)
}
}
evs = evTmp
} else if len(pubkey) == schnorr.PubKeyBytesLen {
// if authed, filter out any privileged kinds that don't also contain the authed
// pubkey in either author or p tags
for _, ev := range evs {
if !ev.Kind.IsPrivileged() {
evTmp = append(evTmp, ev)
} else {
var containsPubkey bool
if ev.Tags != nil {
containsPubkey = ev.Tags.ContainsAny([]byte{'p'}, tag.New(pubkey))
}
if !bytes.Equal(ev.PubKey, pubkey) || containsPubkey {
log.I.F("authed user %0x not privileged to receive event\n%s",
pubkey, ev.Serialize())
} else {
// authed pubkey matches either author pubkey or is tagged in privileged event
evTmp = append(evTmp, ev)
}
}
}
evs = evTmp
}
}
// sort in descending order (reverse chronological order)
sort.Slice(evs, func(i, j int) bool {
return evs[i].CreatedAt.Int() > evs[j].CreatedAt.Int()
})
for _, ev := range evs {
if _, err = h.ResponseWriter.Write(ev.Marshal(nil)); chk.E(err) {
return
}
// results are jsonl format, one line per event
if _, err = h.ResponseWriter.Write([]byte{'\n'}); chk.E(err) {
return
}
}
http.Error(h.ResponseWriter, "", http.StatusOK)
} else {
http.Error(h.ResponseWriter, "Method not implemented", NI)
}
return
}
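
A hedged client-side sketch of the new /events endpoint: POST a JSON array of hex event IDs with a Bearer token and read the JSONL response line by line. The relay URL and token are placeholders, and the Accept header follows the content-type-keyed routing table shown above, which is assumed to be matched against the request's accepted media type.

package example

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

// fetchEvents is a sketch of a client for the /events endpoint added in this
// commit; relayURL and bearer are placeholders.
func fetchEvents(relayURL, bearer string, hexIDs []string) (lines []string, err error) {
	body := `["` + strings.Join(hexIDs, `","`) + `"]`
	var req *http.Request
	if req, err = http.NewRequest("POST", relayURL+"/events", strings.NewReader(body)); err != nil {
		return
	}
	req.Header.Set("Authorization", "Bearer "+bearer)
	req.Header.Set("Accept", "application/nostr+json")
	var resp *http.Response
	if resp, err = http.DefaultClient.Do(req); err != nil {
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		err = fmt.Errorf("unexpected status: %s", resp.Status)
		return
	}
	// one event per line (JSONL)
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		if l := sc.Text(); l != "" {
			lines = append(lines, l)
		}
	}
	err = sc.Err()
	return
}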

View File

@@ -0,0 +1,215 @@
package realy
import (
"bytes"
"fmt"
"io"
"net/http"
"realy.lol/context"
"realy.lol/event"
"realy.lol/filter"
"realy.lol/hex"
"realy.lol/httpauth"
"realy.lol/ints"
"realy.lol/kind"
"realy.lol/relay"
"realy.lol/sha256"
"realy.lol/tag"
)
const (
NA = http.StatusNotAcceptable
NI = http.StatusNotImplemented
ERR = http.StatusInternalServerError
)
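// handleSimpleEvent accepts a single event posted over HTTP. The request is
// authenticated with httpauth.CheckAuth (NIP-98 event or JWT bearer token), the
// event ID and signature are checked, deletion events are processed against the
// event store, and anything else is handed to addEvent; results are reported as
// plain HTTP status codes rather than websocket OK envelopes.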
func (s *Server) handleSimpleEvent(h Handler) {
log.I.F("event")
var err error
var ok bool
sto := s.relay.Storage()
var req []byte
if req, err = io.ReadAll(h.Request.Body); chk.E(err) {
return
}
advancedDeleter, _ := sto.(relay.AdvancedDeleter)
ev := event.New()
if req, err = ev.Unmarshal(req); chk.T(err) {
return
}
var valid bool
var pubkey []byte
if valid, pubkey, err = httpauth.CheckAuth(h.Request, s.JWTVerifyFunc); chk.E(err) {
return
}
rw := h.ResponseWriter
if !valid {
http.Error(rw,
fmt.Sprintf("invalid: %s", err.Error()), NA)
}
rr := GetRemoteFromReq(h.Request)
c := context.Bg()
accept, notice, after := s.relay.AcceptEvent(c, ev, h.Request, rr, pubkey)
if !accept {
http.Error(rw, notice, NA)
return
}
if !bytes.Equal(ev.GetIDBytes(), ev.ID) {
http.Error(rw,
"Event id is computed incorrectly", NA)
return
}
if ok, err = ev.Verify(); chk.T(err) {
http.Error(rw,
"failed to verify signature", NA)
return
} else if !ok {
http.Error(rw,
"signature is invalid", NA)
return
}
storage := s.relay.Storage()
if storage == nil {
panic("no event store has been set to store event")
}
if ev.Kind.K == kind.Deletion.K {
log.I.F("delete event\n%s", ev.Serialize())
for _, t := range ev.Tags.Value() {
var res []*event.T
if t.Len() >= 2 {
switch {
case bytes.Equal(t.Key(), []byte("e")):
evId := make([]byte, sha256.Size)
if _, err = hex.DecBytes(evId, t.Value()); chk.E(err) {
continue
}
res, err = storage.QueryEvents(c, &filter.T{IDs: tag.New(evId)})
if err != nil {
http.Error(rw,
err.Error(), ERR)
return
}
for i := range res {
if res[i].Kind.Equal(kind.Deletion) {
http.Error(rw,
"not processing or storing delete event containing delete event references",
NA)
}
if !bytes.Equal(res[i].PubKey, ev.PubKey) {
http.Error(rw,
"cannot delete other users' events (delete by e tag)",
NA)
return
}
}
case bytes.Equal(t.Key(), []byte("a")):
split := bytes.Split(t.Value(), []byte{':'})
if len(split) != 3 {
continue
}
var pk []byte
if pk, err = hex.DecAppend(nil, split[1]); chk.E(err) {
http.Error(rw,
fmt.Sprintf("delete event a tag pubkey value invalid: %s",
t.Value()), NA)
return
}
kin := ints.New(uint16(0))
if _, err = kin.Unmarshal(split[0]); chk.E(err) {
http.Error(rw,
fmt.Sprintf("delete event a tag kind value invalid: %s",
t.Value()), NA)
return
}
kk := kind.New(kin.Uint16())
if kk.Equal(kind.Deletion) {
http.Error(rw,
"delete event kind may not be deleted",
NA)
return
}
if !kk.IsParameterizedReplaceable() {
http.Error(rw,
"delete tags with a tags containing non-parameterized-replaceable events cannot be processed",
NA)
return
}
if !bytes.Equal(pk, ev.PubKey) {
log.I.S(pk, ev.PubKey, ev)
http.Error(rw,
"cannot delete other users' events (delete by a tag)",
NA)
return
}
f := filter.New()
f.Kinds.K = []*kind.T{kk}
f.Authors.Append(pk)
f.Tags.AppendTags(tag.New([]byte{'#', 'd'}, split[2]))
res, err = storage.QueryEvents(c, f)
if err != nil {
http.Error(rw, err.Error(), ERR)
return
}
}
}
if len(res) < 1 {
continue
}
var resTmp []*event.T
for _, v := range res {
if ev.CreatedAt.U64() >= v.CreatedAt.U64() {
resTmp = append(resTmp, v)
}
}
res = resTmp
for _, target := range res {
if target.Kind.K == kind.Deletion.K {
http.Error(rw,
fmt.Sprintf("cannot delete delete event %s",
ev.ID), NA)
return
}
if target.CreatedAt.Int() > ev.CreatedAt.Int() {
// todo: shouldn't this be an error?
log.I.F("not deleting\n%d%\nbecause delete event is older\n%d",
target.CreatedAt.Int(), ev.CreatedAt.Int())
continue
}
if !bytes.Equal(target.PubKey, ev.PubKey) {
http.Error(rw,
"only author can delete event",
NA)
return
}
if advancedDeleter != nil {
advancedDeleter.BeforeDelete(c, t.Value(), ev.PubKey)
}
if err = sto.DeleteEvent(c, target.EventID()); chk.T(err) {
http.Error(rw,
err.Error(), ERR)
return
}
if advancedDeleter != nil {
advancedDeleter.AfterDelete(t.Value(), ev.PubKey)
}
}
res = nil
}
http.Error(rw, "", http.StatusOK)
return
}
var reason []byte
ok, reason = s.addEvent(c, s.relay, ev, h.Request, rr, pubkey)
// return the response whether true or false and any reason if false
if ok {
http.Error(rw, "", http.StatusOK)
} else {
http.Error(rw, string(reason), ERR)
}
if after != nil {
// do this in the background and let the http response close
go after()
}
return
}

View File

@@ -33,8 +33,8 @@ type I interface {
 	// messages, that are not on the mute list, that do not yet have a reply, should accept
 	// direct and group message events until there is three and thereafter will be restricted
 	// until the user adds them to their follow list.
-	AcceptEvent(c context.T, ev *event.T, hr *http.Request, origin string, authedPubkey []byte) (accept bool,
-		notice string, afterSave func())
+	AcceptEvent(c context.T, ev *event.T, hr *http.Request, origin string,
+		authedPubkey []byte) (accept bool, notice string, afterSave func())
 	// Storage returns the realy storage implementation.
 	Storage() store.I
 	// NoLimiter returns true if the provided npub should not be rate limited.

View File

@@ -49,6 +49,10 @@ type Querent interface {
 	QueryEvents(c context.T, f *filter.T) (evs event.Ts, err error)
 }
 
+type FetchByIds interface {
+	FetchByIds(c context.T, ids [][]byte) (evs event.Ts, err error)
+}
+
 type Counter interface {
 	// CountEvents performs the same work as QueryEvents but instead of delivering
 	// the events that were found it just returns the count of events