diff --git a/cmd/jurl/main.go b/cmd/jurl/main.go index 4ea87c8..bbe97c9 100644 --- a/cmd/jurl/main.go +++ b/cmd/jurl/main.go @@ -174,7 +174,8 @@ func Post(filePath string, ur *url.URL, bearer string) (err error) { Host: ur.Host, } r.Header.Add("User-Agent", userAgent) - r.Header.Add("Authorization", "Authorization "+bearer) + r.Header.Add("Authorization", "Bearer "+bearer) + r.Header.Add("Accept", "application/nostr+json") r.GetBody = func() (rc io.ReadCloser, err error) { rc = payload return diff --git a/filter/filter.go b/filter/filter.go index 3b0ffaa..f146b91 100644 --- a/filter/filter.go +++ b/filter/filter.go @@ -298,8 +298,7 @@ func (f *T) Unmarshal(b []byte) (r []byte, err error) { goto invalid } var ff [][]byte - if ff, r, err = text.UnmarshalHexArray(r, - sha256.Size); chk.E(err) { + if ff, r, err = text.UnmarshalHexArray(r, sha256.Size); chk.E(err) { return } f.IDs = tag.New(ff...) diff --git a/httpauth/nip98auth_test.go b/httpauth/nip98auth_test.go index 920e2c2..044574e 100644 --- a/httpauth/nip98auth_test.go +++ b/httpauth/nip98auth_test.go @@ -21,7 +21,7 @@ func TestMakeNIP98Request_ValidateNIP98Request(t *testing.T) { // // } // var pk []byte // var valid bool - // if valid, pk, err = ValidateRequest(r, nil); chk.E(err) { + // if valid, pk, err = CheckAuth(r, nil); chk.E(err) { // t.Fatal(err) // } // if !valid { diff --git a/httpauth/validate.go b/httpauth/validate.go index eba5321..f3a9236 100644 --- a/httpauth/validate.go +++ b/httpauth/validate.go @@ -11,14 +11,14 @@ import ( "realy.lol/tag" ) -// ValidateRequest verifies a received http.Request has got a valid +// CheckAuth verifies a received http.Request has got a valid // authentication event or token in it, and provides the public key that should be // verified to be authorized to access the resource associated with the request. // // A VerifyJWTFunc should be provided in order to search the event store for a // kind 13004 with a JWT signer pubkey that is granted authority for the request. 
-func ValidateRequest(r *http.Request, vfn VerifyJWTFunc) (valid bool, pubkey []byte, err error) { - log.I.F("validating nip-98") +func CheckAuth(r *http.Request, vfn VerifyJWTFunc) (valid bool, pubkey []byte, err error) { + log.I.F("validating auth %v", vfn) val := r.Header.Get(HeaderKey) if val == "" { err = errorf.E("'%s' key missing from request header", HeaderKey) @@ -113,7 +113,7 @@ func ValidateRequest(r *http.Request, vfn VerifyJWTFunc) (valid bool, pubkey []b } pubkey = ev.PubKey case strings.HasPrefix(val, JWTPrefix): - if vfn != nil { + if vfn == nil { err = errorf.E("JWT bearer header found but no JWT verifier function provided") return } diff --git a/ratel/fetchByIds.go b/ratel/fetchByIds.go new file mode 100644 index 0000000..18c89c8 --- /dev/null +++ b/ratel/fetchByIds.go @@ -0,0 +1,281 @@ +package ratel + +import ( + "bytes" + "errors" + "sort" + "strconv" + "time" + + "github.com/dgraph-io/badger/v4" + + "realy.lol/context" + "realy.lol/event" + "realy.lol/eventid" + "realy.lol/hex" + "realy.lol/ratel/keys/id" + "realy.lol/ratel/keys/serial" + "realy.lol/ratel/prefixes" + "realy.lol/sha256" + "realy.lol/tag" + "realy.lol/timestamp" +) + +type idQuery struct { + index int + searchPrefix []byte + start []byte +} + +func (r *T) FetchByIds(c context.T, ids [][]byte) (evs event.Ts, err error) { + var queries []idQuery + if queries, err = PrepareFetchByIdsQuery(ids); chk.E(err) { + return + } + eventKeys := make(map[string]struct{}) + for _, q := range queries { + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + default: + } + // first, search for the event keys for fetching them next + err = r.View(func(txn *badger.Txn) (err error) { + // iterate only through keys, forward from the id search prefix + opts := badger.IteratorOptions{ + PrefetchValues: false, + } + it := txn.NewIterator(opts) + defer it.Close() + for it.Seek(q.searchPrefix); it.ValidForPrefix(q.searchPrefix); it.Next() { + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return +
default: + } + item := it.Item() + k := item.KeyCopy(nil) + ser := serial.FromKey(k) + idx := prefixes.Event.Key(ser) + eventKeys[string(idx)] = struct{}{} + } + return + }) + if chk.E(err) { + // this means shutdown, probably + if errors.Is(err, badger.ErrDBClosed) { + return + } + } + log.T.F("found %d event indexes from %d queries", len(eventKeys), len(queries)) + } + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + default: + } + // if events were found that should be deleted, delete them + var delEvs [][]byte + defer func() { + for _, d := range delEvs { + chk.E(r.DeleteEvent(r.Ctx, eventid.NewWith(d))) + } + }() + evMap := make(map[string]*event.T) + accessed := make(map[string]struct{}) + for ek := range eventKeys { + eventKey := []byte(ek) + var done bool + // retrieve the event matching the event index key + err = r.View(func(txn *badger.Txn) (err error) { + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + default: + } + opts := badger.IteratorOptions{Reverse: true} + it := txn.NewIterator(opts) + defer it.Close() + for it.Seek(eventKey); it.ValidForPrefix(eventKey); it.Next() { + item := it.Item() + if r.HasL2 && item.ValueSize() == sha256.Size { + // this is a stub entry that indicates an L2 needs to be accessed for + // it, so we populate only the event.T.ID and return the result, the + // caller will expect this as a signal to query the L2 event store. 
+ var eventValue []byte + ev := &event.T{} + if eventValue, err = item.ValueCopy(nil); chk.E(err) { + continue + } + log.T.F("found event stub %0x must seek in L2", eventValue) + ev.ID = eventValue + select { + case <-c.Done(): + return + case <-r.Ctx.Done(): + log.T.Ln("backend context canceled") + return + default: + } + evMap[hex.Enc(ev.ID)] = ev + return + } + ev := &event.T{} + if err = item.Value(func(eventValue []byte) (err error) { + var rem []byte + if rem, err = r.Unmarshal(ev, eventValue); chk.E(err) { + return + } + if len(rem) > 0 { + log.T.S(rem) + } + // check for expiration timestamps and note the event ID for deletion before + // *not* delivering them + if et := ev.Tags.GetFirst(tag.New("expiration")); et != nil { + var exp uint64 + if exp, err = strconv.ParseUint(string(et.Value()), 10, 64); chk.E(err) { + return + } + if int64(exp) < time.Now().Unix() { + // the expiration time is in the past so this needs to be deleted + delEvs = append(delEvs, ev.ID) + return + } + } + // check if this event is replaced by one we already have in the result. + if ev.Kind.IsReplaceable() { + for i, evc := range evMap { + // replaceable means there should be only the newest for the + // pubkey and kind. + if bytes.Equal(ev.PubKey, evc.PubKey) && ev.Kind.Equal(evc.Kind) { + if ev.CreatedAt.I64() > evc.CreatedAt.I64() { + // replace the event, it is newer + delete(evMap, i) + break + } else { + // we won't add it to the results slice + eventValue = eventValue[:0] + ev = nil + return + } + } + } + } else if ev.Kind.IsParameterizedReplaceable() && + ev.Tags.GetFirst(tag.New("d")) != nil { + for i, evc := range evMap { + // parameterized replaceable means there should only be the + // newest for a pubkey, kind and the value field of the `d` tag.
+ if ev.Kind.Equal(evc.Kind) && bytes.Equal(ev.PubKey, evc.PubKey) && + bytes.Equal(ev.Tags.GetFirst(tag.New("d")).Value(), + evc.Tags.GetFirst(tag.New("d")).Value()) { + if ev.CreatedAt.I64() > evc.CreatedAt.I64() { + log.T.F("event %0x,%s\n->replaces\n%0x,%s", + ev.ID, ev.Serialize(), + evc.ID, evc.Serialize(), + ) + // replace the event, it is newer + delete(evMap, i) + break + } else { + // we won't add it to the results slice + eventValue = eventValue[:0] + ev = nil + return + } + } + } + } + return + }); chk.E(err) { + continue + } + if ev == nil { + continue + } + evMap[hex.Enc(ev.ID)] = ev + // add event counter key to accessed + ser := serial.FromKey(eventKey) + accessed[string(ser.Val)] = struct{}{} + } + return + }) + if err != nil { + // this means shutdown, probably + if errors.Is(err, badger.ErrDBClosed) { + return + } + } + if done { + err = nil + return + } + select { + case <-r.Ctx.Done(): + return + case <-c.Done(): + return + default: + } + } + if len(evMap) > 0 { + for i := range evMap { + if len(evMap[i].PubKey) == 0 { + log.I.S(evMap[i]) + continue + } + evs = append(evs, evMap[i]) + } + sort.Sort(event.Descending(evs)) + // bump the access times on all retrieved events. do this in a goroutine so the + // user's events are delivered immediately + go func() { + for ser := range accessed { + seri := serial.New([]byte(ser)) + now := timestamp.Now() + err = r.Update(func(txn *badger.Txn) (err error) { + key := GetCounterKey(seri) + it := txn.NewIterator(badger.IteratorOptions{}) + defer it.Close() + if it.Seek(key); it.ValidForPrefix(key) { + // update access record + if err = txn.Set(key, now.Bytes()); chk.E(err) { + return + } + } + // log.T.Ln("last access for", seri.Uint64(), now.U64()) + return nil + }) + } + }() + } + return +} + +// PrepareFetchByIdsQuery is extracted from PrepareQueries to cover just the +// search for a provided list of event IDs as a slice of bytes arrays. 
+func PrepareFetchByIdsQuery(ids [][]byte) (qs []idQuery, err error) { + qs = make([]idQuery, len(ids)) + for i, idHex := range ids { + ih := id.New(eventid.NewWith([]byte(idHex))) + if ih == nil { + log.E.F("failed to decode event ID: %s", idHex) + // just ignore it, clients will be clients + continue + } + prf := prefixes.Id.Key(ih) + // log.T.F("id prefix to search on %0x from key %0x", prf, ih.Val) + qs[i] = idQuery{ + index: i, + searchPrefix: prf, + } + } + return +} diff --git a/ratel/preparequeries.go b/ratel/preparequeries.go index c4dc6dd..7962605 100644 --- a/ratel/preparequeries.go +++ b/ratel/preparequeries.go @@ -40,6 +40,7 @@ func PrepareQueries(f *filter.T) ( ) { if f == nil { err = errorf.E("filter cannot be nil") + return } switch { // first if there is IDs, just search for them, this overrides all other filters diff --git a/realy/addEvent.go b/realy/addEvent.go index b7eb23e..2a8a54a 100644 --- a/realy/addEvent.go +++ b/realy/addEvent.go @@ -27,7 +27,7 @@ func (s *Server) addEvent(c context.T, rl relay.I, ev *event.T, wrap := &wrapper.Relay{I: sto} advancedSaver, _ := sto.(relay.AdvancedSaver) // don't allow storing event with protected marker as per nip-70 with auth enabled. 
- if s.authRequired && ev.Tags.ContainsProtectedMarker() { + if (s.authRequired || !s.publicReadable) && ev.Tags.ContainsProtectedMarker() { if len(authedPubkey) == 0 || !bytes.Equal(ev.PubKey, authedPubkey) { return false, []byte(fmt.Sprintf("event with relay marker tag '-' (nip-70 protected event) "+ diff --git a/realy/getremotefromreq.go b/realy/getremotefromreq.go new file mode 100644 index 0000000..c8c6b1b --- /dev/null +++ b/realy/getremotefromreq.go @@ -0,0 +1,27 @@ +package realy + +import ( + "net/http" + "strings" +) + +func GetRemoteFromReq(r *http.Request) (rr string) { + // reverse proxy should populate this field so we see the remote not the proxy + rr = r.Header.Get("X-Forwarded-For") + if rr != "" { + splitted := strings.Split(rr, " ") + if len(splitted) == 1 { + rr = splitted[0] + } + if len(splitted) == 2 { + rr = splitted[1] + } + } + // in case upstream doesn't set this or we are directly listening instead of + // via reverse proxy or just if the header field is missing, put the + // connection remote address into the websocket state data. + if rr == "" { + rr = r.RemoteAddr + } + return +} diff --git a/realy/handleEvent.go b/realy/handleEvent.go index 9a0e4cb..0b78580 100644 --- a/realy/handleEvent.go +++ b/realy/handleEvent.go @@ -48,7 +48,7 @@ func (s *Server) handleEvent(c context.T, ws *web.Socket, req []byte, sto store. if !ws.AuthRequested() { ws.RequestAuth() if err = okenvelope.NewFrom(env.ID, false, - normalize.AuthRequired.F("auth required for request processing")).Write(ws); chk.T(err) { + normalize.AuthRequired.F("auth required for storing events")).Write(ws); chk.T(err) { } log.I.F("requesting auth from client %s", ws.RealRemote()) if err = authenvelope.NewChallengeWith(ws.Challenge()).Write(ws); chk.T(err) { @@ -122,12 +122,14 @@ func (s *Server) handleEvent(c context.T, ws *web.Socket, req []byte, sto store.
normalize.Blocked.F("not processing or storing delete event containing delete event references")).Write(ws); chk.E(err) { return } + return } if !bytes.Equal(res[i].PubKey, env.T.PubKey) { if err = okenvelope.NewFrom(env.ID, false, normalize.Blocked.F("cannot delete other users' events (delete by e tag)")).Write(ws); chk.E(err) { return } + return } } case bytes.Equal(t.Key(), []byte("a")): @@ -159,12 +161,14 @@ func (s *Server) handleEvent(c context.T, ws *web.Socket, req []byte, sto store. normalize.Blocked.F("delete event kind may not be deleted")).Write(ws); chk.E(err) { return } + return } if !kk.IsParameterizedReplaceable() { if err = okenvelope.NewFrom(env.ID, false, normalize.Error.F("delete tags with a tags containing non-parameterized-replaceable events cannot be processed")).Write(ws); chk.E(err) { return } + return } if !bytes.Equal(pk, env.T.PubKey) { log.I.S(pk, env.T.PubKey, env.T) @@ -172,6 +176,7 @@ func (s *Server) handleEvent(c context.T, ws *web.Socket, req []byte, sto store. 
normalize.Blocked.F("cannot delete other users' events (delete by a tag)")).Write(ws); chk.E(err) { return } + return } f := filter.New() f.Kinds.K = []*kind.T{kk} diff --git a/realy/handleHTTP.go b/realy/handleHTTP.go index a715c9e..7671fbf 100644 --- a/realy/handleHTTP.go +++ b/realy/handleHTTP.go @@ -48,7 +48,7 @@ func (s *Server) auth(r *http.Request) (authed bool) { var valid bool var pubkey []byte var err error - if valid, pubkey, err = httpauth.ValidateRequest(r, s.JWTVerifyFunc); chk.E(err) { + if valid, pubkey, err = httpauth.CheckAuth(r, s.JWTVerifyFunc); chk.E(err) { return } if !valid { @@ -77,6 +77,7 @@ func (s *Server) HandleHTTP(h Handler) { "application/nostr+json": { "/relayinfo": s.handleRelayInfo, "/event": s.handleSimpleEvent, + "/events": s.handleEvents, }, "": { "/export": s.exportHandler, diff --git a/realy/nostrJSONhandleEvent.go b/realy/nostrJSONhandleEvent.go deleted file mode 100644 index d58ad12..0000000 --- a/realy/nostrJSONhandleEvent.go +++ /dev/null @@ -1,255 +0,0 @@ -package realy - -import ( - "bytes" - "io" - "net/http" - "strings" - - "realy.lol/context" - "realy.lol/envelopes/okenvelope" - "realy.lol/event" - "realy.lol/filter" - "realy.lol/hex" - "realy.lol/httpauth" - "realy.lol/ints" - "realy.lol/kind" - "realy.lol/normalize" - "realy.lol/relay" - "realy.lol/sha256" - "realy.lol/tag" -) - -func GetRemoteFromReq(r *http.Request) (rr string) { - // reverse proxy should populate this field so we see the remote not the proxy - rr = r.Header.Get("X-Forwarded-For") - if rr != "" { - splitted := strings.Split(rr, " ") - if len(splitted) == 1 { - rr = splitted[0] - } - if len(splitted) == 2 { - rr = splitted[1] - } - // in case upstream doesn't set this or we are directly listening instead of - // via reverse proxy or just if the header field is missing, put the - // connection remote address into the websocket state data. 
- if rr == "" { - rr = r.RemoteAddr - } - } - return -} - -func (s *Server) handleSimpleEvent(h Handler) { - log.I.F("event") - var err error - var ok bool - sto := s.relay.Storage() - var req []byte - if req, err = io.ReadAll(h.Request.Body); chk.E(err) { - return - } - advancedDeleter, _ := sto.(relay.AdvancedDeleter) - ev := event.New() - if req, err = ev.Unmarshal(req); chk.T(err) { - return - } - var valid bool - var pubkey []byte - if valid, pubkey, err = httpauth.ValidateRequest(h.Request, s.JWTVerifyFunc); chk.E(err) { - return - } - log.I.F("valid request %0x", pubkey) - if !valid { - return - } - rr := GetRemoteFromReq(h.Request) - c := context.Bg() - rw := h.ResponseWriter - accept, notice, after := s.relay.AcceptEvent(c, ev, h.Request, rr, pubkey) - if !accept { - if strings.Contains(notice, "mute") { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Blocked.F(notice)).Write(rw); chk.T(err) { - } - return - } - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Invalid.F(notice)).Write(rw); chk.T(err) { - } - return - } - if !bytes.Equal(ev.GetIDBytes(), ev.ID) { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Invalid.F("event id is computed incorrectly")).Write(rw); chk.E(err) { - return - } - return - } - if ok, err = ev.Verify(); chk.T(err) { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Error.F("failed to verify signature")).Write(rw); chk.E(err) { - return - } - } else if !ok { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Error.F("signature is invalid")).Write(rw); chk.E(err) { - return - } - return - } - storage := s.relay.Storage() - if storage == nil { - panic("no event store has been set to store event") - } - if ev.Kind.K == kind.Deletion.K { - log.I.F("delete event\n%s", ev.Serialize()) - for _, t := range ev.Tags.Value() { - var res []*event.T - if t.Len() >= 2 { - switch { - case bytes.Equal(t.Key(), []byte("e")): - evId := make([]byte, sha256.Size) - if _, err = hex.DecBytes(evId, t.Value()); 
chk.E(err) { - continue - } - res, err = storage.QueryEvents(c, &filter.T{IDs: tag.New(evId)}) - if err != nil { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Error.F("failed to query for target event")).Write(rw); chk.E(err) { - return - } - return - } - for i := range res { - if res[i].Kind.Equal(kind.Deletion) { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Blocked.F("not processing or storing delete event containing delete event references")).Write(rw); chk.E(err) { - return - } - } - if !bytes.Equal(res[i].PubKey, ev.PubKey) { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Blocked.F("cannot delete other users' events (delete by e tag)")).Write(rw); chk.E(err) { - return - } - } - } - case bytes.Equal(t.Key(), []byte("a")): - split := bytes.Split(t.Value(), []byte{':'}) - if len(split) != 3 { - continue - } - var pk []byte - if pk, err = hex.DecAppend(nil, split[1]); chk.E(err) { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Invalid.F("delete event a tag pubkey value invalid: %s", - t.Value())).Write(rw); chk.E(err) { - return - } - return - } - kin := ints.New(uint16(0)) - if _, err = kin.Unmarshal(split[0]); chk.E(err) { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Invalid.F("delete event a tag kind value invalid: %s", - t.Value())).Write(rw); chk.E(err) { - return - } - return - } - kk := kind.New(kin.Uint16()) - if kk.Equal(kind.Deletion) { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Blocked.F("delete event kind may not be deleted")).Write(rw); chk.E(err) { - return - } - } - if !kk.IsParameterizedReplaceable() { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Error.F("delete tags with a tags containing non-parameterized-replaceable events cannot be processed")).Write(rw); chk.E(err) { - return - } - } - if !bytes.Equal(pk, ev.PubKey) { - log.I.S(pk, ev.PubKey, ev) - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Blocked.F("cannot delete other users' events (delete by a 
tag)")).Write(rw); chk.E(err) { - return - } - } - f := filter.New() - f.Kinds.K = []*kind.T{kk} - f.Authors.Append(pk) - f.Tags.AppendTags(tag.New([]byte{'#', 'd'}, split[2])) - res, err = storage.QueryEvents(c, f) - if err != nil { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Error.F("failed to query for target event")).Write(rw); chk.E(err) { - return - } - return - } - } - } - if len(res) < 1 { - continue - } - var resTmp []*event.T - for _, v := range res { - if ev.CreatedAt.U64() >= v.CreatedAt.U64() { - resTmp = append(resTmp, v) - } - } - res = resTmp - for _, target := range res { - if target.Kind.K == kind.Deletion.K { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Error.F("cannot delete delete event %s", - ev.ID)).Write(rw); chk.E(err) { - return - } - } - if target.CreatedAt.Int() > ev.CreatedAt.Int() { - log.I.F("not deleting\n%d%\nbecause delete event is older\n%d", - target.CreatedAt.Int(), ev.CreatedAt.Int()) - continue - } - if !bytes.Equal(target.PubKey, ev.PubKey) { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Error.F("only author can delete event")).Write(rw); chk.E(err) { - return - } - return - } - if advancedDeleter != nil { - advancedDeleter.BeforeDelete(c, t.Value(), ev.PubKey) - } - if err = sto.DeleteEvent(c, target.EventID()); chk.T(err) { - if err = okenvelope.NewFrom(ev.ID, false, - normalize.Error.F(err.Error())).Write(rw); chk.E(err) { - return - } - return - } - if advancedDeleter != nil { - advancedDeleter.AfterDelete(t.Value(), ev.PubKey) - } - } - res = nil - } - if err = okenvelope.NewFrom(ev.ID, true).Write(rw); chk.E(err) { - return - } - } - var reason []byte - ok, reason = s.addEvent(c, s.relay, ev, h.Request, rr, pubkey) - if err = okenvelope.NewFrom(ev.ID, ok, reason).Write(rw); chk.E(err) { - return - } - if after != nil { - after() - } - return -} diff --git a/realy/nostrJSONhandleEvents.go b/realy/nostrJSONhandleEvents.go new file mode 100644 index 0000000..ccdf7b4 --- /dev/null +++ 
b/realy/nostrJSONhandleEvents.go @@ -0,0 +1,127 @@ +package realy + +import ( + "bytes" + "io" + "net/http" + "sort" + + "realy.lol/context" + "realy.lol/ec/schnorr" + "realy.lol/event" + "realy.lol/httpauth" + "realy.lol/relay" + "realy.lol/sha256" + "realy.lol/store" + "realy.lol/tag" + "realy.lol/text" +) + +// the handleEvents HTTP endpoint accepts an array containing a list of hex +// encoded event IDs in JSON form, eg +// +// ["",...] +// +// and either returns a line structured JSON containing one event per line of +// the results, or an OK,false,"reason:..." message +// +// the relay should not inform the client if it has excluded events due to lack +// of required authentication for privileged events, the /api endpoint will have +// a list of event kinds that require auth and if the events contain this and +// auth was not made and if made, does not match a pubkey in the relevant events +// it is simply not returned. +func (s *Server) handleEvents(h Handler) { + log.I.F("events") + var fetcher store.FetchByIds + var ok bool + var err error + if fetcher, ok = s.relay.(store.FetchByIds); ok { + var pubkey []byte + if _, pubkey, err = httpauth.CheckAuth(h.Request, s.JWTVerifyFunc); chk.E(err) { + return + } + // if auth is enabled, and either required or not set to public readable, and + // the client did not auth with an Authorization header, send a HTTP + // Unauthorized status response. 
+ var auther relay.Authenticator + if auther, ok = s.relay.(relay.Authenticator); ok && + auther.AuthEnabled() && + (s.authRequired || !s.publicReadable) && + len(pubkey) < 1 { + + http.Error(h.ResponseWriter, + "Authentication required for method", http.StatusUnauthorized) + return + } + var req []byte + if req, err = io.ReadAll(h.Request.Body); chk.E(err) { + return + } + // unmarshal the request + var t [][]byte + if t, req, err = text.UnmarshalHexArray(req, sha256.Size); chk.E(err) { + return + } + if len(req) > 0 { + log.I.S("extra bytes after hex array:\n%s", req) + } + var evs event.Ts + if evs, err = fetcher.FetchByIds(context.Bg(), t); chk.E(err) { + return + } + // filter out privileged kinds if there is no authed pubkey, auth is enabled but + // the relay is public readable. + if auther, ok = s.relay.(relay.Authenticator); ok && + auther.AuthEnabled() && s.publicReadable { + var evTmp event.Ts + if len(pubkey) < 1 { + // if not authed, remove all privileged event kinds (user must auth if they want + // their own DMs + for _, ev := range evs { + if !ev.Kind.IsPrivileged() { + evTmp = append(evTmp, ev) + } + } + evs = evTmp + } else if len(pubkey) == schnorr.PubKeyBytesLen { + // if authed, filter out any privileged kinds that don't also contain the authed + // pubkey in either author or p tags + for _, ev := range evs { + if !ev.Kind.IsPrivileged() { + evTmp = append(evTmp, ev) + } else { + var containsPubkey bool + if ev.Tags != nil { + containsPubkey = ev.Tags.ContainsAny([]byte{'p'}, tag.New(pubkey)) + } + if !bytes.Equal(ev.PubKey, pubkey) && !containsPubkey { + log.I.F("authed user %0x not privileged to receive event\n%s", + pubkey, ev.Serialize()) + } else { + // authed pubkey matches either author pubkey or is tagged in privileged event + evTmp = append(evTmp, ev) + } + } + } + evs = evTmp + } + } + // sort in descending order (reverse chronological order) + sort.Slice(evs, func(i, j int) bool { + return evs[i].CreatedAt.Int() >
evs[j].CreatedAt.Int() + }) + for _, ev := range evs { + if _, err = h.ResponseWriter.Write(ev.Marshal(nil)); chk.E(err) { + return + } + // results are jsonl format, one line per event + if _, err = h.ResponseWriter.Write([]byte{'\n'}); chk.E(err) { + return + } + } + http.Error(h.ResponseWriter, "", http.StatusOK) + } else { + http.Error(h.ResponseWriter, "Method not implemented", NI) + } + return +} diff --git a/realy/nostrJSONhandleSimpleEvent.go b/realy/nostrJSONhandleSimpleEvent.go new file mode 100644 index 0000000..ea2cf15 --- /dev/null +++ b/realy/nostrJSONhandleSimpleEvent.go @@ -0,0 +1,215 @@ +package realy + +import ( + "bytes" + "fmt" + "io" + "net/http" + + "realy.lol/context" + "realy.lol/event" + "realy.lol/filter" + "realy.lol/hex" + "realy.lol/httpauth" + "realy.lol/ints" + "realy.lol/kind" + "realy.lol/relay" + "realy.lol/sha256" + "realy.lol/tag" +) + +const ( + NA = http.StatusNotAcceptable + NI = http.StatusNotImplemented + ERR = http.StatusInternalServerError +) + +func (s *Server) handleSimpleEvent(h Handler) { + log.I.F("event") + var err error + var ok bool + sto := s.relay.Storage() + var req []byte + if req, err = io.ReadAll(h.Request.Body); chk.E(err) { + return + } + advancedDeleter, _ := sto.(relay.AdvancedDeleter) + ev := event.New() + if req, err = ev.Unmarshal(req); chk.T(err) { + return + } + var valid bool + var pubkey []byte + if valid, pubkey, err = httpauth.CheckAuth(h.Request, s.JWTVerifyFunc); chk.E(err) { + return + } + rw := h.ResponseWriter + if !valid { + http.Error(rw, "invalid: authorization is not valid", NA) + return + } + rr := GetRemoteFromReq(h.Request) + c := context.Bg() + accept, notice, after := s.relay.AcceptEvent(c, ev, h.Request, rr, pubkey) + if !accept { + http.Error(rw, notice, NA) + return + } + if !bytes.Equal(ev.GetIDBytes(), ev.ID) { + http.Error(rw, + "Event id is computed incorrectly", NA) + return + } + if ok, err = ev.Verify(); chk.T(err) { + http.Error(rw, + "failed to verify signature", NA) + return
+ } else if !ok { + http.Error(rw, + "signature is invalid", NA) + return + } + storage := s.relay.Storage() + if storage == nil { + panic("no event store has been set to store event") + } + if ev.Kind.K == kind.Deletion.K { + log.I.F("delete event\n%s", ev.Serialize()) + for _, t := range ev.Tags.Value() { + var res []*event.T + if t.Len() >= 2 { + switch { + case bytes.Equal(t.Key(), []byte("e")): + evId := make([]byte, sha256.Size) + if _, err = hex.DecBytes(evId, t.Value()); chk.E(err) { + continue + } + res, err = storage.QueryEvents(c, &filter.T{IDs: tag.New(evId)}) + if err != nil { + http.Error(rw, + err.Error(), ERR) + return + } + for i := range res { + if res[i].Kind.Equal(kind.Deletion) { + http.Error(rw, + "not processing or storing delete event containing delete event references", + NA) + } + if !bytes.Equal(res[i].PubKey, ev.PubKey) { + http.Error(rw, + "cannot delete other users' events (delete by e tag)", + NA) + return + } + } + case bytes.Equal(t.Key(), []byte("a")): + split := bytes.Split(t.Value(), []byte{':'}) + if len(split) != 3 { + continue + } + var pk []byte + if pk, err = hex.DecAppend(nil, split[1]); chk.E(err) { + http.Error(rw, + fmt.Sprintf("delete event a tag pubkey value invalid: %s", + t.Value()), NA) + return + } + kin := ints.New(uint16(0)) + if _, err = kin.Unmarshal(split[0]); chk.E(err) { + http.Error(rw, + fmt.Sprintf("delete event a tag kind value invalid: %s", + t.Value()), NA) + return + } + kk := kind.New(kin.Uint16()) + if kk.Equal(kind.Deletion) { + http.Error(rw, + "delete event kind may not be deleted", + NA) + return + } + if !kk.IsParameterizedReplaceable() { + http.Error(rw, + "delete tags with a tags containing non-parameterized-replaceable events cannot be processed", + NA) + return + } + if !bytes.Equal(pk, ev.PubKey) { + log.I.S(pk, ev.PubKey, ev) + http.Error(rw, + "cannot delete other users' events (delete by a tag)", + NA) + return + } + f := filter.New() + f.Kinds.K = []*kind.T{kk} + f.Authors.Append(pk) + 
f.Tags.AppendTags(tag.New([]byte{'#', 'd'}, split[2])) + res, err = storage.QueryEvents(c, f) + if err != nil { + http.Error(rw, err.Error(), ERR) + return + } + } + } + if len(res) < 1 { + continue + } + var resTmp []*event.T + for _, v := range res { + if ev.CreatedAt.U64() >= v.CreatedAt.U64() { + resTmp = append(resTmp, v) + } + } + res = resTmp + for _, target := range res { + if target.Kind.K == kind.Deletion.K { + http.Error(rw, + fmt.Sprintf("cannot delete delete event %s", + ev.ID), NA) + return + } + if target.CreatedAt.Int() > ev.CreatedAt.Int() { + // todo: shouldn't this be an error? + log.I.F("not deleting\n%d%\nbecause delete event is older\n%d", + target.CreatedAt.Int(), ev.CreatedAt.Int()) + continue + } + if !bytes.Equal(target.PubKey, ev.PubKey) { + http.Error(rw, + "only author can delete event", + NA) + return + } + if advancedDeleter != nil { + advancedDeleter.BeforeDelete(c, t.Value(), ev.PubKey) + } + if err = sto.DeleteEvent(c, target.EventID()); chk.T(err) { + http.Error(rw, + err.Error(), ERR) + return + } + if advancedDeleter != nil { + advancedDeleter.AfterDelete(t.Value(), ev.PubKey) + } + } + res = nil + } + http.Error(rw, "", http.StatusOK) + return + } + var reason []byte + ok, reason = s.addEvent(c, s.relay, ev, h.Request, rr, pubkey) + // return the response whether true or false and any reason if false + if ok { + http.Error(rw, "", http.StatusOK) + } else { + http.Error(rw, string(reason), ERR) + } + if after != nil { + // do this in the background and let the http response close + go after() + } + return +} diff --git a/relay/interface.go b/relay/interface.go index ce7f355..dbd70ce 100644 --- a/relay/interface.go +++ b/relay/interface.go @@ -33,8 +33,8 @@ type I interface { // messages, that are not on the mute list, that do not yet have a reply, should accept // direct and group message events until there is three and thereafter will be restricted // until the user adds them to their follow list. 
- AcceptEvent(c context.T, ev *event.T, hr *http.Request, origin string, authedPubkey []byte) (accept bool, - notice string, afterSave func()) + AcceptEvent(c context.T, ev *event.T, hr *http.Request, origin string, + authedPubkey []byte) (accept bool, notice string, afterSave func()) // Storage returns the realy storage implementation. Storage() store.I // NoLimiter returns true if the provided npub should not be rate limited. diff --git a/store/store_interface.go b/store/store_interface.go index 9282cb4..f180eb8 100644 --- a/store/store_interface.go +++ b/store/store_interface.go @@ -49,6 +49,10 @@ type Querent interface { QueryEvents(c context.T, f *filter.T) (evs event.Ts, err error) } +type FetchByIds interface { + FetchByIds(c context.T, ids [][]byte) (evs event.Ts, err error) +} + type Counter interface { // CountEvents performs the same work as QueryEvents but instead of delivering // the events that were found it just returns the count of events