Compare commits

...

10 Commits

Author SHA1 Message Date
b29ad5bd07 fix nasty names 2025-06-19 20:07:55 +01:00
3b0fd72290 Add codecs for encoding and decoding unsigned integers
Introduced Uint16, Uint24, Uint32, Uint40, and Uint64 codecs with support for encoding/decoding in BigEndian format. Each codec includes range validation, getter/setter methods, and comprehensive unit tests to ensure correctness and reliability.
2025-06-19 20:05:15 +01:00
eb0ba87ce6 Refactor database filters and enhance query handling
Updated several database queries to support limits, improve efficiency, and add better range handling. Introduced new tests for filter functionality, corrected bugs, and added logging for debugging purposes.
2025-06-17 13:47:51 +01:00
321a1b47bb "Refactor filtering and indexing logic for event queries"
The filtering logic has been streamlined, adding enhanced query support for filters involving tags, authors, kinds, and their combinations. Introduced new utility methods for deduplication, intersection, and sorting to improve efficiency in handling event serials. Adjusted indexing structures and encoding for better modularity and readability.
2025-06-15 11:18:18 +01:00
d5ae20ba94 Refactor timestamp handling to use integers directly.
Replaced `FromInt64` with `FromInt` to simplify timestamp operations. Updated related methods, tests, and logic to consistently handle timestamps as integers, improving code clarity and reducing unnecessary conversions.
2025-06-14 09:08:20 +01:00
6a7ddb8aea Refactor and reorganize prefix index definitions
Removed redundant comments and restructured index definitions for improved clarity and maintainability. Added categorization and detailed documentation to better describe the purpose and usage of search indexes and metadata keys.
2025-06-14 08:40:23 +01:00
e12fb03b03 partly completed filter search
since/until/kinds/authors combinations done
2025-06-12 11:32:44 +01:00
966f58f4c7 nicer names for filter(s) and small cleanups 2025-06-10 22:26:56 +01:00
789c7913e7 allow non-hex e and p tags
mostly because some clients use p tags in follow lists for whatever reason. follow lists are generally fetched by pubkey+kind so the missing index because it's not a pubkey is really nbd, and pubkey tag searches will also work with the kind.
2025-06-10 22:10:33 +01:00
faf3ebfdba use varints for indexes 2025-06-10 21:31:24 +01:00
35 changed files with 2063 additions and 765 deletions

1
.gitignore vendored
View File

@@ -86,3 +86,4 @@ node_modules/**
/blocklist.json
/gui/gui/main.wasm
/gui/gui/index.html
database/testrealy

192
database/filter.go Normal file
View File

@@ -0,0 +1,192 @@
package database
import (
"bytes"
"math"
"sort"
"x.realy.lol/chk"
"x.realy.lol/database/indexes"
"x.realy.lol/database/indexes/types/pubhash"
"x.realy.lol/database/indexes/types/varint"
"x.realy.lol/filter"
"x.realy.lol/hex"
"x.realy.lol/log"
"x.realy.lol/timestamp"
)
// Bitfield compactly records which fields of a filter.F are populated, one bit per
// field, so that Filter can dispatch a query to the index matching the filter's shape.
type Bitfield byte

// One flag per filter field; values are combined with bitwise OR and tested with AND.
const (
	hasIds Bitfield = 1 << iota
	hasKinds
	hasAuthors
	hasTags
	hasSince
	hasUntil
	hasLimit
	hasSearch
)
// ToBitfield summarizes which fields of the filter are populated as a Bitfield, one
// bit per field, so the query dispatcher can match the filter's shape to an index.
//
// Fixed: flags are now set with |= rather than += — arithmetic addition would corrupt
// the whole field if the same bit were ever added twice.
func ToBitfield(f *filter.F) (b Bitfield) {
	if len(f.Ids) != 0 {
		b |= hasIds
	}
	if len(f.Kinds) != 0 {
		b |= hasKinds
	}
	if len(f.Authors) != 0 {
		b |= hasAuthors
	}
	if len(f.Tags) != 0 {
		b |= hasTags
	}
	if f.Since != nil {
		b |= hasSince
	}
	if f.Until != nil {
		b |= hasUntil
	}
	if f.Limit != nil {
		b |= hasLimit
	}
	if f.Search != "" {
		b |= hasSearch
	}
	return
}
// Filter runs a nip-01 type query on a provided filter and returns the database serial keys of
// the matching events, excluding a list of authors also provided from the result.
//
// When the filter contains Ids those override everything else. Otherwise the populated
// fields select one of the composite indexes. A nil Since defaults to 0, a nil Until to
// math.MaxInt64, and a nil Limit to a cap of 10000 applied after sorting. Results are
// returned in reverse chronological order.
func (d *D) Filter(f filter.F, exclude []*pubhash.T) (evSerials varint.S, err error) {
	var evs varint.S
	var since, until *timestamp.Timestamp
	var limit *int
	var postLimit bool
	bf := ToBitfield(&f)
	// first, if there is Ids these override everything else
	if bf&hasIds != 0 {
		for _, v := range f.Ids {
			var id []byte
			if id, err = hex.Dec(v); chk.E(err) {
				// ignore ids that are not valid hex
				err = nil
				continue
			}
			var ev *varint.V
			if ev, err = d.FindEventSerialById(id); chk.E(err) {
				// ignore ids that are not in the database
				err = nil
				continue
			}
			evs = append(evs, ev)
		}
		// fixed: the original returned here, discarding the collected serials (they
		// were appended to evs, but the named return is evSerials)
		goto done
	}
	// default the time range so downstream range queries never dereference nil; the
	// original only assigned these when NOT both were present, crashing when they were
	since = f.Since
	if since == nil {
		s := timestamp.Timestamp(0)
		since = &s
	}
	until = f.Until
	if until == nil {
		u := timestamp.Timestamp(math.MaxInt64)
		until = &u
	}
	// fixed: the original overwrote a user-supplied limit with 10000 and left a nil
	// limit nil, which nil-dereferenced in the range queries
	limit = f.Limit
	if limit == nil {
		// put a reasonable cap on unlimited queries; trim the result at the end
		limit = filter.IntToPointer(10000)
		postLimit = true
	}
	log.I.F("filter bitfield: %08b", bf)
	// the limit bit never affects which index is used
	bf = bf &^ hasLimit
	// next, check for filters that only have since and/or until
	if bf&(hasSince|hasUntil) != 0 && bf&^(hasSince|hasUntil) == 0 {
		if evs, err = d.GetEventSerialsByCreatedAtRange(since, until, limit, postLimit); chk.E(err) {
			return
		}
		goto done
	}
	// next, kinds, optionally with a time range
	if bf&hasKinds != 0 && bf&^(hasKinds|hasSince|hasUntil) == 0 {
		if bf == hasKinds {
			if evs, err = d.GetEventSerialsByKinds(f.Kinds, limit); chk.E(err) {
				return
			}
		} else {
			if evs, err = d.GetEventSerialsByKindsCreatedAtRange(f.Kinds, since, until, limit); chk.E(err) {
				return
			}
		}
		goto done
	}
	// next authors, optionally with a time range
	if bf&hasAuthors != 0 && bf&^(hasAuthors|hasSince|hasUntil) == 0 {
		if evs, err = d.GetEventSerialsByAuthorsCreatedAtRange(f.Authors, since, until, limit); chk.E(err) {
			return
		}
		goto done
	}
	// next authors/kinds
	if ka := hasKinds | hasAuthors; bf&ka == ka && bf&^(ka|hasSince|hasUntil) == 0 {
		if evs, err = d.GetEventSerialsByKindsAuthorsCreatedAtRange(f.Kinds, f.Authors, since, until, limit); chk.E(err) {
			return
		}
		goto done
	}
	// next, tags only; fixed: the original swallowed the error and fell through
	// without goto done, running the authors/tags branch checks on the stale evs
	if bf&hasTags != 0 && bf&^(hasTags|hasSince|hasUntil) == 0 {
		if evs, err = d.GetEventSerialsByTagsCreatedAtRange(f.Tags, limit); chk.E(err) {
			return
		}
		goto done
	}
	// next authors/tags
	if at := hasAuthors | hasTags; bf&at == at && bf&^(at|hasSince|hasUntil) == 0 {
		if evs, err = d.GetEventSerialsByAuthorsTagsCreatedAtRange(f.Tags, f.Authors, since, until, limit); chk.E(err) {
			return
		}
		goto done
	}
	// next kinds/tags
	if kt := hasKinds | hasTags; bf&kt == kt && bf&^(kt|hasSince|hasUntil) == 0 {
		if evs, err = d.GetEventSerialsByKindsTagsCreatedAtRange(f.Tags, f.Kinds, since, until, limit); chk.E(err) {
			return
		}
		goto done
	}
	// next kinds/authors/tags; fixed: the original mask omitted hasKinds, so this
	// branch duplicated authors/tags and could never match three-way filters
	if kat := hasKinds | hasAuthors | hasTags; bf&kat == kat && bf&^(kat|hasSince|hasUntil) == 0 {
		if evs, err = d.GetEventSerialsByKindsAuthorsTagsCreatedAtRange(f.Tags, f.Kinds, f.Authors, since, until, limit); chk.E(err) {
			return
		}
		goto done
	}
done:
	// scan the FullIndex for these serials, and sort them by descending created_at
	var index []indexes.FullIndex
	if index, err = d.GetFullIndexesFromSerials(evs); chk.E(err) {
		return
	}
	// sort by reverse chronological order
	sort.Slice(index, func(i, j int) bool {
		return index[i].CreatedAt.ToTimestamp() > index[j].CreatedAt.ToTimestamp()
	})
	for _, item := range index {
		// fixed: the original used `continue` inside the inner loop, which only
		// skipped the exclude entry — excluded authors were still appended
		skip := false
		for _, x := range exclude {
			if bytes.Equal(item.Pubkey.Bytes(), x.Bytes()) {
				skip = true
				break
			}
		}
		if skip {
			continue
		}
		evSerials = append(evSerials, item.Ser)
	}
	// trim an uncapped query down to the default cap after sorting
	if postLimit && limit != nil && len(evSerials) > *limit {
		evSerials = evSerials[:*limit]
	}
	return
}

80
database/filter_test.go Normal file
View File

@@ -0,0 +1,80 @@
package database
import (
"bufio"
"bytes"
"testing"
"x.realy.lol/apputil"
"x.realy.lol/chk"
"x.realy.lol/database/indexes/types/varint"
"x.realy.lol/event"
"x.realy.lol/filter"
"x.realy.lol/interrupt"
"x.realy.lol/log"
)
// TestD_Filter exercises D.Filter end to end: it opens (or creates) a database in the
// local "testrealy" directory, ingests the example event fixtures on first run, then
// queries for kind-0 events and resolves the returned serials back to event ids.
func TestD_Filter(t *testing.T) {
	var err error
	d := New()
	tmpDir := "testrealy"
	// NOTE(review): despite the name, this is true when the directory does NOT yet
	// exist, i.e. when the database still needs to be populated — consider renaming.
	dbExists := !apputil.FileExists(tmpDir)
	if err = d.Init(tmpDir); chk.E(err) {
		t.Fatal(err)
	}
	// make sure the database is closed cleanly on interrupt
	interrupt.AddHandler(func() {
		d.Close()
	})
	if dbExists {
		// populate the fresh database from the newline-delimited example events
		buf := bytes.NewBuffer(ExampleEvents)
		scan := bufio.NewScanner(buf)
		// some fixture events are large; raise the scanner's line buffer to ~5 MB
		scan.Buffer(make([]byte, 5120000), 5120000)
		var count, errs int
		for scan.Scan() {
			b := scan.Bytes()
			ev := event.New()
			if err = ev.Unmarshal(b); chk.E(err) {
				t.Fatalf("%s:\n%s", err, b)
			}
			// verify the signature on the event
			var ok bool
			if ok, err = ev.Verify(); chk.E(err) {
				errs++
				continue
			}
			if !ok {
				errs++
				log.E.F("event signature is invalid\n%s", b)
				continue
			}
			count++
			if count%1000 == 0 {
				log.I.F("unmarshaled %d events", count)
			}
			// storage failures are logged by chk.E and skipped, not fatal
			if err = d.StoreEvent(ev); chk.E(err) {
				continue
			}
		}
		log.I.F("stored %d events", count)
	}
	// fetch some kind 0
	var sers []*varint.V
	if sers, err = d.Filter(filter.F{
		Kinds: []int{0},
		Limit: filter.IntToPointer(50),
	}, nil); chk.E(err) {
		t.Fatal(err)
	}
	// log.I.S(sers)
	// resolve each returned serial back to its event id to prove the round trip works
	var fids [][]byte
	for _, ser := range sers {
		var evIds []byte
		if evIds, err = d.GetEventIdFromSerial(ser); chk.E(err) {
			// continue
			log.I.S(ser)
			t.Fatal(err)
		}
		fids = append(fids, evIds)
	}
	log.I.S(fids)
}

View File

@@ -2,16 +2,26 @@ package database
import (
"bytes"
"fmt"
"github.com/dgraph-io/badger/v4"
"x.realy.lol/chk"
"x.realy.lol/database/indexes"
"x.realy.lol/database/indexes/prefixes"
"x.realy.lol/database/indexes/types/idhash"
"x.realy.lol/database/indexes/types/prefix"
"x.realy.lol/database/indexes/types/varint"
"x.realy.lol/errorf"
"x.realy.lol/event"
"x.realy.lol/filter"
"x.realy.lol/log"
"x.realy.lol/tags"
"x.realy.lol/timestamp"
)
func (d *D) FindEvent(evId []byte) (ev *event.E, err error) {
id, ser := indexes.IdVars()
func (d *D) FindEventSerialById(evId []byte) (ser *varint.V, err error) {
id := idhash.New()
if err = id.FromId(evId); chk.E(err) {
return
}
@@ -27,6 +37,7 @@ func (d *D) FindEvent(evId []byte) (ev *event.E, err error) {
item := it.Item()
k := item.KeyCopy(nil)
buf := bytes.NewBuffer(k)
ser = varint.New()
if err = indexes.IdDec(id, ser).UnmarshalRead(buf); chk.E(err) {
return
}
@@ -35,28 +46,597 @@ func (d *D) FindEvent(evId []byte) (ev *event.E, err error) {
}); err != nil {
return
}
if ser == nil {
err = fmt.Errorf("event %0x not found", evId)
return
}
return
}
func (d *D) GetEventFromSerial(ser *varint.V) (ev *event.E, err error) {
if err = d.View(func(txn *badger.Txn) (err error) {
evk := new(bytes.Buffer)
if err = indexes.EventEnc(ser).MarshalWrite(evk); chk.E(err) {
enc := indexes.EventEnc(ser)
kb := new(bytes.Buffer)
if err = enc.MarshalWrite(kb); chk.E(err) {
return
}
it := txn.NewIterator(badger.IteratorOptions{Prefix: evk.Bytes()})
defer it.Close()
for it.Seek(evk.Bytes()); it.Valid(); {
item := it.Item()
var val []byte
if val, err = item.ValueCopy(nil); chk.E(err) {
return
}
ev = event.New()
if err = ev.UnmarshalRead(bytes.NewBuffer(val)); chk.E(err) {
return
}
var item *badger.Item
if item, err = txn.Get(kb.Bytes()); err != nil {
return
}
var val []byte
if val, err = item.ValueCopy(nil); chk.E(err) {
return
}
ev = event.New()
vr := bytes.NewBuffer(val)
if err = ev.UnmarshalRead(vr); chk.E(err) {
return
}
return
}); err != nil {
return
}
return
}
// GetEventIdFromSerial resolves a database serial back to the event id it was assigned
// to, by scanning the FullIndex entry stored under that serial. If no entry exists, id
// is returned nil with a nil error.
func (d *D) GetEventIdFromSerial(ser *varint.V) (id []byte, err error) {
	if err = d.View(func(txn *badger.Txn) (err error) {
		// search prefix: FullIndex table prefix + the serial; the id/pubkey/kind/
		// created_at fields live in the remainder of the key
		enc := indexes.New(prefix.New(prefixes.FullIndex), ser)
		prf := new(bytes.Buffer)
		if err = enc.MarshalWrite(prf); chk.E(err) {
			return
		}
		it := txn.NewIterator(badger.IteratorOptions{Prefix: prf.Bytes()})
		defer it.Close()
		for it.Seek(prf.Bytes()); it.Valid(); it.Next() {
			item := it.Item()
			key := item.KeyCopy(nil)
			kbuf := bytes.NewBuffer(key)
			// decode the full key; only the id field (t) is needed here
			_, t, p, ki, ca := indexes.FullIndexVars()
			dec := indexes.FullIndexDec(ser, t, p, ki, ca)
			if err = dec.UnmarshalRead(kbuf); chk.E(err) {
				return
			}
			// if several keys somehow match the serial prefix, the last one wins
			id = t.Bytes()
		}
		return
	}); chk.E(err) {
		return
	}
	return
}
// GetEventById fetches an event by its event id: the id is first resolved to the
// database serial under which the event is stored, then the event record is loaded.
func (d *D) GetEventById(evId []byte) (ev *event.E, err error) {
	ser, ferr := d.FindEventSerialById(evId)
	if ferr != nil {
		err = ferr
		return
	}
	return d.GetEventFromSerial(ser)
}
// GetEventSerialsByCreatedAtRange returns the serials of events with the given since/until
// range in reverse chronological order (starting at until, going back to since).
//
// A nil since defaults to 0, a nil until to max int64 and a nil limit to a cap of 10000,
// so callers may pass nil for any of them (the original dereferenced all three unchecked).
// When postLimit is true the limit is applied after the scan rather than cutting it short.
func (d *D) GetEventSerialsByCreatedAtRange(since, until *timestamp.Timestamp,
	limit *int, postLimit bool) (sers varint.S, err error) {
	log.I.F("GetEventSerialsByCreatedAtRange")
	if since == nil {
		s := timestamp.Timestamp(0)
		since = &s
	}
	if until == nil {
		u := timestamp.Timestamp(1<<63 - 1)
		until = &u
	}
	if limit == nil {
		l := 10000
		limit = &l
	}
	// get the start (end) max possible index prefix
	startCreatedAt, _ := indexes.CreatedAtVars()
	startCreatedAt.FromInt(until.ToInt())
	prf := new(bytes.Buffer)
	// NOTE(review): this prefix embeds the encoded until timestamp, so the iterator only
	// visits keys whose created_at matches that encoding exactly — confirm the intent was
	// not to use the bare CreatedAt table prefix and seek to until within it.
	if err = indexes.CreatedAtEnc(startCreatedAt, nil).MarshalWrite(prf); chk.E(err) {
		return
	}
	var count int
	if err = d.View(func(txn *badger.Txn) (err error) {
		it := txn.NewIterator(badger.IteratorOptions{Reverse: true, Prefix: prf.Bytes()})
		defer it.Close()
		for it.Rewind(); it.Valid(); it.Next() {
			key := it.Item().KeyCopy(nil)
			ca, ser := indexes.CreatedAtVars()
			buf := bytes.NewBuffer(key)
			if err = indexes.CreatedAtDec(ca, ser).UnmarshalRead(buf); chk.E(err) {
				// skip keys that fail to decode; clear err so a trailing bad key does
				// not fail the whole transaction
				err = nil
				continue
			}
			if ca.ToTimestamp() < *since {
				// keys are ordered by created_at, so everything further back is out
				// of range
				break
			}
			sers = append(sers, ser)
			count++
			// fixed: was count > *limit, which returned limit+1 results
			if !postLimit && count >= *limit {
				return
			}
		}
		return
	}); chk.E(err) {
		return
	}
	if postLimit && len(sers) > *limit {
		sers = sers[:*limit]
	}
	return
}
// GetEventSerialsByKinds returns the serials of events matching any of the given kinds,
// up to limit results in total (unlimited when limit is nil).
func (d *D) GetEventSerialsByKinds(kinds []int, limit *int) (sers varint.S, err error) {
	log.I.F("GetEventSerialsByKinds")
	// build one search prefix per kind in the list
	var searchIdxs [][]byte
	kind, _ := indexes.KindVars()
	for _, k := range kinds {
		kind.Set(k)
		prf := new(bytes.Buffer)
		if err = indexes.KindEnc(kind, nil).MarshalWrite(prf); chk.E(err) {
			return
		}
		searchIdxs = append(searchIdxs, prf.Bytes())
	}
	var count int
	for _, idx := range searchIdxs {
		if err = d.View(func(txn *badger.Txn) (err error) {
			it := txn.NewIterator(badger.DefaultIteratorOptions)
			defer it.Close()
			for it.Seek(idx); it.ValidForPrefix(idx); it.Next() {
				key := it.Item().KeyCopy(nil)
				ki, ser := indexes.KindVars()
				buf := bytes.NewBuffer(key)
				if err = indexes.KindDec(ki, ser).UnmarshalRead(buf); chk.E(err) {
					// skip keys that fail to decode; clear err so a trailing bad key
					// does not fail the transaction
					err = nil
					continue
				}
				sers = append(sers, ser)
				count++
				if limit != nil && count >= *limit {
					return
				}
			}
			return
		}); chk.E(err) {
			return
		}
		// fixed: stop scanning remaining kinds once the limit is reached instead of
		// opening a fresh iterator per kind only to return immediately
		if limit != nil && count >= *limit {
			break
		}
	}
	return
}
// GetEventSerialsByKindsCreatedAtRange returns the serials of events matching any of the
// given kinds within the since/until range, scanning each kind's index in reverse
// chronological order. Nil since/until/limit default to 0, max int64 and 10000 (the
// original dereferenced since and limit unchecked).
func (d *D) GetEventSerialsByKindsCreatedAtRange(kinds []int, since, until *timestamp.Timestamp, limit *int) (sers varint.S, err error) {
	if since == nil {
		s := timestamp.Timestamp(0)
		since = &s
	}
	if until == nil {
		u := timestamp.Timestamp(1<<63 - 1)
		until = &u
	}
	if limit == nil {
		l := 10000
		limit = &l
	}
	// build one search prefix per kind, anchored at the until timestamp
	var searchIdxs [][]byte
	kind, startCreatedAt, _ := indexes.KindCreatedAtVars()
	startCreatedAt.FromInt(until.ToInt())
	for _, k := range kinds {
		kind.Set(k)
		prf := new(bytes.Buffer)
		if err = indexes.KindCreatedAtEnc(kind, startCreatedAt, nil).MarshalWrite(prf); chk.E(err) {
			return
		}
		searchIdxs = append(searchIdxs, prf.Bytes())
	}
	var count int
	for _, idx := range searchIdxs {
		if err = d.View(func(txn *badger.Txn) (err error) {
			it := txn.NewIterator(badger.IteratorOptions{Reverse: true, Prefix: idx})
			defer it.Close()
			for it.Rewind(); it.Valid(); it.Next() {
				key := it.Item().KeyCopy(nil)
				ki, ca, ser := indexes.KindCreatedAtVars()
				buf := bytes.NewBuffer(key)
				if err = indexes.KindCreatedAtDec(ki, ca, ser).UnmarshalRead(buf); chk.E(err) {
					// skip undecodable keys without failing the transaction
					err = nil
					continue
				}
				if ca.ToTimestamp() < *since {
					break
				}
				sers = append(sers, ser)
				count++
				// fixed: was count > *limit, which returned limit+1 results
				if count >= *limit {
					return
				}
			}
			return
		}); chk.E(err) {
			return
		}
		if count >= *limit {
			break
		}
	}
	return
}
// GetEventSerialsByAuthors returns the serials of events authored by any of the given
// pubkeys, up to limit results in total (unlimited when limit is nil). Pubkeys that fail
// to decode are ignored, unless all of them fail, which is an error.
func (d *D) GetEventSerialsByAuthors(pubkeys []string, limit *int) (sers varint.S, err error) {
	// build one search prefix per author
	var searchIdxs [][]byte
	var pkDecodeErrs int
	pubkey, _ := indexes.PubkeyVars()
	for _, p := range pubkeys {
		if err = pubkey.FromPubkeyHex(p); chk.E(err) {
			// gracefully ignore wrong keys; clear err so a trailing bad key is not
			// returned as a failure
			err = nil
			pkDecodeErrs++
			continue
		}
		prf := new(bytes.Buffer)
		if err = indexes.PubkeyEnc(pubkey, nil).MarshalWrite(prf); chk.E(err) {
			return
		}
		searchIdxs = append(searchIdxs, prf.Bytes())
	}
	// fixed: this check used to sit inside the loop, where it only fired reliably on
	// the final iteration and left a stale err set
	if len(pubkeys) > 0 && pkDecodeErrs == len(pubkeys) {
		err = errorf.E("all pubkeys in authors field of filter failed to decode")
		return
	}
	var count int
	for _, idx := range searchIdxs {
		if err = d.View(func(txn *badger.Txn) (err error) {
			it := txn.NewIterator(badger.IteratorOptions{Reverse: true, Prefix: idx})
			defer it.Close()
			for it.Rewind(); it.Valid(); it.Next() {
				key := it.Item().KeyCopy(nil)
				// fixed: decode with the Pubkey codec matching PubkeyEnc above; the
				// original decoded these keys with KindCreatedAtDec
				pk, ser := indexes.PubkeyVars()
				buf := bytes.NewBuffer(key)
				if err = indexes.PubkeyDec(pk, ser).UnmarshalRead(buf); chk.E(err) {
					err = nil
					continue
				}
				sers = append(sers, ser)
				count++
				// fixed: nil limit is allowed; was an unchecked *limit and > off-by-one
				if limit != nil && count >= *limit {
					return
				}
			}
			return
		}); chk.E(err) {
			return
		}
		if limit != nil && count >= *limit {
			break
		}
	}
	return
}
// GetEventSerialsByAuthorsCreatedAtRange returns the serials of events authored by any of
// the given pubkeys within the since/until range, scanning each author's index in reverse
// chronological order. Nil since/until/limit default to 0, max int64 and 10000. Pubkeys
// that fail to decode are ignored, unless all fail, which is an error.
func (d *D) GetEventSerialsByAuthorsCreatedAtRange(pubkeys []string,
	since, until *timestamp.Timestamp, limit *int) (sers varint.S, err error) {
	if since == nil {
		s := timestamp.Timestamp(0)
		since = &s
	}
	if until == nil {
		u := timestamp.Timestamp(1<<63 - 1)
		until = &u
	}
	if limit == nil {
		l := 10000
		limit = &l
	}
	// build one search prefix per author, anchored at the until timestamp
	var searchIdxs [][]byte
	var pkDecodeErrs int
	pubkey, startCreatedAt, _ := indexes.PubkeyCreatedAtVars()
	startCreatedAt.FromInt(until.ToInt())
	for _, p := range pubkeys {
		if err = pubkey.FromPubkeyHex(p); chk.E(err) {
			// gracefully ignore wrong keys
			err = nil
			pkDecodeErrs++
			continue
		}
		prf := new(bytes.Buffer)
		if err = indexes.PubkeyCreatedAtEnc(pubkey, startCreatedAt, nil).MarshalWrite(prf); chk.E(err) {
			return
		}
		searchIdxs = append(searchIdxs, prf.Bytes())
	}
	// fixed: this check used to sit inside the loop and could leave a stale err set
	if len(pubkeys) > 0 && pkDecodeErrs == len(pubkeys) {
		err = errorf.E("all pubkeys in authors field of filter failed to decode")
		return
	}
	var count int
	for _, idx := range searchIdxs {
		if err = d.View(func(txn *badger.Txn) (err error) {
			it := txn.NewIterator(badger.IteratorOptions{Reverse: true, Prefix: idx})
			defer it.Close()
			for it.Rewind(); it.Valid(); it.Next() {
				key := it.Item().KeyCopy(nil)
				// fixed: decode with the PubkeyCreatedAt codec matching the encoder
				// above; the original decoded these keys with KindCreatedAtDec
				pk, ca, ser := indexes.PubkeyCreatedAtVars()
				buf := bytes.NewBuffer(key)
				if err = indexes.PubkeyCreatedAtDec(pk, ca, ser).UnmarshalRead(buf); chk.E(err) {
					err = nil
					continue
				}
				if ca.ToTimestamp() < *since {
					break
				}
				sers = append(sers, ser)
				count++
				if count >= *limit {
					return
				}
			}
			return
		}); chk.E(err) {
			return
		}
		if count >= *limit {
			break
		}
	}
	return
}
// GetEventSerialsByKindsAuthorsCreatedAtRange returns the serials of events matching any
// (kind, author) pair from the given lists within the since/until range, scanning each
// composite index in reverse chronological order. Nil since/until/limit default to 0,
// max int64 and 10000. Pubkeys that fail to decode are ignored, unless all fail.
func (d *D) GetEventSerialsByKindsAuthorsCreatedAtRange(kinds []int, pubkeys []string,
	since, until *timestamp.Timestamp, limit *int) (sers varint.S, err error) {
	if since == nil {
		s := timestamp.Timestamp(0)
		since = &s
	}
	if until == nil {
		u := timestamp.Timestamp(1<<63 - 1)
		until = &u
	}
	if limit == nil {
		l := 10000
		limit = &l
	}
	var searchIdxs [][]byte
	var pkDecodeErrs int
	kind, pubkey, startCreatedAt, _ := indexes.KindPubkeyCreatedAtVars()
	startCreatedAt.FromInt(until.ToInt())
	// decode the pubkeys once, then build one search prefix per (kind, pubkey) pair;
	// fixed: the original counted decode failures across the kinds loop, so a single
	// bad pubkey repeated over several kinds could spuriously trip the all-failed error
	for _, k := range kinds {
		kind.Set(k)
		pkDecodeErrs = 0
		for _, p := range pubkeys {
			if err = pubkey.FromPubkeyHex(p); chk.E(err) {
				// gracefully ignore wrong keys
				err = nil
				pkDecodeErrs++
				continue
			}
			prf := new(bytes.Buffer)
			if err = indexes.KindPubkeyCreatedAtEnc(kind, pubkey, startCreatedAt, nil).MarshalWrite(prf); chk.E(err) {
				return
			}
			searchIdxs = append(searchIdxs, prf.Bytes())
		}
	}
	if len(pubkeys) > 0 && pkDecodeErrs == len(pubkeys) {
		err = errorf.E("all pubkeys in authors field of filter failed to decode")
		return
	}
	var count int
	for _, idx := range searchIdxs {
		if err = d.View(func(txn *badger.Txn) (err error) {
			it := txn.NewIterator(badger.IteratorOptions{Reverse: true, Prefix: idx})
			defer it.Close()
			for it.Rewind(); it.Valid(); it.Next() {
				key := it.Item().KeyCopy(nil)
				// fixed: decode with the KindPubkeyCreatedAt codec matching the
				// encoder above; the original decoded with KindCreatedAtDec, which
				// mis-parses the pubkey bytes as part of the timestamp/serial
				ki, pk, ca, ser := indexes.KindPubkeyCreatedAtVars()
				buf := bytes.NewBuffer(key)
				if err = indexes.KindPubkeyCreatedAtDec(ki, pk, ca, ser).UnmarshalRead(buf); chk.E(err) {
					err = nil
					continue
				}
				if ca.ToTimestamp() < *since {
					break
				}
				sers = append(sers, ser)
				count++
				if count >= *limit {
					return
				}
			}
			return
		}); chk.E(err) {
			return
		}
		if count >= *limit {
			break
		}
	}
	return
}
// GetEventSerialsByTagsCreatedAtRange searches for events that match the tags in a filter and
// returns the list of serials that were found.
//
// NOTE(review): this function currently only BUILDS the per-tag search prefixes into
// searchIdxs — nothing iterates them yet (see the trailing "todo: implement"), so it
// always returns an empty sers and never uses limit. Callers that intersect its result
// with another index will therefore always get an empty intersection.
func (d *D) GetEventSerialsByTagsCreatedAtRange(t filter.TagMap, limit *int) (sers varint.S, err error) {
	if len(t) < 1 {
		err = errorf.E("no tags provided")
		return
	}
	var searchIdxs [][]byte
	for tk, tv := range t {
		// the key of each element of the map must be `#X` where X is a-zA-Z
		if len(tk) != 2 {
			continue
		}
		if tk[0] != '#' {
			// NOTE(review): this only logs; the malformed key is still processed below
			log.E.F("invalid tag map key '%s'", tk)
		}
		switch tk[1] {
		case 'a':
			// not sure if this is a thing. maybe a prefix search?
			for _, ta := range tv {
				var atag tags.Tag_a
				if atag, err = tags.Decode_a_Tag(ta); chk.E(err) {
					err = nil
					continue
				}
				if atag.Kind == 0 {
					err = nil
					continue
				}
				ki, pk, ident, _ := indexes.TagAVars()
				ki.Set(atag.Kind)
				if atag.Pubkey == nil {
					err = nil
					continue
				}
				if err = pk.FromPubkey(atag.Pubkey); chk.E(err) {
					err = nil
					continue
				}
				// NOTE(review): empty branch — presumably an empty-ident case was
				// meant to be skipped (or handled) here
				if len(atag.Ident) < 1 {
				}
				// NOTE(review): unlike the other error paths this one does not
				// `continue`, so a failed ident decode still falls through to encode
				if err = ident.FromIdent([]byte(atag.Ident)); chk.E(err) {
					err = nil
				}
				buf := new(bytes.Buffer)
				if err = indexes.TagAEnc(ki, pk, ident, nil).MarshalWrite(buf); chk.E(err) {
					err = nil
					continue
				}
				searchIdxs = append(searchIdxs, buf.Bytes())
			}
		case 'd':
			// d tags are identifiers used to mark replaceable events to create a namespace,
			// that the references can be used to replace them, or referred to using 'a' tags.
			for _, td := range tv {
				ident, _ := indexes.TagIdentifierVars()
				if err = ident.FromIdent([]byte(td)); chk.E(err) {
					err = nil
					continue
				}
				buf := new(bytes.Buffer)
				if err = indexes.TagIdentifierEnc(ident, nil).MarshalWrite(buf); chk.E(err) {
					err = nil
					continue
				}
				searchIdxs = append(searchIdxs, buf.Bytes())
			}
		case 'e':
			// e tags refer to events. they can have a third field such as 'root' and 'reply'
			// but this third field isn't indexed.
			for _, te := range tv {
				evt, _ := indexes.TagEventVars()
				if err = evt.FromIdHex(te); chk.E(err) {
					err = nil
					continue
				}
				buf := new(bytes.Buffer)
				if err = indexes.TagEventEnc(evt, nil).MarshalWrite(buf); chk.E(err) {
					err = nil
					continue
				}
				searchIdxs = append(searchIdxs, buf.Bytes())
			}
		case 'p':
			// p tags are references to author pubkeys of events. usually a 64 character hex
			// string but sometimes is a hashtag in follow events.
			for _, te := range tv {
				pk, _ := indexes.TagPubkeyVars()
				if err = pk.FromPubkeyHex(te); chk.E(err) {
					err = nil
					continue
				}
				buf := new(bytes.Buffer)
				if err = indexes.TagPubkeyEnc(pk, nil).MarshalWrite(buf); chk.E(err) {
					err = nil
					continue
				}
				searchIdxs = append(searchIdxs, buf.Bytes())
			}
		case 't':
			// t tags are hashtags, arbitrary strings that can be used to assist search for
			// topics.
			for _, tt := range tv {
				ht, _ := indexes.TagHashtagVars()
				if err = ht.FromIdent([]byte(tt)); chk.E(err) {
					err = nil
					continue
				}
				buf := new(bytes.Buffer)
				if err = indexes.TagHashtagEnc(ht, nil).MarshalWrite(buf); chk.E(err) {
					err = nil
					continue
				}
				searchIdxs = append(searchIdxs, buf.Bytes())
			}
		default:
			// everything else is arbitrary strings, that may have application specific
			// semantics.
			for _, tl := range tv {
				l, val, _ := indexes.TagLetterVars()
				l.Set(tk[1])
				if err = val.FromIdent([]byte(tl)); chk.E(err) {
					err = nil
					continue
				}
				buf := new(bytes.Buffer)
				if err = indexes.TagLetterEnc(l, val, nil).MarshalWrite(buf); chk.E(err) {
					err = nil
					continue
				}
				searchIdxs = append(searchIdxs, buf.Bytes())
			}
		}
		// todo: implement
	}
	return
}
// GetEventSerialsByAuthorsTagsCreatedAtRange finds events that match both the given
// authors (within the since/until range) and at least one of the given tags, by running
// the two index queries separately and intersecting their serial sets.
func (d *D) GetEventSerialsByAuthorsTagsCreatedAtRange(t filter.TagMap, pubkeys []string, since, until *timestamp.Timestamp, limit *int) (sers varint.S, err error) {
	// the author/time query yields the most restricted candidate set
	var byAuthors varint.S
	if byAuthors, err = d.GetEventSerialsByAuthorsCreatedAtRange(pubkeys, since, until, limit); chk.E(err) {
		return
	}
	// then collect every serial matching any of the requested tags
	var byTags varint.S
	if byTags, err = d.GetEventSerialsByTagsCreatedAtRange(t, limit); chk.E(err) {
		return
	}
	// only serials present in both sets satisfy author AND tag
	sers = varint.Intersect(byAuthors, byTags)
	return
}
// GetEventSerialsByKindsTagsCreatedAtRange finds events that match both the given kinds
// (within the since/until range) and at least one of the given tags, by running the two
// index queries separately and intersecting their serial sets.
func (d *D) GetEventSerialsByKindsTagsCreatedAtRange(t filter.TagMap, kinds []int, since, until *timestamp.Timestamp, limit *int) (sers varint.S, err error) {
	// the kind/time query yields the most restricted candidate set
	var byKinds varint.S
	if byKinds, err = d.GetEventSerialsByKindsCreatedAtRange(kinds, since, until, limit); chk.E(err) {
		return
	}
	// then collect every serial matching any of the requested tags
	var byTags varint.S
	if byTags, err = d.GetEventSerialsByTagsCreatedAtRange(t, limit); chk.E(err) {
		return
	}
	// only serials present in both sets satisfy kind AND tag
	sers = varint.Intersect(byKinds, byTags)
	return
}
// GetEventSerialsByKindsAuthorsTagsCreatedAtRange finds events that match the given
// kind/author combinations (within the since/until range) and at least one of the given
// tags, by running the two index queries separately and intersecting their serial sets.
func (d *D) GetEventSerialsByKindsAuthorsTagsCreatedAtRange(t filter.TagMap, kinds []int,
	pubkeys []string, since, until *timestamp.Timestamp,
	limit *int) (sers varint.S, err error) {
	// the kind/author/time query yields the most restricted candidate set
	var byKindsAuthors varint.S
	byKindsAuthors, err = d.GetEventSerialsByKindsAuthorsCreatedAtRange(kinds, pubkeys,
		since, until, limit)
	if chk.E(err) {
		return
	}
	// then collect every serial matching any of the requested tags
	var byTags varint.S
	if byTags, err = d.GetEventSerialsByTagsCreatedAtRange(t, limit); chk.E(err) {
		return
	}
	// only serials present in both sets satisfy kind AND author AND tag
	sers = varint.Intersect(byKindsAuthors, byTags)
	return
}
// GetFullIndexesFromSerials loads the decoded FullIndex entry (id, pubkey, kind,
// created_at) for each of the given serials. Serials whose index cannot be read or
// decoded are skipped rather than failing the whole batch.
//
// NOTE(review): a failure on the LAST serial leaves err non-nil for the caller even
// though the intent of the empty error branch below is to skip failures.
func (d *D) GetFullIndexesFromSerials(sers varint.S) (index []indexes.FullIndex, err error) {
	log.I.F("GetFullIndexesFromSerials")
	for _, ser := range sers {
		if err = d.View(func(txn *badger.Txn) (err error) {
			// search prefix: FullIndex prefix + serial; the nil fields leave the rest
			// of the key open for the prefix scan
			buf := new(bytes.Buffer)
			if err = indexes.FullIndexEnc(ser, nil, nil, nil, nil).MarshalWrite(buf); chk.E(err) {
				return
			}
			prf := buf.Bytes()
			it := txn.NewIterator(badger.IteratorOptions{Prefix: prf})
			defer it.Close()
			for it.Seek(prf); it.Valid(); {
				item := it.Item()
				key := item.KeyCopy(nil)
				kBuf := bytes.NewBuffer(key)
				s, t, p, k, c := indexes.FullIndexVars()
				if err = indexes.FullIndexDec(s, t, p, k, c).UnmarshalRead(kBuf); chk.E(err) {
					return
				}
				index = append(index, indexes.FullIndex{
					Ser:       s,
					Id:        t,
					Pubkey:    p,
					Kind:      k,
					CreatedAt: c,
				})
				// only the first match is taken — presumably each serial has exactly
				// one FullIndex entry; this return is also why the loop body never
				// calls it.Next()
				return
			}
			return
		}); chk.E(err) {
			// just skip then.
		}
	}
	return
}

View File

@@ -10,26 +10,25 @@ import (
"x.realy.lol/chk"
"x.realy.lol/database/indexes"
"x.realy.lol/database/indexes/types/fulltext"
"x.realy.lol/database/indexes/types/serial"
"x.realy.lol/database/indexes/types/size"
"x.realy.lol/database/indexes/types/varint"
"x.realy.lol/event"
"x.realy.lol/hex"
"x.realy.lol/kind"
)
type Words struct {
ser *serial.S
ser *varint.V
ev *event.E
wordMap map[string]int
}
func (d *D) GetFulltextKeys(ev *event.E, ser *serial.S) (keys [][]byte, err error) {
func (d *D) GetFulltextKeys(ev *event.E, ser *varint.V) (keys [][]byte, err error) {
w := d.GetWordsFromContent(ev)
for i := range w {
ft := fulltext.New()
ft.FromWord([]byte(i))
pos := size.New()
pos.FromUint32(uint32(w[i]))
pos := varint.New()
pos.FromUint64(uint64(w[i]))
buf := new(bytes.Buffer)
if err = indexes.FullTextWordEnc(ft, pos, ser).MarshalWrite(buf); chk.E(err) {
return
@@ -64,7 +63,7 @@ func (d *D) GetWordsFromContent(ev *event.E) (wordMap map[string]int) {
!IsEntity(w) &&
!bytes.Contains(w, []byte(".")) {
if len(w) == 64 || len(w) == 128 {
if _, err := hex.Dec(string(w)); !chk.E(err) {
if _, err := hex.Dec(string(w)); err == nil {
continue
}
}

View File

@@ -12,8 +12,8 @@ import (
"x.realy.lol/database/indexes/types/kindidx"
"x.realy.lol/database/indexes/types/letter"
"x.realy.lol/database/indexes/types/pubhash"
"x.realy.lol/database/indexes/types/serial"
"x.realy.lol/database/indexes/types/timestamp"
"x.realy.lol/database/indexes/types/varint"
"x.realy.lol/event"
"x.realy.lol/hex"
"x.realy.lol/tags"
@@ -21,15 +21,15 @@ import (
// GetEventIndexes generates a set of indexes for a new event record. The first record is the
// key that should have the binary encoded event as its value.
func (d *D) GetEventIndexes(ev *event.E) (indices [][]byte, ser *serial.S, err error) {
func (d *D) GetEventIndexes(ev *event.E) (indices [][]byte, ser *varint.V, err error) {
// log.I.F("getting event indices for\n%s", ev.Serialize())
// get a new serial
ser = serial.New()
ser = varint.New()
var s uint64
if s, err = d.Serial(); chk.E(err) {
return
}
ser.FromSerial(s)
ser.FromUint64(s)
// create the event id key
id := idhash.New()
var idb []byte
@@ -59,9 +59,9 @@ func (d *D) GetEventIndexes(ev *event.E) (indices [][]byte, ser *serial.S, err e
}
ki := kindidx.FromKind(ev.Kind)
ca := &timestamp.T{}
ca.FromInt64(int64(ev.CreatedAt))
ca.FromInt(ev.CreatedAt.ToInt())
evIFiB := new(bytes.Buffer)
if err = indexes.FullIndexEnc(fid, p, ki, ca, ser).MarshalWrite(evIFiB); chk.E(err) {
if err = indexes.FullIndexEnc(ser, fid, p, ki, ca).MarshalWrite(evIFiB); chk.E(err) {
return
}
indices = append(indices, evIFiB.Bytes())
@@ -97,6 +97,18 @@ func (d *D) GetEventIndexes(ev *event.E) (indices [][]byte, ser *serial.S, err e
return
}
indices = append(indices, evIKiB.Bytes())
// Kind index
evIKcB := new(bytes.Buffer)
if err = indexes.KindCreatedAtEnc(ki, ca, ser).MarshalWrite(evIKcB); chk.E(err) {
return
}
indices = append(indices, evIKcB.Bytes())
// Kind index
evIKpB := new(bytes.Buffer)
if err = indexes.KindPubkeyCreatedAtEnc(ki, p, ca, ser).MarshalWrite(evIKpB); chk.E(err) {
return
}
indices = append(indices, evIKpB.Bytes())
// tags
// TagA index
var atags []tags.Tag_a
@@ -152,6 +164,9 @@ func (d *D) GetEventIndexes(ev *event.E) (indices [][]byte, ser *serial.S, err e
continue
}
ph := pubhash.New()
if len(pkb) == 0 {
continue
}
if err = ph.FromPubkey(pkb); chk.E(err) {
err = nil
continue

View File

@@ -88,9 +88,8 @@ func TestGetEventIndexes(t *testing.T) {
// check the event encodes to binary, decodes, and produces the identical canonical form
binE := new(bytes.Buffer)
if err = ev.MarshalWrite(binE); chk.E(err) {
// log.I.F("bogus tags probably: %s", b)
log.I.F("bogus tags probably: %s", b)
encErrs++
// events that marshal with errors have e and p tag values that aren't hex and should not be accepted
continue
}
ev2 := event.New()
@@ -121,17 +120,16 @@ func TestGetEventIndexes(t *testing.T) {
if indices, _, err = d.GetEventIndexes(ev); chk.E(err) {
t.Fatal(err)
}
// log.I.F("%s", b)
// log.I.S(indices)
log.I.S(indices)
datasize += len(b)
for _, v := range indices {
size += len(v)
}
_ = indices
count++
// if count == 10000 {
// break
// }
if count > 1 {
break
}
}
log.I.F("unmarshaled, verified and indexed %d events in %s, %d Mb of indexes from %d Mb of events, %d Mb as binary, failed verify %d, failed encode %d", count, time.Now().Sub(start), size/units.Mb, datasize/units.Mb, binsize/units.Mb, errs, encErrs)
d.Close()

View File

@@ -2,6 +2,7 @@ package indexes
import (
"io"
"reflect"
"x.realy.lol/chk"
"x.realy.lol/codec"
@@ -14,9 +15,8 @@ import (
"x.realy.lol/database/indexes/types/letter"
"x.realy.lol/database/indexes/types/prefix"
"x.realy.lol/database/indexes/types/pubhash"
"x.realy.lol/database/indexes/types/serial"
"x.realy.lol/database/indexes/types/size"
"x.realy.lol/database/indexes/types/timestamp"
"x.realy.lol/database/indexes/types/varint"
)
type Encs []codec.I
@@ -33,6 +33,11 @@ func New(encoders ...codec.I) (i *T) { return &T{encoders} }
func (t *T) MarshalWrite(w io.Writer) (err error) {
for _, e := range t.Encs {
if e == nil || reflect.ValueOf(e).IsNil() {
// allow a field to be empty, as is needed for search indexes to create search
// prefixes.
return
}
if err = e.MarshalWrite(w); chk.E(err) {
return
}
@@ -49,249 +54,282 @@ func (t *T) UnmarshalRead(r io.Reader) (err error) {
return
}
func EventVars() (ser *serial.S) {
ser = serial.New()
func EventVars() (ser *varint.V) {
ser = varint.New()
return
}
func EventEnc(ser *serial.S) (enc *T) {
func EventEnc(ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.Event), ser)
}
func EventDec(ser *serial.S) (enc *T) {
func EventDec(ser *varint.V) (enc *T) {
return New(prefix.New(), ser)
}
func IdVars() (id *idhash.T, ser *serial.S) {
func IdVars() (id *idhash.T, ser *varint.V) {
id = idhash.New()
ser = serial.New()
ser = varint.New()
return
}
func IdEnc(id *idhash.T, ser *serial.S) (enc *T) {
func IdEnc(id *idhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.Id), id, ser)
}
func IdSearch(id *idhash.T) (enc *T) {
return New(prefix.New(prefixes.Id), id)
}
func IdDec(id *idhash.T, ser *serial.S) (enc *T) {
func IdDec(id *idhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(), id, ser)
}
func FullIndexVars() (t *fullid.T, p *pubhash.T, ki *kindidx.T,
ca *timestamp.T, ser *serial.S) {
// FullIndex collects the decoded fields of a FullIndex record: the event's
// serial together with the metadata stored alongside it so results can be
// sorted and filtered without decoding the event itself.
type FullIndex struct {
// Ser is the event serial (the primary key of the stored event).
Ser *varint.V
// Id is the full 32-byte event ID.
Id *fullid.T
// Pubkey is the truncated hash of the author's public key.
Pubkey *pubhash.T
// Kind is the event kind number.
Kind *kindidx.T
// CreatedAt is the event's created_at timestamp.
CreatedAt *timestamp.T
}
func FullIndexVars() (ser *varint.V, t *fullid.T, p *pubhash.T, ki *kindidx.T,
ca *timestamp.T) {
ser = varint.New()
t = fullid.New()
p = pubhash.New()
ki = kindidx.FromKind(0)
ca = &timestamp.T{}
ser = serial.New()
return
}
func FullIndexEnc(t *fullid.T, p *pubhash.T, ki *kindidx.T,
ca *timestamp.T, ser *serial.S) (enc *T) {
return New(prefix.New(prefixes.FullIndex), t, p, ki, ca, ser)
func FullIndexEnc(ser *varint.V, t *fullid.T, p *pubhash.T, ki *kindidx.T,
ca *timestamp.T) (enc *T) {
return New(prefix.New(prefixes.FullIndex), ser, t, p, ki, ca)
}
func FullIndexDec(t *fullid.T, p *pubhash.T, ki *kindidx.T,
ca *timestamp.T, ser *serial.S) (enc *T) {
return New(prefix.New(), t, p, ki, ca, ser)
func FullIndexDec(ser *varint.V, t *fullid.T, p *pubhash.T, ki *kindidx.T,
ca *timestamp.T) (enc *T) {
return New(prefix.New(), ser, t, p, ki, ca)
}
func PubkeyVars() (p *pubhash.T, ser *serial.S) {
func PubkeyVars() (p *pubhash.T, ser *varint.V) {
p = pubhash.New()
ser = serial.New()
ser = varint.New()
return
}
func PubkeyEnc(p *pubhash.T, ser *serial.S) (enc *T) {
func PubkeyEnc(p *pubhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.Pubkey), p, ser)
}
func PubkeyDec(p *pubhash.T, ser *serial.S) (enc *T) {
func PubkeyDec(p *pubhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(), p, ser)
}
func PubkeyCreatedAtVars() (p *pubhash.T, ca *timestamp.T, ser *serial.S) {
func PubkeyCreatedAtVars() (p *pubhash.T, ca *timestamp.T, ser *varint.V) {
p = pubhash.New()
ca = &timestamp.T{}
ser = serial.New()
ser = varint.New()
return
}
func PubkeyCreatedAtEnc(p *pubhash.T, ca *timestamp.T, ser *serial.S) (enc *T) {
func PubkeyCreatedAtEnc(p *pubhash.T, ca *timestamp.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.PubkeyCreatedAt), p, ca, ser)
}
func PubkeyCreatedAtDec(p *pubhash.T, ca *timestamp.T, ser *serial.S) (enc *T) {
func PubkeyCreatedAtDec(p *pubhash.T, ca *timestamp.T, ser *varint.V) (enc *T) {
return New(prefix.New(), p, ca, ser)
}
func CreatedAtVars() (ca *timestamp.T, ser *serial.S) {
func CreatedAtVars() (ca *timestamp.T, ser *varint.V) {
ca = &timestamp.T{}
ser = serial.New()
ser = varint.New()
return
}
func CreatedAtEnc(ca *timestamp.T, ser *serial.S) (enc *T) {
func CreatedAtEnc(ca *timestamp.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.CreatedAt), ca, ser)
}
func CreatedAtDec(ca *timestamp.T, ser *serial.S) (enc *T) {
func CreatedAtDec(ca *timestamp.T, ser *varint.V) (enc *T) {
return New(prefix.New(), ca, ser)
}
func FirstSeenVars() (ser *serial.S, ts *timestamp.T) {
func FirstSeenVars() (ser *varint.V, ts *timestamp.T) {
ts = &timestamp.T{}
ser = serial.New()
ser = varint.New()
return
}
func FirstSeenEnc(ser *serial.S, ts *timestamp.T) (enc *T) {
func FirstSeenEnc(ser *varint.V, ts *timestamp.T) (enc *T) {
return New(prefix.New(prefixes.FirstSeen), ser, ts)
}
func FirstSeenDec(ser *serial.S, ts *timestamp.T) (enc *T) {
func FirstSeenDec(ser *varint.V, ts *timestamp.T) (enc *T) {
return New(prefix.New(), ser, ts)
}
func KindVars() (ki *kindidx.T, ser *serial.S) {
func KindVars() (ki *kindidx.T, ser *varint.V) {
ki = kindidx.FromKind(0)
ser = serial.New()
ser = varint.New()
return
}
func KindEnc(ki *kindidx.T, ser *serial.S) (enc *T) {
func KindEnc(ki *kindidx.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.Kind), ki, ser)
}
func KindDec(ki *kindidx.T, ser *serial.S) (enc *T) {
func KindDec(ki *kindidx.T, ser *varint.V) (enc *T) {
return New(prefix.New(), ki, ser)
}
// KindCreatedAtVars allocates fresh, zero-valued components for a
// KindCreatedAt index entry: kind, created_at timestamp and event serial.
func KindCreatedAtVars() (ki *kindidx.T, ca *timestamp.T, ser *varint.V) {
	return kindidx.FromKind(0), &timestamp.T{}, varint.New()
}
// KindCreatedAtEnc builds the key encoder for a KindCreatedAt index entry:
// [ prefix ][ kind ][ created_at ][ serial ].
func KindCreatedAtEnc(ki *kindidx.T, ca *timestamp.T, ser *varint.V) (enc *T) {
	enc = New(prefix.New(prefixes.KindCreatedAt), ki, ca, ser)
	return
}
// KindCreatedAtDec builds the key decoder for a KindCreatedAt index entry;
// the prefix is left empty because it has already been consumed/matched.
func KindCreatedAtDec(ki *kindidx.T, ca *timestamp.T, ser *varint.V) (enc *T) {
	enc = New(prefix.New(), ki, ca, ser)
	return
}
// KindPubkeyCreatedAtVars allocates fresh, zero-valued components for a
// KindPubkeyCreatedAt index entry: kind, pubkey hash, created_at timestamp
// and event serial. Every returned component is non-nil so the caller can
// populate and encode it directly.
func KindPubkeyCreatedAtVars() (ki *kindidx.T, p *pubhash.T, ca *timestamp.T, ser *varint.V) {
	ki = kindidx.FromKind(0)
	// BUG FIX: p and ca were previously never initialized, so callers
	// received nil components — unlike every sibling ...Vars helper, which
	// initializes all of its return values.
	p = pubhash.New()
	ca = &timestamp.T{}
	ser = varint.New()
	return
}
// KindPubkeyCreatedAtEnc builds the key encoder for a KindPubkeyCreatedAt
// index entry: [ prefix ][ kind ][ pubkey hash ][ created_at ][ serial ].
func KindPubkeyCreatedAtEnc(ki *kindidx.T, p *pubhash.T, ca *timestamp.T, ser *varint.V) (enc *T) {
	enc = New(prefix.New(prefixes.KindPubkeyCreatedAt), ki, p, ca, ser)
	return
}
// KindPubkeyCreatedAtDec builds the key decoder for a KindPubkeyCreatedAt
// index entry; the prefix is left empty because it has already been
// consumed/matched.
func KindPubkeyCreatedAtDec(ki *kindidx.T, p *pubhash.T, ca *timestamp.T, ser *varint.V) (enc *T) {
	enc = New(prefix.New(), ki, p, ca, ser)
	return
}
type TagA struct {
Ki *kindidx.T
P *pubhash.T
Id *identhash.T
Ser *serial.S
Ser *varint.V
}
func TagAVars() (ki *kindidx.T, p *pubhash.T, id *identhash.T, ser *serial.S) {
func TagAVars() (ki *kindidx.T, p *pubhash.T, id *identhash.T, ser *varint.V) {
ki = kindidx.FromKind(0)
p = pubhash.New()
id = identhash.New()
ser = serial.New()
ser = varint.New()
return
}
func TagAEnc(ki *kindidx.T, p *pubhash.T, id *identhash.T, ser *serial.S) (enc *T) {
func TagAEnc(ki *kindidx.T, p *pubhash.T, id *identhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.TagA), ki, p, id, ser)
}
func TagADec(ki *kindidx.T, p *pubhash.T, id *identhash.T, ser *serial.S) (enc *T) {
func TagADec(ki *kindidx.T, p *pubhash.T, id *identhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(), ki, p, id, ser)
}
func TagEventVars() (id *idhash.T, ser *serial.S) {
func TagEventVars() (id *idhash.T, ser *varint.V) {
id = idhash.New()
ser = serial.New()
ser = varint.New()
return
}
func TagEventEnc(id *idhash.T, ser *serial.S) (enc *T) {
func TagEventEnc(id *idhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.TagEvent), id, ser)
}
func TagEventDec(id *idhash.T, ser *serial.S) (enc *T) {
func TagEventDec(id *idhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(), id, ser)
}
func TagPubkeyVars() (p *pubhash.T, ser *serial.S) {
func TagPubkeyVars() (p *pubhash.T, ser *varint.V) {
p = pubhash.New()
ser = serial.New()
ser = varint.New()
return
}
func TagPubkeyEnc(p *pubhash.T, ser *serial.S) (enc *T) {
func TagPubkeyEnc(p *pubhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.TagPubkey), p, ser)
}
func TagPubkeyDec(p *pubhash.T, ser *serial.S) (enc *T) {
func TagPubkeyDec(p *pubhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(), p, ser)
}
func TagHashtagVars() (hashtag *identhash.T, ser *serial.S) {
func TagHashtagVars() (hashtag *identhash.T, ser *varint.V) {
hashtag = identhash.New()
ser = serial.New()
ser = varint.New()
return
}
func TagHashtagEnc(hashtag *identhash.T, ser *serial.S) (enc *T) {
func TagHashtagEnc(hashtag *identhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.TagHashtag), hashtag, ser)
}
func TagHashtagDec(hashtag *identhash.T, ser *serial.S) (enc *T) {
func TagHashtagDec(hashtag *identhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(), hashtag, ser)
}
func TagIdentifierVars() (ident *identhash.T, ser *serial.S) {
func TagIdentifierVars() (ident *identhash.T, ser *varint.V) {
ident = identhash.New()
ser = serial.New()
ser = varint.New()
return
}
func TagIdentifierEnc(ident *identhash.T, ser *serial.S) (enc *T) {
func TagIdentifierEnc(ident *identhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.TagIdentifier), ident, ser)
}
func TagIdentifierDec(ident *identhash.T, ser *serial.S) (enc *T) {
func TagIdentifierDec(ident *identhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(), ident, ser)
}
func TagLetterVars() (l *letter.T, val *identhash.T, ser *serial.S) {
func TagLetterVars() (l *letter.T, val *identhash.T, ser *varint.V) {
l = letter.New(0)
val = identhash.New()
ser = serial.New()
ser = varint.New()
return
}
func TagLetterEnc(l *letter.T, val *identhash.T, ser *serial.S) (enc *T) {
func TagLetterEnc(l *letter.T, val *identhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.TagLetter), l, val, ser)
}
func TagLetterDec(l *letter.T, val *identhash.T, ser *serial.S) (enc *T) {
func TagLetterDec(l *letter.T, val *identhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(), l, val, ser)
}
func TagProtectedVars() (p *pubhash.T, ser *serial.S) {
func TagProtectedVars() (p *pubhash.T, ser *varint.V) {
p = pubhash.New()
ser = serial.New()
ser = varint.New()
return
}
func TagProtectedEnc(p *pubhash.T, ser *serial.S) (enc *T) {
func TagProtectedEnc(p *pubhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.TagProtected), p, ser)
}
func TagProtectedDec(p *pubhash.T, ser *serial.S) (enc *T) {
func TagProtectedDec(p *pubhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(), p, ser)
}
func TagNonstandardVars() (key, value *identhash.T, ser *serial.S) {
func TagNonstandardVars() (key, value *identhash.T, ser *varint.V) {
key = identhash.New()
value = identhash.New()
ser = serial.New()
ser = varint.New()
return
}
func TagNonstandardEnc(key, value *identhash.T, ser *serial.S) (enc *T) {
func TagNonstandardEnc(key, value *identhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.TagNonstandard), key, value, ser)
}
func TagNonstandardDec(key, value *identhash.T, ser *serial.S) (enc *T) {
func TagNonstandardDec(key, value *identhash.T, ser *varint.V) (enc *T) {
return New(prefix.New(), key, value, ser)
}
func FullTextWordVars() (fw *fulltext.T, pos *size.T, ser *serial.S) {
func FullTextWordVars() (fw *fulltext.T, pos *varint.V, ser *varint.V) {
fw = fulltext.New()
pos = size.New()
ser = serial.New()
pos = varint.New()
ser = varint.New()
return
}
func FullTextWordEnc(fw *fulltext.T, pos *size.T, ser *serial.S) (enc *T) {
func FullTextWordEnc(fw *fulltext.T, pos *varint.V, ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.FulltextWord), fw, pos, ser)
}
func FullTextWordDec(fw *fulltext.T, pos *size.T, ser *serial.S) (enc *T) {
func FullTextWordDec(fw *fulltext.T, pos *varint.V, ser *varint.V) (enc *T) {
return New(prefix.New(), fw, pos, ser)
}
func LastAccessedVars() (ser *serial.S) {
ser = serial.New()
func LastAccessedVars() (ser *varint.V) {
ser = varint.New()
return
}
func LastAccessedEnc(ser *serial.S) (enc *T) {
func LastAccessedEnc(ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.LastAccessed), ser)
}
func LastAccessedDec(ser *serial.S) (enc *T) {
func LastAccessedDec(ser *varint.V) (enc *T) {
return New(prefix.New(), ser)
}
func AccessCounterVars() (ser *serial.S) {
ser = serial.New()
func AccessCounterVars() (ser *varint.V) {
ser = varint.New()
return
}
func AccessCounterEnc(ser *serial.S) (enc *T) {
func AccessCounterEnc(ser *varint.V) (enc *T) {
return New(prefix.New(prefixes.AccessCounter), ser)
}
func AccessCounterDec(ser *serial.S) (enc *T) {
func AccessCounterDec(ser *varint.V) (enc *T) {
return New(prefix.New(), ser)
}

View File

@@ -13,13 +13,14 @@ import (
"x.realy.lol/database/indexes/prefixes"
"x.realy.lol/database/indexes/types/prefix"
"x.realy.lol/ec/schnorr"
"x.realy.lol/log"
)
func TestEvent(t *testing.T) {
var err error
for range 100 {
ser := EventVars()
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
evIdx := EventEnc(ser)
evIdx.MarshalWrite(buf)
@@ -30,7 +31,7 @@ func TestEvent(t *testing.T) {
if err = evIdx2.UnmarshalRead(buf2); chk.E(err) {
t.Fatal(err)
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -58,7 +59,7 @@ func TestId(t *testing.T) {
if err = id.FromId(frand.Bytes(sha256.Size)); chk.E(err) {
t.Fatal(err)
}
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
evIdx := IdEnc(id, ser)
evIdx.MarshalWrite(buf)
@@ -72,7 +73,7 @@ func TestId(t *testing.T) {
if !bytes.Equal(id.Bytes(), id2.Bytes()) {
t.Fatal("failed to recover same value as input")
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -81,7 +82,7 @@ func TestId(t *testing.T) {
func TestFullIndex(t *testing.T) {
var err error
for range 100 {
id, p, ki, ca, ser := FullIndexVars()
ser, id, p, ki, ca := FullIndexVars()
if err = id.FromId(frand.Bytes(sha256.Size)); chk.E(err) {
t.Fatal(err)
}
@@ -89,21 +90,24 @@ func TestFullIndex(t *testing.T) {
t.Fatal(err)
}
ki.Set(frand.Intn(math.MaxUint16))
ca.FromInt64(time.Now().Unix())
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ca.FromInt(int(time.Now().Unix()))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := FullIndexEnc(id, p, ki, ca, ser)
fi.MarshalWrite(buf)
fi := FullIndexEnc(ser, id, p, ki, ca)
if err = fi.MarshalWrite(buf); chk.E(err) {
t.Fatal(err)
}
// log.I.S(fi)
bin := buf.Bytes()
// log.I.S(bin)
buf2 := bytes.NewBuffer(bin)
id2, p2, ki2, ca2, ser2 := FullIndexVars()
fi2 := FullIndexDec(id2, p2, ki2, ca2, ser2)
ser2, id2, p2, ki2, ca2 := FullIndexVars()
fi2 := FullIndexDec(ser2, id2, p2, ki2, ca2)
if err = fi2.UnmarshalRead(buf2); chk.E(err) {
t.Fatal(err)
}
if !bytes.Equal(id.Bytes(), id2.Bytes()) {
log.I.S(id, id2)
t.Fatal("failed to recover same value as input")
}
if !bytes.Equal(p.Bytes(), p2.Bytes()) {
@@ -125,7 +129,7 @@ func TestPubkey(t *testing.T) {
if err = p.FromPubkey(frand.Bytes(schnorr.PubKeyBytesLen)); chk.E(err) {
t.Fatal(err)
}
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := PubkeyEnc(p, ser)
fi.MarshalWrite(buf)
@@ -139,7 +143,7 @@ func TestPubkey(t *testing.T) {
t.Fatal(err)
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -152,8 +156,8 @@ func TestPubkeyCreatedAt(t *testing.T) {
if err = p.FromPubkey(frand.Bytes(schnorr.PubKeyBytesLen)); chk.E(err) {
t.Fatal(err)
}
ca.FromInt64(time.Now().Unix())
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ca.FromInt(int(time.Now().Unix()))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := PubkeyCreatedAtEnc(p, ca, ser)
fi.MarshalWrite(buf)
@@ -167,7 +171,7 @@ func TestPubkeyCreatedAt(t *testing.T) {
if ca.ToTimestamp() != ca2.ToTimestamp() {
t.Fatal("failed to recover same value as input")
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -177,8 +181,8 @@ func TestCreatedAt(t *testing.T) {
var err error
for range 100 {
ca, ser := CreatedAtVars()
ca.FromInt64(time.Now().Unix())
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ca.FromInt(int(time.Now().Unix()))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := CreatedAtEnc(ca, ser)
fi.MarshalWrite(buf)
@@ -192,7 +196,7 @@ func TestCreatedAt(t *testing.T) {
if ca.ToTimestamp() != ca2.ToTimestamp() {
t.Fatal("failed to recover same value as input")
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -202,8 +206,8 @@ func TestFirstSeen(t *testing.T) {
var err error
for range 100 {
ser, ts := FirstSeenVars()
ts.FromInt64(time.Now().Unix())
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ts.FromInt(int(time.Now().Unix()))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fs := FirstSeenEnc(ser, ts)
fs.MarshalWrite(buf)
@@ -214,7 +218,7 @@ func TestFirstSeen(t *testing.T) {
if err = fs2.UnmarshalRead(buf2); chk.E(err) {
t.Fatal(err)
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
if ts.ToTimestamp() != ca2.ToTimestamp() {
@@ -228,7 +232,7 @@ func TestKind(t *testing.T) {
for range 100 {
ki, ser := KindVars()
ki.Set(frand.Intn(math.MaxUint16))
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
kIdx := KindEnc(ki, ser)
kIdx.MarshalWrite(buf)
@@ -242,7 +246,7 @@ func TestKind(t *testing.T) {
if ki.ToKind() != ki2.ToKind() {
t.Fatal("failed to recover same value as input")
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -259,7 +263,7 @@ func TestTagA(t *testing.T) {
t.Fatal(err)
}
ki.Set(frand.Intn(math.MaxUint16))
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := TagAEnc(ki, p, id, ser)
fi.MarshalWrite(buf)
@@ -279,7 +283,7 @@ func TestTagA(t *testing.T) {
if ki.ToKind() != ki2.ToKind() {
t.Fatal("failed to recover same value as input")
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -292,7 +296,7 @@ func TestTagEvent(t *testing.T) {
if err = id.FromId(frand.Bytes(sha256.Size)); chk.E(err) {
t.Fatal(err)
}
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
evIdx := TagEventEnc(id, ser)
evIdx.MarshalWrite(buf)
@@ -306,7 +310,7 @@ func TestTagEvent(t *testing.T) {
if !bytes.Equal(id.Bytes(), id2.Bytes()) {
t.Fatal("failed to recover same value as input")
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -319,7 +323,7 @@ func TestTagPubkey(t *testing.T) {
if err = p.FromPubkey(frand.Bytes(schnorr.PubKeyBytesLen)); chk.E(err) {
t.Fatal(err)
}
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := TagPubkeyEnc(p, ser)
fi.MarshalWrite(buf)
@@ -332,7 +336,7 @@ func TestTagPubkey(t *testing.T) {
if err = fi2.UnmarshalRead(buf2); chk.E(err) {
t.Fatal(err)
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -345,7 +349,7 @@ func TestTagHashtag(t *testing.T) {
if err = id.FromIdent(frand.Bytes(frand.Intn(16) + 8)); chk.E(err) {
t.Fatal(err)
}
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := TagHashtagEnc(id, ser)
fi.MarshalWrite(buf)
@@ -359,7 +363,7 @@ func TestTagHashtag(t *testing.T) {
if !bytes.Equal(id.Bytes(), id2.Bytes()) {
t.Fatal("failed to recover same value as input")
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -372,7 +376,7 @@ func TestTagIdentifier(t *testing.T) {
if err = id.FromIdent(frand.Bytes(frand.Intn(16) + 8)); chk.E(err) {
t.Fatal(err)
}
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := TagIdentifierEnc(id, ser)
fi.MarshalWrite(buf)
@@ -386,7 +390,7 @@ func TestTagIdentifier(t *testing.T) {
if !bytes.Equal(id.Bytes(), id2.Bytes()) {
t.Fatal("failed to recover same value as input")
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -401,7 +405,7 @@ func TestTagLetter(t *testing.T) {
}
lb := frand.Bytes(1)
l.Set(lb[0])
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := TagLetterEnc(l, id, ser)
fi.MarshalWrite(buf)
@@ -418,7 +422,7 @@ func TestTagLetter(t *testing.T) {
if !bytes.Equal(id.Bytes(), id2.Bytes()) {
t.Fatal("failed to recover same value as input")
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -431,7 +435,7 @@ func TestTagProtected(t *testing.T) {
if err = p.FromPubkey(frand.Bytes(schnorr.PubKeyBytesLen)); chk.E(err) {
t.Fatal(err)
}
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := TagProtectedEnc(p, ser)
fi.MarshalWrite(buf)
@@ -445,7 +449,7 @@ func TestTagProtected(t *testing.T) {
t.Fatal(err)
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -461,7 +465,7 @@ func TestTagNonstandard(t *testing.T) {
if err = v.FromIdent(frand.Bytes(frand.Intn(16) + 8)); chk.E(err) {
t.Fatal(err)
}
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := TagNonstandardEnc(k, v, ser)
fi.MarshalWrite(buf)
@@ -478,7 +482,7 @@ func TestTagNonstandard(t *testing.T) {
if !bytes.Equal(v.Bytes(), v2.Bytes()) {
t.Fatal("failed to recover same value as input")
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -489,11 +493,13 @@ func TestFulltextWord(t *testing.T) {
for range 100 {
fw, pos, ser := FullTextWordVars()
fw.FromWord(frand.Bytes(frand.Intn(10) + 5))
pos.FromUint32(uint32(frand.Intn(math.MaxUint32)))
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
pos.FromUint64(uint64(frand.Intn(math.MaxUint32)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := FullTextWordEnc(fw, pos, ser)
fi.MarshalWrite(buf)
if err = fi.MarshalWrite(buf); chk.E(err) {
t.Fatal(err)
}
bin := buf.Bytes()
buf2 := bytes.NewBuffer(bin)
fw2, pos2, ser2 := FullTextWordVars()
@@ -507,7 +513,7 @@ func TestFulltextWord(t *testing.T) {
if pos.ToUint32() != pos2.ToUint32() {
t.Fatal("failed to recover same value as input")
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -517,7 +523,7 @@ func TestLastAccessed(t *testing.T) {
var err error
for range 100 {
ser := LastAccessedVars()
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := LastAccessedEnc(ser)
fi.MarshalWrite(buf)
@@ -528,7 +534,7 @@ func TestLastAccessed(t *testing.T) {
if err = fi2.UnmarshalRead(buf2); chk.E(err) {
t.Fatal(err)
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}
@@ -538,7 +544,7 @@ func TestAccessCounter(t *testing.T) {
var err error
for range 100 {
ser := AccessCounterVars()
ser.FromSerial(uint64(frand.Intn(math.MaxInt64)))
ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
buf := new(bytes.Buffer)
fi := AccessCounterEnc(ser)
fi.MarshalWrite(buf)
@@ -549,7 +555,7 @@ func TestAccessCounter(t *testing.T) {
if err = fi2.UnmarshalRead(buf2); chk.E(err) {
t.Fatal(err)
}
if ser.ToSerial() != ser2.ToSerial() {
if ser.ToUint64() != ser2.ToUint64() {
t.Fatal("failed to recover same value as input")
}
}

View File

@@ -8,8 +8,6 @@ const Len = 2
type I string
// the following enumerations are separate from the prefix value for simpler reference.
const (
// Event is the whole event stored in binary format
//
@@ -21,42 +19,68 @@ const (
// [ prefix ] [ configuration in JSON format ]
Config
// Id contains a truncated 8 byte hash of an event index
// Id contains a truncated 8 byte hash of an event index. This is the secondary key of an
// event, the primary key is the serial found in the Event.
//
// [ prefix ][ 8 bytes truncated hash of Id ][ 8 serial ]
Id
// FullIndex is an index designed to enable sorting and filtering of results found via
// other indexes.
// other indexes, without having to decode the event.
//
// [ prefix ][ 32 bytes full event ID ][ 8 bytes truncated hash of pubkey ][ 2 bytes kind ][ 8 bytes created_at timestamp ][ 8 serial ]
// [ prefix ][ 8 serial ][ 32 bytes full event ID ][ 8 bytes truncated hash of pubkey ][ 2 bytes kind ][ 8 bytes created_at timestamp ]
FullIndex
// ------------------------------------------------------------------------
//
// The following are search indexes. This first category are primarily for kind, pubkey and
// created_at timestamps. These compose a set of 3 primary indexes alone, two that combine
// with the timestamp, and a third that combines all three, covering every combination of
// these.
_
// Pubkey is an index for searching for events authored by a pubkey.
//
// [ prefix ][ 8 bytes truncated hash of pubkey ][ 8 serial ]
Pubkey
// Kind is an index of event kind numbers.
//
// [ prefix ][ 2 bytes kind number ][ 8 serial ]
Kind
// CreatedAt is an index that allows search the timestamp on the event.
//
// [ prefix ][ created_at 8 bytes timestamp ][ 8 serial ]
CreatedAt
// PubkeyCreatedAt is a composite index that allows search by pubkey filtered by
// created_at.
//
// [ prefix ][ 8 bytes truncated hash of pubkey ][ 8 bytes created_at ][ 8 serial ]
PubkeyCreatedAt
// CreatedAt is an index that allows search the timestamp on the event.
// KindCreatedAt is an index of kind and created_at timestamp.
//
// [ prefix ][ created_at 8 bytes timestamp ][ 8 serial ]
CreatedAt
// [ prefix ][ 2 bytes kind number ][ created_at 8 bytes timestamp ][ 8 bytes serial ]
KindCreatedAt
// FirstSeen is an index that records the timestamp of when the event was first seen.
// KindPubkeyCreatedAt is an index of kind and created_at timestamp.
//
// [ prefix ][ 8 serial ][ 8 byte timestamp ]
FirstSeen
// [ prefix ][ 2 bytes kind number ][ 8 bytes hash of pubkey ][ created_at 8 bytes timestamp ][ 8 bytes serial ]
KindPubkeyCreatedAt
// Kind is an index of event kind numbers.
// ------------------------------------------------------------------------
//
// [ prefix ][ 2 bytes kind number ][ 8 serial ]
Kind
// The following are search indexes for tags, which are references to other categories,
// including events, replaceable event identities (d tags), public keys, hashtags, and
// arbitrary other kinds of keys including standard single letter and nonstandard word keys.
//
// Combining them with the previous set of 6 indexes involves using one query from the
// previous section according to the filter, and one or more of these tag indexes, to
// acquire a list of event serials from each query, and then intersecting the result sets
// from each one to yield the matches.
_
// TagA is an index of `a` tags, which contain kind, pubkey and hash of an arbitrary
// text, used to create an abstract reference for a multiplicity of replaceable event with a
@@ -66,6 +90,13 @@ const (
// [ prefix ][ 2 bytes kind number ][ 8 bytes hash of pubkey ][ 8 bytes hash of label ][ serial]
TagA
// TagIdentifier is a `d` tag identifier that creates an arbitrary label that can be used
// to refer to an event. This is used for parameterized replaceable events to identify them
// with `a` tags for reference.
//
// [ prefix ][ 8 byte hash of identifier ][ 8 serial ]
TagIdentifier
// TagEvent is a reference to an event.
//
// [ prefix ][ 8 bytes truncated hash of event Id ][ 8 serial ]
@@ -82,13 +113,6 @@ const (
// [ prefix ][ 8 bytes hash of hashtag ][ 8 serial ]
TagHashtag
// TagIdentifier is a `d` tag identifier that creates an arbitrary label that can be used
// to refer to an event. This is used for parameterized replaceable events to identify them
// with `a` tags for reference.
//
// [ prefix ][ 8 byte hash of identifier ][ 8 serial ]
TagIdentifier
// TagLetter covers all other types of single letter mandatory indexed tags, including
// such as `d` for identifiers and things like `m` for mimetype and other kinds of
// references, the actual letter is the second byte. The value is a truncated 8 byte hash.
@@ -108,6 +132,9 @@ const (
// [ prefix ][ 8 byte hash of key ][ 8 byte hash of value ][ 8 serial ]
TagNonstandard
// ------------------------------------------------------------------------
_
// FulltextWord is a fulltext word index, the index contains the whole word. This will
// also be searchable via the use of annotations in the filter search as whole match for the
// word and any word containing the word (contains), and ^ prefix indicates a prefix match,
@@ -117,6 +144,17 @@ const (
// [ prefix ][ varint word len ][ full word ][ 4 bytes word position in content field ][ 8 serial ]
FulltextWord
// ------------------------------------------------------------------------
//
// The following keys are event metadata that are needed to enable other types of
// functionality such as garbage collection and metadata queries.
_
// FirstSeen is an index that records the timestamp of when the event was first seen.
//
// [ prefix ][ 8 serial ][ 8 byte timestamp ]
FirstSeen
// LastAccessed is an index that stores the last time the referenced event was returned
// in a result.
//
@@ -132,43 +170,6 @@ const (
func (i I) Write(w io.Writer) (n int, err error) { return w.Write([]byte(i)) }
// func Identify(r io.Reader) (i int, err error) {
// var prefixes = map[I]int{
// "ev": Event,
// "cf": Config,
// "id": Id,
// "fi": FullIndex,
// "pk": Pubkey,
// "pc": PubkeyCreatedAt,
// "ca": CreatedAt,
// "fs": FirstSeen,
// "ki": Kind,
// "ta": TagA,
// "te": TagEvent,
// "tp": TagPubkey,
// "tt": TagHashtag,
// "td": TagIdentifier,
// "t*": TagLetter,
// "t-": TagProtected,
// "t?": TagNonstandard,
// "fw": FulltextWord,
// "la": LastAccessed,
// "ac": AccessCounter,
// }
// b := make([]byte, Len)
// if _, err = r.Read(b); chk.E(err) {
// return
// }
// s := string(b)
// for ii, v := range prefixes {
// if ii == I(s) {
// return v, nil
// }
// }
// err = errorf.E("no match to known prefix '%s'", s)
// return
// }
func Prefix(prf int) (i I) {
switch prf {
case Event:
@@ -189,6 +190,10 @@ func Prefix(prf int) (i I) {
return "fs"
case Kind:
return "ki"
case KindCreatedAt:
return "kc"
case KindPubkeyCreatedAt:
return "kp"
case TagA:
return "ta"
case TagEvent:

View File

@@ -0,0 +1,41 @@
package number
import (
"encoding/binary"
"io"
)
// Uint16 is a codec for encoding and decoding 16-bit unsigned integers.
type Uint16 struct {
value uint16
}
// Set sets the value as a uint16.
func (c *Uint16) Set(value uint16) {
c.value = value
}
// Get gets the value as a uint16.
func (c *Uint16) Get() uint16 {
return c.value
}
// SetInt sets the value as an int, converting it to uint16. Truncates values outside uint16 range (0-65535).
func (c *Uint16) SetInt(value int) {
c.value = uint16(value)
}
// GetInt gets the value as an int, converted from uint16.
func (c *Uint16) GetInt() int {
return int(c.value)
}
// MarshalWrite writes the uint16 value to the provided writer in BigEndian order.
func (c *Uint16) MarshalWrite(w io.Writer) error {
return binary.Write(w, binary.BigEndian, c.value)
}
// UnmarshalRead reads a uint16 value from the provided reader in BigEndian order.
func (c *Uint16) UnmarshalRead(r io.Reader) error {
return binary.Read(r, binary.BigEndian, &c.value)
}

View File

@@ -0,0 +1,66 @@
package number
import (
"bytes"
"math"
"testing"
"lukechampine.com/frand"
)
// TestUint16 exercises the Uint16 codec: the uint16 and int setter/getter
// pairs, the BigEndian wire format, and the encode/decode round trip.
func TestUint16(t *testing.T) {
	for i := 0; i < 100; i++ { // run 100 times with random values
		randomUint16 := uint16(frand.Intn(math.MaxUint16))
		randomInt := int(randomUint16)
		encodedUint16 := new(Uint16)
		// uint16 setter/getter round trip.
		encodedUint16.Set(randomUint16)
		if encodedUint16.Get() != randomUint16 {
			t.Fatalf("Get mismatch: got %d, expected %d", encodedUint16.Get(), randomUint16)
		}
		// int setter/getter round trip (value is in range, so no truncation).
		encodedUint16.SetInt(randomInt)
		if encodedUint16.GetInt() != randomInt {
			t.Fatalf("GetInt mismatch: got %d, expected %d", encodedUint16.GetInt(), randomInt)
		}
		// Encode to bytes.
		bufEnc := new(bytes.Buffer)
		if err := encodedUint16.MarshalWrite(bufEnc); err != nil {
			t.Fatalf("MarshalWrite failed: %v", err)
		}
		encoded := bufEnc.Bytes()
		// BUG FIX: the previous test compared encoded with bufEnc.Bytes(),
		// which is equal by construction and so could never fail. Check the
		// actual wire format instead: exactly 2 bytes, BigEndian.
		expected := []byte{byte(randomUint16 >> 8), byte(randomUint16)}
		if !bytes.Equal(encoded, expected) {
			t.Fatalf("Byte encoding mismatch: got %v, expected %v", encoded, expected)
		}
		// Decode back and compare with the input.
		decodedUint16 := new(Uint16)
		if err := decodedUint16.UnmarshalRead(bytes.NewBuffer(encoded)); err != nil {
			t.Fatalf("UnmarshalRead failed: %v", err)
		}
		if decodedUint16.Get() != randomUint16 {
			t.Fatalf("Decoded value mismatch: got %d, expected %d", decodedUint16.Get(), randomUint16)
		}
	}
}

View File

@@ -0,0 +1,78 @@
package number
import (
"errors"
"io"
)
// MaxUint24 is the maximum value of a 24-bit unsigned integer: 2^24 - 1.
const MaxUint24 uint32 = 1<<24 - 1

// Uint24 is a codec for encoding and decoding 24-bit unsigned integers as
// 3 bytes in BigEndian order.
type Uint24 struct {
	value uint32
}

// SetUint24 sets the value as a 24-bit unsigned integer.
// If the value exceeds the maximum allowable value for 24 bits, it returns an error.
func (c *Uint24) SetUint24(value uint32) error {
	if value > MaxUint24 {
		return errors.New("value exceeds 24-bit range")
	}
	c.value = value
	return nil
}

// Uint24 gets the value as a 24-bit unsigned integer.
func (c *Uint24) Uint24() uint32 {
	return c.value
}

// SetInt sets the value as an int, converting it to a 24-bit unsigned integer.
// If the value is out of the 24-bit range, it returns an error.
func (c *Uint24) SetInt(value int) error {
	// Compare in int space: the previous check converted with uint32(value)
	// first, so ints >= 2^32 wrapped (e.g. 1<<32 became 0) and passed
	// validation with a silently corrupted value.
	if value < 0 || value > int(MaxUint24) {
		return errors.New("value exceeds 24-bit range")
	}
	c.value = uint32(value)
	return nil
}

// Int gets the value as an int, converted from the 24-bit unsigned integer.
func (c *Uint24) Int() int {
	return int(c.value)
}

// MarshalWrite encodes the 24-bit unsigned integer and writes it directly to
// the provided io.Writer. The encoding uses 3 bytes in BigEndian order.
func (c *Uint24) MarshalWrite(w io.Writer) error {
	// Defensive: the setters validate, but guard in case value was mutated.
	if c.value > MaxUint24 {
		return errors.New("value exceeds 24-bit range")
	}
	buf := [3]byte{
		byte(c.value >> 16), // most significant byte
		byte(c.value >> 8),
		byte(c.value), // least significant byte
	}
	_, err := w.Write(buf[:])
	return err
}

// UnmarshalRead reads exactly 3 bytes from the provided io.Reader and decodes
// them into a 24-bit unsigned integer (BigEndian).
func (c *Uint24) UnmarshalRead(r io.Reader) error {
	var buf [3]byte
	// io.ReadFull guarantees either a complete 3-byte read or an error.
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return err
	}
	c.value = uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])
	return nil
}

View File

@@ -0,0 +1,67 @@
package number
import (
"bytes"
"testing"
)
// TestUint24 exercises range validation and the 3-byte wire format of the
// Uint24 codec across boundary and mid-range values.
func TestUint24(t *testing.T) {
	cases := []struct {
		name    string
		value   uint32
		wantErr bool
	}{
		{"Minimum Value", 0, false},
		{"Maximum Value", MaxUint24, false},
		{"Value in Range", 8374263, false},
		{"Value Exceeds Range", MaxUint24 + 1, true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			codec := new(Uint24)
			err := codec.SetUint24(tc.value)
			switch {
			case tc.wantErr:
				if err == nil {
					t.Errorf("expected error but got none")
				}
				return
			case err != nil:
				t.Errorf("unexpected error: %v", err)
				return
			}
			if got := codec.Uint24(); got != tc.value {
				t.Errorf("Uint24 mismatch: got %d, expected %d", got, tc.value)
			}
			// Round-trip through the 3-byte wire format.
			buf := new(bytes.Buffer)
			if err = codec.MarshalWrite(buf); err != nil {
				t.Fatalf("MarshalWrite failed: %v", err)
			}
			if buf.Len() != 3 {
				t.Fatalf("encoded size mismatch: got %d bytes, expected 3 bytes", buf.Len())
			}
			decoded := new(Uint24)
			if err = decoded.UnmarshalRead(buf); err != nil {
				t.Fatalf("UnmarshalRead failed: %v", err)
			}
			if got := decoded.Uint24(); got != tc.value {
				t.Errorf("Decoded value mismatch: got %d, expected %d", got, tc.value)
			}
		})
	}
}

View File

@@ -0,0 +1,42 @@
package number
import (
"encoding/binary"
"io"
)
// Uint32 is a codec for encoding and decoding 32-bit unsigned integers.
type Uint32 struct {
	value uint32
}

// SetUint32 stores value.
func (c *Uint32) SetUint32(value uint32) {
	c.value = value
}

// Uint32 returns the stored value.
func (c *Uint32) Uint32() uint32 {
	return c.value
}

// SetInt stores value converted to uint32. Values outside the uint32 range
// (0-4294967295) are truncated.
func (c *Uint32) SetInt(value int) {
	c.value = uint32(value)
}

// Int returns the stored value converted to int.
func (c *Uint32) Int() int {
	return int(c.value)
}

// MarshalWrite writes the value to w as 4 bytes in BigEndian order.
func (c *Uint32) MarshalWrite(w io.Writer) error {
	var buf [4]byte
	binary.BigEndian.PutUint32(buf[:], c.value)
	_, err := w.Write(buf[:])
	return err
}

// UnmarshalRead reads exactly 4 BigEndian bytes from r into the value.
func (c *Uint32) UnmarshalRead(r io.Reader) error {
	var buf [4]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return err
	}
	c.value = binary.BigEndian.Uint32(buf[:])
	return nil
}

View File

@@ -0,0 +1,66 @@
package number
import (
"bytes"
"math"
"testing"
"lukechampine.com/frand"
)
// TestUint32 round-trips random values through the Uint32 codec and checks
// the exact 4-byte BigEndian wire format.
func TestUint32(t *testing.T) {
	// frand.Intn(n) returns values in [0, n), so use MaxUint32+1 to make
	// the full uint32 range (including 4294967295) reachable.
	generateRandomUint32 := func() uint32 {
		return uint32(frand.Intn(math.MaxUint32 + 1))
	}
	for i := 0; i < 100; i++ { // Run test 100 times for random values
		randomUint32 := generateRandomUint32()
		randomInt := int(randomUint32)
		codec := new(Uint32)
		// Setter/getter round trip.
		codec.SetUint32(randomUint32)
		if codec.Uint32() != randomUint32 {
			t.Fatalf("Uint32 mismatch: got %d, expected %d", codec.Uint32(), randomUint32)
		}
		// Int setter/getter round trip.
		codec.SetInt(randomInt)
		if codec.Int() != randomInt {
			t.Fatalf("Int mismatch: got %d, expected %d", codec.Int(), randomInt)
		}
		// Encode and verify the exact BigEndian bytes. The previous check
		// compared bufEnc.Bytes() against itself and could never fail.
		bufEnc := new(bytes.Buffer)
		if err := codec.MarshalWrite(bufEnc); err != nil {
			t.Fatalf("MarshalWrite failed: %v", err)
		}
		encoded := bufEnc.Bytes()
		expected := []byte{
			byte(randomUint32 >> 24), byte(randomUint32 >> 16),
			byte(randomUint32 >> 8), byte(randomUint32),
		}
		if !bytes.Equal(encoded, expected) {
			t.Fatalf("byte encoding mismatch: got %v, expected %v", encoded, expected)
		}
		// Decode back and verify the value survives the round trip.
		decoded := new(Uint32)
		if err := decoded.UnmarshalRead(bytes.NewBuffer(encoded)); err != nil {
			t.Fatalf("UnmarshalRead failed: %v", err)
		}
		if decoded.Uint32() != randomUint32 {
			t.Fatalf("Decoded value mismatch: got %d, expected %d", decoded.Uint32(), randomUint32)
		}
	}
}

View File

@@ -0,0 +1,75 @@
package number
import (
"errors"
"io"
)
// MaxUint40 is the maximum value of a 40-bit unsigned integer: 2^40 - 1.
const MaxUint40 uint64 = 1<<40 - 1

// Uint40 is a codec for encoding and decoding 40-bit unsigned integers as
// 5 bytes in BigEndian order.
type Uint40 struct{ value uint64 }

// SetUint40 sets the value as a 40-bit unsigned integer.
// If the value exceeds the maximum allowable value for 40 bits, it returns an error.
func (c *Uint40) SetUint40(value uint64) error {
	if value > MaxUint40 {
		return errors.New("value exceeds 40-bit range")
	}
	c.value = value
	return nil
}

// Uint40 gets the value as a 40-bit unsigned integer.
func (c *Uint40) Uint40() uint64 { return c.value }

// SetInt sets the value as an int, converting it to a 40-bit unsigned integer.
// If the value is out of the 40-bit range, it returns an error.
func (c *Uint40) SetInt(value int) error {
	if value < 0 || uint64(value) > MaxUint40 {
		return errors.New("value exceeds 40-bit range")
	}
	c.value = uint64(value)
	return nil
}

// Int gets the value as an int, converted from the 40-bit unsigned integer.
func (c *Uint40) Int() int { return int(c.value) }

// MarshalWrite encodes the 40-bit unsigned integer and writes it to the
// provided writer. The encoding uses 5 bytes in BigEndian order.
func (c *Uint40) MarshalWrite(w io.Writer) (err error) {
	// Defensive: the setters validate, but guard in case value was mutated.
	if c.value > MaxUint40 {
		return errors.New("value exceeds 40-bit range")
	}
	var buf [5]byte
	buf[0] = byte(c.value >> 32) // most significant byte
	buf[1] = byte(c.value >> 24)
	buf[2] = byte(c.value >> 16)
	buf[3] = byte(c.value >> 8)
	buf[4] = byte(c.value) // least significant byte
	_, err = w.Write(buf[:])
	return err
}

// UnmarshalRead reads exactly 5 bytes from the provided reader and decodes
// them into a 40-bit unsigned integer.
func (c *Uint40) UnmarshalRead(r io.Reader) (err error) {
	var buf [5]byte
	// io.ReadFull rather than r.Read: a bare Read may legally return fewer
	// than 5 bytes without an error, which would decode a garbage value.
	if _, err = io.ReadFull(r, buf[:]); err != nil {
		return err
	}
	c.value = uint64(buf[0])<<32 |
		uint64(buf[1])<<24 |
		uint64(buf[2])<<16 |
		uint64(buf[3])<<8 |
		uint64(buf[4])
	return nil
}

View File

@@ -0,0 +1,68 @@
package number
import (
"bytes"
"testing"
)
// TestUint40 exercises range validation and the 5-byte wire format of the
// Uint40 codec across boundary and mid-range values.
func TestUint40(t *testing.T) {
	cases := []struct {
		name    string
		value   uint64
		wantErr bool
	}{
		{"Minimum Value", 0, false},
		{"Maximum Value", MaxUint40, false},
		{"Value in Range", 109951162777, false},
		{"Value Exceeds Range", MaxUint40 + 1, true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			codec := new(Uint40)
			err := codec.SetUint40(tc.value)
			switch {
			case tc.wantErr:
				if err == nil {
					t.Errorf("expected error but got none")
				}
				return
			case err != nil:
				t.Errorf("unexpected error: %v", err)
				return
			}
			if got := codec.Uint40(); got != tc.value {
				t.Errorf("Uint40 mismatch: got %d, expected %d", got, tc.value)
			}
			// Round-trip through the 5-byte wire format.
			buf := new(bytes.Buffer)
			if err = codec.MarshalWrite(buf); err != nil {
				t.Fatalf("MarshalWrite failed: %v", err)
			}
			if buf.Len() != 5 {
				t.Fatalf("encoded size mismatch: got %d bytes, expected 5 bytes", buf.Len())
			}
			decoded := new(Uint40)
			if err = decoded.UnmarshalRead(buf); err != nil {
				t.Fatalf("UnmarshalRead failed: %v", err)
			}
			if got := decoded.Uint40(); got != tc.value {
				t.Errorf("Decoded value mismatch: got %d, expected %d", got, tc.value)
			}
		})
	}
}

View File

@@ -0,0 +1,42 @@
package number
import (
"encoding/binary"
"io"
)
// Uint64 is a codec for encoding and decoding 64-bit unsigned integers.
type Uint64 struct {
	value uint64
}

// SetUint64 stores value.
func (c *Uint64) SetUint64(value uint64) {
	c.value = value
}

// Uint64 returns the stored value.
func (c *Uint64) Uint64() uint64 {
	return c.value
}

// SetInt stores value reinterpreted as uint64 (negative ints wrap).
func (c *Uint64) SetInt(value int) {
	c.value = uint64(value)
}

// Int returns the stored value converted to int. May truncate if the value
// exceeds the range of int.
func (c *Uint64) Int() int {
	return int(c.value)
}

// MarshalWrite writes the value to w as 8 bytes in BigEndian order.
func (c *Uint64) MarshalWrite(w io.Writer) error {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], c.value)
	_, err := w.Write(buf[:])
	return err
}

// UnmarshalRead reads exactly 8 BigEndian bytes from r into the value.
func (c *Uint64) UnmarshalRead(r io.Reader) error {
	var buf [8]byte
	if _, err := io.ReadFull(r, buf[:]); err != nil {
		return err
	}
	c.value = binary.BigEndian.Uint64(buf[:])
	return nil
}

View File

@@ -0,0 +1,66 @@
package number
import (
"bytes"
"math"
"testing"
"lukechampine.com/frand"
)
// TestUint64 round-trips values through the Uint64 codec and checks the
// exact 8-byte BigEndian wire format.
func TestUint64(t *testing.T) {
	// frand.Uint64n(n) returns values in [0, n), so math.MaxUint64 itself
	// is never produced randomly; it is covered by an explicit case below.
	generateRandomUint64 := func() uint64 {
		return frand.Uint64n(math.MaxUint64)
	}
	values := []uint64{0, math.MaxUint64}
	for i := 0; i < 100; i++ { // add 100 random values
		values = append(values, generateRandomUint64())
	}
	for _, randomUint64 := range values {
		randomInt := int(randomUint64)
		codec := new(Uint64)
		// Setter/getter round trip.
		codec.SetUint64(randomUint64)
		if codec.Uint64() != randomUint64 {
			t.Fatalf("Uint64 mismatch: got %d, expected %d", codec.Uint64(), randomUint64)
		}
		// Int setter/getter round trip (wraps symmetrically, so equal).
		codec.SetInt(randomInt)
		if codec.Int() != randomInt {
			t.Fatalf("Int mismatch: got %d, expected %d", codec.Int(), randomInt)
		}
		// Encode and verify the exact BigEndian bytes. The previous check
		// compared bufEnc.Bytes() against itself and could never fail.
		bufEnc := new(bytes.Buffer)
		if err := codec.MarshalWrite(bufEnc); err != nil {
			t.Fatalf("MarshalWrite failed: %v", err)
		}
		encoded := bufEnc.Bytes()
		expected := []byte{
			byte(randomUint64 >> 56), byte(randomUint64 >> 48),
			byte(randomUint64 >> 40), byte(randomUint64 >> 32),
			byte(randomUint64 >> 24), byte(randomUint64 >> 16),
			byte(randomUint64 >> 8), byte(randomUint64),
		}
		if !bytes.Equal(encoded, expected) {
			t.Fatalf("byte encoding mismatch: got %v, expected %v", encoded, expected)
		}
		// Decode back and verify the value survives the round trip.
		decoded := new(Uint64)
		if err := decoded.UnmarshalRead(bytes.NewBuffer(encoded)); err != nil {
			t.Fatalf("UnmarshalRead failed: %v", err)
		}
		if decoded.Uint64() != randomUint64 {
			t.Fatalf("Decoded value mismatch: got %d, expected %d", decoded.Uint64(), randomUint64)
		}
	}
}

View File

@@ -3,9 +3,11 @@ package pubhash
import (
"io"
"x.realy.lol/chk"
"x.realy.lol/ec/schnorr"
"x.realy.lol/errorf"
"x.realy.lol/helpers"
"x.realy.lol/hex"
)
const Len = 8
@@ -23,6 +25,19 @@ func (ph *T) FromPubkey(pk []byte) (err error) {
return
}
func (ph *T) FromPubkeyHex(pk string) (err error) {
if len(pk) != schnorr.PubKeyBytesLen*2 {
err = errorf.E("invalid Pubkey length, got %d require %d", len(pk), schnorr.PubKeyBytesLen*2)
return
}
var pkb []byte
if pkb, err = hex.Dec(pk); chk.E(err) {
return
}
ph.val = helpers.Hash(pkb)[:Len]
return
}
func (ph *T) Bytes() (b []byte) { return ph.val }
func (ph *T) MarshalWrite(w io.Writer) (err error) {

View File

@@ -1,50 +0,0 @@
package serial
import (
"encoding/binary"
"io"
"x.realy.lol/errorf"
)
const Len = 8

// S is a fixed-length (Len byte) LittleEndian serial number.
type S struct{ val []byte }

// New returns an S with a zeroed Len-byte backing slice.
func New() (s *S) { return &S{make([]byte, Len)} }

// FromSerial stores ser into the backing slice in LittleEndian order.
func (s *S) FromSerial(ser uint64) {
	binary.LittleEndian.PutUint64(s.val, ser)
	return
}

// FromBytes wraps ser as a serial; ser must be exactly Len bytes.
// NOTE(review): the slice is retained, not copied — the caller must not
// mutate it afterwards.
func FromBytes(ser []byte) (s *S, err error) {
	if len(ser) != Len {
		err = errorf.E("serial must be %d bytes long, got %d", Len, len(ser))
		return
	}
	s = &S{val: ser}
	return
}

// ToSerial decodes the stored bytes as a LittleEndian uint64.
func (s *S) ToSerial() (ser uint64) {
	ser = binary.LittleEndian.Uint64(s.val)
	return
}

// Bytes returns the raw backing slice.
func (s *S) Bytes() (b []byte) { return s.val }

// MarshalWrite writes the Len raw bytes to w.
func (s *S) MarshalWrite(w io.Writer) (err error) {
	_, err = w.Write(s.val)
	return
}

// UnmarshalRead reads exactly Len bytes from r into the backing slice.
func (s *S) UnmarshalRead(r io.Reader) (err error) {
	if len(s.val) < Len {
		s.val = make([]byte, Len)
	} else {
		s.val = s.val[:Len]
	}
	// io.ReadFull rather than r.Read: a bare Read may return fewer than
	// Len bytes without error, leaving the serial partially filled.
	_, err = io.ReadFull(r, s.val)
	return
}

View File

@@ -1,51 +0,0 @@
package size
import (
"encoding/binary"
"io"
"x.realy.lol/errorf"
)
const Len = 4

// T is a fixed-length (Len byte) LittleEndian size value.
type T struct{ val []byte }

// New returns a T with a zeroed Len-byte backing slice.
func New() (s *T) { return &T{make([]byte, Len)} }

// FromUint32 stores n into a fresh backing slice in LittleEndian order.
func (s *T) FromUint32(n uint32) {
	s.val = make([]byte, Len)
	binary.LittleEndian.PutUint32(s.val, n)
	return
}

// FromBytes wraps val as a size; val must be exactly Len bytes.
// NOTE(review): the slice is retained, not copied — the caller must not
// mutate it afterwards.
func FromBytes(val []byte) (s *T, err error) {
	if len(val) != Len {
		err = errorf.E("size must be %d bytes long, got %d", Len, len(val))
		return
	}
	s = &T{val: val}
	return
}

// ToUint32 decodes the stored bytes as a LittleEndian uint32.
func (s *T) ToUint32() (ser uint32) {
	ser = binary.LittleEndian.Uint32(s.val)
	return
}

// Bytes returns the raw backing slice.
func (s *T) Bytes() (b []byte) { return s.val }

// MarshalWrite writes the Len raw bytes to w.
func (s *T) MarshalWrite(w io.Writer) (err error) {
	_, err = w.Write(s.val)
	return
}

// UnmarshalRead reads exactly Len bytes from r into the backing slice.
func (s *T) UnmarshalRead(r io.Reader) (err error) {
	if len(s.val) < Len {
		s.val = make([]byte, Len)
	} else {
		s.val = s.val[:Len]
	}
	// io.ReadFull rather than r.Read: a bare Read may return fewer than
	// Len bytes without error, leaving the size partially filled.
	_, err = io.ReadFull(r, s.val)
	return
}

View File

@@ -1,48 +1,56 @@
package timestamp
import (
"encoding/binary"
"bytes"
"io"
"x.realy.lol/errorf"
"x.realy.lol/chk"
"x.realy.lol/database/indexes/types/varint"
timeStamp "x.realy.lol/timestamp"
)
const Len = 8
type T struct{ val []byte }
type T struct{ val int }
func (ts *T) FromInt64(timestamp int64) {
ts.val = make([]byte, Len)
binary.LittleEndian.PutUint64(ts.val, uint64(timestamp))
return
}
func (ts *T) FromInt(t int) { ts.val = t }
func (ts *T) FromInt64(t int64) { ts.val = int(t) }
func FromBytes(timestampBytes []byte) (ts *T, err error) {
if len(timestampBytes) != Len {
err = errorf.E("kind must be %d bytes long, got %d", Len, len(timestampBytes))
v := varint.New()
if err = v.UnmarshalRead(bytes.NewBuffer(timestampBytes)); chk.E(err) {
return
}
ts = &T{val: timestampBytes}
ts = &T{val: v.ToInt()}
return
}
func (ts *T) ToTimestamp() (timestamp timeStamp.Timestamp) {
return timeStamp.Timestamp(binary.LittleEndian.Uint64(ts.val))
return
}
func (ts *T) Bytes() (b []byte, err error) {
v := varint.New()
buf := new(bytes.Buffer)
if err = v.MarshalWrite(buf); chk.E(err) {
return
}
b = buf.Bytes()
return
}
func (ts *T) Bytes() (b []byte) { return ts.val }
func (ts *T) MarshalWrite(w io.Writer) (err error) {
_, err = w.Write(ts.val)
v := varint.New()
if err = v.MarshalWrite(w); chk.E(err) {
return
}
return
}
func (ts *T) UnmarshalRead(r io.Reader) (err error) {
if len(ts.val) < Len {
ts.val = make([]byte, Len)
} else {
ts.val = ts.val[:Len]
v := varint.New()
if err = v.UnmarshalRead(r); chk.E(err) {
return
}
_, err = r.Read(ts.val)
ts.val = v.ToInt()
return
}

View File

@@ -0,0 +1,94 @@
package varint
import (
"bytes"
"io"
"x.realy.lol/chk"
"x.realy.lol/varint"
)
// V wraps a uint64 for varint wire encoding; S is a list of V.
type V struct{ val uint64 }
type S []*V

// New returns an empty varint value.
func New() (s *V) { return &V{} }

// FromUint64 stores ser.
func (vi *V) FromUint64(ser uint64) {
	vi.val = ser
	return
}

// FromBytes decodes a varint from ser.
func FromBytes(ser []byte) (s *V, err error) {
	s = &V{}
	if s.val, err = varint.Decode(bytes.NewBuffer(ser)); chk.E(err) {
		return
	}
	return
}

// ToUint64 returns the stored value.
func (vi *V) ToUint64() (ser uint64) { return vi.val }

// ToInt returns the stored value as int (may truncate on 32-bit platforms).
func (vi *V) ToInt() (ser int) { return int(vi.val) }

// ToUint32 returns the stored value truncated to uint32.
func (vi *V) ToUint32() (v uint32) { return uint32(vi.val) }

// Bytes returns the varint encoding of the value.
func (vi *V) Bytes() (b []byte) {
	buf := new(bytes.Buffer)
	varint.Encode(buf, vi.val)
	// Assign the named return: the previous version fell through with a
	// bare return and always yielded nil.
	b = buf.Bytes()
	return
}

// MarshalWrite writes the varint encoding of the value to w.
func (vi *V) MarshalWrite(w io.Writer) (err error) {
	varint.Encode(w, vi.val)
	return
}

// UnmarshalRead decodes a varint from r into the value.
func (vi *V) UnmarshalRead(r io.Reader) (err error) {
	vi.val, err = varint.Decode(r)
	return
}
// DeduplicateInOrder returns s with duplicate values removed, keeping the
// first occurrence of each value in its original position.
func DeduplicateInOrder(s S) (v S) {
	// for larger slices, this uses a lot less memory, at the cost of slower execution.
	if len(s) > 10000 {
	skip:
		for i, sa := range s {
			// Only compare against EARLIER elements so the first occurrence
			// is kept. The previous version compared against every other
			// element, which dropped all copies of any duplicated value,
			// first occurrence included (e.g. [1,1] deduplicated to []).
			for _, sb := range s[:i] {
				if sa.val == sb.val {
					continue skip
				}
			}
			v = append(v, sa)
		}
	} else {
		// for small slices, this is faster but uses more memory.
		seen := map[uint64]*V{}
		for _, val := range s {
			if _, ok := seen[val.val]; !ok {
				v = append(v, val)
				seen[val.val] = val
			}
		}
	}
	return
}
// Intersect deduplicates both inputs and returns the elements of a whose
// values also appear in b, preserving a's (deduplicated) order.
func Intersect(a, b []*V) (sers []*V) {
	// Deduplicate first to eliminate unnecessary comparisons.
	a = DeduplicateInOrder(a)
	b = DeduplicateInOrder(b)
	// Collect b's values into a set, then filter a against it.
	inB := make(map[uint64]struct{}, len(b))
	for _, bs := range b {
		inB[bs.val] = struct{}{}
	}
	for _, as := range a {
		if _, ok := inB[as.val]; ok {
			sers = append(sers, as)
		}
	}
	return
}

View File

@@ -35,7 +35,7 @@ func (d *D) Path() string { return d.dataDir }
// Init sets up the database with the loaded configuration.
func (d *D) Init(path string) (err error) {
d.dataDir = path
log.I.Ln("opening realy event store at", d.dataDir)
log.I.Ln("opening realy database at", d.dataDir)
opts := badger.DefaultOptions(d.dataDir)
opts.BlockCacheSize = int64(d.BlockCacheSize)
opts.BlockSize = units.Gb

View File

@@ -60,11 +60,10 @@ func TestD_StoreEvent(t *testing.T) {
log.I.F("completed unmarshalling %d events", count)
for _, v := range evIds {
var ev *event.E
if ev, err = d.FindEvent(v); chk.E(err) {
if ev, err = d.GetEventById(v); chk.E(err) {
t.Fatal(err)
}
_ = ev
// log.I.S(ev)
}
log.I.F("stored and retrieved %d events", len(evIds))
return

View File

@@ -6,15 +6,15 @@ import (
"x.realy.lol/chk"
"x.realy.lol/database/indexes"
"x.realy.lol/database/indexes/types/serial"
"x.realy.lol/database/indexes/types/timestamp"
"x.realy.lol/database/indexes/types/varint"
"x.realy.lol/errorf"
"x.realy.lol/event"
)
func (d *D) StoreEvent(ev *event.E) (err error) {
var ev2 *event.E
if ev2, err = d.FindEvent(ev.GetIdBytes()); err != nil {
if ev2, err = d.GetEventById(ev.GetIdBytes()); err != nil {
// so we didn't find it?
}
if ev2 != nil {
@@ -24,7 +24,7 @@ func (d *D) StoreEvent(ev *event.E) (err error) {
return
}
}
var ser *serial.S
var ser *varint.V
var idxs [][]byte
if idxs, ser, err = d.GetEventIndexes(ev); chk.E(err) {
return
@@ -53,7 +53,11 @@ func (d *D) StoreEvent(ev *event.E) (err error) {
if err = indexes.LastAccessedEnc(ser).MarshalWrite(laI); chk.E(err) {
return
}
if err = d.Set(laI.Bytes(), ts.Bytes()); chk.E(err) {
var tsb []byte
if tsb, err = ts.Bytes(); chk.E(err) {
return
}
if err = d.Set(laI.Bytes(), tsb); chk.E(err) {
return
}
// AccessCounter
@@ -61,7 +65,7 @@ func (d *D) StoreEvent(ev *event.E) (err error) {
if err = indexes.AccessCounterEnc(ser).MarshalWrite(acI); chk.E(err) {
return
}
ac := serial.New()
ac := varint.New()
if err = d.Set(acI.Bytes(), ac.Bytes()); chk.E(err) {
return
}

View File

@@ -5,7 +5,6 @@ import (
"x.realy.lol/chk"
"x.realy.lol/ec/schnorr"
"x.realy.lol/errorf"
"x.realy.lol/hex"
"x.realy.lol/timestamp"
"x.realy.lol/varint"
@@ -49,14 +48,20 @@ func (ev *E) MarshalWrite(w io.Writer) (err error) {
for i, y := range x {
if i == 1 && isBin {
var b []byte
b, err = hex.Dec(y)
if err != nil {
err = errorf.E("e or p tag value not hex: %s", err.Error())
return
}
if len(b) != 32 {
err = errorf.E("e or p tag value with invalid decoded byte length %d", len(b))
return
if b, err = hex.Dec(y); err != nil {
b = []byte(y)
// non-hex "p" or "e" tags have a 1 prefix to indicate not to hex decode.
_, _ = w.Write([]byte{1})
err = nil
} else {
if len(b) != 32 {
// err = errorf.E("e or p tag value with invalid decoded byte length %d '%0x'", len(b), b)
b = []byte(y)
_, _ = w.Write([]byte{1})
} else {
// hex values have a 2 prefix
_, _ = w.Write([]byte{2})
}
}
varint.Encode(w, uint64(len(b)))
_, _ = w.Write(b)
@@ -111,6 +116,14 @@ func (ev *E) UnmarshalRead(r io.Reader) (err error) {
var t []string
var isBin bool
for i := range nField {
var pr byte
if i == 1 && isBin {
prf := make([]byte, 1)
if _, err = r.Read(prf); chk.E(err) {
return
}
pr = prf[0]
}
var lenField uint64
if lenField, err = varint.Decode(r); chk.E(err) {
return
@@ -119,16 +132,18 @@ func (ev *E) UnmarshalRead(r io.Reader) (err error) {
if _, err = r.Read(field); chk.E(err) {
return
}
// if it is first field, length 1 and is e or p, the value field must be binary
// if it is first field, length 1 and is e or p, the value field should be binary
if i == 0 && len(field) == 1 && (field[0] == 'e' || field[0] == 'p') {
isBin = true
}
if i == 1 && isBin {
// this is a binary value, was an e or p tag key, 32 bytes long, encode value
// field to hex
f := make([]byte, 64)
_ = hex.EncBytes(f, field)
field = f
if pr == 2 {
// this is a binary value, was an e or p tag key, 32 bytes long, encode
// value field to hex
f := make([]byte, 64)
_ = hex.EncBytes(f, field)
field = f
}
}
t = append(t, string(field))
}

View File

@@ -10,30 +10,27 @@ import (
"x.realy.lol/timestamp"
)
type Filters []Filter
type S []F
type Filter struct {
IDs []string
type F struct {
Ids []string
Kinds []int
Authors []string
Tags TagMap
Since *timestamp.Timestamp
Until *timestamp.Timestamp
Limit int
Limit *int
Search string
// LimitZero is or must be set when there is a "limit":0 in the filter, and not when "limit" is just omitted
LimitZero bool `json:"-"`
}
type TagMap map[string][]string
func (eff Filters) String() string {
func (eff S) String() string {
j, _ := json.Marshal(eff)
return string(j)
}
func (eff Filters) Match(event *event.E) bool {
func (eff S) Match(event *event.E) bool {
for _, filter := range eff {
if filter.Matches(event) {
return true
@@ -42,7 +39,7 @@ func (eff Filters) Match(event *event.E) bool {
return false
}
func (eff Filters) MatchIgnoringTimestampConstraints(event *event.E) bool {
func (eff S) MatchIgnoringTimestampConstraints(event *event.E) bool {
for _, filter := range eff {
if filter.MatchesIgnoringTimestampConstraints(event) {
return true
@@ -51,12 +48,12 @@ func (eff Filters) MatchIgnoringTimestampConstraints(event *event.E) bool {
return false
}
func (ef Filter) String() string {
func (ef F) String() string {
j, _ := json.Marshal(ef)
return string(j)
}
func (ef Filter) Matches(event *event.E) bool {
func (ef F) Matches(event *event.E) bool {
if !ef.MatchesIgnoringTimestampConstraints(event) {
return false
}
@@ -72,12 +69,12 @@ func (ef Filter) Matches(event *event.E) bool {
return true
}
func (ef Filter) MatchesIgnoringTimestampConstraints(event *event.E) bool {
func (ef F) MatchesIgnoringTimestampConstraints(event *event.E) bool {
if event == nil {
return false
}
if ef.IDs != nil && !slices.Contains(ef.IDs, event.Id) {
if ef.Ids != nil && !slices.Contains(ef.Ids, event.Id) {
return false
}
@@ -98,12 +95,12 @@ func (ef Filter) MatchesIgnoringTimestampConstraints(event *event.E) bool {
return true
}
func FilterEqual(a Filter, b Filter) bool {
func FilterEqual(a F, b F) bool {
if !helpers.Similar(a.Kinds, b.Kinds) {
return false
}
if !helpers.Similar(a.IDs, b.IDs) {
if !helpers.Similar(a.Ids, b.Ids) {
return false
}
@@ -137,21 +134,16 @@ func FilterEqual(a Filter, b Filter) bool {
return false
}
if a.LimitZero != b.LimitZero {
return false
}
return true
}
func (ef Filter) Clone() Filter {
clone := Filter{
IDs: slices.Clone(ef.IDs),
Authors: slices.Clone(ef.Authors),
Kinds: slices.Clone(ef.Kinds),
Limit: ef.Limit,
Search: ef.Search,
LimitZero: ef.LimitZero,
func (ef F) Clone() F {
clone := F{
Ids: slices.Clone(ef.Ids),
Authors: slices.Clone(ef.Authors),
Kinds: slices.Clone(ef.Kinds),
Limit: ef.Limit,
Search: ef.Search,
}
if ef.Tags != nil {
@@ -180,9 +172,9 @@ func (ef Filter) Clone() Filter {
// It returns -1 if there are no theoretical limits.
//
// The given .Limit present in the filter is ignored.
func GetTheoreticalLimit(filter Filter) int {
if len(filter.IDs) > 0 {
return len(filter.IDs)
func GetTheoreticalLimit(filter F) int {
if len(filter.Ids) > 0 {
return len(filter.Ids)
}
if len(filter.Kinds) == 0 {
@@ -217,3 +209,5 @@ func GetTheoreticalLimit(filter Filter) int {
return -1
}
// IntToPointer returns a pointer to a copy of i.
func IntToPointer(i int) (ptr *int) {
	ptr = &i
	return
}

View File

@@ -1,311 +0,0 @@
package filter
import (
"github.com/mailru/easyjson"
"github.com/mailru/easyjson/jlexer"
"github.com/mailru/easyjson/jwriter"
"x.realy.lol/timestamp"
)
// suppress unused package warning
var (
_ *jlexer.Lexer
_ *jwriter.Writer
_ easyjson.Marshaler
)
func easyjson4d398eaaDecodeGithubComNbdWtfGoNostr(in *jlexer.Lexer, out *Filter) {
isTopLevel := in.IsStart()
if in.IsNull() {
if isTopLevel {
in.Consumed()
}
in.Skip()
return
}
out.Tags = make(TagMap)
in.Delim('{')
for !in.IsDelim('}') {
key := in.UnsafeFieldName(false)
in.WantColon()
if in.IsNull() {
in.Skip()
in.WantComma()
continue
}
switch key {
case "ids":
if in.IsNull() {
in.Skip()
out.IDs = nil
} else {
in.Delim('[')
if out.IDs == nil {
if !in.IsDelim(']') {
out.IDs = make([]string, 0, 20)
} else {
out.IDs = []string{}
}
} else {
out.IDs = (out.IDs)[:0]
}
for !in.IsDelim(']') {
var v1 string
v1 = string(in.String())
out.IDs = append(out.IDs, v1)
in.WantComma()
}
in.Delim(']')
}
case "kinds":
if in.IsNull() {
in.Skip()
out.Kinds = nil
} else {
in.Delim('[')
if out.Kinds == nil {
if !in.IsDelim(']') {
out.Kinds = make([]int, 0, 8)
} else {
out.Kinds = []int{}
}
} else {
out.Kinds = (out.Kinds)[:0]
}
for !in.IsDelim(']') {
var v2 int
v2 = int(in.Int())
out.Kinds = append(out.Kinds, v2)
in.WantComma()
}
in.Delim(']')
}
case "authors":
if in.IsNull() {
in.Skip()
out.Authors = nil
} else {
in.Delim('[')
if out.Authors == nil {
if !in.IsDelim(']') {
out.Authors = make([]string, 0, 40)
} else {
out.Authors = []string{}
}
} else {
out.Authors = (out.Authors)[:0]
}
for !in.IsDelim(']') {
var v3 string
v3 = string(in.String())
out.Authors = append(out.Authors, v3)
in.WantComma()
}
in.Delim(']')
}
case "since":
if in.IsNull() {
in.Skip()
out.Since = nil
} else {
if out.Since == nil {
out.Since = new(timestamp.Timestamp)
}
*out.Since = timestamp.Timestamp(in.Int64())
}
case "until":
if in.IsNull() {
in.Skip()
out.Until = nil
} else {
if out.Until == nil {
out.Until = new(timestamp.Timestamp)
}
*out.Until = timestamp.Timestamp(in.Int64())
}
case "limit":
out.Limit = int(in.Int())
if out.Limit == 0 {
out.LimitZero = true
}
case "search":
out.Search = string(in.String())
default:
if len(key) > 1 && key[0] == '#' {
tagValues := make([]string, 0, 40)
if !in.IsNull() {
in.Delim('[')
if out.Authors == nil {
if !in.IsDelim(']') {
tagValues = make([]string, 0, 4)
} else {
tagValues = []string{}
}
} else {
tagValues = (tagValues)[:0]
}
for !in.IsDelim(']') {
var v3 string
v3 = string(in.String())
tagValues = append(tagValues, v3)
in.WantComma()
}
in.Delim(']')
}
out.Tags[key[1:]] = tagValues
} else {
in.SkipRecursive()
}
}
in.WantComma()
}
in.Delim('}')
if isTopLevel {
in.Consumed()
}
}
func easyjson4d398eaaEncodeGithubComNbdWtfGoNostr(out *jwriter.Writer, in Filter) {
out.RawByte('{')
first := true
_ = first
if len(in.IDs) != 0 {
const prefix string = ",\"ids\":"
first = false
out.RawString(prefix[1:])
{
out.RawByte('[')
for v4, v5 := range in.IDs {
if v4 > 0 {
out.RawByte(',')
}
out.String(string(v5))
}
out.RawByte(']')
}
}
if len(in.Kinds) != 0 {
const prefix string = ",\"kinds\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
{
out.RawByte('[')
for v6, v7 := range in.Kinds {
if v6 > 0 {
out.RawByte(',')
}
out.Int(int(v7))
}
out.RawByte(']')
}
}
if len(in.Authors) != 0 {
const prefix string = ",\"authors\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
{
out.RawByte('[')
for v8, v9 := range in.Authors {
if v8 > 0 {
out.RawByte(',')
}
out.String(string(v9))
}
out.RawByte(']')
}
}
if in.Since != nil {
const prefix string = ",\"since\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Int64(int64(*in.Since))
}
if in.Until != nil {
const prefix string = ",\"until\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Int64(int64(*in.Until))
}
if in.Limit != 0 || in.LimitZero {
const prefix string = ",\"limit\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.Int(int(in.Limit))
}
if in.Search != "" {
const prefix string = ",\"search\":"
if first {
first = false
out.RawString(prefix[1:])
} else {
out.RawString(prefix)
}
out.String(string(in.Search))
}
for tag, values := range in.Tags {
const prefix string = ",\"authors\":"
if first {
first = false
out.RawString("\"#" + tag + "\":")
} else {
out.RawString(",\"#" + tag + "\":")
}
{
out.RawByte('[')
for i, v := range values {
if i > 0 {
out.RawByte(',')
}
out.String(string(v))
}
out.RawByte(']')
}
}
out.RawByte('}')
}
// MarshalJSON supports json.Marshaler interface
func (v Filter) MarshalJSON() ([]byte, error) {
w := jwriter.Writer{NoEscapeHTML: true}
easyjson4d398eaaEncodeGithubComNbdWtfGoNostr(&w, v)
return w.Buffer.BuildBytes(), w.Error
}
// MarshalEasyJSON supports easyjson.Marshaler interface
func (v Filter) MarshalEasyJSON(w *jwriter.Writer) {
w.NoEscapeHTML = true
easyjson4d398eaaEncodeGithubComNbdWtfGoNostr(w, v)
}
// UnmarshalJSON supports json.Unmarshaler interface
func (v *Filter) UnmarshalJSON(data []byte) error {
r := jlexer.Lexer{Data: data}
easyjson4d398eaaDecodeGithubComNbdWtfGoNostr(&r, v)
return r.Error()
}
// UnmarshalEasyJSON supports easyjson.Unmarshaler interface
func (v *Filter) UnmarshalEasyJSON(l *jlexer.Lexer) {
easyjson4d398eaaDecodeGithubComNbdWtfGoNostr(l, v)
}

View File

@@ -8,14 +8,16 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"x.realy.lol/chk"
"x.realy.lol/event"
"x.realy.lol/kind"
"x.realy.lol/log"
"x.realy.lol/timestamp"
)
func TestFilterUnmarshal(t *testing.T) {
raw := `{"ids": ["abc"],"#e":["zzz"],"#something":["nothing","bab"],"since":1644254609,"search":"test"}`
var f Filter
var f F
err := json.Unmarshal([]byte(raw), &f)
assert.NoError(t, err)
@@ -32,7 +34,7 @@ func TestFilterUnmarshal(t *testing.T) {
func TestFilterMarshal(t *testing.T) {
until := timestamp.Timestamp(12345678)
filterj, err := json.Marshal(Filter{
filterj, err := json.Marshal(F{
Kinds: []int{kind.TextNote, kind.RecommendServer, kind.EncryptedDirectMessage},
Tags: TagMap{"fruit": {"banana", "mango"}},
Until: &until,
@@ -45,7 +47,7 @@ func TestFilterMarshal(t *testing.T) {
func TestFilterUnmarshalWithLimitZero(t *testing.T) {
raw := `{"ids": ["abc"],"#e":["zzz"],"limit":0,"#something":["nothing","bab"],"since":1644254609,"search":"test"}`
var f Filter
var f F
err := json.Unmarshal([]byte(raw), &f)
assert.NoError(t, err)
@@ -54,8 +56,7 @@ func TestFilterUnmarshalWithLimitZero(t *testing.T) {
f.Since.Time().UTC().Format("2006-01-02") != "2022-02-07" ||
f.Until != nil ||
f.Tags == nil || len(f.Tags) != 2 || !slices.Contains(f.Tags["something"], "bab") ||
f.Search != "test" ||
f.LimitZero == false {
f.Search != "test" {
return false
}
return true
@@ -64,11 +65,10 @@ func TestFilterUnmarshalWithLimitZero(t *testing.T) {
func TestFilterMarshalWithLimitZero(t *testing.T) {
until := timestamp.Timestamp(12345678)
filterj, err := json.Marshal(Filter{
Kinds: []int{kind.TextNote, kind.RecommendServer, kind.EncryptedDirectMessage},
Tags: TagMap{"fruit": {"banana", "mango"}},
Until: &until,
LimitZero: true,
filterj, err := json.Marshal(F{
Kinds: []int{kind.TextNote, kind.RecommendServer, kind.EncryptedDirectMessage},
Tags: TagMap{"fruit": {"banana", "mango"}},
Until: &until,
})
assert.NoError(t, err)
@@ -77,7 +77,7 @@ func TestFilterMarshalWithLimitZero(t *testing.T) {
}
func TestFilterMatchingLive(t *testing.T) {
var filter Filter
var filter F
var event event.E
json.Unmarshal([]byte(`{"kinds":[1],"authors":["a8171781fd9e90ede3ea44ddca5d3abf828fe8eedeb0f3abb0dd3e563562e1fc","1d80e5588de010d137a67c42b03717595f5f510e73e42cfc48f31bae91844d59","ed4ca520e9929dfe9efdadf4011b53d30afd0678a09aa026927e60e7a45d9244"],"since":1677033299}`), &filter)
@@ -88,50 +88,50 @@ func TestFilterMatchingLive(t *testing.T) {
func TestFilterEquality(t *testing.T) {
assert.True(t, FilterEqual(
Filter{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion}},
Filter{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion}},
F{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion}},
F{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion}},
), "kinds filters should be equal")
assert.True(t, FilterEqual(
Filter{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion}, Tags: TagMap{"letter": {"a", "b"}}},
Filter{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion}, Tags: TagMap{"letter": {"b", "a"}}},
F{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion}, Tags: TagMap{"letter": {"a", "b"}}},
F{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion}, Tags: TagMap{"letter": {"b", "a"}}},
), "kind+tags filters should be equal")
tm := timestamp.Now()
assert.True(t, FilterEqual(
Filter{
F{
Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion},
Tags: TagMap{"letter": {"a", "b"}, "fruit": {"banana"}},
Since: &tm,
IDs: []string{"aaaa", "bbbb"},
Ids: []string{"aaaa", "bbbb"},
},
Filter{
F{
Kinds: []int{kind.Deletion, kind.EncryptedDirectMessage},
Tags: TagMap{"letter": {"a", "b"}, "fruit": {"banana"}},
Since: &tm,
IDs: []string{"aaaa", "bbbb"},
Ids: []string{"aaaa", "bbbb"},
},
), "kind+2tags+since+ids filters should be equal")
assert.False(t, FilterEqual(
Filter{Kinds: []int{kind.TextNote, kind.EncryptedDirectMessage, kind.Deletion}},
Filter{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion, kind.Repost}},
F{Kinds: []int{kind.TextNote, kind.EncryptedDirectMessage, kind.Deletion}},
F{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion, kind.Repost}},
), "kinds filters shouldn't be equal")
}
func TestFilterClone(t *testing.T) {
ts := timestamp.Now() - 60*60
flt := Filter{
flt := F{
Kinds: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
Tags: TagMap{"letter": {"a", "b"}, "fruit": {"banana"}},
Since: &ts,
IDs: []string{"9894b4b5cb5166d23ee8899a4151cf0c66aec00bde101982a13b8e8ceb972df9"},
Ids: []string{"9894b4b5cb5166d23ee8899a4151cf0c66aec00bde101982a13b8e8ceb972df9"},
}
clone := flt.Clone()
assert.True(t, FilterEqual(flt, clone), "clone is not equal:\n %v !=\n %v", flt, clone)
clone1 := flt.Clone()
clone1.IDs = append(clone1.IDs, "88f0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d")
clone1.Ids = append(clone1.Ids, "88f0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d")
assert.False(t, FilterEqual(flt, clone1), "modifying the clone ids should cause it to not be equal anymore")
clone2 := flt.Clone()
@@ -148,11 +148,40 @@ func TestFilterClone(t *testing.T) {
}
func TestTheoreticalLimit(t *testing.T) {
require.Equal(t, 6, GetTheoreticalLimit(Filter{IDs: []string{"a", "b", "c", "d", "e", "f"}}))
require.Equal(t, 9, GetTheoreticalLimit(Filter{Authors: []string{"a", "b", "c"}, Kinds: []int{3, 0, 10002}}))
require.Equal(t, 4, GetTheoreticalLimit(Filter{Authors: []string{"a", "b", "c", "d"}, Kinds: []int{10050}}))
require.Equal(t, -1, GetTheoreticalLimit(Filter{Authors: []string{"a", "b", "c", "d"}}))
require.Equal(t, -1, GetTheoreticalLimit(Filter{Kinds: []int{3, 0, 10002}}))
require.Equal(t, 24, GetTheoreticalLimit(Filter{Authors: []string{"a", "b", "c", "d", "e", "f"}, Kinds: []int{30023, 30024}, Tags: TagMap{"d": []string{"aaa", "bbb"}}}))
require.Equal(t, -1, GetTheoreticalLimit(Filter{Authors: []string{"a", "b", "c", "d", "e", "f"}, Kinds: []int{30023, 30024}}))
require.Equal(t, 6, GetTheoreticalLimit(F{Ids: []string{"a", "b", "c", "d", "e", "f"}}))
require.Equal(t, 9, GetTheoreticalLimit(F{Authors: []string{"a", "b", "c"}, Kinds: []int{3, 0, 10002}}))
require.Equal(t, 4, GetTheoreticalLimit(F{Authors: []string{"a", "b", "c", "d"}, Kinds: []int{10050}}))
require.Equal(t, -1, GetTheoreticalLimit(F{Authors: []string{"a", "b", "c", "d"}}))
require.Equal(t, -1, GetTheoreticalLimit(F{Kinds: []int{3, 0, 10002}}))
require.Equal(t, 24, GetTheoreticalLimit(F{Authors: []string{"a", "b", "c", "d", "e", "f"}, Kinds: []int{30023, 30024}, Tags: TagMap{"d": []string{"aaa", "bbb"}}}))
require.Equal(t, -1, GetTheoreticalLimit(F{Authors: []string{"a", "b", "c", "d", "e", "f"}, Kinds: []int{30023, 30024}}))
}
func TestFilter(t *testing.T) {
ts := timestamp.Now() - 60*60
now := timestamp.Now()
flt := &F{
Authors: []string{"1d80e5588de010d137a67c42b03717595f5f510e73e42cfc48f31bae91844d59"},
Kinds: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
Tags: TagMap{
"#t": {"a", "b"},
"#e": {"9894b4b5cb5166d23ee8899a4151cf0c66aec00bde101982a13b8e8ceb972df9"},
"#p": {"1d80e5588de010d137a67c42b03717595f5f510e73e42cfc48f31bae91844d59"},
},
Until: &now,
Since: &ts,
Ids: []string{"9894b4b5cb5166d23ee8899a4151cf0c66aec00bde101982a13b8e8ceb972df9"},
// Limit: IntToPointer(10),
}
var err error
var b []byte
if b, err = json.Marshal(flt); chk.E(err) {
t.Fatal(err)
}
log.I.F("%s", b)
var f2 F
if err = json.Unmarshal(b, &f2); chk.E(err) {
t.Fatal(err)
}
log.I.S(f2)
}

View File

@@ -1,6 +1,7 @@
package helpers
import (
"slices"
"strconv"
"strings"
"sync"
@@ -123,3 +124,15 @@ func Hash(in []byte) (out []byte) {
h := sha256.Sum256(in)
return h[:]
}
// RemoveDuplicates filters a slice of comparable items in place, keeping the
// first occurrence of each value and dropping later repeats. The freed tail
// of the backing array is zeroed, matching slices.DeleteFunc semantics. This
// would not be appropriate for pointers unless they were assembled from the
// same source where a pointer is equal to a unique reference to the content.
func RemoveDuplicates[T comparable](s []T) []T {
	seen := make(map[T]struct{}, len(s))
	out := s[:0] // reuse the backing array; out index never passes the read index
	for _, v := range s {
		if _, ok := seen[v]; ok {
			continue
		}
		seen[v] = struct{}{}
		out = append(out, v)
	}
	// zero the abandoned tail so dropped values are not retained
	clear(s[len(out):])
	return out
}

View File

@@ -8,6 +8,7 @@ import (
"strings"
"x.realy.lol/chk"
"x.realy.lol/ec/schnorr"
"x.realy.lol/helpers"
"x.realy.lol/hex"
"x.realy.lol/ints"
@@ -246,33 +247,44 @@ func (tags Tags) Get_a_Tags() (atags []Tag_a) {
if len(a) > 0 {
for _, v := range a {
if v[0] == "a" && len(v) > 1 {
// try to split it
parts := strings.Split(v[1], ":")
// there must be a kind first
ki := ints.New(0)
if _, err = ki.Unmarshal([]byte(parts[0])); chk.E(err) {
var atag Tag_a
if atag, err = Decode_a_Tag(v[1]); chk.E(err) {
continue
}
atag := Tag_a{
Kind: int(ki.Uint16()),
}
if len(parts) < 2 {
continue
}
// next must be a pubkey
var pk []byte
if pk, err = hex.Dec(parts[1]); chk.E(err) {
continue
}
atag.Pubkey = pk
// there possibly can be nothing after this
if len(parts) >= 3 {
// third part is the identifier (d tag)
atag.Ident = parts[2]
}
atags = append(atags, atag)
}
}
}
return
}
// Decode_a_Tag parses the value of an "a" tag, which has the form
// "<kind>:<pubkey hex>[:<identifier>]", into a Tag_a.
//
// Only the kind segment is mandatory. When the pubkey segment is missing,
// the wrong hex length, or absent entirely, the partially-filled Tag_a is
// returned with a nil error — lenient by design, presumably; callers should
// check Pubkey before relying on it.
func Decode_a_Tag(a string) (ta Tag_a, err error) {
	segments := strings.Split(a, ":")
	// the leading segment must be a numeric kind
	k := ints.New(0)
	if _, err = k.Unmarshal([]byte(segments[0])); chk.E(err) {
		return
	}
	ta = Tag_a{Kind: int(k.Uint16())}
	// nothing after the kind is a valid (minimal) tag
	if len(segments) < 2 {
		return
	}
	// the second segment should be a schnorr pubkey in hex; silently skip
	// it when the length is wrong
	if len(segments[1]) != 2*schnorr.PubKeyBytesLen {
		return
	}
	var pk []byte
	if pk, err = hex.Dec(segments[1]); err != nil {
		return
	}
	ta.Pubkey = pk
	// an optional third segment carries the identifier (d tag)
	if len(segments) >= 3 {
		ta.Ident = segments[2]
	}
	return
}

View File

@@ -15,3 +15,5 @@ func New[T constraints.Integer | constraints.Float](t T) Timestamp {
}
// Time converts the timestamp (unix seconds) to a time.Time.
func (t Timestamp) Time() time.Time { return time.Unix(int64(t), 0) }
// ToInt64 returns the timestamp as an int64 unix-seconds value.
func (t Timestamp) ToInt64() int64 { return int64(t) }
// ToInt returns the timestamp as an int unix-seconds value.
func (t Timestamp) ToInt() int { return int(t) }