Compare commits

25 commits: vainstr-v1...main
| SHA1 |
|---|
| b29ad5bd07 |
| 3b0fd72290 |
| eb0ba87ce6 |
| 321a1b47bb |
| d5ae20ba94 |
| 6a7ddb8aea |
| e12fb03b03 |
| 966f58f4c7 |
| 789c7913e7 |
| faf3ebfdba |
| bff08415e0 |
| 4408fd3cb7 |
| 6ffc5b1de0 |
| 8ac12ccf1d |
| dbfb4b7f8d |
| 08b850e0ea |
| 9d0391c43d |
| f5a0d49aff |
| b0cbcf6fec |
| 1954a9a5a3 |
| 034cbe17c9 |
| 7ebea3e594 |
| 73c8679a7c |
| b383fadf47 |
| d14da6788f |
.gitignore (vendored): 1 addition

```diff
@@ -86,3 +86,4 @@ node_modules/**
 /blocklist.json
 /gui/gui/main.wasm
 /gui/gui/index.html
+database/testrealy
```
LICENSE.md (new file): 21 lines

```
MIT License

Copyright (c) 2022 nbd

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
```
apputil/apputil.go (new file): 24 lines

```go
package apputil

import (
	"os"
	"path/filepath"
)

// EnsureDir checks that a file could be written to a path, creating the
// directories as needed.
func EnsureDir(fileName string) {
	dirName := filepath.Dir(fileName)
	if _, serr := os.Stat(dirName); serr != nil {
		merr := os.MkdirAll(dirName, os.ModePerm)
		if merr != nil {
			panic(merr)
		}
	}
}

// FileExists reports whether the named file or directory exists.
func FileExists(filePath string) bool {
	_, e := os.Stat(filePath)
	return e == nil
}
```
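The two helpers pair naturally: check for the file, ensure its directory exists, then write. A minimal usage sketch (the path is hypothetical):

```go
package main

import (
	"os"

	"x.realy.lol/apputil"
)

func main() {
	path := "/tmp/demo/config.json" // hypothetical path
	if !apputil.FileExists(path) {
		// make sure /tmp/demo exists before writing the file
		apputil.EnsureDir(path)
		if err := os.WriteFile(path, []byte("{}"), 0600); err != nil {
			panic(err)
		}
	}
}
```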
apputil/doc.go (new file): 2 lines

```go
// Package apputil provides some simple filesystem functions
package apputil
```
codec/codec.go (new file): 10 lines

```go
package codec

import (
	"io"
)

type I interface {
	MarshalWrite(w io.Writer) (err error)
	UnmarshalRead(r io.Reader) (err error)
}
```
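Everything in the indexes package further down is built from this two-method contract. As a rough illustration, a fixed-width uint32 field could satisfy codec.I as below; U32 is a hypothetical type, not part of this diff. Big-endian encoding matters here because the bytes double as ordered database keys.

```go
package codecexample

import (
	"encoding/binary"
	"io"
)

// U32 is a hypothetical fixed-width field implementing the codec.I contract:
// MarshalWrite emits exactly the bytes that UnmarshalRead consumes.
type U32 struct{ N uint32 }

func (u *U32) MarshalWrite(w io.Writer) (err error) {
	var b [4]byte
	binary.BigEndian.PutUint32(b[:], u.N)
	_, err = w.Write(b[:])
	return
}

func (u *U32) UnmarshalRead(r io.Reader) (err error) {
	var b [4]byte
	if _, err = io.ReadFull(r, b[:]); err != nil {
		return
	}
	u.N = binary.BigEndian.Uint32(b[:])
	return
}
```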
database/filter.go (new file): 192 lines

```go
package database

import (
	"bytes"
	"math"
	"sort"

	"x.realy.lol/chk"
	"x.realy.lol/database/indexes"
	"x.realy.lol/database/indexes/types/pubhash"
	"x.realy.lol/database/indexes/types/varint"
	"x.realy.lol/filter"
	"x.realy.lol/hex"
	"x.realy.lol/log"
	"x.realy.lol/timestamp"
)

type Bitfield byte

const (
	hasIds     Bitfield = 1
	hasKinds   Bitfield = 2
	hasAuthors Bitfield = 4
	hasTags    Bitfield = 8
	hasSince   Bitfield = 16
	hasUntil   Bitfield = 32
	hasLimit   Bitfield = 64
	hasSearch  Bitfield = 128
)

func ToBitfield(f *filter.F) (b Bitfield) {
	if len(f.Ids) != 0 {
		b += hasIds
	}
	if len(f.Kinds) != 0 {
		b += hasKinds
	}
	if len(f.Authors) != 0 {
		b += hasAuthors
	}
	if len(f.Tags) != 0 {
		b += hasTags
	}
	if f.Since != nil {
		b += hasSince
	}
	if f.Until != nil {
		b += hasUntil
	}
	if f.Limit != nil {
		b += hasLimit
	}
	if f.Search != "" {
		b += hasSearch
	}
	return
}

// Filter runs a nip-01 type query on a provided filter and returns the database serial keys of
// the matching events, excluding events whose authors appear in the provided exclude list.
func (d *D) Filter(f filter.F, exclude []*pubhash.T) (evSerials varint.S, err error) {
	var evs varint.S
	bf := ToBitfield(&f)
	// first, if there are Ids, they override everything else
	if bf&hasIds != 0 {
		for _, v := range f.Ids {
			var id []byte
			if id, err = hex.Dec(v); chk.E(err) {
				// ignore ids that fail to decode
				continue
			}
			var ev *varint.V
			if ev, err = d.FindEventSerialById(id); chk.E(err) {
				// ignore ids that are not found
				continue
			}
			evs = append(evs, ev)
		}
		return
	}
	var since, until *timestamp.Timestamp
	if bf&hasSince != 0 {
		since = f.Since
	}
	if bf&hasUntil != 0 {
		until = f.Until
	} else {
		// default until to the maximum so reverse scans always have a start point
		m := timestamp.Timestamp(math.MaxInt64)
		until = &m
	}
	limit := f.Limit
	var postLimit bool
	if limit == nil {
		// put a reasonable cap on unlimited. the actual results may be a lot less for composite
		// searches that intersect with tags.
		limit = filter.IntToPointer(10000)
	} else {
		// a caller-provided limit means trim the result at the end before returning.
		postLimit = true
	}
	log.I.F("%b %b", bf, bf&(hasSince+hasUntil+hasLimit))
	bf = bf &^ hasLimit
	// next, check for filters that only have since and/or until
	if bf&(hasSince+hasUntil) != 0 && ^(hasUntil+hasSince)&bf == 0 {
		if evs, err = d.GetEventSerialsByCreatedAtRange(since, until, limit, postLimit); chk.E(err) {
			return
		}
		goto done
	}
	// next, kinds
	if bf&hasKinds == hasKinds && ^hasKinds&bf == 0 {
		log.I.F("kinds")
		if evs, err = d.GetEventSerialsByKinds(f.Kinds, limit); chk.E(err) {
			return
		}
		goto done
	}
	// next, kinds/created_at
	if (bf&(hasKinds+hasSince) == hasKinds+hasSince ||
		bf&(hasKinds+hasUntil) == hasKinds+hasUntil ||
		bf&(hasKinds+hasUntil+hasSince) == hasKinds+hasUntil+hasSince) &&
		^(hasKinds+hasUntil+hasSince)&bf == 0 {
		if evs, err = d.GetEventSerialsByKindsCreatedAtRange(f.Kinds, since, until, limit); chk.E(err) {
			return
		}
		goto done
	}
	// next authors
	if bf&hasAuthors == hasAuthors && ^hasAuthors&bf == 0 {
		if evs, err = d.GetEventSerialsByAuthorsCreatedAtRange(f.Authors, since, until, limit); chk.E(err) {
			return
		}
		goto done
	}
	// next authors/kinds
	if ak := hasAuthors + hasKinds; bf&ak == ak && ^ak&bf == 0 {
		if evs, err = d.GetEventSerialsByKindsAuthorsCreatedAtRange(f.Kinds, f.Authors, since, until, limit); chk.E(err) {
			return
		}
		goto done
	}
	// if there are only tags, search by the tag indexes
	if bf&hasTags != 0 && bf&^hasTags == 0 {
		if evs, err = d.GetEventSerialsByTagsCreatedAtRange(f.Tags, limit); chk.E(err) {
			return
		}
		goto done
	}
	// next authors/tags
	if at := hasAuthors + hasTags; bf&at == at && ^at&bf == 0 {
		if evs, err = d.GetEventSerialsByAuthorsTagsCreatedAtRange(f.Tags, f.Authors, since, until, limit); chk.E(err) {
			return
		}
		goto done
	}
	// next kinds/tags
	if kt := hasKinds + hasTags; bf&kt == kt && ^kt&bf == 0 {
		if evs, err = d.GetEventSerialsByKindsTagsCreatedAtRange(f.Tags, f.Kinds, since,
			until, limit); chk.E(err) {
			return
		}
		goto done
	}
	// next kinds/authors/tags
	if kat := hasKinds + hasAuthors + hasTags; bf&kat == kat && ^kat&bf == 0 {
		if evs, err = d.GetEventSerialsByKindsAuthorsTagsCreatedAtRange(f.Tags, f.Kinds, f.Authors, since, until, limit); chk.E(err) {
			return
		}
		goto done
	}
done:
	// scan the FullIndex for these serials, and sort them by descending created_at
	var index []indexes.FullIndex
	if index, err = d.GetFullIndexesFromSerials(evs); chk.E(err) {
		return
	}
	// sort by reverse chronological order
	sort.Slice(index, func(i, j int) bool {
		return index[i].CreatedAt.ToTimestamp() > index[j].CreatedAt.ToTimestamp()
	})
	for _, item := range index {
		var excluded bool
		for _, x := range exclude {
			if bytes.Equal(item.Pubkey.Bytes(), x.Bytes()) {
				excluded = true
				break
			}
		}
		if excluded {
			continue
		}
		evSerials = append(evSerials, item.Ser)
	}
	return
}
```
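The dispatch tests above all share one shape: `bf&mask == mask` requires every bit of the branch to be present, and `^mask&bf == 0` requires that no other bit is set, so each branch fires only for exactly its combination. A small illustrative function, written as if inside the same database package (the filter values are made up):

```go
// bitfieldDemo is illustrative only; it assumes the database package context.
func bitfieldDemo() {
	ts := timestamp.Timestamp(1700000000)
	f := filter.F{Kinds: []int{1}, Since: &ts}
	bf := ToBitfield(&f) // hasKinds|hasSince == 2+16 == 18
	mask := hasKinds + hasSince
	fires := bf&mask == mask && ^(hasKinds+hasUntil+hasSince)&bf == 0
	log.I.F("kinds+since dispatches: %v", fires) // true
	// adding authors sets another bit, so the complement check fails and the
	// kinds/created_at branch no longer matches
	f.Authors = []string{"deadbeef"}
	bf = ToBitfield(&f)
	fires = bf&mask == mask && ^(hasKinds+hasUntil+hasSince)&bf == 0
	log.I.F("with authors too: %v", fires) // false
}
```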
database/filter_test.go (new file): 80 lines

```go
package database

import (
	"bufio"
	"bytes"
	"testing"

	"x.realy.lol/apputil"
	"x.realy.lol/chk"
	"x.realy.lol/database/indexes/types/varint"
	"x.realy.lol/event"
	"x.realy.lol/filter"
	"x.realy.lol/interrupt"
	"x.realy.lol/log"
)

func TestD_Filter(t *testing.T) {
	var err error
	d := New()
	tmpDir := "testrealy"
	// the example events only need to be loaded on the first run, when the
	// database directory does not yet exist
	dbIsNew := !apputil.FileExists(tmpDir)
	if err = d.Init(tmpDir); chk.E(err) {
		t.Fatal(err)
	}
	interrupt.AddHandler(func() {
		d.Close()
	})
	if dbIsNew {
		buf := bytes.NewBuffer(ExampleEvents)
		scan := bufio.NewScanner(buf)
		scan.Buffer(make([]byte, 5120000), 5120000)
		var count, errs int
		for scan.Scan() {
			b := scan.Bytes()
			ev := event.New()
			if err = ev.Unmarshal(b); chk.E(err) {
				t.Fatalf("%s:\n%s", err, b)
			}
			// verify the signature on the event
			var ok bool
			if ok, err = ev.Verify(); chk.E(err) {
				errs++
				continue
			}
			if !ok {
				errs++
				log.E.F("event signature is invalid\n%s", b)
				continue
			}
			count++
			if count%1000 == 0 {
				log.I.F("unmarshaled %d events", count)
			}
			if err = d.StoreEvent(ev); chk.E(err) {
				continue
			}
		}
		log.I.F("stored %d events", count)
	}
	// fetch some kind 0
	var sers []*varint.V
	if sers, err = d.Filter(filter.F{
		Kinds: []int{0},
		Limit: filter.IntToPointer(50),
	}, nil); chk.E(err) {
		t.Fatal(err)
	}
	// log.I.S(sers)
	var fids [][]byte
	for _, ser := range sers {
		var evIds []byte
		if evIds, err = d.GetEventIdFromSerial(ser); chk.E(err) {
			// continue
			log.I.S(ser)
			t.Fatal(err)
		}
		fids = append(fids, evIds)
	}
	log.I.S(fids)
}
```
database/find.go (new file): 642 lines

```go
package database

import (
	"bytes"
	"fmt"

	"github.com/dgraph-io/badger/v4"

	"x.realy.lol/chk"
	"x.realy.lol/database/indexes"
	"x.realy.lol/database/indexes/prefixes"
	"x.realy.lol/database/indexes/types/idhash"
	"x.realy.lol/database/indexes/types/prefix"
	"x.realy.lol/database/indexes/types/varint"
	"x.realy.lol/errorf"
	"x.realy.lol/event"
	"x.realy.lol/filter"
	"x.realy.lol/log"
	"x.realy.lol/tags"
	"x.realy.lol/timestamp"
)

func (d *D) FindEventSerialById(evId []byte) (ser *varint.V, err error) {
	id := idhash.New()
	if err = id.FromId(evId); chk.E(err) {
		return
	}
	// find by id
	if err = d.View(func(txn *badger.Txn) (err error) {
		key := new(bytes.Buffer)
		if err = indexes.IdSearch(id).MarshalWrite(key); chk.E(err) {
			return
		}
		it := txn.NewIterator(badger.IteratorOptions{Prefix: key.Bytes()})
		defer it.Close()
		for it.Seek(key.Bytes()); it.Valid(); it.Next() {
			item := it.Item()
			k := item.KeyCopy(nil)
			buf := bytes.NewBuffer(k)
			ser = varint.New()
			if err = indexes.IdDec(id, ser).UnmarshalRead(buf); chk.E(err) {
				return
			}
		}
		return
	}); err != nil {
		return
	}
	if ser == nil {
		err = fmt.Errorf("event %0x not found", evId)
		return
	}
	return
}

func (d *D) GetEventFromSerial(ser *varint.V) (ev *event.E, err error) {
	if err = d.View(func(txn *badger.Txn) (err error) {
		enc := indexes.EventEnc(ser)
		kb := new(bytes.Buffer)
		if err = enc.MarshalWrite(kb); chk.E(err) {
			return
		}
		var item *badger.Item
		if item, err = txn.Get(kb.Bytes()); err != nil {
			return
		}
		var val []byte
		if val, err = item.ValueCopy(nil); chk.E(err) {
			return
		}
		ev = event.New()
		vr := bytes.NewBuffer(val)
		if err = ev.UnmarshalRead(vr); chk.E(err) {
			return
		}
		return
	}); err != nil {
		return
	}
	return
}

func (d *D) GetEventIdFromSerial(ser *varint.V) (id []byte, err error) {
	if err = d.View(func(txn *badger.Txn) (err error) {
		enc := indexes.New(prefix.New(prefixes.FullIndex), ser)
		prf := new(bytes.Buffer)
		if err = enc.MarshalWrite(prf); chk.E(err) {
			return
		}
		it := txn.NewIterator(badger.IteratorOptions{Prefix: prf.Bytes()})
		defer it.Close()
		for it.Seek(prf.Bytes()); it.Valid(); it.Next() {
			item := it.Item()
			key := item.KeyCopy(nil)
			kbuf := bytes.NewBuffer(key)
			_, t, p, ki, ca := indexes.FullIndexVars()
			dec := indexes.FullIndexDec(ser, t, p, ki, ca)
			if err = dec.UnmarshalRead(kbuf); chk.E(err) {
				return
			}
			id = t.Bytes()
		}
		return
	}); chk.E(err) {
		return
	}
	return
}

func (d *D) GetEventById(evId []byte) (ev *event.E, err error) {
	var ser *varint.V
	if ser, err = d.FindEventSerialById(evId); err != nil {
		return
	}
	ev, err = d.GetEventFromSerial(ser)
	return
}

// GetEventSerialsByCreatedAtRange returns the serials of events with the given since/until
// range in reverse chronological order (starting at until, going back to since).
func (d *D) GetEventSerialsByCreatedAtRange(since, until *timestamp.Timestamp,
	limit *int, postLimit bool) (sers varint.S, err error) {
	log.I.F("GetEventSerialsByCreatedAtRange")
	// get the start (end) max possible index prefix
	startCreatedAt, _ := indexes.CreatedAtVars()
	startCreatedAt.FromInt(until.ToInt())
	prf := new(bytes.Buffer)
	if err = indexes.CreatedAtEnc(startCreatedAt, nil).MarshalWrite(prf); chk.E(err) {
		return
	}
	var count int
	if err = d.View(func(txn *badger.Txn) (err error) {
		it := txn.NewIterator(badger.IteratorOptions{Reverse: true, Prefix: prf.Bytes()})
		defer it.Close()
		key := make([]byte, 10)
		for it.Rewind(); it.Valid(); it.Next() {
			item := it.Item()
			key = item.KeyCopy(key)
			ca, ser := indexes.CreatedAtVars()
			buf := bytes.NewBuffer(key)
			if err = indexes.CreatedAtDec(ca, ser).UnmarshalRead(buf); chk.E(err) {
				// skip it then
				continue
			}
			if since != nil && ca.ToTimestamp() < *since {
				break
			}
			sers = append(sers, ser)
			count++
			if !postLimit && limit != nil && count > *limit {
				return
			}
		}
		return
	}); chk.E(err) {
		return
	}
	if postLimit && limit != nil && len(sers) > *limit {
		sers = sers[:*limit]
	}
	return
}

func (d *D) GetEventSerialsByKinds(kinds []int, limit *int) (sers varint.S, err error) {
	log.I.F("GetEventSerialsByKinds")
	// get the start (end) max possible index prefix, one for each kind in the list
	var searchIdxs [][]byte
	kind, _ := indexes.KindVars()
	for _, k := range kinds {
		kind.Set(k)
		prf := new(bytes.Buffer)
		if err = indexes.KindEnc(kind, nil).MarshalWrite(prf); chk.E(err) {
			return
		}
		searchIdxs = append(searchIdxs, prf.Bytes())
	}
	var count int
	for _, idx := range searchIdxs {
		if err = d.View(func(txn *badger.Txn) (err error) {
			it := txn.NewIterator(badger.DefaultIteratorOptions)
			defer it.Close()
			var key []byte
			for it.Seek(idx); it.ValidForPrefix(idx); it.Next() {
				item := it.Item()
				key = item.KeyCopy(nil)
				ki, ser := indexes.KindVars()
				buf := bytes.NewBuffer(key)
				if err = indexes.KindDec(ki, ser).UnmarshalRead(buf); chk.E(err) {
					// skip it then
					continue
				}
				sers = append(sers, ser)
				count++
				if limit != nil && count >= *limit {
					return
				}
			}
			return
		}); chk.E(err) {
			return
		}
	}
	return
}

func (d *D) GetEventSerialsByKindsCreatedAtRange(kinds []int, since, until *timestamp.Timestamp, limit *int) (sers varint.S, err error) {
	// get the start (end) max possible index prefix, one for each kind in the list
	var searchIdxs [][]byte
	kind, startCreatedAt, _ := indexes.KindCreatedAtVars()
	startCreatedAt.FromInt(until.ToInt())
	for _, k := range kinds {
		kind.Set(k)
		prf := new(bytes.Buffer)
		if err = indexes.KindCreatedAtEnc(kind, startCreatedAt, nil).MarshalWrite(prf); chk.E(err) {
			return
		}
		searchIdxs = append(searchIdxs, prf.Bytes())
	}
	var count int
	for _, idx := range searchIdxs {
		if err = d.View(func(txn *badger.Txn) (err error) {
			it := txn.NewIterator(badger.IteratorOptions{Reverse: true, Prefix: idx})
			defer it.Close()
			var key []byte
			for it.Rewind(); it.Valid(); it.Next() {
				item := it.Item()
				key = item.KeyCopy(key)
				ki, ca, ser := indexes.KindCreatedAtVars()
				buf := bytes.NewBuffer(key)
				if err = indexes.KindCreatedAtDec(ki, ca, ser).UnmarshalRead(buf); chk.E(err) {
					// skip it then
					continue
				}
				if since != nil && ca.ToTimestamp() < *since {
					break
				}
				sers = append(sers, ser)
				count++
				if limit != nil && count > *limit {
					return
				}
			}
			return
		}); chk.E(err) {
			return
		}
	}
	return
}

func (d *D) GetEventSerialsByAuthors(pubkeys []string, limit *int) (sers varint.S, err error) {
	// get the start (end) max possible index prefix, one for each pubkey in the list
	var searchIdxs [][]byte
	var pkDecodeErrs int
	pubkey, _ := indexes.PubkeyVars()
	for _, p := range pubkeys {
		if err = pubkey.FromPubkeyHex(p); chk.E(err) {
			// gracefully ignore wrong keys
			pkDecodeErrs++
			continue
		}
		prf := new(bytes.Buffer)
		if err = indexes.PubkeyEnc(pubkey, nil).MarshalWrite(prf); chk.E(err) {
			return
		}
		searchIdxs = append(searchIdxs, prf.Bytes())
	}
	if len(pubkeys) > 0 && pkDecodeErrs == len(pubkeys) {
		err = errorf.E("all pubkeys in authors field of filter failed to decode")
		return
	}
	var count int
	for _, idx := range searchIdxs {
		if err = d.View(func(txn *badger.Txn) (err error) {
			it := txn.NewIterator(badger.IteratorOptions{Reverse: true, Prefix: idx})
			defer it.Close()
			key := make([]byte, 10)
			for it.Rewind(); it.Valid(); it.Next() {
				item := it.Item()
				key = item.KeyCopy(key)
				p, ser := indexes.PubkeyVars()
				buf := bytes.NewBuffer(key)
				if err = indexes.PubkeyDec(p, ser).UnmarshalRead(buf); chk.E(err) {
					// skip it then
					continue
				}
				sers = append(sers, ser)
				count++
				if limit != nil && count > *limit {
					return
				}
			}
			return
		}); chk.E(err) {
			return
		}
	}
	return
}

func (d *D) GetEventSerialsByAuthorsCreatedAtRange(pubkeys []string,
	since, until *timestamp.Timestamp, limit *int) (sers varint.S, err error) {
	// get the start (end) max possible index prefix, one for each pubkey in the list
	var searchIdxs [][]byte
	var pkDecodeErrs int
	pubkey, startCreatedAt, _ := indexes.PubkeyCreatedAtVars()
	startCreatedAt.FromInt(until.ToInt())
	for _, p := range pubkeys {
		if err = pubkey.FromPubkeyHex(p); chk.E(err) {
			// gracefully ignore wrong keys
			pkDecodeErrs++
			continue
		}
		prf := new(bytes.Buffer)
		if err = indexes.PubkeyCreatedAtEnc(pubkey, startCreatedAt, nil).MarshalWrite(prf); chk.E(err) {
			return
		}
		searchIdxs = append(searchIdxs, prf.Bytes())
	}
	if len(pubkeys) > 0 && pkDecodeErrs == len(pubkeys) {
		err = errorf.E("all pubkeys in authors field of filter failed to decode")
		return
	}
	var count int
	for _, idx := range searchIdxs {
		if err = d.View(func(txn *badger.Txn) (err error) {
			it := txn.NewIterator(badger.IteratorOptions{Reverse: true, Prefix: idx})
			defer it.Close()
			key := make([]byte, 10)
			for it.Rewind(); it.Valid(); it.Next() {
				item := it.Item()
				key = item.KeyCopy(key)
				p, ca, ser := indexes.PubkeyCreatedAtVars()
				buf := bytes.NewBuffer(key)
				if err = indexes.PubkeyCreatedAtDec(p, ca, ser).UnmarshalRead(buf); chk.E(err) {
					// skip it then
					continue
				}
				if since != nil && ca.ToTimestamp() < *since {
					break
				}
				sers = append(sers, ser)
				count++
				if limit != nil && count > *limit {
					return
				}
			}
			return
		}); chk.E(err) {
			return
		}
	}
	return
}

func (d *D) GetEventSerialsByKindsAuthorsCreatedAtRange(kinds []int, pubkeys []string,
	since, until *timestamp.Timestamp, limit *int) (sers varint.S, err error) {
	// get the start (end) max possible index prefix, one for each kind/pubkey pair
	var searchIdxs [][]byte
	var pkDecodeErrs int
	kind, pubkey, startCreatedAt, _ := indexes.KindPubkeyCreatedAtVars()
	startCreatedAt.FromInt(until.ToInt())
	for _, k := range kinds {
		for _, p := range pubkeys {
			if err = pubkey.FromPubkeyHex(p); chk.E(err) {
				// gracefully ignore wrong keys
				pkDecodeErrs++
				continue
			}
			kind.Set(k)
			prf := new(bytes.Buffer)
			if err = indexes.KindPubkeyCreatedAtEnc(kind, pubkey, startCreatedAt, nil).MarshalWrite(prf); chk.E(err) {
				return
			}
			searchIdxs = append(searchIdxs, prf.Bytes())
		}
	}
	if len(kinds) > 0 && len(pubkeys) > 0 && pkDecodeErrs == len(kinds)*len(pubkeys) {
		err = errorf.E("all pubkeys in authors field of filter failed to decode")
		return
	}
	var count int
	for _, idx := range searchIdxs {
		if err = d.View(func(txn *badger.Txn) (err error) {
			it := txn.NewIterator(badger.IteratorOptions{Reverse: true, Prefix: idx})
			defer it.Close()
			key := make([]byte, 10)
			for it.Rewind(); it.Valid(); it.Next() {
				item := it.Item()
				key = item.KeyCopy(key)
				ki, p, ca, ser := indexes.KindPubkeyCreatedAtVars()
				buf := bytes.NewBuffer(key)
				if err = indexes.KindPubkeyCreatedAtDec(ki, p, ca, ser).UnmarshalRead(buf); chk.E(err) {
					// skip it then
					continue
				}
				if since != nil && ca.ToTimestamp() < *since {
					break
				}
				sers = append(sers, ser)
				count++
				if limit != nil && count > *limit {
					return
				}
			}
			return
		}); chk.E(err) {
			return
		}
	}
	return
}

// GetEventSerialsByTagsCreatedAtRange searches for events that match the tags in a filter and
// returns the list of serials that were found.
func (d *D) GetEventSerialsByTagsCreatedAtRange(t filter.TagMap, limit *int) (sers varint.S, err error) {
	if len(t) < 1 {
		err = errorf.E("no tags provided")
		return
	}
	var searchIdxs [][]byte
	for tk, tv := range t {
		// the key of each element of the map must be `#X` where X is a-zA-Z
		if len(tk) != 2 {
			continue
		}
		if tk[0] != '#' {
			log.E.F("invalid tag map key '%s'", tk)
			continue
		}
		switch tk[1] {
		case 'a':
			// not sure if this is a thing. maybe a prefix search?
			for _, ta := range tv {
				var atag tags.Tag_a
				if atag, err = tags.Decode_a_Tag(ta); chk.E(err) {
					err = nil
					continue
				}
				if atag.Kind == 0 {
					err = nil
					continue
				}
				ki, pk, ident, _ := indexes.TagAVars()
				ki.Set(atag.Kind)
				if atag.Pubkey == nil {
					err = nil
					continue
				}
				if err = pk.FromPubkey(atag.Pubkey); chk.E(err) {
					err = nil
					continue
				}
				if err = ident.FromIdent([]byte(atag.Ident)); chk.E(err) {
					err = nil
					continue
				}
				buf := new(bytes.Buffer)
				if err = indexes.TagAEnc(ki, pk, ident, nil).MarshalWrite(buf); chk.E(err) {
					err = nil
					continue
				}
				searchIdxs = append(searchIdxs, buf.Bytes())
			}
		case 'd':
			// d tags are identifiers used to mark replaceable events to create a namespace,
			// so that references can replace them, or refer to them using 'a' tags.
			for _, td := range tv {
				ident, _ := indexes.TagIdentifierVars()
				if err = ident.FromIdent([]byte(td)); chk.E(err) {
					err = nil
					continue
				}
				buf := new(bytes.Buffer)
				if err = indexes.TagIdentifierEnc(ident, nil).MarshalWrite(buf); chk.E(err) {
					err = nil
					continue
				}
				searchIdxs = append(searchIdxs, buf.Bytes())
			}
		case 'e':
			// e tags refer to events. they can have a third field such as 'root' and 'reply'
			// but this third field isn't indexed.
			for _, te := range tv {
				evt, _ := indexes.TagEventVars()
				if err = evt.FromIdHex(te); chk.E(err) {
					err = nil
					continue
				}
				buf := new(bytes.Buffer)
				if err = indexes.TagEventEnc(evt, nil).MarshalWrite(buf); chk.E(err) {
					err = nil
					continue
				}
				searchIdxs = append(searchIdxs, buf.Bytes())
			}
		case 'p':
			// p tags are references to author pubkeys of events. usually a 64 character hex
			// string but sometimes a hashtag in follow events.
			for _, te := range tv {
				pk, _ := indexes.TagPubkeyVars()
				if err = pk.FromPubkeyHex(te); chk.E(err) {
					err = nil
					continue
				}
				buf := new(bytes.Buffer)
				if err = indexes.TagPubkeyEnc(pk, nil).MarshalWrite(buf); chk.E(err) {
					err = nil
					continue
				}
				searchIdxs = append(searchIdxs, buf.Bytes())
			}
		case 't':
			// t tags are hashtags, arbitrary strings that can be used to assist search for
			// topics.
			for _, tt := range tv {
				ht, _ := indexes.TagHashtagVars()
				if err = ht.FromIdent([]byte(tt)); chk.E(err) {
					err = nil
					continue
				}
				buf := new(bytes.Buffer)
				if err = indexes.TagHashtagEnc(ht, nil).MarshalWrite(buf); chk.E(err) {
					err = nil
					continue
				}
				searchIdxs = append(searchIdxs, buf.Bytes())
			}
		default:
			// everything else is arbitrary strings, that may have application specific
			// semantics.
			for _, tl := range tv {
				l, val, _ := indexes.TagLetterVars()
				l.Set(tk[1])
				if err = val.FromIdent([]byte(tl)); chk.E(err) {
					err = nil
					continue
				}
				buf := new(bytes.Buffer)
				if err = indexes.TagLetterEnc(l, val, nil).MarshalWrite(buf); chk.E(err) {
					err = nil
					continue
				}
				searchIdxs = append(searchIdxs, buf.Bytes())
			}
		}
	}
	// todo: implement the scan over searchIdxs; the prefixes are assembled above
	// but the keys are not yet iterated to collect serials.
	return
}

// GetEventSerialsByAuthorsTagsCreatedAtRange first narrows the result set by authors and
// created_at range, then intersects it with the serials matching the tags.
func (d *D) GetEventSerialsByAuthorsTagsCreatedAtRange(t filter.TagMap, pubkeys []string, since, until *timestamp.Timestamp, limit *int) (sers varint.S, err error) {
	var acSers, tagSers varint.S
	if acSers, err = d.GetEventSerialsByAuthorsCreatedAtRange(pubkeys, since, until, limit); chk.E(err) {
		return
	}
	// now we have the most limited set of serials that are included by the pubkeys, we can then
	// construct the tags searches for all of these serials to filter out the events that don't
	// have both author AND one of the tags.
	if tagSers, err = d.GetEventSerialsByTagsCreatedAtRange(t, limit); chk.E(err) {
		return
	}
	// remove the serials that are not present in both lists.
	sers = varint.Intersect(acSers, tagSers)
	return
}

// GetEventSerialsByKindsTagsCreatedAtRange first narrows the result set by kinds and
// created_at range, then intersects it with the serials matching the tags.
func (d *D) GetEventSerialsByKindsTagsCreatedAtRange(t filter.TagMap, kinds []int, since, until *timestamp.Timestamp, limit *int) (sers varint.S, err error) {
	var acSers, tagSers varint.S
	if acSers, err = d.GetEventSerialsByKindsCreatedAtRange(kinds, since, until, limit); chk.E(err) {
		return
	}
	// now we have the most limited set of serials that are included by the kinds, we can then
	// construct the tags searches for all of these serials to filter out the events that don't
	// have both kind AND one of the tags.
	if tagSers, err = d.GetEventSerialsByTagsCreatedAtRange(t, limit); chk.E(err) {
		return
	}
	// remove the serials that are not present in both lists.
	sers = varint.Intersect(acSers, tagSers)
	return
}

// GetEventSerialsByKindsAuthorsTagsCreatedAtRange first narrows the result set by kinds,
// authors and created_at range, then intersects it with the serials matching the tags.
func (d *D) GetEventSerialsByKindsAuthorsTagsCreatedAtRange(t filter.TagMap, kinds []int,
	pubkeys []string, since, until *timestamp.Timestamp,
	limit *int) (sers varint.S, err error) {
	var acSers, tagSers varint.S
	if acSers, err = d.GetEventSerialsByKindsAuthorsCreatedAtRange(kinds, pubkeys,
		since, until, limit); chk.E(err) {
		return
	}
	// now we have the most limited set of serials that are included by the pubkeys, we can then
	// construct the tags searches for all of these serials to filter out the events that don't
	// have both author AND one of the tags.
	if tagSers, err = d.GetEventSerialsByTagsCreatedAtRange(t, limit); chk.E(err) {
		return
	}
	// remove the serials that are not present in both lists.
	sers = varint.Intersect(acSers, tagSers)
	return
}

func (d *D) GetFullIndexesFromSerials(sers varint.S) (index []indexes.FullIndex, err error) {
	log.I.F("GetFullIndexesFromSerials")
	for _, ser := range sers {
		if err = d.View(func(txn *badger.Txn) (err error) {
			buf := new(bytes.Buffer)
			if err = indexes.FullIndexEnc(ser, nil, nil, nil, nil).MarshalWrite(buf); chk.E(err) {
				return
			}
			prf := buf.Bytes()
			it := txn.NewIterator(badger.IteratorOptions{Prefix: prf})
			defer it.Close()
			for it.Seek(prf); it.Valid(); {
				item := it.Item()
				key := item.KeyCopy(nil)
				kBuf := bytes.NewBuffer(key)
				s, t, p, k, c := indexes.FullIndexVars()
				if err = indexes.FullIndexDec(s, t, p, k, c).UnmarshalRead(kBuf); chk.E(err) {
					return
				}
				index = append(index, indexes.FullIndex{
					Ser:       s,
					Id:        t,
					Pubkey:    p,
					Kind:      k,
					CreatedAt: c,
				})
				// there is only one full index per serial, so stop after the first hit
				return
			}
			return
		}); chk.E(err) {
			// just skip it then.
		}
	}
	return
}
```
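Every finder above is a variation on one badger pattern: encode the known leading components of an index into a key prefix (passing nil for the trailing fields), scan keys under that prefix, and decode each visited key to recover the serial at its tail. Condensed to its skeleton, under the same badger/v4 import:

```go
// scanPrefix is a condensed sketch of the shared finder pattern; prefix is a
// key prefix produced by one of the indexes.*Enc helpers with nil tail fields.
func scanPrefix(db *badger.DB, prefix []byte, visit func(key []byte)) error {
	return db.View(func(txn *badger.Txn) (err error) {
		it := txn.NewIterator(badger.IteratorOptions{Prefix: prefix})
		defer it.Close()
		for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
			// each visited key decodes to its components plus the trailing serial
			visit(it.Item().KeyCopy(nil))
		}
		return
	})
}
```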
database/fulltext.go (new file): 114 lines

```go
package database

import (
	"bytes"
	"unicode"
	"unicode/utf8"

	"github.com/clipperhouse/uax29/words"

	"x.realy.lol/chk"
	"x.realy.lol/database/indexes"
	"x.realy.lol/database/indexes/types/fulltext"
	"x.realy.lol/database/indexes/types/varint"
	"x.realy.lol/event"
	"x.realy.lol/hex"
	"x.realy.lol/kind"
)

type Words struct {
	ser     *varint.V
	ev      *event.E
	wordMap map[string]int
}

func (d *D) GetFulltextKeys(ev *event.E, ser *varint.V) (keys [][]byte, err error) {
	w := d.GetWordsFromContent(ev)
	for i := range w {
		ft := fulltext.New()
		ft.FromWord([]byte(i))
		pos := varint.New()
		pos.FromUint64(uint64(w[i]))
		buf := new(bytes.Buffer)
		if err = indexes.FullTextWordEnc(ft, pos, ser).MarshalWrite(buf); chk.E(err) {
			return
		}
		keys = append(keys, buf.Bytes())
	}
	return
}

func (d *D) GetWordsFromContent(ev *event.E) (wordMap map[string]int) {
	wordMap = make(map[string]int)
	if kind.IsText(ev.Kind) {
		content := ev.Content
		seg := words.NewSegmenter([]byte(content))
		var counter int
		for seg.Next() {
			w := seg.Bytes()
			w = bytes.ToLower(w)
			var ru rune
			ru, _ = utf8.DecodeRune(w)
			// ignore the most common things that aren't words
			if !unicode.IsSpace(ru) &&
				!unicode.IsPunct(ru) &&
				!unicode.IsSymbol(ru) &&
				!bytes.HasSuffix(w, []byte(".jpg")) &&
				!bytes.HasSuffix(w, []byte(".png")) &&
				!bytes.HasSuffix(w, []byte(".jpeg")) &&
				!bytes.HasSuffix(w, []byte(".mp4")) &&
				!bytes.HasSuffix(w, []byte(".mov")) &&
				!bytes.HasSuffix(w, []byte(".aac")) &&
				!bytes.HasSuffix(w, []byte(".mp3")) &&
				!IsEntity(w) &&
				!bytes.Contains(w, []byte(".")) {
				// skip 64/128 character hex strings such as event ids and signatures
				if len(w) == 64 || len(w) == 128 {
					if _, err := hex.Dec(string(w)); err == nil {
						continue
					}
				}
				wordMap[string(w)] = counter
				counter++
			}
		}
	}
	return
}

// IsEntity reports whether a token looks like a nostr/bech32-style entity or a
// similar opaque blob rather than a searchable word.
func IsEntity(w []byte) (is bool) {
	var b []byte
	b = []byte("nostr:")
	if bytes.Contains(w, b) && len(b)+10 < len(w) {
		return true
	}
	b = []byte("npub")
	if bytes.Contains(w, b) && len(b)+5 < len(w) {
		return true
	}
	b = []byte("nsec")
	if bytes.Contains(w, b) && len(b)+5 < len(w) {
		return true
	}
	b = []byte("nevent")
	if bytes.Contains(w, b) && len(b)+5 < len(w) {
		return true
	}
	b = []byte("naddr")
	if bytes.Contains(w, b) && len(b)+5 < len(w) {
		return true
	}
	b = []byte("note")
	if bytes.Contains(w, b) && len(b)+20 < len(w) {
		return true
	}
	b = []byte("lnurl")
	if bytes.Contains(w, b) && len(b)+20 < len(w) {
		return true
	}
	b = []byte("cashu")
	if bytes.Contains(w, b) && len(b)+20 < len(w) {
		return true
	}
	return
}
```
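The uax29 segmenter does the heavy lifting of Unicode word boundary detection; the filters above only discard tokens that make poor search terms. A quick standalone sketch of the segmentation step:

```go
package main

import (
	"fmt"

	"github.com/clipperhouse/uax29/words"
)

func main() {
	seg := words.NewSegmenter([]byte("GM nostr, tokenize this note"))
	for seg.Next() {
		// emits words and the separators between them; the indexer keeps only
		// tokens that pass the rune and suffix filters
		fmt.Printf("%q\n", seg.Bytes())
	}
}
```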
database/get-event-indexes.go (new file): 278 lines

```go
package database

import (
	"bytes"
	"time"

	"x.realy.lol/chk"
	"x.realy.lol/database/indexes"
	"x.realy.lol/database/indexes/types/fullid"
	identhash "x.realy.lol/database/indexes/types/identHash"
	"x.realy.lol/database/indexes/types/idhash"
	"x.realy.lol/database/indexes/types/kindidx"
	"x.realy.lol/database/indexes/types/letter"
	"x.realy.lol/database/indexes/types/pubhash"
	"x.realy.lol/database/indexes/types/timestamp"
	"x.realy.lol/database/indexes/types/varint"
	"x.realy.lol/event"
	"x.realy.lol/hex"
	"x.realy.lol/tags"
)

// GetEventIndexes generates a set of indexes for a new event record. The first record is the
// key that should have the binary encoded event as its value.
func (d *D) GetEventIndexes(ev *event.E) (indices [][]byte, ser *varint.V, err error) {
	// get a new serial
	ser = varint.New()
	var s uint64
	if s, err = d.Serial(); chk.E(err) {
		return
	}
	ser.FromUint64(s)
	// create the event id key
	id := idhash.New()
	var idb []byte
	if idb, err = ev.IdBytes(); chk.E(err) {
		return
	}
	if err = id.FromId(idb); chk.E(err) {
		return
	}
	evIDB := new(bytes.Buffer)
	if err = indexes.IdEnc(id, ser).MarshalWrite(evIDB); chk.E(err) {
		return
	}
	indices = append(indices, evIDB.Bytes())
	// create the full index key
	fid := fullid.New()
	if err = fid.FromId(idb); chk.E(err) {
		return
	}
	p := pubhash.New()
	var pk []byte
	if pk, err = ev.PubBytes(); chk.E(err) {
		return
	}
	if err = p.FromPubkey(pk); chk.E(err) {
		return
	}
	ki := kindidx.FromKind(ev.Kind)
	ca := &timestamp.T{}
	ca.FromInt(ev.CreatedAt.ToInt())
	evIFiB := new(bytes.Buffer)
	if err = indexes.FullIndexEnc(ser, fid, p, ki, ca).MarshalWrite(evIFiB); chk.E(err) {
		return
	}
	indices = append(indices, evIFiB.Bytes())
	// pubkey index
	evIPkB := new(bytes.Buffer)
	if err = indexes.PubkeyEnc(p, ser).MarshalWrite(evIPkB); chk.E(err) {
		return
	}
	indices = append(indices, evIPkB.Bytes())
	// pubkey/created_at index
	evIPkCaB := new(bytes.Buffer)
	if err = indexes.PubkeyCreatedAtEnc(p, ca, ser).MarshalWrite(evIPkCaB); chk.E(err) {
		return
	}
	indices = append(indices, evIPkCaB.Bytes())
	// created_at index
	evICaB := new(bytes.Buffer)
	if err = indexes.CreatedAtEnc(ca, ser).MarshalWrite(evICaB); chk.E(err) {
		return
	}
	indices = append(indices, evICaB.Bytes())
	// FirstSeen index
	evIFsB := new(bytes.Buffer)
	fs := &timestamp.T{}
	fs.FromInt64(time.Now().Unix())
	if err = indexes.FirstSeenEnc(ser, fs).MarshalWrite(evIFsB); chk.E(err) {
		return
	}
	indices = append(indices, evIFsB.Bytes())
	// Kind index
	evIKiB := new(bytes.Buffer)
	if err = indexes.KindEnc(ki, ser).MarshalWrite(evIKiB); chk.E(err) {
		return
	}
	indices = append(indices, evIKiB.Bytes())
	// Kind/created_at index
	evIKcB := new(bytes.Buffer)
	if err = indexes.KindCreatedAtEnc(ki, ca, ser).MarshalWrite(evIKcB); chk.E(err) {
		return
	}
	indices = append(indices, evIKcB.Bytes())
	// Kind/pubkey/created_at index
	evIKpB := new(bytes.Buffer)
	if err = indexes.KindPubkeyCreatedAtEnc(ki, p, ca, ser).MarshalWrite(evIKpB); chk.E(err) {
		return
	}
	indices = append(indices, evIKpB.Bytes())
	// tags
	// TagA index
	var atags []tags.Tag_a
	var tagAs []indexes.TagA
	atags = ev.Tags.Get_a_Tags()
	for _, v := range atags {
		aki, apk, aid, _ := indexes.TagAVars()
		aki.Set(v.Kind)
		if err = apk.FromPubkey(v.Pubkey); chk.E(err) {
			continue
		}
		if err = aid.FromIdent([]byte(v.Ident)); chk.E(err) {
			continue
		}
		tagAs = append(tagAs, indexes.TagA{
			Ki: aki, P: apk, Id: aid, Ser: ser,
		})
	}
	for _, v := range tagAs {
		evITaB := new(bytes.Buffer)
		if err = indexes.TagAEnc(v.Ki, v.P, v.Id, ser).MarshalWrite(evITaB); chk.E(err) {
			return
		}
		indices = append(indices, evITaB.Bytes())
	}
	// TagEvent index
	eTags := ev.Tags.GetAllExactKeys("e")
	for _, v := range eTags {
		eid := v.Value()
		var eh []byte
		if eh, err = hex.Dec(eid); chk.E(err) {
			err = nil
			continue
		}
		ih := idhash.New()
		if err = ih.FromId(eh); chk.E(err) {
			err = nil
			continue
		}
		evIeB := new(bytes.Buffer)
		if err = indexes.TagEventEnc(ih, ser).MarshalWrite(evIeB); chk.E(err) {
			return
		}
		indices = append(indices, evIeB.Bytes())
	}
	// TagPubkey index
	pTags := ev.Tags.GetAllExactKeys("p")
	for _, v := range pTags {
		pt := v.Value()
		var pkb []byte
		if pkb, err = hex.Dec(pt); err != nil {
			err = nil
			continue
		}
		ph := pubhash.New()
		if len(pkb) == 0 {
			continue
		}
		if err = ph.FromPubkey(pkb); chk.E(err) {
			err = nil
			continue
		}
		evIpB := new(bytes.Buffer)
		if err = indexes.TagPubkeyEnc(ph, ser).MarshalWrite(evIpB); chk.E(err) {
			return
		}
		indices = append(indices, evIpB.Bytes())
	}
	// TagHashtag index
	ttags := ev.Tags.GetAllExactKeys("t")
	for _, v := range ttags {
		ht := v.Value()
		hh := identhash.New()
		if err = hh.FromIdent([]byte(ht)); chk.E(err) {
			err = nil
			continue
		}
		evIhB := new(bytes.Buffer)
		if err = indexes.TagHashtagEnc(hh, ser).MarshalWrite(evIhB); chk.E(err) {
			return
		}
		indices = append(indices, evIhB.Bytes())
	}
	// TagIdentifier index
	dtags := ev.Tags.GetAllExactKeys("d")
	for _, v := range dtags {
		dt := v.Value()
		dh := identhash.New()
		if err = dh.FromIdent([]byte(dt)); chk.E(err) {
			err = nil
			continue
		}
		evIidB := new(bytes.Buffer)
		if err = indexes.TagIdentifierEnc(dh, ser).MarshalWrite(evIidB); chk.E(err) {
			return
		}
		indices = append(indices, evIidB.Bytes())
	}
	// TagLetter index, TagProtected, TagNonstandard
	for _, v := range ev.Tags {
		key := v.Key()
		if len(key) == 1 {
			switch key {
			case "t", "p", "e":
				// we already made indexes for these letters
				continue
			case "-":
				// TagProtected
				evIprotB := new(bytes.Buffer)
				if err = indexes.TagProtectedEnc(p, ser).MarshalWrite(evIprotB); chk.E(err) {
					return
				}
				indices = append(indices, evIprotB.Bytes())
			default:
				if !((key[0] >= 'a' && key[0] <= 'z') || (key[0] >= 'A' && key[0] <= 'Z')) {
					// this is not a single letter tag or protected. nonstandard
					nk, nv := identhash.New(), identhash.New()
					_ = nk.FromIdent([]byte(key))
					if len(v) > 1 {
						_ = nv.FromIdent([]byte(v.Value()))
					} else {
						_ = nv.FromIdent([]byte{})
					}
					evInsB := new(bytes.Buffer)
					if err = indexes.TagNonstandardEnc(nk, nv, ser).MarshalWrite(evInsB); chk.E(err) {
						return
					}
					indices = append(indices, evInsB.Bytes())
					continue
				}
			}
			// we have a single letter tag that is not e, p or t
			l := letter.New(key[0])
			val := identhash.New()
			// this can be empty, but the hash would still be distinct
			if err = val.FromIdent([]byte(v.Value())); chk.E(err) {
				continue
			}
			evIlB := new(bytes.Buffer)
			if err = indexes.TagLetterEnc(l, val, ser).MarshalWrite(evIlB); chk.E(err) {
				return
			}
			indices = append(indices, evIlB.Bytes())
		} else {
			// TagNonstandard
			nk, nv := identhash.New(), identhash.New()
			_ = nk.FromIdent([]byte(key))
			if len(v) > 1 {
				_ = nv.FromIdent([]byte(v.Value()))
			} else {
				_ = nv.FromIdent([]byte{})
			}
			evInsB := new(bytes.Buffer)
			if err = indexes.TagNonstandardEnc(nk, nv, ser).MarshalWrite(evInsB); chk.E(err) {
				return
			}
			indices = append(indices, evInsB.Bytes())
		}
	}
	// FullTextWord index
	var ftk [][]byte
	if ftk, err = d.GetFulltextKeys(ev, ser); chk.E(err) {
		return
	}
	indices = append(indices, ftk...)
	return
}
```
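StoreEvent is called by the tests but not shown in this diff. A plausible write path, sketched under the assumption that D exposes an Update wrapper analogous to its View, would persist the first key with the encoded event as its value (per the GetEventIndexes comment) and the remaining keys with empty values:

```go
// storeEventSketch is hypothetical; it is not the StoreEvent from this diff,
// and d.Update is an assumed wrapper around badger.DB.Update.
func storeEventSketch(d *D, ev *event.E) (err error) {
	var keys [][]byte
	if keys, _, err = d.GetEventIndexes(ev); err != nil {
		return
	}
	payload := new(bytes.Buffer)
	if err = ev.MarshalWrite(payload); err != nil {
		return
	}
	return d.Update(func(txn *badger.Txn) (err error) {
		// the first key carries the binary event; the rest are pure index keys
		if err = txn.Set(keys[0], payload.Bytes()); err != nil {
			return
		}
		for _, k := range keys[1:] {
			if err = txn.Set(k, nil); err != nil {
				return
			}
		}
		return
	})
}
```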
database/get-event-indexes_test.go (new file): 140 lines

```go
package database

import (
	"bufio"
	"bytes"
	_ "embed"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"testing"
	"time"

	"x.realy.lol/apputil"
	"x.realy.lol/chk"
	"x.realy.lol/event"
	"x.realy.lol/log"
	"x.realy.lol/units"
)

var ExampleEvents []byte

func init() {
	var err error
	if !apputil.FileExists("examples.jsonl") {
		var req *http.Request
		req, err = http.NewRequest("GET", "https://files.mleku.dev/examples.jsonl", nil)
		if err != nil {
			panic("wtf")
		}
		var res *http.Response
		if res, err = http.DefaultClient.Do(req); chk.E(err) {
			panic("wtf")
		}
		var fh *os.File
		if fh, err = os.OpenFile("examples.jsonl", os.O_CREATE|os.O_RDWR, 0600); chk.E(err) {
			panic("wtf")
		}
		if _, err = io.Copy(fh, res.Body); chk.E(err) {
			panic("wtf")
		}
		res.Body.Close()
	}
	log.I.F("loading file...")
	var oh *os.File
	if oh, err = os.Open("examples.jsonl"); chk.E(err) {
		panic("wtf")
	}
	buf := new(bytes.Buffer)
	if _, err = io.Copy(buf, oh); chk.E(err) {
		panic("wtf")
	}
	ExampleEvents = buf.Bytes()
	oh.Close()
}

func TestGetEventIndexes(t *testing.T) {
	var err error
	d := New()
	tmpDir := filepath.Join(os.TempDir(), "testrealy")
	if err = d.Init(tmpDir); chk.E(err) {
		t.Fatal(err)
	}
	defer d.Close()
	defer os.RemoveAll(tmpDir)
	buf := bytes.NewBuffer(ExampleEvents)
	scan := bufio.NewScanner(buf)
	scan.Buffer(make([]byte, 5120000), 5120000)
	var count, errs, encErrs, datasize, size, binsize int
	start := time.Now()
	for scan.Scan() {
		b := scan.Bytes()
		ev := event.New()
		if err = ev.Unmarshal(b); chk.E(err) {
			t.Fatalf("%s:\n%s", err, b)
		}
		// verify the signature on the event
		var ok bool
		if ok, err = ev.Verify(); chk.E(err) {
			errs++
			continue
		}
		if !ok {
			errs++
			log.E.F("event signature is invalid\n%s", b)
			continue
		}
		// check the event encodes to binary, decodes, and produces the identical canonical form
		binE := new(bytes.Buffer)
		if err = ev.MarshalWrite(binE); chk.E(err) {
			log.I.F("bogus tags probably: %s", b)
			encErrs++
			continue
		}
		ev2 := event.New()
		bin2 := bytes.NewBuffer(binE.Bytes())
		if err = ev2.UnmarshalRead(bin2); chk.E(err) {
			encErrs++
			continue
		}
		var can1, can2 []byte
		ev.ToCanonical(can1)
		ev2.ToCanonical(can2)
		if !bytes.Equal(can1, can2) {
			encErrs++
			log.I.S(can1, can2)
			continue
		}
		binsize += len(binE.Bytes())
		var valid bool
		if valid, err = ev.Verify(); chk.E(err) {
			log.I.F("%s", b)
			encErrs++
			continue
		}
		if !valid {
			t.Fatalf("event failed to verify\n%s", b)
		}
		var indices [][]byte
		if indices, _, err = d.GetEventIndexes(ev); chk.E(err) {
			t.Fatal(err)
		}
		log.I.S(indices)
		datasize += len(b)
		for _, v := range indices {
			size += len(v)
		}
		count++
		if count > 1 {
			break
		}
	}
	log.I.F("unmarshaled, verified and indexed %d events in %s, %d Mb of indexes from %d Mb of events, %d Mb as binary, failed verify %d, failed encode %d", count, time.Since(start), size/units.Mb, datasize/units.Mb, binsize/units.Mb, errs, encErrs)
	d.Close()
	os.RemoveAll(tmpDir)
}

var _ = `wdawdad\nhttps://cdn.discordapp.com/attachments/1277777226397388800/1278018649860472874/grain.png?ex=66cf471e&is=66cdf59e&hm=790aced618bb517ebd560e1fd3def537351ef130e239c0ee86d43ff63c44a146&","sig":"2abc1b3bb119071209daba6bf2b6c76cdad036249aad624938a5a2736739d6c139adb7aa94d24550bc53a972e75c40549513a74d9ace8c4435a5d262c172300b`

var _ = ``
```
335
database/indexes/indexes.go
Normal file
335
database/indexes/indexes.go
Normal file
@@ -0,0 +1,335 @@
|
||||
package indexes
|
||||
|
||||
import (
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
"x.realy.lol/chk"
|
||||
"x.realy.lol/codec"
|
||||
"x.realy.lol/database/indexes/prefixes"
|
||||
"x.realy.lol/database/indexes/types/fullid"
|
||||
"x.realy.lol/database/indexes/types/fulltext"
|
||||
"x.realy.lol/database/indexes/types/identHash"
|
||||
"x.realy.lol/database/indexes/types/idhash"
|
||||
"x.realy.lol/database/indexes/types/kindidx"
|
||||
"x.realy.lol/database/indexes/types/letter"
|
||||
"x.realy.lol/database/indexes/types/prefix"
|
||||
"x.realy.lol/database/indexes/types/pubhash"
|
||||
"x.realy.lol/database/indexes/types/timestamp"
|
||||
"x.realy.lol/database/indexes/types/varint"
|
||||
)
|
||||
|
||||
type Encs []codec.I
|
||||
|
||||
// T is a wrapper around an array of codec.I. The caller provides the Encs so they can then call
|
||||
// the accessor function of the codec.I implementation.
|
||||
type T struct {
|
||||
Encs
|
||||
}
|
||||
|
||||
// New creates a new indexes. The helper functions below have an encode and decode variant, the
|
||||
// decode variant does not add the prefix encoder because it has been read by prefixes.Identify.
|
||||
func New(encoders ...codec.I) (i *T) { return &T{encoders} }
|
||||
|
||||
func (t *T) MarshalWrite(w io.Writer) (err error) {
|
||||
for _, e := range t.Encs {
|
||||
if e == nil || reflect.ValueOf(e).IsNil() {
|
||||
// allow a field to be empty, as is needed for search indexes to create search
|
||||
// prefixes.
|
||||
return
|
||||
}
|
||||
if err = e.MarshalWrite(w); chk.E(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *T) UnmarshalRead(r io.Reader) (err error) {
|
||||
for _, e := range t.Encs {
|
||||
if err = e.UnmarshalRead(r); chk.E(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func EventVars() (ser *varint.V) {
|
||||
ser = varint.New()
|
||||
return
|
||||
}
|
||||
func EventEnc(ser *varint.V) (enc *T) {
|
||||
return New(prefix.New(prefixes.Event), ser)
|
||||
}
|
||||
func EventDec(ser *varint.V) (enc *T) {
|
||||
return New(prefix.New(), ser)
|
||||
}
|
||||
|
||||
func IdVars() (id *idhash.T, ser *varint.V) {
|
||||
id = idhash.New()
|
||||
ser = varint.New()
|
||||
return
|
||||
}
|
||||
func IdEnc(id *idhash.T, ser *varint.V) (enc *T) {
|
||||
return New(prefix.New(prefixes.Id), id, ser)
|
||||
}
|
||||
func IdSearch(id *idhash.T) (enc *T) {
|
||||
return New(prefix.New(prefixes.Id), id)
|
||||
}
|
||||
func IdDec(id *idhash.T, ser *varint.V) (enc *T) {
|
||||
return New(prefix.New(), id, ser)
|
||||
}
|
||||
|
||||
type FullIndex struct {
|
||||
Ser *varint.V
|
||||
Id *fullid.T
|
||||
Pubkey *pubhash.T
|
||||
Kind *kindidx.T
|
||||
CreatedAt *timestamp.T
|
||||
}
|
||||
|
||||
func FullIndexVars() (ser *varint.V, t *fullid.T, p *pubhash.T, ki *kindidx.T,
|
||||
ca *timestamp.T) {
|
||||
ser = varint.New()
|
||||
t = fullid.New()
|
||||
p = pubhash.New()
|
||||
ki = kindidx.FromKind(0)
|
||||
ca = ×tamp.T{}
|
||||
return
|
||||
}
|
||||
func FullIndexEnc(ser *varint.V, t *fullid.T, p *pubhash.T, ki *kindidx.T,
|
||||
ca *timestamp.T) (enc *T) {
|
||||
return New(prefix.New(prefixes.FullIndex), ser, t, p, ki, ca)
|
||||
}
|
||||
func FullIndexDec(ser *varint.V, t *fullid.T, p *pubhash.T, ki *kindidx.T,
|
||||
ca *timestamp.T) (enc *T) {
|
||||
return New(prefix.New(), ser, t, p, ki, ca)
|
||||
}
|
||||
|
||||
func PubkeyVars() (p *pubhash.T, ser *varint.V) {
|
||||
p = pubhash.New()
|
||||
ser = varint.New()
|
||||
return
|
||||
}
|
||||
func PubkeyEnc(p *pubhash.T, ser *varint.V) (enc *T) {
|
||||
return New(prefix.New(prefixes.Pubkey), p, ser)
|
||||
}
|
||||
func PubkeyDec(p *pubhash.T, ser *varint.V) (enc *T) {
|
||||
return New(prefix.New(), p, ser)
|
||||
}
|
||||
|
||||
func PubkeyCreatedAtVars() (p *pubhash.T, ca *timestamp.T, ser *varint.V) {
|
||||
p = pubhash.New()
|
||||
ca = ×tamp.T{}
|
||||
ser = varint.New()
|
||||
return
|
||||
}
|
||||
func PubkeyCreatedAtEnc(p *pubhash.T, ca *timestamp.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.PubkeyCreatedAt), p, ca, ser)
}

func PubkeyCreatedAtDec(p *pubhash.T, ca *timestamp.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), p, ca, ser)
}

func CreatedAtVars() (ca *timestamp.T, ser *varint.V) {
	ca = &timestamp.T{}
	ser = varint.New()
	return
}

func CreatedAtEnc(ca *timestamp.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.CreatedAt), ca, ser)
}

func CreatedAtDec(ca *timestamp.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), ca, ser)
}

func FirstSeenVars() (ser *varint.V, ts *timestamp.T) {
	ts = &timestamp.T{}
	ser = varint.New()
	return
}

func FirstSeenEnc(ser *varint.V, ts *timestamp.T) (enc *T) {
	return New(prefix.New(prefixes.FirstSeen), ser, ts)
}

func FirstSeenDec(ser *varint.V, ts *timestamp.T) (enc *T) {
	return New(prefix.New(), ser, ts)
}

func KindVars() (ki *kindidx.T, ser *varint.V) {
	ki = kindidx.FromKind(0)
	ser = varint.New()
	return
}

func KindEnc(ki *kindidx.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.Kind), ki, ser)
}

func KindDec(ki *kindidx.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), ki, ser)
}

func KindCreatedAtVars() (ki *kindidx.T, ca *timestamp.T, ser *varint.V) {
	ki = kindidx.FromKind(0)
	ca = &timestamp.T{}
	ser = varint.New()
	return
}

func KindCreatedAtEnc(ki *kindidx.T, ca *timestamp.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.KindCreatedAt), ki, ca, ser)
}

func KindCreatedAtDec(ki *kindidx.T, ca *timestamp.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), ki, ca, ser)
}

func KindPubkeyCreatedAtVars() (ki *kindidx.T, p *pubhash.T, ca *timestamp.T, ser *varint.V) {
	ki = kindidx.FromKind(0)
	// initialize all returned vars; p and ca were previously left nil
	p = pubhash.New()
	ca = &timestamp.T{}
	ser = varint.New()
	return
}

func KindPubkeyCreatedAtEnc(ki *kindidx.T, p *pubhash.T, ca *timestamp.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.KindPubkeyCreatedAt), ki, p, ca, ser)
}

func KindPubkeyCreatedAtDec(ki *kindidx.T, p *pubhash.T, ca *timestamp.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), ki, p, ca, ser)
}

type TagA struct {
	Ki  *kindidx.T
	P   *pubhash.T
	Id  *identhash.T
	Ser *varint.V
}

func TagAVars() (ki *kindidx.T, p *pubhash.T, id *identhash.T, ser *varint.V) {
	ki = kindidx.FromKind(0)
	p = pubhash.New()
	id = identhash.New()
	ser = varint.New()
	return
}

func TagAEnc(ki *kindidx.T, p *pubhash.T, id *identhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.TagA), ki, p, id, ser)
}

func TagADec(ki *kindidx.T, p *pubhash.T, id *identhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), ki, p, id, ser)
}

func TagEventVars() (id *idhash.T, ser *varint.V) {
	id = idhash.New()
	ser = varint.New()
	return
}

func TagEventEnc(id *idhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.TagEvent), id, ser)
}

func TagEventDec(id *idhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), id, ser)
}

func TagPubkeyVars() (p *pubhash.T, ser *varint.V) {
	p = pubhash.New()
	ser = varint.New()
	return
}

func TagPubkeyEnc(p *pubhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.TagPubkey), p, ser)
}

func TagPubkeyDec(p *pubhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), p, ser)
}

func TagHashtagVars() (hashtag *identhash.T, ser *varint.V) {
	hashtag = identhash.New()
	ser = varint.New()
	return
}

func TagHashtagEnc(hashtag *identhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.TagHashtag), hashtag, ser)
}

func TagHashtagDec(hashtag *identhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), hashtag, ser)
}

func TagIdentifierVars() (ident *identhash.T, ser *varint.V) {
	ident = identhash.New()
	ser = varint.New()
	return
}

func TagIdentifierEnc(ident *identhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.TagIdentifier), ident, ser)
}

func TagIdentifierDec(ident *identhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), ident, ser)
}

func TagLetterVars() (l *letter.T, val *identhash.T, ser *varint.V) {
	l = letter.New(0)
	val = identhash.New()
	ser = varint.New()
	return
}

func TagLetterEnc(l *letter.T, val *identhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.TagLetter), l, val, ser)
}

func TagLetterDec(l *letter.T, val *identhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), l, val, ser)
}

func TagProtectedVars() (p *pubhash.T, ser *varint.V) {
	p = pubhash.New()
	ser = varint.New()
	return
}

func TagProtectedEnc(p *pubhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.TagProtected), p, ser)
}

func TagProtectedDec(p *pubhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), p, ser)
}

func TagNonstandardVars() (key, value *identhash.T, ser *varint.V) {
	key = identhash.New()
	value = identhash.New()
	ser = varint.New()
	return
}

func TagNonstandardEnc(key, value *identhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.TagNonstandard), key, value, ser)
}

func TagNonstandardDec(key, value *identhash.T, ser *varint.V) (enc *T) {
	return New(prefix.New(), key, value, ser)
}

func FullTextWordVars() (fw *fulltext.T, pos *varint.V, ser *varint.V) {
	fw = fulltext.New()
	pos = varint.New()
	ser = varint.New()
	return
}

func FullTextWordEnc(fw *fulltext.T, pos *varint.V, ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.FulltextWord), fw, pos, ser)
}

func FullTextWordDec(fw *fulltext.T, pos *varint.V, ser *varint.V) (enc *T) {
	return New(prefix.New(), fw, pos, ser)
}

func LastAccessedVars() (ser *varint.V) {
	ser = varint.New()
	return
}

func LastAccessedEnc(ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.LastAccessed), ser)
}

func LastAccessedDec(ser *varint.V) (enc *T) {
	return New(prefix.New(), ser)
}

func AccessCounterVars() (ser *varint.V) {
	ser = varint.New()
	return
}

func AccessCounterEnc(ser *varint.V) (enc *T) {
	return New(prefix.New(prefixes.AccessCounter), ser)
}

func AccessCounterDec(ser *varint.V) (enc *T) {
	return New(prefix.New(), ser)
}
562
database/indexes/indexes_test.go
Normal file
@@ -0,0 +1,562 @@
package indexes

import (
	"bytes"
	"math"
	"testing"
	"time"

	"github.com/minio/sha256-simd"
	"lukechampine.com/frand"

	"x.realy.lol/chk"
	"x.realy.lol/database/indexes/prefixes"
	"x.realy.lol/database/indexes/types/prefix"
	"x.realy.lol/ec/schnorr"
	"x.realy.lol/log"
)

func TestEvent(t *testing.T) {
	var err error
	for range 100 {
		ser := EventVars()
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		evIdx := EventEnc(ser)
		evIdx.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		ser2 := EventVars()
		evIdx2 := EventDec(ser2)
		if err = evIdx2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestConfig(t *testing.T) {
	var err error
	cfg := prefix.New(prefixes.Config)
	buf := new(bytes.Buffer)
	cfg.MarshalWrite(buf)
	buf2 := bytes.NewBuffer(cfg.Bytes())
	cfg2 := prefix.New()
	if err = cfg2.UnmarshalRead(buf2); chk.E(err) {
		t.Fatal(err)
	}
	if !bytes.Equal(cfg.Bytes(), cfg2.Bytes()) {
		t.Fatal("failed to recover same value as input")
	}
}

func TestId(t *testing.T) {
	var err error
	for range 100 {
		id, ser := IdVars()
		if err = id.FromId(frand.Bytes(sha256.Size)); chk.E(err) {
			t.Fatal(err)
		}
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		evIdx := IdEnc(id, ser)
		evIdx.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		id2, ser2 := IdVars()
		evIdx2 := IdDec(id2, ser2)
		if err = evIdx2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if !bytes.Equal(id.Bytes(), id2.Bytes()) {
			t.Fatal("failed to recover same value as input")
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestFullIndex(t *testing.T) {
	var err error
	for range 100 {
		ser, id, p, ki, ca := FullIndexVars()
		if err = id.FromId(frand.Bytes(sha256.Size)); chk.E(err) {
			t.Fatal(err)
		}
		if err = p.FromPubkey(frand.Bytes(schnorr.PubKeyBytesLen)); chk.E(err) {
			t.Fatal(err)
		}
		ki.Set(frand.Intn(math.MaxUint16))
		ca.FromInt(int(time.Now().Unix()))
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := FullIndexEnc(ser, id, p, ki, ca)
		if err = fi.MarshalWrite(buf); chk.E(err) {
			t.Fatal(err)
		}
		// log.I.S(fi)
		bin := buf.Bytes()
		// log.I.S(bin)
		buf2 := bytes.NewBuffer(bin)
		ser2, id2, p2, ki2, ca2 := FullIndexVars()
		fi2 := FullIndexDec(ser2, id2, p2, ki2, ca2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if !bytes.Equal(id.Bytes(), id2.Bytes()) {
			log.I.S(id, id2)
			t.Fatal("failed to recover same value as input")
		}
		if !bytes.Equal(p.Bytes(), p2.Bytes()) {
			t.Fatal("failed to recover same value as input")
		}
		if ki.ToKind() != ki2.ToKind() {
			t.Fatal("failed to recover same value as input")
		}
		if ca.ToTimestamp() != ca2.ToTimestamp() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestPubkey(t *testing.T) {
	var err error
	for range 100 {
		p, ser := PubkeyVars()
		if err = p.FromPubkey(frand.Bytes(schnorr.PubKeyBytesLen)); chk.E(err) {
			t.Fatal(err)
		}
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := PubkeyEnc(p, ser)
		fi.MarshalWrite(buf)
		// log.I.S(fi)
		bin := buf.Bytes()
		// log.I.S(bin)
		buf2 := bytes.NewBuffer(bin)
		p2, ser2 := PubkeyVars()
		fi2 := PubkeyDec(p2, ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestPubkeyCreatedAt(t *testing.T) {
	var err error
	for range 100 {
		p, ca, ser := PubkeyCreatedAtVars()
		if err = p.FromPubkey(frand.Bytes(schnorr.PubKeyBytesLen)); chk.E(err) {
			t.Fatal(err)
		}
		ca.FromInt(int(time.Now().Unix()))
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := PubkeyCreatedAtEnc(p, ca, ser)
		fi.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		p2, ca2, ser2 := PubkeyCreatedAtVars()
		fi2 := PubkeyCreatedAtDec(p2, ca2, ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if ca.ToTimestamp() != ca2.ToTimestamp() {
			t.Fatal("failed to recover same value as input")
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestCreatedAt(t *testing.T) {
	var err error
	for range 100 {
		ca, ser := CreatedAtVars()
		ca.FromInt(int(time.Now().Unix()))
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := CreatedAtEnc(ca, ser)
		fi.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		ca2, ser2 := CreatedAtVars()
		fi2 := CreatedAtDec(ca2, ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if ca.ToTimestamp() != ca2.ToTimestamp() {
			t.Fatal("failed to recover same value as input")
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestFirstSeen(t *testing.T) {
	var err error
	for range 100 {
		ser, ts := FirstSeenVars()
		ts.FromInt(int(time.Now().Unix()))
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fs := FirstSeenEnc(ser, ts)
		fs.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		ser2, ca2 := FirstSeenVars()
		fs2 := FirstSeenDec(ser2, ca2)
		if err = fs2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
		if ts.ToTimestamp() != ca2.ToTimestamp() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestKind(t *testing.T) {
	var err error
	for range 100 {
		ki, ser := KindVars()
		ki.Set(frand.Intn(math.MaxUint16))
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		kIdx := KindEnc(ki, ser)
		kIdx.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		ki2, ser2 := KindVars()
		fi2 := KindDec(ki2, ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if ki.ToKind() != ki2.ToKind() {
			t.Fatal("failed to recover same value as input")
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestTagA(t *testing.T) {
	var err error
	for range 100 {
		ki, p, id, ser := TagAVars()
		if err = id.FromIdent(frand.Bytes(frand.Intn(16) + 8)); chk.E(err) {
			t.Fatal(err)
		}
		if err = p.FromPubkey(frand.Bytes(schnorr.PubKeyBytesLen)); chk.E(err) {
			t.Fatal(err)
		}
		ki.Set(frand.Intn(math.MaxUint16))
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := TagAEnc(ki, p, id, ser)
		fi.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		ki2, p2, id2, ser2 := TagAVars()
		fi2 := TagADec(ki2, p2, id2, ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if !bytes.Equal(id.Bytes(), id2.Bytes()) {
			t.Fatal("failed to recover same value as input")
		}
		if !bytes.Equal(p.Bytes(), p2.Bytes()) {
			t.Fatal("failed to recover same value as input")
		}
		if ki.ToKind() != ki2.ToKind() {
			t.Fatal("failed to recover same value as input")
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestTagEvent(t *testing.T) {
	var err error
	for range 100 {
		id, ser := TagEventVars()
		if err = id.FromId(frand.Bytes(sha256.Size)); chk.E(err) {
			t.Fatal(err)
		}
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		evIdx := TagEventEnc(id, ser)
		evIdx.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		id2, ser2 := TagEventVars()
		evIdx2 := TagEventDec(id2, ser2)
		if err = evIdx2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if !bytes.Equal(id.Bytes(), id2.Bytes()) {
			t.Fatal("failed to recover same value as input")
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestTagPubkey(t *testing.T) {
	var err error
	for range 100 {
		p, ser := TagPubkeyVars()
		if err = p.FromPubkey(frand.Bytes(schnorr.PubKeyBytesLen)); chk.E(err) {
			t.Fatal(err)
		}
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := TagPubkeyEnc(p, ser)
		fi.MarshalWrite(buf)
		// log.I.S(fi)
		bin := buf.Bytes()
		// log.I.S(bin)
		buf2 := bytes.NewBuffer(bin)
		p2, ser2 := TagPubkeyVars()
		fi2 := TagPubkeyDec(p2, ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestTagHashtag(t *testing.T) {
	var err error
	for range 100 {
		id, ser := TagHashtagVars()
		if err = id.FromIdent(frand.Bytes(frand.Intn(16) + 8)); chk.E(err) {
			t.Fatal(err)
		}
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := TagHashtagEnc(id, ser)
		fi.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		id2, ser2 := TagHashtagVars()
		fi2 := TagHashtagDec(id2, ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if !bytes.Equal(id.Bytes(), id2.Bytes()) {
			t.Fatal("failed to recover same value as input")
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestTagIdentifier(t *testing.T) {
	var err error
	for range 100 {
		id, ser := TagIdentifierVars()
		if err = id.FromIdent(frand.Bytes(frand.Intn(16) + 8)); chk.E(err) {
			t.Fatal(err)
		}
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := TagIdentifierEnc(id, ser)
		fi.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		id2, ser2 := TagIdentifierVars()
		fi2 := TagIdentifierDec(id2, ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if !bytes.Equal(id.Bytes(), id2.Bytes()) {
			t.Fatal("failed to recover same value as input")
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestTagLetter(t *testing.T) {
	var err error
	for range 100 {
		l, id, ser := TagLetterVars()
		if err = id.FromIdent(frand.Bytes(frand.Intn(16) + 8)); chk.E(err) {
			t.Fatal(err)
		}
		lb := frand.Bytes(1)
		l.Set(lb[0])
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := TagLetterEnc(l, id, ser)
		fi.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		l2, id2, ser2 := TagLetterVars()
		fi2 := TagLetterDec(l2, id2, ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if l.Letter() != l2.Letter() {
			t.Fatal("failed to recover same value as input")
		}
		if !bytes.Equal(id.Bytes(), id2.Bytes()) {
			t.Fatal("failed to recover same value as input")
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestTagProtected(t *testing.T) {
	var err error
	for range 100 {
		p, ser := TagProtectedVars()
		if err = p.FromPubkey(frand.Bytes(schnorr.PubKeyBytesLen)); chk.E(err) {
			t.Fatal(err)
		}
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := TagProtectedEnc(p, ser)
		fi.MarshalWrite(buf)
		// log.I.S(fi)
		bin := buf.Bytes()
		// log.I.S(bin)
		buf2 := bytes.NewBuffer(bin)
		p2, ser2 := TagProtectedVars()
		fi2 := TagProtectedDec(p2, ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestTagNonstandard(t *testing.T) {
	var err error
	for range 100 {
		k, v, ser := TagNonstandardVars()
		if err = k.FromIdent(frand.Bytes(frand.Intn(16) + 8)); chk.E(err) {
			t.Fatal(err)
		}
		if err = v.FromIdent(frand.Bytes(frand.Intn(16) + 8)); chk.E(err) {
			t.Fatal(err)
		}
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := TagNonstandardEnc(k, v, ser)
		fi.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		k2, v2, ser2 := TagNonstandardVars()
		fi2 := TagNonstandardDec(k2, v2, ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if !bytes.Equal(k.Bytes(), k2.Bytes()) {
			t.Fatal("failed to recover same value as input")
		}
		if !bytes.Equal(v.Bytes(), v2.Bytes()) {
			t.Fatal("failed to recover same value as input")
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestFulltextWord(t *testing.T) {
	var err error
	for range 100 {
		fw, pos, ser := FullTextWordVars()
		fw.FromWord(frand.Bytes(frand.Intn(10) + 5))
		pos.FromUint64(uint64(frand.Intn(math.MaxUint32)))
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := FullTextWordEnc(fw, pos, ser)
		if err = fi.MarshalWrite(buf); chk.E(err) {
			t.Fatal(err)
		}
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		fw2, pos2, ser2 := FullTextWordVars()
		fi2 := FullTextWordDec(fw2, pos2, ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if !bytes.Equal(fw.Bytes(), fw2.Bytes()) {
			t.Fatal("failed to recover same value as input")
		}
		if pos.ToUint32() != pos2.ToUint32() {
			t.Fatal("failed to recover same value as input")
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestLastAccessed(t *testing.T) {
	var err error
	for range 100 {
		ser := LastAccessedVars()
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := LastAccessedEnc(ser)
		fi.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		ser2 := LastAccessedVars()
		fi2 := LastAccessedDec(ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}

func TestAccessCounter(t *testing.T) {
	var err error
	for range 100 {
		ser := AccessCounterVars()
		ser.FromUint64(uint64(frand.Intn(math.MaxInt64)))
		buf := new(bytes.Buffer)
		fi := AccessCounterEnc(ser)
		fi.MarshalWrite(buf)
		bin := buf.Bytes()
		buf2 := bytes.NewBuffer(bin)
		ser2 := AccessCounterVars()
		fi2 := AccessCounterDec(ser2)
		if err = fi2.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		if ser.ToUint64() != ser2.ToUint64() {
			t.Fatal("failed to recover same value as input")
		}
	}
}
221
database/indexes/prefixes/prefixes.go
Normal file
@@ -0,0 +1,221 @@
package prefixes

import (
	"io"
)

const Len = 2

type I string

const (
	// Event is the whole event stored in binary format
	//
	// [ prefix ][ 8 byte serial ] [ event in binary format ]
	Event = iota

	// Config is a singular record containing a free-form configuration in JSON format
	//
	// [ prefix ] [ configuration in JSON format ]
	Config

	// Id contains a truncated 8 byte hash of an event Id. This is the secondary key of an
	// event; the primary key is the serial found in the Event.
	//
	// [ prefix ][ 8 bytes truncated hash of Id ][ 8 serial ]
	Id

	// FullIndex is an index designed to enable sorting and filtering of results found via
	// other indexes, without having to decode the event.
	//
	// [ prefix ][ 8 serial ][ 32 bytes full event ID ][ 8 bytes truncated hash of pubkey ][ 2 bytes kind ][ 8 bytes created_at timestamp ]
	FullIndex

	// ------------------------------------------------------------------------
	//
	// The following are search indexes. This first category is primarily for kind, pubkey and
	// created_at timestamps. These compose a set of 3 primary indexes alone, two that combine
	// with the timestamp, and a third that combines all three, covering every combination of
	// these.
	_

	// Pubkey is an index for searching for events authored by a pubkey.
	//
	// [ prefix ][ 8 bytes truncated hash of pubkey ][ 8 serial ]
	Pubkey

	// Kind is an index of event kind numbers.
	//
	// [ prefix ][ 2 bytes kind number ][ 8 serial ]
	Kind

	// CreatedAt is an index that allows searching by the timestamp on the event.
	//
	// [ prefix ][ created_at 8 bytes timestamp ][ 8 serial ]
	CreatedAt

	// PubkeyCreatedAt is a composite index that allows search by pubkey filtered by
	// created_at.
	//
	// [ prefix ][ 8 bytes truncated hash of pubkey ][ 8 bytes created_at ][ 8 serial ]
	PubkeyCreatedAt

	// KindCreatedAt is an index of kind and created_at timestamp.
	//
	// [ prefix ][ 2 bytes kind number ][ created_at 8 bytes timestamp ][ 8 bytes serial ]
	KindCreatedAt

	// KindPubkeyCreatedAt is an index of kind, pubkey and created_at timestamp.
	//
	// [ prefix ][ 2 bytes kind number ][ 8 bytes hash of pubkey ][ created_at 8 bytes timestamp ][ 8 bytes serial ]
	KindPubkeyCreatedAt

	// ------------------------------------------------------------------------
	//
	// The following are search indexes for tags, which are references to other categories,
	// including events, replaceable event identities (d tags), public keys, hashtags, and
	// arbitrary other kinds of keys including standard single letter and nonstandard word keys.
	//
	// Combining them with the previous set of 6 indexes involves using one query from the
	// previous section according to the filter, and one or more of these tag indexes, to
	// acquire a list of event serials from each query, and then intersecting the result sets
	// from each one to yield the matches.
	_

	// TagA is an index of `a` tags, which contain kind, pubkey and hash of an arbitrary
	// text, used to create an abstract reference for a multiplicity of replaceable events with
	// a kind number. These labels also appear as `d` tags in inbound references, see
	// TagIdentifier.
	//
	// [ prefix ][ 2 bytes kind number ][ 8 bytes hash of pubkey ][ 8 bytes hash of label ][ serial ]
	TagA

	// TagIdentifier is a `d` tag identifier that creates an arbitrary label that can be used
	// to refer to an event. This is used for parameterized replaceable events to identify them
	// with `a` tags for reference.
	//
	// [ prefix ][ 8 byte hash of identifier ][ 8 serial ]
	TagIdentifier

	// TagEvent is a reference to an event.
	//
	// [ prefix ][ 8 bytes truncated hash of event Id ][ 8 serial ]
	TagEvent

	// TagPubkey is a reference to a user's public key identifier (author).
	//
	// [ prefix ][ 8 bytes pubkey hash ][ 8 serial ]
	TagPubkey

	// TagHashtag is a reference to a hashtag, user-created and externally labeled short
	// subject names.
	//
	// [ prefix ][ 8 bytes hash of hashtag ][ 8 serial ]
	TagHashtag

	// TagLetter covers all other types of single letter mandatory indexed tags, such as
	// `d` for identifiers and `m` for mimetype and other kinds of references; the actual
	// letter is the second byte. The value is a truncated 8 byte hash.
	//
	// [ prefix ][ letter ][ 8 bytes hash of value field of tag ][ 8 serial ]
	TagLetter

	// TagProtected is a special tag that indicates that this event should only be accepted
	// if published by an authed user with the matching public key.
	//
	// [ prefix ][ 8 byte hash of public key ][ 8 serial ]
	TagProtected

	// TagNonstandard is an index for tag keys longer than 1 character, represented as an 8
	// byte truncated hash.
	//
	// [ prefix ][ 8 byte hash of key ][ 8 byte hash of value ][ 8 serial ]
	TagNonstandard

	// ------------------------------------------------------------------------
	_

	// FulltextWord is a fulltext word index; the index contains the whole word. This is
	// also searchable via annotations in the filter search: a bare word is a whole match or
	// matches any word containing it (contains), a ^ prefix indicates a prefix match, $
	// indicates a suffix match, and this index also contains a sequence number for proximity
	// filtering.
	//
	// [ prefix ][ varint word len ][ full word ][ 4 bytes word position in content field ][ 8 serial ]
	FulltextWord

	// ------------------------------------------------------------------------
	//
	// The following keys are event metadata that are needed to enable other types of
	// functionality such as garbage collection and metadata queries.
	_

	// FirstSeen is an index that records the timestamp of when the event was first seen.
	//
	// [ prefix ][ 8 serial ][ 8 byte timestamp ]
	FirstSeen

	// LastAccessed is an index that stores the last time the referenced event was returned
	// in a result.
	//
	// [ prefix ][ 8 serial ] [ last accessed timestamp 8 bytes ]
	LastAccessed

	// AccessCounter is a counter that is increased when the referenced event is a result in
	// a query. This can enable a frequency-of-access search or sort.
	//
	// [ prefix ][ 8 serial ] [ 8 bytes access counter ]
	AccessCounter
)

func (i I) Write(w io.Writer) (n int, err error) { return w.Write([]byte(i)) }

func Prefix(prf int) (i I) {
	switch prf {
	case Event:
		return "ev"
	case Config:
		return "cf"
	case Id:
		return "id"
	case FullIndex:
		return "fi"
	case Pubkey:
		return "pk"
	case PubkeyCreatedAt:
		return "pc"
	case CreatedAt:
		return "ca"
	case FirstSeen:
		return "fs"
	case Kind:
		return "ki"
	case KindCreatedAt:
		return "kc"
	case KindPubkeyCreatedAt:
		return "kp"
	case TagA:
		return "ta"
	case TagEvent:
		return "te"
	case TagPubkey:
		return "tp"
	case TagHashtag:
		return "tt"
	case TagIdentifier:
		return "td"
	case TagLetter:
		return "t*"
	case TagProtected:
		return "t-"
	case TagNonstandard:
		return "t?"
	case FulltextWord:
		return "fw"
	case LastAccessed:
		return "la"
	case AccessCounter:
		return "ac"
	}
	return
}
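The comment in the const block above describes how composite queries are resolved: each index scan yields a set of event serials, and the per-index sets are intersected to produce the matches. A minimal sketch of that intersection step, assuming the serials are handled as plain uint64 values (the function name and signature here are illustrative, not part of this package):

package main

import "fmt"

// intersectSerials returns the serials present in every result set: each
// index query yields a set of event serials, and only serials found in
// all sets match the combined filter.
func intersectSerials(sets ...[]uint64) (matches []uint64) {
	if len(sets) == 0 {
		return
	}
	counts := make(map[uint64]int)
	for _, set := range sets {
		seen := make(map[uint64]struct{})
		for _, ser := range set {
			if _, ok := seen[ser]; ok {
				continue // count each serial once per set
			}
			seen[ser] = struct{}{}
			counts[ser]++
		}
	}
	for ser, n := range counts {
		if n == len(sets) {
			matches = append(matches, ser)
		}
	}
	return
}

func main() {
	// serials matching a kind query and a tag query; only 7 is in both
	fmt.Println(intersectSerials([]uint64{3, 7, 9}, []uint64{7, 12}))
}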
96
database/indexes/readme.md
Normal file
@@ -0,0 +1,96 @@
# database

## index key scheme

this scheme is designed to be stable and semi-human-readable, and uses two bytes as the key prefix in most cases.

all keys further contain the database serial (sequence number) as the last 8 bytes.
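as a concrete illustration of the layout, here is a minimal sketch of composing a `pk` key by hand. it is an assumption for illustration only: the real keys are built by the codec types in the indexes package, which hash with their own helpers and write the serial with a varint encoder rather than the fixed 8 big-endian bytes used here.

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// buildPubkeyKey sketches the `pk` layout from the list below:
// [ 2 byte prefix ][ 8 byte truncated hash of pubkey ][ 8 byte serial ].
func buildPubkeyKey(pubkey []byte, serial uint64) []byte {
	key := make([]byte, 0, 2+8+8)
	key = append(key, 'p', 'k') // two byte human-readable prefix
	h := sha256.Sum256(pubkey)  // hash, then truncate to 8 bytes
	key = append(key, h[:8]...)
	var ser [8]byte
	binary.BigEndian.PutUint64(ser[:], serial)
	return append(key, ser[:]...) // serial goes last
}

func main() {
	fmt.Printf("%x\n", buildPubkeyKey([]byte("example pubkey bytes"), 42))
}
```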
- `ev` - the event itself, encoded in binary format

- `cf` - a free form configuration JSON

- `id` - event id - truncated 8 bytes hash

  these override any other filter

- `fi` - full index: full event id, pubkey truncated hash, kind and created_at, enabling identifying and filtering search results to return only the event id of a match while enabling filtering by timestamp and allowing the exclusion of matches based on a user's mute list

- `pk` - public key - truncated 8 byte hash of public key

- `pc` - public key, created at - varint encoded (ltr encoder)

  these index all events associated to a pubkey, easy to pick by timestamp

- `ca` - created_at timestamp - varint encoded (ltr encoder)

  these timestamps are not entirely reliable, but for a since/until filter they are sequential

- `fs` - index that stores the timestamp when the event was received

  this enables search by first-seen

- `ki` - kind - 2 bytes kind number

  to catch events by kind; the composite `kc` (kind, created_at) and `kp` (kind, pubkey, created_at) indexes catch events by time window as well

- `ta` - kind, pubkey, hash of d tag (a tag value)

  these are references used by parameterized replaceable events

- `te` - event id - truncated 8 bytes hash

  these are events that refer to another event (root, reply, etc)

- `tp` - public key - truncated 8 bytes hash

  these are references to another user

- `td` - identifier - 8 byte truncated hash of identifier string

  these are labels used with parameterized replaceable events to create a namespace

- `tt` - hashtag - 8 bytes hash of full hashtag

  this enables fast hashtag searches

- `t*` - tag for other letters (literally the letter), 8 bytes truncated hash of value

  all other tags, with a distinguishable value compactly encoded

- `t-` - 8 bytes hash of pubkey

  these are protected events that cannot be saved unless the author has authed, and they can't be broadcast by the relay either

- `t?` - 8 bytes hash of key field - 8 bytes hash of value field

  this in fact enables search by other tags, but it is not exposed in filter syntax

- `fw` - fulltext search index - the whole word follows, serial is last 8 bytes

  when searching, a whole match has no prefix, `*` means contains, `^` prefix and `$` suffix; a matcher sketch follows this list

- `la` - serial, value is last accessed timestamp

- `ac` - serial, value is incremented counter of accesses

  incremented each time this event is matched by other indexes in a result
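a minimal sketch of the `fw` annotation matching described above, assuming string terms; illustrative only, not the relay's actual matcher:

```go
package main

import (
	"fmt"
	"strings"
)

// matchWord applies the fulltext annotations from the `fw` entry above:
// a bare term is a whole-word match, `*` means contains, `^` a prefix
// match and `$` a suffix match.
func matchWord(term, word string) bool {
	switch {
	case strings.HasPrefix(term, "*"):
		return strings.Contains(word, term[1:])
	case strings.HasPrefix(term, "^"):
		return strings.HasPrefix(word, term[1:])
	case strings.HasPrefix(term, "$"):
		return strings.HasSuffix(word, term[1:])
	default:
		return word == term
	}
}

func main() {
	fmt.Println(matchWord("^note", "notes"))  // true: prefix match
	fmt.Println(matchWord("$tag", "hashtag")) // true: suffix match
}
```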
42
database/indexes/types/fullid/fullid.go
Normal file
@@ -0,0 +1,42 @@
package fullid

import (
	"io"

	"github.com/minio/sha256-simd"

	"x.realy.lol/errorf"
)

const Len = sha256.Size

type T struct {
	val []byte
}

func New() (fi *T) { return &T{make([]byte, Len)} }

func (fi *T) FromId(id []byte) (err error) {
	if len(id) != Len {
		err = errorf.E("invalid Id length, got %d require %d", len(id), Len)
		return
	}
	fi.val = id
	return
}

func (fi *T) Bytes() (b []byte) { return fi.val }

func (fi *T) MarshalWrite(w io.Writer) (err error) {
	_, err = w.Write(fi.val)
	return
}

func (fi *T) UnmarshalRead(r io.Reader) (err error) {
	if len(fi.val) < Len {
		fi.val = make([]byte, Len)
	} else {
		fi.val = fi.val[:Len]
	}
	// io.ReadFull so a short read cannot leave a partially filled id
	_, err = io.ReadFull(r, fi.val)
	return
}
39
database/indexes/types/fulltext/fulltext.go
Normal file
@@ -0,0 +1,39 @@
package fulltext

import (
	"io"

	"x.realy.lol/chk"
	"x.realy.lol/varint"
)

type T struct {
	val []byte
}

func New() (ft *T) { return &T{} }

func (ft *T) FromWord(word []byte) { ft.val = word }

func (ft *T) Bytes() (b []byte) { return ft.val }

func (ft *T) MarshalWrite(w io.Writer) (err error) {
	varint.Encode(w, uint64(len(ft.val)))
	_, err = w.Write(ft.val)
	return
}

func (ft *T) UnmarshalRead(r io.Reader) (err error) {
	var l uint64
	if l, err = varint.Decode(r); chk.E(err) {
		return
	}
	wl := int(l)
	if len(ft.val) < wl {
		ft.val = make([]byte, wl)
	} else {
		ft.val = ft.val[:wl]
	}
	// io.ReadFull so a short read cannot truncate the word
	_, err = io.ReadFull(r, ft.val)
	return
}
35
database/indexes/types/identHash/identhash.go
Normal file
@@ -0,0 +1,35 @@
package identhash

import (
	"io"

	"x.realy.lol/helpers"
)

const Len = 8

type T struct{ val []byte }

func New() (i *T) { return &T{make([]byte, Len)} }

func (i *T) FromIdent(id []byte) (err error) {
	i.val = helpers.Hash(id)[:Len]
	return
}

func (i *T) Bytes() (b []byte) { return i.val }

func (i *T) MarshalWrite(w io.Writer) (err error) {
	_, err = w.Write(i.val)
	return
}

func (i *T) UnmarshalRead(r io.Reader) (err error) {
	if len(i.val) < Len {
		i.val = make([]byte, Len)
	} else {
		i.val = i.val[:Len]
	}
	// io.ReadFull so a short read cannot leave a partially filled hash
	_, err = io.ReadFull(r, i.val)
	return
}
58
database/indexes/types/idhash/idhash.go
Normal file
@@ -0,0 +1,58 @@
package idhash

import (
	"io"

	"github.com/minio/sha256-simd"

	"x.realy.lol/chk"
	"x.realy.lol/errorf"
	"x.realy.lol/helpers"
	"x.realy.lol/hex"
)

const Len = 8

type T struct{ val []byte }

func New() (i *T) { return &T{make([]byte, Len)} }

func (i *T) FromId(id []byte) (err error) {
	if len(id) != sha256.Size {
		err = errorf.E("invalid Id length, got %d require %d", len(id), sha256.Size)
		return
	}
	i.val = helpers.Hash(id)[:Len]
	return
}

func (i *T) FromIdHex(idh string) (err error) {
	var id []byte
	if id, err = hex.Dec(idh); chk.E(err) {
		return
	}
	if len(id) != sha256.Size {
		err = errorf.E("invalid Id length, got %d require %d", len(id), sha256.Size)
		return
	}
	i.val = helpers.Hash(id)[:Len]
	return
}

func (i *T) Bytes() (b []byte) { return i.val }

func (i *T) MarshalWrite(w io.Writer) (err error) {
	_, err = w.Write(i.val)
	return
}

func (i *T) UnmarshalRead(r io.Reader) (err error) {
	if len(i.val) < Len {
		i.val = make([]byte, Len)
	} else {
		i.val = i.val[:Len]
	}
	// io.ReadFull so a short read cannot leave a partially filled hash
	_, err = io.ReadFull(r, i.val)
	return
}
53
database/indexes/types/kindidx/kindidx.go
Normal file
@@ -0,0 +1,53 @@
package kindidx

import (
	"encoding/binary"
	"io"

	"x.realy.lol/errorf"
)

const Len = 2

type T struct{ val []byte }

func FromKind(kind int) (k *T) {
	k = &T{val: make([]byte, Len)}
	binary.LittleEndian.PutUint16(k.val, uint16(kind))
	return
}

func FromBytes(kindBytes []byte) (k *T, err error) {
	if len(kindBytes) != Len {
		err = errorf.E("kind must be %d bytes long, got %d", Len, len(kindBytes))
		return
	}
	k = &T{val: kindBytes}
	return
}

func (k *T) Set(ki int) {
	kk := FromKind(ki)
	k.val = kk.val
}

func (k *T) ToKind() (kind int) {
	kind = int(binary.LittleEndian.Uint16(k.val))
	return
}

func (k *T) Bytes() (b []byte) { return k.val }

func (k *T) MarshalWrite(w io.Writer) (err error) {
	_, err = w.Write(k.val)
	return
}

func (k *T) UnmarshalRead(r io.Reader) (err error) {
	if len(k.val) < Len {
		k.val = make([]byte, Len)
	} else {
		k.val = k.val[:Len]
	}
	// io.ReadFull so a short read cannot leave a partially filled kind
	_, err = io.ReadFull(r, k.val)
	return
}
29
database/indexes/types/letter/letter.go
Normal file
@@ -0,0 +1,29 @@
package letter

import (
	"io"
)

const Len = 1

type T struct {
	val []byte
}

func New(letter byte) (p *T) { return &T{[]byte{letter}} }

func (p *T) Set(lb byte) { p.val = []byte{lb} }

func (p *T) Letter() byte { return p.val[0] }

func (p *T) MarshalWrite(w io.Writer) (err error) {
	_, err = w.Write(p.val)
	return
}

func (p *T) UnmarshalRead(r io.Reader) (err error) {
	val := make([]byte, 1)
	// io.ReadFull so err is set unless the byte was actually read
	_, err = io.ReadFull(r, val)
	p.val = val
	return
}
41
database/indexes/types/number/uint16.go
Normal file
@@ -0,0 +1,41 @@
package number

import (
	"encoding/binary"
	"io"
)

// Uint16 is a codec for encoding and decoding 16-bit unsigned integers.
type Uint16 struct {
	value uint16
}

// Set sets the value as a uint16.
func (c *Uint16) Set(value uint16) {
	c.value = value
}

// Get gets the value as a uint16.
func (c *Uint16) Get() uint16 {
	return c.value
}

// SetInt sets the value as an int, converting it to uint16. Truncates values outside uint16 range (0-65535).
func (c *Uint16) SetInt(value int) {
	c.value = uint16(value)
}

// GetInt gets the value as an int, converted from uint16.
func (c *Uint16) GetInt() int {
	return int(c.value)
}

// MarshalWrite writes the uint16 value to the provided writer in BigEndian order.
func (c *Uint16) MarshalWrite(w io.Writer) error {
	return binary.Write(w, binary.BigEndian, c.value)
}

// UnmarshalRead reads a uint16 value from the provided reader in BigEndian order.
func (c *Uint16) UnmarshalRead(r io.Reader) error {
	return binary.Read(r, binary.BigEndian, &c.value)
}
66
database/indexes/types/number/uint16_test.go
Normal file
@@ -0,0 +1,66 @@
package number

import (
	"bytes"
	"math"
	"testing"

	"lukechampine.com/frand"
)

func TestUint16(t *testing.T) {
	// Helper function to generate random 16-bit integers
	generateRandomUint16 := func() uint16 {
		return uint16(frand.Intn(math.MaxUint16)) // math.MaxUint16 == 65535
	}

	for i := 0; i < 100; i++ { // Run test 100 times for random values
		// Generate a random value
		randomUint16 := generateRandomUint16()
		randomInt := int(randomUint16)

		// Create a new encodedUint16
		encodedUint16 := new(Uint16)

		// Test UInt16 setter and getter
		encodedUint16.Set(randomUint16)
		if encodedUint16.Get() != randomUint16 {
			t.Fatalf("Get mismatch: got %d, expected %d", encodedUint16.Get(), randomUint16)
		}

		// Test GetInt setter and getter
		encodedUint16.SetInt(randomInt)
		if encodedUint16.GetInt() != randomInt {
			t.Fatalf("GetInt mismatch: got %d, expected %d", encodedUint16.GetInt(), randomInt)
		}

		// Test encoding to []byte and decoding back
		bufEnc := new(bytes.Buffer)

		// MarshalWrite
		err := encodedUint16.MarshalWrite(bufEnc)
		if err != nil {
			t.Fatalf("MarshalWrite failed: %v", err)
		}
		encoded := bufEnc.Bytes()

		// Create a copy of encoded bytes before decoding
		bufDec := bytes.NewBuffer(encoded)

		// Decode back the value
		decodedUint16 := new(Uint16)
		err = decodedUint16.UnmarshalRead(bufDec)
		if err != nil {
			t.Fatalf("UnmarshalRead failed: %v", err)
		}

		if decodedUint16.Get() != randomUint16 {
			t.Fatalf("Decoded value mismatch: got %d, expected %d", decodedUint16.Get(), randomUint16)
		}

		// Re-encode the decoded value and ensure the bytes round-trip
		bufRe := new(bytes.Buffer)
		if err = decodedUint16.MarshalWrite(bufRe); err != nil {
			t.Fatalf("MarshalWrite failed: %v", err)
		}
		if !bytes.Equal(encoded, bufRe.Bytes()) {
			t.Fatalf("Byte encoding mismatch: got %v, expected %v", bufRe.Bytes(), encoded)
		}
	}
}
78
database/indexes/types/number/uint24.go
Normal file
@@ -0,0 +1,78 @@
package number

import (
	"errors"
	"io"
)

// MaxUint24 is the maximum value of a 24-bit unsigned integer: 2^24 - 1.
const MaxUint24 uint32 = 1<<24 - 1

// Uint24 is a codec for encoding and decoding 24-bit unsigned integers.
type Uint24 struct {
	value uint32
}

// SetUint24 sets the value as a 24-bit unsigned integer.
// If the value exceeds the maximum allowable value for 24 bits, it returns an error.
func (c *Uint24) SetUint24(value uint32) error {
	if value > MaxUint24 {
		return errors.New("value exceeds 24-bit range")
	}
	c.value = value
	return nil
}

// Uint24 gets the value as a 24-bit unsigned integer.
func (c *Uint24) Uint24() uint32 {
	return c.value
}

// SetInt sets the value as an int, converting it to a 24-bit unsigned integer.
// If the value is out of the 24-bit range, it returns an error.
func (c *Uint24) SetInt(value int) error {
	if value < 0 || uint32(value) > MaxUint24 {
		return errors.New("value exceeds 24-bit range")
	}
	c.value = uint32(value)
	return nil
}

// Int gets the value as an int, converted from the 24-bit unsigned integer.
func (c *Uint24) Int() int {
	return int(c.value)
}

// MarshalWrite encodes the 24-bit unsigned integer and writes it directly to the provided io.Writer.
// The encoding uses 3 bytes in BigEndian order.
func (c *Uint24) MarshalWrite(w io.Writer) error {
	if c.value > MaxUint24 {
		return errors.New("value exceeds 24-bit range")
	}

	// Write the 3 bytes (BigEndian order) directly to the writer
	var buf [3]byte
	buf[0] = byte((c.value >> 16) & 0xFF) // Most significant byte
	buf[1] = byte((c.value >> 8) & 0xFF)
	buf[2] = byte(c.value & 0xFF) // Least significant byte

	_, err := w.Write(buf[:]) // Write all 3 bytes to the writer
	return err
}

// UnmarshalRead reads 3 bytes directly from the provided io.Reader and decodes it into a 24-bit unsigned integer.
func (c *Uint24) UnmarshalRead(r io.Reader) error {
	// Read 3 bytes directly from the reader
	var buf [3]byte
	_, err := io.ReadFull(r, buf[:]) // Ensure exactly 3 bytes are read
	if err != nil {
		return err
	}

	// Decode the 3 bytes into a 24-bit unsigned integer
	c.value = (uint32(buf[0]) << 16) |
		(uint32(buf[1]) << 8) |
		uint32(buf[2])

	return nil
}
67
database/indexes/types/number/uint24_test.go
Normal file
@@ -0,0 +1,67 @@
package number

import (
	"bytes"
	"testing"
)

func TestUint24(t *testing.T) {
	tests := []struct {
		name        string
		value       uint32
		expectedErr bool
	}{
		{"Minimum Value", 0, false},
		{"Maximum Value", MaxUint24, false},
		{"Value in Range", 8374263, false},          // Example value within the range
		{"Value Exceeds Range", MaxUint24 + 1, true}, // Exceeds 24-bit limit
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			codec := new(Uint24)

			// Test SetUint24
			err := codec.SetUint24(tt.value)
			if tt.expectedErr {
				if err == nil {
					t.Errorf("expected error but got none")
				}
				return
			} else if err != nil {
				t.Errorf("unexpected error: %v", err)
				return
			}

			// Test Uint24 getter
			if codec.Uint24() != tt.value {
				t.Errorf("Uint24 mismatch: got %d, expected %d", codec.Uint24(), tt.value)
			}

			// Test MarshalWrite and UnmarshalRead
			buf := new(bytes.Buffer)

			// MarshalWrite directly to the buffer
			if err := codec.MarshalWrite(buf); err != nil {
				t.Fatalf("MarshalWrite failed: %v", err)
			}

			// Validate encoded size is 3 bytes
			encoded := buf.Bytes()
			if len(encoded) != 3 {
				t.Fatalf("encoded size mismatch: got %d bytes, expected 3 bytes", len(encoded))
			}

			// Decode from the buffer
			decoded := new(Uint24)
			if err := decoded.UnmarshalRead(buf); err != nil {
				t.Fatalf("UnmarshalRead failed: %v", err)
			}

			// Validate decoded value
			if decoded.Uint24() != tt.value {
				t.Errorf("Decoded value mismatch: got %d, expected %d", decoded.Uint24(), tt.value)
			}
		})
	}
}
42
database/indexes/types/number/uint32.go
Normal file
@@ -0,0 +1,42 @@
package number

import (
	"encoding/binary"
	"io"
)

// Uint32 is a codec for encoding and decoding 32-bit unsigned integers.
type Uint32 struct {
	value uint32
}

// SetUint32 sets the value as a uint32.
func (c *Uint32) SetUint32(value uint32) {
	c.value = value
}

// Uint32 gets the value as a uint32.
func (c *Uint32) Uint32() uint32 {
	return c.value
}

// SetInt sets the value as an int, converting it to uint32.
// Values outside the range of uint32 (0–4294967295) will be truncated.
func (c *Uint32) SetInt(value int) {
	c.value = uint32(value)
}

// Int gets the value as an int, converted from uint32.
func (c *Uint32) Int() int {
	return int(c.value)
}

// MarshalWrite writes the uint32 value to the provided writer in BigEndian order.
func (c *Uint32) MarshalWrite(w io.Writer) error {
	return binary.Write(w, binary.BigEndian, c.value)
}

// UnmarshalRead reads a uint32 value from the provided reader in BigEndian order.
func (c *Uint32) UnmarshalRead(r io.Reader) error {
	return binary.Read(r, binary.BigEndian, &c.value)
}
66
database/indexes/types/number/uint32_test.go
Normal file
@@ -0,0 +1,66 @@
package number

import (
	"bytes"
	"math"
	"testing"

	"lukechampine.com/frand"
)

func TestUint32(t *testing.T) {
	// Helper function to generate random 32-bit integers
	generateRandomUint32 := func() uint32 {
		return uint32(frand.Intn(math.MaxUint32)) // math.MaxUint32 == 4294967295
	}

	for i := 0; i < 100; i++ { // Run test 100 times for random values
		// Generate a random value
		randomUint32 := generateRandomUint32()
		randomInt := int(randomUint32)

		// Create a new codec
		codec := new(Uint32)

		// Test UInt32 setter and getter
		codec.SetUint32(randomUint32)
		if codec.Uint32() != randomUint32 {
			t.Fatalf("Uint32 mismatch: got %d, expected %d", codec.Uint32(), randomUint32)
		}

		// Test Int setter and getter
		codec.SetInt(randomInt)
		if codec.Int() != randomInt {
			t.Fatalf("Int mismatch: got %d, expected %d", codec.Int(), randomInt)
		}

		// Test encoding to []byte and decoding back
		bufEnc := new(bytes.Buffer)

		// MarshalWrite
		err := codec.MarshalWrite(bufEnc)
		if err != nil {
			t.Fatalf("MarshalWrite failed: %v", err)
		}
		encoded := bufEnc.Bytes()

		// Create a copy of encoded bytes before decoding
		bufDec := bytes.NewBuffer(encoded)

		// Decode back the value
		decoded := new(Uint32)
		err = decoded.UnmarshalRead(bufDec)
		if err != nil {
			t.Fatalf("UnmarshalRead failed: %v", err)
		}

		if decoded.Uint32() != randomUint32 {
			t.Fatalf("Decoded value mismatch: got %d, expected %d", decoded.Uint32(), randomUint32)
		}

		// Re-encode the decoded value and ensure the bytes round-trip
		bufRe := new(bytes.Buffer)
		if err = decoded.MarshalWrite(bufRe); err != nil {
			t.Fatalf("MarshalWrite failed: %v", err)
		}
		if !bytes.Equal(encoded, bufRe.Bytes()) {
			t.Fatalf("Byte encoding mismatch: got %v, expected %v", bufRe.Bytes(), encoded)
		}
	}
}
75
database/indexes/types/number/uint40.go
Normal file
@@ -0,0 +1,75 @@
package number

import (
	"errors"
	"io"
)

// MaxUint40 is the maximum value of a 40-bit unsigned integer: 2^40 - 1.
const MaxUint40 uint64 = 1<<40 - 1

// Uint40 is a codec for encoding and decoding 40-bit unsigned integers.
type Uint40 struct{ value uint64 }

// SetUint40 sets the value as a 40-bit unsigned integer.
// If the value exceeds the maximum allowable value for 40 bits, it returns an error.
func (c *Uint40) SetUint40(value uint64) error {
	if value > MaxUint40 {
		return errors.New("value exceeds 40-bit range")
	}
	c.value = value
	return nil
}

// Uint40 gets the value as a 40-bit unsigned integer.
func (c *Uint40) Uint40() uint64 { return c.value }

// SetInt sets the value as an int, converting it to a 40-bit unsigned integer.
// If the value is out of the 40-bit range, it returns an error.
func (c *Uint40) SetInt(value int) error {
	if value < 0 || uint64(value) > MaxUint40 {
		return errors.New("value exceeds 40-bit range")
	}
	c.value = uint64(value)
	return nil
}

// Int gets the value as an int, converted from the 40-bit unsigned integer.
// Note: If the value exceeds the int range, it will be truncated.
func (c *Uint40) Int() int { return int(c.value) }

// MarshalWrite encodes the 40-bit unsigned integer and writes it to the provided writer.
// The encoding uses 5 bytes in BigEndian order.
func (c *Uint40) MarshalWrite(w io.Writer) (err error) {
	if c.value > MaxUint40 {
		return errors.New("value exceeds 40-bit range")
	}
	// Buffer for the 5 bytes
	buf := make([]byte, 5)
	// Write the upper 5 bytes (ignoring the most significant 3 bytes of uint64)
	buf[0] = byte((c.value >> 32) & 0xFF) // Most significant byte
	buf[1] = byte((c.value >> 24) & 0xFF)
	buf[2] = byte((c.value >> 16) & 0xFF)
	buf[3] = byte((c.value >> 8) & 0xFF)
	buf[4] = byte(c.value & 0xFF) // Least significant byte
	_, err = w.Write(buf)
	return err
}

// UnmarshalRead reads 5 bytes from the provided reader and decodes it into a 40-bit unsigned integer.
func (c *Uint40) UnmarshalRead(r io.Reader) (err error) {
	// Buffer for the 5 bytes; io.ReadFull ensures exactly 5 bytes are read
	buf := make([]byte, 5)
	_, err = io.ReadFull(r, buf)
	if err != nil {
		return err
	}
	// Decode the 5 bytes into a 40-bit unsigned integer
	c.value = (uint64(buf[0]) << 32) |
		(uint64(buf[1]) << 24) |
		(uint64(buf[2]) << 16) |
		(uint64(buf[3]) << 8) |
		uint64(buf[4])

	return nil
}
68
database/indexes/types/number/uint40_test.go
Normal file
@@ -0,0 +1,68 @@
package number

import (
	"bytes"
	"testing"
)

func TestUint40(t *testing.T) {
	// Test cases for Uint40
	tests := []struct {
		name        string
		value       uint64
		expectedErr bool
	}{
		{"Minimum Value", 0, false},
		{"Maximum Value", MaxUint40, false},
		{"Value in Range", 109951162777, false}, // Example value within the range
		{"Value Exceeds Range", MaxUint40 + 1, true},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			codec := new(Uint40)

			// Test SetUint40
			err := codec.SetUint40(tt.value)
			if tt.expectedErr {
				if err == nil {
					t.Errorf("expected error but got none")
				}
				return
			} else if err != nil {
				t.Errorf("unexpected error: %v", err)
				return
			}

			// Test Uint40 getter
			if codec.Uint40() != tt.value {
				t.Errorf("Uint40 mismatch: got %d, expected %d", codec.Uint40(), tt.value)
			}

			// Test MarshalWrite and UnmarshalRead
			buf := new(bytes.Buffer)

			// Marshal to a buffer
			if err = codec.MarshalWrite(buf); err != nil {
				t.Fatalf("MarshalWrite failed: %v", err)
			}

			// Validate encoded size is 5 bytes
			encoded := buf.Bytes()
			if len(encoded) != 5 {
				t.Fatalf("encoded size mismatch: got %d bytes, expected 5 bytes", len(encoded))
			}

			// Decode from the buffer
			decoded := new(Uint40)
			if err = decoded.UnmarshalRead(buf); err != nil {
				t.Fatalf("UnmarshalRead failed: %v", err)
			}

			// Validate decoded value
			if decoded.Uint40() != tt.value {
				t.Errorf("Decoded value mismatch: got %d, expected %d", decoded.Uint40(), tt.value)
			}
		})
	}
}
42
database/indexes/types/number/uint64.go
Normal file
@@ -0,0 +1,42 @@
package number

import (
	"encoding/binary"
	"io"
)

// Uint64 is a codec for encoding and decoding 64-bit unsigned integers.
type Uint64 struct {
	value uint64
}

// SetUint64 sets the value as a uint64.
func (c *Uint64) SetUint64(value uint64) {
	c.value = value
}

// Uint64 gets the value as a uint64.
func (c *Uint64) Uint64() uint64 {
	return c.value
}

// SetInt sets the value as an int, converting it to uint64.
// Negative values wrap around in the conversion.
func (c *Uint64) SetInt(value int) {
	c.value = uint64(value)
}

// Int gets the value as an int, converted from uint64. May truncate if the value exceeds the range of int.
func (c *Uint64) Int() int {
	return int(c.value)
}

// MarshalWrite writes the uint64 value to the provided writer in BigEndian order.
func (c *Uint64) MarshalWrite(w io.Writer) error {
	return binary.Write(w, binary.BigEndian, c.value)
}

// UnmarshalRead reads a uint64 value from the provided reader in BigEndian order.
func (c *Uint64) UnmarshalRead(r io.Reader) error {
	return binary.Read(r, binary.BigEndian, &c.value)
}
66
database/indexes/types/number/uint64_test.go
Normal file
@@ -0,0 +1,66 @@
package number

import (
	"bytes"
	"math"
	"testing"

	"lukechampine.com/frand"
)

func TestUint64(t *testing.T) {
	// Helper function to generate random 64-bit integers
	generateRandomUint64 := func() uint64 {
		return frand.Uint64n(math.MaxUint64) // math.MaxUint64 == 18446744073709551615
	}

	for i := 0; i < 100; i++ { // Run test 100 times for random values
		// Generate a random value
		randomUint64 := generateRandomUint64()
		randomInt := int(randomUint64)

		// Create a new codec
		codec := new(Uint64)

		// Test Uint64 setter and getter
		codec.SetUint64(randomUint64)
		if codec.Uint64() != randomUint64 {
			t.Fatalf("Uint64 mismatch: got %d, expected %d", codec.Uint64(), randomUint64)
		}

		// Test Int setter and getter
		codec.SetInt(randomInt)
		if codec.Int() != randomInt {
			t.Fatalf("Int mismatch: got %d, expected %d", codec.Int(), randomInt)
		}

		// Test encoding to []byte and decoding back
		bufEnc := new(bytes.Buffer)

		// MarshalWrite
		err := codec.MarshalWrite(bufEnc)
		if err != nil {
			t.Fatalf("MarshalWrite failed: %v", err)
		}
		encoded := bufEnc.Bytes()

		// Create a buffer for decoding
		bufDec := bytes.NewBuffer(encoded)

		// Decode back the value
		decoded := new(Uint64)
		err = decoded.UnmarshalRead(bufDec)
		if err != nil {
			t.Fatalf("UnmarshalRead failed: %v", err)
		}

		if decoded.Uint64() != randomUint64 {
			t.Fatalf("Decoded value mismatch: got %d, expected %d", decoded.Uint64(), randomUint64)
		}

		// Re-encode the decoded value and compare bytes to confirm a stable
		// round trip (the original compared the buffer with itself, which
		// always passed).
		bufRe := new(bytes.Buffer)
		if err = decoded.MarshalWrite(bufRe); err != nil {
			t.Fatalf("MarshalWrite (re-encode) failed: %v", err)
		}
		if !bytes.Equal(encoded, bufRe.Bytes()) {
			t.Fatalf("Byte encoding mismatch: got %v, expected %v", bufRe.Bytes(), encoded)
		}
	}
}
33
database/indexes/types/prefix/prefix.go
Normal file
@@ -0,0 +1,33 @@
package prefix

import (
	"io"

	"x.realy.lol/database/indexes/prefixes"
)

const Len = 2

type T struct {
	val []byte
}

func New(prf ...int) (p *T) {
	if len(prf) > 0 {
		return &T{[]byte(prefixes.Prefix(prf[0]))}
	} else {
		return &T{[]byte{0, 0}}
	}
}

func (p *T) Bytes() (b []byte) { return p.val }

func (p *T) MarshalWrite(w io.Writer) (err error) {
	_, err = w.Write(p.val)
	return
}

func (p *T) UnmarshalRead(r io.Reader) (err error) {
	// ensure the target buffer is exactly Len bytes and read fully, so a
	// zero-value T and short reads are both handled.
	if len(p.val) != Len {
		p.val = make([]byte, Len)
	}
	_, err = io.ReadFull(r, p.val)
	return
}
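A small sketch of how the 2-byte prefix heads an index key (illustrative, not part of the diff; the no-argument form is used so nothing about prefixes.Prefix is assumed):

package main

import (
	"bytes"
	"fmt"

	"x.realy.lol/database/indexes/types/prefix"
)

func main() {
	p := prefix.New() // zero prefix {0, 0} when no index number is given
	buf := new(bytes.Buffer)
	_ = p.MarshalWrite(buf)
	fmt.Printf("%x\n", buf.Bytes()) // 0000: the Len (2) bytes that head every key in this index
}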
56
database/indexes/types/pubhash/pubhash.go
Normal file
@@ -0,0 +1,56 @@
package pubhash

import (
	"io"

	"x.realy.lol/chk"
	"x.realy.lol/ec/schnorr"
	"x.realy.lol/errorf"
	"x.realy.lol/helpers"
	"x.realy.lol/hex"
)

const Len = 8

type T struct{ val []byte }

func New() (ph *T) { return &T{make([]byte, Len)} }

func (ph *T) FromPubkey(pk []byte) (err error) {
	if len(pk) != schnorr.PubKeyBytesLen {
		err = errorf.E("invalid Pubkey length, got %d require %d", len(pk), schnorr.PubKeyBytesLen)
		return
	}
	ph.val = helpers.Hash(pk)[:Len]
	return
}

func (ph *T) FromPubkeyHex(pk string) (err error) {
	if len(pk) != schnorr.PubKeyBytesLen*2 {
		err = errorf.E("invalid Pubkey length, got %d require %d", len(pk), schnorr.PubKeyBytesLen*2)
		return
	}
	var pkb []byte
	if pkb, err = hex.Dec(pk); chk.E(err) {
		return
	}
	ph.val = helpers.Hash(pkb)[:Len]
	return
}

func (ph *T) Bytes() (b []byte) { return ph.val }

func (ph *T) MarshalWrite(w io.Writer) (err error) {
	_, err = w.Write(ph.val)
	return
}

func (ph *T) UnmarshalRead(r io.Reader) (err error) {
	if len(ph.val) < Len {
		ph.val = make([]byte, Len)
	} else {
		ph.val = ph.val[:Len]
	}
	// read exactly Len bytes; io.ReadFull turns a short read into an error.
	_, err = io.ReadFull(r, ph.val)
	return
}
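A minimal usage sketch for the 8-byte pubkey hash (not part of the diff; the hex literal is an arbitrary pubkey reused from the filter tests later in this changeset):

package main

import (
	"fmt"

	"x.realy.lol/database/indexes/types/pubhash"
)

func main() {
	ph := pubhash.New()
	// arbitrary 32-byte (64 hex character) pubkey for illustration
	pk := "1d80e5588de010d137a67c42b03717595f5f510e73e42cfc48f31bae91844d59"
	if err := ph.FromPubkeyHex(pk); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", ph.Bytes()) // first Len (8) bytes of helpers.Hash(pubkey)
}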
56
database/indexes/types/timestamp/timestamp.go
Normal file
@@ -0,0 +1,56 @@
package timestamp

import (
	"bytes"
	"io"

	"x.realy.lol/chk"
	"x.realy.lol/database/indexes/types/varint"
	timeStamp "x.realy.lol/timestamp"
)

const Len = 8

type T struct{ val int }

func (ts *T) FromInt(t int)     { ts.val = t }
func (ts *T) FromInt64(t int64) { ts.val = int(t) }

func FromBytes(timestampBytes []byte) (ts *T, err error) {
	v := varint.New()
	if err = v.UnmarshalRead(bytes.NewBuffer(timestampBytes)); chk.E(err) {
		return
	}
	ts = &T{val: v.ToInt()}
	return
}

func (ts *T) ToTimestamp() (timestamp timeStamp.Timestamp) {
	// return the stored value; the original returned the zero value.
	return timeStamp.Timestamp(ts.val)
}

func (ts *T) Bytes() (b []byte, err error) {
	// load the varint codec with the stored value before encoding; the
	// original left it at zero, so every timestamp serialized as 0.
	v := varint.New()
	v.FromUint64(uint64(ts.val))
	buf := new(bytes.Buffer)
	if err = v.MarshalWrite(buf); chk.E(err) {
		return
	}
	b = buf.Bytes()
	return
}

func (ts *T) MarshalWrite(w io.Writer) (err error) {
	v := varint.New()
	v.FromUint64(uint64(ts.val))
	if err = v.MarshalWrite(w); chk.E(err) {
		return
	}
	return
}

func (ts *T) UnmarshalRead(r io.Reader) (err error) {
	v := varint.New()
	if err = v.UnmarshalRead(r); chk.E(err) {
		return
	}
	ts.val = v.ToInt()
	return
}
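A round-trip sketch for the varint-backed timestamp (not part of the diff; it relies on the value now being loaded before encoding, per the fix above):

package main

import (
	"fmt"
	"time"

	"x.realy.lol/database/indexes/types/timestamp"
)

func main() {
	ts := &timestamp.T{}
	ts.FromInt64(time.Now().Unix())
	b, err := ts.Bytes() // varint encoding of the stored second
	if err != nil {
		panic(err)
	}
	back, err := timestamp.FromBytes(b)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.ToTimestamp()) // same second as stored
}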
94
database/indexes/types/varint/varint.go
Normal file
@@ -0,0 +1,94 @@
package varint

import (
	"bytes"
	"io"

	"x.realy.lol/chk"
	"x.realy.lol/varint"
)

type V struct{ val uint64 }

type S []*V

func New() (s *V) { return &V{} }

func (vi *V) FromUint64(ser uint64) {
	vi.val = ser
}

func FromBytes(ser []byte) (s *V, err error) {
	s = &V{}
	if s.val, err = varint.Decode(bytes.NewBuffer(ser)); chk.E(err) {
		return
	}
	return
}

func (vi *V) ToUint64() (ser uint64) { return vi.val }

func (vi *V) ToInt() (ser int) { return int(vi.val) }

func (vi *V) ToUint32() (v uint32) { return uint32(vi.val) }

func (vi *V) Bytes() (b []byte) {
	buf := new(bytes.Buffer)
	varint.Encode(buf, vi.val)
	// return the encoded bytes; the original returned the nil zero value of b.
	return buf.Bytes()
}

func (vi *V) MarshalWrite(w io.Writer) (err error) {
	varint.Encode(w, vi.val)
	return
}

func (vi *V) UnmarshalRead(r io.Reader) (err error) {
	vi.val, err = varint.Decode(r)
	return
}

// DeduplicateInOrder removes duplicates from a slice of V, keeping the first
// occurrence of each value in its original position.
func DeduplicateInOrder(s S) (v S) {
	// for larger slices, this uses a lot less memory, at the cost of slower execution.
	if len(s) > 10000 {
	skip:
		for i, sa := range s {
			// only compare against earlier elements, so the first occurrence
			// is kept; the original compared against all elements, which
			// dropped every value that had a duplicate anywhere.
			for j := 0; j < i; j++ {
				if s[j].val == sa.val {
					continue skip
				}
			}
			v = append(v, sa)
		}
	} else {
		// for small slices, this is faster but uses more memory.
		seen := map[uint64]*V{}
		for _, val := range s {
			if _, ok := seen[val.val]; !ok {
				v = append(v, val)
				seen[val.val] = val
			}
		}
	}
	return
}

// Intersect deduplicates and performs a set intersection on two slices.
func Intersect(a, b []*V) (sers []*V) {
	// first deduplicate to eliminate unnecessary iterations
	a = DeduplicateInOrder(a)
	b = DeduplicateInOrder(b)
	for _, as := range a {
		for _, bs := range b {
			if as.val == bs.val {
				// if the match is found, add to the result and move to the next candidate from
				// the "a" serial list.
				sers = append(sers, as)
				break
			}
		}
	}
	return
}
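A short sketch of deduplication plus intersection over serial lists (illustrative, not part of the diff):

package main

import (
	"fmt"

	"x.realy.lol/database/indexes/types/varint"
)

func mk(vals ...uint64) (s []*varint.V) {
	for _, n := range vals {
		v := varint.New()
		v.FromUint64(n)
		s = append(s, v)
	}
	return
}

func main() {
	a := mk(1, 2, 2, 3)
	b := mk(3, 4, 1, 1)
	for _, v := range varint.Intersect(a, b) {
		fmt.Println(v.ToUint64()) // 1, then 3: common serials in a's first-occurrence order
	}
}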
69
database/log.go
Normal file
@@ -0,0 +1,69 @@
package database

import (
	"fmt"
	"runtime"
	"strings"

	"x.realy.lol/atomic"
	"x.realy.lol/log"
	"x.realy.lol/lol"
)

// NewLogger creates a new badger logger.
func NewLogger(logLevel int, label string) (l *logger) {
	log.T.Ln("getting logger for", label)
	l = &logger{Label: label}
	l.Level.Store(int32(logLevel))
	return
}

type logger struct {
	Level atomic.Int32
	Label string
}

// SetLogLevel atomically adjusts the log level to the given log level code.
func (l *logger) SetLogLevel(level int) {
	l.Level.Store(int32(level))
}

// Errorf is a log printer for this level of message.
func (l *logger) Errorf(s string, i ...interface{}) {
	if l.Level.Load() >= lol.Error {
		s = l.Label + ": " + s
		txt := fmt.Sprintf(s, i...)
		_, file, line, _ := runtime.Caller(2)
		log.E.F("%s\n%s:%d", strings.TrimSpace(txt), file, line)
	}
}

// Warningf is a log printer for this level of message.
func (l *logger) Warningf(s string, i ...interface{}) {
	if l.Level.Load() >= lol.Warn {
		s = l.Label + ": " + s
		txt := fmt.Sprintf(s, i...)
		_, file, line, _ := runtime.Caller(2)
		log.W.F("%s\n%s:%d", strings.TrimSpace(txt), file, line)
	}
}

// Infof is a log printer for this level of message.
func (l *logger) Infof(s string, i ...interface{}) {
	if l.Level.Load() >= lol.Info {
		s = l.Label + ": " + s
		txt := fmt.Sprintf(s, i...)
		_, file, line, _ := runtime.Caller(2)
		log.I.F("%s\n%s:%d", strings.TrimSpace(txt), file, line)
	}
}

// Debugf is a log printer for this level of message.
func (l *logger) Debugf(s string, i ...interface{}) {
	if l.Level.Load() >= lol.Debug {
		s = l.Label + ": " + s
		txt := fmt.Sprintf(s, i...)
		_, file, line, _ := runtime.Caller(2)
		log.D.F("%s\n%s:%d", strings.TrimSpace(txt), file, line)
	}
}
90
database/main.go
Normal file
@@ -0,0 +1,90 @@
package database

import (
	"context"

	"github.com/dgraph-io/badger/v4"

	"x.realy.lol/chk"
	"x.realy.lol/log"
	"x.realy.lol/units"
)

type D struct {
	ctx            context.Context
	cancel         context.CancelCauseFunc
	dataDir        string
	BlockCacheSize int
	Logger         *logger
	InitLogLevel   int
	// DB is the badger db
	*badger.DB
	// seq is the monotonic collision free index for raw event storage.
	seq *badger.Sequence
}

func New() (d *D) {
	ctx, cancel := context.WithCancelCause(context.Background())
	d = &D{BlockCacheSize: units.Gb, ctx: ctx, cancel: cancel}
	return
}

// Path returns the path where the database files are stored.
func (d *D) Path() string { return d.dataDir }

// Init sets up the database with the loaded configuration.
func (d *D) Init(path string) (err error) {
	d.dataDir = path
	log.I.Ln("opening realy database at", d.dataDir)
	opts := badger.DefaultOptions(d.dataDir)
	opts.BlockCacheSize = int64(d.BlockCacheSize)
	opts.BlockSize = units.Gb
	opts.CompactL0OnClose = true
	opts.LmaxCompaction = true
	d.Logger = NewLogger(d.InitLogLevel, d.dataDir)
	opts.Logger = d.Logger
	if d.DB, err = badger.Open(opts); chk.E(err) {
		return err
	}
	log.I.Ln("getting event store sequence index", d.dataDir)
	if d.seq, err = d.DB.GetSequence([]byte("events"), 1000); chk.E(err) {
		return err
	}
	return nil
}

func (d *D) Close() (err error) { return d.DB.Close() }

// Serial returns the next monotonic conflict free unique serial on the database.
func (d *D) Serial() (ser uint64, err error) {
	if ser, err = d.seq.Next(); chk.E(err) {
		return
	}
	// log.T.ToSliceOfBytes("serial %x", ser)
	return
}

func (d *D) View(fn func(txn *badger.Txn) (err error)) (err error) {
	if err = d.DB.View(fn); err != nil {
		return
	}
	return
}

func (d *D) Update(fn func(txn *badger.Txn) (err error)) (err error) {
	if err = d.DB.Update(fn); chk.E(err) {
		return
	}
	return
}

func (d *D) Set(k, v []byte) (err error) {
	if err = d.Update(func(txn *badger.Txn) (err error) {
		if err = txn.Set(k, v); chk.E(err) {
			return
		}
		return
	}); chk.E(err) {
		return
	}
	return
}
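A minimal open/use/close sketch for the wrapper (not part of the diff; the directory name is an arbitrary example):

package main

import (
	"os"
	"path/filepath"

	"x.realy.lol/database"
)

func main() {
	d := database.New()
	dir := filepath.Join(os.TempDir(), "realy-example") // arbitrary example path
	if err := d.Init(dir); err != nil {
		panic(err)
	}
	defer d.Close()
	// write a raw key/value through the Update wrapper
	if err := d.Set([]byte("k"), []byte("v")); err != nil {
		panic(err)
	}
	// fetch the next unique serial for event storage
	ser, err := d.Serial()
	if err != nil {
		panic(err)
	}
	_ = ser
}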
70
database/store-find_test.go
Normal file
@@ -0,0 +1,70 @@
package database

import (
	"bufio"
	"bytes"
	"os"
	"path/filepath"
	"testing"

	"x.realy.lol/chk"
	"x.realy.lol/event"
	"x.realy.lol/interrupt"
	"x.realy.lol/log"
)

func TestD_StoreEvent(t *testing.T) {
	var err error
	d := New()
	tmpDir := filepath.Join(os.TempDir(), "testrealy")
	os.RemoveAll(tmpDir)
	if err = d.Init(tmpDir); chk.E(err) {
		t.Fatal(err)
	}
	buf := bytes.NewBuffer(ExampleEvents)
	scan := bufio.NewScanner(buf)
	scan.Buffer(make([]byte, 5120000), 5120000)
	var count, errs int
	var evIds [][]byte
	interrupt.AddHandler(func() {
		d.Close()
		os.RemoveAll(tmpDir)
	})
	for scan.Scan() {
		b := scan.Bytes()
		ev := event.New()
		if err = ev.Unmarshal(b); chk.E(err) {
			t.Fatalf("%s:\n%s", err, b)
		}
		// verify the signature on the event
		var ok bool
		if ok, err = ev.Verify(); chk.E(err) {
			errs++
			continue
		}
		if !ok {
			errs++
			log.E.F("event signature is invalid\n%s", b)
			continue
		}
		count++
		// cap the test at the first 1000 verified events
		if count%1000 == 0 {
			log.I.F("unmarshaled %d events", count)
			break
		}
		if err = d.StoreEvent(ev); chk.E(err) {
			continue
		}
		evIds = append(evIds, ev.GetIdBytes())
	}
	log.I.F("completed unmarshalling %d events", count)
	for _, v := range evIds {
		var ev *event.E
		if ev, err = d.GetEventById(v); chk.E(err) {
			t.Fatal(err)
		}
		_ = ev
	}
	log.I.F("stored and retrieved %d events", len(evIds))
	return
}
85
database/store.go
Normal file
@@ -0,0 +1,85 @@
package database

import (
	"bytes"
	"time"

	"x.realy.lol/chk"
	"x.realy.lol/database/indexes"
	"x.realy.lol/database/indexes/types/timestamp"
	"x.realy.lol/database/indexes/types/varint"
	"x.realy.lol/errorf"
	"x.realy.lol/event"
)

func (d *D) StoreEvent(ev *event.E) (err error) {
	var ev2 *event.E
	if ev2, err = d.GetEventById(ev.GetIdBytes()); err != nil {
		// the event was not found, so we can proceed to store it
	}
	if ev2 != nil {
		// we found it; reject the duplicate
		if ev.Id == ev2.Id {
			err = errorf.E("duplicate event")
			return
		}
	}
	var ser *varint.V
	var idxs [][]byte
	if idxs, ser, err = d.GetEventIndexes(ev); chk.E(err) {
		return
	}
	ts := &timestamp.T{}
	ts.FromInt64(time.Now().Unix())
	// FirstSeen
	fsI := new(bytes.Buffer)
	if err = indexes.FirstSeenEnc(ser, ts).MarshalWrite(fsI); chk.E(err) {
		return
	}
	idxs = append(idxs, fsI.Bytes())
	// write indexes; none of the above have values.
	for _, v := range idxs {
		if err = d.Set(v, nil); chk.E(err) {
			return
		}
	}
	// LastAccessed
	laI := new(bytes.Buffer)
	if err = indexes.LastAccessedEnc(ser).MarshalWrite(laI); chk.E(err) {
		return
	}
	var tsb []byte
	if tsb, err = ts.Bytes(); chk.E(err) {
		return
	}
	if err = d.Set(laI.Bytes(), tsb); chk.E(err) {
		return
	}
	// AccessCounter
	acI := new(bytes.Buffer)
	if err = indexes.AccessCounterEnc(ser).MarshalWrite(acI); chk.E(err) {
		return
	}
	ac := varint.New()
	if err = d.Set(acI.Bytes(), ac.Bytes()); chk.E(err) {
		return
	}
	// lastly, the event
	evk := new(bytes.Buffer)
	if err = indexes.EventEnc(ser).MarshalWrite(evk); chk.E(err) {
		return
	}
	evV := new(bytes.Buffer)
	if err = ev.MarshalWrite(evV); chk.E(err) {
		return
	}
	if err = d.Set(evk.Bytes(), evV.Bytes()); chk.E(err) {
		return
	}
	return
}
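A read-side sketch to mirror the write above (illustrative only, in package database; getEventBySerial is a hypothetical helper, and it assumes the same indexes.EventEnc key layout StoreEvent uses):

// illustrative only, not part of the diff
func (d *D) getEventBySerial(ser *varint.V) (ev *event.E, err error) {
	evk := new(bytes.Buffer)
	if err = indexes.EventEnc(ser).MarshalWrite(evk); chk.E(err) {
		return
	}
	err = d.View(func(txn *badger.Txn) (err error) {
		item, err := txn.Get(evk.Bytes())
		if err != nil {
			return err
		}
		return item.Value(func(val []byte) (err error) {
			// events are stored in the binary form written by ev.MarshalWrite
			ev = event.New()
			return ev.UnmarshalRead(bytes.NewBuffer(val))
		})
	})
	return
}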
167
event/binary.go
Normal file
@@ -0,0 +1,167 @@
package event

import (
	"io"

	"x.realy.lol/chk"
	"x.realy.lol/ec/schnorr"
	"x.realy.lol/hex"
	"x.realy.lol/timestamp"
	"x.realy.lol/varint"
)

// todo: maybe we should make e and p tag values binary to reduce space usage

// MarshalWrite writes a binary encoding of an event.
//
// NOTE: Event must not be nil or this will panic. Use event.New.
//
// [ 32 bytes Id ]
// [ 32 bytes Pubkey ]
// [ varint CreatedAt ]
// [ varint Kind ]
// [ varint Tags length ]
//
// [ varint tag length ]
// [ varint tag element length ]
// [ tag element data ]
// ...
//
// [ varint Content length ]
// [ Content data ]
// [ 64 bytes Sig ]
func (ev *E) MarshalWrite(w io.Writer) (err error) {
	if ev == nil {
		panic("cannot marshal a nil event")
	}
	_, _ = w.Write(ev.GetIdBytes())
	_, _ = w.Write(ev.GetPubkeyBytes())
	varint.Encode(w, uint64(ev.CreatedAt))
	varint.Encode(w, uint64(ev.Kind))
	varint.Encode(w, uint64(len(ev.Tags)))
	for _, x := range ev.Tags {
		varint.Encode(w, uint64(len(x)))
		// e and p tag values should be hex
		var isBin bool
		if len(x) > 1 && (x[0] == "e" || x[0] == "p") {
			isBin = true
		}
		for i, y := range x {
			if i == 1 && isBin {
				var b []byte
				if b, err = hex.Dec(y); err != nil {
					b = []byte(y)
					// non-hex "p" or "e" tags have a 1 prefix to indicate not to hex decode.
					_, _ = w.Write([]byte{1})
					err = nil
				} else {
					if len(b) != 32 {
						// err = errorf.E("e or p tag value with invalid decoded byte length %d '%0x'", len(b), b)
						b = []byte(y)
						_, _ = w.Write([]byte{1})
					} else {
						// hex values have a 2 prefix
						_, _ = w.Write([]byte{2})
					}
				}
				varint.Encode(w, uint64(len(b)))
				_, _ = w.Write(b)
			} else {
				varint.Encode(w, uint64(len(y)))
				_, _ = w.Write([]byte(y))
			}
		}
	}
	varint.Encode(w, uint64(len(ev.Content)))
	_, _ = w.Write([]byte(ev.Content))
	_, _ = w.Write(ev.GetSigBytes())
	return err
}

// UnmarshalRead decodes an event in binary form into an allocated event struct.
//
// NOTE: Event must not be nil or this will panic. Use event.New.
func (ev *E) UnmarshalRead(r io.Reader) (err error) {
	if ev == nil {
		panic("cannot unmarshal into nil event struct")
	}
	// io.ReadFull is used throughout so short reads surface as errors
	// rather than leaving fields partially filled.
	id := make([]byte, 32)
	if _, err = io.ReadFull(r, id); chk.E(err) {
		return
	}
	ev.Id = hex.Enc(id)
	pubkey := make([]byte, 32)
	if _, err = io.ReadFull(r, pubkey); chk.E(err) {
		return
	}
	ev.Pubkey = hex.Enc(pubkey)
	var ca uint64
	if ca, err = varint.Decode(r); chk.E(err) {
		return
	}
	ev.CreatedAt = timestamp.New(ca)
	var k uint64
	if k, err = varint.Decode(r); chk.E(err) {
		return
	}
	ev.Kind = int(k)
	var nTags uint64
	if nTags, err = varint.Decode(r); chk.E(err) {
		return
	}
	for range nTags {
		var nField uint64
		if nField, err = varint.Decode(r); chk.E(err) {
			return
		}
		var t []string
		var isBin bool
		for i := range nField {
			var pr byte
			if i == 1 && isBin {
				prf := make([]byte, 1)
				if _, err = io.ReadFull(r, prf); chk.E(err) {
					return
				}
				pr = prf[0]
			}
			var lenField uint64
			if lenField, err = varint.Decode(r); chk.E(err) {
				return
			}
			field := make([]byte, lenField)
			if _, err = io.ReadFull(r, field); chk.E(err) {
				return
			}
			// if it is the first field, length 1 and is e or p, the value field should be binary
			if i == 0 && len(field) == 1 && (field[0] == 'e' || field[0] == 'p') {
				isBin = true
			}
			if i == 1 && isBin {
				if pr == 2 {
					// this is a binary value, was an e or p tag key, 32 bytes long, encode
					// value field to hex
					f := make([]byte, 64)
					_ = hex.EncBytes(f, field)
					field = f
				}
			}
			t = append(t, string(field))
		}
		ev.Tags = append(ev.Tags, t)
	}
	var cLen uint64
	if cLen, err = varint.Decode(r); chk.E(err) {
		return
	}
	content := make([]byte, cLen)
	if _, err = io.ReadFull(r, content); chk.E(err) {
		return
	}
	ev.Content = string(content)
	sig := make([]byte, schnorr.SignatureSize)
	if _, err = io.ReadFull(r, sig); chk.E(err) {
		return
	}
	ev.Sig = hex.Enc(sig)
	return
}
43
event/binary_test.go
Normal file
@@ -0,0 +1,43 @@
package event

import (
	"bufio"
	"bytes"
	"testing"
	"time"

	"x.realy.lol/chk"
	"x.realy.lol/event/examples"
)

func TestTMarshalBinary_UnmarshalBinary(t *testing.T) {
	scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
	var err error
	buf := new(bytes.Buffer)
	ea, eb := New(), New()
	now := time.Now()
	var counter int
	for scanner.Scan() {
		b := scanner.Bytes()
		c := make([]byte, 0, len(b))
		c = append(c, b...)
		if err = ea.Unmarshal(c); chk.E(err) {
			t.Fatal(err)
		}
		if err = ea.MarshalWrite(buf); chk.E(err) {
			t.Fatal(err)
		}
		buf2 := bytes.NewBuffer(buf.Bytes())
		if err = eb.UnmarshalRead(buf2); chk.E(err) {
			t.Fatal(err)
		}
		// reset the buffer so each iteration decodes the current event; the
		// original accumulated every event and re-decoded only the first.
		buf.Reset()
		counter++
	}
	t.Logf("unmarshaled json, marshaled binary, unmarshaled binary, "+
		"%d events in %v av %v per event",
		counter, time.Since(now), time.Since(now)/time.Duration(counter))
}
event/event.go
@@ -6,37 +6,69 @@ import (
	"fmt"
	"reflect"

	"github.com/minio/sha256-simd"

	"x.realy.lol/chk"
	"x.realy.lol/errorf"
	"x.realy.lol/helpers"
	"x.realy.lol/hex"
	"x.realy.lol/log"
	"x.realy.lol/p256k"
	"x.realy.lol/signer"
	"x.realy.lol/tags"
	"x.realy.lol/text"
	"x.realy.lol/timestamp"
)

type E struct {
	Id        string     `json:"id"`
	Pubkey    string     `json:"pubkey"`
	CreatedAt int64      `json:"created_at"`
	Kind      uint16     `json:"kind`
	Tags      [][]string `json:"tags"`
	Content   string     `json:"content"`
	Sig       string     `json:"sig"`
	Id        string              `json:"id"`
	Pubkey    string              `json:"pubkey"`
	CreatedAt timestamp.Timestamp `json:"created_at"`
	Kind      int                 `json:"kind"`
	Tags      tags.Tags           `json:"tags"`
	Content   string              `json:"content"`
	Sig       string              `json:"sig"`
}

func New() (ev *E) { return &E{} }

func (ev *E) Marshal() (b []byte, err error) {
	if b, err = json.Marshal(ev); chk.E(err) {
func (ev *E) IdBytes() (idBytes []byte, err error) {
	if idBytes, err = hex.Dec(ev.Id); chk.E(err) {
		return
	}
	return
}

func (ev *E) PubBytes() (pubBytes []byte, err error) {
	if pubBytes, err = hex.Dec(ev.Pubkey); chk.E(err) {
		return
	}
	return
}

func (ev *E) SigBytes() (sigBytes []byte, err error) {
	if sigBytes, err = hex.Dec(ev.Sig); chk.E(err) {
		return
	}
	return
}

func (ev *E) Marshal() (b []byte, err error) {
	if ev == nil {
		panic("cannot marshal a nil event")
	}
	if b, err = json.Marshal(ev); chk.E(err) {
		return
	}
	// there is a problem with some specific characters here
	b = bytes.ReplaceAll(b, []byte("\\u0026"), []byte("&"))
	return
}

func (ev *E) Unmarshal(b []byte) (err error) {
	if ev == nil {
		panic("cannot unmarshal into a nil event")
	}
	// there is a problem with some specific characters here
	b = bytes.ReplaceAll(b, []byte("\\u0026"), []byte("&"))
	if err = json.Unmarshal(b, ev); chk.E(err) {
		return
	}
@@ -47,9 +79,14 @@ func (ev *E) Unmarshal(b []byte) (err error) {
// logging.
func (ev *E) Serialize() (b []byte) {
	var err error
	if len(ev.Tags) == 1 && len(ev.Tags[0]) == 1 {
		ev.Tags = ev.Tags[:0]
	}
	if b, err = json.Marshal(ev); chk.E(err) {
		return
	}
	// there is a problem with some specific characters here
	b = bytes.ReplaceAll(b, []byte("\\u0026"), []byte("&"))
	return
}

@@ -79,7 +116,7 @@ func (ev *E) Verify() (valid bool, err error) {
	// check that this isn't because of a bogus Id
	id := ev.GenIdBytes()
	if !bytes.Equal(id, ev.GetIdBytes()) {
		log.E.Ln("event Id incorrect")
		log.E.F("event Id incorrect\n%s\n%s", ev.Serialize(), ev.ToCanonical(nil))
		ev.Id = hex.Enc(id)
		err = nil
		if valid, err = keys.Verify(ev.GetIdBytes(), ev.GetSigBytes()); chk.E(err) {
@@ -114,7 +151,7 @@ func (ev *E) ToCanonical(dst []byte) (b []byte) {
func (ev *E) GenIdBytes() (b []byte) {
	var can []byte
	can = ev.ToCanonical(can)
	return Hash(can)
	return helpers.Hash(can)
}

func (ev *E) GetIdBytes() (i []byte) {
@@ -143,7 +180,7 @@ func (ev *E) GetPubkeyBytes() (p []byte) {

func (ev *E) IdHex() (idHex string) {
	can := ev.ToCanonical(nil)
	idHex = hex.Enc(Hash(can))
	idHex = hex.Enc(helpers.Hash(can))
	return
}

@@ -152,13 +189,6 @@ func (ev *E) CheckId() (ok bool) {
	return idHex == ev.Id
}

// Hash is a little helper generate a hash and return a slice instead of an
// array.
func Hash(in []byte) (out []byte) {
	h := sha256.Sum256(in)
	return h[:]
}

// this is an absolute minimum length canonical encoded event
var minimal = len(`[0,"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",1733739427,0,[],""]`)

@@ -189,13 +219,13 @@ func (ev *E) FromCanonical(b []byte) (err error) {
		err = errorf.E("failed to get created_at value, got type %v expected float64", reflect.TypeOf(un[2]))
		return
	}
	ev.CreatedAt = int64(createdAt)
	ev.CreatedAt = timestamp.New(createdAt)
	var kind float64
	if kind, ok = un[3].(float64); !ok {
		err = errorf.E("failed to get kind value, got type %v expected float64", reflect.TypeOf(un[3]))
		return
	}
	ev.Kind = uint16(kind)
	ev.Kind = int(kind)
	var tags []any
	if tags, ok = un[4].([]any); !ok {
		err = errorf.E("failed to get tags value, got type %v expected []interface", reflect.TypeOf(un[4]))
@@ -211,8 +241,8 @@ func (ev *E) FromCanonical(b []byte) (err error) {
	return
}

func FromSliceInterface(in []any) (tags [][]string, err error) {
	tags = make([][]string, 0)
func FromSliceInterface(in []any) (t tags.Tags, err error) {
	t = make(tags.Tags, 0)
	for _, v := range in {
		var ok bool
		var vv []any
@@ -229,7 +259,7 @@ func FromSliceInterface(in []any) (tags [][]string, err error) {
		}
		tag = append(tag, x)
	}
	tags = append(tags, tag)
	t = append(t, tag)
	}
	return
}

213
filter/filter.go
Normal file
@@ -0,0 +1,213 @@
package filter

import (
	"encoding/json"
	"slices"

	"x.realy.lol/event"
	"x.realy.lol/helpers"
	"x.realy.lol/kind"
	"x.realy.lol/timestamp"
)

type S []F

type F struct {
	Ids     []string
	Kinds   []int
	Authors []string
	Tags    TagMap
	Since   *timestamp.Timestamp
	Until   *timestamp.Timestamp
	Limit   *int
	Search  string
}

type TagMap map[string][]string

func (eff S) String() string {
	j, _ := json.Marshal(eff)
	return string(j)
}

func (eff S) Match(event *event.E) bool {
	for _, filter := range eff {
		if filter.Matches(event) {
			return true
		}
	}
	return false
}

func (eff S) MatchIgnoringTimestampConstraints(event *event.E) bool {
	for _, filter := range eff {
		if filter.MatchesIgnoringTimestampConstraints(event) {
			return true
		}
	}
	return false
}

func (ef F) String() string {
	j, _ := json.Marshal(ef)
	return string(j)
}

func (ef F) Matches(event *event.E) bool {
	if !ef.MatchesIgnoringTimestampConstraints(event) {
		return false
	}

	if ef.Since != nil && event.CreatedAt < *ef.Since {
		return false
	}

	if ef.Until != nil && event.CreatedAt > *ef.Until {
		return false
	}

	return true
}

func (ef F) MatchesIgnoringTimestampConstraints(event *event.E) bool {
	if event == nil {
		return false
	}

	if ef.Ids != nil && !slices.Contains(ef.Ids, event.Id) {
		return false
	}

	if ef.Kinds != nil && !slices.Contains(ef.Kinds, event.Kind) {
		return false
	}

	if ef.Authors != nil && !slices.Contains(ef.Authors, event.Pubkey) {
		return false
	}

	for f, v := range ef.Tags {
		if v != nil && !event.Tags.ContainsAny(f, v) {
			return false
		}
	}

	return true
}

func FilterEqual(a F, b F) bool {
	if !helpers.Similar(a.Kinds, b.Kinds) {
		return false
	}

	if !helpers.Similar(a.Ids, b.Ids) {
		return false
	}

	if !helpers.Similar(a.Authors, b.Authors) {
		return false
	}

	if len(a.Tags) != len(b.Tags) {
		return false
	}

	for f, av := range a.Tags {
		if bv, ok := b.Tags[f]; !ok {
			return false
		} else {
			if !helpers.Similar(av, bv) {
				return false
			}
		}
	}

	if !helpers.ArePointerValuesEqual(a.Since, b.Since) {
		return false
	}

	if !helpers.ArePointerValuesEqual(a.Until, b.Until) {
		return false
	}

	if a.Search != b.Search {
		return false
	}

	return true
}

func (ef F) Clone() F {
	clone := F{
		Ids:     slices.Clone(ef.Ids),
		Authors: slices.Clone(ef.Authors),
		Kinds:   slices.Clone(ef.Kinds),
		Limit:   ef.Limit,
		Search:  ef.Search,
	}

	if ef.Tags != nil {
		clone.Tags = make(TagMap, len(ef.Tags))
		for k, v := range ef.Tags {
			clone.Tags[k] = slices.Clone(v)
		}
	}

	if ef.Since != nil {
		since := *ef.Since
		clone.Since = &since
	}

	if ef.Until != nil {
		until := *ef.Until
		clone.Until = &until
	}

	return clone
}

// GetTheoreticalLimit gets the maximum number of events that a normal filter would ever return; for example, if
// there is a number of "ids" in the filter, the theoretical limit will be that number of ids.
//
// It returns -1 if there is no theoretical limit.
//
// The .Limit field present in the filter is ignored.
func GetTheoreticalLimit(filter F) int {
	if len(filter.Ids) > 0 {
		return len(filter.Ids)
	}

	if len(filter.Kinds) == 0 {
		return -1
	}

	if len(filter.Authors) > 0 {
		allAreReplaceable := true
		for _, k := range filter.Kinds {
			if !kind.IsReplaceableKind(k) {
				allAreReplaceable = false
				break
			}
		}
		if allAreReplaceable {
			return len(filter.Authors) * len(filter.Kinds)
		}

		if len(filter.Tags["d"]) > 0 {
			allAreAddressable := true
			for _, k := range filter.Kinds {
				if !kind.IsAddressableKind(k) {
					allAreAddressable = false
					break
				}
			}
			if allAreAddressable {
				return len(filter.Authors) * len(filter.Kinds) * len(filter.Tags["d"])
			}
		}
	}

	return -1
}

func IntToPointer(i int) (ptr *int) { return &i }
187
filter/filter_test.go
Normal file
@@ -0,0 +1,187 @@
package filter

import (
	"encoding/json"
	"slices"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"x.realy.lol/chk"
	"x.realy.lol/event"
	"x.realy.lol/kind"
	"x.realy.lol/log"
	"x.realy.lol/timestamp"
)

func TestFilterUnmarshal(t *testing.T) {
	raw := `{"ids": ["abc"],"#e":["zzz"],"#something":["nothing","bab"],"since":1644254609,"search":"test"}`
	var f F
	err := json.Unmarshal([]byte(raw), &f)
	assert.NoError(t, err)

	assert.Condition(t, func() (success bool) {
		if f.Since == nil || f.Since.Time().UTC().Format("2006-01-02") != "2022-02-07" ||
			f.Until != nil ||
			f.Tags == nil || len(f.Tags) != 2 || !slices.Contains(f.Tags["something"], "bab") ||
			f.Search != "test" {
			return false
		}
		return true
	}, "failed to parse filter correctly")
}

func TestFilterMarshal(t *testing.T) {
	until := timestamp.Timestamp(12345678)
	filterj, err := json.Marshal(F{
		Kinds: []int{kind.TextNote, kind.RecommendServer, kind.EncryptedDirectMessage},
		Tags:  TagMap{"fruit": {"banana", "mango"}},
		Until: &until,
	})
	assert.NoError(t, err)

	expected := `{"kinds":[1,2,4],"until":12345678,"#fruit":["banana","mango"]}`
	assert.Equal(t, expected, string(filterj))
}

func TestFilterUnmarshalWithLimitZero(t *testing.T) {
	raw := `{"ids": ["abc"],"#e":["zzz"],"limit":0,"#something":["nothing","bab"],"since":1644254609,"search":"test"}`
	var f F
	err := json.Unmarshal([]byte(raw), &f)
	assert.NoError(t, err)

	assert.Condition(t, func() (success bool) {
		if f.Since == nil ||
			f.Since.Time().UTC().Format("2006-01-02") != "2022-02-07" ||
			f.Until != nil ||
			f.Tags == nil || len(f.Tags) != 2 || !slices.Contains(f.Tags["something"], "bab") ||
			f.Search != "test" {
			return false
		}
		return true
	}, "failed to parse filter correctly")
}

func TestFilterMarshalWithLimitZero(t *testing.T) {
	until := timestamp.Timestamp(12345678)
	filterj, err := json.Marshal(F{
		Kinds: []int{kind.TextNote, kind.RecommendServer, kind.EncryptedDirectMessage},
		Tags:  TagMap{"fruit": {"banana", "mango"}},
		Until: &until,
	})
	assert.NoError(t, err)

	expected := `{"kinds":[1,2,4],"until":12345678,"limit":0,"#fruit":["banana","mango"]}`
	assert.Equal(t, expected, string(filterj))
}

func TestFilterMatchingLive(t *testing.T) {
	var filter F
	var event event.E

	json.Unmarshal([]byte(`{"kinds":[1],"authors":["a8171781fd9e90ede3ea44ddca5d3abf828fe8eedeb0f3abb0dd3e563562e1fc","1d80e5588de010d137a67c42b03717595f5f510e73e42cfc48f31bae91844d59","ed4ca520e9929dfe9efdadf4011b53d30afd0678a09aa026927e60e7a45d9244"],"since":1677033299}`), &filter)
	json.Unmarshal([]byte(`{"id":"5a127c9c931f392f6afc7fdb74e8be01c34035314735a6b97d2cf360d13cfb94","pubkey":"1d80e5588de010d137a67c42b03717595f5f510e73e42cfc48f31bae91844d59","created_at":1677033299,"kind":1,"tags":[["t","japan"]],"content":"If you like my art,I'd appreciate a coin or two!!\nZap is welcome!! Thanks.\n\n\n#japan #bitcoin #art #bananaart\nhttps://void.cat/d/CgM1bzDgHUCtiNNwfX9ajY.webp","sig":"828497508487ca1e374f6b4f2bba7487bc09fccd5cc0d1baa82846a944f8c5766918abf5878a580f1e6615de91f5b57a32e34c42ee2747c983aaf47dbf2a0255"}`), &event)

	assert.True(t, filter.Matches(&event), "live filter should match")
}

func TestFilterEquality(t *testing.T) {
	assert.True(t, FilterEqual(
		F{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion}},
		F{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion}},
	), "kinds filters should be equal")

	assert.True(t, FilterEqual(
		F{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion}, Tags: TagMap{"letter": {"a", "b"}}},
		F{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion}, Tags: TagMap{"letter": {"b", "a"}}},
	), "kind+tags filters should be equal")

	tm := timestamp.Now()
	assert.True(t, FilterEqual(
		F{
			Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion},
			Tags:  TagMap{"letter": {"a", "b"}, "fruit": {"banana"}},
			Since: &tm,
			Ids:   []string{"aaaa", "bbbb"},
		},
		F{
			Kinds: []int{kind.Deletion, kind.EncryptedDirectMessage},
			Tags:  TagMap{"letter": {"a", "b"}, "fruit": {"banana"}},
			Since: &tm,
			Ids:   []string{"aaaa", "bbbb"},
		},
	), "kind+2tags+since+ids filters should be equal")

	assert.False(t, FilterEqual(
		F{Kinds: []int{kind.TextNote, kind.EncryptedDirectMessage, kind.Deletion}},
		F{Kinds: []int{kind.EncryptedDirectMessage, kind.Deletion, kind.Repost}},
	), "kinds filters shouldn't be equal")
}

func TestFilterClone(t *testing.T) {
	ts := timestamp.Now() - 60*60
	flt := F{
		Kinds: []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
		Tags:  TagMap{"letter": {"a", "b"}, "fruit": {"banana"}},
		Since: &ts,
		Ids:   []string{"9894b4b5cb5166d23ee8899a4151cf0c66aec00bde101982a13b8e8ceb972df9"},
	}
	clone := flt.Clone()
	assert.True(t, FilterEqual(flt, clone), "clone is not equal:\n %v !=\n %v", flt, clone)

	clone1 := flt.Clone()
	clone1.Ids = append(clone1.Ids, "88f0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d")
	assert.False(t, FilterEqual(flt, clone1), "modifying the clone ids should cause it to not be equal anymore")

	clone2 := flt.Clone()
	clone2.Tags["letter"] = append(clone2.Tags["letter"], "c")
	assert.False(t, FilterEqual(flt, clone2), "modifying the clone tag items should cause it to not be equal anymore")

	clone3 := flt.Clone()
	clone3.Tags["g"] = []string{"drt"}
	assert.False(t, FilterEqual(flt, clone3), "modifying the clone tag map should cause it to not be equal anymore")

	clone4 := flt.Clone()
	*clone4.Since++
	assert.False(t, FilterEqual(flt, clone4), "modifying the clone since should cause it to not be equal anymore")
}

func TestTheoreticalLimit(t *testing.T) {
	require.Equal(t, 6, GetTheoreticalLimit(F{Ids: []string{"a", "b", "c", "d", "e", "f"}}))
	require.Equal(t, 9, GetTheoreticalLimit(F{Authors: []string{"a", "b", "c"}, Kinds: []int{3, 0, 10002}}))
	require.Equal(t, 4, GetTheoreticalLimit(F{Authors: []string{"a", "b", "c", "d"}, Kinds: []int{10050}}))
	require.Equal(t, -1, GetTheoreticalLimit(F{Authors: []string{"a", "b", "c", "d"}}))
	require.Equal(t, -1, GetTheoreticalLimit(F{Kinds: []int{3, 0, 10002}}))
	require.Equal(t, 24, GetTheoreticalLimit(F{Authors: []string{"a", "b", "c", "d", "e", "f"}, Kinds: []int{30023, 30024}, Tags: TagMap{"d": []string{"aaa", "bbb"}}}))
	require.Equal(t, -1, GetTheoreticalLimit(F{Authors: []string{"a", "b", "c", "d", "e", "f"}, Kinds: []int{30023, 30024}}))
}

func TestFilter(t *testing.T) {
	ts := timestamp.Now() - 60*60
	now := timestamp.Now()
	flt := &F{
		Authors: []string{"1d80e5588de010d137a67c42b03717595f5f510e73e42cfc48f31bae91844d59"},
		Kinds:   []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
		Tags: TagMap{
			"#t": {"a", "b"},
			"#e": {"9894b4b5cb5166d23ee8899a4151cf0c66aec00bde101982a13b8e8ceb972df9"},
			"#p": {"1d80e5588de010d137a67c42b03717595f5f510e73e42cfc48f31bae91844d59"},
		},
		Until: &now,
		Since: &ts,
		Ids:   []string{"9894b4b5cb5166d23ee8899a4151cf0c66aec00bde101982a13b8e8ceb972df9"},
		// Limit: IntToPointer(10),
	}
	var err error
	var b []byte
	if b, err = json.Marshal(flt); chk.E(err) {
		t.Fatal(err)
	}
	log.I.F("%s", b)
	var f2 F
	if err = json.Unmarshal(b, &f2); chk.E(err) {
		t.Fatal(err)
	}
	log.I.S(f2)
}
29
go.mod
@@ -4,33 +4,52 @@ go 1.24.3

require (
	github.com/alexflint/go-arg v1.5.1
	github.com/clipperhouse/uax29 v1.14.3
	github.com/davecgh/go-spew v1.1.1
	github.com/dgraph-io/badger/v4 v4.7.0
	github.com/fatih/color v1.18.0
	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
	github.com/mailru/easyjson v0.9.0
	github.com/minio/sha256-simd v1.0.1
	github.com/pkg/profile v1.7.0
	github.com/stretchr/testify v1.10.0
	github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b
	go-simpler.org/env v0.12.0
	golang.org/x/exp v0.0.0-20250530174510-65e920069ea6
	golang.org/x/lint v0.0.0-20241112194109-818c5a804067
	honnef.co/go/tools v0.6.1
	lukechampine.com/frand v1.5.1
)

require (
	github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
	github.com/BurntSushi/toml v1.5.0 // indirect
	github.com/alexflint/go-scalar v1.2.0 // indirect
	github.com/cespare/xxhash/v2 v2.3.0 // indirect
	github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
	github.com/dustin/go-humanize v1.0.1 // indirect
	github.com/felixge/fgprof v0.9.5 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/google/flatbuffers v25.2.10+incompatible // indirect
	github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/klauspost/compress v1.18.0 // indirect
	github.com/klauspost/cpuid/v2 v2.2.10 // indirect
	github.com/mattn/go-colorable v0.1.14 // indirect
	github.com/mattn/go-isatty v0.0.20 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/templexxx/cpu v0.1.1 // indirect
	golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 // indirect
	golang.org/x/mod v0.23.0 // indirect
	golang.org/x/sync v0.11.0 // indirect
	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
	go.opentelemetry.io/otel v1.35.0 // indirect
	go.opentelemetry.io/otel/metric v1.35.0 // indirect
	go.opentelemetry.io/otel/trace v1.35.0 // indirect
	golang.org/x/exp/typeparams v0.0.0-20250530174510-65e920069ea6 // indirect
	golang.org/x/mod v0.24.0 // indirect
	golang.org/x/net v0.40.0 // indirect
	golang.org/x/sync v0.14.0 // indirect
	golang.org/x/sys v0.33.0 // indirect
	golang.org/x/tools v0.30.0 // indirect
	golang.org/x/text v0.25.0 // indirect
	golang.org/x/tools v0.33.0 // indirect
	google.golang.org/protobuf v1.36.6 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)

73
go.sum
@@ -1,9 +1,11 @@
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/kWtOwnv/G+AzdKuy2ZrqINhenH4HyNs=
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/alexflint/go-arg v1.5.1 h1:nBuWUCpuRy0snAG+uIJ6N0UvYxpxA0/ghA/AaHxlT8Y=
github.com/alexflint/go-arg v1.5.1/go.mod h1:A7vTJzvjoaSTypg4biM5uYNTkJ27SkNTArtYXnlqVO8=
github.com/alexflint/go-scalar v1.2.0 h1:WR7JPKkeNpnYIOfHRa7ivM21aWAdHD0gEWHCx+WQBRw=
github.com/alexflint/go-scalar v1.2.0/go.mod h1:LoFvNMqS1CPrMVltza4LvnGKhaSpc3oyLEBUZVhhS2o=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
@@ -13,32 +15,58 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/clipperhouse/uax29 v1.14.3 h1:pJ0hZWycgsBrJ8SSsvPCrlMTpW8C+fdcA/0mehFDCU0=
github.com/clipperhouse/uax29 v1.14.3/go.mod h1:paNABhygWmmjkg0ROxKQoenJAX4dM9AS8biVkXmAK0c=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgraph-io/badger/v4 v4.7.0 h1:Q+J8HApYAY7UMpL8d9owqiB+odzEc0zn/aqOD9jhc6Y=
github.com/dgraph-io/badger/v4 v4.7.0/go.mod h1:He7TzG3YBy3j4f5baj5B7Zl2XyfNe5bl4Udl0aPemVA=
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38=
github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4=
github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
@@ -50,6 +78,8 @@ github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@@ -64,20 +94,32 @@ github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3W
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ=
go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs=
go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678 h1:1P7xPZEwZMoBoz0Yze5Nx2/4pxj6nw9ZqHWXqP0iRgQ=
golang.org/x/exp/typeparams v0.0.0-20231108232855-2478ac86f678/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp v0.0.0-20250530174510-65e920069ea6 h1:gllJVKwONftmCc4KlNbN8o/LvmbxotqQy6zzi6yDQOQ=
golang.org/x/exp v0.0.0-20250530174510-65e920069ea6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
golang.org/x/exp/typeparams v0.0.0-20250530174510-65e920069ea6 h1:Gq937g8bNiCnWB/wsoyxuxnfDpAE9cpYo4sLIp9t0LA=
golang.org/x/exp/typeparams v0.0.0-20250530174510-65e920069ea6/go.mod h1:LKZHyeOpPuZcMgxeHjJp4p5yvxrCX1xDvH10zYHhjjQ=
golang.org/x/lint v0.0.0-20241112194109-818c5a804067 h1:adDmSQyFTCiv19j015EGKJBoaa7ElV0Q1Wovb/4G7NA=
golang.org/x/lint v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -86,12 +128,17 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
||||
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
138 helpers/helpers.go Normal file
@@ -0,0 +1,138 @@
package helpers

import (
	"slices"
	"strconv"
	"strings"
	"sync"
	"unsafe"

	"github.com/minio/sha256-simd"
	"golang.org/x/exp/constraints"
)

const MAX_LOCKS = 50

var (
	namedMutexPool = make([]sync.Mutex, MAX_LOCKS)
)

//go:noescape
//go:linkname memhash runtime.memhash
func memhash(p unsafe.Pointer, h, s uintptr) uintptr

func NamedLock(name string) (unlock func()) {
	sptr := unsafe.StringData(name)
	idx := uint64(memhash(unsafe.Pointer(sptr), 0, uintptr(len(name)))) % MAX_LOCKS
	namedMutexPool[idx].Lock()
	return namedMutexPool[idx].Unlock
}

func Similar[E constraints.Ordered](as, bs []E) bool {
	if len(as) != len(bs) {
		return false
	}

	for _, a := range as {
		for _, b := range bs {
			if b == a {
				goto next
			}
		}
		// didn't find a B that corresponded to the current A
		return false

	next:
		continue
	}

	return true
}

// EscapeString for JSON encoding according to RFC8259.
// Also encloses result in quotation marks "".
func EscapeString(dst []byte, s string) []byte {
	dst = append(dst, '"')
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case c == '"':
			// quotation mark
			dst = append(dst, []byte{'\\', '"'}...)
		case c == '\\':
			// reverse solidus
			dst = append(dst, []byte{'\\', '\\'}...)
		case c >= 0x20:
			// default, rest below are control chars
			dst = append(dst, c)
		case c == 0x08:
			dst = append(dst, []byte{'\\', 'b'}...)
		case c < 0x09:
			dst = append(dst, []byte{'\\', 'u', '0', '0', '0', '0' + c}...)
		case c == 0x09:
			dst = append(dst, []byte{'\\', 't'}...)
		case c == 0x0a:
			dst = append(dst, []byte{'\\', 'n'}...)
		case c == 0x0c:
			dst = append(dst, []byte{'\\', 'f'}...)
		case c == 0x0d:
			dst = append(dst, []byte{'\\', 'r'}...)
		case c < 0x10:
			dst = append(dst, []byte{'\\', 'u', '0', '0', '0', 0x57 + c}...)
		case c < 0x1a:
			dst = append(dst, []byte{'\\', 'u', '0', '0', '1', 0x20 + c}...)
		case c < 0x20:
			dst = append(dst, []byte{'\\', 'u', '0', '0', '1', 0x47 + c}...)
		}
	}
	dst = append(dst, '"')
	return dst
}

func ArePointerValuesEqual[V comparable](a *V, b *V) bool {
	if a == nil && b == nil {
		return true
	}
	if a != nil && b != nil {
		return *a == *b
	}
	return false
}

func SubIdToSerial(subId string) int64 {
	n := strings.Index(subId, ":")
	if n < 0 {
		return -1
	}
	serialId, _ := strconv.ParseInt(subId[0:n], 10, 64)
	return serialId
}

func IsLowerHex(thing string) bool {
	for _, charNumber := range thing {
		if (charNumber >= 48 && charNumber <= 57) || (charNumber >= 97 && charNumber <= 102) {
			continue
		}
		return false
	}
	return true
}

// Hash is a little helper to generate a hash and return a slice instead of an
// array.
func Hash(in []byte) (out []byte) {
	h := sha256.Sum256(in)
	return h[:]
}

// RemoveDuplicates removes repeated items in any slice of comparable items. This would not be
// appropriate for pointers unless they were assembled from the same source where a pointer is
// equal to a unique reference to the content.
func RemoveDuplicates[T comparable](s []T) []T {
	alreadySeen := make(map[T]struct{}, len(s))
	return slices.DeleteFunc(s, func(val T) bool {
		_, duplicate := alreadySeen[val]
		alreadySeen[val] = struct{}{}
		return duplicate
	})
}
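NamedLock hashes an arbitrary string onto one of MAX_LOCKS (50) mutexes using runtime.memhash, giving named mutual exclusion without maintaining a registry of locks; two distinct names have a 1-in-50 chance of sharing a mutex, which costs some contention but never correctness. A minimal usage sketch:

package main

import (
	"fmt"
	"sync"

	"x.realy.lol/helpers"
)

func main() {
	hits := 0
	var wg sync.WaitGroup
	for range 100 {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// every goroutine locks the same name, so they all share one
			// mutex from the pool and the increment is race-free
			unlock := helpers.NamedLock("hits")
			defer unlock()
			hits++
		}()
	}
	wg.Wait()
	fmt.Println(hits) // 100
}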
1 ints/base10k.txt Normal file
File diff suppressed because one or more lines are too long
20 ints/gen/pregen.go Normal file
@@ -0,0 +1,20 @@
// Package main is a generator for the base10000 (4 digit) encoding of the ints
// library.
package main

import (
	"fmt"
	"os"

	"x.realy.lol/chk"
)

func main() {
	fh, err := os.Create("pkg/ints/base10k.txt")
	if chk.E(err) {
		panic(err)
	}
	for i := range 10000 {
		fmt.Fprintf(fh, "%04d", i)
	}
}
134 ints/ints.go Normal file
@@ -0,0 +1,134 @@
// Package ints is an optimised encoder for decimal numbers in ASCII format,
// that simplifies and accelerates encoding and decoding decimal strings. It is
// faster than strconv in part because it uses a base of 10000 and a lookup
// table.
package ints

import (
	_ "embed"
	"io"

	"golang.org/x/exp/constraints"

	"x.realy.lol/errorf"
)

// run this to regenerate (pointlessly) the base 10,000 lookup table of 4 digits per entry
//go:generate go run ./gen/.

//go:embed base10k.txt
var base10k []byte

// T is an integer with a fast codec to decimal ASCII.
type T struct {
	N uint64
}

func New[V constraints.Integer](n V) *T {
	return &T{uint64(n)}
}

// Uint64 returns the ints.T as a uint64 (the base type).
func (n *T) Uint64() uint64 { return n.N }

// Int64 returns an int64 from the base number (may cause truncation).
func (n *T) Int64() int64 { return int64(n.N) }

// Uint16 returns a uint16 from the base number (may cause truncation).
func (n *T) Uint16() uint16 { return uint16(n.N) }

var powers = []*T{
	{1},
	{1_0000},
	{1_0000_0000},
	{1_0000_0000_0000},
	{1_0000_0000_0000_0000},
}

const zero = '0'
const nine = '9'

// Marshal the ints.T into a byte string.
func (n *T) Marshal(dst []byte) (b []byte) {
	nn := n.N
	b = dst
	if n.N == 0 {
		b = append(b, '0')
		return
	}
	var i int
	var trimmed bool
	k := len(powers)
	for k > 0 {
		k--
		q := n.N / powers[k].N
		if !trimmed && q == 0 {
			continue
		}
		offset := q * 4
		bb := base10k[offset : offset+4]
		if !trimmed {
			for i = range bb {
				if bb[i] != '0' {
					bb = bb[i:]
					trimmed = true
					break
				}
			}
		}
		b = append(b, bb...)
		n.N = n.N - q*powers[k].N
	}
	n.N = nn
	return
}

// Unmarshal reads a string, which must be a positive integer no larger than math.MaxUint64,
// skipping any non-numeric content before it.
//
// Note that leading zeros are not considered valid; there is basically no such thing as a
// machine-generated JSON integer with a leading zero. Until this is disproven, this is the
// fastest way to read a positive JSON integer: a leading zero is decoded as a zero, and the
// remainder returned.
func (n *T) Unmarshal(b []byte) (r []byte, err error) {
	if len(b) < 1 {
		err = errorf.E("zero length number")
		return
	}
	// reset the accumulator so a reused T decodes correctly
	n.N = 0
	var sLen int
	if b[0] == zero {
		r = b[1:]
		n.N = 0
		return
	}
	// skip non-number characters
	for i, v := range b {
		if v >= '0' && v <= '9' {
			b = b[i:]
			break
		}
	}
	if len(b) == 0 {
		err = io.EOF
		return
	}
	// count the digits
	for ; sLen < len(b) && b[sLen] >= zero && b[sLen] <= nine; sLen++ {
	}
	if sLen == 0 {
		err = errorf.E("zero length number")
		return
	}
	if sLen > 20 {
		err = errorf.E("too big number for uint64")
		return
	}
	// the length of the string found
	r = b[sLen:]
	b = b[:sLen]
	for _, ch := range b {
		ch -= zero
		n.N = n.N*10 + uint64(ch)
	}
	return
}
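To make the base-10,000 scheme concrete: Marshal divides by the powers 10^16, 10^12, 10^8, 10^4 and 1 in turn, copies each 4-digit quotient group straight out of the embedded 40,000-byte lookup table, and trims leading zeros from the first emitted group only, so 31337 produces the groups 3 and 1337. A small round-trip sketch:

package main

import (
	"fmt"

	"x.realy.lol/ints"
)

func main() {
	n := ints.New(31337)
	b := n.Marshal(nil)
	fmt.Printf("%s\n", b) // 31337

	// Unmarshal skips any non-digit prefix, reads the digit run, and
	// returns the remaining bytes
	m := ints.New(0)
	rem, err := m.Unmarshal([]byte(`"created_at":31337}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Uint64(), string(rem)) // 31337 }
}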
79 ints/ints_test.go Normal file
@@ -0,0 +1,79 @@
package ints

import (
	"math"
	"strconv"
	"testing"

	"lukechampine.com/frand"

	"x.realy.lol/chk"
)

func TestMarshalUnmarshal(t *testing.T) {
	b := make([]byte, 0, 8)
	var rem []byte
	var n *T
	var err error
	for range 10000000 {
		n = New(uint64(frand.Intn(math.MaxInt64)))
		b = n.Marshal(b)
		m := New(0)
		if rem, err = m.Unmarshal(b); chk.E(err) {
			t.Fatal(err)
		}
		if n.N != m.N {
			t.Fatalf("failed to convert to int64 at %d %s %d", n.N, b, m.N)
		}
		if len(rem) > 0 {
			t.Fatalf("leftover bytes after converting back: '%s'", rem)
		}
		b = b[:0]
	}
}

func BenchmarkByteStringToInt64(bb *testing.B) {
	b := make([]byte, 0, 19)
	var i int
	const nTests = 10000000
	testInts := make([]*T, nTests)
	for i = range nTests {
		testInts[i] = New(frand.Intn(math.MaxInt64))
	}
	bb.Run("Marshal", func(bb *testing.B) {
		bb.ReportAllocs()
		for i = 0; i < bb.N; i++ {
			n := testInts[i%10000]
			b = n.Marshal(b)
			b = b[:0]
		}
	})
	bb.Run("Itoa", func(bb *testing.B) {
		bb.ReportAllocs()
		var s string
		for i = 0; i < bb.N; i++ {
			n := testInts[i%10000]
			s = strconv.Itoa(int(n.N))
			_ = s
		}
	})
	bb.Run("MarshalUnmarshal", func(bb *testing.B) {
		bb.ReportAllocs()
		m := New(0)
		for i = 0; i < bb.N; i++ {
			n := testInts[i%10000]
			b = n.Marshal(b)
			_, _ = m.Unmarshal(b)
			b = b[:0]
		}
	})
	bb.Run("ItoaAtoi", func(bb *testing.B) {
		bb.ReportAllocs()
		var s string
		for i = 0; i < bb.N; i++ {
			n := testInts[i%10000]
			s = strconv.Itoa(int(n.N))
			_, _ = strconv.Atoi(s)
		}
	})
}
182 kind/kinds.go Normal file
@@ -0,0 +1,182 @@
package kind

const (
	ProfileMetadata = 0
	TextNote = 1
	RecommendServer = 2
	FollowList = 3
	EncryptedDirectMessage = 4
	Deletion = 5
	Repost = 6
	Reaction = 7
	BadgeAward = 8
	SimpleGroupChatMessage = 9
	SimpleGroupThreadedReply = 10
	SimpleGroupThread = 11
	SimpleGroupReply = 12
	Seal = 13
	DirectMessage = 14
	GenericRepost = 16
	ReactionToWebsite = 17
	ChannelCreation = 40
	ChannelMetadata = 41
	ChannelMessage = 42
	ChannelHideMessage = 43
	ChannelMuteUser = 44
	Chess = 64
	MergeRequests = 818
	Bid = 1021
	BidConfirmation = 1022
	OpenTimestamps = 1040
	GiftWrap = 1059
	FileMetadata = 1063
	LiveChatMessage = 1311
	Patch = 1617
	Issue = 1621
	Reply = 1622
	StatusOpen = 1630
	StatusApplied = 1631
	StatusClosed = 1632
	StatusDraft = 1633
	ProblemTracker = 1971
	Reporting = 1984
	Label = 1985
	RelayReviews = 1986
	AIEmbeddings = 1987
	Torrent = 2003
	TorrentComment = 2004
	CoinjoinPool = 2022
	CommunityPostApproval = 4550
	JobFeedback = 7000
	SimpleGroupPutUser = 9000
	SimpleGroupRemoveUser = 9001
	SimpleGroupEditMetadata = 9002
	SimpleGroupDeleteEvent = 9005
	SimpleGroupCreateGroup = 9007
	SimpleGroupDeleteGroup = 9008
	SimpleGroupCreateInvite = 9009
	SimpleGroupJoinRequest = 9021
	SimpleGroupLeaveRequest = 9022
	ZapGoal = 9041
	TidalLogin = 9467
	ZapRequest = 9734
	Zap = 9735
	Highlights = 9802
	MuteList = 10000
	PinList = 10001
	RelayListMetadata = 10002
	BookmarkList = 10003
	CommunityList = 10004
	PublicChatList = 10005
	BlockedRelayList = 10006
	SearchRelayList = 10007
	SimpleGroupList = 10009
	InterestList = 10015
	EmojiList = 10030
	DMRelayList = 10050
	UserServerList = 10063
	FileStorageServerList = 10096
	GoodWikiAuthorList = 10101
	GoodWikiRelayList = 10102
	NWCWalletInfo = 13194
	LightningPubRPC = 21000
	ClientAuthentication = 22242
	NWCWalletRequest = 23194
	NWCWalletResponse = 23195
	NostrConnect = 24133
	Blobs = 24242
	HTTPAuth = 27235
	CategorizedPeopleList = 30000
	CategorizedBookmarksList = 30001
	RelaySets = 30002
	BookmarkSets = 30003
	CuratedSets = 30004
	CuratedVideoSets = 30005
	MuteSets = 30007
	ProfileBadges = 30008
	BadgeDefinition = 30009
	InterestSets = 30015
	StallDefinition = 30017
	ProductDefinition = 30018
	MarketplaceUI = 30019
	ProductSoldAsAuction = 30020
	Article = 30023
	DraftArticle = 30024
	EmojiSets = 30030
	ModularArticleHeader = 30040
	ModularArticleContent = 30041
	ReleaseArtifactSets = 30063
	ApplicationSpecificData = 30078
	LiveEvent = 30311
	UserStatuses = 30315
	ClassifiedListing = 30402
	DraftClassifiedListing = 30403
	RepositoryAnnouncement = 30617
	RepositoryState = 30618
	SimpleGroupMetadata = 39000
	SimpleGroupAdmins = 39001
	SimpleGroupMembers = 39002
	SimpleGroupRoles = 39003
	WikiArticle = 30818
	Redirects = 30819
	Feed = 31890
	DateCalendarEvent = 31922
	TimeCalendarEvent = 31923
	Calendar = 31924
	CalendarEventRSVP = 31925
	HandlerRecommendation = 31989
	HandlerInformation = 31990
	VideoEvent = 34235
	ShortVideoEvent = 34236
	VideoViewEvent = 34237
	CommunityDefinition = 34550
)

func IsRegularKind(kind int) bool {
	return kind < 10000 && kind != 0 && kind != 3
}

func IsReplaceableKind(kind int) bool {
	return kind == 0 || kind == 3 || (10000 <= kind && kind < 20000)
}

func IsEphemeralKind(kind int) bool {
	return 20000 <= kind && kind < 30000
}

func IsAddressableKind(kind int) bool {
	return 30000 <= kind && kind < 40000
}

var Text = []int{
	ProfileMetadata,
	TextNote,
	Article,
	SimpleGroupThread,
	Reply,
	Repost,
	Issue,
	MergeRequests,
	WikiArticle,
	StatusOpen,
	StatusApplied,
	StatusClosed,
	StatusDraft,
	Torrent,
	TorrentComment,
	DateCalendarEvent,
	TimeCalendarEvent,
	Calendar,
	CalendarEventRSVP,
}

func IsText(ki int) bool {
	for _, v := range Text {
		if v == ki {
			return true
		}
	}
	return false
}
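The four range predicates encode how a relay is expected to store each event kind under the usual nostr conventions: regular events are appended as-is, replaceable kinds keep only the newest event per pubkey, ephemeral kinds are relayed but never stored, and addressable (parameterized replaceable) kinds are keyed by pubkey, kind, and d tag. A sketch of branching on them follows; the storage glosses are the standard NIP-01 conventions, not something this diff itself states:

package main

import (
	"fmt"

	"x.realy.lol/kind"
)

func main() {
	for _, k := range []int{kind.TextNote, kind.FollowList, kind.ClientAuthentication, kind.Article} {
		switch {
		case kind.IsEphemeralKind(k):
			fmt.Println(k, "ephemeral: relay and forget")
		case kind.IsAddressableKind(k):
			fmt.Println(k, "addressable: replace per pubkey+kind+d tag")
		case kind.IsReplaceableKind(k):
			fmt.Println(k, "replaceable: keep only the newest per pubkey")
		default:
			fmt.Println(k, "regular: store as-is")
		}
	}
}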
19 kind/kinds_test.go Normal file
@@ -0,0 +1,19 @@
package kind

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestKindKind(t *testing.T) {
	require.True(t, IsRegularKind(1))
	require.True(t, IsRegularKind(9))
	require.True(t, IsRegularKind(1111))
	require.True(t, IsReplaceableKind(0))
	require.True(t, IsReplaceableKind(3))
	require.True(t, IsReplaceableKind(10002))
	require.True(t, IsReplaceableKind(10050))
	require.True(t, IsAddressableKind(30023))
	require.True(t, IsAddressableKind(39000))
}
18 main.go
@@ -3,8 +3,12 @@ package main
import (
	"os"

	"x.realy.lol/bech32encoding"
	"x.realy.lol/chk"
	"x.realy.lol/config"
	"x.realy.lol/hex"
	"x.realy.lol/log"
	"x.realy.lol/p256k"
	"x.realy.lol/version"
)

@@ -15,4 +19,18 @@ func main() {
		os.Exit(1)
	}
	log.I.F("starting %s version %s", version.Name, version.Version)
	a := cfg.Superuser
	var err error
	var dst []byte
	if dst, err = bech32encoding.NpubToBytes([]byte(a)); chk.E(err) {
		if _, err = hex.DecBytes(dst, []byte(a)); chk.E(err) {
			log.F.F("SUPERUSER is invalid: %s", a)
			os.Exit(1)
		}
	}
	super := &p256k.Signer{}
	if err = super.InitPub(dst); chk.E(err) {
		return
	}

}
47 normalize/normalize.go Normal file
@@ -0,0 +1,47 @@
package normalize

import (
	"net/url"
	"strings"
)

// Url normalizes the url and replaces http://, https:// schemes with ws://, wss://
// and normalizes the path.
func Url(u string) string {
	if u == "" {
		return ""
	}

	u = strings.TrimSpace(u)
	u = strings.ToLower(u)

	if fqn := strings.Split(u, ":")[0]; fqn == "localhost" || fqn == "127.0.0.1" {
		u = "ws://" + u
	} else if !strings.HasPrefix(u, "http") && !strings.HasPrefix(u, "ws") {
		u = "wss://" + u
	}

	p, err := url.Parse(u)
	if err != nil {
		return ""
	}

	if p.Scheme == "http" {
		p.Scheme = "ws"
	} else if p.Scheme == "https" {
		p.Scheme = "wss"
	}

	p.Path = strings.TrimRight(p.Path, "/")

	return p.String()
}

// OkMessage takes a string message that is to be sent in an `OK` or `CLOSED` command
// and prefixes it with "<prefix>: " if it doesn't already have an acceptable prefix.
func OkMessage(reason string, prefix string) string {
	if idx := strings.Index(reason, ": "); idx == -1 || strings.IndexByte(reason[0:idx], ' ') != -1 {
		return prefix + ": " + reason
	}
	return reason
}
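OkMessage exists so OK/CLOSED reasons always carry a machine-readable prefix: it treats the reason as already prefixed only when the text before the first ": " is a single word. A few illustrative calls (the message strings are made up):

package main

import (
	"fmt"

	"x.realy.lol/normalize"
)

func main() {
	// no prefix present: one is added
	fmt.Println(normalize.OkMessage("event too large", "blocked"))
	// blocked: event too large

	// already prefixed by a single word and ": ": left untouched
	fmt.Println(normalize.OkMessage("rate-limited: slow down", "error"))
	// rate-limited: slow down

	// the text before ": " contains a space, so it does not count as a prefix
	fmt.Println(normalize.OkMessage("too many: requests", "error"))
	// error: too many: requests
}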
37 normalize/normalize_test.go Normal file
@@ -0,0 +1,37 @@
package normalize

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type urlTest struct {
	url, expected string
}

var urlTests = []urlTest{
	{"", ""},
	{"wss://x.com/y", "wss://x.com/y"},
	{"wss://x.com/y/", "wss://x.com/y"},
	{"http://x.com/y", "ws://x.com/y"},
	{Url("http://x.com/y"), "ws://x.com/y"},
	{Url("wss://x.com"), "wss://x.com"},
	{Url("wss://x.com/"), "wss://x.com"},
	{Url(Url(Url("wss://x.com/"))), "wss://x.com"},
	{"wss://x.com", "wss://x.com"},
	{"wss://x.com/", "wss://x.com"},
	{"x.com////", "wss://x.com"},
	{"x.com/?x=23", "wss://x.com?x=23"},
	{"localhost:4036", "ws://localhost:4036"},
	{"localhost:4036/relay", "ws://localhost:4036/relay"},
	{"localhostmagnanimus.com", "wss://localhostmagnanimus.com"},
	{Url("localhost:4036/relay"), "ws://localhost:4036/relay"},
}

func TestUrl(t *testing.T) {
	for _, test := range urlTests {
		output := Url(test.url)
		assert.Equal(t, test.expected, output)
	}
}
@@ -21,7 +21,7 @@ type Signer struct {
 var _ signer.I = &Signer{}

 // Generate creates a new Signer.
-func (s *Signer) Generate() (err error) {
+func (s *Signer) Generate(nobtcec ...bool) (err error) {
 	if s.SecretKey, err = ec.NewSecretKey(); chk.E(err) {
 		return
 	}
@@ -33,7 +33,7 @@ func (s *Signer) Generate() (err error) {
 }

 // InitSec initialises a Signer using raw secret key bytes.
-func (s *Signer) InitSec(sec []byte) (err error) {
+func (s *Signer) InitSec(sec []byte, nobtcec ...bool) (err error) {
 	if len(sec) != secp256k1.SecKeyBytesLen {
 		err = errorf.E("sec key must be %d bytes", secp256k1.SecKeyBytesLen)
 		return

@@ -44,7 +44,8 @@ func (s *Signer) Generate(nobtcec ...bool) (err error) {
 	}
 	s.SecretKey = &cs.Key
 	s.PublicKey = cx.Key
-	if len(nobtcec) > 0 && nobtcec[0] == false {
+	if len(nobtcec) > 0 && nobtcec[0] != true {
+	} else {
 		s.BTCECSec, _ = btcec.PrivKeyFromBytes(s.skb)
 	}
 	return
@@ -65,7 +66,8 @@ func (s *Signer) InitSec(skb []byte, nobtcec ...bool) (err error) {
 	s.PublicKey = cx.Key
 	// s.ECPublicKey = cp.Key
 	// needed for ecdh
-	if len(nobtcec) > 0 && nobtcec[0] == false {
+	if len(nobtcec) > 0 && nobtcec[0] != true {
+	} else {
 		s.BTCECSec, _ = btcec.PrivKeyFromBytes(s.skb)
 	}
 	return
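A note on this hunk (evidently the p256k Signer, given the p256k import in main.go above): rewriting nobtcec[0] == false as nobtcec[0] != true is a semantic no-op on its own, but the added } else { flips the behaviour, so the btcec mirror key (needed for ECDH) is now derived by default. As the guard is written, an explicit false, not true, is what skips the derivation, which reads inverted against the parameter's name. A minimal usage sketch, assuming only the methods visible in this diff and in main.go:

package main

import "x.realy.lol/p256k"

func main() {
	// no argument: the guard falls through to the else branch, so the
	// btcec mirror key is derived
	s := &p256k.Signer{}
	if err := s.Generate(); err != nil {
		panic(err)
	}
	// as the guard is written, an explicit false skips the btcec
	// derivation, despite the parameter being named nobtcec
	s2 := &p256k.Signer{}
	if err := s2.Generate(false); err != nil {
		panic(err)
	}
}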
38 tags/tag_test.go Normal file
@@ -0,0 +1,38 @@
package tags

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestTagHelpers(t *testing.T) {
	tags := Tags{
		Tag{"x"},
		Tag{"p", "abcdef", "wss://x.com"},
		Tag{"p", "123456", "wss://y.com"},
		Tag{"e", "eeeeee"},
		Tag{"e", "ffffff"},
	}

	assert.NotNil(t, tags.GetFirst([]string{"x"}), "failed to get existing prefix")
	assert.Nil(t, tags.GetFirst([]string{"x", ""}), "got with wrong prefix")
	assert.NotNil(t, tags.GetFirst([]string{"p", "abcdef", "wss://"}), "failed to get with existing prefix")
	assert.NotNil(t, tags.GetFirst([]string{"p", "abcdef", ""}), "failed to get with existing prefix (blank last string)")
	assert.Equal(t, "ffffff", (*(tags.GetLast([]string{"e"})))[1], "failed to get last")
	assert.Equal(t, 2, len(tags.GetAll([]string{"e", ""})), "failed to get all")
	c := make(Tags, 0, 2)
	for _, tag := range tags.All([]string{"e", ""}) {
		c = append(c, tag)
	}
	assert.Equal(t, tags.GetAll([]string{"e", ""}), c)
	assert.Equal(t, 5, len(tags.AppendUnique(Tag{"e", "ffffff"})), "append unique changed the array size when existed")
	assert.Equal(t, 6, len(tags.AppendUnique(Tag{"e", "bbbbbb"})), "append unique failed to append when didn't exist")
	assert.Equal(t, "ffffff", tags.AppendUnique(Tag{"e", "eeeeee"})[4][1], "append unique changed the order")
	assert.Equal(t, "eeeeee", tags.AppendUnique(Tag{"e", "eeeeee"})[3][1], "append unique changed the order")

	filtered := tags.FilterOut([]string{"e"})
	tags.FilterOutInPlace([]string{"e"})
	assert.ElementsMatch(t, filtered, tags)
	assert.Len(t, filtered, 3)
}
290 tags/tags.go Normal file
@@ -0,0 +1,290 @@
package tags

import (
	"encoding/json"
	"errors"
	"iter"
	"slices"
	"strings"

	"x.realy.lol/chk"
	"x.realy.lol/ec/schnorr"
	"x.realy.lol/helpers"
	"x.realy.lol/hex"
	"x.realy.lol/ints"
	"x.realy.lol/normalize"
)

type Tag []string

// StartsWith checks if a tag contains a prefix.
// for example,
//
//	["p", "abcdef...", "wss://relay.com"]
//
// would match against
//
//	["p", "abcdef..."]
//
// or even
//
//	["p", "abcdef...", "wss://"]
func (tag Tag) StartsWith(prefix []string) bool {
	prefixLen := len(prefix)

	if prefixLen > len(tag) {
		return false
	}
	// check initial elements for equality
	for i := 0; i < prefixLen-1; i++ {
		if prefix[i] != tag[i] {
			return false
		}
	}
	// check last element just for a prefix
	return strings.HasPrefix(tag[prefixLen-1], prefix[prefixLen-1])
}

func (tag Tag) Key() string {
	if len(tag) > 0 {
		return tag[0]
	}
	return ""
}

func (tag Tag) Value() string {
	if len(tag) > 1 {
		return tag[1]
	}
	return ""
}

func (tag Tag) Relay() string {
	if len(tag) > 2 && (tag[0] == "e" || tag[0] == "p") {
		return normalize.Url(tag[2])
	}
	return ""
}

type Tags []Tag

// GetD gets the first "d" tag (for parameterized replaceable events) value or ""
func (tags Tags) GetD() string {
	for _, v := range tags {
		if v.StartsWith([]string{"d", ""}) {
			return v[1]
		}
	}
	return ""
}

// GetFirst gets the first tag in tags that matches the prefix, see [Tag.StartsWith]
func (tags Tags) GetFirst(tagPrefix []string) *Tag {
	for _, v := range tags {
		if v.StartsWith(tagPrefix) {
			return &v
		}
	}
	return nil
}

// GetLast gets the last tag in tags that matches the prefix, see [Tag.StartsWith]
func (tags Tags) GetLast(tagPrefix []string) *Tag {
	for i := len(tags) - 1; i >= 0; i-- {
		v := tags[i]
		if v.StartsWith(tagPrefix) {
			return &v
		}
	}
	return nil
}

// GetAll gets all the tags that match the prefix, see [Tag.StartsWith]
func (tags Tags) GetAll(tagPrefix []string) Tags {
	result := make(Tags, 0, len(tags))
	for _, v := range tags {
		if v.StartsWith(tagPrefix) {
			result = append(result, v)
		}
	}
	return result
}

func (tags Tags) GetAllExactKeys(key string) Tags {
	result := make(Tags, 0, len(tags))
	for _, v := range tags {
		if v.StartsWith([]string{key}) {
			if v.Key() == key {
				result = append(result, v)
			}
		}
	}
	return result
}

// All returns an iterator for all the tags that match the prefix, see [Tag.StartsWith]
func (tags Tags) All(tagPrefix []string) iter.Seq2[int, Tag] {
	return func(yield func(int, Tag) bool) {
		for i, v := range tags {
			if v.StartsWith(tagPrefix) {
				if !yield(i, v) {
					break
				}
			}
		}
	}
}

// FilterOut returns a new slice with only the elements that do not match the prefix, see [Tag.StartsWith]
func (tags Tags) FilterOut(tagPrefix []string) Tags {
	filtered := make(Tags, 0, len(tags))
	for _, v := range tags {
		if !v.StartsWith(tagPrefix) {
			filtered = append(filtered, v)
		}
	}
	return filtered
}

// FilterOutInPlace removes all tags that match the prefix, but potentially reorders the tags in unpredictable ways, see [Tag.StartsWith]
func (tags *Tags) FilterOutInPlace(tagPrefix []string) {
	for i := 0; i < len(*tags); i++ {
		tag := (*tags)[i]
		if tag.StartsWith(tagPrefix) {
			// remove this by swapping the last tag into this place
			last := len(*tags) - 1
			(*tags)[i] = (*tags)[last]
			*tags = (*tags)[0:last]
			i-- // this is so we can match this just swapped item in the next iteration
		}
	}
}

// AppendUnique appends a tag if it doesn't exist yet, otherwise does nothing.
// the uniqueness comparison is done based only on the first 2 elements of the tag.
func (tags Tags) AppendUnique(tag Tag) Tags {
	n := len(tag)
	if n > 2 {
		n = 2
	}

	if tags.GetFirst(tag[:n]) == nil {
		return append(tags, tag)
	}
	return tags
}

func (t *Tags) Scan(src any) error {
	var jtags []byte

	switch v := src.(type) {
	case []byte:
		jtags = v
	case string:
		jtags = []byte(v)
	default:
		return errors.New("couldn't scan tags, it's not a json string")
	}

	return json.Unmarshal(jtags, &t)
}

func (tags Tags) ContainsAny(tagName string, values []string) bool {
	for _, tag := range tags {
		if len(tag) < 2 {
			continue
		}

		if tag[0] != tagName {
			continue
		}

		if slices.Contains(values, tag[1]) {
			return true
		}
	}

	return false
}

// marshalTo appends the JSON-encoded Tag to dst. Used for serialization, so
// string escaping is as in RFC8259.
func (tag Tag) marshalTo(dst []byte) []byte {
	dst = append(dst, '[')
	for i, s := range tag {
		if i > 0 {
			dst = append(dst, ',')
		}
		dst = helpers.EscapeString(dst, s)
	}
	dst = append(dst, ']')
	return dst
}

// marshalTo appends the JSON encoded bytes of Tags as [][]string to dst.
// String escaping is as described in RFC8259.
func (tags Tags) marshalTo(dst []byte) []byte {
	dst = append(dst, '[')
	for i, tag := range tags {
		if i > 0 {
			dst = append(dst, ',')
		}
		dst = tag.marshalTo(dst)
	}
	dst = append(dst, ']')
	return dst
}

type Tag_a struct {
	Kind int
	Pubkey []byte
	Ident string
}

func (tags Tags) Get_a_Tags() (atags []Tag_a) {
	a := tags.GetAll([]string{"a"})
	var err error
	if len(a) > 0 {
		for _, v := range a {
			if v[0] == "a" && len(v) > 1 {
				var atag Tag_a
				if atag, err = Decode_a_Tag(v[1]); chk.E(err) {
					continue
				}
				atags = append(atags, atag)
			}
		}
	}
	return
}

func Decode_a_Tag(a string) (ta Tag_a, err error) {
	// try to split it
	parts := strings.Split(a, ":")
	// there must be a kind first
	ki := ints.New(0)
	if _, err = ki.Unmarshal([]byte(parts[0])); chk.E(err) {
		return
	}
	ta = Tag_a{
		Kind: int(ki.Uint16()),
	}
	if len(parts) < 2 {
		return
	}
	// next must be a pubkey
	if len(parts[1]) != 2*schnorr.PubKeyBytesLen {
		return
	}
	var pk []byte
	if pk, err = hex.Dec(parts[1]); err != nil {
		return
	}
	ta.Pubkey = pk
	// there possibly can be nothing after this
	if len(parts) >= 3 {
		// third part is the identifier (d tag)
		ta.Ident = parts[2]
	}
	return
}
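The "a" tag addresses a replaceable or addressable event as kind:pubkey-hex:identifier, which is exactly what Decode_a_Tag pulls apart: a decimal kind, then a 64-character hex pubkey, then an optional d-tag identifier. A small sketch (the pubkey value is just an illustrative 64-hex-character string):

package main

import (
	"fmt"

	"x.realy.lol/tags"
)

func main() {
	a := "30023:3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d:my-post"
	ta, err := tags.Decode_a_Tag(a)
	if err != nil {
		panic(err)
	}
	fmt.Println(ta.Kind)          // 30023
	fmt.Println(ta.Ident)         // my-post
	fmt.Printf("%x\n", ta.Pubkey) // the 32 decoded pubkey bytes
}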
19 timestamp/timestamp.go Normal file
@@ -0,0 +1,19 @@
package timestamp

import (
	"time"

	"golang.org/x/exp/constraints"
)

type Timestamp int64

func Now() Timestamp { return Timestamp(time.Now().Unix()) }

func New[T constraints.Integer | constraints.Float](t T) Timestamp {
	return Timestamp(t)
}

func (t Timestamp) Time() time.Time { return time.Unix(int64(t), 0) }
func (t Timestamp) ToInt64() int64 { return int64(t) }
func (t Timestamp) ToInt() int { return int(t) }
12 units/units.go Normal file
@@ -0,0 +1,12 @@
// Package units is a convenient set of names designating data sizes in bytes
// using common SI decimal names (base 10).
package units

const (
	Kilobyte = 1000
	Kb = Kilobyte
	Megabyte = Kilobyte * Kilobyte
	Mb = Megabyte
	Gigabyte = Megabyte * Kilobyte
	Gb = Gigabyte
)
45 varint/varint.go Normal file
@@ -0,0 +1,45 @@
// Package varint is a variable integer encoding that works in reverse compared to the stdlib
// binary Varint. The terminal byte in the encoding is the one with the 8th bit set. This is
// basically like a base 128 encoding. It reads forward using an io.Reader and writes forward
// using an io.Writer.
package varint

import (
	"io"

	"x.realy.lol/chk"
)

func Encode(w io.Writer, v uint64) {
	x := []byte{0}
	for {
		x[0] = byte(v) & 127
		v >>= 7
		if v == 0 {
			x[0] |= 128
			_, _ = w.Write(x)
			break
		} else {
			_, _ = w.Write(x)
		}
	}
}

func Decode(r io.Reader) (v uint64, err error) {
	x := []byte{0}
	var i uint64
	for {
		if _, err = r.Read(x); chk.E(err) {
			return
		}
		if x[0] >= 128 {
			v += uint64(x[0]&127) << (i * 7)
			return
		} else {
			v += uint64(x[0]) << (i * 7)
			i++
		}
	}
}
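Concretely, each output byte carries 7 bits, least significant group first, and the final byte is marked by setting its high bit: 300 (0b1_0010_1100) encodes as 0x2c (the low 7 bits) followed by 0x82 (the remaining 0b10 with the terminator bit set). A round-trip sketch:

package main

import (
	"bytes"
	"fmt"

	"x.realy.lol/varint"
)

func main() {
	buf := new(bytes.Buffer)
	varint.Encode(buf, 300)
	fmt.Printf("% x\n", buf.Bytes()) // 2c 82

	v, err := varint.Decode(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // 300
}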
29 varint/varint_test.go Normal file
@@ -0,0 +1,29 @@
package varint

import (
	"bytes"
	"math"
	"testing"

	"lukechampine.com/frand"

	"x.realy.lol/chk"
)

func TestEncode_Decode(t *testing.T) {
	var v uint64
	for range 10000000 {
		v = uint64(frand.Intn(math.MaxInt64))
		buf1 := new(bytes.Buffer)
		Encode(buf1, v)
		buf2 := bytes.NewBuffer(buf1.Bytes())
		u, err := Decode(buf2)
		if chk.E(err) {
			t.Fatal(err)
		}
		if u != v {
			t.Fatalf("expected %d got %d", v, u)
		}
	}
}