fixed issues with encapsulated tag api
@@ -54,14 +54,14 @@ func EstimateSize(ev *T) (size int) {
 	// be used
 	size++
 	// next a byte for the length of each tag list
-	for i := range ev.Tags.T {
+	for i := range ev.Tags.F() {
 		size++
-		for j := range ev.Tags.T[i].Field {
+		for _ = range ev.Tags.N(i).F() {
 			// plus a varint16 for each tag length prefix (very often will be 1
 			// byte, occasionally 2, but no more than this
 			size += binary.MaxVarintLen16
 			// and the length of the actual tag
-			size += len(ev.Tags.T[i].Field[j])
+			size += ev.Tags.N(i).Len()
 		}
 	}
 	// length prefix of the content field
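The size estimate mirrors the binary layout: one byte for the tag count, one byte per tag for its field count, then a worst-case varint16 length prefix plus the raw bytes of every field. A minimal stdlib-only sketch of the same accounting, with a plain [][][]byte standing in for the event's tag structure (an assumption, not the repository's types):

package main

import (
	"encoding/binary"
	"fmt"
)

// estimateTagsSize mirrors the accounting above: one byte for the number of
// tags, one byte per tag for its field count, then a varint16 length prefix
// plus the raw bytes for every field.
func estimateTagsSize(tags [][][]byte) (size int) {
	size++ // number of tags
	for _, tag := range tags {
		size++ // number of fields in this tag
		for _, field := range tag {
			size += binary.MaxVarintLen16 // worst-case length prefix
			size += len(field)            // the field bytes themselves
		}
	}
	return
}

func main() {
	tags := [][][]byte{
		{[]byte("e"), []byte("abcdef")},
		{[]byte("p"), []byte("123456"), []byte("wss://relay.example")},
	}
	fmt.Println(estimateTagsSize(tags))
}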
@@ -130,16 +130,16 @@ func (w *Writer) WriteCreatedAt(t *timestamp.T) (err E) {
 // event ID is disabled because of a wrong a tag in the test events cache.
 func (w *Writer) WriteTags(t *tags.T) (err E) {
 	// first a byte for the number of tags
-	w.Buf = appendUvarint(w.Buf, uint64(len(t.T)))
-	for i := range t.T {
+	w.Buf = appendUvarint(w.Buf, uint64(t.Len()))
+	for i := range t.F() {
 		var secondIsHex, secondIsDecimalHex bool
 		// first the length of the tag
-		w.Buf = appendUvarint(w.Buf, uint64(len(t.T[i].Field)))
+		w.Buf = appendUvarint(w.Buf, uint64(t.N(i).Len()))
 	scanning:
-		for j := range t.T[i].Field {
+		for j := range t.N(i).F() {
 			// we know from this first tag certain conditions that allow
 			// data optimizations
-			ts := t.T[i].Field[j]
+			ts := t.N(i).B(j)
 			switch {
 			case j == 0 && len(ts) == 1:
 				for k := range HexInSecond {
@@ -159,12 +159,12 @@ func (w *Writer) WriteTags(t *tags.T) (err E) {
 				w.Buf = appendUvarint(w.Buf, uint64(32))
 				if w.Buf, err = hex.DecAppend(w.Buf, ts); chk.E(err) {
 					// the value MUST be hex by the spec
-					log.W.Ln(t.T[i])
+					log.W.Ln(t.N(i))
 					return
 				}
 				continue scanning
 			case secondIsDecimalHex:
-				split := bytes.Split(t.T[i].Field[j], B(":"))
+				split := bytes.Split(t.N(i).B(j), B(":"))
 				// append the lengths accordingly
 				// first is 2 bytes size
 				var n int
@@ -9,7 +9,6 @@ import (
 	"realy.lol/hex"
 	"realy.lol/kind"
 	"realy.lol/sha256"
-	"realy.lol/tag"
 	"realy.lol/tags"
 	"realy.lol/timestamp"
 )
@@ -93,7 +92,8 @@ func (r *Reader) ReadTags() (t *tags.T, err error) {
 	nTags := int(vi)
 	var end int
 	r.Pos += read
-	t = &tags.T{T: make([]*tag.T, nTags)}
+	t = tags.NewWithCap(nTags)
+	// t = &tags.T{T: make([]*tag.T, nTags)}
 	// t = make(tags.T, nTags)
 	// iterate through the individual tags
 	for i := 0; i < nTags; i++ {
@@ -104,7 +104,8 @@ func (r *Reader) ReadTags() (t *tags.T, err error) {
 		}
 		lenTag := int(vi)
 		r.Pos += read
-		t.T[i] = tag.NewWithCap(lenTag)
+		t.AddCap(i, lenTag)
+		// t.T[i] = tag.NewWithCap(lenTag)
 		// extract the individual tag strings
 		var secondIsHex, secondIsDecimalHex bool
 	reading:
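The decoder now asks the tags container to grow itself (NewWithCap, AddCap) and to take decoded fields (AppendTo) instead of writing into exported slices. A rough stdlib-only sketch of that encapsulation pattern; the Tag/Tags types here are simplified stand-ins, not the real tag.T and tags.T:

package main

import "fmt"

// Tag and Tags are simplified stand-ins for the encapsulated tag types:
// callers go through methods instead of touching the slices directly.
type Tag struct{ field [][]byte }

func (t *Tag) Append(b ...[]byte) { t.field = append(t.field, b...) }

type Tags struct{ t []*Tag }

func NewWithCap(c int) *Tags { return &Tags{t: make([]*Tag, 0, c)} }

// AddCap appends a new tag at index i with capacity c, mirroring the decoder's
// "reserve space for the next tag" step (tags are added in order here).
func (t *Tags) AddCap(i, c int) {
	if len(t.t) == i {
		t.t = append(t.t, &Tag{field: make([][]byte, 0, c)})
	}
}

// AppendTo appends decoded fields to the tag at index n.
func (t *Tags) AppendTo(n int, b ...[]byte) {
	if n < len(t.t) {
		t.t[n].Append(b...)
	}
}

func main() {
	tg := NewWithCap(2)
	tg.AddCap(0, 2)
	tg.AppendTo(0, []byte("e"), []byte("abcdef"))
	fmt.Println(string(tg.t[0].field[0]), string(tg.t[0].field[1]))
}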
@@ -143,8 +144,11 @@ func (r *Reader) ReadTags() (t *tags.T, err error) {
 			case j == 1:
 				switch {
 				case secondIsHex:
-					t.T[i].Field = append(t.T[i].Field, make(B, 0, sha256.Size*2))
-					t.T[i].Field[j] = hex.EncAppend(t.T[i].Field[j], r.Buf[r.Pos:end])
+					hh := make(B, 0, sha256.Size*2)
+					hh = hex.EncAppend(hh, r.Buf[r.Pos:end])
+					t.AppendTo(i, hh)
+					// t.N(i).Field = append(t.T[i].Field, make(B, 0, sha256.Size*2))
+					// t.N(i).Field[j] = hex.EncAppend(t.N(i).B(j), r.Buf[r.Pos:end])
 					r.Pos = end
 					continue reading
 				case secondIsDecimalHex:
@@ -165,14 +169,17 @@ func (r *Reader) ReadTags() (t *tags.T, err error) {
 					}
 					pk = r.Buf[r.Pos:fieldEnd]
 					r.Pos = fieldEnd
-					t.T[i].Field = append(t.T[i].Field, B(fmt.Sprintf("%d:%0x:%s",
-						k,
-						hex.Enc(pk),
-						string(r.Buf[r.Pos:end]))))
+					t.AppendTo(i, B(fmt.Sprintf("%d:%0x:%s",
+						k, hex.Enc(pk), string(r.Buf[r.Pos:end]))))
+					// t.N(i).Field = append(t.N(i).Field, B(fmt.Sprintf("%d:%0x:%s",
+					//	k,
+					//	hex.Enc(pk),
+					//	string(r.Buf[r.Pos:end]))))
 					r.Pos = end
 				}
 			}
-			t.T[i].Field = append(t.T[i].Field, r.Buf[r.Pos:r.Pos+int(vi)])
+			t.AppendTo(i, r.Buf[r.Pos:r.Pos+int(vi)])
+			// t.N(i).Field = append(t.N(i).Field, r.Buf[r.Pos:r.Pos+int(vi)])
 			r.Pos = end
 		}
 	}
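The secondIsDecimalHex branch rebuilds an address-style value from its packed parts: a decimal kind, a hex-encoded key and a trailing identifier joined by ':'. A small illustrative sketch of that reassembly using only the standard library (names and the exact format verb are simplified here, not taken from the reader):

package main

import (
	"encoding/hex"
	"fmt"
)

// rebuildAddr joins a numeric kind, a binary public key and an identifier back
// into the "<kind>:<hex pubkey>:<d-tag>" form used by address-style tags.
func rebuildAddr(kind uint16, pk []byte, dTag string) []byte {
	return []byte(fmt.Sprintf("%d:%s:%s", kind, hex.EncodeToString(pk), dTag))
}

func main() {
	pk, _ := hex.DecodeString("deadbeef")
	fmt.Println(string(rebuildAddr(30023, pk, "my-article")))
}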
@@ -51,7 +51,7 @@ func (ev *T) Serialize() (b B) {
 	return
 }

-func (ev *T) String() (r S) { return S(ev.Serialize()) }
+// func (ev *T) String() (r S) { return S(ev.Serialize()) }

 func (ev *T) ToCanonical() (b B) {
 	b = append(b, "[0,\""...)
@@ -96,12 +96,12 @@ func (f *T) MarshalJSON(dst B) (b B, err error) {
 	f.Sort()
 	// open parentheses
 	dst = append(dst, '{')
-	if f.IDs != nil && len(f.IDs.Field) > 0 {
+	if f.IDs != nil && f.IDs.Len() > 0 {
 		first = true
 		dst = text.JSONKey(dst, IDs)
 		dst = text.MarshalHexArray(dst, f.IDs.ToByteSlice())
 	}
-	if f.Kinds != nil && len(f.Kinds.K) > 0 {
+	if f.Kinds.Len() > 0 {
 		if first {
 			dst = append(dst, ',')
 		} else {
@@ -112,7 +112,7 @@ func (f *T) MarshalJSON(dst B) (b B, err error) {
 			return
 		}
 	}
-	if f.Authors != nil && len(f.Authors.Field) > 0 {
+	if f.Authors.Len() > 0 {
 		if first {
 			dst = append(dst, ',')
 		} else {
@@ -121,7 +121,7 @@ func (f *T) MarshalJSON(dst B) (b B, err error) {
 		dst = text.JSONKey(dst, Authors)
 		dst = text.MarshalHexArray(dst, f.Authors.ToByteSlice())
 	}
-	if f.Tags != nil && len(f.Tags.T) > 0 {
+	if f.Tags.Len() > 0 {
 		// if first {
 		// 	dst = append(dst, ',')
 		// } else {
@@ -132,22 +132,22 @@ func (f *T) MarshalJSON(dst B) (b B, err error) {
 		//
 		// [["#p","<pubkey1>","<pubkey3"],["#t","hashtag","stuff"]]
 		//
-		for _, tg := range f.Tags.T {
+		for _, tg := range f.Tags.Value() {
 			if tg == nil {
 				// nothing here
 				continue
 			}
-			if len(tg.Field) < 1 || len(tg.Field[0]) != 2 {
+			if tg.Len() < 1 || len(tg.Key()) != 2 {
 				// if there is no values, skip; the "key" field must be 2 characters long,
 				continue
 			}
-			tKey := tg.Field[0]
+			tKey := tg.F()[0]
 			if tKey[0] != '#' &&
 				(tKey[1] < 'a' && tKey[1] > 'z' || tKey[1] < 'A' && tKey[1] > 'Z') {
 				// first "key" field must begin with '#' and second be alpha
 				continue
 			}
-			values := tg.Field[1:]
+			values := tg.F()[1:]
 			if len(values) == 0 {
 				continue
 			}
@@ -157,7 +157,7 @@ func (f *T) MarshalJSON(dst B) (b B, err error) {
 				first = true
 			}
 			// append the key
-			dst = append(dst, '"', tg.Field[0][0], tg.Field[0][1], '"', ':')
+			dst = append(dst, '"', tg.B(0)[0], tg.B(0)[1], '"', ':')
 			dst = append(dst, '[')
 			for i, value := range values {
 				_ = i
@@ -289,7 +289,8 @@ func (f *T) UnmarshalJSON(b B) (r B, err error) {
 					return
 				}
 				ff = append([]B{k}, ff...)
-				f.Tags.T = append(f.Tags.T, tag.New(ff...))
+				f.Tags = f.Tags.AppendTags(tag.New(ff...))
+				// f.Tags.T = append(f.Tags.T, tag.New(ff...))
 			default:
 				// other types of tags can be anything
 				var ff []B
@@ -297,7 +298,8 @@ func (f *T) UnmarshalJSON(b B) (r B, err error) {
 					return
 				}
 				ff = append([]B{k}, ff...)
-				f.Tags.T = append(f.Tags.T, tag.New(ff...))
+				f.Tags = f.Tags.AppendTags(tag.New(ff...))
+				// f.Tags.T = append(f.Tags.T, tag.New(ff...))
 			}
 			state = betweenKV
 		case IDs[0]:
@@ -418,26 +420,26 @@ func (f *T) Matches(ev *event.T) bool {
 		// log.T.F("nil event")
 		return false
 	}
-	if f.IDs != nil && len(f.IDs.Field) > 0 && !f.IDs.Contains(ev.ID) {
+	if f.IDs.Len() > 0 && !f.IDs.Contains(ev.ID) {
 		// log.T.F("no ids in filter match event\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String())
 		return false
 	}
-	if f.Kinds != nil && len(f.Kinds.K) > 0 && !f.Kinds.Contains(ev.Kind) {
+	if f.Kinds.Len() > 0 && !f.Kinds.Contains(ev.Kind) {
 		// log.T.F("no matching kinds in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String())
 		return false
 	}
-	if f.Authors != nil && len(f.Authors.Field) > 0 && !f.Authors.Contains(ev.PubKey) {
+	if f.Authors.Len() > 0 && !f.Authors.Contains(ev.PubKey) {
 		// log.T.F("no matching authors in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String())
 		return false
 	}
-	if f.Tags != nil && !ev.Tags.Intersects(f.Tags) {
+	if f.Tags.Len() > 0 && !ev.Tags.Intersects(f.Tags) {
 		return false
 	}
-	if f.Since != nil && f.Since.Int() != 0 && ev.CreatedAt != nil && ev.CreatedAt.I64() < f.Since.I64() {
+	if f.Since.Int() != 0 && ev.CreatedAt.I64() < f.Since.I64() {
 		// log.T.F("event is older than since\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String())
 		return false
 	}
-	if f.Until != nil && f.Until.Int() != 0 && ev.CreatedAt.I64() > f.Until.I64() {
+	if f.Until.Int() != 0 && ev.CreatedAt.I64() > f.Until.I64() {
 		// log.T.F("event is newer than until\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String())
 		return false
 	}
@@ -495,7 +497,7 @@ func Equal(a, b *T) bool {
 	if !a.Kinds.Equals(b.Kinds) ||
 		!a.IDs.Equal(b.IDs) ||
 		!a.Authors.Equal(b.Authors) ||
-		len(a.Tags.T) != len(b.Tags.T) ||
+		a.Tags.Len() != b.Tags.Len() ||
 		!arePointerValuesEqual(a.Since, b.Since) ||
 		!arePointerValuesEqual(a.Until, b.Until) ||
 		!equals(a.Search, b.Search) ||
@@ -511,7 +513,8 @@ func GenFilter() (f *T, err error) {
 	for _ = range n {
 		id := make(B, sha256.Size)
 		frand.Read(id)
-		f.IDs.Field = append(f.IDs.Field, id)
+		f.IDs = f.IDs.Append(id)
+		// f.IDs.Field = append(f.IDs.Field, id)
 	}
 	n = frand.Intn(16)
 	for _ = range n {
@@ -524,7 +527,8 @@ func GenFilter() (f *T, err error) {
 			return
 		}
 		pk := sk.PubKey()
-		f.Authors.Field = append(f.Authors.Field, schnorr.SerializePubKey(pk))
+		f.Authors = f.Authors.Append(schnorr.SerializePubKey(pk))
+		// f.Authors.Field = append(f.Authors.Field, schnorr.SerializePubKey(pk))
 	}
 	a := frand.Intn(16)
 	if a < n {
@@ -532,7 +536,7 @@ func GenFilter() (f *T, err error) {
 	}
 	for i := range n {
 		p := make(B, 0, schnorr.PubKeyBytesLen*2)
-		p = hex.EncAppend(p, f.Authors.Field[i])
+		p = hex.EncAppend(p, f.Authors.B(i))
 	}
 	for b := 'a'; b <= 'z'; b++ {
 		l := frand.Intn(6)
@@ -544,7 +548,8 @@ func GenFilter() (f *T, err error) {
 				idb = append(idb, id)
 			}
 			idb = append([]B{{'#', byte(b)}}, idb...)
-			f.Tags.T = append(f.Tags.T, tag.FromBytesSlice(idb...))
+			f.Tags = f.Tags.AppendTags(tag.FromBytesSlice(idb...))
+			// f.Tags.T = append(f.Tags.T, tag.FromBytesSlice(idb...))
 		} else {
 			var idb []B
 			for range l {
@@ -555,7 +560,8 @@ func GenFilter() (f *T, err error) {
 				idb = append(idb, id)
 			}
 			idb = append([]B{{'#', byte(b)}}, idb...)
-			f.Tags.T = append(f.Tags.T, tag.FromBytesSlice(idb...))
+			f.Tags = f.Tags.AppendTags(tag.FromBytesSlice(idb...))
+			// f.Tags.T = append(f.Tags.T, tag.FromBytesSlice(idb...))
 		}
 	}
 	tn := int(timestamp.Now().I64())
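GenFilter fills the filter with random 32-byte IDs and pubkeys so the marshal/unmarshal round trip can be exercised. A stdlib sketch of generating such fixture IDs, with crypto/rand standing in for the frand dependency used in the tests:

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// randomIDs returns n random 32-byte values, the shape used for event IDs in
// the generated test filters.
func randomIDs(n int) [][]byte {
	ids := make([][]byte, n)
	for i := range ids {
		ids[i] = make([]byte, 32)
		if _, err := rand.Read(ids[i]); err != nil {
			panic(err)
		}
	}
	return ids
}

func main() {
	for _, id := range randomIDs(2) {
		fmt.Println(hex.EncodeToString(id))
	}
}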
kind/kind.go
@@ -15,9 +15,24 @@ type T struct {

 func New[V uint16 | uint32 | int](k V) (ki *T) { return &T{uint16(k)} }

-func (k *T) ToInt() int { return int(k.K) }
-func (k *T) ToU16() uint16 { return k.K }
-func (k *T) ToU64() uint64 { return uint64(k.K) }
+func (k *T) ToInt() int {
+	if k == nil {
+		return 0
+	}
+	return int(k.K)
+}
+func (k *T) ToU16() uint16 {
+	if k == nil {
+		return 0
+	}
+	return k.K
+}
+func (k *T) ToU64() uint64 {
+	if k == nil {
+		return 0
+	}
+	return uint64(k.K)
+}
 func (k *T) Name() string { return GetString(k) }
 func (k *T) Equal(k2 *T) bool { return *k == *k2 }
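Giving the accessors a nil-receiver check means a nil *T behaves like the zero kind instead of panicking, which is what lets callers elsewhere drop their explicit nil guards. A minimal self-contained sketch of the idiom (the K type below is illustrative, not the package's type):

package main

import "fmt"

// K is a stand-in for the kind type: methods check the receiver so a nil *K
// behaves like the zero kind instead of panicking.
type K struct{ v uint16 }

func (k *K) ToInt() int {
	if k == nil {
		return 0
	}
	return int(k.v)
}

func (k *K) ToU64() uint64 {
	if k == nil {
		return 0
	}
	return uint64(k.v)
}

func main() {
	var k *K                          // nil pointer
	fmt.Println(k.ToInt(), k.ToU64()) // 0 0, no panic
}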
@@ -20,7 +20,12 @@ func FromIntSlice(is []int) (k *T) {
 	return
 }

-func (k *T) Len() (l int) { return len(k.K) }
+func (k *T) Len() (l int) {
+	if k == nil {
+		return
+	}
+	return len(k.K)
+}

 func (k *T) Less(i, j int) bool { return k.K[i].K < k.K[j].K }
@@ -189,6 +189,7 @@ func BreakTo80(s string) (out string) {
 		}
 	}
 	out = "\n" + strings.Join(ss, "\n")
+	strings.ReplaceAll(out, "\n\n", "\n")
 	return
 }
@@ -328,6 +329,13 @@ func Timestamper() (s string) {

 var wd, _ = os.Getwd()

+func GetNLoc(n int) (output string) {
+	for ; n > 1; n-- {
+		output += fmt.Sprintf("%s\n", GetLoc(n))
+	}
+	return
+}
+
 func GetLoc(skip int) (output string) {
 	_, file, line, _ := runtime.Caller(skip)
 	split := strings.Split(file, wd+string(os.PathSeparator))
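GetLoc resolves a caller's file and line via runtime.Caller and strips the working directory; GetNLoc stacks several of those frames for quick trace output. A stdlib-only sketch of the same idea, with lower-case names to mark it as illustrative rather than the package's implementation:

package main

import (
	"fmt"
	"runtime"
)

// getLoc returns "file:line" for the caller `skip` frames up the stack.
func getLoc(skip int) string {
	_, file, line, ok := runtime.Caller(skip)
	if !ok {
		return "unknown"
	}
	return fmt.Sprintf("%s:%d", file, line)
}

// getNLoc concatenates the nearest n-1 frames, one per line, deepest first.
func getNLoc(n int) (out string) {
	for ; n > 1; n-- {
		out += getLoc(n) + "\n"
	}
	return
}

func main() { fmt.Print(getNLoc(3)) }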
@@ -37,7 +37,7 @@ func (r *T) DeleteEvent(c Ctx, eid *eventid.T) (err E) {
 		return
 	}
 	var indexKeys []B
-	ev := &event.T{}
+	ev := event.New()
 	var evKey, evb, counterKey B
 	// fetch the event to get its index keys
 	err = r.View(func(txn *badger.Txn) (err error) {
@@ -50,9 +50,13 @@ func (r *T) DeleteEvent(c Ctx, eid *eventid.T) (err E) {
 		if evb, err = it.Item().ValueCopy(evb); chk.E(err) {
 			return
 		}
-		if _, err = ev.MarshalJSON(evb); chk.E(err) {
+		log.I.S(evb)
+		var rem B
+		if rem, err = ev.UnmarshalBinary(evb); chk.E(err) {
 			return
 		}
+		_ = rem
+		// log.I.S(rem, ev, seri)
 		indexKeys = GetIndexKeysForEvent(ev, seri)
 		counterKey = GetCounterKey(seri)
 		return
@@ -49,23 +49,23 @@ func GetIndexKeysForEvent(ev *event.T, ser *serial.T) (keyz [][]byte) {
 		keyz = append(keyz, k)
 	}
 	// ~ by tag value + date
-	for i, t := range ev.Tags.T {
+	for i, t := range ev.Tags.Value() {
 		// there is no value field
-		if len(t.Field) < 2 ||
+		if t.Len() < 2 ||
 			// the tag is not a-zA-Z probably (this would permit arbitrary other
 			// single byte chars)
-			len(t.Field[0]) != 1 ||
+			len(t.F()[0]) != 1 ||
 			// the second field is zero length
-			len(t.Field[1]) == 0 ||
+			len(t.F()[1]) == 0 ||
 			// the second field is more than 100 characters long
-			len(t.Field[1]) > 100 {
+			len(t.F()[1]) > 100 {
 			// any of the above is true then the tag is not indexable
 			continue
 		}
 		var firstIndex int
 		var tt *tag.T
-		for firstIndex, tt = range ev.Tags.T {
-			if len(tt.Field) >= 2 && equals(tt.Field[1], t.Field[1]) {
+		for firstIndex, tt = range ev.Tags.Value() {
+			if tt.Len() >= 2 && equals(tt.B(1), t.B(1)) {
 				break
 			}
 		}
@@ -76,11 +76,11 @@ func GetIndexKeysForEvent(ev *event.T, ser *serial.T) (keyz [][]byte) {
 		// get key prefix (with full length) and offset where to write the last
 		// parts
 		prf, elems := index.P(0), []keys.Element(nil)
-		if prf, elems, err = GetTagKeyElements(S(t.Field[1]), CA, ser); chk.E(err) {
+		if prf, elems, err = GetTagKeyElements(S(t.F()[1]), CA, ser); chk.E(err) {
 			return
 		}
 		k := prf.Key(elems...)
-		log.T.F("tag '%s': %s key %0x", t.Field[0], t.Field[1:], k)
+		log.T.F("tag '%s': %s key %0x", t.F()[0], t.F()[1:], k)
 		keyz = append(keyz, k)
 	}
 	{ // ~ by date only
@@ -43,9 +43,9 @@ func PrepareQueries(f *filter.T) (
 	}
 	switch {
 	// first if there is IDs, just search for them, this overrides all other filters
-	case len(f.IDs.Field) > 0:
+	case f.IDs.Len() > 0:
 		qs = make([]query, f.IDs.Len())
-		for i, idHex := range f.IDs.Field {
+		for i, idHex := range f.IDs.F() {
 			ih := id.New(eventid.NewWith(B(idHex)))
 			if ih == nil {
 				log.E.F("failed to decode event ID: %s", idHex)
@@ -67,7 +67,7 @@ func PrepareQueries(f *filter.T) (
 		// if there is no kinds, we just make the queries based on the author pub keys
 		if f.Kinds.Len() == 0 {
 			qs = make([]query, f.Authors.Len())
-			for i, pubkeyHex := range f.Authors.Field {
+			for i, pubkeyHex := range f.Authors.F() {
 				var pk *pubkey.T
 				if pk, err = pubkey.New(pubkeyHex); chk.E(err) {
 					// bogus filter, continue anyway
@@ -87,7 +87,7 @@ func PrepareQueries(f *filter.T) (
 			qs = make([]query, f.Authors.Len()*f.Kinds.Len())
 			i := 0
 		authors:
-			for _, pubkeyHex := range f.Authors.Field {
+			for _, pubkeyHex := range f.Authors.F() {
 				for _, kind := range f.Kinds.K {
 					var pk *pubkey.T
 					if pk, err = pubkey.New(pubkeyHex); chk.E(err) {
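With both authors and kinds present, the prepared query list is the cartesian product: one index prefix per (author, kind) pair, hence the len(authors)*len(kinds) sizing. A small sketch of that fan-out with plain strings and ints standing in for the pubkey and kind types:

package main

import "fmt"

// fanOut builds one query label per (author, kind) pair, mirroring the
// len(authors)*len(kinds) sizing used when preparing index queries.
func fanOut(authors []string, kinds []int) []string {
	qs := make([]string, 0, len(authors)*len(kinds))
	for _, a := range authors {
		for _, k := range kinds {
			qs = append(qs, fmt.Sprintf("%s/%d", a, k))
		}
	}
	return qs
}

func main() {
	fmt.Println(fanOut([]string{"alice", "bob"}, []int{1, 30023}))
}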
@@ -103,14 +103,14 @@ func PrepareQueries(f *filter.T) (
 			}
 			// log.T.S("authors/kinds", qs)
 		}
-		if f.Tags != nil && f.Tags.T != nil || f.Tags.Len() > 0 {
+		if f.Tags.Len() > 0 {
 			ext = &filter.T{Tags: f.Tags}
 			// log.T.S("extra filter", ext)
 		}
 	case f.Tags.Len() > 0:
 		// determine the size of the queries array by inspecting all tags sizes
 		size := 0
-		for _, values := range f.Tags.T {
+		for _, values := range f.Tags.Value() {
 			size += values.Len() - 1
 		}
 		if size == 0 {
@@ -121,10 +121,10 @@ func PrepareQueries(f *filter.T) (
 		// and any kinds mentioned as well in extra filter
 		ext = &filter.T{Kinds: f.Kinds}
 		i := 0
-		log.T.S(f.Tags.T)
-		for _, values := range f.Tags.T {
-			log.T.S(values.Field)
-			for _, value := range values.Field[1:] {
+		log.T.S(f.Tags.Value())
+		for _, values := range f.Tags.Value() {
+			log.T.S(values.F())
+			for _, value := range values.F()[1:] {
 				// get key prefix (with full length) and offset where to write the last parts
 				var prf []byte
 				if prf, err = GetTagKeyPrefix(S(value)); chk.E(err) {
@@ -12,13 +12,14 @@ import (
 )

 func (r *T) QueryEvents(c Ctx, f *filter.T) (evs []*event.T, err E) {
-	log.T.F("query for events\n%s", f)
+	log.I.F("query for events\n%s", f)
 	var queries []query
 	var extraFilter *filter.T
 	var since uint64
 	if queries, extraFilter, since, err = PrepareQueries(f); chk.E(err) {
 		return
 	}
+	log.I.S(queries, extraFilter)
 	// search for the keys generated from the filter
 	var eventKeys [][]byte
 	for _, q := range queries {
@@ -99,6 +100,7 @@ func (r *T) QueryEvents(c Ctx, f *filter.T) (evs []*event.T, err E) {
 			if len(rem) > 0 {
 				log.T.S(rem)
 			}
+			log.I.S(ev)
 			// check if this matches the other filters that were not part of the index.
 			if extraFilter == nil || extraFilter.Matches(ev) {
 				// check if this event is replaced by one we already have in the result.
@@ -101,6 +101,7 @@ func (s *Server) handleMessage(c Ctx, ws *WebSocket, msg B, store store.I) {
 	case countenvelope.L:
 		notice = s.doCount(c, ws, rem, store)
 	case reqenvelope.L:
+		log.I.F("%s", rem)
 		notice = s.doReq(c, ws, rem, store)
 	case closeenvelope.L:
 		notice = s.doClose(c, ws, rem, store)
@@ -180,8 +181,8 @@ func (s *Server) doEvent(c Ctx, ws *WebSocket, req B, sto store.I) (msg B) {

 	if env.Kind.K == kind.Deletion.K {
 		// event deletion -- nip09
-		for _, t := range env.Tags.T {
-			if len(t.Field) >= 2 && equals(t.Key(), B("e")) {
+		for _, t := range env.Tags.Value() {
+			if t.Len() >= 2 && equals(t.Key(), B("e")) {
 				ctx, cancel := context.WithTimeout(c, time.Millisecond*200)
 				defer cancel()
@@ -254,7 +255,7 @@ func (s *Server) doEvent(c Ctx, ws *WebSocket, req B, sto store.I) (msg B) {
 	}

 	ok, reason := AddEvent(c, s.relay, env.T)
-	if err = okenvelope.NewFrom(env.ID, true, reason).Write(ws); chk.E(err) {
+	if err = okenvelope.NewFrom(env.ID, ok, reason).Write(ws); chk.E(err) {
 		return
 	}
 	// ws.WriteJSON(nostr.OKEnvelope{EventID: evt.ID, OK: ok, Reason: reason})
@@ -309,11 +310,11 @@ func (s *Server) doCount(c context.Context, ws *WebSocket, req B,
 			" does your client implement NIP-42?")
 	case senders.Len() == 1 &&
 		receivers.Len() < 2 &&
-		equals(senders.Field[0], ws.authed):
+		equals(senders.F()[0], ws.authed):
 		// allowed filter: ws.authed is sole sender (filter specifies one or all receivers)
 	case receivers.Len() == 1 &&
 		senders.Len() < 2 &&
-		equals(receivers.T[0].Value(), ws.authed):
+		equals(receivers.N(0).Value(), ws.authed):
 		// allowed filter: ws.authed is sole receiver (filter specifies one or all senders)
 	default:
 		// restricted filter: do not return any events,
@@ -384,11 +385,11 @@ func (s *Server) doReq(c Ctx, ws *WebSocket, req B, sto store.I) (r B) {
 			" does your client implement NIP-42?")
 	case senders.Len() == 1 &&
 		receivers.Len() < 2 &&
-		equals(senders.Field[0], ws.authed):
+		equals(senders.Key(), ws.authed):
 		// allowed filter: ws.authed is sole sender (filter specifies one or all receivers)
 	case receivers.Len() == 1 &&
 		senders.Len() < 2 &&
-		equals(receivers.T[0].Value(), ws.authed):
+		equals(receivers.N(0).Value(), ws.authed):
 		// allowed filter: ws.authed is sole receiver (filter specifies one or all senders)
 	default:
 		// restricted filter: do not return any events,
@@ -7,14 +7,18 @@ import (

 	"realy.lol/event"
 	"realy.lol/filter"
+	"realy.lol/kinds"
+	"realy.lol/normalize"
+	"realy.lol/tag"
+	"realy.lol/tags"
 	"realy.lol/ws"
 )

-// RelayInterface is a wrapper thing that unifies Store and Relay under a// common API.
+// RelayInterface is a wrapper thing that unifies Store and Relay under a common
+// API.
 type RelayInterface interface {
 	Publish(c Ctx, evt *event.T) E
-	QuerySync(c Ctx, f *filter.T,
-		opts ...ws.SubscriptionOption) ([]*event.T, E)
+	QuerySync(c Ctx, f *filter.T, opts ...ws.SubscriptionOption) ([]*event.T, E)
 }

 type RelayWrapper struct {
@@ -24,44 +28,58 @@ type RelayWrapper struct {
|
||||
var _ RelayInterface = (*RelayWrapper)(nil)
|
||||
|
||||
func (w RelayWrapper) Publish(c Ctx, evt *event.T) (err E) {
|
||||
// var ch event.C
|
||||
// defer close(ch)
|
||||
if evt.Kind.IsEphemeral() {
|
||||
// do not store ephemeral events
|
||||
return nil
|
||||
// todo: rewrite to fit new API
|
||||
// } else if evt.Kind.IsReplaceable() {
|
||||
// // replaceable event, delete before storing
|
||||
// ch, err = w.Store.QueryEvents(c, &filter.T{
|
||||
// Authors: []string{evt.PubKey},
|
||||
// Kinds: kinds.T{evt.Kind},
|
||||
// })
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("failed to query before replacing: %w", err)
|
||||
// }
|
||||
// if previous := <-ch; previous != nil && isOlder(previous, evt) {
|
||||
// if err = w.Store.DeleteEvent(c, previous); err != nil {
|
||||
// return fmt.Errorf("failed to delete event for replacing: %w", err)
|
||||
// }
|
||||
// }
|
||||
// } else if evt.Kind.IsParameterizedReplaceable() {
|
||||
} else if evt.Kind.IsReplaceable() {
|
||||
// replaceable event, delete before storing
|
||||
var evs []*event.T
|
||||
f := filter.New()
|
||||
f.Authors = tag.New(evt.PubKey)
|
||||
f.Kinds = kinds.New(evt.Kind)
|
||||
evs, err = w.I.QueryEvents(c, f)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query before replacing: %w", err)
|
||||
}
|
||||
if len(evs) > 0 {
|
||||
for _, ev := range evs {
|
||||
if ev.CreatedAt.Int() > evt.CreatedAt.Int() {
|
||||
return errorf.W(S(normalize.Invalid.F("not replacing newer event")))
|
||||
}
|
||||
log.I.F("%s\nreplacing\n%s", evt.Serialize(), ev.Serialize())
|
||||
if err = w.I.DeleteEvent(c, ev.EventID()); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if evt.Kind.IsParameterizedReplaceable() {
|
||||
log.I.F("parameterized replaceable %s", evt.Serialize())
|
||||
// parameterized replaceable event, delete before storing
|
||||
// d := evt.Tags.GetFirst([]string{"d", ""})
|
||||
// if d != nil {
|
||||
// ch, err = w.Store.QueryEvents(c, &filter.T{
|
||||
// Authors: []string{evt.PubKey},
|
||||
// Kinds: kinds.T{evt.Kind},
|
||||
// Tags: filter.TagMap{"d": []string{d.Value()}},
|
||||
// })
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("failed to query before parameterized replacing: %w", err)
|
||||
// }
|
||||
// if previous := <-ch; previous != nil && isOlder(previous, evt) {
|
||||
// if err = w.Store.DeleteEvent(c, previous); chk.D(err) {
|
||||
// return fmt.Errorf("failed to delete event for parameterized replacing: %w", err)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
var evs []*event.T
|
||||
f := filter.New()
|
||||
f.Authors = tag.New(evt.PubKey)
|
||||
f.Kinds = kinds.New(evt.Kind)
|
||||
d := evt.Tags.GetFirst(tag.New("d", ""))
|
||||
f.Tags = tags.New(tag.New(d.Key(), d.Value()))
|
||||
log.I.F("filter for parameterized replaceable %s", f.Serialize())
|
||||
evs, err = w.I.QueryEvents(c, f)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query before replacing: %w", err)
|
||||
}
|
||||
|
||||
if len(evs) > 0 {
|
||||
for _, ev := range evs {
|
||||
log.I.F("maybe replace %s", ev.Serialize())
|
||||
if ev.CreatedAt.Int() > evt.CreatedAt.Int() {
|
||||
return errorf.W(S(normalize.Invalid.F("not replacing newer event")))
|
||||
}
|
||||
log.I.F("%s\nreplacing\n%s", evt.Serialize(), ev.Serialize())
|
||||
if err = w.I.DeleteEvent(c, ev.EventID()); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err = w.SaveEvent(c, evt); chk.E(err) && !errors.Is(err, ErrDupEvent) {
|
||||
return errorf.E("failed to save: %w", err)
|
||||
|
||||
@@ -22,14 +22,16 @@ func GetAddrTagElements(tagValue S) (k uint16, pkb B, d S) {
 }

 func TagSorter(a, b tag.T) int {
-	if len(a.Field) < 2 {
-		if len(b.Field) < 2 {
+	if a.Len() < 2 {
+		if b.Len() < 2 {
 			return 0
 		}
 		return -1
 	}
-	if len(b.Field) < 2 {
+	if b.Len() < 2 {
 		return 1
 	}
-	return bytes.Compare(a.Field[1], b.Field[1])
+	return bytes.Compare(a.B(1), b.B(1))
 }
+
+func Less(a, b tag.T) bool { return TagSorter(a, b) < 0 }
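TagSorter is a three-way comparator over the tags' value field, ordering tags without a value first, and Less adapts it for boolean sorts. A hedged stdlib sketch of the same comparator shape over plain byte-slice tags:

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// tagSorter compares two tags by their value field (index 1); tags that lack a
// value sort before tags that have one, matching the comparator above.
func tagSorter(a, b [][]byte) int {
	if len(a) < 2 {
		if len(b) < 2 {
			return 0
		}
		return -1
	}
	if len(b) < 2 {
		return 1
	}
	return bytes.Compare(a[1], b[1])
}

func main() {
	tags := [][][]byte{
		{[]byte("p"), []byte("bbb")},
		{[]byte("x")},
		{[]byte("e"), []byte("aaa")},
	}
	sort.Slice(tags, func(i, j int) bool { return tagSorter(tags[i], tags[j]) < 0 })
	for _, t := range tags {
		fmt.Println(string(t[0]))
	}
}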
tag/tag.go
@@ -27,15 +27,51 @@ type BS[Z B | S] B
|
||||
//
|
||||
// Not a set, there can be repeating elements.
|
||||
type T struct {
|
||||
Field []BS[B]
|
||||
field []BS[B]
|
||||
}
|
||||
|
||||
func (t *T) Len() int { return len(t.Field) }
|
||||
func (t *T) S(i int) (s S) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
if t.Len() <= i {
|
||||
return
|
||||
}
|
||||
return S(t.field[i])
|
||||
}
|
||||
|
||||
func (t *T) B(i int) (b B) {
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
if t.Len() <= i {
|
||||
return
|
||||
}
|
||||
return B(t.field[i])
|
||||
}
|
||||
|
||||
func (t *T) F() (b []B) {
|
||||
if t == nil {
|
||||
return []B{}
|
||||
}
|
||||
b = make([]B, t.Len())
|
||||
for i := range t.field {
|
||||
b[i] = t.B(i)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *T) Len() int {
|
||||
if t == nil {
|
||||
return 0
|
||||
}
|
||||
return len(t.field)
|
||||
}
|
||||
|
||||
func (t *T) Less(i, j int) bool {
|
||||
var cursor N
|
||||
for len(t.Field[i]) < cursor-1 && len(t.Field[j]) < cursor-1 {
|
||||
if bytes.Compare(t.Field[i], t.Field[j]) < 0 {
|
||||
for len(t.field[i]) < cursor-1 && len(t.field[j]) < cursor-1 {
|
||||
if bytes.Compare(t.field[i], t.field[j]) < 0 {
|
||||
return true
|
||||
}
|
||||
cursor++
|
||||
@@ -43,54 +79,63 @@ func (t *T) Less(i, j int) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (t *T) Swap(i, j int) { t.Field[i], t.Field[j] = t.Field[j], t.Field[i] }
|
||||
func (t *T) Swap(i, j int) { t.field[i], t.field[j] = t.field[j], t.field[i] }
|
||||
|
||||
func NewWithCap(c int) *T { return &T{make([]BS[B], 0, c)} }
|
||||
|
||||
func New[V S | B](fields ...V) (t *T) {
|
||||
t = &T{Field: make([]BS[B], len(fields))}
|
||||
t = &T{field: make([]BS[B], len(fields))}
|
||||
for i, field := range fields {
|
||||
t.Field[i] = B(field)
|
||||
t.field[i] = B(field)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func FromBytesSlice(fields ...B) (t *T) {
|
||||
t = &T{Field: make([]BS[B], len(fields))}
|
||||
t = &T{field: make([]BS[B], len(fields))}
|
||||
for i, field := range fields {
|
||||
t.Field[i] = field
|
||||
t.field[i] = field
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Clone makes a new tag.T with the same members.
|
||||
func (t *T) Clone() (c *T) {
|
||||
c = &T{Field: make([]BS[B], 0, len(t.Field))}
|
||||
for _, f := range t.Field {
|
||||
c = &T{field: make([]BS[B], 0, len(t.field))}
|
||||
for _, f := range t.field {
|
||||
l := len(f)
|
||||
b := make([]byte, l)
|
||||
copy(b, f)
|
||||
c.Field = append(c.Field, b)
|
||||
c.field = append(c.field, b)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *T) Append(b B) { t.Field = append(t.Field, b) }
|
||||
func (t *T) Cap() int { return cap(t.Field) }
|
||||
func (t *T) Clear() { t.Field = t.Field[:0] }
|
||||
func (t *T) Slice(start, end int) *T { return &T{t.Field[start:end]} }
|
||||
func (t *T) Append(b ...B) (tt *T) {
|
||||
if t == nil {
|
||||
// we are propagating back this to tt if t was nil, else it appends
|
||||
t = &T{make([]BS[B], 0, len(t.field))}
|
||||
}
|
||||
for _, bb := range b {
|
||||
t.field = append(t.field, bb)
|
||||
}
|
||||
return t
|
||||
}
|
||||
func (t *T) Cap() int { return cap(t.field) }
|
||||
func (t *T) Clear() { t.field = t.field[:0] }
|
||||
func (t *T) Slice(start, end int) *T { return &T{t.field[start:end]} }
|
||||
|
||||
func (t *T) ToByteSlice() (b []B) {
|
||||
for i := range t.Field {
|
||||
b = append(b, t.Field[i])
|
||||
for i := range t.field {
|
||||
b = append(b, t.field[i])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *T) ToStringSlice() (b []S) {
|
||||
b = make([]S, 0, len(t.Field))
|
||||
for i := range t.Field {
|
||||
b = append(b, S(t.Field[i]))
|
||||
b = make([]S, 0, len(t.field))
|
||||
for i := range t.field {
|
||||
b = append(b, S(t.field[i]))
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -100,19 +145,19 @@ func (t *T) ToStringSlice() (b []S) {
|
||||
// The last element is treated specially in that it is considered to match if
|
||||
// the candidate has the same initial substring as its corresponding element.
|
||||
func (t *T) StartsWith(prefix *T) bool {
|
||||
prefixLen := len(prefix.Field)
|
||||
prefixLen := len(prefix.field)
|
||||
|
||||
if prefixLen > len(t.Field) {
|
||||
if prefixLen > len(t.field) {
|
||||
return false
|
||||
}
|
||||
// check initial elements for equality
|
||||
for i := 0; i < prefixLen-1; i++ {
|
||||
if !equals(prefix.Field[i], t.Field[i]) {
|
||||
if !equals(prefix.field[i], t.field[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
// check last element just for a prefix
|
||||
return bytes.HasPrefix(t.Field[prefixLen-1], prefix.Field[prefixLen-1])
|
||||
return bytes.HasPrefix(t.field[prefixLen-1], prefix.field[prefixLen-1])
|
||||
}
|
||||
|
||||
// Key returns the first element of the tags.
|
||||
@@ -120,8 +165,8 @@ func (t *T) Key() B {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
if len(t.Field) > Key {
|
||||
return t.Field[Key]
|
||||
if t.Len() > Key {
|
||||
return t.field[Key]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -131,8 +176,8 @@ func (t *T) FilterKey() B {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
if len(t.Field) > Key {
|
||||
return t.Field[Key][1:]
|
||||
if len(t.field) > Key {
|
||||
return t.field[Key][1:]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -142,8 +187,8 @@ func (t *T) Value() B {
|
||||
if t == nil {
|
||||
return nil
|
||||
}
|
||||
if len(t.Field) > Value {
|
||||
return t.Field[Value]
|
||||
if len(t.field) > Value {
|
||||
return t.field[Value]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -157,9 +202,9 @@ func (t *T) Relay() (s B) {
|
||||
}
|
||||
if (equals(t.Key(), etag) ||
|
||||
equals(t.Key(), ptag)) &&
|
||||
len(t.Field) >= Relay {
|
||||
len(t.field) >= Relay {
|
||||
|
||||
return normalize.URL(B(t.Field[Relay]))
|
||||
return normalize.URL(B(t.field[Relay]))
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -167,7 +212,7 @@ func (t *T) Relay() (s B) {
|
||||
// MarshalJSON appends the JSON form to the passed bytes.
|
||||
func (t *T) MarshalJSON(dst B) (b B, err error) {
|
||||
dst = append(dst, '[')
|
||||
for i, s := range t.Field {
|
||||
for i, s := range t.field {
|
||||
if i > 0 {
|
||||
dst = append(dst, ',')
|
||||
}
|
||||
@@ -196,14 +241,14 @@ func (t *T) UnmarshalJSON(b B) (r B, err error) {
|
||||
i++
|
||||
} else if b[i] == '"' {
|
||||
inQuotes = false
|
||||
t.Field = append(t.Field, text.NostrUnescape(b[quoteStart:i]))
|
||||
t.field = append(t.field, text.NostrUnescape(b[quoteStart:i]))
|
||||
}
|
||||
}
|
||||
if !openedBracket || inQuotes {
|
||||
log.I.F("\n%v\n%s", t, r)
|
||||
return nil, errorf.E("tag: failed to parse tag")
|
||||
}
|
||||
log.I.S(t.Field)
|
||||
log.I.S(t.field)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -214,8 +259,8 @@ func (t *T) UnmarshalJSON(b B) (r B, err error) {
|
||||
|
||||
// Contains returns true if the provided element is found in the tag slice.
|
||||
func (t *T) Contains(s B) bool {
|
||||
for i := range t.Field {
|
||||
if equals(t.Field[i], s) {
|
||||
for i := range t.field {
|
||||
if equals(t.field[i], s) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -224,11 +269,11 @@ func (t *T) Contains(s B) bool {
|
||||
|
||||
// Equal checks that the provided tag list matches.
|
||||
func (t *T) Equal(ta *T) bool {
|
||||
if len(t.Field) != len(ta.Field) {
|
||||
if len(t.field) != len(ta.field) {
|
||||
return false
|
||||
}
|
||||
for i := range t.Field {
|
||||
if !equals(t.Field[i], ta.Field[i]) {
|
||||
for i := range t.field {
|
||||
if !equals(t.field[i], ta.field[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ func TestMarshalJSONUnmarshalJSON(t *testing.T) {
|
||||
for _ = range n {
|
||||
b1 := make(B, frand.Intn(8))
|
||||
_, _ = frand.Read(b1)
|
||||
tg.Field = append(tg.Field, b1)
|
||||
tg.field = append(tg.field, b1)
|
||||
}
|
||||
// log.I.S(tg)
|
||||
b, _ = tg.MarshalJSON(b)
|
||||
@@ -71,7 +71,7 @@ func BenchmarkMarshalJSONUnmarshalJSON(bb *testing.B) {
|
||||
for _ = range n {
|
||||
b1 := make(B, 128)
|
||||
_, _ = frand.Read(b1)
|
||||
tg.Field = append(tg.Field, b1)
|
||||
tg.field = append(tg.field, b1)
|
||||
}
|
||||
bb.Run("tag.MarshalJSON", func(bb *testing.B) {
|
||||
bb.ReportAllocs()
|
||||
@@ -100,7 +100,7 @@ func TestT_Clone_Equal(t *testing.T) {
|
||||
for _ = range n {
|
||||
f := make(B, frand.Intn(128)+2)
|
||||
_, _ = frand.Read(f)
|
||||
t1.Field = append(t1.Field, f)
|
||||
t1.field = append(t1.field, f)
|
||||
}
|
||||
t2 := t1.Clone()
|
||||
if !t1.Equal(t2) {
|
||||
|
||||
tags/tags.go
@@ -4,38 +4,113 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
|
||||
"realy.lol/lol"
|
||||
"realy.lol/tag"
|
||||
)
|
||||
|
||||
// T is a list of T - which are lists of string elements with ordering and no
|
||||
// uniqueness constraint (not a set).
|
||||
type T struct {
|
||||
T []*tag.T
|
||||
t []*tag.T
|
||||
}
|
||||
|
||||
func New(fields ...*tag.T) (t *T) {
|
||||
// t = &T{T: make([]*tag.T, 0, len(fields))}
|
||||
t = &T{}
|
||||
for _, field := range fields {
|
||||
t.T = append(t.T, field)
|
||||
t.t = append(t.t, field)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func NewWithCap(c int) (t *T) {
|
||||
return &T{t: make([]*tag.T, 0, c)}
|
||||
}
|
||||
|
||||
func (t *T) F() (tt []*tag.T) {
|
||||
if t == nil {
|
||||
return []*tag.T{tag.New(B{})}
|
||||
}
|
||||
return t.t
|
||||
}
|
||||
|
||||
func (t *T) N(i int) (tt *tag.T) {
|
||||
if t == nil {
|
||||
return tag.NewWithCap(0)
|
||||
}
|
||||
if len(t.t) <= i {
|
||||
return tag.New(B{})
|
||||
}
|
||||
return t.t[i]
|
||||
}
|
||||
|
||||
func (t *T) AppendTo(n int, b ...B) (tt *T) {
|
||||
if t == nil {
|
||||
log.E.S(t, b)
|
||||
return
|
||||
}
|
||||
if t.Len() < n+1 {
|
||||
log.E.F("cannot append to nonexistent tags field %d with tags len %d",
|
||||
n, t.Len())
|
||||
fmt.Fprint(os.Stderr, lol.GetNLoc(7))
|
||||
return
|
||||
}
|
||||
for _, bb := range b {
|
||||
t.N(n).Append(bb)
|
||||
// t.T[n].Field = append(t.T[n].Field, bb)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *T) AddCap(i, c int) (tt *T) {
|
||||
if t == nil {
|
||||
log.E.F("cannot add capacity to index %d of nil tags", i)
|
||||
fmt.Fprint(os.Stderr, lol.GetNLoc(7))
|
||||
return t
|
||||
}
|
||||
if len(t.t) == 0 && i == 0 {
|
||||
t.t = append(t.t, tag.NewWithCap(c))
|
||||
}
|
||||
if len(t.t) == 1 && i == 1 {
|
||||
t.t = append(t.t, tag.NewWithCap(c))
|
||||
}
|
||||
if len(t.t) == 2 && i == 2 {
|
||||
t.t = append(t.t, tag.NewWithCap(c))
|
||||
}
|
||||
if len(t.t) <= i {
|
||||
log.I.Ln("len", t.Len(), "i", i)
|
||||
log.E.F("cannot add capacity to nonexistent tag field of tags %d of len %d",
|
||||
i, t.Len())
|
||||
fmt.Fprint(os.Stderr, lol.GetNLoc(7))
|
||||
return t
|
||||
}
|
||||
t.t[i] = tag.NewWithCap(c)
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *T) Value() (tt []*tag.T) {
|
||||
if t == nil {
|
||||
return []*tag.T{}
|
||||
}
|
||||
return t.t
|
||||
}
|
||||
|
||||
func (t *T) ToStringSlice() (b [][]S) {
|
||||
b = make([][]S, 0, len(t.T))
|
||||
for i := range t.T {
|
||||
b = append(b, t.T[i].ToStringSlice())
|
||||
b = make([][]S, 0, len(t.t))
|
||||
for i := range t.t {
|
||||
b = append(b, t.t[i].ToStringSlice())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *T) Clone() (c *T) {
|
||||
c = &T{T: make([]*tag.T, len(t.T))}
|
||||
for i, field := range t.T {
|
||||
c.T[i] = field.Clone()
|
||||
c = &T{t: make([]*tag.T, len(t.t))}
|
||||
for i, field := range t.t {
|
||||
c.t[i] = field.Clone()
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -46,8 +121,8 @@ func (t *T) Equal(ta *T) bool {
|
||||
sort.Sort(t1)
|
||||
t2 := ta.Clone()
|
||||
sort.Sort(t2)
|
||||
for i := range t.T {
|
||||
if !t1.T[i].Equal(t2.T[i]) {
|
||||
for i := range t.t {
|
||||
if !t1.t[i].Equal(t2.t[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -56,33 +131,36 @@ func (t *T) Equal(ta *T) bool {
|
||||
|
||||
// Less returns which tag's first element is first lexicographically
|
||||
func (t *T) Less(i, j int) (less bool) {
|
||||
a, b := t.T[i], t.T[j]
|
||||
if len(a.Field) < 1 && len(b.Field) < 1 {
|
||||
a, b := t.t[i], t.t[j]
|
||||
if a.Len() < 1 && b.Len() < 1 {
|
||||
return false // they are equal
|
||||
}
|
||||
if len(a.Field) < 1 || len(b.Field) < 1 {
|
||||
return len(a.Field) < len(b.Field)
|
||||
if a.Len() < 1 || b.Len() < 1 {
|
||||
return a.Len() < b.Len()
|
||||
}
|
||||
if bytes.Compare(a.Field[0], b.Field[0]) < 0 {
|
||||
if bytes.Compare(a.Key(), b.Key()) < 0 {
|
||||
return true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *T) Swap(i, j int) {
|
||||
t.T[i], t.T[j] = t.T[j], t.T[i]
|
||||
t.t[i], t.t[j] = t.t[j], t.t[i]
|
||||
}
|
||||
|
||||
func (t *T) Len() (l int) {
|
||||
if t.T != nil {
|
||||
return len(t.T)
|
||||
if t == nil {
|
||||
return
|
||||
}
|
||||
if t.t != nil {
|
||||
return len(t.t)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetFirst gets the first tag in tags that matches the prefix, see [T.StartsWith]
|
||||
func (t *T) GetFirst(tagPrefix *tag.T) *tag.T {
|
||||
for _, v := range t.T {
|
||||
for _, v := range t.t {
|
||||
if v.StartsWith(tagPrefix) {
|
||||
return v
|
||||
}
|
||||
@@ -92,8 +170,8 @@ func (t *T) GetFirst(tagPrefix *tag.T) *tag.T {
|
||||
|
||||
// GetLast gets the last tag in tags that matches the prefix, see [T.StartsWith]
|
||||
func (t *T) GetLast(tagPrefix *tag.T) *tag.T {
|
||||
for i := len(t.T) - 1; i >= 0; i-- {
|
||||
v := t.T[i]
|
||||
for i := len(t.t) - 1; i >= 0; i-- {
|
||||
v := t.t[i]
|
||||
if v.StartsWith(tagPrefix) {
|
||||
return v
|
||||
}
|
||||
@@ -103,10 +181,10 @@ func (t *T) GetLast(tagPrefix *tag.T) *tag.T {
|
||||
|
||||
// GetAll gets all the tags that match the prefix, see [T.StartsWith]
|
||||
func (t *T) GetAll(tagPrefix *tag.T) *T {
|
||||
result := &T{T: make([]*tag.T, 0, len(t.T))}
|
||||
for _, v := range t.T {
|
||||
result := &T{t: make([]*tag.T, 0, len(t.t))}
|
||||
for _, v := range t.t {
|
||||
if v.StartsWith(tagPrefix) {
|
||||
result.T = append(result.T, v)
|
||||
result.t = append(result.t, v)
|
||||
}
|
||||
}
|
||||
return result
|
||||
@@ -114,10 +192,10 @@ func (t *T) GetAll(tagPrefix *tag.T) *T {
|
||||
|
||||
// FilterOut removes all tags that match the prefix, see [T.StartsWith]
|
||||
func (t *T) FilterOut(tagPrefix []B) *T {
|
||||
filtered := &T{T: make([]*tag.T, 0, len(t.T))}
|
||||
for _, v := range t.T {
|
||||
filtered := &T{t: make([]*tag.T, 0, len(t.t))}
|
||||
for _, v := range t.t {
|
||||
if !v.StartsWith(tag.New(tagPrefix...)) {
|
||||
filtered.T = append(filtered.T, v)
|
||||
filtered.t = append(filtered.t, v)
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
@@ -132,17 +210,29 @@ func (t *T) AppendUnique(tag *tag.T) *T {
|
||||
n = 2
|
||||
}
|
||||
if t.GetFirst(tag.Slice(0, n)) == nil {
|
||||
return &T{append(t.T, tag)}
|
||||
return &T{append(t.t, tag)}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *T) Append(ttt ...*T) {
|
||||
for _, tt := range ttt {
|
||||
for _, v := range tt.T {
|
||||
t.T = append(t.T, v)
|
||||
func (t *T) Append(ttt ...*T) (tt *T) {
|
||||
if t == nil {
|
||||
t = NewWithCap(len(ttt))
|
||||
}
|
||||
for _, tf := range ttt {
|
||||
for _, v := range tf.t {
|
||||
t.t = append(t.t, v)
|
||||
}
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (t *T) AppendTags(ttt ...*tag.T) (tt *T) {
|
||||
if t == nil {
|
||||
t = NewWithCap(len(ttt))
|
||||
}
|
||||
t.t = append(t.t, ttt...)
|
||||
return t
|
||||
}
|
||||
|
||||
// Scan parses a string or raw bytes that should be a string and embeds the values into the tags variable from which
|
||||
@@ -173,13 +263,13 @@ func (t *T) Intersects(f *T) (has bool) {
|
||||
// that's not the same as an intersection).
|
||||
return
|
||||
}
|
||||
matches := len(f.T)
|
||||
for _, v := range f.T {
|
||||
for _, w := range t.T {
|
||||
matches := len(f.t)
|
||||
for _, v := range f.t {
|
||||
for _, w := range t.t {
|
||||
if equals(v.FilterKey(), w.Key()) {
|
||||
// we have a matching tag key, and both have a first field, check if tag has any
|
||||
// of the subsequent values in the filter tag.
|
||||
for _, val := range v.Field[1:] {
|
||||
for _, val := range v.F()[1:] {
|
||||
if equals(val, w.Value()) {
|
||||
matches--
|
||||
}
|
||||
@@ -221,7 +311,7 @@ func (t *T) Intersects(f *T) (has bool) {
|
||||
// MarshalTo appends the JSON encoded byte of T as [][]string to dst. String escaping is as described in RFC8259.
|
||||
func (t *T) MarshalTo(dst B) []byte {
|
||||
dst = append(dst, '[')
|
||||
for i, tt := range t.T {
|
||||
for i, tt := range t.t {
|
||||
if i > 0 {
|
||||
dst = append(dst, ',')
|
||||
}
|
||||
@@ -248,14 +338,14 @@ func (t *T) MarshalTo(dst B) []byte {
|
||||
func (t *T) MarshalJSON(dst B) (b B, err error) {
|
||||
b = dst
|
||||
b = append(b, '[')
|
||||
if t == nil || t.T == nil {
|
||||
if t == nil || t.t == nil {
|
||||
b = append(b, ']')
|
||||
return
|
||||
}
|
||||
if len(t.T) == 0 {
|
||||
if len(t.t) == 0 {
|
||||
b = append(b, '[', ']')
|
||||
}
|
||||
for i, s := range t.T {
|
||||
for i, s := range t.t {
|
||||
if i > 0 {
|
||||
b = append(b, ',')
|
||||
}
|
||||
@@ -290,7 +380,7 @@ func (t *T) UnmarshalJSON(b B) (r B, err error) {
|
||||
if r, err = tt.UnmarshalJSON(r); chk.E(err) {
|
||||
return
|
||||
}
|
||||
t.T = append(t.T, tt)
|
||||
t.t = append(t.t, tt)
|
||||
case ',':
|
||||
r = r[1:]
|
||||
// next
|
||||
|
||||
@@ -19,9 +19,9 @@ func TestMarshalUnmarshal(t *testing.T) {
|
||||
for _ = range n1 {
|
||||
b1 := make(B, frand.Intn(40)+2)
|
||||
_, _ = frand.Read(b1)
|
||||
tg.Field = append(tg.Field, b1)
|
||||
tg = tg.Append(b1)
|
||||
}
|
||||
tgs.T = append(tgs.T, tg)
|
||||
tgs.t = append(tgs.t, tg)
|
||||
}
|
||||
b, _ = tgs.MarshalJSON(b)
|
||||
bo := make(B, len(b))
|
||||
@@ -97,9 +97,10 @@ func BenchmarkMarshalJSONUnmarshalJSON(bb *testing.B) {
|
||||
for _ = range n1 {
|
||||
b1 := make(B, frand.Intn(40)+2)
|
||||
_, _ = frand.Read(b1)
|
||||
tg.Field = append(tg.Field, b1)
|
||||
tg = tg.Append(b1)
|
||||
// tg.Field = append(tg.Field, b1)
|
||||
}
|
||||
tgs.T = append(tgs.T, tg)
|
||||
tgs.t = append(tgs.t, tg)
|
||||
}
|
||||
b, _ = tgs.MarshalJSON(b)
|
||||
b = b[:0]
|
||||
@@ -116,9 +117,9 @@ func BenchmarkMarshalJSONUnmarshalJSON(bb *testing.B) {
|
||||
for _ = range n1 {
|
||||
b1 := make(B, frand.Intn(40)+2)
|
||||
_, _ = frand.Read(b1)
|
||||
tg.Field = append(tg.Field, b1)
|
||||
tg = tg.Append(b1)
|
||||
}
|
||||
tgs.T = append(tgs.T, tg)
|
||||
tgs.t = append(tgs.t, tg)
|
||||
}
|
||||
b, _ = tgs.MarshalJSON(b)
|
||||
ta := New()
|
||||
@@ -146,9 +147,9 @@ func TestT_Clone_Equal(t *testing.T) {
|
||||
for _ = range n1 {
|
||||
b1 := make(B, frand.Intn(40)+2)
|
||||
_, _ = frand.Read(b1)
|
||||
tg.Field = append(tg.Field, b1)
|
||||
tg = tg.Append(b1)
|
||||
}
|
||||
t1.T = append(t1.T, tg)
|
||||
t1.t = append(t1.t, tg)
|
||||
}
|
||||
t2 := t1.Clone()
|
||||
if !t1.Equal(t2) {
|
||||
@@ -157,3 +158,44 @@ func TestT_Clone_Equal(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTagHelpers(t *testing.T) {
|
||||
tags := New(
|
||||
tag.New("x"),
|
||||
tag.New("p", "abcdef", "wss://x.com"),
|
||||
tag.New("p", "123456", "wss://y.com"),
|
||||
tag.New("e", "eeeeee"),
|
||||
tag.New("e", "ffffff"),
|
||||
)
|
||||
|
||||
if tags.GetFirst(tag.New("x")) == nil {
|
||||
t.Error("failed to get existing prefix")
|
||||
}
|
||||
if tags.GetFirst(tag.New("x", "")) != nil {
|
||||
t.Error("got with wrong prefix")
|
||||
}
|
||||
if tags.GetFirst(tag.New("p", "abcdef", "wss://")) == nil {
|
||||
t.Error("failed to get with existing prefix")
|
||||
}
|
||||
if tags.GetFirst(tag.New("p", "abcdef", "")) == nil {
|
||||
t.Error("failed to get with existing prefix (blank last string)")
|
||||
}
|
||||
if S(tags.GetLast(tag.New("e")).S(1)) != "ffffff" {
|
||||
t.Error("failed to get last")
|
||||
}
|
||||
if tags.GetAll(tag.New("e", "")).Len() != 2 {
|
||||
t.Error("failed to get all")
|
||||
}
|
||||
if tags.AppendUnique(tag.New("e", "ffffff")).Len() != 5 {
|
||||
t.Error("append unique changed the array size when existed")
|
||||
}
|
||||
if tags.AppendUnique(tag.New("e", "bbbbbb")).Len() != 6 {
|
||||
t.Error("append unique failed to append when didn't exist")
|
||||
}
|
||||
if S(tags.AppendUnique(tag.New("e", "eeeeee")).N(4).S(1)) != "ffffff" {
|
||||
t.Error("append unique changed the order")
|
||||
}
|
||||
if S(tags.AppendUnique(tag.New("e", "eeeeee")).N(3).S(1)) != "eeeeee" {
|
||||
t.Error("append unique changed the order")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -25,18 +25,31 @@ func Now() *T {

 // U64 returns the current UNIX timestamp of the current second as uint64.
 func (t *T) U64() uint64 {
+	if t == nil {
+		return 0
+	}
 	return uint64(*t)
 }

 // I64 returns the current UNIX timestamp of the current second as int64.
-func (t *T) I64() int64 { return int64(*t) }
+func (t *T) I64() int64 {
+	if t == nil {
+		return 0
+	}
+	return int64(*t)
+}

 // Time converts a timestamp.Time value into a canonical UNIX 64 bit 1 second
 // precision timestamp.
 func (t *T) Time() time.Time { return time.Unix(int64(*t), 0) }

 // Int returns the timestamp as an int.
-func (t *T) Int() int { return int(*t) }
+func (t *T) Int() int {
+	if t == nil {
+		return 0
+	}
+	return int(*t)
+}

 func (t *T) Bytes() (b B) {
 	b = make(B, 8)