implemented event and req
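The recurring change across the hunks below is mechanical: iterating tags, ids and authors through accessor methods (ToSliceOfTags, ToSliceOfBytes) is replaced by iterating the underlying slices directly (*ev.Tags, *f.Tags, f.Ids.T, f.Authors.T). A minimal sketch of the shapes that style assumes — the type and field names here are simplified stand-ins, not the repository's actual definitions:

package tagsketch

// Tag is a simplified stand-in: element 0 of T is the key, the rest are values.
type Tag struct {
	T [][]byte
}

func (t *Tag) Len() int      { return len(t.T) }
func (t *Tag) Key() []byte   { return t.T[0] }
func (t *Tag) Value() []byte { return t.T[1] } // assumes Len() >= 2

// Tags is a plain slice type, so a *Tags can be dereferenced and ranged directly.
type Tags []*Tag

// Old style: for _, tag := range f.Tags.ToSliceOfTags() { ... }
// New style: for _, t := range *f.Tags { ... }
func indexableKeys(tags *Tags) (keys [][]byte) {
	for _, t := range *tags {
		if t.Len() >= 2 {
			keys = append(keys, t.Key())
		}
	}
	return
}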
@@ -2,6 +2,7 @@ package database
 
 import (
 	"context"
+	"errors"
 	"os"
 	"path/filepath"
 	"time"
@@ -43,7 +44,8 @@ func New(
 		return
 	}
 
-	// Also ensure the directory exists using apputil.EnsureDir for any potential subdirectories
+	// Also ensure the directory exists using apputil.EnsureDir for any
+	// potential subdirectories
 	dummyFile := filepath.Join(dataDir, "dummy.sst")
 	if err = apputil.EnsureDir(dummyFile); chk.E(err) {
 		return
@@ -87,8 +89,8 @@ func New(
 func (d *D) Path() string { return d.dataDir }
 
 func (d *D) Wipe() (err error) {
-	// TODO implement me
-	panic("implement me")
+	err = errors.New("not implemented")
+	return
 }
 
 func (d *D) SetLogLevel(level string) {
@@ -98,8 +100,8 @@ func (d *D) SetLogLevel(level string) {
 func (d *D) EventIdsBySerial(start uint64, count int) (
 	evs []uint64, err error,
 ) {
-	// TODO implement me
-	panic("implement me")
+	err = errors.New("not implemented")
+	return
 }
 
 // Init initializes the database with the given path.
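Wipe and EventIdsBySerial remain stubs, but the placeholder panics are replaced by a recoverable error assigned to the named return. The same pattern in isolation (the sentinel name below is illustrative; the diff constructs the error inline with errors.New):

package stub

import "errors"

// errNotImplemented stands in for the inline errors.New("not implemented").
var errNotImplemented = errors.New("not implemented")

// wipe returns an error instead of panicking, so callers can degrade
// gracefully rather than crash the process.
func wipe() (err error) {
	err = errNotImplemented
	return
}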
@@ -118,6 +120,7 @@ func (d *D) Sync() (err error) {
 
 // Close releases resources and closes the database.
 func (d *D) Close() (err error) {
+	log.D.F("%s: closing database", d.dataDir)
 	if d.seq != nil {
 		if err = d.seq.Release(); chk.E(err) {
 			return

@@ -76,20 +76,21 @@ func GetIndexesForEvent(ev *event.E, serial uint64) (
 	}
 	// Process tags for tag-related indexes
 	if ev.Tags != nil && ev.Tags.Len() > 0 {
-		for _, tag := range ev.Tags.ToSliceOfTags() {
+		for _, t := range *ev.Tags {
 			// only index tags with a value field and the key is a single character
-			if tag.Len() >= 2 {
+			if t.Len() >= 2 {
 				// Get the key and value from the tag
-				keyBytes := tag.Key()
+				keyBytes := t.Key()
 				// require single-letter key
 				if len(keyBytes) != 1 {
 					continue
 				}
 				// if the key is not a-zA-Z skip
-				if (keyBytes[0] < 'a' || keyBytes[0] > 'z') && (keyBytes[0] < 'A' || keyBytes[0] > 'Z') {
+				if (keyBytes[0] < 'a' || keyBytes[0] > 'z') &&
+					(keyBytes[0] < 'A' || keyBytes[0] > 'Z') {
 					continue
 				}
-				valueBytes := tag.Value()
+				valueBytes := t.Value()
 				// Create tag key and value
 				key := new(Letter)
 				key.Set(keyBytes[0])

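GetIndexesForEvent only indexes tags whose key is a single ASCII letter; the rewrapped condition above is that check split over two lines. Pulled out as a standalone helper (illustrative, not part of the source):

package indexsketch

// isSingleLetterKey reports whether a tag key is exactly one ASCII letter,
// mirroring the a-zA-Z check in GetIndexesForEvent.
func isSingleLetterKey(key []byte) bool {
	if len(key) != 1 {
		return false
	}
	c := key[0]
	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}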
@@ -82,7 +82,7 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 	// If there is any Ids in the filter, none of the other fields matter. It
 	// should be an error, but convention just ignores it.
 	if f.Ids.Len() > 0 {
-		for _, id := range f.Ids.ToSliceOfBytes() {
+		for _, id := range f.Ids.T {
 			if err = func() (err error) {
 				var i *types2.IdHash
 				if i, err = CreateIdHashFromData(id); chk.E(err) {
@@ -123,7 +123,7 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 
 	if f.Tags != nil && f.Tags.Len() > 0 {
 		// sort the tags so they are in iteration order (reverse)
-		tmp := f.Tags.ToSliceOfTags()
+		tmp := *f.Tags
 		sort.Slice(
 			tmp, func(i, j int) bool {
 				return bytes.Compare(tmp[i].Key(), tmp[j].Key()) > 0
@@ -134,17 +134,17 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 	// TagKindPubkey tkp
 	if f.Kinds != nil && f.Kinds.Len() > 0 && f.Authors != nil && f.Authors.Len() > 0 && f.Tags != nil && f.Tags.Len() > 0 {
 		for _, k := range f.Kinds.ToUint16() {
-			for _, author := range f.Authors.ToSliceOfBytes() {
-				for _, tag := range f.Tags.ToSliceOfTags() {
+			for _, author := range f.Authors.T {
+				for _, t := range *f.Tags {
 					// accept single-letter keys like "e" or filter-style keys like "#e"
-					if tag.Len() >= 2 && (len(tag.Key()) == 1 || (len(tag.Key()) == 2 && tag.Key()[0] == '#')) {
+					if t.Len() >= 2 && (len(t.Key()) == 1 || (len(t.Key()) == 2 && t.Key()[0] == '#')) {
 						kind := new(types2.Uint16)
 						kind.Set(k)
 						var p *types2.PubHash
 						if p, err = CreatePubHashFromData(author); chk.E(err) {
 							return
 						}
-						keyBytes := tag.Key()
+						keyBytes := t.Key()
 						key := new(types2.Letter)
 						// If the tag key starts with '#', use the second character as the key
 						if len(keyBytes) == 2 && keyBytes[0] == '#' {
@@ -152,7 +152,7 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 						} else {
 							key.Set(keyBytes[0])
 						}
-						for _, valueBytes := range tag.ToSliceOfBytes()[1:] {
+						for _, valueBytes := range t.T[1:] {
 							valueHash := new(types2.Ident)
 							valueHash.FromIdent(valueBytes)
 							start, end := new(bytes.Buffer), new(bytes.Buffer)
@@ -184,11 +184,11 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 	// TagKind tkc
 	if f.Kinds != nil && f.Kinds.Len() > 0 && f.Tags != nil && f.Tags.Len() > 0 {
 		for _, k := range f.Kinds.ToUint16() {
-			for _, tag := range f.Tags.ToSliceOfTags() {
-				if tag.Len() >= 2 && (len(tag.Key()) == 1 || (len(tag.Key()) == 2 && tag.Key()[0] == '#')) {
+			for _, t := range *f.Tags {
+				if t.Len() >= 2 && (len(t.Key()) == 1 || (len(t.Key()) == 2 && t.Key()[0] == '#')) {
 					kind := new(types2.Uint16)
 					kind.Set(k)
-					keyBytes := tag.Key()
+					keyBytes := t.Key()
 					key := new(types2.Letter)
 					// If the tag key starts with '#', use the second character as the key
 					if len(keyBytes) == 2 && keyBytes[0] == '#' {
@@ -196,7 +196,7 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 					} else {
 						key.Set(keyBytes[0])
 					}
-					for _, valueBytes := range tag.ToSliceOfBytes()[1:] {
+					for _, valueBytes := range t.T[1:] {
 						valueHash := new(types2.Ident)
 						valueHash.FromIdent(valueBytes)
 						start, end := new(bytes.Buffer), new(bytes.Buffer)
@@ -226,14 +226,14 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 
 	// TagPubkey tpc
 	if f.Authors != nil && f.Authors.Len() > 0 && f.Tags != nil && f.Tags.Len() > 0 {
-		for _, author := range f.Authors.ToSliceOfBytes() {
-			for _, tag := range f.Tags.ToSliceOfTags() {
-				if tag.Len() >= 2 && (len(tag.Key()) == 1 || (len(tag.Key()) == 2 && tag.Key()[0] == '#')) {
+		for _, author := range f.Authors.T {
+			for _, t := range *f.Tags {
+				if t.Len() >= 2 && (len(t.Key()) == 1 || (len(t.Key()) == 2 && t.Key()[0] == '#')) {
 					var p *types2.PubHash
 					if p, err = CreatePubHashFromData(author); chk.E(err) {
 						return
 					}
-					keyBytes := tag.Key()
+					keyBytes := t.Key()
 					key := new(types2.Letter)
 					// If the tag key starts with '#', use the second character as the key
 					if len(keyBytes) == 2 && keyBytes[0] == '#' {
@@ -241,7 +241,7 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 					} else {
 						key.Set(keyBytes[0])
 					}
-					for _, valueBytes := range tag.ToSliceOfBytes()[1:] {
+					for _, valueBytes := range t.T[1:] {
 						valueHash := new(types2.Ident)
 						valueHash.FromIdent(valueBytes)
 						start, end := new(bytes.Buffer), new(bytes.Buffer)
@@ -269,9 +269,9 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 
 	// Tag tc-
 	if f.Tags != nil && f.Tags.Len() > 0 && (f.Authors == nil || f.Authors.Len() == 0) && (f.Kinds == nil || f.Kinds.Len() == 0) {
-		for _, tag := range f.Tags.ToSliceOfTags() {
-			if tag.Len() >= 2 && (len(tag.Key()) == 1 || (len(tag.Key()) == 2 && tag.Key()[0] == '#')) {
-				keyBytes := tag.Key()
+		for _, t := range *f.Tags {
+			if t.Len() >= 2 && (len(t.Key()) == 1 || (len(t.Key()) == 2 && t.Key()[0] == '#')) {
+				keyBytes := t.Key()
 				key := new(types2.Letter)
 				// If the tag key starts with '#', use the second character as the key
 				if len(keyBytes) == 2 && keyBytes[0] == '#' {
@@ -279,7 +279,7 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 				} else {
 					key.Set(keyBytes[0])
 				}
-				for _, valueBytes := range tag.ToSliceOfBytes()[1:] {
+				for _, valueBytes := range t.T[1:] {
 					valueHash := new(types2.Ident)
 					valueHash.FromIdent(valueBytes)
 					start, end := new(bytes.Buffer), new(bytes.Buffer)
@@ -303,7 +303,7 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 	// KindPubkey kpc
 	if f.Kinds != nil && f.Kinds.Len() > 0 && f.Authors != nil && f.Authors.Len() > 0 {
 		for _, k := range f.Kinds.ToUint16() {
-			for _, author := range f.Authors.ToSliceOfBytes() {
+			for _, author := range f.Authors.T {
 				kind := new(types2.Uint16)
 				kind.Set(k)
 				p := new(types2.PubHash)
@@ -350,7 +350,7 @@ func GetIndexesFromFilter(f *filter.F) (idxs []Range, err error) {
 
 	// Pubkey pc-
 	if f.Authors != nil && f.Authors.Len() > 0 {
-		for _, author := range f.Authors.ToSliceOfBytes() {
+		for _, author := range f.Authors.T {
 			p := new(types2.PubHash)
 			if err = p.FromPubkey(author); chk.E(err) {
 				return

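Throughout the filter branches above, a tag key may arrive either as a bare letter ("e") or in filter notation ("#e"); when it starts with '#', the second character is used as the key. That normalization as a standalone helper (illustrative only, not present in the source):

package indexsketch

// normalizeTagKey extracts the single-letter key from either "e" or "#e"
// style tag keys, returning false for anything else.
func normalizeTagKey(key []byte) (byte, bool) {
	switch {
	case len(key) == 1:
		return key[0], true
	case len(key) == 2 && key[0] == '#':
		return key[1], true
	default:
		return 0, false
	}
}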
@@ -42,7 +42,7 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 	var expDeletes types.Uint40s
 	var expEvs event.S
 	if f.Ids != nil && f.Ids.Len() > 0 {
-		for _, idx := range f.Ids.ToSliceOfBytes() {
+		for _, idx := range f.Ids.T {
 			// we know there is only Ids in this, so run the ID query and fetch.
 			var ser *types.Uint40
 			if ser, err = d.GetSerialById(idx); chk.E(err) {

@@ -17,7 +17,7 @@ func (d *D) QueryForSerials(c context.Context, f *filter.F) (
 	var founds []*types.Uint40
 	var idPkTs []*store.IdPkTs
 	if f.Ids != nil && f.Ids.Len() > 0 {
-		for _, id := range f.Ids.ToSliceOfBytes() {
+		for _, id := range f.Ids.T {
 			var ser *types.Uint40
 			if ser, err = d.GetSerialById(id); chk.E(err) {
 				return
@@ -29,25 +29,6 @@ func (d *D) QueryForSerials(c context.Context, f *filter.F) (
 			return
 		}
 		idPkTs = append(idPkTs, tmp...)
-
-		// // fetch the events full id indexes so we can sort them
-		// for _, ser := range founds {
-		// 	// scan for the IdPkTs
-		// 	var fidpk *store.IdPkTs
-		// 	if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
-		// 		return
-		// 	}
-		// 	if fidpk == nil {
-		// 		continue
-		// 	}
-		// 	idPkTs = append(idPkTs, fidpk)
-		// 	// sort by timestamp
-		// 	sort.Slice(
-		// 		idPkTs, func(i, j int) bool {
-		// 			return idPkTs[i].Ts > idPkTs[j].Ts
-		// 		},
-		// 	)
-		// }
 	} else {
 		if idPkTs, err = d.QueryForIds(c, f); chk.E(err) {
 			return

@@ -16,6 +16,7 @@ import (
 	"interfaces.orly/store"
 	"lol.mleku.dev/chk"
 	"lol.mleku.dev/errorf"
+	"lol.mleku.dev/log"
 )
 
 // SaveEvent saves an event to the database, generating all the necessary indexes.
@@ -71,16 +72,6 @@ func (d *D) SaveEvent(
 			return
 		}
 		idPkTss = append(idPkTss, tmp...)
-		// for _, ser := range sers {
-		// 	var fidpk *store.IdPkTs
-		// 	if fidpk, err = d.GetFullIdPubkeyBySerial(ser); chk.E(err) {
-		// 		return
-		// 	}
-		// 	if fidpk == nil {
-		// 		continue
-		// 	}
-		// 	idPkTss = append(idPkTss, fidpk)
-		// }
 		// sort by timestamp, so the first is the newest
 		sort.Slice(
 			idPkTss, func(i, j int) bool {
@@ -177,6 +168,6 @@ func (d *D) SaveEvent(
 			return
 		},
 	)
-	// log.T.F("total data written: %d bytes keys %d bytes values", kc, vc)
+	log.T.F("total data written: %d bytes keys %d bytes values", kc, vc)
 	return
 }
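SaveEvent drops the commented-out per-serial fetch loop, keeps the sort.Slice call that orders the candidate id/pubkey/timestamp records newest-first, and re-enables the trace log of bytes written. The ordering on its own, assuming the IdPkTs shape visible in the removed comments (Ts as the timestamp):

package savesketch

import "sort"

// idPkTs is a simplified stand-in for store.IdPkTs: event id, pubkey, timestamp.
type idPkTs struct {
	Id []byte
	Pk []byte
	Ts int64
}

// sortNewestFirst orders records so the largest timestamp comes first,
// matching the "first is the newest" comment retained in SaveEvent.
func sortNewestFirst(recs []*idPkTs) {
	sort.Slice(recs, func(i, j int) bool {
		return recs[i].Ts > recs[j].Ts
	})
}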