Delete outdated benchmark reports and results.

Removed old benchmark reports and detailed logs from the repository to clean up unnecessary files. These reports appear to be auto-generated and no longer relevant for ongoing development.
This commit is contained in:
2025-09-15 05:00:19 +01:00
parent f5cce92bf8
commit e521b788fb
43 changed files with 1025 additions and 3270 deletions

View File

@@ -0,0 +1,87 @@
package database
import (
"bytes"
"sort"
"strconv"
"github.com/dgraph-io/badger/v4"
"lol.mleku.dev/chk"
"lol.mleku.dev/log"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/event"
)
// FetchEventsBySerials processes multiple serials in ascending order and
// retrieves the corresponding events from the database. It optimizes database
// access by sorting the serials and seeking to each one sequentially within a
// single read transaction.
//
// Serials with no exactly-matching key, or whose stored value fails to
// decode, are skipped and simply do not appear in the returned map. The
// returned map is keyed by the decimal string form of each serial's value.
func (d *D) FetchEventsBySerials(serials []*types.Uint40) (evMap map[string]*event.E, err error) {
	log.T.F("FetchEventsBySerials: processing %d serials", len(serials))
	// Initialize the result map
	evMap = make(map[string]*event.E)
	// Return early if no serials are provided
	if len(serials) == 0 {
		return
	}
	// Sort a copy of the serials in ascending order so iterator seeks move
	// forward monotonically, which is more efficient for the LSM store.
	sortedSerials := make([]*types.Uint40, len(serials))
	copy(sortedSerials, serials)
	sort.Slice(sortedSerials, func(i, j int) bool {
		return sortedSerials[i].Get() < sortedSerials[j].Get()
	})
	// Process all serials in a single read transaction.
	if err = d.View(
		func(txn *badger.Txn) error {
			it := txn.NewIterator(badger.DefaultIteratorOptions)
			defer it.Close()
			// Process each serial sequentially. Failures for an
			// individual serial use a per-iteration error variable so
			// that skipping one serial cannot leak a non-nil error
			// into the transaction's return value (the previous code
			// assigned to the closure's named err and then continued,
			// causing the whole call to report failure).
			for _, ser := range sortedSerials {
				// Create the key for this serial.
				buf := new(bytes.Buffer)
				if serErr := indexes.EventEnc(ser).MarshalWrite(buf); chk.E(serErr) {
					continue
				}
				key := buf.Bytes()
				// Seek to this key in the database.
				it.Seek(key)
				if !it.Valid() {
					continue
				}
				item := it.Item()
				// Seek positions at the first key >= target, so an
				// absent serial lands on a different key; require an
				// exact match before decoding.
				if !bytes.Equal(item.Key(), key) {
					continue
				}
				// Get the item value.
				var v []byte
				var serErr error
				if v, serErr = item.ValueCopy(nil); chk.E(serErr) {
					continue
				}
				// Unmarshal the event.
				ev := new(event.E)
				if serErr = ev.UnmarshalBinary(bytes.NewBuffer(v)); chk.E(serErr) {
					continue
				}
				// Store the event keyed by the serial's decimal
				// string representation.
				evMap[strconv.FormatUint(ser.Get(), 10)] = ev
			}
			return nil
		},
	); chk.E(err) {
		return
	}
	log.T.F("FetchEventsBySerials: found %d events out of %d requested serials", len(evMap), len(serials))
	return
}

View File

@@ -8,6 +8,7 @@ import (
"lol.mleku.dev/errorf"
"lol.mleku.dev/log"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/event"
"next.orly.dev/pkg/encoders/filter"
"next.orly.dev/pkg/encoders/hex"
"next.orly.dev/pkg/encoders/tag"
@@ -64,6 +65,99 @@ func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
return
}
// GetSerialsByIds takes a tag.T containing multiple IDs and returns a map of
// IDs (raw ID bytes converted to a string key) to their corresponding serial
// numbers. It is a convenience wrapper around GetSerialsByIdsWithFilter with
// a nil filter function, so every ID found in the index is included.
func (d *D) GetSerialsByIds(ids *tag.T) (
	serials map[string]*types.Uint40, err error,
) {
	return d.GetSerialsByIdsWithFilter(ids, nil)
}
// GetSerialsByIdsWithFilter takes a tag.T containing multiple IDs and returns
// a map of IDs to their corresponding serial numbers, optionally applying a
// filter function to each candidate event. For every ID it builds an index
// prefix via GetIndexesFromFilter, seeks to the first matching entry, and
// extracts the 5-byte serial from the tail of the key.
//
// The result map is keyed by the raw ID bytes converted to a string (NOT
// hex-encoded). IDs that cannot be resolved, or whose event fails the filter
// function, are silently omitted from the result.
func (d *D) GetSerialsByIdsWithFilter(
	ids *tag.T, fn func(ev *event.E, ser *types.Uint40) bool,
) (serials map[string]*types.Uint40, err error) {
	log.T.F("GetSerialsByIdsWithFilter: input ids count=%d", ids.Len())
	// Initialize the result map.
	serials = make(map[string]*types.Uint40)
	// Return early if no IDs are provided.
	if ids.Len() == 0 {
		return
	}
	// Process all IDs in a single read transaction. Per-ID failures use a
	// per-iteration error variable so that skipping one ID cannot leak a
	// non-nil error into the transaction's return value (the previous code
	// assigned to the closure's named err and then continued, causing the
	// whole call to report failure even though other IDs succeeded).
	if err = d.View(
		func(txn *badger.Txn) error {
			it := txn.NewIterator(badger.DefaultIteratorOptions)
			defer it.Close()
			// Process each ID sequentially.
			for _, id := range ids.T {
				// Build the index prefix range for this ID; skip the
				// ID if the index cannot be created or is empty.
				idxs, idErr := GetIndexesFromFilter(&filter.F{Ids: tag.NewFromBytesSlice(id)})
				if chk.E(idErr) || len(idxs) == 0 {
					continue
				}
				// Seek to the start of this ID's range; skip if no
				// entry with the prefix exists.
				it.Seek(idxs[0].Start)
				if !it.ValidForPrefix(idxs[0].Start) {
					continue
				}
				key := it.Item().Key()
				// The serial number is stored as the trailing 5 bytes
				// of the index key.
				ser := new(types.Uint40)
				if idErr = ser.UnmarshalRead(bytes.NewBuffer(key[len(key)-5:])); chk.E(idErr) {
					continue
				}
				// If a filter function is provided, fetch the event
				// and apply it; skip events that cannot be fetched or
				// do not pass.
				if fn != nil {
					ev, fetchErr := d.FetchEventBySerial(ser)
					if fetchErr != nil {
						continue
					}
					if !fn(ev, ser) {
						continue
					}
				}
				// Store the serial keyed by the raw ID bytes as a
				// string.
				serials[string(id)] = ser
			}
			return nil
		},
	); chk.E(err) {
		return
	}
	log.T.F(
		"GetSerialsByIdsWithFilter: found %d serials out of %d requested ids",
		len(serials), ids.Len(),
	)
	return
}
// func (d *D) GetSerialBytesById(id []byte) (ser []byte, err error) {
// var idxs []Range
// if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.New(id)}); chk.E(err) {

View File

@@ -48,9 +48,11 @@ func TestGetSerialById(t *testing.T) {
// Unmarshal the event
if _, err = ev.Unmarshal(b); chk.E(err) {
ev.Free()
t.Fatal(err)
}
ev.Free()
events = append(events, ev)
// Save the event to the database

View File

@@ -55,8 +55,10 @@ func TestGetSerialsByRange(t *testing.T) {
// Unmarshal the event
if _, err = ev.Unmarshal(b); chk.E(err) {
ev.Free()
t.Fatal(err)
}
ev.Free()
events = append(events, ev)

View File

@@ -5,7 +5,6 @@ import (
"context"
"sort"
"strconv"
"strings"
"time"
"lol.mleku.dev/chk"
@@ -43,73 +42,49 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
var expDeletes types.Uint40s
var expEvs event.S
if f.Ids != nil && f.Ids.Len() > 0 {
// for _, id := range f.Ids.T {
// log.T.F("QueryEvents: looking for ID=%s", hex.Enc(id))
// }
// log.T.F("QueryEvents: ids path, count=%d", f.Ids.Len())
for _, idx := range f.Ids.T {
// log.T.F("QueryEvents: lookup id=%s", hex.Enc(idx))
// we know there is only Ids in this, so run the ID query and fetch.
var ser *types.Uint40
var idErr error
if ser, idErr = d.GetSerialById(idx); idErr != nil {
// Check if this is a "not found" error which is expected for IDs we don't have
if strings.Contains(idErr.Error(), "id not found in database") {
// log.T.F(
// "QueryEvents: ID not found in database: %s",
// hex.Enc(idx),
// )
} else {
// Log unexpected errors but continue processing other IDs
// log.E.F(
// "QueryEvents: error looking up id=%s err=%v",
// hex.Enc(idx), idErr,
// )
}
continue
}
// Check if the serial is nil, which indicates the ID wasn't found
if ser == nil {
// log.T.F("QueryEvents: Serial is nil for ID: %s", hex.Enc(idx))
continue
}
// fetch the events
// Get all serials for the requested IDs in a single batch operation
log.T.F("QueryEvents: ids path, count=%d", f.Ids.Len())
// Use GetSerialsByIds to batch process all IDs at once
serials, idErr := d.GetSerialsByIds(f.Ids)
if idErr != nil {
log.E.F("QueryEvents: error looking up ids: %v", idErr)
// Continue with whatever IDs we found
}
// Process each found serial, fetch the event, and apply filters
for idHex, ser := range serials {
// fetch the event
var ev *event.E
if ev, err = d.FetchEventBySerial(ser); err != nil {
// log.T.F(
// "QueryEvents: fetch by serial failed for id=%s ser=%v err=%v",
// hex.Enc(idx), ser, err,
// )
log.T.F(
"QueryEvents: fetch by serial failed for id=%s ser=%v err=%v",
idHex, ser, err,
)
continue
}
// log.T.F(
// "QueryEvents: found id=%s kind=%d created_at=%d",
// hex.Enc(ev.ID), ev.Kind, ev.CreatedAt,
// )
// check for an expiration tag and delete after returning the result
if CheckExpiration(ev) {
log.T.F(
"QueryEvents: id=%s filtered out due to expiration",
hex.Enc(ev.ID),
"QueryEvents: id=%s filtered out due to expiration", idHex,
)
expDeletes = append(expDeletes, ser)
expEvs = append(expEvs, ev)
continue
}
// skip events that have been deleted by a proper deletion event
if derr := d.CheckForDeleted(ev, nil); derr != nil {
// log.T.F(
// "QueryEvents: id=%s filtered out due to deletion: %v",
// hex.Enc(ev.ID), derr,
// )
// log.T.F("QueryEvents: id=%s filtered out due to deletion: %v", idHex, derr)
continue
}
// log.T.F(
// "QueryEvents: id=%s SUCCESSFULLY FOUND, adding to results",
// hex.Enc(ev.ID),
// )
// Add the event to the results
evs = append(evs, ev)
// log.T.F("QueryEvents: id=%s SUCCESSFULLY FOUND, adding to results", idHex)
}
// sort the events by timestamp
sort.Slice(
evs, func(i, j int) bool {

View File

@@ -56,8 +56,10 @@ func setupTestDB(t *testing.T) (
// Unmarshal the event
if _, err = ev.Unmarshal(b); chk.E(err) {
ev.Free()
t.Fatal(err)
}
ev.Free()
events = append(events, ev)

View File

@@ -17,11 +17,12 @@ func (d *D) QueryForSerials(c context.Context, f *filter.F) (
var founds []*types.Uint40
var idPkTs []*store.IdPkTs
if f.Ids != nil && f.Ids.Len() > 0 {
for _, id := range f.Ids.T {
var ser *types.Uint40
if ser, err = d.GetSerialById(id); chk.E(err) {
return
}
// Use batch lookup to minimize transactions when resolving IDs to serials
var serialMap map[string]*types.Uint40
if serialMap, err = d.GetSerialsByIds(f.Ids); chk.E(err) {
return
}
for _, ser := range serialMap {
founds = append(founds, ser)
}
var tmp []*store.IdPkTs