Refactor publishCacheEvents for concurrent publishing and optimize database access.

- Updated `publishCacheEvents` to utilize multiple concurrent connections for event publishing.
- Introduced worker-based architecture leveraging `runtime.NumCPU` for parallel uploads.
- Optimized database fetch logic in `FetchEventsBySerials` for improved maintainability and performance.
- Bumped version to `v0.4.8`.
This commit is contained in:
2025-09-20 04:10:59 +01:00
parent 58a9e83038
commit 81a40c04e5
3 changed files with 131 additions and 75 deletions

View File

@@ -13,34 +13,38 @@ import (
"next.orly.dev/pkg/encoders/event"
)
// FetchEventsBySerials processes multiple serials in ascending order and retrieves
// the corresponding events from the database. It optimizes database access by
// sorting the serials and seeking to each one sequentially.
func (d *D) FetchEventsBySerials(serials []*types.Uint40) (evMap map[string]*event.E, err error) {
func (d *D) FetchEventsBySerials(serials []*types.Uint40) (
evMap map[string]*event.E, err error,
) {
log.T.F("FetchEventsBySerials: processing %d serials", len(serials))
// Initialize the result map
evMap = make(map[string]*event.E)
// Return early if no serials are provided
if len(serials) == 0 {
return
}
// Sort serials in ascending order for more efficient database access
sortedSerials := make([]*types.Uint40, len(serials))
copy(sortedSerials, serials)
sort.Slice(sortedSerials, func(i, j int) bool {
return sortedSerials[i].Get() < sortedSerials[j].Get()
})
sort.Slice(
sortedSerials, func(i, j int) bool {
return sortedSerials[i].Get() < sortedSerials[j].Get()
},
)
// Process all serials in a single transaction
if err = d.View(
func(txn *badger.Txn) (err error) {
// Create an iterator with default options
it := txn.NewIterator(badger.DefaultIteratorOptions)
defer it.Close()
// Process each serial sequentially
for _, ser := range sortedSerials {
// Create the key for this serial
@@ -49,31 +53,43 @@ func (d *D) FetchEventsBySerials(serials []*types.Uint40) (evMap map[string]*eve
continue
}
key := buf.Bytes()
// Seek to this key in the database
it.Seek(key)
if it.Valid() {
item := it.Item()
// Verify the key matches exactly (should always be true after a Seek)
if !bytes.Equal(item.Key(), key) {
continue
}
// Get the item value
var v []byte
if v, err = item.ValueCopy(nil); chk.E(err) {
continue
}
// Unmarshal the event
ev := new(event.E)
if err = ev.UnmarshalBinary(bytes.NewBuffer(v)); chk.E(err) {
if err = item.Value(
func(val []byte) (err error) {
// Unmarshal the event
if err = ev.UnmarshalBinary(bytes.NewBuffer(val)); chk.E(err) {
return
}
// Store the event in the result map using the serial value as string key
return
},
); chk.E(err) {
continue
}
// Store the event in the result map using the serial value as string key
evMap[strconv.FormatUint(ser.Get(), 10)] = ev
// // Get the item value
// var v []byte
// if v, err = item.ValueCopy(nil); chk.E(err) {
// continue
// }
//
// // Unmarshal the event
// ev := new(event.E)
// if err = ev.UnmarshalBinary(bytes.NewBuffer(v)); chk.E(err) {
// continue
// }
}
}
return
@@ -81,7 +97,10 @@ func (d *D) FetchEventsBySerials(serials []*types.Uint40) (evMap map[string]*eve
); chk.E(err) {
return
}
log.T.F("FetchEventsBySerials: found %d events out of %d requested serials", len(evMap), len(serials))
log.T.F(
"FetchEventsBySerials: found %d events out of %d requested serials",
len(evMap), len(serials),
)
return
}
}