badger: use a different transaction for each goroutine.
badger/query.go
@@ -35,131 +35,119 @@ func (b BadgerBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (ch
 	}
 
 	go func() {
-		err := b.View(func(txn *badger.Txn) error {
-			// iterate only through keys and in reverse order
-			opts := badger.IteratorOptions{
-				Reverse: true,
-			}
-
-			// actually iterate
-			iteratorClosers := make([]func(), len(queries))
-			for i, q := range queries {
-				go func(i int, q query) {
-					it := txn.NewIterator(opts)
-					iteratorClosers[i] = it.Close
-
-					defer close(q.results)
-
-					for it.Seek(q.startingPoint); it.ValidForPrefix(q.prefix); it.Next() {
-						item := it.Item()
-						key := item.Key()
-
-						idxOffset := len(key) - 4 // this is where the idx actually starts
-
-						// "id" indexes don't contain a timestamp
-						if !q.skipTimestamp {
-							createdAt := binary.BigEndian.Uint32(key[idxOffset-4 : idxOffset])
-							if createdAt < since {
-								break
-							}
-						}
-
-						idx := make([]byte, 5)
-						idx[0] = rawEventStorePrefix
-						copy(idx[1:], key[idxOffset:])
-
-						// fetch actual event
-						item, err := txn.Get(idx)
-						if err != nil {
-							if err == badger.ErrDiscardedTxn {
-								return
-							}
-							log.Printf("badger: failed to get %x based on prefix %x, index key %x from raw event store: %s\n",
-								idx, q.prefix, key, err)
-							return
-						}
-						item.Value(func(val []byte) error {
-							evt := &nostr.Event{}
-							if err := nostr_binary.Unmarshal(val, evt); err != nil {
-								log.Printf("badger: value read error (id %x): %s\n", val[0:32], err)
-								return err
-							}
-
-							// check if this matches the other filters that were not part of the index
-							if extraFilter == nil || extraFilter.Matches(evt) {
-								q.results <- evt
-							}
-
-							return nil
-						})
-					}
-				}(i, q)
-			}
-
-			// max number of events we'll return
-			limit := b.MaxLimit
-			if filter.Limit > 0 && filter.Limit < limit {
-				limit = filter.Limit
-			}
-
-			// receive results and ensure we only return the most recent ones always
-			emittedEvents := 0
-
-			// first pass
-			emitQueue := make(priorityQueue, 0, len(queries)+limit)
-			for _, q := range queries {
-				evt, ok := <-q.results
-				if ok {
-					emitQueue = append(emitQueue, &queryEvent{Event: evt, query: q.i})
-				}
-			}
-
-			// now it's a good time to schedule this
-			defer func() {
-				close(ch)
-				for _, itclose := range iteratorClosers {
-					itclose()
-				}
-			}()
-
-			// queue may be empty here if we have literally nothing
-			if len(emitQueue) == 0 {
-				return nil
-			}
-
-			heap.Init(&emitQueue)
-
-			// iterate until we've emitted all events required
-			for {
-				// emit latest event in queue
-				latest := emitQueue[0]
-				ch <- latest.Event
-
-				// stop when reaching limit
-				emittedEvents++
-				if emittedEvents == limit {
-					break
-				}
-
-				// fetch a new one from query results and replace the previous one with it
-				if evt, ok := <-queries[latest.query].results; ok {
-					emitQueue[0].Event = evt
-					heap.Fix(&emitQueue, 0)
-				} else {
-					// if this query has no more events we just remove this and proceed normally
-					heap.Remove(&emitQueue, 0)
-
-					// check if the list is empty and end
-					if len(emitQueue) == 0 {
-						break
-					}
-				}
-			}
-
-			return nil
-		})
-		if err != nil {
-			log.Printf("badger: query txn error: %s\n", err)
-		}
+		defer close(ch)
+
+		// actually iterate
+		for _, q := range queries {
+			q := q
+			go b.View(func(txn *badger.Txn) error {
+				// iterate only through keys and in reverse order
+				opts := badger.IteratorOptions{
+					Reverse: true,
+				}
+
+				it := txn.NewIterator(opts)
+				defer it.Close()
+				defer close(q.results)
+
+				for it.Seek(q.startingPoint); it.ValidForPrefix(q.prefix); it.Next() {
+					item := it.Item()
+					key := item.Key()
+
+					idxOffset := len(key) - 4 // this is where the idx actually starts
+
+					// "id" indexes don't contain a timestamp
+					if !q.skipTimestamp {
+						createdAt := binary.BigEndian.Uint32(key[idxOffset-4 : idxOffset])
+						if createdAt < since {
+							break
+						}
+					}
+
+					idx := make([]byte, 5)
+					idx[0] = rawEventStorePrefix
+					copy(idx[1:], key[idxOffset:])
+
+					// fetch actual event
+					item, err := txn.Get(idx)
+					if err != nil {
+						if err == badger.ErrDiscardedTxn {
+							return err
+						}
+						log.Printf("badger: failed to get %x based on prefix %x, index key %x from raw event store: %s\n",
+							idx, q.prefix, key, err)
+						return err
+					}
+					item.Value(func(val []byte) error {
+						evt := &nostr.Event{}
+						if err := nostr_binary.Unmarshal(val, evt); err != nil {
+							log.Printf("badger: value read error (id %x): %s\n", val[0:32], err)
+							return err
+						}
+
+						// check if this matches the other filters that were not part of the index
+						if extraFilter == nil || extraFilter.Matches(evt) {
+							q.results <- evt
+						}
+
+						return nil
+					})
+				}
+
+				return nil
+			})
+		}
+
+		// max number of events we'll return
+		limit := b.MaxLimit
+		if filter.Limit > 0 && filter.Limit < limit {
+			limit = filter.Limit
+		}
+
+		// receive results and ensure we only return the most recent ones always
+		emittedEvents := 0
+
+		// first pass
+		emitQueue := make(priorityQueue, 0, len(queries)+limit)
+		for _, q := range queries {
+			evt, ok := <-q.results
+			if ok {
+				emitQueue = append(emitQueue, &queryEvent{Event: evt, query: q.i})
+			}
+		}
+
+		// queue may be empty here if we have literally nothing
+		if len(emitQueue) == 0 {
+			return
+		}
+
+		heap.Init(&emitQueue)
+
+		// iterate until we've emitted all events required
+		for {
+			// emit latest event in queue
+			latest := emitQueue[0]
+			ch <- latest.Event
+
+			// stop when reaching limit
+			emittedEvents++
+			if emittedEvents == limit {
+				break
+			}
+
+			// fetch a new one from query results and replace the previous one with it
+			if evt, ok := <-queries[latest.query].results; ok {
+				emitQueue[0].Event = evt
+				heap.Fix(&emitQueue, 0)
+			} else {
+				// if this query has no more events we just remove this and proceed normally
+				heap.Remove(&emitQueue, 0)
+
+				// check if the list is empty and end
+				if len(emitQueue) == 0 {
+					break
+				}
+			}
+		}
 	}()
 
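Note: a transaction opened by b.View is discarded as soon as its callback returns, but in the old code the per-query iterator goroutines shared that one transaction and could outlive the callback, which is what the ErrDiscardedTxn checks were guarding against. After this commit, each goroutine opens its own read-only transaction via go b.View(...), so an iterator can never outlive the transaction backing it. Below is a minimal, self-contained sketch of that pattern; it assumes the Badger v4 module path, and the keys, prefixes, and main scaffolding are illustrative only, not code from this repository.

package main

import (
	"fmt"
	"sync"

	badger "github.com/dgraph-io/badger/v4"
)

func main() {
	// open a throwaway in-memory store for the demo
	db, err := badger.Open(badger.DefaultOptions("").WithInMemory(true))
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// seed a few keys under two hypothetical index prefixes
	db.Update(func(txn *badger.Txn) error {
		for _, k := range []string{"a:1", "a:2", "b:1"} {
			if err := txn.Set([]byte(k), []byte("v")); err != nil {
				return err
			}
		}
		return nil
	})

	var wg sync.WaitGroup
	for _, prefix := range [][]byte{[]byte("a:"), []byte("b:")} {
		prefix := prefix // per-iteration copy, as the commit does with q := q
		wg.Add(1)
		// each goroutine runs inside its own read-only transaction,
		// so nothing can discard the txn while its iterator is live
		go db.View(func(txn *badger.Txn) error {
			defer wg.Done()
			it := txn.NewIterator(badger.DefaultIteratorOptions)
			defer it.Close()
			for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
				fmt.Printf("%s\n", it.Item().Key())
			}
			return nil
		})
	}
	wg.Wait()
}

This also removes the old version's manual iteratorClosers bookkeeping: closing each iterator is now a defer inside the same View callback that created it.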
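The unchanged second half of the hunk is a k-way merge: each query channel yields events newest-first, and a max-heap over the channel heads always emits the globally most recent event next. The repo's priorityQueue and queryEvent types are defined outside this hunk, so the sketch below reimplements the same idea generically; mergeItem, maxQueue, and merge are made-up names, with plain ints standing in for event timestamps.

package main

import (
	"container/heap"
	"fmt"
)

// mergeItem pairs a value with the index of the channel it came from,
// mirroring the commit's queryEvent{Event, query} pairing.
type mergeItem struct {
	val int
	src int
}

// maxQueue is a max-heap on val, keeping the largest value on top.
type maxQueue []mergeItem

func (q maxQueue) Len() int           { return len(q) }
func (q maxQueue) Less(i, j int) bool { return q[i].val > q[j].val }
func (q maxQueue) Swap(i, j int)      { q[i], q[j] = q[j], q[i] }
func (q *maxQueue) Push(x any)        { *q = append(*q, x.(mergeItem)) }
func (q *maxQueue) Pop() any {
	old := *q
	it := old[len(old)-1]
	*q = old[:len(old)-1]
	return it
}

// merge drains several descending-sorted channels, emitting the
// globally largest remaining value each round, in the same way
// QueryEvents merges the per-query result channels.
func merge(chans []chan int, limit int) []int {
	out := make([]int, 0, limit)

	// first pass: seed the heap with one head element per channel
	q := make(maxQueue, 0, len(chans))
	for i, c := range chans {
		if v, ok := <-c; ok {
			q = append(q, mergeItem{val: v, src: i})
		}
	}
	if len(q) == 0 {
		return out
	}
	heap.Init(&q)

	for len(out) < limit {
		top := q[0]
		out = append(out, top.val)

		// refill from the channel we just consumed, or drop it
		if v, ok := <-chans[top.src]; ok {
			q[0].val = v
			heap.Fix(&q, 0)
		} else {
			heap.Remove(&q, 0)
			if len(q) == 0 {
				break
			}
		}
	}
	return out
}

func main() {
	a := make(chan int, 3)
	b := make(chan int, 3)
	for _, v := range []int{9, 5, 1} {
		a <- v
	}
	for _, v := range []int{8, 7, 2} {
		b <- v
	}
	close(a)
	close(b)
	fmt.Println(merge([]chan int{a, b}, 4)) // [9 8 7 5]
}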