Merge branch 'master' into broken-connectons-locking
@@ -67,7 +67,7 @@ func (b BadgerBackend) CountEvents(ctx context.Context, filter nostr.Filter) (in
             }

             // check if this matches the other filters that were not part of the index
-            if extraFilter == nil || extraFilter.Matches(evt) {
+            if extraFilter.Matches(evt) {
                 count++
             }

@@ -5,6 +5,7 @@ import (
     "context"
     "encoding/binary"
     "encoding/hex"
+    "errors"
     "fmt"
     "log"

@@ -26,20 +27,36 @@ type queryEvent struct {
     query int
 }

+var exit = errors.New("exit")
+
 func (b BadgerBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
     ch := make(chan *nostr.Event)

+    if filter.Search != "" {
+        close(ch)
+        return ch, nil
+    }
+
     queries, extraFilter, since, err := prepareQueries(filter)
     if err != nil {
         return nil, err
     }

+    // max number of events we'll return
+    limit := b.MaxLimit / 4
+    if filter.Limit > 0 && filter.Limit < b.MaxLimit {
+        limit = filter.Limit
+    }
+
     go func() {
         defer close(ch)

         // actually iterate
         for _, q := range queries {
             q := q
+
+            pulled := 0 // this query will be hardcapped at this global limit
+
             go b.View(func(txn *badger.Txn) error {
                 // iterate only through keys and in reverse order
                 opts := badger.IteratorOptions{
@@ -78,7 +95,8 @@ func (b BadgerBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (ch
                         idx, q.prefix, key, err)
                     return err
                 }
-                item.Value(func(val []byte) error {
+
+                if err := item.Value(func(val []byte) error {
                     evt := &nostr.Event{}
                     if err := nostr_binary.Unmarshal(val, evt); err != nil {
                         log.Printf("badger: value read error (id %x): %s\n", val[0:32], err)
@@ -87,28 +105,34 @@ func (b BadgerBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (ch

                     // check if this matches the other filters that were not part of the index
                     if extraFilter == nil || extraFilter.Matches(evt) {
-                        q.results <- evt
+                        select {
+                        case q.results <- evt:
+                            pulled++
+                            if pulled > limit {
+                                return exit
+                            }
+                        case <-ctx.Done():
+                            return exit
+                        }
                     }

                     return nil
-                })
+                }); err == exit {
+                    return nil
+                } else if err != nil {
+                    return err
+                }
             }

             return nil
         })
     }

-    // max number of events we'll return
-    limit := b.MaxLimit
-    if filter.Limit > 0 && filter.Limit < limit {
-        limit = filter.Limit
-    }
-
     // receive results and ensure we only return the most recent ones always
     emittedEvents := 0

     // first pass
-    emitQueue := make(priorityQueue, 0, len(queries)+limit)
+    emitQueue := make(priorityQueue, 0, len(queries))
     for _, q := range queries {
         evt, ok := <-q.results
         if ok {
bluge/bluge_test.go (new file, 81 lines)
@@ -0,0 +1,81 @@
+package bluge
+
+import (
+    "context"
+    "os"
+    "testing"
+
+    "github.com/fiatjaf/eventstore/bolt"
+    "github.com/nbd-wtf/go-nostr"
+    "github.com/stretchr/testify/assert"
+)
+
+func TestBlugeFlow(t *testing.T) {
+    os.RemoveAll("/tmp/blugetest-bolt")
+    os.RemoveAll("/tmp/blugetest-bluge")
+
+    bb := &bolt.BoltBackend{Path: "/tmp/blugetest-bolt"}
+    bb.Init()
+    defer bb.Close()
+
+    bl := BlugeBackend{
+        Path:          "/tmp/blugetest-bluge",
+        RawEventStore: bb,
+    }
+    bl.Init()
+    defer bl.Close()
+
+    ctx := context.Background()
+
+    willDelete := make([]*nostr.Event, 0, 3)
+
+    for i, content := range []string{
+        "good morning mr paper maker",
+        "good night",
+        "I'll see you again in the paper house",
+        "tonight we dine in my house",
+        "the paper in this house if very good, mr",
+    } {
+        evt := &nostr.Event{Content: content, Tags: nostr.Tags{}}
+        evt.Sign("0000000000000000000000000000000000000000000000000000000000000001")
+
+        bb.SaveEvent(ctx, evt)
+        bl.SaveEvent(ctx, evt)
+
+        if i%2 == 0 {
+            willDelete = append(willDelete, evt)
+        }
+    }
+
+    {
+        ch, err := bl.QueryEvents(ctx, nostr.Filter{Search: "good"})
+        if err != nil {
+            t.Fatalf("QueryEvents error: %s", err)
+            return
+        }
+        n := 0
+        for range ch {
+            n++
+        }
+        assert.Equal(t, 3, n)
+    }
+
+    for _, evt := range willDelete {
+        bl.DeleteEvent(ctx, evt)
+    }
+
+    {
+        ch, err := bl.QueryEvents(ctx, nostr.Filter{Search: "good"})
+        if err != nil {
+            t.Fatalf("QueryEvents error: %s", err)
+            return
+        }
+        n := 0
+        for res := range ch {
+            n++
+            assert.Equal(t, res.Content, "good night")
+            assert.Equal(t, res.PubKey, "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798")
+        }
+        assert.Equal(t, 1, n)
+    }
+}
bluge/delete.go (new file, 11 lines)
@@ -0,0 +1,11 @@
+package bluge
+
+import (
+    "context"
+
+    "github.com/nbd-wtf/go-nostr"
+)
+
+func (b *BlugeBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
+    return b.writer.Delete(eventIdentifier(evt.ID))
+}
bluge/helpers.go (new file, 23 lines)
@@ -0,0 +1,23 @@
+package bluge
+
+import "encoding/hex"
+
+const (
+    contentField   = "c"
+    kindField      = "k"
+    createdAtField = "a"
+    pubkeyField    = "p"
+)
+
+type eventIdentifier string
+
+const idField = "i"
+
+func (id eventIdentifier) Field() string {
+    return idField
+}
+
+func (id eventIdentifier) Term() []byte {
+    v, _ := hex.DecodeString(string(id))
+    return v
+}
bluge/lib.go (new file, 50 lines)
@@ -0,0 +1,50 @@
+package bluge
+
+import (
+    "fmt"
+
+    "github.com/blugelabs/bluge"
+    "github.com/blugelabs/bluge/analysis/token"
+    "github.com/fiatjaf/eventstore"
+    "golang.org/x/text/unicode/norm"
+)
+
+var _ eventstore.Store = (*BlugeBackend)(nil)
+
+type BlugeBackend struct {
+    // Path is where the index will be saved
+    Path string
+
+    // RawEventStore is where we'll fetch the raw events from
+    // bluge will only store ids, so the actual events must be somewhere else
+    RawEventStore eventstore.Store
+
+    searchConfig bluge.Config
+    writer       *bluge.Writer
+}
+
+func (b *BlugeBackend) Close() {
+    defer b.writer.Close()
+}
+
+func (b *BlugeBackend) Init() error {
+    if b.Path == "" {
+        return fmt.Errorf("missing Path")
+    }
+    if b.RawEventStore == nil {
+        return fmt.Errorf("missing RawEventStore")
+    }
+
+    b.searchConfig = bluge.DefaultConfig(b.Path)
+    b.searchConfig.DefaultSearchAnalyzer.TokenFilters = append(b.searchConfig.DefaultSearchAnalyzer.TokenFilters,
+        token.NewUnicodeNormalizeFilter(norm.NFKC),
+    )
+
+    var err error
+    b.writer, err = bluge.OpenWriter(b.searchConfig)
+    if err != nil {
+        return fmt.Errorf("error opening writer: %w", err)
+    }
+
+    return nil
+}
bluge/query.go (new file, 117 lines)
@@ -0,0 +1,117 @@
+package bluge
+
+import (
+    "context"
+    "encoding/hex"
+    "fmt"
+    "strconv"
+
+    "github.com/blugelabs/bluge"
+    "github.com/blugelabs/bluge/search"
+    "github.com/nbd-wtf/go-nostr"
+)
+
+func (b *BlugeBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
+    ch := make(chan *nostr.Event)
+
+    if len(filter.Search) < 2 {
+        close(ch)
+        return ch, nil
+    }
+
+    reader, err := b.writer.Reader()
+    if err != nil {
+        close(ch)
+        return nil, fmt.Errorf("unable to open reader: %w", err)
+    }
+
+    searchQ := bluge.NewMatchQuery(filter.Search)
+    searchQ.SetField(contentField)
+    var q bluge.Query = searchQ
+
+    complicatedQuery := bluge.NewBooleanQuery().AddMust(searchQ)
+
+    if len(filter.Kinds) > 0 {
+        eitherKind := bluge.NewBooleanQuery()
+        eitherKind.SetMinShould(1)
+        for _, kind := range filter.Kinds {
+            kindQ := bluge.NewTermQuery(strconv.Itoa(kind))
+            kindQ.SetField(kindField)
+            eitherKind.AddShould(kindQ)
+        }
+        complicatedQuery.AddMust(eitherKind)
+        q = complicatedQuery
+    }
+
+    if len(filter.Authors) > 0 {
+        eitherPubkey := bluge.NewBooleanQuery()
+        eitherPubkey.SetMinShould(1)
+        for _, pubkey := range filter.Authors {
+            if len(pubkey) != 64 {
+                continue
+            }
+            pubkeyQ := bluge.NewTermQuery(pubkey[56:])
+            pubkeyQ.SetField(pubkeyField)
+            eitherPubkey.AddShould(pubkeyQ)
+        }
+        complicatedQuery.AddMust(eitherPubkey)
+        q = complicatedQuery
+    }
+
+    if filter.Since != nil || filter.Until != nil {
+        min := 0.0
+        if filter.Since != nil {
+            min = float64(*filter.Since)
+        }
+        max := float64(nostr.Now())
+        if filter.Until != nil {
+            max = float64(*filter.Until)
+        }
+        dateRangeQ := bluge.NewNumericRangeInclusiveQuery(min, max, true, true)
+        dateRangeQ.SetField(createdAtField)
+        complicatedQuery.AddMust(dateRangeQ)
+        q = complicatedQuery
+    }
+
+    limit := 40
+    if filter.Limit != 0 {
+        limit = filter.Limit
+        if filter.Limit > 150 {
+            limit = 150
+        }
+    }
+
+    req := bluge.NewTopNSearch(limit, q)
+
+    dmi, err := reader.Search(context.Background(), req)
+    if err != nil {
+        close(ch)
+        reader.Close()
+        return ch, fmt.Errorf("error executing search: %w", err)
+    }
+
+    go func() {
+        defer reader.Close()
+        defer close(ch)
+
+        var next *search.DocumentMatch
+        for next, err = dmi.Next(); next != nil; next, err = dmi.Next() {
+            next.VisitStoredFields(func(field string, value []byte) bool {
+                id := hex.EncodeToString(value)
+                rawch, err := b.RawEventStore.QueryEvents(ctx, nostr.Filter{IDs: []string{id}})
+                if err != nil {
+                    return false
+                }
+                for evt := range rawch {
+                    ch <- evt
+                }
+                return false
+            })
+        }
+        if err != nil {
+            return
+        }
+    }()
+
+    return ch, nil
+}
bluge/save.go (new file, 28 lines)
@@ -0,0 +1,28 @@
+package bluge
+
+import (
+    "context"
+    "fmt"
+    "strconv"
+
+    "github.com/blugelabs/bluge"
+    "github.com/nbd-wtf/go-nostr"
+)
+
+func (b *BlugeBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
+    id := eventIdentifier(evt.ID)
+    doc := &bluge.Document{
+        bluge.NewKeywordFieldBytes(id.Field(), id.Term()).Sortable().StoreValue(),
+    }
+
+    doc.AddField(bluge.NewTextField(contentField, evt.Content))
+    doc.AddField(bluge.NewTextField(kindField, strconv.Itoa(evt.Kind)))
+    doc.AddField(bluge.NewTextField(pubkeyField, evt.PubKey[56:]))
+    doc.AddField(bluge.NewNumericField(createdAtField, float64(evt.CreatedAt)))
+
+    if err := b.writer.Update(doc.ID(), doc); err != nil {
+        return fmt.Errorf("failed to write '%s' document: %w", evt.ID, err)
+    }
+
+    return nil
+}
@@ -6,9 +6,9 @@ import (
     "encoding/binary"
     "log"

-    "github.com/boltdb/bolt"
     "github.com/nbd-wtf/go-nostr"
     nostr_binary "github.com/nbd-wtf/go-nostr/binary"
+    bolt "go.etcd.io/bbolt"
 )

 func (b *BoltBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
@@ -35,17 +35,21 @@ func (b *BoltBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int
                 }
             }

-            // fetch actual event
-            val := raw.Get(v)
-            evt := &nostr.Event{}
-            if err := nostr_binary.Unmarshal(val, evt); err != nil {
-                log.Printf("bolt: value read error (id %x): %s\n", val[0:32], err)
-                break
-            }
-
-            // check if this matches the other filters that were not part of the index before yielding
-            if extraFilter == nil || extraFilter.Matches(evt) {
+            if extraFilter == nil {
                 count++
+            } else {
+                // fetch actual event
+                val := raw.Get(v)
+                evt := &nostr.Event{}
+                if err := nostr_binary.Unmarshal(val, evt); err != nil {
+                    log.Printf("bolt: value read error (id %x): %s\n", val[0:32], err)
+                    break
+                }
+
+                // check if this matches the other filters that were not part of the index before yielding
+                if extraFilter.Matches(evt) {
+                    count++
+                }
             }
         }
     }
@@ -4,8 +4,8 @@ import (
     "context"
     "encoding/hex"

-    "github.com/boltdb/bolt"
     "github.com/nbd-wtf/go-nostr"
+    bolt "go.etcd.io/bbolt"
 )

 func (b *BoltBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
@@ -22,7 +22,7 @@ func (b *BoltBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
     // calculate all index keys we have for this event and delete them
     for _, k := range getIndexKeysForEvent(evt) {
         bucket := txn.Bucket(k.bucket)
-        bucket.Delete(k.key)
+        bucket.Delete(append(k.key, seqb...))
     }

     // delete the raw event
bolt/lib.go (17 changed lines)
@@ -1,10 +1,12 @@
 package bolt

 import (
+    "fmt"
     "sync/atomic"
+    "time"

-    "github.com/boltdb/bolt"
     "github.com/fiatjaf/eventstore"
+    bolt "go.etcd.io/bbolt"
 )

 const (
@@ -42,7 +44,18 @@ func (b *BoltBackend) Init() error {
     }

     // open boltdb
-    db, err := bolt.Open(b.Path, 0644, nil)
+    var db *bolt.DB
+    var err error
+    done := make(chan struct{})
+    go func() {
+        db, err = bolt.Open(b.Path, 0644, nil)
+        done <- struct{}{}
+    }()
+    select {
+    case <-done:
+    case <-time.After(20 * time.Second):
+        return fmt.Errorf("taking too long to open the bolt database at '%s', please make sure that database is not being used elsewhere because there may be a lock in place there", b.Path)
+    }
     if err != nil {
         return err
     }
@@ -3,7 +3,7 @@ package bolt
 import (
     "encoding/binary"

-    "github.com/boltdb/bolt"
+    bolt "go.etcd.io/bbolt"
 )

 const (
@@ -9,9 +9,9 @@ import (
     "fmt"
     "log"

-    "github.com/boltdb/bolt"
     "github.com/nbd-wtf/go-nostr"
     nostr_binary "github.com/nbd-wtf/go-nostr/binary"
+    bolt "go.etcd.io/bbolt"
 )

 type query struct {
@@ -29,17 +29,31 @@ type queryEvent struct {
 }

 func (b *BoltBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
+    ch := make(chan *nostr.Event)
+
     queries, extraFilter, since, err := prepareQueries(filter)
     if err != nil {
         return nil, err
     }

-    ch := make(chan *nostr.Event)
+    if filter.Search != "" {
+        close(ch)
+        return ch, nil
+    }
+
+    // max number of events we'll return
+    limit := b.MaxLimit / 4
+    if filter.Limit > 0 && filter.Limit < b.MaxLimit {
+        limit = filter.Limit
+    }
+
     go func() {
         defer close(ch)

         for _, q := range queries {
             q := q
+            pulled := 0 // this query will be hardcapped at this global limit
+
             go b.db.View(func(txn *bolt.Tx) error {
                 defer close(q.results)

@@ -48,54 +62,53 @@ func (b *BoltBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (cha

                 c := bucket.Cursor()

-                k, v := c.Seek(q.startingPoint)
+                k, _ := c.Seek(q.startingPoint)
                 if k == nil {
-                    k, v = c.Last()
+                    k, _ = c.Last()
                 } else {
-                    k, v = c.Prev()
+                    k, _ = c.Prev()
                 }

-                for ; k != nil && bytes.HasPrefix(k, q.prefix); k, v = c.Prev() {
+                for ; k != nil && bytes.HasPrefix(k, q.prefix); k, _ = c.Prev() {
                     // "id" indexes don't contain a timestamp
                     if !q.skipTimestamp {
                         createdAt := binary.BigEndian.Uint32(k[len(k)-4:])
                         if createdAt < since {
-                            break
+                            return nil
                         }
                     }

                     // fetch actual event
-                    val := raw.Get(v)
+                    val := raw.Get(k[len(k)-8:])
                     evt := &nostr.Event{}
                     if err := nostr_binary.Unmarshal(val, evt); err != nil {
                         log.Printf("bolt: value read error (id %x): %s\n", val[0:32], err)
-                        break
+                        return fmt.Errorf("error: %w", err)
                     }

                     // check if this matches the other filters that were not part of the index before yielding
                     if extraFilter == nil || extraFilter.Matches(evt) {
                         select {
                         case q.results <- evt:
+                            pulled++
+                            if pulled > limit {
+                                return nil
+                            }
                         case <-ctx.Done():
-                            break
+                            return nil
                         }
                     }
                 }

                 return nil
             })
         }

-        // max number of events we'll return
-        limit := b.MaxLimit
-        if filter.Limit > 0 && filter.Limit < limit {
-            limit = filter.Limit
-        }
-
         // receive results and ensure we only return the most recent ones always
         emittedEvents := 0

         // first pass
-        emitQueue := make(priorityQueue, 0, len(queries)+limit)
+        emitQueue := make(priorityQueue, 0, len(queries))
         for _, q := range queries {
             evt, ok := <-q.results
             if ok {
@@ -6,10 +6,10 @@ import (
     "encoding/hex"
     "fmt"

-    "github.com/boltdb/bolt"
     "github.com/fiatjaf/eventstore"
     "github.com/nbd-wtf/go-nostr"
     nostr_binary "github.com/nbd-wtf/go-nostr/binary"
+    bolt "go.etcd.io/bbolt"
 )

 func (b *BoltBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
@@ -44,7 +44,7 @@ func (b *BoltBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {

     for _, km := range getIndexKeysForEvent(evt) {
         bucket := txn.Bucket(km.bucket)
-        if err := bucket.Put(km.key, seqb); err != nil {
+        if err := bucket.Put(append(km.key, seqb...), nil); err != nil {
             return err
         }
     }
@@ -14,6 +14,7 @@ import (
     "github.com/fiatjaf/eventstore/mysql"
     "github.com/fiatjaf/eventstore/postgresql"
     "github.com/fiatjaf/eventstore/sqlite3"
+    "github.com/fiatjaf/eventstore/strfry"
     "github.com/urfave/cli/v3"
 )

@@ -52,6 +53,8 @@ var app = &cli.Command{
         case strings.HasPrefix(path, "https://"):
             // if we ever add something else that uses URLs we'll have to modify this
             typ = "elasticsearch"
+        case strings.HasSuffix(path, ".conf"):
+            typ = "strfry"
         default:
             // try to detect based on the form and names of disk files
             dbname, err := detect(path)
@@ -102,6 +105,8 @@ var app = &cli.Command{
             }
         case "elasticsearch":
             db = &elasticsearch.ElasticsearchStorage{URL: path}
+        case "strfry":
+            db = &strfry.StrfryBackend{ConfigPath: path}
         case "":
             return fmt.Errorf("couldn't determine store type, you can use --type to specify it manually")
         default:
go.mod (27 changed lines)
@@ -5,7 +5,7 @@ go 1.21
 require (
     github.com/PowerDNS/lmdb-go v1.9.2
     github.com/aquasecurity/esquery v0.2.0
-    github.com/boltdb/bolt v1.3.1
+    github.com/blugelabs/bluge v0.2.2
     github.com/dgraph-io/badger/v4 v4.2.0
     github.com/elastic/go-elasticsearch/v8 v8.10.1
     github.com/go-sql-driver/mysql v1.7.1
@@ -13,20 +13,36 @@ require (
     github.com/lib/pq v1.10.9
     github.com/mailru/easyjson v0.7.7
     github.com/mattn/go-sqlite3 v1.14.18
-    github.com/nbd-wtf/go-nostr v0.28.5
-    github.com/stretchr/testify v1.8.4
+    github.com/nbd-wtf/go-nostr v0.34.0
+    github.com/opensearch-project/opensearch-go/v4 v4.0.0
+    github.com/stretchr/testify v1.9.0
     github.com/urfave/cli/v3 v3.0.0-alpha7
+    go.etcd.io/bbolt v1.3.9
     golang.org/x/exp v0.0.0-20231006140011-7918f672742d
+    golang.org/x/text v0.15.0
 )

 require (
+    github.com/RoaringBitmap/roaring v1.9.3 // indirect
+    github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27 // indirect
+    github.com/bits-and-blooms/bitset v1.13.0 // indirect
+    github.com/blevesearch/go-porterstemmer v1.0.3 // indirect
+    github.com/blevesearch/mmap-go v1.0.4 // indirect
+    github.com/blevesearch/segment v0.9.1 // indirect
+    github.com/blevesearch/snowballstem v0.9.0 // indirect
+    github.com/blevesearch/vellum v1.0.10 // indirect
+    github.com/blugelabs/bluge_segment_api v0.2.0 // indirect
+    github.com/blugelabs/ice v1.0.0 // indirect
+    github.com/blugelabs/ice/v2 v2.0.1 // indirect
     github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect
     github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 // indirect
+    github.com/caio/go-tdigest v3.1.0+incompatible // indirect
     github.com/cespare/xxhash/v2 v2.2.0 // indirect
     github.com/davecgh/go-spew v1.1.1 // indirect
     github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect
     github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
     github.com/dgraph-io/ristretto v0.1.1 // indirect
+    github.com/dgryski/go-metro v0.0.0-20211217172704-adc40b04c140 // indirect
     github.com/dustin/go-humanize v1.0.1 // indirect
     github.com/elastic/elastic-transport-go/v8 v8.3.0 // indirect
     github.com/elastic/go-elasticsearch/v7 v7.17.10 // indirect
@@ -41,8 +57,9 @@ require (
     github.com/golang/snappy v0.0.4 // indirect
     github.com/google/flatbuffers v23.5.26+incompatible // indirect
     github.com/josharian/intern v1.0.0 // indirect
-    github.com/klauspost/compress v1.17.2 // indirect
+    github.com/klauspost/compress v1.17.8 // indirect
     github.com/kr/text v0.2.0 // indirect
+    github.com/mschoch/smat v0.2.0 // indirect
     github.com/pkg/errors v0.9.1 // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
     github.com/puzpuzpuz/xsync/v3 v3.0.2 // indirect
@@ -52,7 +69,7 @@ require (
     github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
     go.opencensus.io v0.24.0 // indirect
     golang.org/x/net v0.17.0 // indirect
-    golang.org/x/sys v0.14.0 // indirect
+    golang.org/x/sys v0.20.0 // indirect
     google.golang.org/protobuf v1.31.0 // indirect
     gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
     gopkg.in/yaml.v3 v3.0.1 // indirect
go.sum (125 changed lines)
@@ -1,21 +1,65 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/PowerDNS/lmdb-go v1.9.2 h1:Cmgerh9y3ZKBZGz1irxSShhfmFyRUh+Zdk4cZk7ZJvU=
 github.com/PowerDNS/lmdb-go v1.9.2/go.mod h1:TE0l+EZK8Z1B4dx070ZxkWTlp8RG1mjN0/+FkFRQMtU=
+github.com/RoaringBitmap/gocroaring v0.4.0/go.mod h1:NieMwz7ZqwU2DD73/vvYwv7r4eWBKuPVSXZIpsaMwCI=
+github.com/RoaringBitmap/real-roaring-datasets v0.0.0-20190726190000-eb7c87156f76/go.mod h1:oM0MHmQ3nDsq609SS36p+oYbRi16+oVvU2Bw4Ipv0SE=
+github.com/RoaringBitmap/roaring v0.9.1/go.mod h1:h1B7iIUOmnAeb5ytYMvnHJwxMc6LUrwBnzXWRuqTQUc=
+github.com/RoaringBitmap/roaring v0.9.4/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA=
+github.com/RoaringBitmap/roaring v1.9.3 h1:t4EbC5qQwnisr5PrP9nt0IRhRTb9gMUgQF4t4S2OByM=
+github.com/RoaringBitmap/roaring v1.9.3/go.mod h1:6AXUsoIEzDTFFQCe1RbGA6uFONMhvejWj5rqITANK90=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
 github.com/aquasecurity/esquery v0.2.0 h1:9WWXve95TE8hbm3736WB7nS6Owl8UGDeu+0jiyE9ttA=
 github.com/aquasecurity/esquery v0.2.0/go.mod h1:VU+CIFR6C+H142HHZf9RUkp4Eedpo9UrEKeCQHWf9ao=
-github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
-github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/axiomhq/hyperloglog v0.0.0-20191112132149-a4c4c47bc57f/go.mod h1:2stgcRjl6QmW+gU2h5E7BQXg4HU0gzxKWDuT5HviN9s=
+github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27 h1:60m4tnanN1ctzIu4V3bfCNJ39BiOPSm1gHFlFjTkRE0=
+github.com/axiomhq/hyperloglog v0.0.0-20240507144631-af9851f82b27/go.mod h1:k08r+Yj1PRAmuayFiRK6MYuR5Ve4IuZtTfxErMIh0+c=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bits-and-blooms/bitset v1.12.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
+github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
+github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
+github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo=
+github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M=
+github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA=
+github.com/blevesearch/mmap-go v1.0.3/go.mod h1:pYvKl/grLQrBxuaRYgoTssa4rVujYYeenDp++2E+yvs=
+github.com/blevesearch/mmap-go v1.0.4 h1:OVhDhT5B/M1HNPpYPBKIEJaD0F3Si+CrEKULGCDPWmc=
+github.com/blevesearch/mmap-go v1.0.4/go.mod h1:EWmEAOmdAS9z/pi/+Toxu99DnsbhG1TIxUoRmJw/pSs=
+github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ=
+github.com/blevesearch/segment v0.9.1 h1:+dThDy+Lvgj5JMxhmOVlgFfkUtZV2kw49xax4+jTfSU=
+github.com/blevesearch/segment v0.9.1/go.mod h1:zN21iLm7+GnBHWTao9I+Au/7MBiL8pPFtJBJTsk6kQw=
+github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s=
+github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs=
+github.com/blevesearch/vellum v1.0.5/go.mod h1:atE0EH3fvk43zzS7t1YNdNC7DbmcC3uz+eMD5xZ2OyQ=
+github.com/blevesearch/vellum v1.0.7/go.mod h1:doBZpmRhwTsASB4QdUZANlJvqVAUdUyX0ZK7QJCTeBE=
+github.com/blevesearch/vellum v1.0.10 h1:HGPJDT2bTva12hrHepVT3rOyIKFFF4t7Gf6yMxyMIPI=
+github.com/blevesearch/vellum v1.0.10/go.mod h1:ul1oT0FhSMDIExNjIxHqJoGpVrBpKCdgDQNxfqgJt7k=
+github.com/blugelabs/bluge v0.2.2 h1:gat8CqE6P6tOgeX30XGLOVNTC26cpM2RWVcreXWtYcM=
+github.com/blugelabs/bluge v0.2.2/go.mod h1:am1LU9jS8dZgWkRzkGLQN3757EgMs3upWrU2fdN9foE=
+github.com/blugelabs/bluge_segment_api v0.2.0 h1:cCX1Y2y8v0LZ7+EEJ6gH7dW6TtVTW4RhG0vp3R+N2Lo=
+github.com/blugelabs/bluge_segment_api v0.2.0/go.mod h1:95XA+ZXfRj/IXADm7gZ+iTcWOJPg5jQTY1EReIzl3LA=
+github.com/blugelabs/ice v1.0.0 h1:um7wf9e6jbkTVCrOyQq3tKK43fBMOvLUYxbj3Qtc4eo=
+github.com/blugelabs/ice v1.0.0/go.mod h1:gNfFPk5zM+yxJROhthxhVQYjpBO9amuxWXJQ2Lo+IbQ=
+github.com/blugelabs/ice/v2 v2.0.1 h1:mzHbntLjk2v7eDRgoXCgzOsPKN1Tenu9Svo6l9cTLS4=
+github.com/blugelabs/ice/v2 v2.0.1/go.mod h1:QxAWSPNwZwsIqS25c3lbIPFQrVvT1sphf5x5DfMLH5M=
 github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U=
 github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
 github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2 h1:KdUfX2zKommPRa+PD0sWZUyXe9w277ABlgELO7H04IM=
 github.com/btcsuite/btcd/chaincfg/chainhash v1.0.2/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
+github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds=
+github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -30,6 +74,9 @@ github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWa
 github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
 github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
+github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
+github.com/dgryski/go-metro v0.0.0-20211217172704-adc40b04c140 h1:y7y0Oa6UawqTFPCDw9JG6pdKt4F9pAhHv0B7FMGaGD0=
+github.com/dgryski/go-metro v0.0.0-20211217172704-adc40b04c140/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
@@ -46,6 +93,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
 github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
 github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
@@ -57,6 +106,7 @@ github.com/gobwas/ws v1.3.1 h1:Qi34dfLMWJbiKaNbDVzM9x27nZBjmkaW6i4+Ku+pGVU=
 github.com/gobwas/ws v1.3.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
 github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
@@ -76,6 +126,7 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
 github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg=
@@ -90,6 +141,9 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
 github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/influxdata/influxdb v1.7.6/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY=
 github.com/jgroeneveld/schema v1.0.0 h1:J0E10CrOkiSEsw6dfb1IfrDJD14pf6QLVJ3tRPl/syI=
 github.com/jgroeneveld/schema v1.0.0/go.mod h1:M14lv7sNMtGvo3ops1MwslaSYgDYxrSmbzWIQ0Mr5rs=
 github.com/jgroeneveld/trial v2.0.0+incompatible h1:d59ctdgor+VqdZCAiUfVN8K13s0ALDioG5DWwZNtRuQ=
@@ -98,26 +152,38 @@ github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
 github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
-github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
+github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4=
+github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
 github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
 github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI=
 github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
-github.com/nbd-wtf/go-nostr v0.28.5 h1:5vBAFKGVJ6Rhq2Jrtj+v+j8bUVLdsao5SFdBIQ7PJR4=
-github.com/nbd-wtf/go-nostr v0.28.5/go.mod h1:aFcp8NO3erHg+glzBfh4wpaMrV1/ahcUPAgITdptxwA=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
+github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
+github.com/nbd-wtf/go-nostr v0.34.0 h1:E7tDHFx42gvWwFv1Eysn+NxJqGLmo21x/VEwj2+F21E=
+github.com/nbd-wtf/go-nostr v0.34.0/go.mod h1:NZQkxl96ggbO8rvDpVjcsojJqKTPwqhP4i82O7K5DJs=
+github.com/opensearch-project/opensearch-go/v4 v4.0.0 h1:Nrh30HhaknKcaPcIzlqA6Jf0CBgWP5XUaSp0HMsRBlA=
+github.com/opensearch-project/opensearch-go/v4 v4.0.0/go.mod h1:amlBgHgAX9AwwW50eOuzYa5n/8aD18LoWO8eDLoe8KQ=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -125,15 +191,25 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/puzpuzpuz/xsync/v3 v3.0.2 h1:3yESHrRFYr6xzkz61LLkvNiPFXxJEAABanTQpKbAaew=
 github.com/puzpuzpuz/xsync/v3 v3.0.2/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
+github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM=
 github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
 github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
@@ -141,20 +217,33 @@ github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JT
 github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
 github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
 github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
+github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
 github.com/urfave/cli/v3 v3.0.0-alpha7 h1:dj+WjtBA2StTinGwue+o2oyFFvo8aQ/AGb5MYvUqk/8=
 github.com/urfave/cli/v3 v3.0.0-alpha7/go.mod h1:0kK/RUFHyh+yIKSfWxwheGndfnrvYSmYFVeKCh03ZUc=
+github.com/wI2L/jsondiff v0.5.1 h1:xS4zYUspH4U3IB0Lwo9+jv+MSRJSWMF87Y4BpDbFMHo=
+github.com/wI2L/jsondiff v0.5.1/go.mod h1:qqG6hnK0Lsrz2BpIVCxWiK9ItsBCpIZQiv0izJjOZ9s=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
 github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||||
|
go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
|
||||||
|
go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
|
||||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||||
|
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
|
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
|
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
|
||||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
|
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
|
||||||
|
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
@@ -177,18 +266,28 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
|
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
|
||||||
|
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
|
golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
|
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
|
||||||
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
|
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
|
||||||
|
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||||
|
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
|
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
@@ -199,6 +298,11 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
|||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
|
||||||
|
gonum.org/v1/gonum v0.7.0 h1:Hdks0L0hgznZLG9nzXb8vZ0rRvqNvAcgAp84y7Mwkgw=
|
||||||
|
gonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM=
|
||||||
|
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||||
|
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||||
@@ -231,3 +335,4 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
|||||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
|
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||||
|
|||||||
@@ -46,34 +46,37 @@ func (b *LMDBBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int
 		for {
 			// we already have a k and a v and an err from the cursor setup, so check and use these
-			if iterr != nil || !bytes.HasPrefix(k, q.prefix) {
-				break
+			if iterr != nil ||
+				len(k) != q.prefixSize+q.timestampSize ||
+				!bytes.Equal(k[:q.prefixSize], q.prefix) {
+				// either iteration has errored or we reached the end of this prefix
+				break // stop this cursor and move to the next one
 			}
 
 			// "id" indexes don't contain a timestamp
-			if !q.skipTimestamp {
+			if q.timestampSize == 4 {
 				createdAt := binary.BigEndian.Uint32(k[len(k)-4:])
 				if createdAt < since {
 					break
 				}
 			}
 
-			// fetch actual event
-			val, err := txn.Get(b.rawEventStore, idx)
-			if err != nil {
-				panic(err)
-			}
-
 			if extraFilter == nil {
 				count++
 			} else {
+				// fetch actual event
+				val, err := txn.Get(b.rawEventStore, idx)
+				if err != nil {
+					panic(err)
+				}
+
 				evt := &nostr.Event{}
 				if err := nostr_binary.Unmarshal(val, evt); err != nil {
 					return err
 				}
 
 				// check if this matches the other filters that were not part of the index
-				if extraFilter == nil || extraFilter.Matches(evt) {
+				if extraFilter.Matches(evt) {
 					count++
 				}
@@ -18,9 +18,10 @@ type query struct {
 	i             int
 	dbi           lmdb.DBI
 	prefix        []byte
-	startingPoint []byte
 	results       chan *nostr.Event
-	skipTimestamp bool
+	prefixSize    int
+	timestampSize int
+	startingPoint []byte
 }
 
 type queryEvent struct {
@@ -29,17 +30,32 @@ type queryEvent struct {
 }
 
 func (b *LMDBBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
+	ch := make(chan *nostr.Event)
+
 	queries, extraFilter, since, err := b.prepareQueries(filter)
 	if err != nil {
 		return nil, err
 	}
 
-	ch := make(chan *nostr.Event)
+	// max number of events we'll return
+	limit := b.MaxLimit / 4
+	if filter.Limit > 0 && filter.Limit < b.MaxLimit {
+		limit = filter.Limit
+	}
+
+	if filter.Search != "" {
+		close(ch)
+		return ch, nil
+	}
 
 	go func() {
 		defer close(ch)
 
 		for _, q := range queries {
 			q := q
 
+			pulled := 0 // this will be hard-capped at the global limit of the query
+
 			go b.lmdbEnv.View(func(txn *lmdb.Txn) error {
 				txn.RawRead = true
 				defer close(q.results)
@@ -70,16 +86,18 @@ func (b *LMDBBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (cha
 
 				for {
 					// we already have a k and a v and an err from the cursor setup, so check and use these
-					if iterr != nil || !bytes.HasPrefix(k, q.prefix) {
+					if iterr != nil ||
+						len(k) != q.prefixSize+q.timestampSize ||
+						!bytes.Equal(k[:q.prefixSize], q.prefix) {
 						// either iteration has errored or we reached the end of this prefix
-						break // stop this cursor and move to the next one
+						return nil
 					}
 
 					// "id" indexes don't contain a timestamp
-					if !q.skipTimestamp {
+					if q.timestampSize == 4 {
 						createdAt := binary.BigEndian.Uint32(k[len(k)-4:])
 						if createdAt < since {
-							break
+							return nil
 						}
 					}
 
@@ -89,45 +107,42 @@ func (b *LMDBBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (cha
 						log.Printf(
 							"lmdb: failed to get %x based on prefix %x, index key %x from raw event store: %s\n",
 							idx, q.prefix, k, err)
-						break
+						return fmt.Errorf("error: %w", err)
 					}
 
 					evt := &nostr.Event{}
 					if err := nostr_binary.Unmarshal(val, evt); err != nil {
 						log.Printf("lmdb: value read error (id %x): %s\n", val[0:32], err)
-						break
+						return fmt.Errorf("error: %w", err)
 					}
 
 					// check if this matches the other filters that were not part of the index before yielding
 					if extraFilter == nil || extraFilter.Matches(evt) {
 						select {
 						case q.results <- evt:
+							pulled++
+							if pulled >= limit {
+								return nil
+							}
 						case <-ctx.Done():
-							break
+							return nil
 						}
 					}
 
 					// move one back (we'll look into k and v and err in the next iteration)
 					k, idx, iterr = cursor.Get(nil, nil, lmdb.Prev)
 				}
-				return nil
 			})
 		}
 		if err != nil {
 			log.Printf("lmdb: error on cursor iteration: %v\n", err)
 		}
 
-		// max number of events we'll return
-		limit := b.MaxLimit
-		if filter.Limit > 0 && filter.Limit < limit {
-			limit = filter.Limit
-		}
-
 		// receive results and ensure we only return the most recent ones always
 		emittedEvents := 0
 
 		// first pass
-		emitQueue := make(priorityQueue, 0, len(queries)+limit)
+		emitQueue := make(priorityQueue, 0, len(queries))
 		for _, q := range queries {
 			evt, ok := <-q.results
 			if ok {
@@ -216,7 +231,7 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
 				return nil, nil, 0, fmt.Errorf("invalid id '%s'", idHex)
 			}
 			prefix, _ := hex.DecodeString(idHex[0 : 8*2])
-			queries[i] = query{i: i, dbi: b.indexId, prefix: prefix, skipTimestamp: true}
+			queries[i] = query{i: i, dbi: b.indexId, prefix: prefix, prefixSize: 8, timestampSize: 0}
 		}
 	} else if len(filter.Authors) > 0 {
 		if len(filter.Kinds) == 0 {
@@ -226,7 +241,7 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
 				return nil, nil, 0, fmt.Errorf("invalid pubkey '%s'", pubkeyHex)
 			}
 			prefix, _ := hex.DecodeString(pubkeyHex[0 : 8*2])
-			queries[i] = query{i: i, dbi: b.indexPubkey, prefix: prefix}
+			queries[i] = query{i: i, dbi: b.indexPubkey, prefix: prefix, prefixSize: 8, timestampSize: 4}
 		}
 	} else {
 		queries = make([]query, len(filter.Authors)*len(filter.Kinds))
@@ -238,7 +253,7 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
 			}
 			pubkey, _ := hex.DecodeString(pubkeyHex[0 : 8*2])
 			prefix := binary.BigEndian.AppendUint16(pubkey, uint16(kind))
-			queries[i] = query{i: i, dbi: b.indexPubkeyKind, prefix: prefix}
+			queries[i] = query{i: i, dbi: b.indexPubkeyKind, prefix: prefix, prefixSize: 10, timestampSize: 4}
 			i++
 		}
 	}
@@ -264,7 +279,7 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
 			dbi, k, offset := b.getTagIndexPrefix(value)
 			// remove the last parts part to get just the prefix we want here
 			prefix := k[0:offset]
-			queries[i] = query{i: i, dbi: dbi, prefix: prefix}
+			queries[i] = query{i: i, dbi: dbi, prefix: prefix, prefixSize: len(prefix), timestampSize: 4}
 			i++
 		}
 	}
@@ -273,12 +288,12 @@ func (b *LMDBBackend) prepareQueries(filter nostr.Filter) (
 		for i, kind := range filter.Kinds {
 			prefix := make([]byte, 2)
 			binary.BigEndian.PutUint16(prefix[:], uint16(kind))
-			queries[i] = query{i: i, dbi: b.indexKind, prefix: prefix}
+			queries[i] = query{i: i, dbi: b.indexKind, prefix: prefix, prefixSize: 2, timestampSize: 4}
 		}
 	} else {
 		queries = make([]query, 1)
 		prefix := make([]byte, 0)
-		queries[0] = query{i: 0, dbi: b.indexCreatedAt, prefix: prefix}
+		queries[0] = query{i: 0, dbi: b.indexCreatedAt, prefix: prefix, prefixSize: 0, timestampSize: 4}
 		extraFilter = nil
 	}
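The new prefixSize/timestampSize fields replace the old skipTimestamp flag by describing the exact shape of each LMDB index key: some prefix bytes followed by an optional 4-byte big-endian created_at. A minimal sketch of that check follows; matchesIndexKey is a hypothetical name used only for illustration and is not part of this change.

package example

import (
	"bytes"
	"encoding/binary"
)

// matchesIndexKey mirrors the length/prefix test used by the cursor loops above and,
// when the index carries a timestamp suffix, extracts created_at from it.
func matchesIndexKey(key, prefix []byte, prefixSize, timestampSize int) (uint32, bool) {
	if len(key) != prefixSize+timestampSize || !bytes.Equal(key[:prefixSize], prefix) {
		return 0, false // wrong length or different prefix: this cursor is past its range
	}
	if timestampSize == 4 {
		return binary.BigEndian.Uint32(key[prefixSize:]), true
	}
	return 0, true // "id" index keys carry no timestamp
}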
@@ -19,7 +19,7 @@ func (b MySQLBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (ch
 		return nil, err
 	}
 
-	rows, err := b.DB.Query(query, params...)
+	rows, err := b.DB.QueryContext(ctx, query, params...)
 	if err != nil && err != sql.ErrNoRows {
 		close(ch)
 		return nil, fmt.Errorf("failed to fetch events using query %q: %w", query, err)
@@ -55,7 +55,7 @@ func (b MySQLBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int
 	}
 
 	var count int64
-	if err = b.DB.QueryRow(query, params...).Scan(&count); err != nil && err != sql.ErrNoRows {
+	if err = b.DB.QueryRowContext(ctx, query, params...).Scan(&count); err != nil && err != sql.ErrNoRows {
 		return 0, fmt.Errorf("failed to fetch events using query %q: %w", query, err)
 	}
 	return count, nil
@@ -158,7 +158,7 @@ func (b MySQLBackend) queryEventsSql(filter nostr.Filter, doCount bool) (string,
 			COUNT(*)
 		FROM event WHERE `+
 			strings.Join(conditions, " AND ")+
-			" ORDER BY created_at DESC LIMIT ?")
+			" LIMIT ?")
 	} else {
 		query = sqlx.Rebind(sqlx.BindType("mysql"), `SELECT
 			id, pubkey, created_at, kind, tags, content, sig
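The same two changes recur in the Postgres and SQLite backends further down: queries now go through QueryContext/QueryRowContext so they are tied to the caller's context, and the COUNT branch drops the unnecessary ORDER BY. A minimal sketch of the practical effect, assuming a plain database/sql handle (names and query are illustrative only):

package example

import (
	"context"
	"database/sql"
	"time"
)

// countKind shows why the context-aware variants matter: the driver can abandon the
// statement when the deadline passes or the caller cancels, instead of running it to completion.
func countKind(ctx context.Context, db *sql.DB, kind int) (int64, error) {
	ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
	defer cancel()

	var count int64
	err := db.QueryRowContext(ctx, "SELECT COUNT(*) FROM event WHERE kind = ?", kind).Scan(&count)
	return count, err
}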
opensearch/opensearch.go (new file, 207 lines)
@@ -0,0 +1,207 @@
package opensearch

import (
	"bytes"
	"context"
	"crypto/tls"
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/fiatjaf/eventstore"
	"github.com/nbd-wtf/go-nostr"
	"github.com/opensearch-project/opensearch-go/v4"
	"github.com/opensearch-project/opensearch-go/v4/opensearchapi"
	"github.com/opensearch-project/opensearch-go/v4/opensearchutil"
)

var _ eventstore.Store = (*OpensearchStorage)(nil)

type IndexedEvent struct {
	Event         nostr.Event `json:"event"`
	ContentSearch string      `json:"content_search"`
}

var indexMapping = `
{
	"settings": {
		"number_of_shards": 1,
		"number_of_replicas": 0
	},
	"mappings": {
		"dynamic": false,
		"properties": {
			"event": {
				"dynamic": false,
				"properties": {
					"id": {"type": "keyword"},
					"pubkey": {"type": "keyword"},
					"kind": {"type": "integer"},
					"tags": {"type": "keyword"},
					"created_at": {"type": "date"}
				}
			},
			"content_search": {"type": "text"}
		}
	}
}
`

type OpensearchStorage struct {
	URL       string
	IndexName string
	Insecure  bool

	client *opensearchapi.Client
	bi     opensearchutil.BulkIndexer
}

func (oss *OpensearchStorage) Close() {}

func (oss *OpensearchStorage) Init() error {
	if oss.IndexName == "" {
		oss.IndexName = "events"
	}

	cfg := opensearchapi.Config{}
	if oss.URL != "" {
		cfg.Client.Addresses = strings.Split(oss.URL, ",")
	}
	if oss.Insecure {
		transport := http.DefaultTransport.(*http.Transport).Clone()
		transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
		cfg.Client.Transport = transport
	}

	client, err := opensearchapi.NewClient(cfg)
	if err != nil {
		return err
	}

	ctx := context.Background()
	createIndexResponse, err := client.Indices.Create(
		ctx,
		opensearchapi.IndicesCreateReq{
			Index: oss.IndexName,
			Body:  strings.NewReader(indexMapping),
		},
	)
	if err != nil {
		var opensearchError *opensearch.StructError

		// Load err into opensearch.Error to access the fields and tolerate if the index already exists
		if errors.As(err, &opensearchError) {
			if opensearchError.Err.Type != "resource_already_exists_exception" {
				return err
			}
		} else {
			return err
		}
	}
	fmt.Printf("Created Index: %s\n Shards Acknowledged: %t\n", createIndexResponse.Index, createIndexResponse.ShardsAcknowledged)

	// bulk indexer
	bi, err := opensearchutil.NewBulkIndexer(opensearchutil.BulkIndexerConfig{
		Index:         oss.IndexName,
		Client:        client,
		NumWorkers:    2,
		FlushInterval: 3 * time.Second,
	})
	if err != nil {
		return fmt.Errorf("error creating the indexer: %s", err)
	}

	oss.client = client
	oss.bi = bi

	return nil
}

func (oss *OpensearchStorage) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
	done := make(chan error)
	err := oss.bi.Add(
		ctx,
		opensearchutil.BulkIndexerItem{
			Action:     "delete",
			DocumentID: evt.ID,
			OnSuccess: func(ctx context.Context, item opensearchutil.BulkIndexerItem, res opensearchapi.BulkRespItem) {
				close(done)
			},
			OnFailure: func(ctx context.Context, item opensearchutil.BulkIndexerItem, res opensearchapi.BulkRespItem, err error) {
				if err != nil {
					done <- err
				} else {
					// ok if deleted item not found
					if res.Status == 404 {
						close(done)
						return
					}
					txt, _ := json.Marshal(res)
					err := fmt.Errorf("ERROR: %s", txt)
					done <- err
				}
			},
		},
	)
	if err != nil {
		return err
	}

	err = <-done
	return err
}

func (oss *OpensearchStorage) SaveEvent(ctx context.Context, evt *nostr.Event) error {
	ie := &IndexedEvent{
		Event: *evt,
	}

	// post processing: index for FTS
	// some ideas:
	// - index kind=0 fields a set of dedicated mapped fields
	//   (or use a separate index for profiles with a dedicated mapping)
	// - if it's valid JSON just index the "values" and not the keys
	// - more content introspection: language detection
	// - denormalization... attach profile + ranking signals to events
	if evt.Kind != 4 {
		ie.ContentSearch = evt.Content
	}

	data, err := json.Marshal(ie)
	if err != nil {
		return err
	}

	done := make(chan error)

	// adapted from:
	// https://github.com/elastic/go-elasticsearch/blob/main/_examples/bulk/indexer.go#L196
	err = oss.bi.Add(
		ctx,
		opensearchutil.BulkIndexerItem{
			Action:     "index",
			DocumentID: evt.ID,
			Body:       bytes.NewReader(data),
			OnSuccess: func(ctx context.Context, item opensearchutil.BulkIndexerItem, res opensearchapi.BulkRespItem) {
				close(done)
			},
			OnFailure: func(ctx context.Context, item opensearchutil.BulkIndexerItem, res opensearchapi.BulkRespItem, err error) {
				if err != nil {
					done <- err
				} else {
					err := fmt.Errorf("ERROR: %s: %s", res.Error.Type, res.Error.Reason)
					done <- err
				}
			},
		},
	)
	if err != nil {
		return err
	}

	err = <-done
	return err
}
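A rough usage sketch for the new OpenSearch backend follows. The import path assumes the package lands at github.com/fiatjaf/eventstore/opensearch, as the file layout suggests; the URL, index name, and event values are placeholders, not taken from this commit.

package main

import (
	"context"
	"log"

	"github.com/fiatjaf/eventstore/opensearch"
	"github.com/nbd-wtf/go-nostr"
)

func main() {
	store := &opensearch.OpensearchStorage{URL: "https://localhost:9200", IndexName: "events", Insecure: true}
	if err := store.Init(); err != nil {
		log.Fatal(err)
	}
	defer store.Close()

	// a real event would be signed and carry an ID, since the ID is used as the document ID
	evt := &nostr.Event{Kind: 1, Content: "hello opensearch"}
	if err := store.SaveEvent(context.Background(), evt); err != nil {
		log.Println("save failed:", err)
	}
}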
opensearch/query.go (new file, 235 lines)
@@ -0,0 +1,235 @@
package opensearch

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"reflect"

	"github.com/aquasecurity/esquery"
	"github.com/nbd-wtf/go-nostr"
	"github.com/opensearch-project/opensearch-go/v4/opensearchapi"
	"github.com/opensearch-project/opensearch-go/v4/opensearchutil"
)

func buildDsl(filter nostr.Filter) ([]byte, error) {
	dsl := esquery.Bool()

	prefixFilter := func(fieldName string, values []string) {
		if len(values) == 0 {
			return
		}
		prefixQ := esquery.Bool()
		for _, v := range values {
			if len(v) < 64 {
				prefixQ.Should(esquery.Prefix(fieldName, v))
			} else {
				prefixQ.Should(esquery.Term(fieldName, v))
			}
		}
		dsl.Must(prefixQ)
	}

	// ids
	prefixFilter("event.id", filter.IDs)

	// authors
	prefixFilter("event.pubkey", filter.Authors)

	// kinds
	if len(filter.Kinds) > 0 {
		dsl.Must(esquery.Terms("event.kind", toInterfaceSlice(filter.Kinds)...))
	}

	// tags
	if len(filter.Tags) > 0 {
		tagQ := esquery.Bool()
		for char, terms := range filter.Tags {
			vs := toInterfaceSlice(append(terms, char))
			tagQ.Should(esquery.Terms("event.tags", vs...))
		}
		dsl.Must(tagQ)
	}

	// since
	if filter.Since != nil {
		dsl.Must(esquery.Range("event.created_at").Gte(filter.Since))
	}

	// until
	if filter.Until != nil {
		dsl.Must(esquery.Range("event.created_at").Lte(filter.Until))
	}

	// search
	if filter.Search != "" {
		dsl.Must(esquery.Match("content_search", filter.Search))
	}

	return json.Marshal(esquery.Query(dsl))
}

func (oss *OpensearchStorage) getByID(filter nostr.Filter) ([]*nostr.Event, error) {
	ctx := context.Background()
	mgetResponse, err := oss.client.MGet(
		ctx,
		opensearchapi.MGetReq{
			Body:  opensearchutil.NewJSONReader(filter),
			Index: oss.IndexName,
		},
	)
	if err != nil {
		return nil, err
	}

	events := make([]*nostr.Event, 0, len(mgetResponse.Docs))
	for _, e := range mgetResponse.Docs {
		if e.Found {
			if b, err := e.Source.MarshalJSON(); err == nil {
				var payload struct {
					Event nostr.Event `json:"event"`
				}
				if err = json.Unmarshal(b, &payload); err == nil {
					events = append(events, &payload.Event)
				}
			}
		}
	}

	return events, nil
}

func (oss *OpensearchStorage) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
	ch := make(chan *nostr.Event)

	// optimization: get by id
	if isGetByID(filter) {
		if evts, err := oss.getByID(filter); err == nil {
			for _, evt := range evts {
				ch <- evt
			}
			close(ch)
			ch = nil
		} else {
			return nil, fmt.Errorf("error getting by id: %w", err)
		}
	}

	dsl, err := buildDsl(filter)
	if err != nil {
		return nil, err
	}

	limit := 1000
	if filter.Limit > 0 && filter.Limit < limit {
		limit = filter.Limit
	}

	ctx = context.Background()
	searchResponse, err := oss.client.Search(
		ctx,
		&opensearchapi.SearchReq{
			Indices: []string{oss.IndexName},
			Body:    bytes.NewReader(dsl),
			Params: opensearchapi.SearchParams{
				Size: opensearchapi.ToPointer(limit),
				Sort: []string{"event.created_at:desc"},
			},
		},
	)
	if err != nil {
		return nil, err
	}

	go func() {
		for _, e := range searchResponse.Hits.Hits {
			if b, err := e.Source.MarshalJSON(); err == nil {
				var payload struct {
					Event nostr.Event `json:"event"`
				}
				if err = json.Unmarshal(b, &payload); err == nil {
					ch <- &payload.Event
				}
			}
		}
		if ch != nil {
			close(ch)
			ch = nil
		}
	}()

	return ch, nil
}

func isGetByID(filter nostr.Filter) bool {
	isGetById := len(filter.IDs) > 0 &&
		len(filter.Authors) == 0 &&
		len(filter.Kinds) == 0 &&
		len(filter.Tags) == 0 &&
		len(filter.Search) == 0 &&
		filter.Since == nil &&
		filter.Until == nil

	if isGetById {
		for _, id := range filter.IDs {
			if len(id) != 64 {
				return false
			}
		}
	}
	return isGetById
}

// from: https://stackoverflow.com/a/12754757
func toInterfaceSlice(slice interface{}) []interface{} {
	s := reflect.ValueOf(slice)
	if s.Kind() != reflect.Slice {
		panic("InterfaceSlice() given a non-slice type")
	}

	// Keep the distinction between nil and empty slice input
	if s.IsNil() {
		return nil
	}

	ret := make([]interface{}, s.Len())

	for i := 0; i < s.Len(); i++ {
		ret[i] = s.Index(i).Interface()
	}

	return ret
}

func (oss *OpensearchStorage) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
	count := int64(0)

	// optimization: get by id
	if isGetByID(filter) {
		if evts, err := oss.getByID(filter); err == nil {
			count += int64(len(evts))
		} else {
			return 0, fmt.Errorf("error getting by id: %w", err)
		}
	}

	dsl, err := buildDsl(filter)
	if err != nil {
		return 0, err
	}

	ctx = context.Background()
	countRes, err := oss.client.Indices.Count(
		ctx,
		&opensearchapi.IndicesCountReq{
			Indices: []string{oss.IndexName},
			Body:    bytes.NewReader(dsl),
		},
	)
	if err != nil {
		return 0, err
	}

	return int64(countRes.Count) + count, nil
}
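A small package-internal sketch, not part of this commit, for eyeballing the DSL that buildDsl produces; the JSON in the comment indicates the general esquery output shape and is not captured output.

package opensearch

import (
	"fmt"
	"testing"

	"github.com/nbd-wtf/go-nostr"
)

func TestBuildDslShape(t *testing.T) {
	dsl, err := buildDsl(nostr.Filter{Kinds: []int{1}, Search: "hello"})
	if err != nil {
		t.Fatal(err)
	}
	// roughly: {"query":{"bool":{"must":[{"terms":{"event.kind":[1]}},{"match":{"content_search":{"query":"hello"}}}]}}}
	fmt.Println(string(dsl))
}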
@@ -12,6 +12,7 @@ type PostgresBackend struct {
 	QueryAuthorsLimit int
 	QueryKindsLimit   int
 	QueryTagsLimit    int
+	KeepRecentEvents  bool
 }
 
 func (b *PostgresBackend) Close() {
@@ -16,7 +16,7 @@ func (b PostgresBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (
 		return nil, err
 	}
 
-	rows, err := b.DB.Query(query, params...)
+	rows, err := b.DB.QueryContext(ctx, query, params...)
 	if err != nil && err != sql.ErrNoRows {
 		return nil, fmt.Errorf("failed to fetch events using query %q: %w", query, err)
 	}
@@ -52,7 +52,7 @@ func (b PostgresBackend) CountEvents(ctx context.Context, filter nostr.Filter) (
 	}
 
 	var count int64
-	if err = b.DB.QueryRow(query, params...).Scan(&count); err != nil && err != sql.ErrNoRows {
+	if err = b.DB.QueryRowContext(ctx, query, params...).Scan(&count); err != nil && err != sql.ErrNoRows {
 		return 0, fmt.Errorf("failed to fetch events using query %q: %w", query, err)
 	}
 	return count, nil
@@ -87,7 +87,7 @@ func (b PostgresBackend) queryEventsSql(filter nostr.Filter, doCount bool) (stri
 		for _, v := range filter.Authors {
 			params = append(params, v)
 		}
-		conditions = append(conditions, ` pubkey IN (`+makePlaceHolders(len(filter.IDs))+`)`)
+		conditions = append(conditions, ` pubkey IN (`+makePlaceHolders(len(filter.Authors))+`)`)
 	}
 
 	if len(filter.Kinds) > 0 {
@@ -123,7 +123,7 @@ func (b PostgresBackend) queryEventsSql(filter nostr.Filter, doCount bool) (stri
 			params = append(params, tagValue)
 		}
 
-		conditions = append(conditions, `tagvalues && ARRAY[`+makePlaceHolders(len(tagQuery))+`]`)
+		conditions = append(conditions, `tagvalues @> ARRAY[`+makePlaceHolders(len(tagQuery))+`]`)
 	}
 
 	if filter.Since != nil {
@@ -156,7 +156,7 @@ func (b PostgresBackend) queryEventsSql(filter nostr.Filter, doCount bool) (stri
 			COUNT(*)
 		FROM event WHERE `+
 			strings.Join(conditions, " AND ")+
-			" ORDER BY created_at DESC LIMIT ?")
+			" LIMIT ?")
 	} else {
 		query = sqlx.Rebind(sqlx.BindType("postgres"), `SELECT
 			id, pubkey, created_at, kind, tags, content, sig
@@ -32,6 +32,9 @@ func (b *PostgresBackend) BeforeSave(ctx context.Context, evt *nostr.Event) {
 }
 
 func (b *PostgresBackend) AfterSave(evt *nostr.Event) {
+	if b.KeepRecentEvents {
+		return
+	}
 	// delete all but the 100 most recent ones for each key
 	b.DB.Exec(`DELETE FROM event WHERE pubkey = $1 AND kind = $2 AND created_at < (
 		SELECT created_at FROM event WHERE pubkey = $1
@@ -23,25 +23,53 @@ func (w RelayWrapper) Publish(ctx context.Context, evt nostr.Event) error {
 		if err != nil {
 			return fmt.Errorf("failed to query before replacing: %w", err)
 		}
-		if previous := <-ch; previous != nil && isOlder(previous, &evt) {
-			if err := w.Store.DeleteEvent(ctx, previous); err != nil {
-				return fmt.Errorf("failed to delete event for replacing: %w", err)
+		isNewer := true
+		for previous := range ch {
+			if previous == nil {
+				continue
 			}
+			if isOlder(previous, &evt) {
+				if err := w.Store.DeleteEvent(ctx, previous); err != nil {
+					return fmt.Errorf("failed to delete event for replacing: %w", err)
+				}
+			} else {
+				// already, newer event is stored.
+				isNewer = false
+				break
+			}
+		}
+		if !isNewer {
+			return nil
 		}
 	} else if 30000 <= evt.Kind && evt.Kind < 40000 {
 		// parameterized replaceable event, delete before storing
 		d := evt.Tags.GetFirst([]string{"d", ""})
-		if d != nil {
-			ch, err := w.Store.QueryEvents(ctx, nostr.Filter{Authors: []string{evt.PubKey}, Kinds: []int{evt.Kind}, Tags: nostr.TagMap{"d": []string{d.Value()}}})
-			if err != nil {
-				return fmt.Errorf("failed to query before parameterized replacing: %w", err)
+		if d == nil {
+			return fmt.Errorf("failed to add event missing d tag for parameterized replacing")
+		}
+		ch, err := w.Store.QueryEvents(ctx, nostr.Filter{Authors: []string{evt.PubKey}, Kinds: []int{evt.Kind}, Tags: nostr.TagMap{"d": []string{d.Value()}}})
+		if err != nil {
+			return fmt.Errorf("failed to query before parameterized replacing: %w", err)
+		}
+		isNewer := true
+		for previous := range ch {
+			if previous == nil {
+				continue
 			}
-			if previous := <-ch; previous != nil && isOlder(previous, &evt) {
-				if err := w.Store.DeleteEvent(ctx, previous); err != nil {
-					return fmt.Errorf("failed to delete event for parameterized replacing: %w", err)
-				}
+			if !isOlder(previous, &evt) {
+				if err := w.Store.DeleteEvent(ctx, previous); err != nil {
+					return fmt.Errorf("failed to delete event for parameterized replacing: %w", err)
+				}
+			} else {
+				// already, newer event is stored.
+				isNewer = false
+				break
 			}
 		}
+		if !isNewer {
+			return nil
+		}
 	}
 
 	if err := w.SaveEvent(ctx, &evt); err != nil && err != ErrDupEvent {
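The rewritten Publish now drains every previous event from the channel and gives up as soon as a stored event is no longer older than the incoming one. For reference, a sketch of the isOlder helper it relies on; the helper is defined elsewhere in this package, and the ID tie-break shown here is an assumption rather than something confirmed by this diff.

package example

import "github.com/nbd-wtf/go-nostr"

// isOlder (sketch): an event is "older" if its created_at is smaller, with the ID
// possibly used as a tie-breaker when timestamps are equal.
func isOlder(previous, next *nostr.Event) bool {
	return previous.CreatedAt < next.CreatedAt ||
		(previous.CreatedAt == next.CreatedAt && previous.ID > next.ID)
}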
@@ -54,6 +54,7 @@ func (b *SliceStore) QueryEvents(ctx context.Context, filter nostr.Filter) (chan
 		if count == filter.Limit {
 			break
 		}
+
 		if filter.Matches(event) {
 			select {
 			case ch <- event:
@@ -16,7 +16,7 @@ func (b SQLite3Backend) QueryEvents(ctx context.Context, filter nostr.Filter) (c
 		return nil, err
 	}
 
-	rows, err := b.DB.Query(query, params...)
+	rows, err := b.DB.QueryContext(ctx, query, params...)
 	if err != nil && err != sql.ErrNoRows {
 		return nil, fmt.Errorf("failed to fetch events using query %q: %w", query, err)
 	}
@@ -52,7 +52,7 @@ func (b SQLite3Backend) CountEvents(ctx context.Context, filter nostr.Filter) (i
 	}
 
 	var count int64
-	if err = b.DB.QueryRow(query, params...).Scan(&count); err != nil && err != sql.ErrNoRows {
+	if err = b.DB.QueryRowContext(ctx, query, params...).Scan(&count); err != nil && err != sql.ErrNoRows {
 		return 0, fmt.Errorf("failed to fetch events using query %q: %w", query, err)
 	}
 	return count, nil
@@ -75,7 +75,7 @@ func (b SQLite3Backend) queryEventsSql(filter nostr.Filter, doCount bool) (strin
 		for _, v := range filter.IDs {
 			params = append(params, v)
 		}
-		conditions = append(conditions, ` id IN (`+makePlaceHolders(len(filter.IDs))+`)`)
+		conditions = append(conditions, `id IN (`+makePlaceHolders(len(filter.IDs))+`)`)
 	}
 
 	if len(filter.Authors) > 0 {
@@ -87,7 +87,7 @@ func (b SQLite3Backend) queryEventsSql(filter nostr.Filter, doCount bool) (strin
 		for _, v := range filter.Authors {
 			params = append(params, v)
 		}
-		conditions = append(conditions, ` pubkey IN (`+makePlaceHolders(len(filter.Authors))+`)`)
+		conditions = append(conditions, `pubkey IN (`+makePlaceHolders(len(filter.Authors))+`)`)
 	}
 
 	if len(filter.Kinds) > 0 {
@@ -155,7 +155,7 @@ func (b SQLite3Backend) queryEventsSql(filter nostr.Filter, doCount bool) (strin
 			COUNT(*)
 		FROM event WHERE `+
 			strings.Join(conditions, " AND ")+
-			" ORDER BY created_at DESC LIMIT ?")
+			" LIMIT ?")
 	} else {
 		query = sqlx.Rebind(sqlx.BindType("sqlite3"), `SELECT
 			id, pubkey, created_at, kind, tags, content, sig
strfry/lib.go (new file, 160 lines)
@@ -0,0 +1,160 @@
package strfry

import (
	"bytes"
	"context"
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"strings"

	"github.com/fiatjaf/eventstore"
	"github.com/mailru/easyjson"
	"github.com/nbd-wtf/go-nostr"
)

var _ eventstore.Store = (*StrfryBackend)(nil)

type StrfryBackend struct {
	ConfigPath     string
	ExecutablePath string
}

func (s *StrfryBackend) Init() error {
	if s.ExecutablePath == "" {
		configPath := filepath.Dir(s.ConfigPath)
		os.Setenv("PATH", configPath+":"+os.Getenv("PATH"))
		exe, err := exec.LookPath("strfry")
		if err != nil {
			return fmt.Errorf("failed to find strfry executable: %w (better provide it manually)", err)
		}
		s.ExecutablePath = exe
	}

	return nil
}

func (_ StrfryBackend) Close() {}

func (s StrfryBackend) QueryEvents(ctx context.Context, filter nostr.Filter) (chan *nostr.Event, error) {
	stdout, err := s.baseStrfryScan(ctx, filter)
	if err != nil {
		return nil, err
	}

	ch := make(chan *nostr.Event)
	go func() {
		defer close(ch)
		for {
			line, err := stdout.ReadBytes('\n')
			if err != nil {
				break
			}

			evt := &nostr.Event{}
			easyjson.Unmarshal(line, evt)
			if evt.ID == "" {
				continue
			}

			ch <- evt
		}
	}()

	return ch, nil
}

func (s StrfryBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
	args := make([]string, 0, 4)
	if s.ConfigPath != "" {
		args = append(args, "--config="+s.ConfigPath)
	}
	args = append(args, "import")
	args = append(args, "--show-rejected")
	args = append(args, "--no-verify")

	cmd := exec.CommandContext(ctx, s.ExecutablePath, args...)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr

	// event is sent on stdin
	j, _ := easyjson.Marshal(evt)
	cmd.Stdin = bytes.NewBuffer(j)

	err := cmd.Run()
	if err != nil {
		return fmt.Errorf(
			"%s %s failed: %w, (%s)",
			s.ExecutablePath, strings.Join(args, " "), err, stderr.String(),
		)
	}

	return nil
}

func (s StrfryBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
	args := make([]string, 0, 3)
	if s.ConfigPath != "" {
		args = append(args, "--config="+s.ConfigPath)
	}
	args = append(args, "delete")
	args = append(args, "--filter={\"ids\":[\""+evt.ID+"\"]}")

	cmd := exec.CommandContext(ctx, s.ExecutablePath, args...)
	var stderr bytes.Buffer
	cmd.Stderr = &stderr

	err := cmd.Run()
	if err != nil {
		return fmt.Errorf(
			"%s %s failed: %w, (%s)",
			s.ExecutablePath, strings.Join(args, " "), err, stderr.String(),
		)
	}

	return nil
}

func (s StrfryBackend) CountEvents(ctx context.Context, filter nostr.Filter) (int64, error) {
	stdout, err := s.baseStrfryScan(ctx, filter)
	if err != nil {
		return 0, err
	}

	var count int64
	for {
		_, err := stdout.ReadBytes('\n')
		if err != nil {
			break
		}
		count++
	}

	return count, nil
}

func (s StrfryBackend) baseStrfryScan(ctx context.Context, filter nostr.Filter) (*bytes.Buffer, error) {
	args := make([]string, 0, 3)
	if s.ConfigPath != "" {
		args = append(args, "--config="+s.ConfigPath)
	}
	args = append(args, "scan")
	args = append(args, filter.String())

	cmd := exec.CommandContext(ctx, s.ExecutablePath, args...)
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	var stderr bytes.Buffer
	cmd.Stderr = &stderr

	err := cmd.Run()
	if err != nil {
		return nil, fmt.Errorf(
			"%s %s failed: %w, (%s)",
			s.ExecutablePath, strings.Join(args, " "), err, stderr.String(),
		)
	}

	return &stdout, nil
}
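A rough usage sketch for the strfry-backed store follows. The import path and the config path are assumptions for illustration only and are not taken from this commit.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/fiatjaf/eventstore/strfry"
	"github.com/nbd-wtf/go-nostr"
)

func main() {
	store := &strfry.StrfryBackend{ConfigPath: "/etc/strfry.conf"}
	if err := store.Init(); err != nil {
		log.Fatal(err)
	}

	// events are read back by shelling out to `strfry scan` with the serialized filter
	ch, err := store.QueryEvents(context.Background(), nostr.Filter{Kinds: []int{1}, Limit: 10})
	if err != nil {
		log.Fatal(err)
	}
	for evt := range ch {
		fmt.Println(evt.ID, evt.Content)
	}
}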