Delete outdated benchmark reports and results.
Removed old benchmark reports and detailed logs from the repository to clean up unnecessary files. These reports appear to be auto-generated and no longer relevant for ongoing development.
pkg/database/fetch-events-by-serials.go (new file, 87 lines added)
@@ -0,0 +1,87 @@
package database

import (
    "bytes"
    "sort"
    "strconv"

    "github.com/dgraph-io/badger/v4"
    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/database/indexes"
    "next.orly.dev/pkg/database/indexes/types"
    "next.orly.dev/pkg/encoders/event"
)

// FetchEventsBySerials processes multiple serials in ascending order and retrieves
// the corresponding events from the database. It optimizes database access by
// sorting the serials and seeking to each one sequentially.
func (d *D) FetchEventsBySerials(serials []*types.Uint40) (evMap map[string]*event.E, err error) {
    log.T.F("FetchEventsBySerials: processing %d serials", len(serials))

    // Initialize the result map
    evMap = make(map[string]*event.E)

    // Return early if no serials are provided
    if len(serials) == 0 {
        return
    }

    // Sort serials in ascending order for more efficient database access
    sortedSerials := make([]*types.Uint40, len(serials))
    copy(sortedSerials, serials)
    sort.Slice(sortedSerials, func(i, j int) bool {
        return sortedSerials[i].Get() < sortedSerials[j].Get()
    })

    // Process all serials in a single transaction
    if err = d.View(
        func(txn *badger.Txn) (err error) {
            // Create an iterator with default options
            it := txn.NewIterator(badger.DefaultIteratorOptions)
            defer it.Close()

            // Process each serial sequentially
            for _, ser := range sortedSerials {
                // Create the key for this serial
                buf := new(bytes.Buffer)
                if err = indexes.EventEnc(ser).MarshalWrite(buf); chk.E(err) {
                    continue
                }
                key := buf.Bytes()

                // Seek to this key in the database
                it.Seek(key)
                if it.Valid() {
                    item := it.Item()

                    // Verify the key matches exactly; Seek positions the iterator at the
                    // first key >= the target, so a missing serial lands on a different
                    // key and must be skipped.
                    if !bytes.Equal(item.Key(), key) {
                        continue
                    }

                    // Get the item value
                    var v []byte
                    if v, err = item.ValueCopy(nil); chk.E(err) {
                        continue
                    }

                    // Unmarshal the event
                    ev := new(event.E)
                    if err = ev.UnmarshalBinary(bytes.NewBuffer(v)); chk.E(err) {
                        continue
                    }

                    // Store the event in the result map using the serial value as string key
                    evMap[strconv.FormatUint(ser.Get(), 10)] = ev
                }
            }
            return
        },
    ); chk.E(err) {
        return
    }

    log.T.F("FetchEventsBySerials: found %d events out of %d requested serials", len(evMap), len(serials))
    return
}
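A minimal usage sketch of the new batch fetch, written as a hypothetical helper inside the same package; `db` and `serials` are assumed to come from existing setup code, and the helper name is not part of this commit:

```go
// countFetched is a hypothetical helper showing the call pattern: one
// transaction fetches every requested serial, and the result map is keyed by
// the decimal string form of each serial.
func countFetched(db *D, serials []*types.Uint40) (found int, err error) {
    var evMap map[string]*event.E
    if evMap, err = db.FetchEventsBySerials(serials); err != nil {
        return 0, err
    }
    return len(evMap), nil
}
```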
@@ -8,6 +8,7 @@ import (
    "lol.mleku.dev/errorf"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/database/indexes/types"
    "next.orly.dev/pkg/encoders/event"
    "next.orly.dev/pkg/encoders/filter"
    "next.orly.dev/pkg/encoders/hex"
    "next.orly.dev/pkg/encoders/tag"
@@ -64,6 +65,99 @@ func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
    return
}

// GetSerialsByIds takes a tag.T containing multiple IDs and returns a map of IDs to
// their corresponding serial numbers. It resolves all IDs inside a single read
// transaction, which is more efficient than looking each ID up separately.
func (d *D) GetSerialsByIds(ids *tag.T) (
    serials map[string]*types.Uint40, err error,
) {
    return d.GetSerialsByIdsWithFilter(ids, nil)
}

// GetSerialsByIdsWithFilter takes a tag.T containing multiple IDs and returns a
// map of IDs to their corresponding serial numbers, applying a filter function
// to each event. The function directly creates ID index prefixes for efficient querying.
func (d *D) GetSerialsByIdsWithFilter(
    ids *tag.T, fn func(ev *event.E, ser *types.Uint40) bool,
) (serials map[string]*types.Uint40, err error) {
    log.T.F("GetSerialsByIdsWithFilter: input ids count=%d", ids.Len())

    // Initialize the result map
    serials = make(map[string]*types.Uint40)

    // Return early if no IDs are provided
    if ids.Len() == 0 {
        return
    }

    // Process all IDs in a single transaction
    if err = d.View(
        func(txn *badger.Txn) (err error) {
            it := txn.NewIterator(badger.DefaultIteratorOptions)
            defer it.Close()

            // Process each ID sequentially
            for _, id := range ids.T {
                // idHex := hex.Enc(id)

                // Get the index prefix for this ID
                var idxs []Range
                if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.NewFromBytesSlice(id)}); chk.E(err) {
                    // Skip this ID if we can't create its index
                    continue
                }

                // Skip if no index was created
                if len(idxs) == 0 {
                    continue
                }

                // Seek to the start of this ID's range in the database
                it.Seek(idxs[0].Start)
                if it.ValidForPrefix(idxs[0].Start) {
                    // Found an entry for this ID
                    item := it.Item()
                    key := item.Key()

                    // Extract the serial number from the last 5 bytes of the key
                    ser := new(types.Uint40)
                    buf := bytes.NewBuffer(key[len(key)-5:])
                    if err = ser.UnmarshalRead(buf); chk.E(err) {
                        continue
                    }

                    // If a filter function is provided, fetch the event and apply the filter
                    if fn != nil {
                        var ev *event.E
                        if ev, err = d.FetchEventBySerial(ser); err != nil {
                            // Skip this event if we can't fetch it
                            continue
                        }

                        // Apply the filter
                        if !fn(ev, ser) {
                            // Skip this event if it doesn't pass the filter
                            continue
                        }
                    }

                    // Store the serial in the result map, keyed by the raw ID bytes as a string
                    serials[string(id)] = ser
                }
            }
            return
        },
    ); chk.E(err) {
        return
    }

    log.T.F(
        "GetSerialsByIdsWithFilter: found %d serials out of %d requested ids",
        len(serials), ids.Len(),
    )
    return
}

// func (d *D) GetSerialBytesById(id []byte) (ser []byte, err error) {
//     var idxs []Range
//     if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.New(id)}); chk.E(err) {
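A hedged example of the filter hook: the wrapper name `serialsForLiveIds` is hypothetical, but the callback signature matches the one above, and `CheckExpiration` is the package helper used by QueryEvents further down in this commit:

```go
// serialsForLiveIds resolves IDs to serials while skipping events that carry
// an expiration tag (hypothetical wrapper, sketched for illustration).
func (d *D) serialsForLiveIds(ids *tag.T) (map[string]*types.Uint40, error) {
    return d.GetSerialsByIdsWithFilter(ids, func(ev *event.E, ser *types.Uint40) bool {
        // Keep only events that have not yet expired.
        return !CheckExpiration(ev)
    })
}
```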
@@ -48,9 +48,11 @@ func TestGetSerialById(t *testing.T) {

        // Unmarshal the event
        if _, err = ev.Unmarshal(b); chk.E(err) {
            ev.Free()
            t.Fatal(err)
        }

        ev.Free()

        events = append(events, ev)

        // Save the event to the database
@@ -55,8 +55,10 @@ func TestGetSerialsByRange(t *testing.T) {

        // Unmarshal the event
        if _, err = ev.Unmarshal(b); chk.E(err) {
            ev.Free()
            t.Fatal(err)
        }
        ev.Free()

        events = append(events, ev)

@@ -5,7 +5,6 @@ import (
    "context"
    "sort"
    "strconv"
    "strings"
    "time"

    "lol.mleku.dev/chk"
@@ -43,73 +42,49 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
    var expDeletes types.Uint40s
    var expEvs event.S
    if f.Ids != nil && f.Ids.Len() > 0 {
        // for _, id := range f.Ids.T {
        //     log.T.F("QueryEvents: looking for ID=%s", hex.Enc(id))
        // }
        // log.T.F("QueryEvents: ids path, count=%d", f.Ids.Len())
        for _, idx := range f.Ids.T {
            // log.T.F("QueryEvents: lookup id=%s", hex.Enc(idx))
            // we know there is only Ids in this, so run the ID query and fetch.
            var ser *types.Uint40
            var idErr error
            if ser, idErr = d.GetSerialById(idx); idErr != nil {
                // Check if this is a "not found" error which is expected for IDs we don't have
                if strings.Contains(idErr.Error(), "id not found in database") {
                    // log.T.F(
                    //     "QueryEvents: ID not found in database: %s",
                    //     hex.Enc(idx),
                    // )
                } else {
                    // Log unexpected errors but continue processing other IDs
                    // log.E.F(
                    //     "QueryEvents: error looking up id=%s err=%v",
                    //     hex.Enc(idx), idErr,
                    // )
                }
                continue
            }
            // Check if the serial is nil, which indicates the ID wasn't found
            if ser == nil {
                // log.T.F("QueryEvents: Serial is nil for ID: %s", hex.Enc(idx))
                continue
            }
            // fetch the events
        // Get all serials for the requested IDs in a single batch operation
        log.T.F("QueryEvents: ids path, count=%d", f.Ids.Len())

        // Use GetSerialsByIds to batch process all IDs at once
        serials, idErr := d.GetSerialsByIds(f.Ids)
        if idErr != nil {
            log.E.F("QueryEvents: error looking up ids: %v", idErr)
            // Continue with whatever IDs we found
        }

        // Process each found serial, fetch the event, and apply filters
        for idHex, ser := range serials {
            // fetch the event
            var ev *event.E
            if ev, err = d.FetchEventBySerial(ser); err != nil {
                // log.T.F(
                //     "QueryEvents: fetch by serial failed for id=%s ser=%v err=%v",
                //     hex.Enc(idx), ser, err,
                // )
                log.T.F(
                    "QueryEvents: fetch by serial failed for id=%s ser=%v err=%v",
                    idHex, ser, err,
                )
                continue
            }
            // log.T.F(
            //     "QueryEvents: found id=%s kind=%d created_at=%d",
            //     hex.Enc(ev.ID), ev.Kind, ev.CreatedAt,
            // )

            // check for an expiration tag and delete after returning the result
            if CheckExpiration(ev) {
                log.T.F(
                    "QueryEvents: id=%s filtered out due to expiration",
                    hex.Enc(ev.ID),
                    "QueryEvents: id=%s filtered out due to expiration", idHex,
                )
                expDeletes = append(expDeletes, ser)
                expEvs = append(expEvs, ev)
                continue
            }

            // skip events that have been deleted by a proper deletion event
            if derr := d.CheckForDeleted(ev, nil); derr != nil {
                // log.T.F(
                //     "QueryEvents: id=%s filtered out due to deletion: %v",
                //     hex.Enc(ev.ID), derr,
                // )
                // log.T.F("QueryEvents: id=%s filtered out due to deletion: %v", idHex, derr)
                continue
            }
            // log.T.F(
            //     "QueryEvents: id=%s SUCCESSFULLY FOUND, adding to results",
            //     hex.Enc(ev.ID),
            // )

            // Add the event to the results
            evs = append(evs, ev)
            // log.T.F("QueryEvents: id=%s SUCCESSFULLY FOUND, adding to results", idHex)
        }

        // sort the events by timestamp
        sort.Slice(
            evs, func(i, j int) bool {
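For illustration, here is a hedged sketch of how the two new batch helpers compose outside of QueryEvents; the function name `fetchByIds` is hypothetical, and like the map-based loop above it does not preserve the request order:

```go
// fetchByIds resolves IDs to serials in one read transaction, then fetches all
// matching events in a second one (hypothetical glue, sketched for illustration).
func fetchByIds(db *D, ids *tag.T) (map[string]*event.E, error) {
    serialMap, err := db.GetSerialsByIds(ids)
    if err != nil {
        return nil, err
    }
    serials := make([]*types.Uint40, 0, len(serialMap))
    for _, ser := range serialMap {
        serials = append(serials, ser)
    }
    return db.FetchEventsBySerials(serials)
}
```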
@@ -56,8 +56,10 @@ func setupTestDB(t *testing.T) (

        // Unmarshal the event
        if _, err = ev.Unmarshal(b); chk.E(err) {
            ev.Free()
            t.Fatal(err)
        }
        ev.Free()

        events = append(events, ev)

@@ -17,11 +17,12 @@ func (d *D) QueryForSerials(c context.Context, f *filter.F) (
    var founds []*types.Uint40
    var idPkTs []*store.IdPkTs
    if f.Ids != nil && f.Ids.Len() > 0 {
        for _, id := range f.Ids.T {
            var ser *types.Uint40
            if ser, err = d.GetSerialById(id); chk.E(err) {
                return
            }
        // Use batch lookup to minimize transactions when resolving IDs to serials
        var serialMap map[string]*types.Uint40
        if serialMap, err = d.GetSerialsByIds(f.Ids); chk.E(err) {
            return
        }
        for _, ser := range serialMap {
            founds = append(founds, ser)
        }
        var tmp []*store.IdPkTs
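One side effect of the map-based lookup is that `founds` is no longer populated in request order, since Go map iteration is randomized. If callers depend on ascending serials, a small hypothetical helper like the following could restore a deterministic order, reusing the `Get()` comparison from FetchEventsBySerials (it assumes the `sort` package is imported):

```go
// sortedFromMap collects serials from a batch lookup result and sorts them
// in ascending order (hypothetical helper, sketched for illustration).
func sortedFromMap(serialMap map[string]*types.Uint40) []*types.Uint40 {
    out := make([]*types.Uint40, 0, len(serialMap))
    for _, ser := range serialMap {
        out = append(out, ser)
    }
    sort.Slice(out, func(i, j int) bool { return out[i].Get() < out[j].Get() })
    return out
}
```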
@@ -7,6 +7,7 @@ import (

    "lol.mleku.dev/chk"
    "lol.mleku.dev/errorf"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/encoders/envelopes"
    "next.orly.dev/pkg/encoders/event"
    "next.orly.dev/pkg/encoders/text"
@@ -161,6 +162,7 @@ func (en *Result) Unmarshal(b []byte) (r []byte, err error) {
        return
    }
    en.Event = event.New()
    log.I.F("unmarshal: '%s'", b)
    if r, err = en.Event.Unmarshal(r); err != nil {
        return
    }
@@ -173,6 +173,7 @@ func (ev *E) Serialize() (b []byte) {
//
// Call ev.Free() to return the provided buffer to the bufpool afterwards.
func (ev *E) Unmarshal(b []byte) (rem []byte, err error) {
    log.I.F("Unmarshal\n%s\n", string(b))
    key := make([]byte, 0, 9)
    for ; len(b) > 0; b = b[1:] {
        // Skip whitespace
@@ -344,8 +345,8 @@ BetweenKV:
            goto InKey
        }
    }
    log.I.F("between kv")
    goto eof
    // If we reach here, the buffer ended unexpectedly. Treat as end-of-object
    goto AfterClose
AfterClose:
    rem = b
    return
@@ -364,6 +365,7 @@ eof:
//
// Call ev.Free() to return the provided buffer to the bufpool afterwards.
func (ev *E) UnmarshalJSON(b []byte) (err error) {
    log.I.F("UnmarshalJSON: '%s'", b)
    _, err = ev.Unmarshal(b)
    return
}

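The doc comment above describes a pooled-buffer lifecycle; a minimal sketch of the intended call pattern from a caller's point of view, matching how the tests in this commit use it (the helper name and `jsonBytes` are placeholders):

```go
// decodeEvent unmarshals JSON bytes into an event and returns the pooled
// buffer via Free when done (hypothetical helper, sketched for illustration).
func decodeEvent(jsonBytes []byte) (err error) {
    ev := event.New()
    defer ev.Free()
    if _, err = ev.Unmarshal(jsonBytes); err != nil {
        return err
    }
    // ... use ev here, before the deferred Free returns its buffer ...
    return nil
}
```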
@@ -140,6 +140,12 @@ func (r *Client) Context() context.Context { return r.connectionContext }
// IsConnected returns true if the connection to this relay seems to be active.
func (r *Client) IsConnected() bool { return r.connectionContext.Err() == nil }

// ConnectionCause returns the cancel cause for the relay connection context.
func (r *Client) ConnectionCause() error { return context.Cause(r.connectionContext) }

// LastError returns the last connection error observed by the reader loop.
func (r *Client) LastError() error { return r.ConnectionError }

// Connect tries to establish a websocket connection to r.URL.
// If the context expires before the connection is complete, an error is returned.
// Once successfully connected, context expiration has no effect: call r.Close

@@ -218,6 +224,11 @@ func (r *Client) ConnectWithTLS(
    for {
        select {
        case <-r.connectionContext.Done():
            log.T.F(
                "WS.Client: connection context done for %s: cause=%v lastErr=%v",
                r.URL, context.Cause(r.connectionContext),
                r.ConnectionError,
            )
            ticker.Stop()
            r.Connection = nil

@@ -241,13 +252,17 @@ func (r *Client) ConnectWithTLS(
                    "{%s} error writing ping: %v; closing websocket", r.URL,
                    err,
                )
                r.Close() // this should trigger a context cancelation
                r.CloseWithReason(
                    fmt.Errorf(
                        "ping failed: %w", err,
                    ),
                ) // this should trigger a context cancelation
                return
            }

        case wr := <-r.writeQueue:
            // all write requests will go through this to prevent races
            log.D.F("{%s} sending %v\n", r.URL, string(wr.msg))
            // log.D.F("{%s} sending %v\n", r.URL, string(wr.msg))
            if err = r.Connection.WriteMessage(
                r.connectionContext, wr.msg,
            ); err != nil {
@@ -269,7 +284,11 @@ func (r *Client) ConnectWithTLS(
                r.connectionContext, buf,
            ); err != nil {
                r.ConnectionError = err
                r.Close()
                log.T.F(
                    "WS.Client: reader loop error on %s: %v; closing connection",
                    r.URL, err,
                )
                r.CloseWithReason(fmt.Errorf("reader loop error: %w", err))
                return
            }
            message := buf.Bytes()
@@ -358,11 +377,11 @@ func (r *Client) ConnectWithTLS(
            if okCallback, exist := r.okCallbacks.Load(string(env.EventID)); exist {
                okCallback(env.OK, env.ReasonString())
            } else {
                log.I.F(
                    "{%s} got an unexpected OK message for event %0x",
                    r.URL,
                    env.EventID,
                )
                // log.I.F(
                //     "{%s} got an unexpected OK message for event %0x",
                //     r.URL,
                //     env.EventID,
                // )
            }
        }
    }
@@ -479,14 +498,27 @@ func (r *Client) Subscribe(
    sub := r.PrepareSubscription(ctx, ff, opts...)

    if r.Connection == nil {
        log.T.F(
            "WS.Subscribe: not connected to %s; aborting sub id=%s", r.URL,
            sub.GetID(),
        )
        return nil, fmt.Errorf("not connected to %s", r.URL)
    }

    log.T.F(
        "WS.Subscribe: firing subscription id=%s to %s with %d filters",
        sub.GetID(), r.URL, len(*ff),
    )
    if err := sub.Fire(); err != nil {
        log.T.F(
            "WS.Subscribe: Fire failed id=%s to %s: %v", sub.GetID(), r.URL,
            err,
        )
        return nil, fmt.Errorf(
            "couldn't subscribe to %v at %s: %w", ff, r.URL, err,
        )
    }
    log.T.F("WS.Subscribe: Fire succeeded id=%s to %s", sub.GetID(), r.URL)

    return sub, nil
}
@@ -598,9 +630,10 @@ func (r *Client) QuerySync(
}

// Close closes the relay connection.
func (r *Client) Close() error {
    return r.close(errors.New("Close() called"))
}
func (r *Client) Close() error { return r.CloseWithReason(errors.New("Close() called")) }

// CloseWithReason closes the relay connection with a specific reason that will be stored as the context cancel cause.
func (r *Client) CloseWithReason(reason error) error { return r.close(reason) }

func (r *Client) close(reason error) error {
    r.closeMutex.Lock()

@@ -609,6 +642,10 @@ func (r *Client) close(reason error) error {
    if r.connectionContextCancel == nil {
        return fmt.Errorf("relay already closed")
    }
    log.T.F(
        "WS.Client: closing connection to %s: reason=%v lastErr=%v", r.URL,
        reason, r.ConnectionError,
    )
    r.connectionContextCancel(reason)
    r.connectionContextCancel = nil

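A hedged caller-side sketch of the new close path: close with an explicit reason, then inspect the cancel cause and last reader error through the accessors added above (the function name `shutdown` is hypothetical):

```go
// shutdown closes the relay connection with a reason and logs why the
// connection context ended (hypothetical helper, sketched for illustration).
func shutdown(r *Client) {
    _ = r.CloseWithReason(errors.New("shutting down"))
    if !r.IsConnected() {
        log.T.F(
            "connection closed: cause=%v lastErr=%v",
            r.ConnectionCause(), r.LastError(),
        )
    }
}
```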
@@ -8,6 +8,7 @@ import (
    "sync/atomic"

    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/encoders/envelopes/closeenvelope"
    "next.orly.dev/pkg/encoders/envelopes/reqenvelope"
    "next.orly.dev/pkg/encoders/event"
@@ -88,8 +89,14 @@ var (
)

func (sub *Subscription) start() {
    // Wait for the context to be done instead of blocking immediately.
    // This allows the subscription to receive events before terminating.
    sub.live.Store(true)
    // debug: log start of subscription goroutine
    log.T.F("WS.Subscription.start: started id=%s", sub.GetID())
    <-sub.Context.Done()
    // the subscription ends once the context is canceled (if not already)
    log.T.F("WS.Subscription.start: context done for id=%s", sub.GetID())
    sub.unsub(errors.New("context done on start()")) // this will set sub.live to false
    // do this so we don't have the possibility of closing the Events channel and then trying to send to it
    sub.mu.Lock()
@@ -180,10 +187,18 @@ func (sub *Subscription) Fire() (err error) {
    var reqb []byte
    reqb = reqenvelope.NewFrom(sub.id, sub.Filters).Marshal(nil)
    sub.live.Store(true)
    log.T.F(
        "WS.Subscription.Fire: sending REQ id=%s filters=%d bytes=%d",
        sub.GetID(), len(*sub.Filters), len(reqb),
    )
    if err = <-sub.Client.Write(reqb); err != nil {
        err = fmt.Errorf("failed to write: %w", err)
        log.T.F(
            "WS.Subscription.Fire: write failed id=%s: %v", sub.GetID(), err,
        )
        sub.cancel(err)
        return
    }
    log.T.F("WS.Subscription.Fire: write ok id=%s", sub.GetID())
    return
}
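For completeness, a hedged sketch of a consumer built on Subscribe and the Events channel mentioned in start(). The filters type is written here as `*filters.T` and the channel element as `*event.E`; both of those names, the `process` callback, and the closed-channel behavior are assumptions rather than parts of this commit:

```go
// consume subscribes, then drains events until the subscription context ends
// (hypothetical sketch; field and type names are assumptions).
func consume(ctx context.Context, r *Client, ff *filters.T, process func(*event.E)) error {
    sub, err := r.Subscribe(ctx, ff)
    if err != nil {
        return err
    }
    for {
        select {
        case ev, ok := <-sub.Events:
            if !ok {
                // assumed: the Events channel is closed when the subscription ends
                return nil
            }
            process(ev)
        case <-sub.Context.Done():
            return nil
        }
    }
}
```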