refactor logging to use closures for intensive tasks
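The refactor below wraps expensive log formatting in closures (e.g. log.T.C(func() string { return fmt.Sprintf(...) })) so that serialization and Sprintf work only runs when the corresponding log level is actually enabled. A minimal, self-contained sketch of the idea follows; the Logger type here is hypothetical and only stands in for the project's own log.T.C / log.D.C helpers, which the diff itself shows being adopted.

// lazylog.go - hypothetical illustration of closure-based (lazy) logging.
// Not the project's real logger; it only demonstrates why the closure form
// avoids formatting cost on hot paths when tracing is disabled.
package main

import (
	"fmt"
	"log"
)

type Level int

const (
	Info Level = iota
	Trace
)

type Logger struct{ level Level }

// C evaluates the closure and logs its result only when trace is enabled,
// so the (potentially expensive) formatting is skipped otherwise.
func (l *Logger) C(f func() string) {
	if l.level >= Trace {
		log.Print(f())
	}
}

func main() {
	lg := &Logger{level: Info}
	payload := make([]byte, 1<<20)

	// Eager form: the large payload is hex-formatted even though the
	// message would never be emitted at Info level.
	_ = fmt.Sprintf("event payload: %x", payload)

	// Lazy form: the closure is never invoked at Info level, so no
	// formatting work is done.
	lg.C(func() string { return fmt.Sprintf("event payload: %x", payload) })
}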
@@ -63,14 +63,20 @@ type QueryProfiler struct {
|
||||
|
||||
func NewQueryProfiler(relayURL string) *QueryProfiler {
|
||||
return &QueryProfiler{
|
||||
relay: relayURL,
|
||||
subscriptions: make(map[string]*ws.Subscription),
|
||||
metrics: &QueryMetrics{Latencies: make([]time.Duration, 0, 10000)},
|
||||
relay: relayURL,
|
||||
subscriptions: make(map[string]*ws.Subscription),
|
||||
metrics: &QueryMetrics{
|
||||
Latencies: make(
|
||||
[]time.Duration, 0, 10000,
|
||||
),
|
||||
},
|
||||
stopMemMonitor: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (qp *QueryProfiler) ExecuteProfile(c context.T, iterations int, concurrency int) error {
|
||||
func (qp *QueryProfiler) ExecuteProfile(
|
||||
c context.T, iterations int, concurrency int,
|
||||
) error {
|
||||
qp.startMemoryMonitor()
|
||||
defer qp.stopMemoryMonitor()
|
||||
|
||||
@@ -102,7 +108,9 @@ func (qp *QueryProfiler) ExecuteProfile(c context.T, iterations int, concurrency
|
||||
|
||||
relay, err := ws.RelayConnect(c, qp.relay)
|
||||
if chk.E(err) {
|
||||
errorChan <- fmt.Errorf("worker %d connection failed: %w", workerID, err)
|
||||
errorChan <- fmt.Errorf(
|
||||
"worker %d connection failed: %w", workerID, err,
|
||||
)
|
||||
return
|
||||
}
|
||||
defer relay.Close()
|
||||
@@ -117,7 +125,9 @@ func (qp *QueryProfiler) ExecuteProfile(c context.T, iterations int, concurrency
|
||||
f := qp.generateFilter(filterType)
|
||||
|
||||
startTime := time.Now()
|
||||
events, err := relay.QuerySync(c, f, ws.WithLabel(fmt.Sprintf("profiler-%d-%d", workerID, j)))
|
||||
events, err := relay.QuerySync(
|
||||
c, f,
|
||||
) // , ws.WithLabel(fmt.Sprintf("profiler-%d-%d", workerID, j)))
|
||||
latency := time.Since(startTime)
|
||||
|
||||
if err != nil {
|
||||
@@ -125,7 +135,9 @@ func (qp *QueryProfiler) ExecuteProfile(c context.T, iterations int, concurrency
|
||||
atomic.AddInt64(&qp.metrics.FailedQueries, 1)
|
||||
} else {
|
||||
latencyChan <- latency
|
||||
atomic.AddInt64(&qp.metrics.EventsReturned, int64(len(events)))
|
||||
atomic.AddInt64(
|
||||
&qp.metrics.EventsReturned, int64(len(events)),
|
||||
)
|
||||
atomic.AddInt64(&qp.metrics.TotalQueries, 1)
|
||||
}
|
||||
}
|
||||
@@ -271,7 +283,9 @@ func (qp *QueryProfiler) generateFilter(filterType FilterType) *filter.F {
|
||||
}
|
||||
}
|
||||
|
||||
func (qp *QueryProfiler) TestSubscriptionPerformance(c context.T, duration time.Duration, subscriptionCount int) error {
|
||||
func (qp *QueryProfiler) TestSubscriptionPerformance(
|
||||
c context.T, duration time.Duration, subscriptionCount int,
|
||||
) error {
|
||||
qp.startMemoryMonitor()
|
||||
defer qp.stopMemoryMonitor()
|
||||
|
||||
@@ -293,7 +307,9 @@ func (qp *QueryProfiler) TestSubscriptionPerformance(c context.T, duration time.
|
||||
label := fmt.Sprintf("sub-perf-%d", subID)
|
||||
|
||||
eventChan := make(chan *event.E, 100)
|
||||
sub, err := relay.Subscribe(c, &filters.T{F: []*filter.F{f}}, ws.WithLabel(label))
|
||||
sub, err := relay.Subscribe(
|
||||
c, &filters.T{F: []*filter.F{f}}, ws.WithLabel(label),
|
||||
)
|
||||
if chk.E(err) {
|
||||
return
|
||||
}
|
||||
@@ -359,9 +375,11 @@ func (qp *QueryProfiler) calculatePercentiles() {
|
||||
return
|
||||
}
|
||||
|
||||
sort.Slice(qp.metrics.Latencies, func(i, j int) bool {
|
||||
return qp.metrics.Latencies[i] < qp.metrics.Latencies[j]
|
||||
})
|
||||
sort.Slice(
|
||||
qp.metrics.Latencies, func(i, j int) bool {
|
||||
return qp.metrics.Latencies[i] < qp.metrics.Latencies[j]
|
||||
},
|
||||
)
|
||||
|
||||
qp.metrics.Min = qp.metrics.Latencies[0]
|
||||
qp.metrics.Max = qp.metrics.Latencies[len(qp.metrics.Latencies)-1]
|
||||
@@ -415,5 +433,8 @@ func (qp *QueryProfiler) PrintReport() {
|
||||
fmt.Printf(" Before: %.2f MB\n", float64(metrics.MemoryBefore)/1024/1024)
|
||||
fmt.Printf(" After: %.2f MB\n", float64(metrics.MemoryAfter)/1024/1024)
|
||||
fmt.Printf(" Peak: %.2f MB\n", float64(metrics.MemoryPeak)/1024/1024)
|
||||
fmt.Printf(" Delta: %.2f MB\n", float64(int64(metrics.MemoryAfter)-int64(metrics.MemoryBefore))/1024/1024)
|
||||
fmt.Printf(
|
||||
" Delta: %.2f MB\n",
|
||||
float64(int64(metrics.MemoryAfter)-int64(metrics.MemoryBefore))/1024/1024,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -93,7 +93,9 @@ func (ti *TimingInstrumentation) Connect(c context.T, relayURL string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) TrackEventLifecycle(c context.T, ev *event.E) (*EventLifecycle, error) {
|
||||
func (ti *TimingInstrumentation) TrackEventLifecycle(
|
||||
c context.T, ev *event.E,
|
||||
) (*EventLifecycle, error) {
|
||||
evID := ev.ID
|
||||
lifecycle := &EventLifecycle{
|
||||
EventID: string(evID),
|
||||
@@ -122,7 +124,7 @@ func (ti *TimingInstrumentation) TrackEventLifecycle(c context.T, ev *event.E) (
|
||||
Ids: tag.New(ev.ID),
|
||||
}
|
||||
|
||||
events, err := ti.relay.QuerySync(c, f, ws.WithLabel("timing"))
|
||||
events, err := ti.relay.QuerySync(c, f) // , ws.WithLabel("timing"))
|
||||
queryEnd := time.Now()
|
||||
|
||||
if err != nil {
|
||||
@@ -169,7 +171,9 @@ func (ti *TimingInstrumentation) MeasureWriteAmplification(inputEvent *event.E)
|
||||
return amp
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) TrackWebSocketFrame(frameType string, payload []byte) *FrameTiming {
|
||||
func (ti *TimingInstrumentation) TrackWebSocketFrame(
|
||||
frameType string, payload []byte,
|
||||
) *FrameTiming {
|
||||
frame := &FrameTiming{
|
||||
FrameType: frameType,
|
||||
SendTime: time.Now(),
|
||||
@@ -253,7 +257,9 @@ func (ti *TimingInstrumentation) IdentifyBottlenecks() map[string]*PipelineBottl
|
||||
return ti.bottlenecks
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) RunFullInstrumentation(c context.T, eventCount int, eventSize int) error {
|
||||
func (ti *TimingInstrumentation) RunFullInstrumentation(
|
||||
c context.T, eventCount int, eventSize int,
|
||||
) error {
|
||||
fmt.Printf("Starting end-to-end timing instrumentation...\n")
|
||||
|
||||
signer := newTestSigner()
|
||||
@@ -282,8 +288,10 @@ func (ti *TimingInstrumentation) RunFullInstrumentation(c context.T, eventCount
|
||||
totalEndToEnd += lifecycle.TotalDuration
|
||||
|
||||
if (i+1)%100 == 0 {
|
||||
fmt.Printf(" Processed %d/%d events (%.1f%% success)\n",
|
||||
i+1, eventCount, float64(successCount)*100/float64(i+1))
|
||||
fmt.Printf(
|
||||
" Processed %d/%d events (%.1f%% success)\n",
|
||||
i+1, eventCount, float64(successCount)*100/float64(i+1),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -292,9 +300,18 @@ func (ti *TimingInstrumentation) RunFullInstrumentation(c context.T, eventCount
|
||||
fmt.Printf("\n=== Timing Instrumentation Results ===\n")
|
||||
fmt.Printf("Events Tracked: %d/%d\n", successCount, eventCount)
|
||||
if successCount > 0 {
|
||||
fmt.Printf("Average Publish Latency: %v\n", totalPublishLatency/time.Duration(successCount))
|
||||
fmt.Printf("Average Query Latency: %v\n", totalQueryLatency/time.Duration(successCount))
|
||||
fmt.Printf("Average End-to-End: %v\n", totalEndToEnd/time.Duration(successCount))
|
||||
fmt.Printf(
|
||||
"Average Publish Latency: %v\n",
|
||||
totalPublishLatency/time.Duration(successCount),
|
||||
)
|
||||
fmt.Printf(
|
||||
"Average Query Latency: %v\n",
|
||||
totalQueryLatency/time.Duration(successCount),
|
||||
)
|
||||
fmt.Printf(
|
||||
"Average End-to-End: %v\n",
|
||||
totalEndToEnd/time.Duration(successCount),
|
||||
)
|
||||
} else {
|
||||
fmt.Printf("No events successfully tracked\n")
|
||||
}
|
||||
@@ -333,7 +350,9 @@ func (ti *TimingInstrumentation) printWriteAmplificationStats() {
|
||||
count := float64(len(ti.amplifications))
|
||||
fmt.Printf("\n=== Write Amplification ===\n")
|
||||
fmt.Printf("Average Amplification: %.2fx\n", totalAmp/count)
|
||||
fmt.Printf("Average Index Overhead: %.2f%%\n", (totalIndexOverhead/count)*100)
|
||||
fmt.Printf(
|
||||
"Average Index Overhead: %.2f%%\n", (totalIndexOverhead/count)*100,
|
||||
)
|
||||
fmt.Printf("Total I/O Operations: %d\n", totalIOOps)
|
||||
}
|
||||
|
||||
@@ -356,14 +375,18 @@ func (ti *TimingInstrumentation) printFrameTimingStats() {
|
||||
fmt.Printf("\n=== WebSocket Frame Timings ===\n")
|
||||
fmt.Printf("Total Frames: %d\n", count)
|
||||
fmt.Printf("Average Frame Latency: %v\n", totalLatency/time.Duration(count))
|
||||
fmt.Printf("Average Compression: %.1f%%\n", (totalCompression/float64(count))*100)
|
||||
fmt.Printf(
|
||||
"Average Compression: %.1f%%\n", (totalCompression/float64(count))*100,
|
||||
)
|
||||
|
||||
for frameType, cnt := range frameTypes {
|
||||
fmt.Printf(" %s frames: %d\n", frameType, cnt)
|
||||
}
|
||||
}
|
||||
|
||||
func (ti *TimingInstrumentation) TestSubscriptionTiming(c context.T, duration time.Duration) error {
|
||||
func (ti *TimingInstrumentation) TestSubscriptionTiming(
|
||||
c context.T, duration time.Duration,
|
||||
) error {
|
||||
fmt.Printf("Testing subscription timing for %v...\n", duration)
|
||||
|
||||
f := &filter.F{}
|
||||
@@ -385,8 +408,10 @@ func (ti *TimingInstrumentation) TestSubscriptionTiming(c context.T, duration ti
|
||||
eventCount++
|
||||
|
||||
if eventCount%100 == 0 {
|
||||
fmt.Printf(" Received %d events, avg latency: %v\n",
|
||||
eventCount, totalLatency/time.Duration(eventCount))
|
||||
fmt.Printf(
|
||||
" Received %d events, avg latency: %v\n",
|
||||
eventCount, totalLatency/time.Duration(eventCount),
|
||||
)
|
||||
}
|
||||
case <-c.Done():
|
||||
return
|
||||
@@ -400,8 +425,12 @@ func (ti *TimingInstrumentation) TestSubscriptionTiming(c context.T, duration ti
|
||||
fmt.Printf("\nSubscription Timing Results:\n")
|
||||
fmt.Printf(" Total Events: %d\n", eventCount)
|
||||
if eventCount > 0 {
|
||||
fmt.Printf(" Average Latency: %v\n", totalLatency/time.Duration(eventCount))
|
||||
fmt.Printf(" Events/Second: %.2f\n", float64(eventCount)/duration.Seconds())
|
||||
fmt.Printf(
|
||||
" Average Latency: %v\n", totalLatency/time.Duration(eventCount),
|
||||
)
|
||||
fmt.Printf(
|
||||
" Events/Second: %.2f\n", float64(eventCount)/duration.Seconds(),
|
||||
)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -334,7 +334,7 @@ func setProxy(mapping map[string]string) (h http.Handler, err error) {
|
||||
mux.HandleFunc(
|
||||
hn+"/favicon.ico",
|
||||
func(writer http.ResponseWriter, request *http.Request) {
|
||||
log.I.F("serving favicon to %s", hn)
|
||||
log.T.F("serving favicon to %s", hn)
|
||||
if _, err = writer.Write(fi); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -62,7 +62,13 @@ for generating extended expiration NIP-98 tokens:
|
||||
if err = ev.Sign(sign); err != nil {
|
||||
fail(err.Error())
|
||||
}
|
||||
log.T.F("nip-98 http auth event:\n%s\n", ev.SerializeIndented())
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"nip-98 http auth event:\n%s\n", ev.SerializeIndented(),
|
||||
)
|
||||
},
|
||||
)
|
||||
b64 := base64.URLEncoding.EncodeToString(ev.Serialize())
|
||||
fmt.Println("Nostr " + b64)
|
||||
}
|
||||
|
||||
@@ -196,7 +196,6 @@ out:
|
||||
break out
|
||||
}
|
||||
fmt.Printf("\rgenerating key: %s", r.npub)
|
||||
// log.I.F("%s", r.npub)
|
||||
switch where {
|
||||
case PositionBeginning:
|
||||
if bytes.HasPrefix(r.npub, append(prefix, []byte(str)...)) {
|
||||
|
||||
@@ -96,7 +96,7 @@ func New() (cfg *C, err error) {
|
||||
return
|
||||
}
|
||||
lol.SetLogLevel(cfg.LogLevel)
|
||||
log.I.F("loaded configuration from %s", envPath)
|
||||
log.T.F("loaded configuration from %s", envPath)
|
||||
}
|
||||
// if spider seeds has no elements, there still is a single entry with an
|
||||
// empty string; and also if any of the fields are empty strings, they need
|
||||
|
||||
@@ -127,9 +127,13 @@ func (s *Server) AddEvent(
|
||||
// us here matches the index of this address, we can skip it.
|
||||
for _, pk := range pubkeys {
|
||||
if bytes.Equal(s.Peers.Pubkeys[i], pk) {
|
||||
log.I.F(
|
||||
"not sending back to replica that just sent us this event %0x %s",
|
||||
ev.ID, a,
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"not sending back to replica that just sent us this event %0x %s",
|
||||
ev.ID, a,
|
||||
)
|
||||
},
|
||||
)
|
||||
continue replica
|
||||
}
|
||||
@@ -175,9 +179,13 @@ func (s *Server) AddEvent(
|
||||
if _, err = client.Do(r); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
log.I.F(
|
||||
"event pushed to replica %s\n%s",
|
||||
ur.String(), evb,
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"event pushed to replica %s\n%s",
|
||||
ur.String(), evb,
|
||||
)
|
||||
},
|
||||
)
|
||||
break
|
||||
}
|
||||
|
||||
@@ -71,8 +71,13 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
),
|
||||
)
|
||||
}
|
||||
log.I.F(
|
||||
"maybe replace %s with %s", ev.Serialize(), evt.Serialize(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"maybe replace %s with %s", ev.Serialize(),
|
||||
evt.Serialize(),
|
||||
)
|
||||
},
|
||||
)
|
||||
if ev.CreatedAt.Int() > evt.CreatedAt.Int() {
|
||||
return errorf.W(
|
||||
@@ -164,7 +169,13 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
}
|
||||
}
|
||||
} else if evt.Kind.IsParameterizedReplaceable() {
|
||||
log.I.F("parameterized replaceable %s", evt.Serialize())
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"parameterized replaceable %s", evt.Serialize(),
|
||||
)
|
||||
},
|
||||
)
|
||||
// parameterized replaceable event, delete before storing
|
||||
var evs []*event.E
|
||||
f := filter.New()
|
||||
@@ -177,21 +188,30 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
tag.New([]byte{'d'}, dTag.Value()),
|
||||
)
|
||||
}
|
||||
log.I.F(
|
||||
"filter for parameterized replaceable %v %s",
|
||||
f.Tags.ToStringsSlice(),
|
||||
f.Serialize(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"filter for parameterized replaceable %v %s",
|
||||
f.Tags.ToStringsSlice(),
|
||||
f.Serialize(),
|
||||
)
|
||||
},
|
||||
)
|
||||
if evs, err = sto.QueryEvents(c, f); err != nil {
|
||||
return errorf.E("failed to query before replacing: %w", err)
|
||||
return errorf.E("failed to query before replacing: %v", err)
|
||||
}
|
||||
// log.I.S(evs)
|
||||
if len(evs) > 0 {
|
||||
for _, ev := range evs {
|
||||
del := true
|
||||
err = nil
|
||||
log.I.F(
|
||||
"maybe replace %s with %s", ev.Serialize(), evt.Serialize(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"maybe replace %s with %s", ev.Serialize(),
|
||||
evt.Serialize(),
|
||||
)
|
||||
},
|
||||
)
|
||||
if ev.CreatedAt.Int() > evt.CreatedAt.Int() {
|
||||
return errorf.D(string(normalize.Error.F("not replacing newer parameterized replaceable event")))
|
||||
@@ -204,9 +224,13 @@ func (s *Server) Publish(c context.T, evt *event.E) (err error) {
|
||||
}
|
||||
evdt := ev.Tags.GetFirst(tag.New("d"))
|
||||
evtdt := evt.Tags.GetFirst(tag.New("d"))
|
||||
log.I.F(
|
||||
"%s != %s %v", evdt.Value(), evtdt.Value(),
|
||||
!bytes.Equal(evdt.Value(), evtdt.Value()),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"%s != %s %v", evdt.Value(), evtdt.Value(),
|
||||
!bytes.Equal(evdt.Value(), evtdt.Value()),
|
||||
)
|
||||
},
|
||||
)
|
||||
if !bytes.Equal(evdt.Value(), evtdt.Value()) {
|
||||
continue
|
||||
|
||||
@@ -181,9 +181,13 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
}
|
||||
log.I.F(
|
||||
"http request: %s from %s",
|
||||
r.URL.String(), helpers.GetRemoteFromReq(r),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"http request: %s from %s",
|
||||
r.URL.String(), helpers.GetRemoteFromReq(r),
|
||||
)
|
||||
},
|
||||
)
|
||||
s.mux.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
@@ -99,13 +99,10 @@ func (s *Server) SpiderFetch(
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Nil the event to free memory
|
||||
ev = nil
|
||||
}
|
||||
|
||||
log.I.F("%d events found of type %s", len(pkKindMap), kindsList)
|
||||
|
||||
if !noFetch && len(s.C.SpiderSeeds) > 0 {
|
||||
// we need to search the spider seeds.
|
||||
// Break up pubkeys into batches of 128
|
||||
|
||||
@@ -17,7 +17,7 @@ func (d *D) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
|
||||
return
|
||||
}
|
||||
var item *badger.Item
|
||||
if item, err = txn.Get(buf.Bytes()); chk.E(err) {
|
||||
if item, err = txn.Get(buf.Bytes()); err != nil {
|
||||
return
|
||||
}
|
||||
var v []byte
|
||||
|
||||
@@ -104,8 +104,13 @@ func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
|
||||
idPkTs = append(idPkTs, deletionIdPkTs...)
|
||||
}
|
||||
// First pass: collect all deletion events
|
||||
fmt.Printf(
|
||||
"Debug: Starting first pass - processing %d events\n", len(idPkTs),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"Debug: Starting first pass - processing %d events\n",
|
||||
len(idPkTs),
|
||||
)
|
||||
},
|
||||
)
|
||||
for _, idpk := range idPkTs {
|
||||
var ev *event.E
|
||||
@@ -124,8 +129,13 @@ func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
|
||||
}
|
||||
// Process deletion events to build our deletion maps
|
||||
if ev.Kind.Equal(kind.Deletion) {
|
||||
log.D.F(
|
||||
"found deletion event with ID: %s\n", hex.Enc(ev.ID),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"found deletion event with ID: %s\n",
|
||||
hex.Enc(ev.ID),
|
||||
)
|
||||
},
|
||||
)
|
||||
// Check for 'e' tags that directly reference event IDs
|
||||
eTags := ev.Tags.GetAll(tag.New([]byte{'e'}))
|
||||
@@ -138,12 +148,22 @@ func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
|
||||
}
|
||||
// Check for 'a' tags that reference parameterized replaceable
|
||||
// events
|
||||
log.D.F(
|
||||
"processing deletion event with ID: %s\n",
|
||||
hex.Enc(ev.ID),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"processing deletion event with ID: %s\n",
|
||||
hex.Enc(ev.ID),
|
||||
)
|
||||
},
|
||||
)
|
||||
aTags := ev.Tags.GetAll(tag.New([]byte{'a'}))
|
||||
log.D.F("Debug: Found %d a-tags\n", aTags.Len())
|
||||
log.D.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"Found %d a-tags\n", aTags.Len(),
|
||||
)
|
||||
},
|
||||
)
|
||||
for _, aTag := range aTags.ToSliceOfTags() {
|
||||
if aTag.Len() < 2 {
|
||||
continue
|
||||
@@ -184,12 +204,20 @@ func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
|
||||
dValue := string(split[2])
|
||||
deletionsByKindPubkeyDTag[key][dValue] = true
|
||||
// Debug logging
|
||||
log.D.F(
|
||||
"processing a-tag: %s\n", string(aTag.Value()),
|
||||
log.D.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"processing a-tag: %s\n", string(aTag.Value()),
|
||||
)
|
||||
},
|
||||
)
|
||||
log.D.F(
|
||||
"adding to deletion map - key: %s, d-tag: %s\n",
|
||||
key, dValue,
|
||||
log.D.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"adding to deletion map - key: %s, d-tag: %s\n",
|
||||
key, dValue,
|
||||
)
|
||||
},
|
||||
)
|
||||
}
|
||||
// For replaceable events, we need to check if there are any
|
||||
@@ -325,19 +353,26 @@ func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
|
||||
// Check if this event has been deleted via an a-tag
|
||||
if deletionMap, exists := deletionsByKindPubkeyDTag[key]; exists {
|
||||
// Debug logging
|
||||
fmt.Printf(
|
||||
"Debug: Checking deletion map - key: %s, d-tag: %s\n",
|
||||
key, dValue,
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"Checking deletion map - key: %s, d-tag: %s",
|
||||
key, dValue,
|
||||
)
|
||||
},
|
||||
)
|
||||
fmt.Printf(
|
||||
"Debug: Deletion map contains key: %v, d-tag in map: %v\n",
|
||||
exists, deletionMap[dValue],
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"Deletion map contains key: %v, d-tag in map: %v",
|
||||
exists, deletionMap[dValue],
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
// If the d-tag value is in the deletion map and this event
|
||||
// is not specifically requested by ID, skip it
|
||||
if deletionMap[dValue] && !isIdInFilter {
|
||||
fmt.Printf("Debug: Event deleted - skipping\n")
|
||||
log.T.F("Debug: Event deleted - skipping")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"orly.dev/pkg/encoders/kind"
|
||||
"orly.dev/pkg/utils/chk"
|
||||
"orly.dev/pkg/utils/errorf"
|
||||
"orly.dev/pkg/utils/log"
|
||||
)
|
||||
|
||||
// T is an array of kind.T, used in filter.T and filter.S for searches.
|
||||
@@ -132,7 +131,6 @@ func (k *T) Unmarshal(b []byte) (r []byte, err error) {
|
||||
}
|
||||
}
|
||||
if !openedBracket {
|
||||
log.I.F("\n%v\n%s", k, r)
|
||||
return nil, errorf.E(
|
||||
"kinds: failed to unmarshal\n%s\n%s\n%s", k,
|
||||
b, r,
|
||||
|
||||
@@ -41,7 +41,7 @@ func CheckAuth(r *http.Request, tolerance ...time.Duration) (
|
||||
tolerance[0] = time.Minute
|
||||
}
|
||||
tolerate := int64(tolerance[0] / time.Second)
|
||||
log.I.F("validating auth '%s'", val)
|
||||
log.T.C(func() string { return fmt.Sprintf("validating auth '%s'", val) })
|
||||
switch {
|
||||
case strings.HasPrefix(val, NIP98Prefix):
|
||||
split := strings.Split(val, " ")
|
||||
|
||||
@@ -109,7 +109,12 @@ func (x *Operations) RegisterEvent(api huma.API) {
|
||||
// Check if the IP is blocked due to too many failed auth attempts
|
||||
if iptracker.Global.IsBlocked(remote) {
|
||||
blockedUntil := iptracker.Global.GetBlockedUntil(remote)
|
||||
err = huma.Error403Forbidden(fmt.Sprintf("Too many failed authentication attempts. Blocked until %s", blockedUntil.Format(time.RFC3339)))
|
||||
err = huma.Error403Forbidden(
|
||||
fmt.Sprintf(
|
||||
"Too many failed authentication attempts. Blocked until %s",
|
||||
blockedUntil.Format(time.RFC3339),
|
||||
),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -119,7 +124,12 @@ func (x *Operations) RegisterEvent(api huma.API) {
|
||||
blocked := iptracker.Global.RecordFailedAttempt(remote)
|
||||
if blocked {
|
||||
blockedUntil := iptracker.Global.GetBlockedUntil(remote)
|
||||
err = huma.Error403Forbidden(fmt.Sprintf("Too many failed authentication attempts. Blocked until %s", blockedUntil.Format(time.RFC3339)))
|
||||
err = huma.Error403Forbidden(
|
||||
fmt.Sprintf(
|
||||
"Too many failed authentication attempts. Blocked until %s",
|
||||
blockedUntil.Format(time.RFC3339),
|
||||
),
|
||||
)
|
||||
} else {
|
||||
err = huma.Error401Unauthorized("Not Authorized")
|
||||
}
|
||||
@@ -216,7 +226,13 @@ func (x *Operations) RegisterEvent(api huma.API) {
|
||||
// check and process delete
|
||||
sto := x.I.Storage()
|
||||
if ev.Kind.K == kind.Deletion.K {
|
||||
log.I.F("delete event\n%s", ev.Serialize())
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"delete event\n%s", ev.Serialize(),
|
||||
)
|
||||
},
|
||||
)
|
||||
for _, t := range ev.Tags.ToSliceOfTags() {
|
||||
var res []*event.E
|
||||
if t.Len() >= 2 {
|
||||
@@ -276,7 +292,6 @@ func (x *Operations) RegisterEvent(api huma.API) {
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Use DeleteEvent to actually delete the
|
||||
// referenced event
|
||||
if err = sto.DeleteEvent(c, eid); chk.E(err) {
|
||||
@@ -288,9 +303,13 @@ func (x *Operations) RegisterEvent(api huma.API) {
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
log.I.F(
|
||||
"successfully deleted event %x", eventId,
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"successfully deleted event %x",
|
||||
eventId,
|
||||
)
|
||||
},
|
||||
)
|
||||
}
|
||||
case bytes.Equal(t.Key(), []byte("a")):
|
||||
@@ -389,10 +408,6 @@ func (x *Operations) RegisterEvent(api huma.API) {
|
||||
}
|
||||
}
|
||||
if target.CreatedAt.Int() > ev.CreatedAt.Int() {
|
||||
log.I.F(
|
||||
"not deleting\n%d%\nbecause delete event is older\n%d",
|
||||
target.CreatedAt.Int(), ev.CreatedAt.Int(),
|
||||
)
|
||||
continue
|
||||
}
|
||||
if !bytes.Equal(target.Pubkey, env.Pubkey) {
|
||||
@@ -426,9 +441,13 @@ func (x *Operations) RegisterEvent(api huma.API) {
|
||||
return
|
||||
}
|
||||
|
||||
log.I.F(
|
||||
"successfully deleted event %x",
|
||||
target.EventId().Bytes(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"successfully deleted event %x",
|
||||
target.EventId().Bytes(),
|
||||
)
|
||||
},
|
||||
)
|
||||
}
|
||||
res = nil
|
||||
@@ -464,7 +483,13 @@ func (x *Operations) RegisterEvent(api huma.API) {
|
||||
ok, reason = x.I.AddEvent(
|
||||
c, x.Relay(), ev, r, remote, pubkeys,
|
||||
)
|
||||
log.I.F("http API event %0x added %v %s", ev.ID, ok, reason)
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"http API event %0x added %v %s", ev.ID, ok, reason,
|
||||
)
|
||||
},
|
||||
)
|
||||
if !ok && err != nil {
|
||||
if err = Ok.Error(
|
||||
a, env, err.Error(),
|
||||
|
||||
@@ -3,6 +3,7 @@ package openapi
|
||||
import (
|
||||
"errors"
|
||||
"github.com/danielgtaylor/huma/v2"
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"math"
|
||||
"net/http"
|
||||
@@ -623,7 +624,7 @@ Returns events as a JSON array of event objects.`
|
||||
}
|
||||
f := filter.New()
|
||||
var rem []byte
|
||||
log.I.S(input)
|
||||
log.T.C(func() string { return spew.Sdump(input) })
|
||||
if len(rem) > 0 {
|
||||
log.I.F("extra '%s'", rem)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package openapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/danielgtaylor/huma/v2"
|
||||
"net/http"
|
||||
"orly.dev/pkg/app/relay/helpers"
|
||||
@@ -46,9 +47,13 @@ Returns the events as line structured JSON (JSONL) in the order that they were r
|
||||
err = huma.Error401Unauthorized("Not Authorized")
|
||||
return
|
||||
}
|
||||
log.I.F(
|
||||
"%s export of event data requested on admin port pubkey %0x",
|
||||
remote, pubkey,
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"%s export of event data requested on admin port pubkey %0x",
|
||||
remote, pubkey,
|
||||
)
|
||||
},
|
||||
)
|
||||
sto := x.Storage()
|
||||
resp = &huma.StreamResponse{
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package openapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/filter"
|
||||
"orly.dev/pkg/interfaces/publisher"
|
||||
@@ -123,7 +124,10 @@ func (p *Publisher) Receive(msg typer.T) {
|
||||
if m.FilterMap != nil {
|
||||
for id, f := range m.FilterMap {
|
||||
listener.FilterMap[id] = f
|
||||
log.T.F("added subscription %s for new listener %s", id, m.Id)
|
||||
log.T.F(
|
||||
"added subscription %s for new listener %s", id,
|
||||
m.Id,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -166,20 +170,28 @@ func (p *Publisher) Deliver(ev *event.E) {
|
||||
for listenerId, listener := range p.ListenMap {
|
||||
for subId, filter := range listener.FilterMap {
|
||||
if !filter.Matches(ev) {
|
||||
log.I.F(
|
||||
"listener %s, subscription id %s event\n%s\ndoes not match filter\n%s",
|
||||
listenerId, subId, ev.Marshal(nil),
|
||||
filter.Marshal(nil),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"listener %s, subscription id %s event\n%s\ndoes not match filter\n%s",
|
||||
listenerId, subId, ev.Marshal(nil),
|
||||
filter.Marshal(nil),
|
||||
)
|
||||
},
|
||||
)
|
||||
continue
|
||||
}
|
||||
if p.Server.AuthRequired() {
|
||||
if !auth.CheckPrivilege(listener.Pubkey, ev) {
|
||||
log.W.F(
|
||||
"not privileged %0x ev pubkey %0x listener pubkey %0x kind %s privileged: %v",
|
||||
listener.Pubkey, ev.Pubkey,
|
||||
listener.Pubkey, ev.Kind.Name(),
|
||||
ev.Kind.IsPrivileged(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"not privileged %0x ev pubkey %0x listener pubkey %0x kind %s privileged: %v",
|
||||
listener.Pubkey, ev.Pubkey,
|
||||
listener.Pubkey, ev.Kind.Name(),
|
||||
ev.Kind.IsPrivileged(),
|
||||
)
|
||||
},
|
||||
)
|
||||
continue
|
||||
}
|
||||
@@ -233,7 +245,9 @@ func (p *Publisher) ListenerExists(id string) bool {
|
||||
}
|
||||
|
||||
// SubscriptionExists checks if a subscription with the given ID exists for a specific listener.
|
||||
func (p *Publisher) SubscriptionExists(listenerId string, subscriptionId string) bool {
|
||||
func (p *Publisher) SubscriptionExists(
|
||||
listenerId string, subscriptionId string,
|
||||
) bool {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
listener, exists := p.ListenMap[listenerId]
|
||||
@@ -282,7 +296,9 @@ func CheckListenerExists(clientId string, publishers ...publisher.I) bool {
|
||||
|
||||
// CheckSubscriptionExists is a package-level function that checks if a subscription exists for a specific listener.
|
||||
// This function is used by the Unsubscribe API to check if a subscription ID exists before attempting to unsubscribe.
|
||||
func CheckSubscriptionExists(clientId string, subscriptionId string, publishers ...publisher.I) bool {
|
||||
func CheckSubscriptionExists(
|
||||
clientId string, subscriptionId string, publishers ...publisher.I,
|
||||
) bool {
|
||||
for _, p := range publishers {
|
||||
// Check if the publisher is of type *Publisher
|
||||
if pub, ok := p.(*Publisher); ok {
|
||||
@@ -304,7 +320,9 @@ func CheckSubscriptionExists(clientId string, subscriptionId string, publishers
|
||||
pub := field.Index(i).Interface().(publisher.I)
|
||||
// Check if this publisher is of type *Publisher
|
||||
if openPub, ok := pub.(*Publisher); ok {
|
||||
if openPub.SubscriptionExists(clientId, subscriptionId) {
|
||||
if openPub.SubscriptionExists(
|
||||
clientId, subscriptionId,
|
||||
) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package socketapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"orly.dev/pkg/encoders/envelopes/authenvelope"
|
||||
"orly.dev/pkg/encoders/envelopes/okenvelope"
|
||||
"orly.dev/pkg/encoders/reason"
|
||||
@@ -36,7 +37,7 @@ import (
|
||||
// validation.
|
||||
func (a *A) HandleAuth(b []byte, srv server.I) (msg []byte) {
|
||||
if a.I.AuthRequired() {
|
||||
log.I.F("AUTH:\n%s", b)
|
||||
log.T.C(func() string { return fmt.Sprintf("AUTH:\n%s", b) })
|
||||
var err error
|
||||
var rem []byte
|
||||
env := authenvelope.NewResponse()
|
||||
|
||||
@@ -55,9 +55,13 @@ func (a *A) HandleEvent(
|
||||
) (msg []byte) {
|
||||
|
||||
var err error
|
||||
log.T.F(
|
||||
"handleEvent %s %s authed: %0x", a.RealRemote(), req,
|
||||
a.Listener.AuthedPubkey(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"handleEvent %s %s authed: %0x", a.RealRemote(), req,
|
||||
a.Listener.AuthedPubkey(),
|
||||
)
|
||||
},
|
||||
)
|
||||
var rem []byte
|
||||
sto := srv.Storage()
|
||||
@@ -213,7 +217,13 @@ func (a *A) HandleEvent(
|
||||
}
|
||||
// check and process delete
|
||||
if env.E.Kind.K == kind.Deletion.K {
|
||||
log.I.F("delete event\n%s", env.E.Serialize())
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"delete event\n%s", env.E.Serialize(),
|
||||
)
|
||||
},
|
||||
)
|
||||
var ownerDelete bool
|
||||
for _, pk := range a.OwnersPubkeys() {
|
||||
if bytes.Equal(pk, env.Pubkey) {
|
||||
@@ -288,7 +298,13 @@ func (a *A) HandleEvent(
|
||||
return
|
||||
}
|
||||
|
||||
log.I.F("successfully deleted event %x", eventId)
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"successfully deleted event %x", eventId,
|
||||
)
|
||||
},
|
||||
)
|
||||
}
|
||||
case bytes.Equal(t.Key(), []byte("a")):
|
||||
split := bytes.Split(t.Value(), []byte{':'})
|
||||
@@ -380,9 +396,13 @@ func (a *A) HandleEvent(
|
||||
}
|
||||
}
|
||||
if target.CreatedAt.Int() > env.E.CreatedAt.Int() {
|
||||
log.I.F(
|
||||
"not deleting\n%d%\nbecause delete event is older\n%d",
|
||||
target.CreatedAt.Int(), env.E.CreatedAt.Int(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"not deleting %d because delete event is older %d",
|
||||
target.CreatedAt.Int(), env.E.CreatedAt.Int(),
|
||||
)
|
||||
},
|
||||
)
|
||||
continue
|
||||
}
|
||||
@@ -417,42 +437,56 @@ func (a *A) HandleEvent(
|
||||
return
|
||||
}
|
||||
|
||||
log.I.F(
|
||||
"successfully deleted event %x", target.EventId().Bytes(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"successfully deleted event %x",
|
||||
target.EventId().Bytes(),
|
||||
)
|
||||
},
|
||||
)
|
||||
res = nil
|
||||
}
|
||||
res = nil
|
||||
}
|
||||
// Send a success response after processing all deletions
|
||||
if err = Ok.Ok(a, env, ""); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// Check if this event has been deleted before
|
||||
if env.E.Kind.K != kind.Deletion.K {
|
||||
// Create a filter to check for deletion events that reference this
|
||||
// event ID
|
||||
f := filter.New()
|
||||
f.Kinds.K = []*kind.T{kind.Deletion}
|
||||
f.Tags.AppendTags(tag.New([]byte{'e'}, env.E.ID))
|
||||
|
||||
// Query for deletion events
|
||||
var deletionEvents []*event.E
|
||||
deletionEvents, err = sto.QueryEvents(c, f)
|
||||
if err == nil && len(deletionEvents) > 0 {
|
||||
// Found deletion events for this ID, don't save it
|
||||
if err = Ok.Blocked(
|
||||
a, env, "the event was deleted, not storing it again",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// Send a success response after processing all deletions
|
||||
if err = Ok.Ok(a, env, ""); chk.E(err) {
|
||||
return
|
||||
}
|
||||
// Check if this event has been deleted before
|
||||
if env.E.Kind.K != kind.Deletion.K {
|
||||
// Create a filter to check for deletion events that reference this
|
||||
// event ID
|
||||
f := filter.New()
|
||||
f.Kinds.K = []*kind.T{kind.Deletion}
|
||||
f.Tags.AppendTags(tag.New([]byte{'e'}, env.E.ID))
|
||||
|
||||
// Query for deletion events
|
||||
var deletionEvents []*event.E
|
||||
deletionEvents, err = sto.QueryEvents(c, f)
|
||||
if err == nil && len(deletionEvents) > 0 {
|
||||
// Found deletion events for this ID, don't save it
|
||||
if err = Ok.Blocked(
|
||||
a, env, "the event was deleted, not storing it again",
|
||||
); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
var reason []byte
|
||||
ok, reason = srv.AddEvent(c, rl, env.E, a.Req(), a.RealRemote(), nil)
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"event %0x added %v %s", env.E.ID, ok, reason,
|
||||
)
|
||||
},
|
||||
)
|
||||
if err = okenvelope.NewFrom(
|
||||
env.E.ID, ok,
|
||||
).Write(a.Listener); chk.E(err) {
|
||||
return
|
||||
}
|
||||
}
|
||||
var reason []byte
|
||||
ok, reason = srv.AddEvent(c, rl, env.E, a.Req(), a.RealRemote(), nil)
|
||||
log.I.F("event %0x added %v %s", env.E.ID, ok, reason)
|
||||
if err = okenvelope.NewFrom(env.E.ID, ok).Write(a.Listener); chk.E(err) {
|
||||
return
|
||||
}
|
||||
return
|
||||
|
||||
@@ -27,7 +27,13 @@ import (
|
||||
// logs the notice, and writes it back to the listener if required.
|
||||
func (a *A) HandleMessage(msg, authedPubkey []byte) {
|
||||
remote := a.Listener.RealRemote()
|
||||
log.T.F("%s received message:\n%s", remote, string(msg))
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"%s received message:\n%s", remote, string(msg),
|
||||
)
|
||||
},
|
||||
)
|
||||
var notice []byte
|
||||
var err error
|
||||
var t string
|
||||
@@ -48,7 +54,13 @@ func (a *A) HandleMessage(msg, authedPubkey []byte) {
|
||||
notice = []byte(fmt.Sprintf("unknown envelope type %s\n%s", t, rem))
|
||||
}
|
||||
if len(notice) > 0 {
|
||||
log.D.F("notice->%s %s", a.RealRemote(), notice)
|
||||
log.D.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"notice->%s %s", a.RealRemote(), notice,
|
||||
)
|
||||
},
|
||||
)
|
||||
if err = noticeenvelope.NewFrom(notice).Write(a.Listener); chk.E(err) {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package socketapi
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"orly.dev/pkg/encoders/bech32encoding"
|
||||
"orly.dev/pkg/encoders/envelopes/authenvelope"
|
||||
@@ -49,11 +50,15 @@ import (
|
||||
// generates and sends a closure envelope.
|
||||
func (a *A) HandleReq(c context.T, req []byte, srv server.I) (r []byte) {
|
||||
var err error
|
||||
log.I.F(
|
||||
"auth required %v client authed %v %0x", a.I.AuthRequired(),
|
||||
a.Listener.IsAuthed(), a.Listener.AuthedPubkey(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"auth required %v client authed %v %0x", a.I.AuthRequired(),
|
||||
a.Listener.IsAuthed(), a.Listener.AuthedPubkey(),
|
||||
)
|
||||
},
|
||||
)
|
||||
log.I.F("REQ:\n%s", req)
|
||||
log.T.C(func() string { return fmt.Sprintf("REQ:\n%s", req) })
|
||||
sto := srv.Storage()
|
||||
var rem []byte
|
||||
env := reqenvelope.New()
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package socketapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"orly.dev/pkg/encoders/envelopes/eventenvelope"
|
||||
"orly.dev/pkg/encoders/event"
|
||||
"orly.dev/pkg/encoders/filters"
|
||||
@@ -90,9 +91,13 @@ func (p *S) Receive(msg typer.T) {
|
||||
log.T.F("removed listener %s", m.Listener.RealRemote())
|
||||
} else {
|
||||
p.removeSubscriberId(m.Listener, m.Id)
|
||||
log.T.F(
|
||||
"removed subscription %s for %s", m.Id,
|
||||
m.Listener.RealRemote(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"removed subscription %s for %s", m.Id,
|
||||
m.Listener.RealRemote(),
|
||||
)
|
||||
},
|
||||
)
|
||||
}
|
||||
return
|
||||
@@ -103,14 +108,24 @@ func (p *S) Receive(msg typer.T) {
|
||||
subs = make(map[string]*filters.T)
|
||||
subs[m.Id] = m.Filters
|
||||
p.Map[m.Listener] = subs
|
||||
log.T.F(
|
||||
"created new subscription for %s, %s", m.Listener.RealRemote(),
|
||||
m.Filters.Marshal(nil),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"created new subscription for %s, %s",
|
||||
m.Listener.RealRemote(),
|
||||
m.Filters.Marshal(nil),
|
||||
)
|
||||
},
|
||||
)
|
||||
} else {
|
||||
subs[m.Id] = m.Filters
|
||||
log.T.F(
|
||||
"added subscription %s for %s", m.Id, m.Listener.RealRemote(),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"added subscription %s for %s", m.Id,
|
||||
m.Listener.RealRemote(),
|
||||
)
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -131,20 +146,35 @@ func (p *S) Deliver(ev *event.E) {
|
||||
var err error
|
||||
p.Mx.Lock()
|
||||
defer p.Mx.Unlock()
|
||||
log.T.F(
|
||||
"delivering event %0x to websocket subscribers %d", ev.ID, len(p.Map),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"delivering event %0x to websocket subscribers %d", ev.ID,
|
||||
len(p.Map),
|
||||
)
|
||||
},
|
||||
)
|
||||
for w, subs := range p.Map {
|
||||
log.I.F("%v %s", subs, w.RealRemote())
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"%v %s", subs, w.RealRemote(),
|
||||
)
|
||||
},
|
||||
)
|
||||
for id, subscriber := range subs {
|
||||
log.T.F(
|
||||
"subscriber %s\n%s", w.RealRemote(),
|
||||
subscriber.Marshal(nil),
|
||||
)
|
||||
if !subscriber.Match(ev) {
|
||||
log.I.F(
|
||||
"subscriber %s filter %s not match", id,
|
||||
subscriber.Marshal(nil),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"subscriber %s filter %s not match", id,
|
||||
subscriber.Marshal(nil),
|
||||
)
|
||||
},
|
||||
)
|
||||
continue
|
||||
}
|
||||
@@ -165,7 +195,14 @@ func (p *S) Deliver(ev *event.E) {
|
||||
if err = res.Write(w); chk.E(err) {
|
||||
continue
|
||||
}
|
||||
log.T.F("dispatched event %0x to subscription %s", ev.ID, id)
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"dispatched event %0x to subscription %s", ev.ID,
|
||||
id,
|
||||
)
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package socketapi
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"orly.dev/pkg/app/relay/helpers"
|
||||
"orly.dev/pkg/encoders/envelopes/authenvelope"
|
||||
@@ -61,8 +62,14 @@ func (a *A) Serve(w http.ResponseWriter, r *http.Request, s server.I) {
|
||||
// Check if the IP is blocked due to too many failed auth attempts
|
||||
if iptracker.Global.IsBlocked(remote) {
|
||||
blockedUntil := iptracker.Global.GetBlockedUntil(remote)
|
||||
log.I.F("rejecting websocket connection from banned IP %s (blocked until %s)",
|
||||
remote, blockedUntil.Format(time.RFC3339))
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"rejecting websocket connection from banned IP %s (blocked until %s)",
|
||||
remote, blockedUntil.Format(time.RFC3339),
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
// We can't send a notice to the client here because the websocket connection
|
||||
// hasn't been established yet, so we just reject the connection
|
||||
@@ -114,7 +121,14 @@ func (a *A) Serve(w http.ResponseWriter, r *http.Request, s server.I) {
|
||||
},
|
||||
)
|
||||
if a.I.AuthRequired() {
|
||||
log.I.F("requesting auth from client from %s", a.Listener.RealRemote())
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"requesting auth from client from %s",
|
||||
a.Listener.RealRemote(),
|
||||
)
|
||||
},
|
||||
)
|
||||
a.Listener.RequestAuth()
|
||||
if err = authenvelope.NewChallengeWith(a.Listener.Challenge()).
|
||||
Write(a.Listener); chk.E(err) {
|
||||
|
||||
@@ -223,16 +223,28 @@ func (r *Client) ConnectWithTLS(
|
||||
if err != nil && !strings.Contains(
|
||||
err.Error(), "failed to wait for pong",
|
||||
) {
|
||||
log.I.F(
|
||||
"{%s} error writing ping: %v; closing websocket", r.URL,
|
||||
err,
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"{%s} error writing ping: %v; closing websocket",
|
||||
r.URL,
|
||||
err,
|
||||
)
|
||||
},
|
||||
)
|
||||
r.Close() // this should trigger a context cancelation
|
||||
return
|
||||
}
|
||||
case writeRequest := <-r.writeQueue:
|
||||
// all write requests will go through this to prevent races
|
||||
log.D.F("{%s} sending %v\n", r.URL, string(writeRequest.msg))
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"{%s} sending %v\n", r.URL,
|
||||
string(writeRequest.msg),
|
||||
)
|
||||
},
|
||||
)
|
||||
if err := r.Connection.WriteMessage(
|
||||
r.connectionContext, writeRequest.msg,
|
||||
); err != nil {
|
||||
@@ -276,10 +288,10 @@ func (r *Client) ConnectWithTLS(
|
||||
}
|
||||
r.challenge = env.Challenge
|
||||
case eventenvelope.L:
|
||||
log.I.F("%s", rem)
|
||||
// log.I.F("%s", rem)
|
||||
var env *eventenvelope.Result
|
||||
env = eventenvelope.NewResult()
|
||||
if _, err = env.Unmarshal(rem); chk.E(err) {
|
||||
if _, err = env.Unmarshal(rem); err != nil {
|
||||
continue
|
||||
}
|
||||
subid := env.Subscription.String()
|
||||
@@ -292,17 +304,25 @@ func (r *Client) ConnectWithTLS(
|
||||
continue
|
||||
}
|
||||
if !sub.Filters.Match(env.Event) {
|
||||
log.I.F(
|
||||
"{%s} filter does not match: %v ~ %s\n", r.URL,
|
||||
sub.Filters, env.Event.Marshal(nil),
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"{%s} filter does not match: %v ~ %s\n", r.URL,
|
||||
sub.Filters, env.Event.Marshal(nil),
|
||||
)
|
||||
},
|
||||
)
|
||||
continue
|
||||
}
|
||||
if !r.AssumeValid {
|
||||
if ok, err = env.Event.Verify(); !ok || chk.E(err) {
|
||||
log.I.F(
|
||||
"{%s} bad signature on %s\n", r.URL,
|
||||
env.Event.ID,
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"{%s} bad signature on %s\n", r.URL,
|
||||
env.Event.ID,
|
||||
)
|
||||
},
|
||||
)
|
||||
continue
|
||||
}
|
||||
@@ -350,9 +370,14 @@ func (r *Client) ConnectWithTLS(
|
||||
if okCallback, exist := r.okCallbacks.Load(env.EventID.String()); exist {
|
||||
okCallback(env.OK, string(env.Reason))
|
||||
} else {
|
||||
log.I.F(
|
||||
"{%s} got an unexpected OK message for event %s", r.URL,
|
||||
env.EventID,
|
||||
log.T.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"{%s} got an unexpected OK message for event %s",
|
||||
r.URL,
|
||||
env.EventID,
|
||||
)
|
||||
},
|
||||
)
|
||||
}
|
||||
default:
|
||||
|
||||
@@ -405,7 +405,13 @@ func (p *Pool) FetchManyReplaceable(
|
||||
|
||||
relay, err := p.EnsureRelay(nm)
|
||||
if err != nil {
|
||||
log.D.F("error connecting to %s with %v: %s", nm, f, err)
|
||||
log.D.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"error connecting to %s with %v: %s", nm, f, err,
|
||||
)
|
||||
},
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -414,8 +420,13 @@ func (p *Pool) FetchManyReplaceable(
|
||||
subscribe:
|
||||
sub, err := relay.Subscribe(ctx, filters.New(f), opts...)
|
||||
if err != nil {
|
||||
log.D.F(
|
||||
"error subscribing to %s with %v: %s", relay, f, err,
|
||||
log.D.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"error subscribing to %s with %v: %s", relay, f,
|
||||
err,
|
||||
)
|
||||
},
|
||||
)
|
||||
return
|
||||
}
|
||||
@@ -546,7 +557,7 @@ func (p *Pool) subMany(
|
||||
}
|
||||
|
||||
// otherwise (if we were connected and got disconnected) keep trying to reconnect
|
||||
log.D.F("%s reconnecting because connection failed\n", nm)
|
||||
log.D.F("%s reconnecting because connection failed", nm)
|
||||
goto reconnect
|
||||
}
|
||||
firstConnection = false
|
||||
@@ -569,7 +580,7 @@ func (p *Pool) subMany(
|
||||
// )...,
|
||||
)
|
||||
if err != nil {
|
||||
log.D.F("%s reconnecting because subscription died\n", nm)
|
||||
log.D.F("%s reconnecting because subscription died", nm)
|
||||
goto reconnect
|
||||
}
|
||||
|
||||
@@ -597,7 +608,7 @@ func (p *Pool) subMany(
|
||||
ff.F[i].Since = now
|
||||
}
|
||||
log.D.F(
|
||||
"%s reconnecting because sub.Events is broken\n",
|
||||
"%s reconnecting because sub.Events is broken",
|
||||
nm,
|
||||
)
|
||||
goto reconnect
|
||||
@@ -719,8 +730,13 @@ func (p *Pool) subManyEoseNonOverwriteCheckDuplicate(
|
||||
|
||||
relay, err := p.EnsureRelay(nm)
|
||||
if err != nil {
|
||||
log.D.F(
|
||||
"error connecting to %s with %v: %s", nm, filters, err,
|
||||
log.D.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"error connecting to %s with %v: %s", nm, filters,
|
||||
err,
|
||||
)
|
||||
},
|
||||
)
|
||||
return
|
||||
}
|
||||
@@ -730,8 +746,14 @@ func (p *Pool) subManyEoseNonOverwriteCheckDuplicate(
|
||||
subscribe:
|
||||
sub, err := relay.Subscribe(ctx, filters, opts...)
|
||||
if err != nil {
|
||||
log.D.F(
|
||||
"error subscribing to %s with %v: %s", relay, filters, err,
|
||||
log.D.C(
|
||||
func() string {
|
||||
return fmt.Sprintf(
|
||||
"error subscribing to %s with %v: %s", relay,
|
||||
filters,
|
||||
err,
|
||||
)
|
||||
},
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -341,7 +341,7 @@ func TestNew(t *testing.T) {
|
||||
log.D.Ln("test message")
|
||||
if !strings.Contains(buf.String(), "test message") {
|
||||
t.Errorf(
|
||||
"log.D.Ln() output %q does not contain 'test message'",
|
||||
"log.D.Ln() output %q doesn't contain 'test message'",
|
||||
buf.String(),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1 +1 @@
|
||||
v0.6.2
|
||||
v0.7.0
|
||||