Replace the errorf helpers with the standard errors and fmt.Errorf packages, remove redundant logging across database operations, drop unnecessary import aliases, and make event delivery to subscribers asynchronous. Also raise GOMAXPROCS in main to improve CPU utilization.
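The delivery change is the behavioural one in this commit: `Deliver` is now launched in its own goroutine, so the handler returns to the client right after writing the OK response instead of blocking on subscriber fan-out, and `main` oversubscribes the scheduler via `GOMAXPROCS`. A minimal sketch of the pattern, not the relay's actual types: `Publisher` and the channel subscribers below are illustrative stand-ins, and the sketch assumes `Deliver` is safe for concurrent use, which it must be once it is called from a goroutine.

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// Publisher is an illustrative stand-in for the relay's publishers type.
// Deliver must be safe for concurrent use once it is invoked via `go`.
type Publisher struct {
	mu   sync.Mutex
	subs []chan string
}

// Deliver fans the event out to every subscriber channel. Any blocking
// here now stalls only this goroutine, not the request handler.
func (p *Publisher) Deliver(ev string) {
	p.mu.Lock()
	defer p.mu.Unlock()
	for _, sub := range p.subs {
		sub <- ev
	}
}

func main() {
	// Mirrors the main() change: oversubscribe the Go scheduler relative to
	// the CPU count (the factor of 4 is the value chosen in this commit).
	runtime.GOMAXPROCS(runtime.NumCPU() * 4)

	p := &Publisher{subs: []chan string{make(chan string, 1)}}

	// Handler side: acknowledge first, then deliver asynchronously.
	go p.Deliver("saved event")
	fmt.Println(<-p.subs[0]) // a subscriber eventually receives the event
}
```

The trade-off is that delivery errors can no longer be reported on the request path, so they have to be handled inside `Deliver` itself.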

2025-09-13 00:47:53 +01:00
parent c45276ef08
commit fc546ddc0b
9 changed files with 109 additions and 114 deletions

View File

@@ -8,13 +8,13 @@ import (
"lol.mleku.dev/chk" "lol.mleku.dev/chk"
"lol.mleku.dev/log" "lol.mleku.dev/log"
acl "next.orly.dev/pkg/acl" "next.orly.dev/pkg/acl"
"next.orly.dev/pkg/encoders/envelopes/authenvelope" "next.orly.dev/pkg/encoders/envelopes/authenvelope"
"next.orly.dev/pkg/encoders/envelopes/eventenvelope" "next.orly.dev/pkg/encoders/envelopes/eventenvelope"
"next.orly.dev/pkg/encoders/envelopes/okenvelope" "next.orly.dev/pkg/encoders/envelopes/okenvelope"
"next.orly.dev/pkg/encoders/kind" "next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/reason" "next.orly.dev/pkg/encoders/reason"
utils "next.orly.dev/pkg/utils" "next.orly.dev/pkg/utils"
) )
func (l *Listener) HandleEvent(msg []byte) (err error) { func (l *Listener) HandleEvent(msg []byte) (err error) {
@@ -151,7 +151,7 @@ func (l *Listener) HandleEvent(msg []byte) (err error) {
 		return
 	}
 	// Deliver the event to subscribers immediately after sending OK response
-	l.publishers.Deliver(env.E)
+	go l.publishers.Deliver(env.E)
 	log.D.F("saved event %0x", env.E.ID)
 	var isNewFromAdmin bool
 	for _, admin := range l.Admins {

View File

@@ -6,6 +6,7 @@ import (
"net/http" "net/http"
"os" "os"
"os/signal" "os/signal"
"runtime"
"time" "time"
"github.com/pkg/profile" "github.com/pkg/profile"
@@ -19,6 +20,7 @@ import (
 )

 func main() {
+	runtime.GOMAXPROCS(runtime.NumCPU() * 4)
 	var err error
 	var cfg *config.C
 	if cfg, err = config.New(); chk.T(err) {

View File

@@ -12,7 +12,7 @@ import (
"lol.mleku.dev/errorf" "lol.mleku.dev/errorf"
"lol.mleku.dev/log" "lol.mleku.dev/log"
"next.orly.dev/app/config" "next.orly.dev/app/config"
database "next.orly.dev/pkg/database" "next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/indexes/types" "next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/encoders/bech32encoding" "next.orly.dev/pkg/encoders/bech32encoding"
"next.orly.dev/pkg/encoders/envelopes" "next.orly.dev/pkg/encoders/envelopes"
@@ -25,7 +25,7 @@ import (
"next.orly.dev/pkg/encoders/kind" "next.orly.dev/pkg/encoders/kind"
"next.orly.dev/pkg/encoders/tag" "next.orly.dev/pkg/encoders/tag"
"next.orly.dev/pkg/protocol/publish" "next.orly.dev/pkg/protocol/publish"
utils "next.orly.dev/pkg/utils" "next.orly.dev/pkg/utils"
"next.orly.dev/pkg/utils/normalize" "next.orly.dev/pkg/utils/normalize"
"next.orly.dev/pkg/utils/values" "next.orly.dev/pkg/utils/values"
) )
@@ -298,7 +298,7 @@ func (f *Follows) startSubscriptions(ctx context.Context) {
 			} else {
 				// Only dispatch if the event was newly saved (no error)
 				if f.pubs != nil {
-					f.pubs.Deliver(res.Event)
+					go f.pubs.Deliver(res.Event)
 				}
 				log.I.F(
 					"saved new event from follows syncer: %0x",

View File

@@ -19,18 +19,16 @@ func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
 	if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.NewFromBytesSlice(id)}); chk.E(err) {
 		return
 	}
-	for i, idx := range idxs {
-		log.T.F(
-			"GetSerialById: searching range %d: start=%x, end=%x", i, idx.Start,
-			idx.End,
-		)
-	}
+	// for i, idx := range idxs {
+	// log.T.F(
+	// "GetSerialById: searching range %d: start=%x, end=%x", i, idx.Start,
+	// idx.End,
+	// )
+	// }
 	if len(idxs) == 0 {
 		err = errorf.E("no indexes found for id %0x", id)
 		return
 	}
 	idFound := false
 	if err = d.View(
 		func(txn *badger.Txn) (err error) {
@@ -49,16 +47,15 @@ func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
 				idFound = true
 			} else {
 				// Item not found in database
-				log.T.F(
-					"GetSerialById: ID not found in database: %s", hex.Enc(id),
-				)
+				// log.T.F(
+				// "GetSerialById: ID not found in database: %s", hex.Enc(id),
+				// )
 			}
 			return
 		},
 	); chk.E(err) {
 		return
 	}
 	if !idFound {
 		err = errorf.T("id not found in database: %s", hex.Enc(id))
 		return
@@ -67,7 +64,6 @@ func (d *D) GetSerialById(id []byte) (ser *types.Uint40, err error) {
 	return
 }

-//
 // func (d *D) GetSerialBytesById(id []byte) (ser []byte, err error) {
 // var idxs []Range
 // if idxs, err = GetIndexesFromFilter(&filter.F{Ids: tag.New(id)}); chk.E(err) {

View File

@@ -43,49 +43,49 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 	var expDeletes types.Uint40s
 	var expEvs event.S
 	if f.Ids != nil && f.Ids.Len() > 0 {
-		for _, id := range f.Ids.T {
-			log.T.F("QueryEvents: looking for ID=%s", hex.Enc(id))
-		}
-		log.T.F("QueryEvents: ids path, count=%d", f.Ids.Len())
+		// for _, id := range f.Ids.T {
+		// log.T.F("QueryEvents: looking for ID=%s", hex.Enc(id))
+		// }
+		// log.T.F("QueryEvents: ids path, count=%d", f.Ids.Len())
 		for _, idx := range f.Ids.T {
-			log.T.F("QueryEvents: lookup id=%s", hex.Enc(idx))
+			// log.T.F("QueryEvents: lookup id=%s", hex.Enc(idx))
 			// we know there is only Ids in this, so run the ID query and fetch.
 			var ser *types.Uint40
 			var idErr error
 			if ser, idErr = d.GetSerialById(idx); idErr != nil {
 				// Check if this is a "not found" error which is expected for IDs we don't have
 				if strings.Contains(idErr.Error(), "id not found in database") {
-					log.T.F(
-						"QueryEvents: ID not found in database: %s",
-						hex.Enc(idx),
-					)
+					// log.T.F(
+					// "QueryEvents: ID not found in database: %s",
+					// hex.Enc(idx),
+					// )
 				} else {
 					// Log unexpected errors but continue processing other IDs
-					log.E.F(
-						"QueryEvents: error looking up id=%s err=%v",
-						hex.Enc(idx), idErr,
-					)
+					// log.E.F(
+					// "QueryEvents: error looking up id=%s err=%v",
+					// hex.Enc(idx), idErr,
+					// )
 				}
 				continue
 			}
 			// Check if the serial is nil, which indicates the ID wasn't found
 			if ser == nil {
-				log.T.F("QueryEvents: Serial is nil for ID: %s", hex.Enc(idx))
+				// log.T.F("QueryEvents: Serial is nil for ID: %s", hex.Enc(idx))
 				continue
 			}
 			// fetch the events
 			var ev *event.E
 			if ev, err = d.FetchEventBySerial(ser); err != nil {
-				log.T.F(
-					"QueryEvents: fetch by serial failed for id=%s ser=%v err=%v",
-					hex.Enc(idx), ser, err,
-				)
+				// log.T.F(
+				// "QueryEvents: fetch by serial failed for id=%s ser=%v err=%v",
+				// hex.Enc(idx), ser, err,
+				// )
 				continue
 			}
-			log.T.F(
-				"QueryEvents: found id=%s kind=%d created_at=%d",
-				hex.Enc(ev.ID), ev.Kind, ev.CreatedAt,
-			)
+			// log.T.F(
+			// "QueryEvents: found id=%s kind=%d created_at=%d",
+			// hex.Enc(ev.ID), ev.Kind, ev.CreatedAt,
+			// )
 			// check for an expiration tag and delete after returning the result
 			if CheckExpiration(ev) {
 				log.T.F(
@@ -98,16 +98,16 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 			}
 			// skip events that have been deleted by a proper deletion event
 			if derr := d.CheckForDeleted(ev, nil); derr != nil {
-				log.T.F(
-					"QueryEvents: id=%s filtered out due to deletion: %v",
-					hex.Enc(ev.ID), derr,
-				)
+				// log.T.F(
+				// "QueryEvents: id=%s filtered out due to deletion: %v",
+				// hex.Enc(ev.ID), derr,
+				// )
 				continue
 			}
-			log.T.F(
-				"QueryEvents: id=%s SUCCESSFULLY FOUND, adding to results",
-				hex.Enc(ev.ID),
-			)
+			// log.T.F(
+			// "QueryEvents: id=%s SUCCESSFULLY FOUND, adding to results",
+			// hex.Enc(ev.ID),
+			// )
 			evs = append(evs, ev)
 		}
 		// sort the events by timestamp
@@ -301,20 +301,19 @@ func (d *D) QueryEvents(c context.Context, f *filter.F) (
 			if ev, err = d.FetchEventBySerial(ser); err != nil {
 				continue
 			}
 			// Add logging for tag filter debugging
 			if f.Tags != nil && f.Tags.Len() > 0 {
-				var eventTags []string
-				if ev.Tags != nil && ev.Tags.Len() > 0 {
-					for _, t := range *ev.Tags {
-						if t.Len() >= 2 {
-							eventTags = append(
-								eventTags,
-								string(t.Key())+"="+string(t.Value()),
-							)
-						}
-					}
-				}
+				// var eventTags []string
+				// if ev.Tags != nil && ev.Tags.Len() > 0 {
+				// for _, t := range *ev.Tags {
+				// if t.Len() >= 2 {
+				// eventTags = append(
+				// eventTags,
+				// string(t.Key())+"="+string(t.Value()),
+				// )
+				// }
+				// }
+				// }
 				// log.T.F(
 				// "QueryEvents: processing event ID=%s kind=%d tags=%v",
 				// hex.Enc(ev.ID), ev.Kind, eventTags,

View File

@@ -123,7 +123,7 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
 		}
 	}
 	if ev.CreatedAt < maxTs {
-		err = errorf.E(
+		err = fmt.Errorf(
 			"blocked: %0x was deleted: the event is older than the delete event %0x: event: %d delete: %d",
 			ev.ID, maxId, ev.CreatedAt, maxTs,
 		)
@@ -165,17 +165,17 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
 		idPkTss = append(idPkTss, tmp...)
 		// find the newest deletion without sorting to reduce cost
 		maxTs := idPkTss[0].Ts
-		maxId := idPkTss[0].Id
+		// maxId := idPkTss[0].Id
 		for i := 1; i < len(idPkTss); i++ {
 			if idPkTss[i].Ts > maxTs {
 				maxTs = idPkTss[i].Ts
-				maxId = idPkTss[i].Id
+				// maxId = idPkTss[i].Id
 			}
 		}
 		if ev.CreatedAt < maxTs {
-			err = errorf.E(
-				"blocked: %0x was deleted by address %s: event is older than the delete: event: %d delete: %d",
-				ev.ID, at, maxId, ev.CreatedAt, maxTs,
-			)
+			err = fmt.Errorf(
+				"blocked: was deleted by address %s: event is older than the delete: event: %d delete: %d",
+				at, ev.CreatedAt, maxTs,
+			)
 			return
 		}
@@ -206,8 +206,8 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
 			// For e-tag deletions (delete by ID), any deletion event means the event cannot be resubmitted
 			// regardless of timestamp, since it's a specific deletion of this exact event
 			err = errorf.E(
-				"blocked: %0x was deleted by ID and cannot be resubmitted",
-				ev.ID,
+				"blocked: was deleted by ID and cannot be resubmitted",
+				// ev.ID,
 			)
 			return
 		}
@@ -216,8 +216,8 @@ func (d *D) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
 			// For e-tag deletions (delete by ID), any deletion event means the event cannot be resubmitted
 			// regardless of timestamp, since it's a specific deletion of this exact event
 			err = errorf.E(
-				"blocked: %0x was deleted by ID and cannot be resubmitted",
-				ev.ID,
+				"blocked: was deleted by ID and cannot be resubmitted",
+				// ev.ID,
 			)
 			return
 		}

View File

@@ -2,10 +2,10 @@ package database
 import (
 	"context"
+	"errors"
 	"sort"

 	"lol.mleku.dev/chk"
-	"lol.mleku.dev/errorf"
 	"next.orly.dev/pkg/database/indexes/types"
 	"next.orly.dev/pkg/encoders/filter"
 	"next.orly.dev/pkg/interfaces/store"
@@ -20,7 +20,7 @@ func (d *D) QueryForIds(c context.Context, f *filter.F) (
 ) {
 	if f.Ids != nil && f.Ids.Len() > 0 {
 		// if there is Ids in the query, this is an error for this query
-		err = errorf.E("query for Ids is invalid for a filter with Ids")
+		err = errors.New("query for Ids is invalid for a filter with Ids")
 		return
 	}
 	var idxs []Range

View File

@@ -3,18 +3,16 @@ package database
 import (
 	"bytes"
 	"context"
+	"errors"
 	"fmt"
 	"strings"

 	"github.com/dgraph-io/badger/v4"
 	"lol.mleku.dev/chk"
-	"lol.mleku.dev/errorf"
-	"lol.mleku.dev/log"
 	"next.orly.dev/pkg/database/indexes"
 	"next.orly.dev/pkg/database/indexes/types"
 	"next.orly.dev/pkg/encoders/event"
 	"next.orly.dev/pkg/encoders/filter"
-	"next.orly.dev/pkg/encoders/hex"
 	"next.orly.dev/pkg/encoders/kind"
 	"next.orly.dev/pkg/encoders/tag"
 )
@@ -39,13 +37,13 @@ func (d *D) GetSerialsFromFilter(f *filter.F) (
 // SaveEvent saves an event to the database, generating all the necessary indexes.
 func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
 	if ev == nil {
-		err = errorf.E("nil event")
+		err = errors.New("nil event")
 		return
 	}
 	// check if the event already exists
 	var ser *types.Uint40
 	if ser, err = d.GetSerialById(ev.ID); err == nil && ser != nil {
-		err = errorf.E("blocked: event already exists: %0x", ev.ID)
+		err = errors.New("blocked: event already exists")
 		return
 	}
@@ -55,7 +53,7 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
 		err = nil
 	} else if err != nil {
 		// For any other error, return it
-		log.E.F("error checking if event exists: %s", err)
+		// log.E.F("error checking if event exists: %s", err)
 		return
 	}
@@ -65,7 +63,7 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
// "SaveEvent: rejecting resubmission of deleted event ID=%s: %v", // "SaveEvent: rejecting resubmission of deleted event ID=%s: %v",
// hex.Enc(ev.ID), err, // hex.Enc(ev.ID), err,
// ) // )
err = errorf.E("blocked: %s", err.Error()) err = fmt.Errorf("blocked: %s", err.Error())
return return
} }
// check for replacement // check for replacement
@@ -89,11 +87,11 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
 				}
 				// Only replace if the new event is newer or same timestamp
 				if ev.CreatedAt < oldEv.CreatedAt {
-					log.I.F(
-						"SaveEvent: rejecting older replaceable event ID=%s (created_at=%d) - existing event ID=%s (created_at=%d)",
-						hex.Enc(ev.ID), ev.CreatedAt, hex.Enc(oldEv.ID),
-						oldEv.CreatedAt,
-					)
+					// log.I.F(
+					// "SaveEvent: rejecting older replaceable event ID=%s (created_at=%d) - existing event ID=%s (created_at=%d)",
+					// hex.Enc(ev.ID), ev.CreatedAt, hex.Enc(oldEv.ID),
+					// oldEv.CreatedAt,
+					// )
 					shouldReplace = false
 					break
 				}
@@ -104,11 +102,11 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
 				if oldEv, err = d.FetchEventBySerial(s); chk.E(err) {
 					continue
 				}
-				log.I.F(
-					"SaveEvent: replacing older replaceable event ID=%s (created_at=%d) with newer event ID=%s (created_at=%d)",
-					hex.Enc(oldEv.ID), oldEv.CreatedAt, hex.Enc(ev.ID),
-					ev.CreatedAt,
-				)
+				// log.I.F(
+				// "SaveEvent: replacing older replaceable event ID=%s (created_at=%d) with newer event ID=%s (created_at=%d)",
+				// hex.Enc(oldEv.ID), oldEv.CreatedAt, hex.Enc(ev.ID),
+				// ev.CreatedAt,
+				// )
 				if err = d.DeleteEventBySerial(
 					c, s, oldEv,
 				); chk.E(err) {
@@ -117,7 +115,7 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
 				}
 			} else {
 				// Don't save the older event - return an error
-				err = errorf.E("blocked: event is older than existing replaceable event")
+				err = errors.New("blocked: event is older than existing replaceable event")
 				return
 			}
 		}
@@ -125,7 +123,7 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
 		// find the events and check timestamps before deleting
 		dTag := ev.Tags.GetFirst([]byte("d"))
 		if dTag == nil {
-			err = errorf.E("event is missing a d tag identifier")
+			err = errors.New("event is missing a d tag identifier")
 			return
 		}
 		f := &filter.F{
@@ -149,11 +147,11 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
 				}
 				// Only replace if the new event is newer or same timestamp
 				if ev.CreatedAt < oldEv.CreatedAt {
-					log.I.F(
-						"SaveEvent: rejecting older addressable event ID=%s (created_at=%d) - existing event ID=%s (created_at=%d)",
-						hex.Enc(ev.ID), ev.CreatedAt, hex.Enc(oldEv.ID),
-						oldEv.CreatedAt,
-					)
+					// log.I.F(
+					// "SaveEvent: rejecting older addressable event ID=%s (created_at=%d) - existing event ID=%s (created_at=%d)",
+					// hex.Enc(ev.ID), ev.CreatedAt, hex.Enc(oldEv.ID),
+					// oldEv.CreatedAt,
+					// )
 					shouldReplace = false
 					break
 				}
@@ -164,11 +162,11 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
 				if oldEv, err = d.FetchEventBySerial(s); chk.E(err) {
 					continue
 				}
-				log.I.F(
-					"SaveEvent: replacing older addressable event ID=%s (created_at=%d) with newer event ID=%s (created_at=%d)",
-					hex.Enc(oldEv.ID), oldEv.CreatedAt, hex.Enc(ev.ID),
-					ev.CreatedAt,
-				)
+				// log.I.F(
+				// "SaveEvent: replacing older addressable event ID=%s (created_at=%d) with newer event ID=%s (created_at=%d)",
+				// hex.Enc(oldEv.ID), oldEv.CreatedAt, hex.Enc(ev.ID),
+				// ev.CreatedAt,
+				// )
 				if err = d.DeleteEventBySerial(
 					c, s, oldEv,
 				); chk.E(err) {
@@ -177,7 +175,7 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
 				}
 			} else {
 				// Don't save the older event - return an error
-				err = errorf.E("blocked: event is older than existing addressable event")
+				err = errors.New("blocked: event is older than existing addressable event")
 				return
 			}
 		}
@@ -232,14 +230,14 @@ func (d *D) SaveEvent(c context.Context, ev *event.E) (kc, vc int, err error) {
 			return
 		},
 	)
-	log.T.F(
-		"total data written: %d bytes keys %d bytes values for event ID %s", kc,
-		vc, hex.Enc(ev.ID),
-	)
-	log.T.C(
-		func() string {
-			return fmt.Sprintf("event:\n%s\n", ev.Serialize())
-		},
-	)
+	// log.T.F(
+	// "total data written: %d bytes keys %d bytes values for event ID %s", kc,
+	// vc, hex.Enc(ev.ID),
+	// )
+	// log.T.C(
+	// func() string {
+	// return fmt.Sprintf("event:\n%s\n", ev.Serialize())
+	// },
+	// )
 	return
 }

View File

@@ -48,7 +48,7 @@ func (d *D) IsSubscriptionActive(pubkey []byte) (bool, error) {
 	err := d.DB.Update(
 		func(txn *badger.Txn) error {
 			item, err := txn.Get([]byte(key))
-			if err == badger.ErrKeyNotFound {
+			if errors.Is(err, badger.ErrKeyNotFound) {
 				sub := &Subscription{TrialEnd: now.AddDate(0, 0, 30)}
 				data, err := json.Marshal(sub)
 				if err != nil {
@@ -90,7 +90,7 @@ func (d *D) ExtendSubscription(pubkey []byte, days int) error {
 		func(txn *badger.Txn) error {
 			var sub Subscription
 			item, err := txn.Get([]byte(key))
-			if err == badger.ErrKeyNotFound {
+			if errors.Is(err, badger.ErrKeyNotFound) {
 				sub.PaidUntil = now.AddDate(0, 0, days)
 			} else if err != nil {
 				return err
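One note on the last two hunks: swapping `==` for `errors.Is` keeps the not-found check working even if a layer above starts wrapping the Badger error with `%w`. A minimal standalone illustration, where the sentinel below is only a stand-in for `badger.ErrKeyNotFound`:

```go
package main

import (
	"errors"
	"fmt"
)

// errKeyNotFound stands in for badger.ErrKeyNotFound in this sketch.
var errKeyNotFound = errors.New("Key not found")

// lookup simulates a store call that wraps the sentinel with context.
func lookup() error {
	return fmt.Errorf("get subscription: %w", errKeyNotFound)
}

func main() {
	err := lookup()
	fmt.Println(err == errKeyNotFound)          // false: the wrapped error is a different value
	fmt.Println(errors.Is(err, errKeyNotFound)) // true: errors.Is walks the wrap chain
}
```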