implement wasm/js specific database engine

This commit is contained in:
2025-12-03 12:31:25 +00:00
parent c8fac06f24
commit 0a61f274d5
59 changed files with 8651 additions and 38 deletions

614
pkg/wasmdb/delete-event.go Normal file
View File

@@ -0,0 +1,614 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"context"
"fmt"
"sort"
"strconv"
"time"
"github.com/aperturerobotics/go-indexeddb/idb"
"lol.mleku.dev/chk"
"lol.mleku.dev/errorf"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
hexenc "git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/ints"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/tag"
"git.mleku.dev/mleku/nostr/encoders/tag/atag"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/interfaces/store"
"next.orly.dev/pkg/utils"
)
// DeleteEvent removes an event from the database identified by `eid`.
//
// It resolves the event ID to its internal serial number, loads the stored
// event so its index keys can be reconstructed, and then removes the record
// together with all its indexes. A missing serial or event is treated as
// "nothing to delete" and returns nil.
func (w *W) DeleteEvent(c context.Context, eid []byte) (err error) {
	w.Logger.Warnf("deleting event %0x", eid)
	// Resolve the event ID to its serial number; absence is a no-op.
	ser, serErr := w.GetSerialById(eid)
	if chk.E(serErr) {
		return serErr
	}
	if ser == nil {
		return nil
	}
	// Load the stored event; it is needed to derive the index keys.
	ev, fetchErr := w.FetchEventBySerial(ser)
	if chk.E(fetchErr) {
		return fetchErr
	}
	if ev == nil {
		return nil
	}
	if err = w.DeleteEventBySerial(c, ser, ev); chk.E(err) {
		return err
	}
	return nil
}
// DeleteEventBySerial removes an event and all its indexes by serial number.
//
// All index keys derived from the event are deleted, followed by the event
// records in both the small-event and large-event stores, inside a single
// read-write IndexedDB transaction. Failures on individual index deletions
// are logged and skipped; only transaction-level failures are returned.
func (w *W) DeleteEventBySerial(c context.Context, ser *types.Uint40, ev *event.E) (err error) {
	w.Logger.Infof("DeleteEventBySerial: deleting event %0x (serial %d)", ev.ID, ser.Get())
	// Get all indexes for the event.
	var idxs [][]byte
	idxs, err = database.GetIndexesForEvent(ev, ser.Get())
	if chk.E(err) {
		w.Logger.Errorf("DeleteEventBySerial: failed to get indexes for event %0x: %v", ev.ID, err)
		return
	}
	w.Logger.Infof("DeleteEventBySerial: found %d indexes for event %0x", len(idxs), ev.ID)
	// Collect all unique store names we need to access; the first three
	// bytes of each index key identify its object store.
	storeNames := make(map[string]struct{})
	for _, key := range idxs {
		if len(key) >= 3 {
			storeNames[string(key[:3])] = struct{}{}
		}
	}
	// Also include the event payload stores.
	storeNames[string(indexes.EventPrefix)] = struct{}{}
	storeNames[string(indexes.SmallEventPrefix)] = struct{}{}
	// Convert to slice for the transaction call.
	storeList := make([]string, 0, len(storeNames))
	for name := range storeNames {
		storeList = append(storeList, name)
	}
	if len(storeList) == 0 {
		return nil
	}
	// Start a transaction covering every store we will touch.
	tx, err := w.db.Transaction(idb.TransactionReadWrite, storeList[0], storeList[1:]...)
	if err != nil {
		return fmt.Errorf("failed to start delete transaction: %w", err)
	}
	// Delete all index keys; individual failures are logged, not fatal.
	for _, key := range idxs {
		if len(key) < 3 {
			continue
		}
		storeName := string(key[:3])
		objStore, storeErr := tx.ObjectStore(storeName)
		if storeErr != nil {
			w.Logger.Warnf("DeleteEventBySerial: failed to get object store %s: %v", storeName, storeErr)
			continue
		}
		keyJS := bytesToSafeValue(key)
		if _, delErr := objStore.Delete(keyJS); delErr != nil {
			w.Logger.Warnf("DeleteEventBySerial: failed to delete index from %s: %v", storeName, delErr)
		}
	}
	// Delete from the small event store. Use a local error so a marshal
	// failure cannot leak into (or be clobbered out of) the returned err,
	// which the previous `if err = ...; err == nil` pattern allowed.
	sevKeyBuf := new(bytes.Buffer)
	if marshalErr := indexes.SmallEventEnc(ser).MarshalWrite(sevKeyBuf); marshalErr == nil {
		if objStore, storeErr := tx.ObjectStore(string(indexes.SmallEventPrefix)); storeErr == nil {
			// For small events the key embeds size and data, so scan by prefix.
			w.deleteKeysByPrefix(objStore, sevKeyBuf.Bytes())
		}
	} else {
		w.Logger.Warnf("DeleteEventBySerial: failed to marshal small event key: %v", marshalErr)
	}
	// Delete from the large event store; surface (log) delete failures that
	// were previously discarded.
	evtKeyBuf := new(bytes.Buffer)
	if marshalErr := indexes.EventEnc(ser).MarshalWrite(evtKeyBuf); marshalErr == nil {
		if objStore, storeErr := tx.ObjectStore(string(indexes.EventPrefix)); storeErr == nil {
			keyJS := bytesToSafeValue(evtKeyBuf.Bytes())
			if _, delErr := objStore.Delete(keyJS); delErr != nil {
				w.Logger.Warnf("DeleteEventBySerial: failed to delete large event: %v", delErr)
			}
		}
	} else {
		w.Logger.Warnf("DeleteEventBySerial: failed to marshal large event key: %v", marshalErr)
	}
	// Commit the transaction.
	if err = tx.Await(c); err != nil {
		return fmt.Errorf("failed to commit delete transaction: %w", err)
	}
	w.Logger.Infof("DeleteEventBySerial: successfully deleted event %0x and all indexes", ev.ID)
	return nil
}
// deleteKeysByPrefix deletes all keys starting with the given prefix from an
// object store. Matching keys are collected during cursor iteration first and
// deleted afterwards, so the cursor never walks a store it is mutating.
func (w *W) deleteKeysByPrefix(store *idb.ObjectStore, prefix []byte) {
	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		return
	}
	var matched [][]byte
	cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		// HasPrefix already implies the key is at least prefix-length.
		if key := safeValueToBytes(keyVal); bytes.HasPrefix(key, prefix) {
			matched = append(matched, key)
		}
		return cursor.Continue()
	})
	// Delete the collected keys.
	for _, key := range matched {
		store.Delete(bytesToSafeValue(key))
	}
}
// DeleteExpired scans for events with expiration timestamps that have passed
// and deletes them.
//
// It walks the expiration index (key layout: exp(3)|expiration|serial(5)),
// collects the serials of entries whose expiration is at or before the
// current time, then deletes each corresponding event and its indexes.
// Failures are logged; the method never returns an error.
func (w *W) DeleteExpired() {
	now := time.Now().Unix()
	// Open a read transaction over the expiration index.
	tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.ExpirationPrefix))
	if err != nil {
		w.Logger.Warnf("DeleteExpired: failed to start transaction: %v", err)
		return
	}
	objStore, err := tx.ObjectStore(string(indexes.ExpirationPrefix))
	if err != nil {
		w.Logger.Warnf("DeleteExpired: failed to get expiration store: %v", err)
		return
	}
	var expiredSerials types.Uint40s
	cursorReq, err := objStore.OpenCursor(idb.CursorNext)
	if err != nil {
		w.Logger.Warnf("DeleteExpired: failed to open cursor: %v", err)
		return
	}
	// Collect expired serials. The Iter error was previously discarded,
	// which could silently hide a truncated scan.
	if iterErr := cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		keyBytes := safeValueToBytes(keyVal)
		if len(keyBytes) < 8 { // exp prefix (3) + expiration (variable) + serial (5)
			return cursor.Continue()
		}
		// Parse expiration key: exp|expiration_timestamp|serial.
		exp, ser := indexes.ExpirationVars()
		buf := bytes.NewBuffer(keyBytes)
		if err := indexes.ExpirationDec(exp, ser).UnmarshalRead(buf); err != nil {
			return cursor.Continue()
		}
		if int64(exp.Get()) > now {
			// Not expired yet.
			return cursor.Continue()
		}
		expiredSerials = append(expiredSerials, ser)
		return cursor.Continue()
	}); iterErr != nil {
		w.Logger.Warnf("DeleteExpired: cursor iteration failed: %v", iterErr)
	}
	// Delete expired events; unfetchable entries are skipped.
	for _, ser := range expiredSerials {
		ev, fetchErr := w.FetchEventBySerial(ser)
		if fetchErr != nil || ev == nil {
			continue
		}
		if err := w.DeleteEventBySerial(context.Background(), ser, ev); err != nil {
			w.Logger.Warnf("DeleteExpired: failed to delete expired event: %v", err)
		}
	}
}
// ProcessDelete processes a kind 5 (deletion) event, deleting the events it
// references.
//
// Three reference forms are handled:
//   - e-tags: delete a specific event by its 32-byte ID;
//   - a-tags: delete addressable events matching kind:pubkey[:d-tag], but
//     only those created strictly before the deletion event itself;
//   - k-tags (only when no e- or a-tags are present): delete all events of
//     the listed kinds authored by the deletion event's pubkey that are
//     older than the deletion event.
//
// In every branch, deletion is restricted to events whose pubkey matches the
// deletion event's pubkey. NOTE(review): the admins parameter is unused in
// this function — confirm whether admin-initiated deletion was intended here
// (CheckForDeleted does honour admins).
func (w *W) ProcessDelete(ev *event.E, admins [][]byte) (err error) {
	eTags := ev.Tags.GetAll([]byte("e"))
	aTags := ev.Tags.GetAll([]byte("a"))
	kTags := ev.Tags.GetAll([]byte("k"))
	// Process e-tags: delete specific events by ID. Individual failures skip
	// to the next tag rather than aborting the whole deletion event.
	for _, eTag := range eTags {
		if eTag.Len() < 2 {
			continue
		}
		// Use ValueHex() to handle both binary and hex storage formats.
		eventIdHex := eTag.ValueHex()
		if len(eventIdHex) != 64 { // hex encoded event ID
			continue
		}
		// Decode hex event ID.
		var eid []byte
		if eid, err = hexenc.DecAppend(nil, eventIdHex); chk.E(err) {
			continue
		}
		// Fetch the event to verify ownership.
		var ser *types.Uint40
		if ser, err = w.GetSerialById(eid); chk.E(err) || ser == nil {
			continue
		}
		var targetEv *event.E
		if targetEv, err = w.FetchEventBySerial(ser); chk.E(err) || targetEv == nil {
			continue
		}
		// Only allow users to delete their own events.
		if !utils.FastEqual(targetEv.Pubkey, ev.Pubkey) {
			continue
		}
		// Delete the event.
		if err = w.DeleteEvent(context.Background(), eid); chk.E(err) {
			w.Logger.Warnf("failed to delete event %x via e-tag: %v", eid, err)
			continue
		}
		w.Logger.Debugf("deleted event %x via e-tag deletion", eid)
	}
	// Process a-tags: delete addressable events by kind:pubkey:d-tag.
	for _, aTag := range aTags {
		if aTag.Len() < 2 {
			continue
		}
		// Parse the 'a' tag value: kind:pubkey:d-tag (for parameterized) or
		// kind:pubkey (for regular replaceable events).
		split := bytes.Split(aTag.Value(), []byte{':'})
		if len(split) < 2 {
			continue
		}
		// Parse the kind component.
		kindStr := string(split[0])
		kindInt, parseErr := strconv.Atoi(kindStr)
		if parseErr != nil {
			continue
		}
		kk := kind.New(uint16(kindInt))
		// Parse the pubkey component.
		var pk []byte
		if pk, err = hexenc.DecAppend(nil, split[1]); chk.E(err) {
			continue
		}
		// Only allow users to delete their own events.
		if !utils.FastEqual(pk, ev.Pubkey) {
			continue
		}
		// Build a filter selecting the events to delete.
		delFilter := &filter.F{
			Authors: tag.NewFromBytesSlice(pk),
			Kinds:   kind.NewS(kk),
		}
		// For parameterized replaceable events, add the d-tag filter.
		if kind.IsParameterizedReplaceable(kk.K) && len(split) >= 3 {
			dValue := split[2]
			delFilter.Tags = tag.NewS(tag.NewFromAny([]byte("d"), dValue))
		}
		// Find matching events via index ranges.
		var idxs []database.Range
		if idxs, err = database.GetIndexesFromFilter(delFilter); chk.E(err) {
			continue
		}
		var sers types.Uint40s
		for _, idx := range idxs {
			var s types.Uint40s
			if s, err = w.GetSerialsByRange(idx); chk.E(err) {
				continue
			}
			sers = append(sers, s...)
		}
		// Delete matching events that are older than the deletion event.
		if len(sers) > 0 {
			var idPkTss []*store.IdPkTs
			var tmp []*store.IdPkTs
			if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
				continue
			}
			idPkTss = append(idPkTss, tmp...)
			// Sort newest first by timestamp.
			sort.Slice(idPkTss, func(i, j int) bool {
				return idPkTss[i].Ts > idPkTss[j].Ts
			})
			for _, v := range idPkTss {
				// Only events created before the deletion event qualify.
				if v.Ts < ev.CreatedAt {
					if err = w.DeleteEvent(context.Background(), v.Id); chk.E(err) {
						w.Logger.Warnf("failed to delete event %x via a-tag: %v", v.Id, err)
						continue
					}
					w.Logger.Debugf("deleted event %x via a-tag deletion", v.Id)
				}
			}
		}
	}
	// If there are no e or a tags, delete all replaceable events of the kinds
	// specified by the k tags for the pubkey of the delete event.
	if len(eTags) == 0 && len(aTags) == 0 {
		// Parse the kind tags into kind values.
		var kinds []*kind.K
		for _, k := range kTags {
			kv := k.Value()
			iv := ints.New(0)
			if _, err = iv.Unmarshal(kv); chk.E(err) {
				continue
			}
			kinds = append(kinds, kind.New(iv.N))
		}
		var idxs []database.Range
		if idxs, err = database.GetIndexesFromFilter(
			&filter.F{
				Authors: tag.NewFromBytesSlice(ev.Pubkey),
				Kinds:   kind.NewS(kinds...),
			},
		); chk.E(err) {
			return
		}
		var sers types.Uint40s
		for _, idx := range idxs {
			var s types.Uint40s
			if s, err = w.GetSerialsByRange(idx); chk.E(err) {
				return
			}
			sers = append(sers, s...)
		}
		if len(sers) > 0 {
			var idPkTss []*store.IdPkTs
			var tmp []*store.IdPkTs
			if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
				return
			}
			idPkTss = append(idPkTss, tmp...)
			// Sort newest first by timestamp.
			sort.Slice(idPkTss, func(i, j int) bool {
				return idPkTss[i].Ts > idPkTss[j].Ts
			})
			// Delete only events older than the deletion event; failures are
			// logged by chk.E and skipped.
			for _, v := range idPkTss {
				if v.Ts < ev.CreatedAt {
					if err = w.DeleteEvent(context.Background(), v.Id); chk.E(err) {
						continue
					}
				}
			}
		}
	}
	return
}
// CheckForDeleted checks if the event has been deleted, and returns an error
// with prefix "blocked:" if it is. The admins parameter designates extra
// pubkeys (beyond the event author) whose deletion events are honoured.
//
// Three lookups are performed depending on the event's kind:
//   - parameterized replaceable: look for a deletion with a matching #a tag
//     (kind:pubkey:d-tag) newer than ev;
//   - replaceable: look for a deletion with a matching #k tag, then one with
//     a matching #a tag (kind:pubkey), newer than ev;
//   - all other kinds: look for a deletion carrying an e tag with ev's exact
//     ID — any hit blocks the event regardless of timestamps.
func (w *W) CheckForDeleted(ev *event.E, admins [][]byte) (err error) {
	// Deletions may come from the author or any configured admin.
	keys := append([][]byte{ev.Pubkey}, admins...)
	authors := tag.NewFromBytesSlice(keys...)
	// If the event is addressable, check for a deletion event with the same
	// kind/pubkey/dtag.
	if kind.IsParameterizedReplaceable(ev.Kind) {
		var idxs []database.Range
		// Construct an a-tag value (kind:pubkey:d-tag) for the lookup.
		t := ev.Tags.GetFirst([]byte("d"))
		var dTagValue []byte
		if t != nil {
			dTagValue = t.Value()
		}
		a := atag.T{
			Kind:   kind.New(ev.Kind),
			Pubkey: ev.Pubkey,
			DTag:   dTagValue,
		}
		at := a.Marshal(nil)
		if idxs, err = database.GetIndexesFromFilter(
			&filter.F{
				Authors: authors,
				Kinds:   kind.NewS(kind.Deletion),
				Tags:    tag.NewS(tag.NewFromAny("#a", at)),
			},
		); chk.E(err) {
			return
		}
		// Gather serials of matching deletion events.
		var sers types.Uint40s
		for _, idx := range idxs {
			var s types.Uint40s
			if s, err = w.GetSerialsByRange(idx); chk.E(err) {
				return
			}
			sers = append(sers, s...)
		}
		if len(sers) > 0 {
			var idPkTss []*store.IdPkTs
			var tmp []*store.IdPkTs
			if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
				return
			}
			idPkTss = append(idPkTss, tmp...)
			// Find the newest deletion timestamp.
			maxTs := idPkTss[0].Ts
			for i := 1; i < len(idPkTss); i++ {
				if idPkTss[i].Ts > maxTs {
					maxTs = idPkTss[i].Ts
				}
			}
			// Block only when the event predates the newest deletion.
			if ev.CreatedAt < maxTs {
				err = errorf.E(
					"blocked: %0x was deleted by address %s because it is older than the delete: event: %d delete: %d",
					ev.ID, at, ev.CreatedAt, maxTs,
				)
				return
			}
			return
		}
		return
	}
	// If the event is replaceable, check if there is a deletion event newer
	// than the event.
	if kind.IsReplaceable(ev.Kind) {
		// First: deletions that reference the kind via a #k tag. NOTE(review):
		// this filter uses only the event author, not the admin list — confirm
		// whether admins should also match here.
		var idxs []database.Range
		if idxs, err = database.GetIndexesFromFilter(
			&filter.F{
				Authors: tag.NewFromBytesSlice(ev.Pubkey),
				Kinds:   kind.NewS(kind.Deletion),
				Tags: tag.NewS(
					tag.NewFromAny("#k", fmt.Sprint(ev.Kind)),
				),
			},
		); chk.E(err) {
			return
		}
		var sers types.Uint40s
		for _, idx := range idxs {
			var s types.Uint40s
			if s, err = w.GetSerialsByRange(idx); chk.E(err) {
				return
			}
			sers = append(sers, s...)
		}
		if len(sers) > 0 {
			var idPkTss []*store.IdPkTs
			var tmp []*store.IdPkTs
			if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
				return
			}
			idPkTss = append(idPkTss, tmp...)
			// Find the newest deletion, tracking its ID for the error message.
			maxTs := idPkTss[0].Ts
			maxId := idPkTss[0].Id
			for i := 1; i < len(idPkTss); i++ {
				if idPkTss[i].Ts > maxTs {
					maxTs = idPkTss[i].Ts
					maxId = idPkTss[i].Id
				}
			}
			if ev.CreatedAt < maxTs {
				err = fmt.Errorf(
					"blocked: %0x was deleted: the event is older than the delete event %0x: event: %d delete: %d",
					ev.ID, maxId, ev.CreatedAt, maxTs,
				)
				return
			}
		}
		// This type of delete can also use an a tag (kind:pubkey, no d-tag)
		// to specify kind and author.
		idxs = nil
		a := atag.T{
			Kind:   kind.New(ev.Kind),
			Pubkey: ev.Pubkey,
		}
		at := a.Marshal(nil)
		if idxs, err = database.GetIndexesFromFilter(
			&filter.F{
				Authors: authors,
				Kinds:   kind.NewS(kind.Deletion),
				Tags:    tag.NewS(tag.NewFromAny("#a", at)),
			},
		); chk.E(err) {
			return
		}
		sers = nil
		for _, idx := range idxs {
			var s types.Uint40s
			if s, err = w.GetSerialsByRange(idx); chk.E(err) {
				return
			}
			sers = append(sers, s...)
		}
		if len(sers) > 0 {
			var idPkTss []*store.IdPkTs
			var tmp []*store.IdPkTs
			if tmp, err = w.GetFullIdPubkeyBySerials(sers); chk.E(err) {
				return
			}
			idPkTss = append(idPkTss, tmp...)
			// Find the newest deletion timestamp.
			maxTs := idPkTss[0].Ts
			for i := 1; i < len(idPkTss); i++ {
				if idPkTss[i].Ts > maxTs {
					maxTs = idPkTss[i].Ts
				}
			}
			if ev.CreatedAt < maxTs {
				err = errorf.E(
					"blocked: %0x was deleted by address %s because it is older than the delete: event: %d delete: %d",
					ev.ID, at, ev.CreatedAt, maxTs,
				)
				return
			}
			return
		}
		return
	}
	// Otherwise check for a delete by event id (e tag).
	var idxs []database.Range
	if idxs, err = database.GetIndexesFromFilter(
		&filter.F{
			Authors: authors,
			Kinds:   kind.NewS(kind.Deletion),
			Tags: tag.NewS(
				tag.NewFromAny("e", hexenc.Enc(ev.ID)),
			),
		},
	); chk.E(err) {
		return
	}
	for _, idx := range idxs {
		var s types.Uint40s
		if s, err = w.GetSerialsByRange(idx); chk.E(err) {
			return
		}
		if len(s) > 0 {
			// Any e-tag deletion found means the exact event was deleted.
			err = errorf.E("blocked: %0x has been deleted", ev.ID)
			return
		}
	}
	return
}

256
pkg/wasmdb/fetch-event.go Normal file
View File

@@ -0,0 +1,256 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"errors"
"github.com/aperturerobotics/go-indexeddb/idb"
"lol.mleku.dev/chk"
"git.mleku.dev/mleku/nostr/encoders/event"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/interfaces/store"
)
// FetchEventBySerial retrieves an event by its serial number.
//
// The small event store is consulted first, then the large event store; an
// error is returned when the serial is nil or neither store holds the event.
func (w *W) FetchEventBySerial(ser *types.Uint40) (ev *event.E, err error) {
	if ser == nil {
		return nil, errors.New("nil serial")
	}
	// Try the small event store (sev prefix) first.
	if ev, err = w.fetchSmallEvent(ser); err == nil && ev != nil {
		return ev, nil
	}
	// Fall back to the large event store (evt prefix).
	if ev, err = w.fetchLargeEvent(ser); err == nil && ev != nil {
		return ev, nil
	}
	return nil, errors.New("event not found")
}
// errFoundSmallEvent is a sentinel used to stop cursor iteration early once a
// matching small event has been decoded.
var errFoundSmallEvent = errors.New("small event found")

// fetchSmallEvent fetches an event from the small event store.
//
// Small event keys embed the payload (layout: sev(3)|serial(5)|size(2)|data),
// so the store is scanned with a cursor and the first key matching the serial
// prefix is decoded and returned.
func (w *W) fetchSmallEvent(ser *types.Uint40) (*event.E, error) {
	// Build the key prefix for this serial.
	keyBuf := new(bytes.Buffer)
	if err := indexes.SmallEventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
		return nil, err
	}
	prefix := keyBuf.Bytes()
	// Open a read-only transaction over the small event store.
	tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.SmallEventPrefix))
	if err != nil {
		return nil, err
	}
	store, err := tx.ObjectStore(string(indexes.SmallEventPrefix))
	if err != nil {
		return nil, err
	}
	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}
	var foundEvent *event.E
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		keyBytes := safeValueToBytes(keyVal)
		if bytes.HasPrefix(keyBytes, prefix) {
			// Key layout: sev|serial(5)|size(2)|data(variable).
			if len(keyBytes) > 10 { // 3 + 5 + 2 = 10 minimum
				sizeOffset := 8 // 3 prefix + 5 serial
				if len(keyBytes) > sizeOffset+2 {
					// Big-endian uint16 payload size follows the serial.
					size := int(keyBytes[sizeOffset])<<8 | int(keyBytes[sizeOffset+1])
					dataStart := sizeOffset + 2
					if len(keyBytes) >= dataStart+size {
						eventData := keyBytes[dataStart : dataStart+size]
						ev := new(event.E)
						if unmarshalErr := ev.UnmarshalBinary(bytes.NewReader(eventData)); unmarshalErr == nil {
							foundEvent = ev
							// Stop iteration via a dedicated sentinel instead
							// of the previous fragile err.Error() == "found"
							// string comparison.
							return errFoundSmallEvent
						}
					}
				}
			}
		}
		return cursor.Continue()
	})
	if foundEvent != nil {
		return foundEvent, nil
	}
	if err != nil && !errors.Is(err, errFoundSmallEvent) {
		return nil, err
	}
	return nil, errors.New("small event not found")
}
// fetchLargeEvent fetches an event from the large event store, where the
// serialized event body is stored as the record value under an evt|serial key.
func (w *W) fetchLargeEvent(ser *types.Uint40) (*event.E, error) {
	// Encode the exact lookup key for this serial.
	keyBuf := new(bytes.Buffer)
	if err := indexes.EventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
		return nil, err
	}
	// Open a read-only transaction over the large event store.
	tx, txErr := w.db.Transaction(idb.TransactionReadOnly, string(indexes.EventPrefix))
	if txErr != nil {
		return nil, txErr
	}
	objStore, storeErr := tx.ObjectStore(string(indexes.EventPrefix))
	if storeErr != nil {
		return nil, storeErr
	}
	// Direct key lookup; no cursor scan is needed for large events.
	req, getErr := objStore.Get(bytesToSafeValue(keyBuf.Bytes()))
	if getErr != nil {
		return nil, getErr
	}
	val, awaitErr := req.Await(w.ctx)
	if awaitErr != nil {
		return nil, awaitErr
	}
	if val.IsUndefined() || val.IsNull() {
		return nil, errors.New("large event not found")
	}
	raw := safeValueToBytes(val)
	if len(raw) == 0 {
		return nil, errors.New("empty event data")
	}
	ev := new(event.E)
	if err := ev.UnmarshalBinary(bytes.NewReader(raw)); err != nil {
		return nil, err
	}
	return ev, nil
}
// FetchEventsBySerials retrieves multiple events by their serial numbers,
// returning them keyed by serial value. Nil serials and serials that fail to
// resolve are silently skipped; the returned error is always nil.
func (w *W) FetchEventsBySerials(serials []*types.Uint40) (events map[uint64]*event.E, err error) {
	events = make(map[uint64]*event.E, len(serials))
	for _, ser := range serials {
		if ser == nil {
			continue
		}
		if ev, fetchErr := w.FetchEventBySerial(ser); fetchErr == nil && ev != nil {
			events[ser.Get()] = ev
		}
	}
	return events, nil
}
// errFoundFullIdPubkey is a sentinel used to stop cursor iteration once a
// matching fpc entry has been parsed.
var errFoundFullIdPubkey = errors.New("full id pubkey found")

// GetFullIdPubkeyBySerial retrieves the ID, pubkey hash, and timestamp for a
// serial from the fpc index.
// Key layout: fpc(3)|serial(5)|id(32)|pubkey_hash(8)|timestamp(8) = 56 bytes.
func (w *W) GetFullIdPubkeyBySerial(ser *types.Uint40) (fidpk *store.IdPkTs, err error) {
	if ser == nil {
		return nil, errors.New("nil serial")
	}
	// Build the prefix to search for. The marshal error was previously
	// ignored, which could panic on the [:8] slice below if fewer than 8
	// bytes were written.
	keyBuf := new(bytes.Buffer)
	if err = indexes.FullIdPubkeyEnc(ser, nil, nil, nil).MarshalWrite(keyBuf); err != nil {
		return nil, err
	}
	prefix := keyBuf.Bytes()[:8] // 3 prefix + 5 serial
	// Search in the fpc object store.
	tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.FullIdPubkeyPrefix))
	if err != nil {
		return nil, err
	}
	objStore, err := tx.ObjectStore(string(indexes.FullIdPubkeyPrefix))
	if err != nil {
		return nil, err
	}
	// Use a cursor to find the matching key.
	cursorReq, err := objStore.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		keyBytes := safeValueToBytes(keyVal)
		if bytes.HasPrefix(keyBytes, prefix) && len(keyBytes) >= 56 { // 3 + 5 + 32 + 8 + 8
			fidpk = &store.IdPkTs{
				Id:  make([]byte, 32),
				Pub: make([]byte, 8),
			}
			copy(fidpk.Id, keyBytes[8:40])
			copy(fidpk.Pub, keyBytes[40:48])
			// Decode the big-endian uint64 timestamp.
			var ts int64
			for i := 0; i < 8; i++ {
				ts = (ts << 8) | int64(keyBytes[48+i])
			}
			fidpk.Ts = ts
			fidpk.Ser = ser.Get()
			// Stop iteration via a dedicated sentinel instead of the
			// previous fragile err.Error() == "found" string comparison.
			return errFoundFullIdPubkey
		}
		return cursor.Continue()
	})
	if fidpk != nil {
		return fidpk, nil
	}
	if err != nil && !errors.Is(err, errFoundFullIdPubkey) {
		return nil, err
	}
	return nil, errors.New("full id pubkey not found")
}
// GetFullIdPubkeyBySerials retrieves ID/pubkey/timestamp tuples for multiple
// serials. Nil serials and lookup failures are skipped; the returned error is
// always nil.
func (w *W) GetFullIdPubkeyBySerials(sers []*types.Uint40) (fidpks []*store.IdPkTs, err error) {
	fidpks = make([]*store.IdPkTs, 0, len(sers))
	for _, ser := range sers {
		if ser == nil {
			continue
		}
		if fidpk, lookupErr := w.GetFullIdPubkeyBySerial(ser); lookupErr == nil && fidpk != nil {
			fidpks = append(fidpks, fidpk)
		}
	}
	return fidpks, nil
}

162
pkg/wasmdb/helpers.go Normal file
View File

@@ -0,0 +1,162 @@
//go:build js && wasm
package wasmdb
import (
"syscall/js"
"github.com/hack-pad/safejs"
)
// copyJSBytes copies a JS array-like value of the given length into a fresh
// Go byte slice by per-element indexing (safejs does not expose
// CopyBytesToGo). Returns nil if any element cannot be read as an int.
func copyJSBytes(val safejs.Value, length int) []byte {
	buf := make([]byte, length)
	for i := 0; i < length; i++ {
		elem, err := val.Index(i)
		if err != nil {
			return nil
		}
		intVal, err := elem.Int()
		if err != nil {
			return nil
		}
		buf[i] = byte(intVal)
	}
	return buf
}

// safeValueToBytes converts a safejs.Value to a []byte.
// It handles Uint8Array, ArrayBuffer, generic array-like objects, and
// strings coming back from IndexedDB. nil is returned for null/undefined or
// unconvertible values. The previously duplicated element-copy loop is now
// factored into copyJSBytes.
func safeValueToBytes(val safejs.Value) []byte {
	if val.IsUndefined() || val.IsNull() {
		return nil
	}
	// Get global Uint8Array and ArrayBuffer constructors for type checks.
	uint8ArrayType := safejs.MustGetGlobal("Uint8Array")
	arrayBufferType := safejs.MustGetGlobal("ArrayBuffer")
	// Uint8Array: copy elements directly.
	if isUint8Array, _ := val.InstanceOf(uint8ArrayType); isUint8Array {
		length, err := val.Length()
		if err != nil {
			return nil
		}
		return copyJSBytes(val, length)
	}
	// ArrayBuffer: wrap it in a Uint8Array view and recurse.
	if isArrayBuffer, _ := val.InstanceOf(arrayBufferType); isArrayBuffer {
		uint8Array, err := uint8ArrayType.New(val)
		if err != nil {
			return nil
		}
		return safeValueToBytes(uint8Array)
	}
	// Generic array-like object with a positive length property.
	if length, err := val.Length(); err == nil && length > 0 {
		return copyJSBytes(val, length)
	}
	// Last resort: string keys stored in IndexedDB.
	if val.Type() == safejs.TypeString {
		if str, err := val.String(); err == nil {
			return []byte(str)
		}
	}
	return nil
}
// bytesToSafeValue converts a []byte to a safejs.Value (Uint8Array)
func bytesToSafeValue(buf []byte) safejs.Value {
if buf == nil {
return safejs.Null()
}
uint8Array := js.Global().Get("Uint8Array").New(len(buf))
js.CopyBytesToJS(uint8Array, buf)
return safejs.Safe(uint8Array)
}
// cryptoRandom fills the provided byte slice with cryptographically secure random bytes
// using the Web Crypto API (crypto.getRandomValues) or Node.js crypto.randomFillSync
func cryptoRandom(buf []byte) error {
if len(buf) == 0 {
return nil
}
// First try browser's crypto.getRandomValues
crypto := js.Global().Get("crypto")
if crypto.IsUndefined() {
// Fallback to msCrypto for older IE
crypto = js.Global().Get("msCrypto")
}
if !crypto.IsUndefined() {
// Try getRandomValues (browser API)
getRandomValues := crypto.Get("getRandomValues")
if !getRandomValues.IsUndefined() && getRandomValues.Type() == js.TypeFunction {
// Create a Uint8Array to receive random bytes
uint8Array := js.Global().Get("Uint8Array").New(len(buf))
// Call crypto.getRandomValues - may throw in Node.js
defer func() {
// Recover from panic if this method doesn't work
recover()
}()
getRandomValues.Invoke(uint8Array)
// Copy the random bytes to our Go slice
js.CopyBytesToGo(buf, uint8Array)
return nil
}
// Try randomFillSync (Node.js API)
randomFillSync := crypto.Get("randomFillSync")
if !randomFillSync.IsUndefined() && randomFillSync.Type() == js.TypeFunction {
uint8Array := js.Global().Get("Uint8Array").New(len(buf))
randomFillSync.Invoke(uint8Array)
js.CopyBytesToGo(buf, uint8Array)
return nil
}
}
// Try to load Node.js crypto module via require
requireFunc := js.Global().Get("require")
if !requireFunc.IsUndefined() && requireFunc.Type() == js.TypeFunction {
nodeCrypto := requireFunc.Invoke("crypto")
if !nodeCrypto.IsUndefined() {
randomFillSync := nodeCrypto.Get("randomFillSync")
if !randomFillSync.IsUndefined() && randomFillSync.Type() == js.TypeFunction {
uint8Array := js.Global().Get("Uint8Array").New(len(buf))
randomFillSync.Invoke(uint8Array)
js.CopyBytesToGo(buf, uint8Array)
return nil
}
}
}
return errNoCryptoAPI
}
// errNoCryptoAPI is returned when the Web Crypto API is not available
type cryptoAPIError struct{}
func (cryptoAPIError) Error() string { return "Web Crypto API not available" }
var errNoCryptoAPI = cryptoAPIError{}

293
pkg/wasmdb/import-export.go Normal file
View File

@@ -0,0 +1,293 @@
//go:build js && wasm
package wasmdb
import (
"bufio"
"bytes"
"context"
"encoding/json"
"io"
"github.com/aperturerobotics/go-indexeddb/idb"
"lol.mleku.dev/chk"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/tag"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
)
// Import reads newline-delimited JSON events from rr and stores each one via
// SaveEvent, logging and skipping lines that fail to decode or save.
func (w *W) Import(rr io.Reader) {
	ctx := context.Background()
	scanner := bufio.NewScanner(rr)
	// Allow events up to 1MB per line.
	scanner.Buffer(make([]byte, 1024*1024), 1024*1024)
	count := 0
	for scanner.Scan() {
		line := scanner.Bytes()
		if len(line) == 0 {
			continue
		}
		ev := event.New()
		if err := json.Unmarshal(line, ev); err != nil {
			w.Logger.Warnf("Import: failed to unmarshal event: %v", err)
			continue
		}
		if _, err := w.SaveEvent(ctx, ev); err != nil {
			w.Logger.Debugf("Import: failed to save event: %v", err)
			continue
		}
		count++
	}
	if err := scanner.Err(); err != nil {
		w.Logger.Errorf("Import: scanner error: %v", err)
	}
	w.Logger.Infof("Import: imported %d events", count)
}
// Export writes events to wr as newline-delimited JSON. When pubkeys are
// given, only events authored by those pubkeys are exported; otherwise every
// event in the database is written.
//
// Fix: write failures now abort the export (previously wr.Write errors were
// ignored, silently truncating the output); a local error is used for the
// per-pubkey serial lookup to avoid shadowing the outer err.
func (w *W) Export(c context.Context, wr io.Writer, pubkeys ...[]byte) {
	var evs event.S
	var err error
	if len(pubkeys) > 0 {
		// Export only events from the specified pubkeys.
		for _, pk := range pubkeys {
			// Get all serials for this pubkey.
			serials, serErr := w.GetSerialsByPubkey(pk)
			if serErr != nil {
				w.Logger.Warnf("Export: failed to get serials for pubkey: %v", serErr)
				continue
			}
			for _, ser := range serials {
				ev, fetchErr := w.FetchEventBySerial(ser)
				if fetchErr != nil || ev == nil {
					continue
				}
				evs = append(evs, ev)
			}
		}
	} else {
		// Export all events.
		evs, err = w.getAllEvents(c)
		if err != nil {
			w.Logger.Errorf("Export: failed to get all events: %v", err)
			return
		}
	}
	// Write events as JSONL; abort on the first write error.
	exported := 0
	for _, ev := range evs {
		data, marshalErr := json.Marshal(ev)
		if marshalErr != nil {
			w.Logger.Warnf("Export: failed to marshal event: %v", marshalErr)
			continue
		}
		if _, writeErr := wr.Write(data); writeErr != nil {
			w.Logger.Errorf("Export: write failed after %d events: %v", exported, writeErr)
			return
		}
		if _, writeErr := wr.Write([]byte("\n")); writeErr != nil {
			w.Logger.Errorf("Export: write failed after %d events: %v", exported, writeErr)
			return
		}
		exported++
	}
	w.Logger.Infof("Export: exported %d events", exported)
}
// ImportEventsFromReader imports newline-delimited JSON events from rr,
// honouring ctx cancellation between lines. Lines that fail to decode or
// save are logged and skipped; a scanner failure is returned.
func (w *W) ImportEventsFromReader(ctx context.Context, rr io.Reader) error {
	scanner := bufio.NewScanner(rr)
	// Allow events up to 1MB per line.
	scanner.Buffer(make([]byte, 1024*1024), 1024*1024)
	imported := 0
	for scanner.Scan() {
		// Bail out promptly if the caller has cancelled.
		if ctx.Err() != nil {
			w.Logger.Infof("ImportEventsFromReader: cancelled after %d events", imported)
			return ctx.Err()
		}
		line := scanner.Bytes()
		if len(line) == 0 {
			continue
		}
		ev := event.New()
		if err := json.Unmarshal(line, ev); err != nil {
			w.Logger.Warnf("ImportEventsFromReader: failed to unmarshal: %v", err)
			continue
		}
		if _, err := w.SaveEvent(ctx, ev); err != nil {
			w.Logger.Debugf("ImportEventsFromReader: failed to save: %v", err)
			continue
		}
		imported++
	}
	if err := scanner.Err(); err != nil {
		return err
	}
	w.Logger.Infof("ImportEventsFromReader: imported %d events", imported)
	return nil
}
// ImportEventsFromStrings imports events given as JSON strings, optionally
// vetting each one through the supplied policy manager before saving.
// Rejected, undecodable, or unsaveable events are logged and skipped;
// cancellation via ctx aborts the loop.
func (w *W) ImportEventsFromStrings(
	ctx context.Context,
	eventJSONs []string,
	policyManager interface {
		CheckPolicy(action string, ev *event.E, pubkey []byte, remote string) (bool, error)
	},
) error {
	imported := 0
	for _, jsonStr := range eventJSONs {
		// Stop promptly when the caller cancels.
		if ctx.Err() != nil {
			w.Logger.Infof("ImportEventsFromStrings: cancelled after %d events", imported)
			return ctx.Err()
		}
		ev := event.New()
		if err := json.Unmarshal([]byte(jsonStr), ev); err != nil {
			w.Logger.Warnf("ImportEventsFromStrings: failed to unmarshal: %v", err)
			continue
		}
		// Consult the write policy when a manager was supplied.
		if policyManager != nil {
			allowed, policyErr := policyManager.CheckPolicy("write", ev, ev.Pubkey, "import")
			if policyErr != nil || !allowed {
				w.Logger.Debugf("ImportEventsFromStrings: policy rejected event")
				continue
			}
		}
		if _, err := w.SaveEvent(ctx, ev); err != nil {
			w.Logger.Debugf("ImportEventsFromStrings: failed to save: %v", err)
			continue
		}
		imported++
	}
	w.Logger.Infof("ImportEventsFromStrings: imported %d events", imported)
	return nil
}
// GetSerialsByPubkey returns all event serials stored for the given pubkey by
// resolving the author-index ranges; ranges that fail to resolve are skipped.
func (w *W) GetSerialsByPubkey(pubkey []byte) ([]*types.Uint40, error) {
	// Derive index ranges from an authors-only filter.
	ranges, err := database.GetIndexesFromFilter(&filter.F{
		Authors: tag.NewFromBytesSlice(pubkey),
	})
	if chk.E(err) {
		return nil, err
	}
	var serials []*types.Uint40
	for _, r := range ranges {
		if sers, rangeErr := w.GetSerialsByRange(r); rangeErr == nil {
			serials = append(serials, sers...)
		}
	}
	return serials, nil
}
// getAllEvents retrieves every event from both the small and large event
// stores; a store that fails to scan simply contributes nothing. The
// returned error is always nil.
func (w *W) getAllEvents(c context.Context) (event.S, error) {
	var events event.S
	// Small events have their payload embedded in the key.
	if sevEvents, err := w.scanEventStore(string(indexes.SmallEventPrefix), true); err == nil {
		events = append(events, sevEvents...)
	}
	// Large events keep their payload in the record value.
	if evtEvents, err := w.scanEventStore(string(indexes.EventPrefix), false); err == nil {
		events = append(events, evtEvents...)
	}
	return events, nil
}
// scanEventStore scans a single event object store with a cursor and returns
// every event it can decode.
//
// When isSmallEvent is true the event payload is embedded in the key itself
// (layout: sev(3)|serial(5)|size_uint16(2)|event_data), so the data is
// sliced out of the key bytes; otherwise the payload is read from the
// record's value. Malformed or undecodable entries are skipped rather than
// aborting the scan; any cursor iteration error is returned alongside the
// events collected so far.
func (w *W) scanEventStore(storeName string, isSmallEvent bool) (event.S, error) {
	tx, err := w.db.Transaction(idb.TransactionReadOnly, storeName)
	if err != nil {
		return nil, err
	}
	store, err := tx.ObjectStore(storeName)
	if err != nil {
		return nil, err
	}
	var events event.S
	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		var eventData []byte
		if isSmallEvent {
			// Small events: data is embedded in the key.
			keyVal, keyErr := cursor.Key()
			if keyErr != nil {
				return keyErr
			}
			keyBytes := safeValueToBytes(keyVal)
			// Key layout: sev|serial|size_uint16|event_data.
			if len(keyBytes) > 10 { // 3 + 5 + 2 minimum
				sizeOffset := 8 // 3 prefix + 5 serial
				if len(keyBytes) > sizeOffset+2 {
					// Big-endian uint16 payload size follows the serial.
					size := int(keyBytes[sizeOffset])<<8 | int(keyBytes[sizeOffset+1])
					if len(keyBytes) >= sizeOffset+2+size {
						eventData = keyBytes[sizeOffset+2 : sizeOffset+2+size]
					}
				}
			}
		} else {
			// Large events: data is in the record value.
			val, valErr := cursor.Value()
			if valErr != nil {
				return valErr
			}
			eventData = safeValueToBytes(val)
		}
		if len(eventData) > 0 {
			ev := event.New()
			// Skip (do not fail on) events that do not decode.
			if err := ev.UnmarshalBinary(bytes.NewReader(eventData)); err == nil {
				events = append(events, ev)
			}
		}
		return cursor.Continue()
	})
	return events, err
}

75
pkg/wasmdb/logger.go Normal file
View File

@@ -0,0 +1,75 @@
//go:build js && wasm
package wasmdb
import (
"fmt"
"syscall/js"
"time"
"lol.mleku.dev"
)
// logger provides logging functionality for the wasmdb package.
// It outputs to the browser console via console.log/warn/error.
type logger struct {
	// level is the minimum severity that will be emitted; messages below it
	// are dropped (Fatalf ignores the level and always logs).
	level int
}
// NewLogger constructs a browser-console logger emitting messages at or
// above the given verbosity level.
func NewLogger(level int) *logger {
	l := &logger{}
	l.level = level
	return l
}
// SetLogLevel changes the logging level at runtime; subsequent calls to the
// leveled methods compare against the new value.
func (l *logger) SetLogLevel(level int) {
	l.level = level
}
// formatMessage renders a log line as
// "[HH:MM:SS.mmm] [wasmdb] [LEVEL] message".
func (l *logger) formatMessage(level, format string, args ...interface{}) string {
	body := fmt.Sprintf(format, args...)
	stamp := time.Now().Format("15:04:05.000")
	return fmt.Sprintf("[%s] [wasmdb] [%s] %s", stamp, level, body)
}
// Debugf logs a debug message
func (l *logger) Debugf(format string, args ...interface{}) {
if l.level <= lol.Debug {
msg := l.formatMessage("DEBUG", format, args...)
js.Global().Get("console").Call("log", msg)
}
}
// Infof logs an info message
func (l *logger) Infof(format string, args ...interface{}) {
if l.level <= lol.Info {
msg := l.formatMessage("INFO", format, args...)
js.Global().Get("console").Call("log", msg)
}
}
// Warnf logs a warning message
func (l *logger) Warnf(format string, args ...interface{}) {
if l.level <= lol.Warn {
msg := l.formatMessage("WARN", format, args...)
js.Global().Get("console").Call("warn", msg)
}
}
// Errorf logs an error message
func (l *logger) Errorf(format string, args ...interface{}) {
if l.level <= lol.Error {
msg := l.formatMessage("ERROR", format, args...)
js.Global().Get("console").Call("error", msg)
}
}
// Fatalf logs a fatal message via console.error. Unlike typical Fatalf
// implementations it does not terminate the program (no exit in WASM) and,
// unlike the other leveled methods, it ignores the configured level.
func (l *logger) Fatalf(format string, args ...interface{}) {
	msg := l.formatMessage("FATAL", format, args...)
	js.Global().Get("console").Call("error", msg)
}

213
pkg/wasmdb/nip43.go Normal file
View File

@@ -0,0 +1,213 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"encoding/binary"
"errors"
"time"
"github.com/aperturerobotics/go-indexeddb/idb"
"github.com/hack-pad/safejs"
"next.orly.dev/pkg/database"
)
const (
	// NIP43StoreName is the IndexedDB object store for NIP-43 membership
	// records, keyed by the raw 32-byte pubkey (stored as a string).
	NIP43StoreName = "nip43"
	// InvitesStoreName is the IndexedDB object store for invite codes,
	// keyed by the code string.
	InvitesStoreName = "invites"
)
// AddNIP43Member records pubkey as a NIP-43 member, remembering the invite
// code used and the time of admission. The pubkey must be exactly 32 bytes.
func (w *W) AddNIP43Member(pubkey []byte, inviteCode string) error {
	if len(pubkey) != 32 {
		return errors.New("invalid pubkey length")
	}
	// Keep a private copy of the pubkey so later mutations by the caller
	// cannot corrupt the stored record.
	pk := make([]byte, 32)
	copy(pk, pubkey)
	m := &database.NIP43Membership{
		Pubkey:     pk,
		InviteCode: inviteCode,
		AddedAt:    time.Now(),
	}
	// The raw pubkey bytes double as the store key.
	return w.setStoreValue(NIP43StoreName, string(pubkey), w.serializeNIP43Membership(m))
}
// RemoveNIP43Member removes a pubkey from NIP-43 membership by deleting its
// record (keyed by the raw pubkey bytes) from the membership store.
func (w *W) RemoveNIP43Member(pubkey []byte) error {
	return w.deleteStoreValue(NIP43StoreName, string(pubkey))
}
// IsNIP43Member reports whether pubkey has a stored NIP-43 membership
// record. Lookup failures are deliberately mapped to "not a member" rather
// than surfaced as errors.
func (w *W) IsNIP43Member(pubkey []byte) (isMember bool, err error) {
	var data []byte
	if data, err = w.getStoreValue(NIP43StoreName, string(pubkey)); err != nil {
		// Not found is not an error; the caller only wants a yes/no.
		return false, nil
	}
	return data != nil, nil
}
// GetNIP43Membership returns the full membership record stored for pubkey,
// or an error when no record exists.
func (w *W) GetNIP43Membership(pubkey []byte) (*database.NIP43Membership, error) {
	data, err := w.getStoreValue(NIP43StoreName, string(pubkey))
	switch {
	case err != nil:
		return nil, err
	case data == nil:
		return nil, errors.New("membership not found")
	}
	return w.deserializeNIP43Membership(data)
}
// GetAllNIP43Members returns all NIP-43 member pubkeys by walking the
// membership object store. Keys that are not exactly 32 bytes are skipped.
func (w *W) GetAllNIP43Members() ([][]byte, error) {
	tx, err := w.db.Transaction(idb.TransactionReadOnly, NIP43StoreName)
	if err != nil {
		return nil, err
	}
	store, err := tx.ObjectStore(NIP43StoreName)
	if err != nil {
		return nil, err
	}
	var members [][]byte
	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		// Key is the pubkey stored as string.
		keyBytes := safeValueToBytes(keyVal)
		if len(keyBytes) == 32 {
			// Copy so the returned slice does not alias cursor-owned memory.
			pubkey := make([]byte, 32)
			copy(pubkey, keyBytes)
			members = append(members, pubkey)
		}
		return cursor.Continue()
	})
	// NOTE(review): the callback above never returns an error whose text is
	// "found"; other scans in this package use a "done" sentinel. This guard
	// looks like a leftover — confirm against the idb Iter contract.
	if err != nil && err.Error() != "found" {
		return nil, err
	}
	return members, nil
}
// StoreInviteCode persists an invite code together with its expiry, encoded
// as an 8-byte big-endian unix timestamp.
func (w *W) StoreInviteCode(code string, expiresAt time.Time) error {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], uint64(expiresAt.Unix()))
	return w.setStoreValue(InvitesStoreName, code, buf[:])
}
// ValidateInviteCode reports whether an invite code exists and has not yet
// expired. Missing, unreadable, or malformed codes are simply invalid; no
// error is returned for them.
func (w *W) ValidateInviteCode(code string) (valid bool, err error) {
	data, lookupErr := w.getStoreValue(InvitesStoreName, code)
	if lookupErr != nil || len(data) < 8 {
		return false, nil
	}
	// The stored value is the expiry as a big-endian unix timestamp.
	expiry := time.Unix(int64(binary.BigEndian.Uint64(data)), 0)
	return !time.Now().After(expiry), nil
}
// DeleteInviteCode removes an invite code from the invites store.
func (w *W) DeleteInviteCode(code string) error {
	return w.deleteStoreValue(InvitesStoreName, code)
}
// PublishNIP43MembershipEvent is a no-op in WASM (events are handled by the
// relay). It presumably exists to satisfy the same interface as the native
// implementation — confirm against the store interface definition.
func (w *W) PublishNIP43MembershipEvent(kind int, pubkey []byte) error {
	// In WASM context, this would typically be handled by the client.
	// This is a no-op implementation.
	return nil
}
// serializeNIP43Membership encodes a membership record as:
// pubkey (32 bytes) | AddedAt unix seconds (8 bytes, big-endian) |
// invite-code length (4 bytes, big-endian) | invite-code bytes.
func (w *W) serializeNIP43Membership(m *database.NIP43Membership) []byte {
	var buf bytes.Buffer
	buf.Write(m.Pubkey)
	var ts [8]byte
	binary.BigEndian.PutUint64(ts[:], uint64(m.AddedAt.Unix()))
	buf.Write(ts[:])
	code := []byte(m.InviteCode)
	var cl [4]byte
	binary.BigEndian.PutUint32(cl[:], uint32(len(code)))
	buf.Write(cl[:])
	buf.Write(code)
	return buf.Bytes()
}
// deserializeNIP43Membership converts bytes back to a membership record.
// Layout (see serializeNIP43Membership): pubkey (32) | AddedAt unix seconds
// (8, big-endian) | invite-code length (4, big-endian) | invite-code bytes.
// Returns an error for truncated or inconsistent input.
func (w *W) deserializeNIP43Membership(data []byte) (*database.NIP43Membership, error) {
	if len(data) < 44 { // 32 pubkey + 8 timestamp + 4 length prefix
		return nil, errors.New("invalid membership data")
	}
	m := &database.NIP43Membership{}
	// Read pubkey.
	m.Pubkey = make([]byte, 32)
	copy(m.Pubkey, data[:32])
	// Read AddedAt.
	m.AddedAt = time.Unix(int64(binary.BigEndian.Uint64(data[32:40])), 0)
	// Read invite code, validating the length prefix in uint64 so a hostile
	// prefix near MaxUint32 cannot wrap the 44+codeLen sum (the previous
	// uint32 check wrapped and then panicked in the slice expression).
	codeLen := binary.BigEndian.Uint32(data[40:44])
	if uint64(len(data)) < 44+uint64(codeLen) {
		return nil, errors.New("invalid invite code length")
	}
	m.InviteCode = string(data[44 : 44+codeLen])
	return m, nil
}
// safeValueToString extracts a Go string from a safejs.Value, returning the
// empty string for undefined/null values or on conversion failure.
func safeValueToString(v safejs.Value) string {
	if v.IsUndefined() || v.IsNull() {
		return ""
	}
	s, convErr := v.String()
	if convErr != nil {
		return ""
	}
	return s
}

767
pkg/wasmdb/query-events.go Normal file
View File

@@ -0,0 +1,767 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"context"
"errors"
"sort"
"strconv"
"time"
"github.com/aperturerobotics/go-indexeddb/idb"
"lol.mleku.dev/chk"
sha256 "github.com/minio/sha256-simd"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/ints"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/tag"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/indexes/types"
"next.orly.dev/pkg/interfaces/store"
"next.orly.dev/pkg/utils"
)
// CheckExpiration reports whether ev carries an "expiration" tag whose
// timestamp lies in the past. Events without the tag, or with a value that
// fails to parse, are treated as not expired.
func CheckExpiration(ev *event.E) (expired bool) {
	t := ev.Tags.GetFirst([]byte("expiration"))
	if t == nil {
		return false
	}
	ts := ints.New(0)
	if _, parseErr := ts.Unmarshal(t.Value()); parseErr != nil {
		return false
	}
	return int64(ts.N) < time.Now().Unix()
}
// GetSerialsByRange retrieves serials from an index range using cursor
// iteration. The index keys must end with a 5-byte serial number.
//
// The first 3 bytes of idx.Start select the object store. Keys are compared
// against idx.Start with their serial suffix stripped, and against idx.End
// padded with 0xff so every serial under the last logical key is included.
// Iteration runs newest-first (like Badger's reverse iterator) but the
// returned serials are re-sorted ascending.
func (w *W) GetSerialsByRange(idx database.Range) (sers types.Uint40s, err error) {
	if len(idx.Start) < 3 {
		return nil, errors.New("invalid range: start key too short")
	}
	// Extract the object store name from the 3-byte prefix.
	storeName := string(idx.Start[:3])
	// Open a read transaction.
	tx, err := w.db.Transaction(idb.TransactionReadOnly, storeName)
	if err != nil {
		return nil, err
	}
	objStore, err := tx.ObjectStore(storeName)
	if err != nil {
		return nil, err
	}
	// Open cursor in reverse order (newest first like Badger).
	cursorReq, err := objStore.OpenCursor(idb.CursorPrevious)
	if err != nil {
		return nil, err
	}
	// Pre-allocate slice.
	sers = make(types.Uint40s, 0, 100)
	// Create end boundary with 0xff suffix for inclusive range.
	endBoundary := make([]byte, len(idx.End)+5)
	copy(endBoundary, idx.End)
	for i := len(idx.End); i < len(endBoundary); i++ {
		endBoundary[i] = 0xff
	}
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		key := safeValueToBytes(keyVal)
		if len(key) < 8 { // minimum: 3 prefix + 5 serial
			return cursor.Continue()
		}
		// Check if key is within range.
		keyWithoutSerial := key[:len(key)-5]
		// Compare with start (lower bound).
		cmp := bytes.Compare(keyWithoutSerial, idx.Start)
		if cmp < 0 {
			// Key is before range start; stop iteration early. The "done"
			// sentinel is swallowed below.
			return errors.New("done")
		}
		// Compare with end boundary.
		if bytes.Compare(key, endBoundary) > 0 {
			// Key is after range end, continue to find keys in range.
			return cursor.Continue()
		}
		// Extract serial from last 5 bytes; unparsable serials are skipped.
		ser := new(types.Uint40)
		if err := ser.UnmarshalRead(bytes.NewReader(key[len(key)-5:])); err == nil {
			sers = append(sers, ser)
		}
		return cursor.Continue()
	})
	if err != nil && err.Error() != "done" {
		return nil, err
	}
	// Sort by serial (ascending).
	sort.Slice(sers, func(i, j int) bool {
		return sers[i].Get() < sers[j].Get()
	})
	return sers, nil
}
// QueryForIds retrieves IdPkTs records based on a filter.
// Results are sorted by timestamp in reverse chronological order.
//
// It must not be called with a filter that contains Ids (those are resolved
// directly via GetSerialsByIds elsewhere). For search filters the results
// are ranked by an equal-weight blend of per-serial match count and recency
// instead of pure recency.
func (w *W) QueryForIds(c context.Context, f *filter.F) (idPkTs []*store.IdPkTs, err error) {
	if f.Ids != nil && f.Ids.Len() > 0 {
		err = errors.New("query for Ids is invalid for a filter with Ids")
		return
	}
	// Translate the filter into index key ranges.
	var idxs []database.Range
	if idxs, err = database.GetIndexesFromFilter(f); chk.E(err) {
		return
	}
	var results []*store.IdPkTs
	results = make([]*store.IdPkTs, 0, len(idxs)*100)
	// Track match counts for search ranking.
	counts := make(map[uint64]int)
	for _, idx := range idxs {
		var founds types.Uint40s
		// Failures on individual ranges are logged and skipped, not fatal.
		if founds, err = w.GetSerialsByRange(idx); err != nil {
			w.Logger.Warnf("QueryForIds: GetSerialsByRange error: %v", err)
			continue
		}
		var tmp []*store.IdPkTs
		if tmp, err = w.GetFullIdPubkeyBySerials(founds); err != nil {
			w.Logger.Warnf("QueryForIds: GetFullIdPubkeyBySerials error: %v", err)
			continue
		}
		// Track match counts for search queries (a serial hit by multiple
		// word-index ranges counts higher).
		if len(f.Search) > 0 {
			for _, v := range tmp {
				counts[v.Ser]++
			}
		}
		results = append(results, tmp...)
	}
	// Deduplicate results by serial.
	seen := make(map[uint64]struct{}, len(results))
	idPkTs = make([]*store.IdPkTs, 0, len(results))
	for _, idpk := range results {
		if _, ok := seen[idpk.Ser]; !ok {
			seen[idpk.Ser] = struct{}{}
			idPkTs = append(idPkTs, idpk)
		}
	}
	// For search queries combined with other filters, verify matches against
	// the actual events, since the index scan alone cannot enforce the
	// author/kind/tag constraints at the same time as the search terms.
	if len(f.Search) > 0 && ((f.Authors != nil && f.Authors.Len() > 0) ||
		(f.Kinds != nil && f.Kinds.Len() > 0) ||
		(f.Tags != nil && f.Tags.Len() > 0)) {
		// Build serial list for fetching.
		serials := make([]*types.Uint40, 0, len(idPkTs))
		for _, v := range idPkTs {
			s := new(types.Uint40)
			s.Set(v.Ser)
			serials = append(serials, s)
		}
		var evs map[uint64]*event.E
		if evs, err = w.FetchEventsBySerials(serials); chk.E(err) {
			return
		}
		filtered := make([]*store.IdPkTs, 0, len(idPkTs))
		for _, v := range idPkTs {
			ev, ok := evs[v.Ser]
			if !ok || ev == nil {
				continue
			}
			matchesAll := true
			if f.Authors != nil && f.Authors.Len() > 0 && !f.Authors.Contains(ev.Pubkey) {
				matchesAll = false
			}
			if matchesAll && f.Kinds != nil && f.Kinds.Len() > 0 && !f.Kinds.Contains(ev.Kind) {
				matchesAll = false
			}
			if matchesAll && f.Tags != nil && f.Tags.Len() > 0 {
				// Every filter tag must match at least one event tag value.
				tagOK := true
				for _, t := range *f.Tags {
					if t.Len() < 2 {
						continue
					}
					key := t.Key()
					values := t.T[1:]
					if !ev.Tags.ContainsAny(key, values) {
						tagOK = false
						break
					}
				}
				if !tagOK {
					matchesAll = false
				}
			}
			if matchesAll {
				filtered = append(filtered, v)
			}
		}
		idPkTs = filtered
	}
	// Sort by timestamp (newest first).
	if len(f.Search) == 0 {
		sort.Slice(idPkTs, func(i, j int) bool {
			return idPkTs[i].Ts > idPkTs[j].Ts
		})
	} else {
		// Search ranking: blend match count with recency, each normalized to
		// [0,1] over the result set and weighted 50/50; ties fall back to
		// newest-first.
		var maxCount int
		var minTs, maxTs int64
		if len(idPkTs) > 0 {
			minTs, maxTs = idPkTs[0].Ts, idPkTs[0].Ts
		}
		for _, v := range idPkTs {
			if c := counts[v.Ser]; c > maxCount {
				maxCount = c
			}
			if v.Ts < minTs {
				minTs = v.Ts
			}
			if v.Ts > maxTs {
				maxTs = v.Ts
			}
		}
		tsSpan := maxTs - minTs
		if tsSpan <= 0 {
			tsSpan = 1
		}
		if maxCount <= 0 {
			maxCount = 1
		}
		sort.Slice(idPkTs, func(i, j int) bool {
			ci := float64(counts[idPkTs[i].Ser]) / float64(maxCount)
			cj := float64(counts[idPkTs[j].Ser]) / float64(maxCount)
			ai := float64(idPkTs[i].Ts-minTs) / float64(tsSpan)
			aj := float64(idPkTs[j].Ts-minTs) / float64(tsSpan)
			si := 0.5*ci + 0.5*ai
			sj := 0.5*cj + 0.5*aj
			if si == sj {
				return idPkTs[i].Ts > idPkTs[j].Ts
			}
			return si > sj
		})
	}
	// Apply limit.
	if f.Limit != nil && len(idPkTs) > int(*f.Limit) {
		idPkTs = idPkTs[:*f.Limit]
	}
	return
}
// QueryForSerials resolves a filter to the set of matching event serial
// numbers. Filters carrying explicit Ids are resolved by direct ID lookup;
// everything else goes through QueryForIds.
func (w *W) QueryForSerials(c context.Context, f *filter.F) (sers types.Uint40s, err error) {
	var matches []*store.IdPkTs
	if f.Ids != nil && f.Ids.Len() > 0 {
		// Resolve the requested IDs straight to serials.
		var byId map[string]*types.Uint40
		if byId, err = w.GetSerialsByIds(f.Ids); chk.E(err) {
			return
		}
		found := make([]*types.Uint40, 0, len(byId))
		for _, s := range byId {
			found = append(found, s)
		}
		if matches, err = w.GetFullIdPubkeyBySerials(found); chk.E(err) {
			return
		}
	} else {
		if matches, err = w.QueryForIds(c, f); chk.E(err) {
			return
		}
	}
	// Convert the matched records back into serial values.
	for _, m := range matches {
		s := new(types.Uint40)
		if err = s.Set(m.Ser); chk.E(err) {
			continue
		}
		sers = append(sers, s)
	}
	return
}
// QueryEvents queries events based on a filter, returning only the newest
// version of each replaceable/addressable event (unless the filter's limit
// exceeds 1; see QueryEventsWithOptions).
func (w *W) QueryEvents(c context.Context, f *filter.F) (evs event.S, err error) {
	return w.QueryEventsWithOptions(c, f, true, false)
}
// QueryAllVersions queries events and returns all stored versions of
// replaceable events instead of only the newest per key.
func (w *W) QueryAllVersions(c context.Context, f *filter.F) (evs event.S, err error) {
	return w.QueryEventsWithOptions(c, f, true, true)
}
// QueryEventsWithOptions queries events with additional options for deletion
// and versioning.
//
//   - includeDeleteEvents is currently unused; kind-5 events are returned
//     only when the filter explicitly asks for kind 5.
//   - showAllVersions (or a filter limit greater than 1) returns all stored
//     versions of replaceable/addressable events instead of only the newest
//     per key.
//
// Expired events encountered along the way are excluded from the results
// and purged in a background goroutine. Events targeted by a kind-5
// deletion from the same author are filtered out.
func (w *W) QueryEventsWithOptions(c context.Context, f *filter.F, includeDeleteEvents bool, showAllVersions bool) (evs event.S, err error) {
	wantMultipleVersions := showAllVersions || (f.Limit != nil && *f.Limit > 1)
	// Expired events discovered during the query, purged before returning.
	var expDeletes types.Uint40s
	var expEvs event.S
	// Handle ID-based queries
	if f.Ids != nil && f.Ids.Len() > 0 {
		w.Logger.Debugf("QueryEvents: ids path, count=%d", f.Ids.Len())
		serials, idErr := w.GetSerialsByIds(f.Ids)
		if idErr != nil {
			w.Logger.Warnf("QueryEvents: error looking up ids: %v", idErr)
		}
		// Convert to slice for batch fetch
		var serialsSlice []*types.Uint40
		idHexToSerial := make(map[uint64]string, len(serials))
		for idHex, ser := range serials {
			serialsSlice = append(serialsSlice, ser)
			idHexToSerial[ser.Get()] = idHex
		}
		// Batch fetch events
		var fetchedEvents map[uint64]*event.E
		if fetchedEvents, err = w.FetchEventsBySerials(serialsSlice); err != nil {
			w.Logger.Warnf("QueryEvents: batch fetch failed: %v", err)
			return
		}
		// Process fetched events
		for serialValue, ev := range fetchedEvents {
			idHex := idHexToSerial[serialValue]
			ser := new(types.Uint40)
			if err = ser.Set(serialValue); err != nil {
				continue
			}
			// Check expiration
			if CheckExpiration(ev) {
				w.Logger.Debugf("QueryEvents: id=%s filtered out due to expiration", idHex)
				expDeletes = append(expDeletes, ser)
				expEvs = append(expEvs, ev)
				continue
			}
			// Check for deletion
			if derr := w.CheckForDeleted(ev, nil); derr != nil {
				w.Logger.Debugf("QueryEvents: id=%s filtered out due to deletion: %v", idHex, derr)
				continue
			}
			evs = append(evs, ev)
		}
		// Sort and apply limit
		sort.Slice(evs, func(i, j int) bool {
			return evs[i].CreatedAt > evs[j].CreatedAt
		})
		if f.Limit != nil && len(evs) > int(*f.Limit) {
			evs = evs[:*f.Limit]
		}
	} else {
		// Non-IDs path
		var idPkTs []*store.IdPkTs
		if idPkTs, err = w.QueryForIds(c, f); chk.E(err) {
			return
		}
		// Maps for replaceable event handling
		replaceableEvents := make(map[string]*event.E)
		replaceableEventVersions := make(map[string]event.S)
		paramReplaceableEvents := make(map[string]map[string]*event.E)
		paramReplaceableEventVersions := make(map[string]map[string]event.S)
		var regularEvents event.S
		// Deletion tracking maps
		deletionsByKindPubkey := make(map[string]bool)
		deletionsByKindPubkeyDTag := make(map[string]map[string]int64)
		deletedEventIds := make(map[string]bool)
		// Query for deletion events if we have authors, so deletions by the
		// same authors can be applied even when kind 5 wasn't requested.
		if f.Authors != nil && f.Authors.Len() > 0 {
			deletionFilter := &filter.F{
				Kinds:   kind.NewS(kind.New(5)),
				Authors: f.Authors,
			}
			var deletionIdPkTs []*store.IdPkTs
			if deletionIdPkTs, err = w.QueryForIds(c, deletionFilter); err == nil {
				idPkTs = append(idPkTs, deletionIdPkTs...)
			}
		}
		// Prepare serials for batch fetch
		var allSerials []*types.Uint40
		serialToIdPk := make(map[uint64]*store.IdPkTs, len(idPkTs))
		for _, idpk := range idPkTs {
			ser := new(types.Uint40)
			if err = ser.Set(idpk.Ser); err != nil {
				continue
			}
			allSerials = append(allSerials, ser)
			serialToIdPk[ser.Get()] = idpk
		}
		// Batch fetch all events
		var allEvents map[uint64]*event.E
		if allEvents, err = w.FetchEventsBySerials(allSerials); err != nil {
			w.Logger.Warnf("QueryEvents: batch fetch failed in non-IDs path: %v", err)
			return
		}
		// First pass: collect deletion events
		for serialValue, ev := range allEvents {
			ser := new(types.Uint40)
			if err = ser.Set(serialValue); err != nil {
				continue
			}
			if CheckExpiration(ev) {
				expDeletes = append(expDeletes, ser)
				expEvs = append(expEvs, ev)
				continue
			}
			if ev.Kind == kind.Deletion.K {
				// Process a-tags ("kind:pubkey[:d-tag]") for deletion tracking
				aTags := ev.Tags.GetAll([]byte("a"))
				for _, aTag := range aTags {
					if aTag.Len() < 2 {
						continue
					}
					split := bytes.Split(aTag.Value(), []byte{':'})
					if len(split) < 2 {
						continue
					}
					kindInt, parseErr := strconv.Atoi(string(split[0]))
					if parseErr != nil {
						continue
					}
					kk := kind.New(uint16(kindInt))
					if !kind.IsReplaceable(kk.K) {
						continue
					}
					var pk []byte
					if pk, err = hex.DecAppend(nil, split[1]); err != nil {
						continue
					}
					// Only the author may delete their own events.
					if !utils.FastEqual(pk, ev.Pubkey) {
						continue
					}
					key := hex.Enc(pk) + ":" + strconv.Itoa(int(kk.K))
					if kind.IsParameterizedReplaceable(kk.K) {
						if len(split) < 3 {
							continue
						}
						if _, exists := deletionsByKindPubkeyDTag[key]; !exists {
							deletionsByKindPubkeyDTag[key] = make(map[string]int64)
						}
						dValue := string(split[2])
						// Keep the newest deletion timestamp per d-tag.
						if ts, ok := deletionsByKindPubkeyDTag[key][dValue]; !ok || ev.CreatedAt > ts {
							deletionsByKindPubkeyDTag[key][dValue] = ev.CreatedAt
						}
					} else {
						deletionsByKindPubkey[key] = true
					}
				}
				// Process e-tags for specific event deletions
				eTags := ev.Tags.GetAll([]byte("e"))
				for _, eTag := range eTags {
					eTagHex := eTag.ValueHex()
					if len(eTagHex) != 64 {
						continue
					}
					evId := make([]byte, sha256.Size)
					if _, hexErr := hex.DecBytes(evId, eTagHex); hexErr != nil {
						continue
					}
					// Look for target in current batch
					var targetEv *event.E
					for _, candidateEv := range allEvents {
						if utils.FastEqual(candidateEv.ID, evId) {
							targetEv = candidateEv
							break
						}
					}
					// Try to fetch if not in batch
					if targetEv == nil {
						ser, serErr := w.GetSerialById(evId)
						if serErr != nil || ser == nil {
							continue
						}
						targetEv, serErr = w.FetchEventBySerial(ser)
						if serErr != nil || targetEv == nil {
							continue
						}
					}
					if !utils.FastEqual(targetEv.Pubkey, ev.Pubkey) {
						continue
					}
					deletedEventIds[hex.Enc(targetEv.ID)] = true
				}
			}
		}
		// Second pass: process all events, filtering deleted ones
		for _, ev := range allEvents {
			// Tag filter verification: every filter tag must be matched.
			if f.Tags != nil && f.Tags.Len() > 0 {
				tagMatches := 0
				for _, filterTag := range *f.Tags {
					if filterTag.Len() >= 2 {
						filterKey := filterTag.Key()
						var actualKey []byte
						// Filter tag keys arrive as "#x"; strip the hash.
						if len(filterKey) == 2 && filterKey[0] == '#' {
							actualKey = filterKey[1:]
						} else {
							actualKey = filterKey
						}
						eventHasTag := false
						if ev.Tags != nil {
							for _, eventTag := range *ev.Tags {
								if eventTag.Len() >= 2 && bytes.Equal(eventTag.Key(), actualKey) {
									for _, filterValue := range filterTag.T[1:] {
										if database.TagValuesMatchUsingTagMethods(eventTag, filterValue) {
											eventHasTag = true
											break
										}
									}
									if eventHasTag {
										break
									}
								}
							}
						}
						if eventHasTag {
							tagMatches++
						}
					}
				}
				if tagMatches < f.Tags.Len() {
					continue
				}
			}
			// Skip deletion events unless explicitly requested via f.Kinds.
			if ev.Kind == kind.Deletion.K {
				kind5Requested := false
				if f.Kinds != nil && f.Kinds.Len() > 0 {
					for i := 0; i < f.Kinds.Len(); i++ {
						if f.Kinds.K[i].K == kind.Deletion.K {
							kind5Requested = true
							break
						}
					}
				}
				if !kind5Requested {
					continue
				}
			}
			// Events named explicitly in the filter survive kind/d-tag
			// deletion filtering below.
			isIdInFilter := false
			if f.Ids != nil && f.Ids.Len() > 0 {
				for i := 0; i < f.Ids.Len(); i++ {
					if utils.FastEqual(ev.ID, (*f.Ids).T[i]) {
						isIdInFilter = true
						break
					}
				}
			}
			// Check if specifically deleted
			eventIdHex := hex.Enc(ev.ID)
			if deletedEventIds[eventIdHex] {
				continue
			}
			// Handle replaceable events
			if kind.IsReplaceable(ev.Kind) {
				key := hex.Enc(ev.Pubkey) + ":" + strconv.Itoa(int(ev.Kind))
				if deletionsByKindPubkey[key] && !isIdInFilter {
					continue
				} else if wantMultipleVersions {
					replaceableEventVersions[key] = append(replaceableEventVersions[key], ev)
				} else {
					existing, exists := replaceableEvents[key]
					if !exists || ev.CreatedAt > existing.CreatedAt {
						replaceableEvents[key] = ev
					}
				}
			} else if kind.IsParameterizedReplaceable(ev.Kind) {
				key := hex.Enc(ev.Pubkey) + ":" + strconv.Itoa(int(ev.Kind))
				dTag := ev.Tags.GetFirst([]byte("d"))
				var dValue string
				if dTag != nil && dTag.Len() > 1 {
					dValue = string(dTag.Value())
				}
				// A deletion only suppresses versions older than itself.
				if deletionMap, exists := deletionsByKindPubkeyDTag[key]; exists {
					if delTs, ok := deletionMap[dValue]; ok && ev.CreatedAt < delTs && !isIdInFilter {
						continue
					}
				}
				if wantMultipleVersions {
					if _, exists := paramReplaceableEventVersions[key]; !exists {
						paramReplaceableEventVersions[key] = make(map[string]event.S)
					}
					paramReplaceableEventVersions[key][dValue] = append(paramReplaceableEventVersions[key][dValue], ev)
				} else {
					if _, exists := paramReplaceableEvents[key]; !exists {
						paramReplaceableEvents[key] = make(map[string]*event.E)
					}
					existing, exists := paramReplaceableEvents[key][dValue]
					if !exists || ev.CreatedAt > existing.CreatedAt {
						paramReplaceableEvents[key][dValue] = ev
					}
				}
			} else {
				regularEvents = append(regularEvents, ev)
			}
		}
		// Collect results
		if wantMultipleVersions {
			for _, versions := range replaceableEventVersions {
				sort.Slice(versions, func(i, j int) bool {
					return versions[i].CreatedAt > versions[j].CreatedAt
				})
				limit := len(versions)
				if f.Limit != nil && int(*f.Limit) < limit {
					limit = int(*f.Limit)
				}
				for i := 0; i < limit; i++ {
					evs = append(evs, versions[i])
				}
			}
		} else {
			for _, ev := range replaceableEvents {
				evs = append(evs, ev)
			}
		}
		if wantMultipleVersions {
			for _, dTagMap := range paramReplaceableEventVersions {
				for _, versions := range dTagMap {
					sort.Slice(versions, func(i, j int) bool {
						return versions[i].CreatedAt > versions[j].CreatedAt
					})
					limit := len(versions)
					if f.Limit != nil && int(*f.Limit) < limit {
						limit = int(*f.Limit)
					}
					for i := 0; i < limit; i++ {
						evs = append(evs, versions[i])
					}
				}
			}
		} else {
			for _, innerMap := range paramReplaceableEvents {
				for _, ev := range innerMap {
					evs = append(evs, ev)
				}
			}
		}
		evs = append(evs, regularEvents...)
		// Sort and limit
		sort.Slice(evs, func(i, j int) bool {
			return evs[i].CreatedAt > evs[j].CreatedAt
		})
		if f.Limit != nil && len(evs) > int(*f.Limit) {
			evs = evs[:*f.Limit]
		}
	}
	// Purge expired events in the background. This used to run only in the
	// non-IDs branch, so expired events found via ID lookups were collected
	// in expDeletes but never actually deleted; running it here covers both
	// paths.
	if len(expDeletes) > 0 {
		go func() {
			for i, ser := range expDeletes {
				w.DeleteEventBySerial(context.Background(), ser, expEvs[i])
			}
		}()
	}
	return
}
// QueryDeleteEventsByTargetId queries for delete (kind 5) events that carry
// an "e" tag referencing targetEventId.
func (w *W) QueryDeleteEventsByTargetId(c context.Context, targetEventId []byte) (evs event.S, err error) {
	f := &filter.F{
		Kinds: kind.NewS(kind.Deletion),
		Tags: tag.NewS(
			tag.NewFromAny("#e", hex.Enc(targetEventId)),
		),
	}
	return w.QueryEventsWithOptions(c, f, true, false)
}
// CountEvents returns the number of events matching f. Counts are always
// exact, so approx is always false. A nil filter counts as zero.
func (w *W) CountEvents(c context.Context, f *filter.F) (count int, approx bool, err error) {
	if f == nil {
		return 0, false, nil
	}
	// ID filters: the count is simply how many of the IDs resolve.
	if f.Ids != nil && f.Ids.Len() > 0 {
		var serials map[string]*types.Uint40
		if serials, err = w.GetSerialsByIds(f.Ids); err != nil {
			return 0, false, err
		}
		return len(serials), false, nil
	}
	// General filters: count the matching serials.
	var sers types.Uint40s
	if sers, err = w.QueryForSerials(c, f); err != nil {
		return 0, false, err
	}
	return len(sers), false, nil
}
// GetSerialsFromFilter is an alias for QueryForSerials for interface
// compatibility. Note it runs under the database's own context (w.ctx)
// rather than a caller-supplied one.
func (w *W) GetSerialsFromFilter(f *filter.F) (serials types.Uint40s, err error) {
	return w.QueryForSerials(w.ctx, f)
}

26
pkg/wasmdb/run-tests.sh Executable file
View File

@@ -0,0 +1,26 @@
#!/bin/bash
# Run wasmdb tests using Node.js with fake-indexeddb
# This script builds the test binary and runs it in Node.js
set -e
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TESTDATA_DIR="$SCRIPT_DIR/testdata"
WASM_FILE="$TESTDATA_DIR/wasmdb_test.wasm"
# Ensure Node.js dependencies are installed
if [ ! -d "$TESTDATA_DIR/node_modules" ]; then
echo "Installing Node.js dependencies..."
cd "$TESTDATA_DIR"
npm install
cd - > /dev/null
fi
# Build the test binary
echo "Building WASM test binary..."
GOOS=js GOARCH=wasm CGO_ENABLED=0 go test -c -o "$WASM_FILE" "$SCRIPT_DIR"
# Run the tests
echo "Running tests in Node.js..."
node "$TESTDATA_DIR/run_wasm_tests.mjs" "$WASM_FILE" "$@"

423
pkg/wasmdb/save-event.go Normal file
View File

@@ -0,0 +1,423 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"context"
"errors"
"fmt"
"strings"
"github.com/aperturerobotics/go-indexeddb/idb"
"github.com/hack-pad/safejs"
"lol.mleku.dev/chk"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/hex"
"git.mleku.dev/mleku/nostr/encoders/kind"
"git.mleku.dev/mleku/nostr/encoders/tag"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/indexes"
"next.orly.dev/pkg/database/indexes/types"
)
var (
	// ErrOlderThanExisting is returned when a candidate event is older than
	// an existing replaceable/addressable event; SaveEvent maps it to a
	// "blocked: ..." error for callers.
	ErrOlderThanExisting = errors.New("older than existing event")
	// ErrMissingDTag is returned when a parameterized replaceable event
	// lacks the required 'd' tag.
	ErrMissingDTag = errors.New("event is missing a d tag identifier")
)
// SaveEvent saves an event to the database, generating all necessary
// indexes.
//
// It rejects nil events, ephemeral events (kinds 20000-29999), kind-3
// follow lists without a "p" tag, duplicates, and replaceable/addressable
// events older than an already-stored version. Events of 1024 bytes or less
// are embedded in the key of the small-event store; larger ones are written
// as values in the main event store. replaced reports whether the event
// supersedes an existing replaceable/addressable event (old versions are
// not deleted here).
func (w *W) SaveEvent(c context.Context, ev *event.E) (replaced bool, err error) {
	if ev == nil {
		err = errors.New("nil event")
		return
	}
	// Reject ephemeral events (kinds 20000-29999) - they should never be stored
	if ev.Kind >= 20000 && ev.Kind <= 29999 {
		err = errors.New("blocked: ephemeral events should not be stored")
		return
	}
	// Validate kind 3 (follow list) events have at least one p tag
	if ev.Kind == 3 {
		hasPTag := false
		if ev.Tags != nil {
			for _, t := range *ev.Tags {
				if t != nil && t.Len() >= 2 {
					key := t.Key()
					if len(key) == 1 && key[0] == 'p' {
						hasPTag = true
						break
					}
				}
			}
		}
		if !hasPTag {
			w.Logger.Warnf("SaveEvent: rejecting kind 3 event without p tags from pubkey %x", ev.Pubkey)
			err = errors.New("blocked: kind 3 follow list events must have at least one p tag")
			return
		}
	}
	// Check if the event already exists
	var ser *types.Uint40
	if ser, err = w.GetSerialById(ev.ID); err == nil && ser != nil {
		err = errors.New("blocked: event already exists: " + hex.Enc(ev.ID[:]))
		return
	}
	// If the error is "id not found", we can proceed; any other lookup
	// error aborts the save.
	if err != nil && strings.Contains(err.Error(), "id not found") {
		err = nil
	} else if err != nil {
		return
	}
	// Check for replacement - only validate, don't delete old events
	if kind.IsReplaceable(ev.Kind) || kind.IsParameterizedReplaceable(ev.Kind) {
		var werr error
		if replaced, _, werr = w.WouldReplaceEvent(ev); werr != nil {
			if errors.Is(werr, ErrOlderThanExisting) {
				if kind.IsReplaceable(ev.Kind) {
					err = errors.New("blocked: event is older than existing replaceable event")
				} else {
					err = errors.New("blocked: event is older than existing addressable event")
				}
				return
			}
			if errors.Is(werr, ErrMissingDTag) {
				err = ErrMissingDTag
				return
			}
			return
		}
	}
	// Get the next sequence number for the event
	serial, err := w.nextEventSerial()
	if err != nil {
		return
	}
	// Generate all indexes for the event
	idxs, err := database.GetIndexesForEvent(ev, serial)
	if err != nil {
		return
	}
	// Serialize event to binary
	// NOTE(review): MarshalBinary's result (if any) is not checked here —
	// confirm it cannot fail for a validated event.
	eventDataBuf := new(bytes.Buffer)
	ev.MarshalBinary(eventDataBuf)
	eventData := eventDataBuf.Bytes()
	// Determine storage strategy
	smallEventThreshold := 1024 // Could be made configurable
	isSmallEvent := len(eventData) <= smallEventThreshold
	isReplaceableEvent := kind.IsReplaceable(ev.Kind)
	isAddressableEvent := kind.IsParameterizedReplaceable(ev.Kind)
	// Create serial type
	ser = new(types.Uint40)
	if err = ser.Set(serial); chk.E(err) {
		return
	}
	// Start a transaction to save the event and all its indexes
	// We need to include all object stores we'll write to
	storesToWrite := []string{
		string(indexes.IdPrefix),
		string(indexes.FullIdPubkeyPrefix),
		string(indexes.CreatedAtPrefix),
		string(indexes.PubkeyPrefix),
		string(indexes.KindPrefix),
		string(indexes.KindPubkeyPrefix),
		string(indexes.TagPrefix),
		string(indexes.TagKindPrefix),
		string(indexes.TagPubkeyPrefix),
		string(indexes.TagKindPubkeyPrefix),
		string(indexes.WordPrefix),
	}
	// Add event storage store
	if isSmallEvent {
		storesToWrite = append(storesToWrite, string(indexes.SmallEventPrefix))
	} else {
		storesToWrite = append(storesToWrite, string(indexes.EventPrefix))
	}
	// Add specialized stores if needed
	if isAddressableEvent && isSmallEvent {
		storesToWrite = append(storesToWrite, string(indexes.AddressableEventPrefix))
	} else if isReplaceableEvent && isSmallEvent {
		storesToWrite = append(storesToWrite, string(indexes.ReplaceableEventPrefix))
	}
	// Start transaction
	tx, err := w.db.Transaction(idb.TransactionReadWrite, storesToWrite[0], storesToWrite[1:]...)
	if err != nil {
		return false, fmt.Errorf("failed to start transaction: %w", err)
	}
	// Save each index to its respective object store.
	// NOTE(review): individual index-put failures are logged and skipped
	// rather than aborting the transaction — confirm this best-effort
	// behavior is intended, since a missed index makes the event invisible
	// to some queries.
	for _, key := range idxs {
		if len(key) < 3 {
			continue
		}
		// Extract store name from 3-byte prefix
		storeName := string(key[:3])
		store, storeErr := tx.ObjectStore(storeName)
		if storeErr != nil {
			w.Logger.Warnf("SaveEvent: failed to get object store %s: %v", storeName, storeErr)
			continue
		}
		// Use the full key as the IndexedDB key, empty value
		keyJS := bytesToSafeValue(key)
		_, putErr := store.PutKey(keyJS, safejs.Null())
		if putErr != nil {
			w.Logger.Warnf("SaveEvent: failed to put index %s: %v", storeName, putErr)
		}
	}
	// Store the event data
	if isSmallEvent {
		// Small event: store inline with sev prefix
		// Format: sev|serial|size_uint16|event_data
		keyBuf := new(bytes.Buffer)
		if err = indexes.SmallEventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
			return
		}
		// Append size as uint16 big-endian
		sizeBytes := []byte{byte(len(eventData) >> 8), byte(len(eventData))}
		keyBuf.Write(sizeBytes)
		keyBuf.Write(eventData)
		store, storeErr := tx.ObjectStore(string(indexes.SmallEventPrefix))
		if storeErr == nil {
			keyJS := bytesToSafeValue(keyBuf.Bytes())
			store.PutKey(keyJS, safejs.Null())
		}
	} else {
		// Large event: store separately with evt prefix
		keyBuf := new(bytes.Buffer)
		if err = indexes.EventEnc(ser).MarshalWrite(keyBuf); chk.E(err) {
			return
		}
		store, storeErr := tx.ObjectStore(string(indexes.EventPrefix))
		if storeErr == nil {
			keyJS := bytesToSafeValue(keyBuf.Bytes())
			valueJS := bytesToSafeValue(eventData)
			store.PutKey(keyJS, valueJS)
		}
	}
	// Store specialized keys for replaceable/addressable events so the
	// latest version can be found without scanning the main stores.
	if isAddressableEvent && isSmallEvent {
		dTag := ev.Tags.GetFirst([]byte("d"))
		if dTag != nil {
			pubHash := new(types.PubHash)
			pubHash.FromPubkey(ev.Pubkey)
			kindVal := new(types.Uint16)
			kindVal.Set(ev.Kind)
			dTagHash := new(types.Ident)
			dTagHash.FromIdent(dTag.Value())
			keyBuf := new(bytes.Buffer)
			if err = indexes.AddressableEventEnc(pubHash, kindVal, dTagHash).MarshalWrite(keyBuf); chk.E(err) {
				return
			}
			sizeBytes := []byte{byte(len(eventData) >> 8), byte(len(eventData))}
			keyBuf.Write(sizeBytes)
			keyBuf.Write(eventData)
			store, storeErr := tx.ObjectStore(string(indexes.AddressableEventPrefix))
			if storeErr == nil {
				keyJS := bytesToSafeValue(keyBuf.Bytes())
				store.PutKey(keyJS, safejs.Null())
			}
		}
	} else if isReplaceableEvent && isSmallEvent {
		pubHash := new(types.PubHash)
		pubHash.FromPubkey(ev.Pubkey)
		kindVal := new(types.Uint16)
		kindVal.Set(ev.Kind)
		keyBuf := new(bytes.Buffer)
		if err = indexes.ReplaceableEventEnc(pubHash, kindVal).MarshalWrite(keyBuf); chk.E(err) {
			return
		}
		sizeBytes := []byte{byte(len(eventData) >> 8), byte(len(eventData))}
		keyBuf.Write(sizeBytes)
		keyBuf.Write(eventData)
		store, storeErr := tx.ObjectStore(string(indexes.ReplaceableEventPrefix))
		if storeErr == nil {
			keyJS := bytesToSafeValue(keyBuf.Bytes())
			store.PutKey(keyJS, safejs.Null())
		}
	}
	// Commit transaction
	if err = tx.Await(c); err != nil {
		return false, fmt.Errorf("failed to commit transaction: %w", err)
	}
	w.Logger.Debugf("SaveEvent: saved event %x (kind %d, %d bytes, %d indexes)",
		ev.ID[:8], ev.Kind, len(eventData), len(idxs))
	return
}
// WouldReplaceEvent checks whether the provided event would replace existing
// events, returning a flag plus the serials of events that would be replaced.
//
// Current implementation is a stub: it validates the preconditions only
// (replaceable kind; presence of a 'd' tag for parameterized-replaceable
// events) and always reports no conflicts. A full implementation would query
// existing events and compare created_at timestamps.
func (w *W) WouldReplaceEvent(ev *event.E) (bool, types.Uint40s, error) {
	// Only replaceable or parameterized-replaceable kinds can be replaced.
	if !kind.IsReplaceable(ev.Kind) && !kind.IsParameterizedReplaceable(ev.Kind) {
		return false, nil, nil
	}
	if kind.IsParameterizedReplaceable(ev.Kind) {
		// Parameterized-replaceable events are addressed by their 'd' tag;
		// without one the event cannot be matched against prior versions.
		if ev.Tags.GetFirst([]byte("d")) == nil {
			return false, nil, ErrMissingDTag
		}
	}
	// Simplified: assume no conflicts. A full implementation would query the
	// database for matching (pubkey, kind[, d-tag]) events here.
	return false, nil, nil
}
// GetSerialById looks up the serial number for a 32-byte event ID by scanning
// the "eid" index store for a key whose 11-byte prefix (3-byte store prefix +
// 8-byte ID hash) matches the given ID. Key layout: 3 prefix + 8 hash +
// 5 serial = 16 bytes.
//
// NOTE(review): this walks the entire eid store with a cursor; a key-range
// bounded cursor would avoid visiting unrelated keys — confirm against the
// idb API before optimizing.
func (w *W) GetSerialById(id []byte) (ser *types.Uint40, err error) {
	if len(id) != 32 {
		return nil, errors.New("invalid event ID length")
	}
	// Hash the ID the same way the index encoder does.
	idHash := new(types.IdHash)
	if err = idHash.FromId(id); chk.E(err) {
		return nil, err
	}
	// Build the 11-byte search prefix (3-byte store prefix + 8-byte hash).
	keyBuf := new(bytes.Buffer)
	if err = indexes.IdEnc(idHash, nil).MarshalWrite(keyBuf); chk.E(err) {
		return nil, err
	}
	prefix := keyBuf.Bytes()[:11]
	// Open a read-only cursor over the eid object store.
	tx, err := w.db.Transaction(idb.TransactionReadOnly, string(indexes.IdPrefix))
	if err != nil {
		return nil, err
	}
	store, err := tx.ObjectStore(string(indexes.IdPrefix))
	if err != nil {
		return nil, err
	}
	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}
	// Sentinel used to stop iteration early once a match is found; compared
	// with errors.Is rather than a fragile string comparison.
	errFound := errors.New("found")
	err = cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		keyBytes := safeValueToBytes(keyVal)
		if len(keyBytes) >= 16 && bytes.HasPrefix(keyBytes, prefix) {
			// Matching key: the serial lives in the last 5 bytes.
			ser = new(types.Uint40)
			if serErr := ser.UnmarshalRead(bytes.NewReader(keyBytes[11:16])); serErr != nil {
				ser = nil
				return serErr
			}
			return errFound // stop iteration
		}
		return cursor.Continue()
	})
	if ser != nil {
		return ser, nil
	}
	if err != nil && !errors.Is(err, errFound) {
		return nil, err
	}
	return nil, errors.New("id not found in database")
}
// GetSerialsByIds resolves the serial numbers for a set of event IDs. IDs may
// be given as 64-character hex or 32-byte raw values; malformed or unknown
// IDs are skipped (best-effort). The result maps hex-encoded ID -> serial.
func (w *W) GetSerialsByIds(ids *tag.T) (serials map[string]*types.Uint40, err error) {
	serials = make(map[string]*types.Uint40)
	if ids == nil {
		return
	}
	// Element 0 of a tag is the tag name; the IDs start at element 1.
	for i := 1; i < ids.Len(); i++ {
		raw := ids.T[i]
		if len(raw) == 64 {
			// Hex-encoded: decode to raw bytes, skipping invalid hex.
			decoded, decErr := hex.Dec(string(raw))
			if decErr != nil {
				continue
			}
			raw = decoded
		}
		if len(raw) != 32 {
			continue
		}
		// Lookup failures are skipped rather than aborting the batch.
		if ser, lookupErr := w.GetSerialById(raw); lookupErr == nil && ser != nil {
			serials[hex.Enc(raw)] = ser
		}
	}
	return serials, nil
}
// GetSerialsByIdsWithFilter resolves serials for the given IDs, then keeps
// only those whose fetched event satisfies fn. A nil fn keeps everything.
func (w *W) GetSerialsByIdsWithFilter(ids *tag.T, fn func(ev *event.E, ser *types.Uint40) bool) (serials map[string]*types.Uint40, err error) {
	var all map[string]*types.Uint40
	if all, err = w.GetSerialsByIds(ids); err != nil {
		return nil, err
	}
	if fn == nil {
		// No predicate: return the full set unchanged.
		return all, nil
	}
	serials = make(map[string]*types.Uint40)
	for idHex, ser := range all {
		// Events that cannot be fetched are excluded (best-effort).
		ev, fetchErr := w.FetchEventBySerial(ser)
		if fetchErr != nil {
			continue
		}
		if fn(ev, ser) {
			serials[idHex] = ser
		}
	}
	return serials, nil
}

332
pkg/wasmdb/subscriptions.go Normal file
View File

@@ -0,0 +1,332 @@
//go:build js && wasm
package wasmdb
import (
"bytes"
"encoding/binary"
"encoding/json"
"errors"
"time"
"github.com/aperturerobotics/go-indexeddb/idb"
"next.orly.dev/pkg/database"
)
// Storage layout constants for the subscriptions object store. Both
// subscription records ("sub:<pubkey>") and payment records
// ("payment:<pubkey>:<timestamp>") live in the same store, distinguished
// by key prefix.
const (
	// SubscriptionsStoreName is the object store for payment subscriptions
	SubscriptionsStoreName = "subscriptions"
	// PaymentsPrefix is the key prefix for payment records
	PaymentsPrefix = "payment:"
)
// GetSubscription loads the stored subscription record for a pubkey, or
// returns (nil, nil) when none exists.
func (w *W) GetSubscription(pubkey []byte) (*database.Subscription, error) {
	data, err := w.getStoreValue(SubscriptionsStoreName, "sub:"+string(pubkey))
	switch {
	case err != nil:
		return nil, err
	case data == nil:
		// No record stored for this pubkey.
		return nil, nil
	default:
		return w.deserializeSubscription(data)
	}
}
// IsSubscriptionActive reports whether a pubkey currently has an active
// subscription (within its trial or paid period).
//
// Side effect: the first call for an unknown pubkey creates and persists a
// 30-day trial subscription and reports it as active.
func (w *W) IsSubscriptionActive(pubkey []byte) (bool, error) {
	key := "sub:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return false, err
	}
	now := time.Now()
	if data == nil {
		// No record yet: provision a 30-day trial for this pubkey.
		sub := &database.Subscription{
			TrialEnd: now.AddDate(0, 0, 30),
		}
		subData := w.serializeSubscription(sub)
		if err := w.setStoreValue(SubscriptionsStoreName, key, subData); err != nil {
			return false, err
		}
		return true, nil
	}
	sub, err := w.deserializeSubscription(data)
	if err != nil {
		return false, err
	}
	// Active if within the trial window or a (set and unexpired) paid period.
	return now.Before(sub.TrialEnd) || (!sub.PaidUntil.IsZero() && now.Before(sub.PaidUntil)), nil
}
// ExtendSubscription adds the given number of days of paid service for a
// pubkey, creating a subscription record when none exists. When the current
// paid period is still active, the extension stacks on top of it; otherwise
// it counts from now.
func (w *W) ExtendSubscription(pubkey []byte, days int) error {
	if days <= 0 {
		return errors.New("invalid days")
	}
	key := "sub:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return err
	}
	now := time.Now()
	sub := &database.Subscription{}
	if data != nil {
		if sub, err = w.deserializeSubscription(data); err != nil {
			return err
		}
	}
	// Stack onto an active paid period; otherwise extend from now. A fresh
	// (zero) record has PaidUntil unset, so it also extends from now.
	base := now
	if !sub.PaidUntil.IsZero() && sub.PaidUntil.After(now) {
		base = sub.PaidUntil
	}
	sub.PaidUntil = base.AddDate(0, 0, days)
	return w.setStoreValue(SubscriptionsStoreName, key, w.serializeSubscription(sub))
}
// RecordPayment persists a payment record for a pubkey in the subscriptions
// store under the key "payment:<pubkey>:<RFC3339Nano timestamp>".
//
// NOTE(review): two payments recorded in the same nanosecond for the same
// pubkey would collide on the key and the second would overwrite the first —
// confirm whether that is acceptable for the expected payment rate.
func (w *W) RecordPayment(pubkey []byte, amount int64, invoice, preimage string) error {
	now := time.Now()
	payment := &database.Payment{
		Amount:    amount,
		Timestamp: now,
		Invoice:   invoice,
		Preimage:  preimage,
	}
	data := w.serializePayment(payment)
	// Create unique key with timestamp
	key := PaymentsPrefix + string(pubkey) + ":" + now.Format(time.RFC3339Nano)
	return w.setStoreValue(SubscriptionsStoreName, key, data)
}
// GetPaymentHistory returns all recorded payments for a pubkey by scanning
// the subscriptions store for keys beginning with "payment:<pubkey>:".
// Records that fail to deserialize are skipped (best-effort).
func (w *W) GetPaymentHistory(pubkey []byte) ([]database.Payment, error) {
	tx, err := w.db.Transaction(idb.TransactionReadOnly, SubscriptionsStoreName)
	if err != nil {
		return nil, err
	}
	store, err := tx.ObjectStore(SubscriptionsStoreName)
	if err != nil {
		return nil, err
	}
	cursorReq, err := store.OpenCursor(idb.CursorNext)
	if err != nil {
		return nil, err
	}
	wantPrefix := []byte(PaymentsPrefix + string(pubkey) + ":")
	var payments []database.Payment
	iterErr := cursorReq.Iter(w.ctx, func(cursor *idb.CursorWithValue) error {
		keyVal, keyErr := cursor.Key()
		if keyErr != nil {
			return keyErr
		}
		if bytes.HasPrefix(safeValueToBytes(keyVal), wantPrefix) {
			val, valErr := cursor.Value()
			if valErr != nil {
				return valErr
			}
			// Skip malformed records rather than aborting the scan.
			if payment, decErr := w.deserializePayment(safeValueToBytes(val)); decErr == nil {
				payments = append(payments, *payment)
			}
		}
		return cursor.Continue()
	})
	if iterErr != nil {
		return nil, iterErr
	}
	return payments, nil
}
// ExtendBlossomSubscription extends a blossom subscription by the given
// number of days, setting the service level and adjusting the storage quota
// (in MB). Creates the record when none exists.
func (w *W) ExtendBlossomSubscription(pubkey []byte, level string, storageMB int64, days int) error {
	if days <= 0 {
		return errors.New("invalid days")
	}
	key := "sub:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return err
	}
	now := time.Now()
	var sub *database.Subscription
	if data == nil {
		// First record for this pubkey: start the paid period from now.
		sub = &database.Subscription{
			PaidUntil:      now.AddDate(0, 0, days),
			BlossomLevel:   level,
			BlossomStorage: storageMB,
		}
	} else {
		sub, err = w.deserializeSubscription(data)
		if err != nil {
			return err
		}
		// Extend from current paid date if still active, otherwise from now.
		extendFrom := now
		if !sub.PaidUntil.IsZero() && sub.PaidUntil.After(now) {
			extendFrom = sub.PaidUntil
		}
		sub.PaidUntil = extendFrom.AddDate(0, 0, days)
		// Set level and accumulate storage.
		sub.BlossomLevel = level
		// NOTE(review): PaidUntil was just extended above, so with days > 0
		// this After(now) check is always true; the condition reduces to
		// BlossomStorage > 0. Presumably the intent was to test the
		// pre-extension expiry — confirm and fix if so.
		if sub.BlossomStorage > 0 && sub.PaidUntil.After(now) {
			sub.BlossomStorage += storageMB
		} else {
			sub.BlossomStorage = storageMB
		}
	}
	subData := w.serializeSubscription(sub)
	return w.setStoreValue(SubscriptionsStoreName, key, subData)
}
// GetBlossomStorageQuota reports the blossom storage quota (in MB) for a
// pubkey. It yields 0 when no subscription exists or the paid period has
// lapsed.
func (w *W) GetBlossomStorageQuota(pubkey []byte) (quotaMB int64, err error) {
	var sub *database.Subscription
	if sub, err = w.GetSubscription(pubkey); err != nil {
		return 0, err
	}
	if sub == nil {
		return 0, nil
	}
	// The quota only applies while the paid period is set and unexpired.
	if sub.PaidUntil.IsZero() || time.Now().After(sub.PaidUntil) {
		return 0, nil
	}
	return sub.BlossomStorage, nil
}
// IsFirstTimeUser reports whether a pubkey has never been seen before
// (no "firstlogin:" record exists).
//
// Side effect: the first call for an unknown pubkey writes a first-login
// record with the current time, so subsequent calls return false. The write
// error is deliberately ignored (best-effort bookkeeping).
func (w *W) IsFirstTimeUser(pubkey []byte) (bool, error) {
	key := "firstlogin:" + string(pubkey)
	data, err := w.getStoreValue(SubscriptionsStoreName, key)
	if err != nil {
		return false, err
	}
	if data == nil {
		// First time - record the login
		now := time.Now()
		loginData, _ := json.Marshal(map[string]interface{}{
			"first_login": now,
		})
		_ = w.setStoreValue(SubscriptionsStoreName, key, loginData)
		return true, nil
	}
	return false, nil
}
// serializeSubscription encodes a subscription record as JSON.
// The Marshal error is deliberately ignored: database.Subscription contains
// only JSON-encodable fields, so Marshal cannot fail here.
func (w *W) serializeSubscription(s *database.Subscription) []byte {
	data, _ := json.Marshal(s)
	return data
}
// deserializeSubscription decodes a JSON-encoded subscription record.
func (w *W) deserializeSubscription(data []byte) (*database.Subscription, error) {
	var s database.Subscription
	if err := json.Unmarshal(data, &s); err != nil {
		return nil, err
	}
	return &s, nil
}
// serializePayment encodes a payment as:
// amount (8 bytes BE) | unix timestamp (8 bytes BE) |
// invoice length (4 bytes BE) | invoice bytes |
// preimage length (4 bytes BE) | preimage bytes.
func (w *W) serializePayment(p *database.Payment) []byte {
	var scratch [8]byte
	buf := new(bytes.Buffer)
	// Fixed-width numeric header.
	binary.BigEndian.PutUint64(scratch[:], uint64(p.Amount))
	buf.Write(scratch[:])
	binary.BigEndian.PutUint64(scratch[:], uint64(p.Timestamp.Unix()))
	buf.Write(scratch[:])
	// Length-prefixed string fields.
	writeField := func(s string) {
		binary.BigEndian.PutUint32(scratch[:4], uint32(len(s)))
		buf.Write(scratch[:4])
		buf.WriteString(s)
	}
	writeField(p.Invoice)
	writeField(p.Preimage)
	return buf.Bytes()
}
// deserializePayment decodes a payment produced by serializePayment.
// Layout: amount (8 BE) | unix timestamp (8 BE) | invLen (4 BE) | invoice |
// preLen (4 BE) | preimage.
//
// Length fields are validated using int64 arithmetic: the previous
// uint32-typed checks (e.g. int(20+invLen+4)) could wrap around for hostile
// length values near MaxUint32 and pass the bounds test, then panic (or
// read out of range) on the slice expression.
func (w *W) deserializePayment(data []byte) (*database.Payment, error) {
	const minLen = 24 // 8 amount + 8 timestamp + 4 invLen + 4 preLen
	if len(data) < minLen {
		return nil, errors.New("invalid payment data")
	}
	p := &database.Payment{}
	p.Amount = int64(binary.BigEndian.Uint64(data[0:8]))
	p.Timestamp = time.Unix(int64(binary.BigEndian.Uint64(data[8:16])), 0)
	// Invoice: 4-byte length prefix; widen before arithmetic so the check
	// cannot overflow.
	invLen := int64(binary.BigEndian.Uint32(data[16:20]))
	if int64(len(data)) < 20+invLen+4 {
		return nil, errors.New("invalid invoice length")
	}
	p.Invoice = string(data[20 : 20+invLen])
	offset := 20 + invLen
	// Preimage: 4-byte length prefix with the same overflow-safe validation.
	preLen := int64(binary.BigEndian.Uint32(data[offset : offset+4]))
	if int64(len(data)) < offset+4+preLen {
		return nil, errors.New("invalid preimage length")
	}
	p.Preimage = string(data[offset+4 : offset+4+preLen])
	return p, nil
}

24
pkg/wasmdb/testdata/package-lock.json generated vendored Normal file
View File

@@ -0,0 +1,24 @@
{
"name": "wasmdb-test",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "wasmdb-test",
"version": "1.0.0",
"dependencies": {
"fake-indexeddb": "^6.0.0"
}
},
"node_modules/fake-indexeddb": {
"version": "6.2.5",
"resolved": "https://registry.npmjs.org/fake-indexeddb/-/fake-indexeddb-6.2.5.tgz",
"integrity": "sha512-CGnyrvbhPlWYMngksqrSSUT1BAVP49dZocrHuK0SvtR0D5TMs5wP0o3j7jexDJW01KSadjBp1M/71o/KR3nD1w==",
"license": "Apache-2.0",
"engines": {
"node": ">=18"
}
}
}
}

12
pkg/wasmdb/testdata/package.json vendored Normal file
View File

@@ -0,0 +1,12 @@
{
"name": "wasmdb-test",
"version": "1.0.0",
"description": "Node.js test harness for wasmdb WASM tests",
"type": "module",
"scripts": {
"test": "node run_wasm_tests.mjs"
},
"dependencies": {
"fake-indexeddb": "^6.0.0"
}
}

572
pkg/wasmdb/wasmdb.go Normal file
View File

@@ -0,0 +1,572 @@
//go:build js && wasm
// Package wasmdb provides a WebAssembly-compatible database implementation
// using IndexedDB as the storage backend. It replicates the Badger database's
// index schema for full query compatibility.
//
// This implementation uses aperturerobotics/go-indexeddb (a fork of hack-pad/go-indexeddb)
// which provides full IndexedDB bindings with cursor/range support and transaction retry
// mechanisms to handle IndexedDB's transaction expiration issues in Go WASM.
//
// Architecture:
// - Each index type (evt, eid, kc-, pc-, etc.) maps to an IndexedDB object store
// - Keys are binary-encoded using the same format as the Badger implementation
// - Range queries use IndexedDB cursors with KeyRange bounds
// - Serial numbers are managed using a dedicated "meta" object store
package wasmdb
import (
"context"
"encoding/binary"
"errors"
"fmt"
"sync"
"github.com/aperturerobotics/go-indexeddb/idb"
"github.com/hack-pad/safejs"
"lol.mleku.dev"
"lol.mleku.dev/chk"
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"next.orly.dev/pkg/database"
"next.orly.dev/pkg/database/indexes"
)
// Database identity and meta-store key constants. The meta store holds the
// serial counters and the relay identity secret as raw-byte values.
const (
	// DatabaseName is the IndexedDB database name
	DatabaseName = "orly-nostr-relay"
	// DatabaseVersion is incremented when schema changes require migration;
	// bumping it re-runs the upgrade callback in openDatabase.
	DatabaseVersion = 1
	// MetaStoreName holds metadata like serial counters
	MetaStoreName = "meta"
	// EventSerialKey is the key for the event serial counter in meta store
	EventSerialKey = "event_serial"
	// PubkeySerialKey is the key for the pubkey serial counter in meta store
	PubkeySerialKey = "pubkey_serial"
	// RelayIdentityKey is the key for the relay identity secret
	RelayIdentityKey = "relay_identity"
)
// Object store names matching Badger index prefixes. One IndexedDB object
// store exists per index type; stores are created in openDatabase's upgrade
// callback. NOTE(review): adding a name here only takes effect for existing
// installs when DatabaseVersion is bumped — confirm when extending this list.
var objectStoreNames = []string{
	MetaStoreName,
	string(indexes.EventPrefix),            // "evt" - full events
	string(indexes.SmallEventPrefix),       // "sev" - small events inline
	string(indexes.ReplaceableEventPrefix), // "rev" - replaceable events
	string(indexes.AddressableEventPrefix), // "aev" - addressable events
	string(indexes.IdPrefix),               // "eid" - event ID index
	string(indexes.FullIdPubkeyPrefix),     // "fpc" - full ID + pubkey + timestamp
	string(indexes.CreatedAtPrefix),        // "c--" - created_at index
	string(indexes.KindPrefix),             // "kc-" - kind index
	string(indexes.PubkeyPrefix),           // "pc-" - pubkey index
	string(indexes.KindPubkeyPrefix),       // "kpc" - kind + pubkey index
	string(indexes.TagPrefix),              // "tc-" - tag index
	string(indexes.TagKindPrefix),          // "tkc" - tag + kind index
	string(indexes.TagPubkeyPrefix),        // "tpc" - tag + pubkey index
	string(indexes.TagKindPubkeyPrefix),    // "tkp" - tag + kind + pubkey index
	string(indexes.WordPrefix),             // "wrd" - word search index
	string(indexes.ExpirationPrefix),       // "exp" - expiration index
	string(indexes.VersionPrefix),          // "ver" - schema version
	string(indexes.PubkeySerialPrefix),     // "pks" - pubkey serial index
	string(indexes.SerialPubkeyPrefix),     // "spk" - serial to pubkey
	string(indexes.EventPubkeyGraphPrefix), // "epg" - event-pubkey graph
	string(indexes.PubkeyEventGraphPrefix), // "peg" - pubkey-event graph
	"markers",                              // metadata key-value storage
	"subscriptions",                        // payment subscriptions
	"nip43",                                // NIP-43 membership
	"invites",                              // invite codes
}
// W implements the database.Database interface using IndexedDB as the
// storage backend.
type W struct {
	ctx     context.Context    // lifetime context; Done() triggers shutdown
	cancel  context.CancelFunc // cancels ctx
	dataDir string             // Not really used in WASM, but kept for interface compatibility
	Logger  *logger
	db      *idb.Database // underlying IndexedDB handle; nil after Close
	dbMu    sync.RWMutex  // guards db
	ready   chan struct{} // closed by warmup once the db is usable
	// Serial counters (cached in memory, persisted to IndexedDB)
	eventSerial  uint64
	pubkeySerial uint64
	serialMu     sync.Mutex // guards both serial counters
}

// Ensure W implements database.Database interface at compile time
var _ database.Database = (*W)(nil)
// init registers the wasmdb database factory with the database package so
// the host application can construct this engine without importing wasmdb
// symbols directly.
func init() {
	database.RegisterWasmDBFactory(func(
		ctx context.Context,
		cancel context.CancelFunc,
		cfg *database.DatabaseConfig,
	) (database.Database, error) {
		return NewWithConfig(ctx, cancel, cfg)
	})
}
// NewWithConfig creates a new IndexedDB-based database instance: it opens
// (or creates) the IndexedDB database, restores the persisted serial
// counters, and spawns the warmup and shutdown goroutines. The returned W
// is usable once its Ready() channel closes.
func NewWithConfig(
	ctx context.Context, cancel context.CancelFunc, cfg *database.DatabaseConfig,
) (*W, error) {
	w := &W{
		ctx:     ctx,
		cancel:  cancel,
		dataDir: cfg.DataDir,
		Logger:  NewLogger(lol.GetLogLevel(cfg.LogLevel)),
		ready:   make(chan struct{}),
	}
	// Open or create the IndexedDB database
	if err := w.openDatabase(); err != nil {
		return nil, fmt.Errorf("failed to open IndexedDB: %w", err)
	}
	// Load serial counters from storage
	if err := w.loadSerialCounters(); err != nil {
		return nil, fmt.Errorf("failed to load serial counters: %w", err)
	}
	// Start warmup goroutine
	go w.warmup()
	// Shutdown handler: close the database once the context ends.
	// NOTE(review): w.cancel() here fires after ctx is already done, so it
	// appears redundant — presumably kept for symmetry; confirm.
	go func() {
		<-w.ctx.Done()
		w.cancel()
		w.Close()
	}()
	return w, nil
}
// New creates a new IndexedDB-based database instance with default
// configuration, delegating to NewWithConfig.
func New(
	ctx context.Context, cancel context.CancelFunc, dataDir, logLevel string,
) (*W, error) {
	return NewWithConfig(ctx, cancel, &database.DatabaseConfig{
		DataDir:  dataDir,
		LogLevel: logLevel,
	})
}
// openDatabase opens or creates the IndexedDB database with all required
// object stores. The upgrade callback passed to factory.Open only runs when
// the browser reports an older version (including first creation); it
// creates any object store from objectStoreNames that does not yet exist.
func (w *W) openDatabase() error {
	w.dbMu.Lock()
	defer w.dbMu.Unlock()
	// Get the IndexedDB factory (panics if not available)
	factory := idb.Global()
	// Open the database with upgrade handler
	openReq, err := factory.Open(w.ctx, DatabaseName, DatabaseVersion, func(db *idb.Database, oldVersion, newVersion uint) error {
		// This is called when the database needs to be created or upgraded
		w.Logger.Infof("IndexedDB upgrade: version %d -> %d", oldVersion, newVersion)
		// Create all object stores
		for _, storeName := range objectStoreNames {
			// Check if store already exists
			if !w.hasObjectStore(db, storeName) {
				// Create object store without auto-increment (we manage keys manually)
				opts := idb.ObjectStoreOptions{}
				if _, err := db.CreateObjectStore(storeName, opts); err != nil {
					return fmt.Errorf("failed to create object store %s: %w", storeName, err)
				}
				w.Logger.Debugf("created object store: %s", storeName)
			}
		}
		return nil
	})
	if err != nil {
		return fmt.Errorf("failed to open IndexedDB: %w", err)
	}
	// Await completes once the open (and any upgrade) has finished.
	db, err := openReq.Await(w.ctx)
	if err != nil {
		return fmt.Errorf("failed to await IndexedDB open: %w", err)
	}
	w.db = db
	return nil
}
// hasObjectStore reports whether the database already contains an object
// store with the given name. Errors listing stores are treated as "absent".
func (w *W) hasObjectStore(db *idb.Database, name string) bool {
	names, err := db.ObjectStoreNames()
	if err != nil {
		return false
	}
	for i := range names {
		if names[i] == name {
			return true
		}
	}
	return false
}
// loadSerialCounters restores the cached event and pubkey serial counters
// from the meta store. Missing or malformed entries leave a counter at 0.
func (w *W) loadSerialCounters() error {
	w.serialMu.Lock()
	defer w.serialMu.Unlock()
	// Each counter is persisted as an 8-byte big-endian value.
	load := func(key string, dst *uint64) error {
		raw, err := w.getMeta(key)
		if err != nil {
			return err
		}
		if len(raw) == 8 {
			*dst = binary.BigEndian.Uint64(raw)
		}
		return nil
	}
	if err := load(EventSerialKey, &w.eventSerial); err != nil {
		return err
	}
	if err := load(PubkeySerialKey, &w.pubkeySerial); err != nil {
		return err
	}
	w.Logger.Infof("loaded serials: event=%d, pubkey=%d", w.eventSerial, w.pubkeySerial)
	return nil
}
// getMeta retrieves a raw value from the meta object store. A missing key
// yields (nil, nil), not an error.
//
// Delegates to getStoreValue, which implements the identical read path for
// arbitrary stores; the previous body duplicated that logic line-for-line.
func (w *W) getMeta(key string) ([]byte, error) {
	return w.getStoreValue(MetaStoreName, key)
}
// setMeta stores a raw value in the meta object store, committing the
// transaction before returning.
//
// Delegates to setStoreValue, which implements the identical write path for
// arbitrary stores; the previous body duplicated that logic line-for-line.
func (w *W) setMeta(key string, value []byte) error {
	return w.setStoreValue(MetaStoreName, key, value)
}
// nextEventSerial allocates the next event serial number, persisting it as
// an 8-byte big-endian value in the meta store.
//
// The cached counter is only advanced after a successful persist: the
// previous version incremented it first, so a failed setMeta left the
// in-memory counter permanently ahead of durable storage.
func (w *W) nextEventSerial() (uint64, error) {
	w.serialMu.Lock()
	defer w.serialMu.Unlock()
	serial := w.eventSerial + 1
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, serial)
	if err := w.setMeta(EventSerialKey, buf); err != nil {
		return 0, err
	}
	// Commit the cache only once the value is durable.
	w.eventSerial = serial
	return serial, nil
}
// nextPubkeySerial allocates the next pubkey serial number, persisting it as
// an 8-byte big-endian value in the meta store.
//
// The cached counter is only advanced after a successful persist: the
// previous version incremented it first, so a failed setMeta left the
// in-memory counter permanently ahead of durable storage.
func (w *W) nextPubkeySerial() (uint64, error) {
	w.serialMu.Lock()
	defer w.serialMu.Unlock()
	serial := w.pubkeySerial + 1
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, serial)
	if err := w.setMeta(PubkeySerialKey, buf); err != nil {
		return 0, err
	}
	// Commit the cache only once the value is durable.
	w.pubkeySerial = serial
	return serial, nil
}
// warmup closes the ready channel to signal that the database can serve
// requests. IndexedDB needs no further preparation after openDatabase, so
// this completes immediately; it is kept as a goroutine for parity with
// engines that need real warmup.
func (w *W) warmup() {
	defer close(w.ready)
	// IndexedDB is ready immediately after opening
	w.Logger.Infof("IndexedDB database warmup complete, ready to serve requests")
}
// Path returns the configured data directory. IndexedDB has no filesystem
// path; this exists only for database.Database interface compatibility.
func (w *W) Path() string { return w.dataDir }
// Init initializes the database. No-op: all setup happens in NewWithConfig.
func (w *W) Init(path string) error { return nil }
// Sync flushes pending writes. No-op: IndexedDB transactions are committed
// when awaited, so there is nothing extra to flush.
func (w *W) Sync() error { return nil }
// Close shuts down the underlying IndexedDB handle. Safe to call more than
// once; subsequent calls are no-ops.
func (w *W) Close() error {
	w.dbMu.Lock()
	defer w.dbMu.Unlock()
	if w.db == nil {
		// Already closed.
		return nil
	}
	w.db.Close()
	w.db = nil
	return nil
}
// Wipe removes all data by deleting the IndexedDB database, resetting the
// cached serial counters, and reopening (which recreates every object store
// via the upgrade callback).
//
// Locking: openDatabase takes dbMu itself, so this method must drop the lock
// before calling it and retake it afterwards so the deferred Unlock stays
// balanced. NOTE(review): between the Unlock and openDatabase's Lock another
// goroutine could observe w.db == nil — confirm callers serialize Wipe.
func (w *W) Wipe() error {
	w.dbMu.Lock()
	defer w.dbMu.Unlock()
	// Close the current database
	if w.db != nil {
		w.db.Close()
		w.db = nil
	}
	// Delete the database
	factory := idb.Global()
	delReq, err := factory.DeleteDatabase(DatabaseName)
	if err != nil {
		return fmt.Errorf("failed to delete IndexedDB: %w", err)
	}
	if err := delReq.Await(w.ctx); err != nil {
		return fmt.Errorf("failed to await IndexedDB delete: %w", err)
	}
	// Reset serial counters
	w.serialMu.Lock()
	w.eventSerial = 0
	w.pubkeySerial = 0
	w.serialMu.Unlock()
	// Reopen the database (this will recreate all object stores).
	// openDatabase locks dbMu internally, so release it around the call.
	w.dbMu.Unlock()
	err = w.openDatabase()
	w.dbMu.Lock() // re-acquire so the deferred Unlock is balanced
	return err
}
// SetLogLevel sets the logging level by name (parsed via lol.GetLogLevel).
func (w *W) SetLogLevel(level string) {
	w.Logger.SetLogLevel(lol.GetLogLevel(level))
}
// Ready returns a channel that closes when the database is ready to serve
// requests (closed by warmup).
func (w *W) Ready() <-chan struct{} { return w.ready }
// RunMigrations runs database migrations. No-op: schema migration is handled
// by the IndexedDB version-upgrade callback in openDatabase.
func (w *W) RunMigrations() {}
// EventIdsBySerial retrieves event IDs by serial range.
// Not implemented in the WASM backend; always returns an error.
func (w *W) EventIdsBySerial(start uint64, count int) ([]uint64, error) {
	return nil, errors.New("not implemented")
}
// Query cache methods. The WASM backend performs no query caching, so the
// getters always miss and the setters/invalidator are no-ops.
func (w *W) GetCachedJSON(f *filter.F) ([][]byte, bool)              { return nil, false }
func (w *W) CacheMarshaledJSON(f *filter.F, marshaledJSON [][]byte)  {}
func (w *W) GetCachedEvents(f *filter.F) (event.S, bool)             { return nil, false }
func (w *W) CacheEvents(f *filter.F, events event.S)                 {}
func (w *W) InvalidateQueryCache()                                   {}
// Placeholder implementations for remaining interface methods
// Query methods are implemented in query-events.go
// Delete methods are implemented in delete-event.go
// Import, Export, and ImportEvents methods are implemented in import-export.go
// GetRelayIdentitySecret returns the stored relay identity secret key, or
// (nil, nil) when none has been stored yet.
func (w *W) GetRelayIdentitySecret() (skb []byte, err error) {
	return w.getMeta(RelayIdentityKey)
}
// SetRelayIdentitySecret persists the relay identity secret key in the meta
// store.
func (w *W) SetRelayIdentitySecret(skb []byte) error {
	return w.setMeta(RelayIdentityKey, skb)
}
// GetOrCreateRelayIdentitySecret returns the relay's identity secret key,
// generating and persisting a fresh 32-byte random key on first use.
func (w *W) GetOrCreateRelayIdentitySecret() (skb []byte, err error) {
	if skb, err = w.GetRelayIdentitySecret(); err != nil {
		return nil, err
	}
	if skb != nil {
		// Already provisioned.
		return skb, nil
	}
	// First use: draw 32 random bytes (via crypto.getRandomValues in WASM).
	fresh := make([]byte, 32)
	if err = cryptoRandom(fresh); err != nil {
		return nil, err
	}
	if err = w.SetRelayIdentitySecret(fresh); err != nil {
		return nil, err
	}
	return fresh, nil
}
// SetMarker stores an arbitrary metadata value under key in the "markers"
// object store.
func (w *W) SetMarker(key string, value []byte) error {
	return w.setStoreValue("markers", key, value)
}
// GetMarker returns the metadata value stored under key, or (nil, nil) when
// absent.
func (w *W) GetMarker(key string) (value []byte, err error) {
	return w.getStoreValue("markers", key)
}
// HasMarker reports whether a marker exists under key. Read errors are
// reported as "absent".
func (w *W) HasMarker(key string) bool {
	val, err := w.GetMarker(key)
	return err == nil && val != nil
}
// DeleteMarker removes the marker stored under key.
func (w *W) DeleteMarker(key string) error {
	return w.deleteStoreValue("markers", key)
}
// Subscription methods are implemented in subscriptions.go
// NIP-43 methods are implemented in nip43.go
// Helper methods for object store operations
// setStoreValue writes a []byte value under a string key in the named
// object store, committing the transaction before returning.
func (w *W) setStoreValue(storeName, key string, value []byte) error {
	tx, err := w.db.Transaction(idb.TransactionReadWrite, storeName)
	if err != nil {
		return err
	}
	store, err := tx.ObjectStore(storeName)
	if err != nil {
		return err
	}
	keyJS, err := safejs.ValueOf(key)
	if err != nil {
		return err
	}
	// PutKey is used because keys are managed explicitly (no key path).
	if _, err = store.PutKey(keyJS, bytesToSafeValue(value)); err != nil {
		return err
	}
	// Block until the transaction has committed.
	return tx.Await(w.ctx)
}
// getStoreValue reads the []byte value stored under key in the named object
// store. A missing key yields (nil, nil).
//
// NOTE(review): any error from Await is also collapsed into (nil, nil) —
// presumably to treat "not found" uniformly, but this hides real transaction
// failures from callers; confirm whether that is intentional.
func (w *W) getStoreValue(storeName, key string) ([]byte, error) {
	tx, err := w.db.Transaction(idb.TransactionReadOnly, storeName)
	if err != nil {
		return nil, err
	}
	store, err := tx.ObjectStore(storeName)
	if err != nil {
		return nil, err
	}
	keyVal, err := safejs.ValueOf(key)
	if err != nil {
		return nil, err
	}
	req, err := store.Get(keyVal)
	if err != nil {
		return nil, err
	}
	val, err := req.Await(w.ctx)
	if err != nil {
		// Treated as key-not-found (see NOTE above).
		return nil, nil
	}
	if val.IsUndefined() || val.IsNull() {
		return nil, nil
	}
	return safeValueToBytes(val), nil
}
// deleteStoreValue removes the entry under key from the named object store,
// committing the transaction before returning.
func (w *W) deleteStoreValue(storeName, key string) error {
	tx, err := w.db.Transaction(idb.TransactionReadWrite, storeName)
	if err != nil {
		return err
	}
	store, err := tx.ObjectStore(storeName)
	if err != nil {
		return err
	}
	keyJS, err := safejs.ValueOf(key)
	if err != nil {
		return err
	}
	if _, err = store.Delete(keyJS); err != nil {
		return err
	}
	// Block until the delete has committed.
	return tx.Await(w.ctx)
}
// Placeholder for unused variable
var _ = chk.E

1739
pkg/wasmdb/wasmdb_test.go Normal file

File diff suppressed because it is too large Load Diff