badger/lmdb: update go-nostr binary encoding to fix limited number of tags; migration script requires manual procedure; update badger to use just 8 bytes of ids and pubkeys.
This commit is contained in:
@@ -19,17 +19,17 @@ func (b *BadgerBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error
|
||||
idx[0] = rawEventStorePrefix
|
||||
|
||||
// query event by id to get its idx
|
||||
id, _ := hex.DecodeString(evt.ID)
|
||||
prefix := make([]byte, 1+32)
|
||||
idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])
|
||||
prefix := make([]byte, 1+8)
|
||||
prefix[0] = indexIdPrefix
|
||||
copy(prefix[1:], id)
|
||||
copy(prefix[1:], idPrefix8)
|
||||
opts := badger.IteratorOptions{
|
||||
PrefetchValues: false,
|
||||
}
|
||||
it := txn.NewIterator(opts)
|
||||
it.Seek(prefix)
|
||||
if it.ValidForPrefix(prefix) {
|
||||
idx = append(idx, it.Item().Key()[1+32:]...)
|
||||
idx = append(idx, it.Item().Key()[1+8:]...)
|
||||
}
|
||||
it.Close()
|
||||
|
||||
|
||||
@@ -15,18 +15,18 @@ func getTagIndexPrefix(tagValue string) ([]byte, int) {
|
||||
|
||||
if kind, pkb, d := eventstore.GetAddrTagElements(tagValue); len(pkb) == 32 {
|
||||
// store value in the new special "a" tag index
|
||||
k = make([]byte, 1+2+32+len(d)+4+4)
|
||||
k = make([]byte, 1+2+8+len(d)+4+4)
|
||||
k[0] = indexTagAddrPrefix
|
||||
binary.BigEndian.PutUint16(k[1:], kind)
|
||||
copy(k[1+2:], pkb)
|
||||
copy(k[1+2+32:], d)
|
||||
offset = 1 + 2 + 32 + len(d)
|
||||
copy(k[1+2:], pkb[0:8])
|
||||
copy(k[1+2+8:], d)
|
||||
offset = 1 + 2 + 8 + len(d)
|
||||
} else if vb, _ := hex.DecodeString(tagValue); len(vb) == 32 {
|
||||
// store value as bytes
|
||||
k = make([]byte, 1+32+4+4)
|
||||
k = make([]byte, 1+8+4+4)
|
||||
k[0] = indexTag32Prefix
|
||||
copy(k[1:], vb)
|
||||
offset = 1 + 32
|
||||
copy(k[1:], vb[0:8])
|
||||
offset = 1 + 8
|
||||
} else {
|
||||
// store whatever as utf-8
|
||||
k = make([]byte, 1+len(tagValue)+4+4)
|
||||
@@ -44,22 +44,22 @@ func getIndexKeysForEvent(evt *nostr.Event, idx []byte) [][]byte {
|
||||
// indexes
|
||||
{
|
||||
// ~ by id
|
||||
id, _ := hex.DecodeString(evt.ID)
|
||||
k := make([]byte, 1+32+4)
|
||||
idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])
|
||||
k := make([]byte, 1+8+4)
|
||||
k[0] = indexIdPrefix
|
||||
copy(k[1:], id)
|
||||
copy(k[1+32:], idx)
|
||||
copy(k[1:], idPrefix8)
|
||||
copy(k[1+8:], idx)
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
{
|
||||
// ~ by pubkey+date
|
||||
pubkey, _ := hex.DecodeString(evt.PubKey)
|
||||
k := make([]byte, 1+32+4+4)
|
||||
pubkeyPrefix8, _ := hex.DecodeString(evt.PubKey[0 : 8*2])
|
||||
k := make([]byte, 1+8+4+4)
|
||||
k[0] = indexPubkeyPrefix
|
||||
copy(k[1:], pubkey)
|
||||
binary.BigEndian.PutUint32(k[1+32:], uint32(evt.CreatedAt))
|
||||
copy(k[1+32+4:], idx)
|
||||
copy(k[1:], pubkeyPrefix8)
|
||||
binary.BigEndian.PutUint32(k[1+8:], uint32(evt.CreatedAt))
|
||||
copy(k[1+8+4:], idx)
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
@@ -75,13 +75,13 @@ func getIndexKeysForEvent(evt *nostr.Event, idx []byte) [][]byte {
|
||||
|
||||
{
|
||||
// ~ by pubkey+kind+date
|
||||
pubkey, _ := hex.DecodeString(evt.PubKey)
|
||||
k := make([]byte, 1+32+2+4+4)
|
||||
pubkeyPrefix8, _ := hex.DecodeString(evt.PubKey[0 : 8*2])
|
||||
k := make([]byte, 1+8+2+4+4)
|
||||
k[0] = indexPubkeyKindPrefix
|
||||
copy(k[1:], pubkey)
|
||||
binary.BigEndian.PutUint16(k[1+32:], uint16(evt.Kind))
|
||||
binary.BigEndian.PutUint32(k[1+32+2:], uint32(evt.CreatedAt))
|
||||
copy(k[1+32+2+4:], idx)
|
||||
copy(k[1:], pubkeyPrefix8)
|
||||
binary.BigEndian.PutUint16(k[1+8:], uint16(evt.Kind))
|
||||
binary.BigEndian.PutUint32(k[1+8+2:], uint32(evt.CreatedAt))
|
||||
copy(k[1+8+2+4:], idx)
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
|
||||
@@ -2,10 +2,9 @@ package badger
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"log"
|
||||
"fmt"
|
||||
|
||||
"github.com/dgraph-io/badger/v4"
|
||||
"github.com/fiatjaf/eventstore"
|
||||
)
|
||||
|
||||
func (b *BadgerBackend) runMigrations() error {
|
||||
@@ -27,80 +26,32 @@ func (b *BadgerBackend) runMigrations() error {
|
||||
// do the migrations in increasing steps (there is no rollback)
|
||||
//
|
||||
|
||||
if version < 1 {
|
||||
log.Println("migration 1: move all keys from indexTag to indexTag32 if they are 32-bytes")
|
||||
prefix := []byte{indexTagPrefix}
|
||||
it := txn.NewIterator(badger.IteratorOptions{
|
||||
PrefetchValues: true,
|
||||
PrefetchSize: 100,
|
||||
Prefix: prefix,
|
||||
})
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
|
||||
item := it.Item()
|
||||
key := item.Key()
|
||||
|
||||
if len(key) == 1+32+4+4 {
|
||||
// it's 32 bytes
|
||||
log.Printf("moving key %x", key)
|
||||
if err := txn.Delete(key); err != nil {
|
||||
return err
|
||||
}
|
||||
key[0] = indexTag32Prefix
|
||||
if err := txn.Set(key, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// bump version
|
||||
version = 1
|
||||
if err := b.bumpVersion(txn, 1); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if version < 2 {
|
||||
log.Println("migration 2: move all keys from indexTag to indexTagAddr if they are like 'a' tags")
|
||||
prefix := []byte{indexTagPrefix}
|
||||
it := txn.NewIterator(badger.IteratorOptions{
|
||||
PrefetchValues: true,
|
||||
PrefetchSize: 100,
|
||||
Prefix: prefix,
|
||||
})
|
||||
defer it.Close()
|
||||
|
||||
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
|
||||
item := it.Item()
|
||||
key := item.Key()
|
||||
|
||||
if kind, pkb, d := eventstore.GetAddrTagElements(string(key[1 : len(key)-4-4])); len(pkb) == 32 {
|
||||
// it's an 'a' tag or alike
|
||||
if err := txn.Delete(key); err != nil {
|
||||
return err
|
||||
}
|
||||
k := make([]byte, 1+2+32+len(d)+4+4)
|
||||
k[0] = indexTagAddrPrefix
|
||||
binary.BigEndian.PutUint16(k[1:], kind)
|
||||
copy(k[1+2:], pkb)
|
||||
copy(k[1+2+32:], d)
|
||||
copy(k[1+2+32+len(d):], key[len(key)-4-4:])
|
||||
if err := txn.Set(k, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("moved key %x to %x", key, k)
|
||||
}
|
||||
}
|
||||
|
||||
// bump version
|
||||
version = 2
|
||||
if err := b.bumpVersion(txn, 2); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// the 3 first migrations go to trash because on version 3 we need to export and import all the data anyway
|
||||
if version < 3 {
|
||||
// if there is any data in the relay we will stop and notify the user,
|
||||
// otherwise we just set version to 3 and proceed
|
||||
prefix := []byte{indexIdPrefix}
|
||||
it := txn.NewIterator(badger.IteratorOptions{
|
||||
PrefetchValues: true,
|
||||
PrefetchSize: 100,
|
||||
Prefix: prefix,
|
||||
})
|
||||
defer it.Close()
|
||||
|
||||
hasAnyEntries := false
|
||||
for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
|
||||
hasAnyEntries = true
|
||||
break
|
||||
}
|
||||
|
||||
if hasAnyEntries {
|
||||
return fmt.Errorf("your database is at version %d, but in order to migrate up to version 3 you must manually export all the events and then import again: run an old version of this software, export the data, then delete the database files, run the new version, import the data back in.", version)
|
||||
}
|
||||
|
||||
b.bumpVersion(txn, 3)
|
||||
}
|
||||
|
||||
if version < 4 {
|
||||
// ...
|
||||
}
|
||||
|
||||
|
||||
@@ -206,13 +206,13 @@ func prepareQueries(filter nostr.Filter) (
|
||||
index = indexIdPrefix
|
||||
queries = make([]query, len(filter.IDs))
|
||||
for i, idHex := range filter.IDs {
|
||||
prefix := make([]byte, 1+32)
|
||||
prefix := make([]byte, 1+8)
|
||||
prefix[0] = index
|
||||
id, _ := hex.DecodeString(idHex)
|
||||
if len(id) != 32 {
|
||||
if len(idHex) != 64 {
|
||||
return nil, nil, 0, fmt.Errorf("invalid id '%s'", idHex)
|
||||
}
|
||||
copy(prefix[1:], id)
|
||||
idPrefix8, _ := hex.DecodeString(idHex[0 : 8*2])
|
||||
copy(prefix[1:], idPrefix8)
|
||||
queries[i] = query{i: i, prefix: prefix, skipTimestamp: true}
|
||||
}
|
||||
} else if len(filter.Authors) > 0 {
|
||||
@@ -220,13 +220,13 @@ func prepareQueries(filter nostr.Filter) (
|
||||
index = indexPubkeyPrefix
|
||||
queries = make([]query, len(filter.Authors))
|
||||
for i, pubkeyHex := range filter.Authors {
|
||||
pubkey, _ := hex.DecodeString(pubkeyHex)
|
||||
if len(pubkey) != 32 {
|
||||
if len(pubkeyHex) != 64 {
|
||||
return nil, nil, 0, fmt.Errorf("invalid pubkey '%s'", pubkeyHex)
|
||||
}
|
||||
prefix := make([]byte, 1+32)
|
||||
pubkeyPrefix8, _ := hex.DecodeString(pubkeyHex[0 : 8*2])
|
||||
prefix := make([]byte, 1+8)
|
||||
prefix[0] = index
|
||||
copy(prefix[1:], pubkey)
|
||||
copy(prefix[1:], pubkeyPrefix8)
|
||||
queries[i] = query{i: i, prefix: prefix}
|
||||
}
|
||||
} else {
|
||||
@@ -235,14 +235,14 @@ func prepareQueries(filter nostr.Filter) (
|
||||
i := 0
|
||||
for _, pubkeyHex := range filter.Authors {
|
||||
for _, kind := range filter.Kinds {
|
||||
pubkey, _ := hex.DecodeString(pubkeyHex)
|
||||
if len(pubkey) != 32 {
|
||||
if len(pubkeyHex) != 64 {
|
||||
return nil, nil, 0, fmt.Errorf("invalid pubkey '%s'", pubkeyHex)
|
||||
}
|
||||
prefix := make([]byte, 1+32+2)
|
||||
pubkeyPrefix8, _ := hex.DecodeString(pubkeyHex[0 : 8*2])
|
||||
prefix := make([]byte, 1+8+2)
|
||||
prefix[0] = index
|
||||
copy(prefix[1:], pubkey)
|
||||
binary.BigEndian.PutUint16(prefix[1+32:], uint16(kind))
|
||||
copy(prefix[1:], pubkeyPrefix8)
|
||||
binary.BigEndian.PutUint16(prefix[1+8:], uint16(kind))
|
||||
queries[i] = query{i: i, prefix: prefix}
|
||||
i++
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ func (b *BadgerBackend) SaveEvent(ctx context.Context, evt *nostr.Event) error {
|
||||
return b.Update(func(txn *badger.Txn) error {
|
||||
// query event by id to ensure we don't save duplicates
|
||||
id, _ := hex.DecodeString(evt.ID)
|
||||
prefix := make([]byte, 1+32)
|
||||
prefix := make([]byte, 1+8)
|
||||
prefix[0] = indexIdPrefix
|
||||
copy(prefix[1:], id)
|
||||
it := txn.NewIterator(badger.IteratorOptions{})
|
||||
|
||||
2
go.mod
2
go.mod
@@ -12,7 +12,7 @@ require (
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/mailru/easyjson v0.7.7
|
||||
github.com/mattn/go-sqlite3 v1.14.18
|
||||
github.com/nbd-wtf/go-nostr v0.27.0
|
||||
github.com/nbd-wtf/go-nostr v0.27.1
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d
|
||||
|
||||
4
go.sum
4
go.sum
@@ -116,8 +116,8 @@ github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ
|
||||
github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
|
||||
github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI=
|
||||
github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/nbd-wtf/go-nostr v0.27.0 h1:h6JmMMmfNcAORTL2kk/K3+U6Mju6rk/IjcHA/PMeOc8=
|
||||
github.com/nbd-wtf/go-nostr v0.27.0/go.mod h1:bkffJI+x914sPQWum9ZRUn66D7NpDnAoWo1yICvj3/0=
|
||||
github.com/nbd-wtf/go-nostr v0.27.1 h1:DAwXpAUGxq3/B8KZIWlZmJIoDNkMvlKqQwB/OM/49xk=
|
||||
github.com/nbd-wtf/go-nostr v0.27.1/go.mod h1:bkffJI+x914sPQWum9ZRUn66D7NpDnAoWo1yICvj3/0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
|
||||
@@ -10,8 +10,8 @@ import (
|
||||
|
||||
func (b *LMDBBackend) DeleteEvent(ctx context.Context, evt *nostr.Event) error {
|
||||
err := b.lmdbEnv.Update(func(txn *lmdb.Txn) error {
|
||||
id, _ := hex.DecodeString(evt.ID)
|
||||
idx, err := txn.Get(b.indexId, id)
|
||||
idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])
|
||||
idx, err := txn.Get(b.indexId, idPrefix8)
|
||||
if operr, ok := err.(*lmdb.OpError); ok && operr.Errno == lmdb.NotFound {
|
||||
// we already do not have this
|
||||
return nil
|
||||
|
||||
@@ -46,15 +46,16 @@ func (b *LMDBBackend) getIndexKeysForEvent(evt *nostr.Event) []key {
|
||||
// indexes
|
||||
{
|
||||
// ~ by id
|
||||
k, _ := hex.DecodeString(evt.ID)
|
||||
idPrefix8, _ := hex.DecodeString(evt.ID[0 : 8*2])
|
||||
k := idPrefix8
|
||||
keys = append(keys, key{dbi: b.indexId, key: k})
|
||||
}
|
||||
|
||||
{
|
||||
// ~ by pubkey+date
|
||||
pubkey, _ := hex.DecodeString(evt.PubKey)
|
||||
pubkeyPrefix8, _ := hex.DecodeString(evt.PubKey[0 : 8*2])
|
||||
k := make([]byte, 8+4)
|
||||
copy(k[:], pubkey[0:8])
|
||||
copy(k[:], pubkeyPrefix8)
|
||||
binary.BigEndian.PutUint32(k[8:], uint32(evt.CreatedAt))
|
||||
keys = append(keys, key{dbi: b.indexPubkey, key: k})
|
||||
}
|
||||
@@ -69,9 +70,9 @@ func (b *LMDBBackend) getIndexKeysForEvent(evt *nostr.Event) []key {
|
||||
|
||||
{
|
||||
// ~ by pubkey+kind+date
|
||||
pubkey, _ := hex.DecodeString(evt.PubKey)
|
||||
pubkeyPrefix8, _ := hex.DecodeString(evt.PubKey[0 : 8*2])
|
||||
k := make([]byte, 8+2+4)
|
||||
copy(k[:], pubkey[0:8])
|
||||
copy(k[:], pubkeyPrefix8)
|
||||
binary.BigEndian.PutUint16(k[8:], uint16(evt.Kind))
|
||||
binary.BigEndian.PutUint32(k[8+2:], uint32(evt.CreatedAt))
|
||||
keys = append(keys, key{dbi: b.indexPubkeyKind, key: k})
|
||||
|
||||
@@ -3,10 +3,8 @@ package lmdb
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/PowerDNS/lmdb-go/lmdb"
|
||||
"github.com/fiatjaf/eventstore"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -30,124 +28,32 @@ func (b *LMDBBackend) runMigrations() error {
|
||||
// do the migrations in increasing steps (there is no rollback)
|
||||
//
|
||||
|
||||
if version < 1 {
|
||||
log.Println("migration 1: move all keys from indexTag to indexTag32 if they are 32-bytes")
|
||||
cursor, err := txn.OpenCursor(b.indexTag)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open cursor in migration 1: %w", err)
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
key, val, err := cursor.Get(nil, nil, lmdb.First)
|
||||
for err == nil {
|
||||
if len(key)-4 /* uint32 created_at */ == 32 {
|
||||
log.Printf("moving key %x->%x", key, val)
|
||||
if err := txn.Put(b.indexTag32, key, val, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := txn.Del(b.indexTag, key, val); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// next -- will end on err
|
||||
key, val, err = cursor.Get(nil, nil, lmdb.Next)
|
||||
}
|
||||
if lmdbErr, ok := err.(*lmdb.OpError); ok && lmdbErr.Errno != lmdb.NotFound {
|
||||
// exited the loop with an error different from NOTFOUND
|
||||
return err
|
||||
}
|
||||
|
||||
// bump version
|
||||
if err := b.bumpVersion(txn, 1); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if version < 2 {
|
||||
log.Println("migration 2: use just 8 bytes for pubkeys and ids instead of 32 bytes")
|
||||
// rewrite all keys from indexTag32, indexId, indexPubkey and indexPubkeyKind
|
||||
for _, dbi := range []lmdb.DBI{b.indexTag32, b.indexId, b.indexPubkey, b.indexPubkeyKind} {
|
||||
cursor, err := txn.OpenCursor(dbi)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open cursor in migration 2: %w", err)
|
||||
}
|
||||
defer cursor.Close()
|
||||
key, val, err := cursor.Get(nil, nil, lmdb.First)
|
||||
for err == nil {
|
||||
if err := txn.Del(dbi, key, val); err != nil {
|
||||
return err
|
||||
}
|
||||
oldkey := fmt.Sprintf("%x", key)
|
||||
|
||||
// these keys are always 32 bytes of an id or pubkey, then something afterwards, doesn't matter
|
||||
// so we just keep 8 bytes and overwrite the rest
|
||||
if len(key) > 32 {
|
||||
copy(key[8:], key[32:])
|
||||
key = key[0 : len(key)-24]
|
||||
if err := txn.Put(dbi, key, val, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("moved key %s:%x to %x:%x", oldkey, val, key, val)
|
||||
}
|
||||
|
||||
// next -- will end on err
|
||||
key, val, err = cursor.Get(nil, nil, lmdb.Next)
|
||||
}
|
||||
if lmdbErr, ok := err.(*lmdb.OpError); ok && lmdbErr.Errno != lmdb.NotFound {
|
||||
// exited the loop with an error different from NOTFOUND
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// bump version
|
||||
if err := b.bumpVersion(txn, 2); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if version < 3 {
|
||||
log.Println("migration 3: move all keys from indexTag to indexTagAddr if they are like 'a' tags")
|
||||
cursor, err := txn.OpenCursor(b.indexTag)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open cursor in migration 2: %w", err)
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
key, val, err := cursor.Get(nil, nil, lmdb.First)
|
||||
for err == nil {
|
||||
if kind, pkb, d := eventstore.GetAddrTagElements(string(key[1 : len(key)-4])); len(pkb) == 32 {
|
||||
// it's an 'a' tag or alike
|
||||
if err := txn.Del(b.indexTag, key, val); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
k := make([]byte, 2+8+len(d)+4)
|
||||
binary.BigEndian.PutUint16(k[1:], kind)
|
||||
copy(k[2:], pkb[0:8]) // use only the first 8 bytes of the public key in the index
|
||||
copy(k[2+8:], d)
|
||||
copy(k[2+8+len(d):], key[len(key)-4:])
|
||||
if err := txn.Put(b.indexTagAddr, k, val, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("moved key %x:%x to %x:%x", key, val, k, val)
|
||||
}
|
||||
|
||||
// next -- will end on err
|
||||
key, val, err = cursor.Get(nil, nil, lmdb.Next)
|
||||
}
|
||||
if lmdbErr, ok := err.(*lmdb.OpError); ok && lmdbErr.Errno != lmdb.NotFound {
|
||||
// exited the loop with an error different from NOTFOUND
|
||||
return err
|
||||
}
|
||||
|
||||
// bump version
|
||||
if err := b.bumpVersion(txn, 3); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// the 4 first migrations go to trash because on version 3 we need to export and import all the data anyway
|
||||
if version < 4 {
|
||||
// if there is any data in the relay we will stop and notify the user,
|
||||
// otherwise we just set version to 3 and proceed
|
||||
|
||||
cursor, err := txn.OpenCursor(b.indexId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open cursor in migration 4: %w", err)
|
||||
}
|
||||
defer cursor.Close()
|
||||
|
||||
hasAnyEntries := false
|
||||
_, _, err = cursor.Get(nil, nil, lmdb.First)
|
||||
for err == nil {
|
||||
hasAnyEntries = true
|
||||
break
|
||||
}
|
||||
|
||||
if hasAnyEntries {
|
||||
return fmt.Errorf("your database is at version %d, but in order to migrate up to version 4 you must manually export all the events and then import again: run an old version of this software, export the data, then delete the database files, run the new version, import the data back in.", version)
|
||||
}
|
||||
|
||||
b.bumpVersion(txn, 3)
|
||||
}
|
||||
|
||||
if version < 5 {
|
||||
// ...
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user