Add fixed-size type support for IdPkTs and EventRef

- Update nostr dependency to v1.0.11 with new types package
- Add IDFixed(), PubFixed(), IDHex(), PubHex() methods to IdPkTs (see the usage sketch after this list)
- Add EventRef type: 80-byte stack-allocated event reference
- Add ToEventRef()/ToIdPkTs() conversion methods
- Update tests to use IDHex() instead of hex.Enc(r.Id)
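
A minimal usage sketch of the new accessors replacing manual hex encoding; the module path and the `matches` helper are assumptions for illustration, not code from this commit:

```go
package example

import (
	// Hypothetical import path; the real module root is not shown in this commit.
	"git.example/relay/pkg/interfaces/store"
)

// matches reports whether a query result refers to the expected event.
func matches(r *store.IdPkTs, wantHex string) bool {
	// Previously: hex.Enc(r.Id) == wantHex
	// Now the accessor encodes the fixed-size ID directly.
	return r.IDHex() == wantHex
}
```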

EventRef provides:
- Copy-on-assignment semantics (fixed-size arrays rather than slices; see the sketch after this list)
- Zero heap allocations for event reference passing
- Type-safe fixed-size fields (EventID, Pubkey)
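
The copy-on-assignment point is the standard Go distinction between slices and fixed-size arrays; a standalone sketch (not code from this commit):

```go
package main

import "fmt"

func main() {
	// Slices share their backing array: assignment copies only the slice header.
	s1 := []byte{1, 2, 3}
	s2 := s1
	s2[0] = 9
	fmt.Println(s1[0]) // prints 9: the original is affected

	// Fixed-size arrays copy their contents on assignment, which is what
	// EventRef relies on for its 32-byte ID and pubkey fields.
	a1 := [3]byte{1, 2, 3}
	a2 := a1
	a2[0] = 9
	fmt.Println(a1[0]) // prints 1: the original is untouched
}
```

The zero-allocation claim can be checked with the benchmarks added below, e.g. `go test -bench=EventRef -benchmem ./pkg/interfaces/store/` (path assumed from the file list), which should report 0 allocs/op for the copy and access paths.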

Files modified:
- go.mod, go.sum: Update nostr to v1.0.11
- pkg/interfaces/store/store_interface.go: Add methods and EventRef type
- pkg/interfaces/store/store_interface_test.go: New test file
- pkg/database/binary_tag_filter_test.go: Use IDHex()
- pkg/neo4j/fetch-event_test.go: Use IDHex(), PubHex()

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-23 14:47:50 +01:00
parent 0addc61549
commit 06063750e7
6 changed files with 366 additions and 33 deletions

pkg/database/binary_tag_filter_test.go

@@ -120,7 +120,7 @@ func TestBinaryTagFilterRegression(t *testing.T) {
// Verify we got the correct event
found := false
for _, r := range results {
-if hex.Enc(r.Id) == testEventIdHex {
+if r.IDHex() == testEventIdHex {
found = true
break
}
@@ -156,7 +156,7 @@ func TestBinaryTagFilterRegression(t *testing.T) {
// Verify we got the correct event
found := false
for _, r := range results {
-if hex.Enc(r.Id) == testEventIdHex {
+if r.IDHex() == testEventIdHex {
found = true
break
}
@@ -192,7 +192,7 @@ func TestBinaryTagFilterRegression(t *testing.T) {
// Verify we got the correct event
found := false
for _, r := range results {
-if hex.Enc(r.Id) == testEventIdHex {
+if r.IDHex() == testEventIdHex {
found = true
break
}
@@ -229,7 +229,7 @@ func TestBinaryTagFilterRegression(t *testing.T) {
// Verify we got the correct event
found := false
for _, r := range results {
-if hex.Enc(r.Id) == testEventIdHex {
+if r.IDHex() == testEventIdHex {
found = true
break
}

pkg/interfaces/store/store_interface.go

@@ -15,6 +15,7 @@ import (
"git.mleku.dev/mleku/nostr/encoders/event"
"git.mleku.dev/mleku/nostr/encoders/filter"
"git.mleku.dev/mleku/nostr/encoders/tag"
ntypes "git.mleku.dev/mleku/nostr/types"
)
// I am a type for a persistence layer for nostr events handled by a relay.
@@ -60,6 +61,9 @@ type Accountant interface {
EventCount() (count uint64, err error)
}
// IdPkTs holds event reference data with slice fields for backward compatibility.
// For new code preferring stack-allocated, copy-on-assignment semantics,
// use the IDFixed() and PubFixed() methods or convert to EventRef.
type IdPkTs struct {
Id []byte
Pub []byte
@@ -67,6 +71,87 @@ type IdPkTs struct {
Ser uint64
}
// IDFixed returns the event ID as a fixed-size array (stack-allocated, copied on assignment).
func (i *IdPkTs) IDFixed() ntypes.EventID {
return ntypes.EventIDFromBytes(i.Id)
}
// PubFixed returns the pubkey as a fixed-size array (stack-allocated, copied on assignment).
func (i *IdPkTs) PubFixed() ntypes.Pubkey {
return ntypes.PubkeyFromBytes(i.Pub)
}
// IDHex returns the event ID as a lowercase hex string.
func (i *IdPkTs) IDHex() string {
return ntypes.EventIDFromBytes(i.Id).Hex()
}
// PubHex returns the pubkey as a lowercase hex string.
func (i *IdPkTs) PubHex() string {
return ntypes.PubkeyFromBytes(i.Pub).Hex()
}
// ToEventRef converts IdPkTs to an EventRef (fully stack-allocated).
func (i *IdPkTs) ToEventRef() EventRef {
return NewEventRef(i.Id, i.Pub, i.Ts, i.Ser)
}
// EventRef is a stack-friendly event reference using fixed-size arrays.
// Total size: 80 bytes (32+32+8+8); small enough to pass by value, copies stay on the stack.
// Use this type when you need safe, immutable event references.
type EventRef struct {
id ntypes.EventID // 32 bytes
pub ntypes.Pubkey // 32 bytes
ts int64 // 8 bytes
ser uint64 // 8 bytes
}
// NewEventRef creates an EventRef from byte slices.
// The slices are copied into fixed-size arrays.
func NewEventRef(id, pub []byte, ts int64, ser uint64) EventRef {
return EventRef{
id: ntypes.EventIDFromBytes(id),
pub: ntypes.PubkeyFromBytes(pub),
ts: ts,
ser: ser,
}
}
// ID returns the event ID (copy, stays on stack).
func (r EventRef) ID() ntypes.EventID { return r.id }
// Pub returns the pubkey (copy, stays on stack).
func (r EventRef) Pub() ntypes.Pubkey { return r.pub }
// Ts returns the timestamp.
func (r EventRef) Ts() int64 { return r.ts }
// Ser returns the serial number.
func (r EventRef) Ser() uint64 { return r.ser }
// IDHex returns the event ID as lowercase hex.
func (r EventRef) IDHex() string { return r.id.Hex() }
// PubHex returns the pubkey as lowercase hex.
func (r EventRef) PubHex() string { return r.pub.Hex() }
// IDSlice returns a slice view of the ID (shares memory, use carefully).
func (r *EventRef) IDSlice() []byte { return r.id.Bytes() }
// PubSlice returns a slice view of the pubkey (shares memory, use carefully).
func (r *EventRef) PubSlice() []byte { return r.pub.Bytes() }
// ToIdPkTs converts EventRef to IdPkTs for backward compatibility.
// Note: This allocates new slices.
func (r EventRef) ToIdPkTs() *IdPkTs {
return &IdPkTs{
Id: r.id.Copy(),
Pub: r.pub.Copy(),
Ts: r.ts,
Ser: r.ser,
}
}
type Querier interface {
QueryForIds(c context.Context, f *filter.F) (evs []*IdPkTs, err error)
}

pkg/interfaces/store/store_interface_test.go

@@ -0,0 +1,248 @@
package store
import (
"bytes"
"testing"
ntypes "git.mleku.dev/mleku/nostr/types"
)
func TestIdPkTsFixedMethods(t *testing.T) {
// Create an IdPkTs with sample data
id := make([]byte, 32)
pub := make([]byte, 32)
for i := 0; i < 32; i++ {
id[i] = byte(i)
pub[i] = byte(i + 32)
}
ipk := &IdPkTs{
Id: id,
Pub: pub,
Ts: 1234567890,
Ser: 42,
}
// Test IDFixed returns correct data
idFixed := ipk.IDFixed()
if !bytes.Equal(idFixed[:], id) {
t.Errorf("IDFixed: got %x, want %x", idFixed[:], id)
}
// Test IDFixed returns a copy
idFixed[0] = 0xFF
if ipk.Id[0] == 0xFF {
t.Error("IDFixed should return a copy, not a reference")
}
// Test PubFixed returns correct data
pubFixed := ipk.PubFixed()
if !bytes.Equal(pubFixed[:], pub) {
t.Errorf("PubFixed: got %x, want %x", pubFixed[:], pub)
}
// Test hex methods
idHex := ipk.IDHex()
expectedIDHex := "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
if idHex != expectedIDHex {
t.Errorf("IDHex: got %s, want %s", idHex, expectedIDHex)
}
}
func TestEventRef(t *testing.T) {
id := make([]byte, 32)
pub := make([]byte, 32)
for i := 0; i < 32; i++ {
id[i] = byte(i)
pub[i] = byte(i + 100)
}
// Create EventRef
ref := NewEventRef(id, pub, 1234567890, 42)
// Test accessors - need to get addressable values for slicing
refID := ref.ID()
refPub := ref.Pub()
if !bytes.Equal(refID[:], id) {
t.Error("ID() mismatch")
}
if !bytes.Equal(refPub[:], pub) {
t.Error("Pub() mismatch")
}
if ref.Ts() != 1234567890 {
t.Error("Ts() mismatch")
}
if ref.Ser() != 42 {
t.Error("Ser() mismatch")
}
// Test copy-on-assignment
ref2 := ref
testID := ref.ID()
testID[0] = 0xFF
ref2ID := ref2.ID()
if ref2ID[0] == 0xFF {
t.Error("EventRef should copy on assignment")
}
// Test hex methods
if len(ref.IDHex()) != 64 {
t.Errorf("IDHex length: got %d, want 64", len(ref.IDHex()))
}
}
func TestEventRefToIdPkTs(t *testing.T) {
id := make([]byte, 32)
pub := make([]byte, 32)
for i := 0; i < 32; i++ {
id[i] = byte(i)
pub[i] = byte(i + 100)
}
ref := NewEventRef(id, pub, 1234567890, 42)
ipk := ref.ToIdPkTs()
// Verify conversion
if !bytes.Equal(ipk.Id, id) {
t.Error("ToIdPkTs: Id mismatch")
}
if !bytes.Equal(ipk.Pub, pub) {
t.Error("ToIdPkTs: Pub mismatch")
}
if ipk.Ts != 1234567890 {
t.Error("ToIdPkTs: Ts mismatch")
}
if ipk.Ser != 42 {
t.Error("ToIdPkTs: Ser mismatch")
}
// Verify independence (modifications don't affect original)
ipk.Id[0] = 0xFF
if ref.ID()[0] == 0xFF {
t.Error("ToIdPkTs should create independent copy")
}
}
func TestIdPkTsToEventRef(t *testing.T) {
id := make([]byte, 32)
pub := make([]byte, 32)
for i := 0; i < 32; i++ {
id[i] = byte(i)
pub[i] = byte(i + 100)
}
ipk := &IdPkTs{
Id: id,
Pub: pub,
Ts: 1234567890,
Ser: 42,
}
ref := ipk.ToEventRef()
// Verify conversion - need addressable values for slicing
refID := ref.ID()
refPub := ref.Pub()
if !bytes.Equal(refID[:], id) {
t.Error("ToEventRef: ID mismatch")
}
if !bytes.Equal(refPub[:], pub) {
t.Error("ToEventRef: Pub mismatch")
}
if ref.Ts() != 1234567890 {
t.Error("ToEventRef: Ts mismatch")
}
if ref.Ser() != 42 {
t.Error("ToEventRef: Ser mismatch")
}
}
func BenchmarkEventRefCopy(b *testing.B) {
id := make([]byte, 32)
pub := make([]byte, 32)
for i := 0; i < 32; i++ {
id[i] = byte(i)
pub[i] = byte(i + 100)
}
ref := NewEventRef(id, pub, 1234567890, 42)
b.ResetTimer()
for i := 0; i < b.N; i++ {
ref2 := ref // Copy (should stay on stack)
_ = ref2
}
}
func BenchmarkIdPkTsToEventRef(b *testing.B) {
id := make([]byte, 32)
pub := make([]byte, 32)
for i := 0; i < 32; i++ {
id[i] = byte(i)
pub[i] = byte(i + 100)
}
ipk := &IdPkTs{
Id: id,
Pub: pub,
Ts: 1234567890,
Ser: 42,
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
ref := ipk.ToEventRef()
_ = ref
}
}
func BenchmarkEventRefAccess(b *testing.B) {
id := make([]byte, 32)
pub := make([]byte, 32)
for i := 0; i < 32; i++ {
id[i] = byte(i)
pub[i] = byte(i + 100)
}
ref := NewEventRef(id, pub, 1234567890, 42)
b.ResetTimer()
for i := 0; i < b.N; i++ {
idCopy := ref.ID()
pubCopy := ref.Pub()
_ = idCopy
_ = pubCopy
}
}
func BenchmarkIdPkTsFixedAccess(b *testing.B) {
id := make([]byte, 32)
pub := make([]byte, 32)
for i := 0; i < 32; i++ {
id[i] = byte(i)
pub[i] = byte(i + 100)
}
ipk := &IdPkTs{
Id: id,
Pub: pub,
Ts: 1234567890,
Ser: 42,
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
idCopy := ipk.IDFixed()
pubCopy := ipk.PubFixed()
_ = idCopy
_ = pubCopy
}
}
// Ensure EventRef implements expected interface at compile time
var _ interface {
ID() ntypes.EventID
Pub() ntypes.Pubkey
Ts() int64
Ser() uint64
} = EventRef{}

pkg/neo4j/fetch-event_test.go

@@ -339,11 +339,11 @@ func TestGetFullIdPubkeyBySerial(t *testing.T) {
t.Fatal("Expected non-nil result")
}
-if hex.Enc(idPkTs.Id) != hex.Enc(ev.ID[:]) {
+if idPkTs.IDHex() != hex.Enc(ev.ID[:]) {
t.Fatalf("ID mismatch")
}
-if hex.Enc(idPkTs.Pub) != hex.Enc(ev.Pubkey[:]) {
+if idPkTs.PubHex() != hex.Enc(ev.Pubkey[:]) {
t.Fatalf("Pubkey mismatch")
}