complete the marshal/unmarshal of events using the new pool enabled tag codecs
@@ -1,7 +1,26 @@
package event

import (
    "fmt"
    "io"

    "github.com/templexxx/xhex"
    "lol.mleku.dev/chk"
    "lol.mleku.dev/errorf"
    "lol.mleku.dev/log"
    "next.orly.dev/pkg/crypto/ec/schnorr"
    "next.orly.dev/pkg/crypto/sha256"
    "next.orly.dev/pkg/encoders/ints"
    "next.orly.dev/pkg/encoders/kind"
    "next.orly.dev/pkg/encoders/tag"
    "next.orly.dev/pkg/encoders/text"
    "next.orly.dev/pkg/utils"
    "next.orly.dev/pkg/utils/bufpool"
)

// E is the primary datatype of nostr. This is the form of the structure that
// defines its JSON string-based format.
// defines its JSON string-based format. Always use New() and Free() to create
// and free event.E.
type E struct {

    // ID is the SHA256 hash of the canonical encoding of the event in binary format
@@ -19,7 +38,7 @@ type E struct {

    // Tags are a list of tags, which are a list of strings usually structured
    // as a 3-layer scheme indicating specific features of an event.
    Tags [][]byte
    Tags *tag.S

    // Content is an arbitrary string that can contain anything, but usually
    // conforming to a specification relating to the Kind and the Tags.
@@ -28,6 +47,305 @@ type E struct {
    // Sig is the signature on the ID hash that validates as coming from the
    // Pubkey in binary format.
    Sig []byte

    // b is the decode buffer for the event.E. this is where the UnmarshalJSON will
    // source the memory to store all of the fields except for the tags.
    b bufpool.B
}

var (
    jId        = []byte("id")
    jPubkey    = []byte("pubkey")
    jCreatedAt = []byte("created_at")
    jKind      = []byte("kind")
    jTags      = []byte("tags")
    jContent   = []byte("content")
    jSig       = []byte("sig")
)

// New returns a new event.E. The returned event.E should be freed with Free()
// to return the unmarshalling buffer to the bufpool.
func New() *E {
    return &E{
        b: bufpool.Get(),
    }
}

// Free returns the event.E to the pool, as well as nilling all of the fields.
// This should hint to the GC that the event.E can be freed, and the memory
// reused. The decode buffer will be returned to the pool for reuse.
func (ev *E) Free() {
    bufpool.Put(ev.b)
    ev.ID = nil
    ev.Pubkey = nil
    ev.Tags = nil
    ev.Content = nil
    ev.Sig = nil
    ev.b = nil
}

// MarshalJSON marshals an event.E into a JSON byte string.
//
// Call bufpool.PutBytes(b) to return the buffer to the bufpool after use.
func (ev *E) MarshalJSON() (b []byte, err error) {
    b = bufpool.Get()
    b = b[:0]
    b = append(b, '{')
    b = append(b, '"')
    b = append(b, jId...)
    b = append(b, `":"`...)
    b = b[:len(b)+2*sha256.Size]
    xhex.Encode(b[len(b)-2*sha256.Size:], ev.ID)
    b = append(b, `","`...)
    b = append(b, jPubkey...)
    b = append(b, `":"`...)
    b = b[:len(b)+2*schnorr.PubKeyBytesLen]
    xhex.Encode(b[len(b)-2*schnorr.PubKeyBytesLen:], ev.Pubkey)
    b = append(b, `","`...)
    b = append(b, jCreatedAt...)
    b = append(b, `":`...)
    b = ints.New(ev.CreatedAt).Marshal(b)
    b = append(b, `,"`...)
    b = append(b, jKind...)
    b = append(b, `":`...)
    b = ints.New(ev.Kind).Marshal(b)
    b = append(b, `,"`...)
    b = append(b, jTags...)
    b = append(b, `":[`...)
    lts := len(*ev.Tags) - 1
    for i, tt := range *ev.Tags {
        b = append(b, '[')
        lt := len(tt.T) - 1
        for j, t := range tt.T {
            b = append(b, '"')
            b = append(b, t...)
            b = append(b, '"')
            if j < lt {
                b = append(b, ',')
            }
        }
        b = append(b, ']')
        if i < lts {
            b = append(b, ',')
        }
    }
    b = append(b, `],"`...)
    b = append(b, jContent...)
    b = append(b, `":"`...)
    // it can happen the slice has insufficient capacity to hold the content AND
    // the signature at this point, because the signature encoder must have
    // sufficient capacity pre-allocated as it does not append to the buffer,
    // unlike every other encoding function up to this point.
    if cap(b) < len(b)+len(ev.Content)+7+256+2 {
        b2 := make([]byte, len(b)+len(ev.Content)*2+7+256+2)
        copy(b2, b)
        b2 = b2[:len(b)]
        // return the old buffer to the pool for reuse.
        bufpool.PutBytes(b)
        b = b2
    }
    b = text.NostrEscape(b, ev.Content)
    b = append(b, `","`...)
    b = append(b, jSig...)
    b = append(b, `":"`...)
    b = b[:len(b)+2*schnorr.SignatureSize]
    xhex.Encode(b[len(b)-2*schnorr.SignatureSize:], ev.Sig)
    b = append(b, `"}`...)
    return
}

// UnmarshalJSON unmarshalls a JSON string into an event.E.
//
// Call ev.Free() to return the provided buffer to the bufpool afterwards.
func (ev *E) UnmarshalJSON(b []byte) (err error) {
    key := make([]byte, 0, 9)
    for ; len(b) > 0; b = b[1:] {
        // Skip whitespace
        if isWhitespace(b[0]) {
            continue
        }
        if b[0] == '{' {
            b = b[1:]
            goto BetweenKeys
        }
    }
    goto eof
BetweenKeys:
    for ; len(b) > 0; b = b[1:] {
        // Skip whitespace
        if isWhitespace(b[0]) {
            continue
        }
        if b[0] == '"' {
            b = b[1:]
            goto InKey
        }
    }
    goto eof
InKey:
    for ; len(b) > 0; b = b[1:] {
        if b[0] == '"' {
            b = b[1:]
            goto InKV
        }
        key = append(key, b[0])
    }
    goto eof
InKV:
    for ; len(b) > 0; b = b[1:] {
        // Skip whitespace
        if isWhitespace(b[0]) {
            continue
        }
        if b[0] == ':' {
            b = b[1:]
            goto InVal
        }
    }
    goto eof
InVal:
    // Skip whitespace before value
    for len(b) > 0 && isWhitespace(b[0]) {
        b = b[1:]
    }
    switch key[0] {
    case jId[0]:
        if !utils.FastEqual(jId, key) {
            goto invalid
        }
        var id []byte
        if id, b, err = text.UnmarshalHex(b); chk.E(err) {
            return
        }
        if len(id) != sha256.Size {
            err = errorf.E(
                "invalid ID, require %d got %d", sha256.Size,
                len(id),
            )
            return
        }
        ev.ID = id
        goto BetweenKV
    case jPubkey[0]:
        if !utils.FastEqual(jPubkey, key) {
            goto invalid
        }
        var pk []byte
        if pk, b, err = text.UnmarshalHex(b); chk.E(err) {
            return
        }
        if len(pk) != schnorr.PubKeyBytesLen {
            err = errorf.E(
                "invalid pubkey, require %d got %d",
                schnorr.PubKeyBytesLen, len(pk),
            )
            return
        }
        ev.Pubkey = pk
        goto BetweenKV
    case jKind[0]:
        if !utils.FastEqual(jKind, key) {
            goto invalid
        }
        k := kind.New(0)
        if b, err = k.Unmarshal(b); chk.E(err) {
            return
        }
        ev.Kind = k.ToU16()
        goto BetweenKV
    case jTags[0]:
        if !utils.FastEqual(jTags, key) {
            goto invalid
        }
        ev.Tags = new(tag.S)
        if b, err = ev.Tags.Unmarshal(b); chk.E(err) {
            return
        }
        goto BetweenKV
    case jSig[0]:
        if !utils.FastEqual(jSig, key) {
            goto invalid
        }
        var sig []byte
        if sig, b, err = text.UnmarshalHex(b); chk.E(err) {
            return
        }
        if len(sig) != schnorr.SignatureSize {
            err = errorf.E(
                "invalid sig length, require %d got %d '%s'\n%s",
                schnorr.SignatureSize, len(sig), b, b,
            )
            return
        }
        ev.Sig = sig
        goto BetweenKV
    case jContent[0]:
        if key[1] == jContent[1] {
            if !utils.FastEqual(jContent, key) {
                goto invalid
            }
            if ev.Content, b, err = text.UnmarshalQuoted(b); chk.T(err) {
                return
            }
            goto BetweenKV
        } else if key[1] == jCreatedAt[1] {
            if !utils.FastEqual(jCreatedAt, key) {
                goto invalid
            }
            if b, err = ints.New(0).Unmarshal(b); chk.T(err) {
                return
            }
            goto BetweenKV
        } else {
            goto invalid
        }
    default:
        goto invalid
    }
BetweenKV:
    key = key[:0]
    for ; len(b) > 0; b = b[1:] {
        // Skip whitespace
        if isWhitespace(b[0]) {
            continue
        }

        switch {
        case len(b) == 0:
            return
        case b[0] == '}':
            b = b[1:]
            goto AfterClose
        case b[0] == ',':
            b = b[1:]
            goto BetweenKeys
        case b[0] == '"':
            b = b[1:]
            goto InKey
        }
    }
    log.I.F("between kv")
    goto eof
AfterClose:
    // Skip any trailing whitespace
    for len(b) > 0 && isWhitespace(b[0]) {
        b = b[1:]
    }
    return
invalid:
    err = fmt.Errorf(
        "invalid key,\n'%s'\n'%s'\n'%s'", string(b), string(b[:len(b)]),
        string(b),
    )
    return
eof:
    err = io.EOF
    return
}

// isWhitespace returns true if the byte is a whitespace character (space, tab, newline, carriage return).
func isWhitespace(b byte) bool {
    return b == ' ' || b == '\t' || b == '\n' || b == '\r'
}

// S is an array of event.E that sorts in reverse chronological order.
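
Taken together, a minimal usage sketch of the pool-backed event codec above. The import paths follow the file paths appearing in this diff, and the field values are placeholders rather than a validly signed event:

    package main

    import (
        "time"

        "next.orly.dev/pkg/crypto/ec/schnorr"
        "next.orly.dev/pkg/crypto/sha256"
        "next.orly.dev/pkg/encoders/event"
        "next.orly.dev/pkg/encoders/tag"
        "next.orly.dev/pkg/utils/bufpool"
    )

    func main() {
        // New() checks a decode buffer out of the bufpool; Free() returns it.
        ev := event.New()
        ev.ID = make([]byte, sha256.Size)                // placeholder 32-byte ID
        ev.Pubkey = make([]byte, schnorr.PubKeyBytesLen) // placeholder 32-byte pubkey
        ev.CreatedAt = time.Now().Unix()
        ev.Kind = 1
        ev.Tags = &tag.S{{T: [][]byte{[]byte("t"), []byte("hashtag")}}}
        ev.Content = []byte("hello")
        ev.Sig = make([]byte, schnorr.SignatureSize) // placeholder 64-byte signature

        b, err := ev.MarshalJSON() // b comes from the bufpool
        if err != nil {
            panic(err)
        }
        ev2 := event.New()
        if err = ev2.UnmarshalJSON(b); err != nil {
            panic(err)
        }

        // hand the buffers and events back to their pools once finished
        bufpool.PutBytes(b)
        ev.Free()
        ev2.Free()
    }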

pkg/encoders/event/event_test.go (new file, 63 lines)
@@ -0,0 +1,63 @@
package event

import (
    "testing"
    "time"

    "github.com/pkg/profile"
    "lol.mleku.dev/chk"
    "lukechampine.com/frand"
    "next.orly.dev/pkg/encoders/hex"
    "next.orly.dev/pkg/encoders/tag"
    "next.orly.dev/pkg/utils"
    "next.orly.dev/pkg/utils/bufpool"
)

func TestMarshalJSON(t *testing.T) {
    // lol.SetLogLevel("trace")
    prof := profile.Start(profile.MemProfile)
    defer prof.Stop()
    for range 1000000 {
        ev := New()
        ev.ID = frand.Bytes(32)
        ev.Pubkey = frand.Bytes(32)
        ev.CreatedAt = time.Now().Unix()
        ev.Kind = 1
        ev.Tags = &tag.S{
            {T: [][]byte{[]byte("t"), []byte("hashtag")}},
            {
                T: [][]byte{
                    []byte("e"),
                    hex.EncAppend(nil, frand.Bytes(32)),
                },
            },
        }
        ev.Content = frand.Bytes(frand.Intn(1024) + 1)
        ev.Sig = frand.Bytes(64)
        // log.I.S(ev)
        b, err := ev.MarshalJSON()
        if err != nil {
            t.Fatal(err)
        }
        var bc []byte
        bc = append(bc, b...)
        // log.I.F("%s", bc)
        ev2 := New()
        if err = ev2.UnmarshalJSON(b); chk.E(err) {
            t.Fatal(err)
        }
        var b2 []byte
        if b2, err = ev.MarshalJSON(); err != nil {
            t.Fatal(err)
        }
        if !utils.FastEqual(bc, b2) {
            t.Errorf("failed to re-marshal back original")
        }
        // free up the resources for the next iteration
        ev.Free()
        ev2.Free()
        bufpool.PutBytes(b)
        bufpool.PutBytes(b2)
        bufpool.PutBytes(bc)
    }
}

@@ -19,8 +19,8 @@ type K struct {
    K uint16
}

// New creates a new kind.K with a provided integer value. Note that anything larger than 2^16
// will be truncated.
// New creates a new kind.K with a provided integer value. Note that anything
// larger than 2^16 will be truncated.
func New[V constraints.Integer](k V) (ki *K) { return &K{uint16(k)} }

// ToInt returns the value of the kind.K as an int.
@@ -55,7 +55,8 @@ func (k *K) ToU64() uint64 {
    return uint64(k.K)
}

// Name returns the human readable string describing the semantics of the kind.K.
// Name returns the human readable string describing the semantics of the
// kind.K.
func (k *K) Name() string { return GetString(k) }

// Equal checks if
@@ -76,8 +77,8 @@ var Privileged = []*K{
    PrivateDirectMessage,
}

// IsPrivileged returns true if the type is the kind of message nobody else than the pubkeys in
// the event and p tags of the event are party to.
// IsPrivileged returns true if the type is the kind of message nobody else than
// the pubkeys in the event and p tags of the event are party to.
func (k *K) IsPrivileged() (is bool) {
    for i := range Privileged {
        if k.Equal(Privileged[i]) {
@@ -87,8 +88,11 @@ func (k *K) IsPrivileged() (is bool) {
    return
}

// Marshal renders the kind.K into bytes containing the ASCII string form of the kind number.
func (k *K) Marshal(dst []byte) (b []byte) { return ints.New(k.ToU64()).Marshal(dst) }
// Marshal renders the kind.K into bytes containing the ASCII string form of the
// kind number.
func (k *K) Marshal(dst []byte) (b []byte) {
    return ints.New(k.ToU64()).Marshal(dst)
}

// Unmarshal decodes a byte string into a kind.K.
func (k *K) Unmarshal(b []byte) (r []byte, err error) {
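
For reference, the Marshal/Unmarshal pair documented above round-trips the ASCII kind number like this (a sketch assuming the kind package import path used elsewhere in this diff):

    // assumes: import "next.orly.dev/pkg/encoders/kind"
    k := kind.New(30023)
    b := k.Marshal(nil) // "30023" in ASCII
    k2 := kind.New(0)
    if _, err := k2.Unmarshal(b); err != nil {
        // handle the decode error
    }
    _ = k2.ToU16() // 30023 again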

pkg/encoders/tag/atag/atag.go (new file, 50 lines)
@@ -0,0 +1,50 @@
// Package atag implements a special, optimized handling for keeping a tags
// (address) in a more memory efficient form while working with these tags.
package atag

import (
    "bytes"

    "lol.mleku.dev/chk"
    "next.orly.dev/pkg/encoders/hex"
    "next.orly.dev/pkg/encoders/ints"
    "next.orly.dev/pkg/encoders/kind"
)

// T is a data structure for what is found in an `a` tag: kind:pubkey:arbitrary data
type T struct {
    Kind   *kind.K
    PubKey []byte
    DTag   []byte
}

// Marshal an atag.T into raw bytes.
func (t *T) Marshal(dst []byte) (b []byte) {
    b = t.Kind.Marshal(dst)
    b = append(b, ':')
    b = hex.EncAppend(b, t.PubKey)
    b = append(b, ':')
    b = append(b, t.DTag...)
    return
}

// Unmarshal an atag.T from its ascii encoding.
func (t *T) Unmarshal(b []byte) (r []byte, err error) {
    split := bytes.Split(b, []byte{':'})
    if len(split) != 3 {
        return
    }
    // kind
    kin := ints.New(uint16(0))
    if _, err = kin.Unmarshal(split[0]); chk.E(err) {
        return
    }
    t.Kind = kind.New(kin.Uint16())
    // pubkey
    if t.PubKey, err = hex.DecAppend(t.PubKey, split[1]); chk.E(err) {
        return
    }
    // d-tag
    t.DTag = split[2]
    return
}
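
A worked example of the kind:pubkey:d-tag form handled above (a sketch assuming this atag package together with the kind package from this diff; the pubkey is a zero-filled placeholder):

    // assumes: imports "next.orly.dev/pkg/encoders/kind" and
    // "next.orly.dev/pkg/encoders/tag/atag"
    pk := make([]byte, 32) // placeholder x-only pubkey
    a := &atag.T{Kind: kind.New(30023), PubKey: pk, DTag: []byte("my-article")}
    b := a.Marshal(nil) // "30023:<64 hex chars>:my-article"

    var a2 atag.T
    if _, err := a2.Unmarshal(b); err != nil {
        // handle the decode error
    }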

pkg/encoders/tag/atag/atag_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
package atag

import (
    "math"
    "testing"

    "lol.mleku.dev/chk"
    "lol.mleku.dev/log"
    "lukechampine.com/frand"
    "next.orly.dev/pkg/crypto/ec/schnorr"
    "next.orly.dev/pkg/encoders/hex"
    "next.orly.dev/pkg/encoders/kind"
    "next.orly.dev/pkg/utils"
)

func TestT_Marshal_Unmarshal(t *testing.T) {
    k := kind.New(frand.Intn(math.MaxUint16))
    pk := make([]byte, schnorr.PubKeyBytesLen)
    frand.Read(pk)
    d := make([]byte, frand.Intn(10)+3)
    frand.Read(d)
    var dtag string
    dtag = hex.Enc(d)
    t1 := &T{
        Kind:   k,
        PubKey: pk,
        DTag:   []byte(dtag),
    }
    b1 := t1.Marshal(nil)
    log.I.F("%s", b1)
    t2 := &T{}
    var r []byte
    var err error
    if r, err = t2.Unmarshal(b1); chk.E(err) {
        t.Fatal(err)
    }
    if len(r) > 0 {
        log.I.S(r)
        t.Fatalf("remainder")
    }
    b2 := t2.Marshal(nil)
    if !utils.FastEqual(b1, b2) {
        t.Fatalf("failed to re-marshal back original")
    }
}

pkg/encoders/tag/tag.go (new file, 75 lines)
@@ -0,0 +1,75 @@
// Package tag provides an implementation of a nostr tag list, an array of
// strings with a usually single letter first "key" field, including methods to
// compare, marshal/unmarshal and access elements with their proper semantics.
package tag

import (
    "lol.mleku.dev/errorf"
    "next.orly.dev/pkg/encoders/text"
    "next.orly.dev/pkg/utils/bufpool"
)

// The tag position meanings, so they are clear when reading.
const (
    Key = iota
    Value
    Relay
)

type T struct {
    T [][]byte
    b bufpool.B
}

func New(t ...[]byte) *T {
    return &T{T: t, b: bufpool.Get()}
}

func (t *T) Free() {
    bufpool.Put(t.b)
    t.T = nil
}

// Marshal encodes a tag.T as standard minified JSON array of strings.
//
// Call bufpool.PutBytes(b) to return the buffer to the bufpool after use.
func (t *T) Marshal() (b []byte) {
    dst := t.b
    dst = append(dst, '[')
    for i, s := range t.T {
        dst = text.AppendQuote(dst, s, text.NostrEscape)
        if i < len(t.T)-1 {
            dst = append(dst, ',')
        }
    }
    dst = append(dst, ']')
    return dst
}

// Unmarshal decodes a standard minified JSON array of strings to a tags.T.
//
// Call bufpool.PutBytes(b) to return the buffer to the bufpool after use.
func (t *T) Unmarshal(b []byte) (r []byte, err error) {
    var inQuotes, openedBracket bool
    var quoteStart int
    for i := 0; i < len(b); i++ {
        if !openedBracket && b[i] == '[' {
            openedBracket = true
        } else if !inQuotes {
            if b[i] == '"' {
                inQuotes, quoteStart = true, i+1
            } else if b[i] == ']' {
                return b[i+1:], err
            }
        } else if b[i] == '\\' && i < len(b)-1 {
            i++
        } else if b[i] == '"' {
            inQuotes = false
            t.T = append(t.T, text.NostrUnescape(b[quoteStart:i]))
        }
    }
    if !openedBracket || inQuotes {
        return nil, errorf.E("tag: failed to parse tag")
    }
    return
}
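
A short sketch of the single-tag codec above (assuming this package; Marshal writes into the tag's pooled buffer, so Free() should only be called once the output is no longer referenced):

    // assumes: import "next.orly.dev/pkg/encoders/tag"
    tg := tag.New([]byte("t"), []byte("hashtag"))
    b := tg.Marshal() // ["t","hashtag"]

    tg2 := tag.New()
    if _, err := tg2.Unmarshal(b); err != nil {
        // handle the parse error
    }
    // tg.Free() / tg2.Free() return the pooled buffers once b and tg2.T are done with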

pkg/encoders/tag/tag_test.go (new file, 32 lines)
@@ -0,0 +1,32 @@
package tag

import (
    "testing"

    "lol.mleku.dev/chk"
    "lukechampine.com/frand"
    "next.orly.dev/pkg/utils"
)

func TestMarshalUnmarshal(t *testing.T) {
    for _ = range 1000 {
        n := frand.Intn(8)
        tg := New()
        for _ = range n {
            b1 := make([]byte, frand.Intn(8))
            _, _ = frand.Read(b1)
            tg.T = append(tg.T, b1)
        }
        tb := tg.Marshal()
        var tbc []byte
        tbc = append(tbc, tb...)
        tg2 := New()
        if _, err := tg2.Unmarshal(tb); chk.E(err) {
            t.Fatal(err)
        }
        tb2 := tg2.Marshal()
        if !utils.FastEqual(tbc, tb2) {
            t.Fatalf("failed to re-marshal back original")
        }
    }
}

pkg/encoders/tag/tags.go (new file, 78 lines)
@@ -0,0 +1,78 @@
package tag

import (
    "lol.mleku.dev/chk"
    "next.orly.dev/pkg/utils/bufpool"
)

// S is a list of tag.T - which are lists of string elements with ordering and
// no uniqueness constraint (not a set).
type S []*T

// MarshalJSON encodes a tags.T appended to a provided byte slice in JSON form.
//
// Call bufpool.PutBytes(b) to return the buffer to the bufpool after use.
func (s *S) MarshalJSON() (b []byte, err error) {
    b = bufpool.Get()
    b = append(b, '[')
    for i, ss := range *s {
        b = append(b, ss.Marshal()...)
        if i < len(*s)-1 {
            b = append(b, ',')
        }
    }
    b = append(b, ']')
    return
}

// UnmarshalJSON a tags.T from a provided byte slice and return what remains
// after the end of the array.
//
// Call bufpool.PutBytes(b) to return the buffer to the bufpool after use.
func (s *S) UnmarshalJSON(b []byte) (err error) {
    _, err = s.Unmarshal(b)
    return
}

// Unmarshal a tags.T from a provided byte slice and return what remains after
// the end of the array.
func (s *S) Unmarshal(b []byte) (r []byte, err error) {
    r = b[:]
    for len(r) > 0 {
        switch r[0] {
        case '[':
            r = r[1:]
            goto inTags
        case ',':
            r = r[1:]
            // next
        case ']':
            r = r[1:]
            // the end
            return
        default:
            r = r[1:]
        }
    inTags:
        for len(r) > 0 {
            switch r[0] {
            case '[':
                tt := New()
                if r, err = tt.Unmarshal(r); chk.E(err) {
                    return
                }
                *s = append(*s, tt)
            case ',':
                r = r[1:]
                // next
            case ']':
                r = r[1:]
                // the end
                return
            default:
                r = r[1:]
            }
        }
    }
    return
}
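
And the list-of-tags codec above on a concrete nested array (a sketch assuming this package):

    // assumes: import "next.orly.dev/pkg/encoders/tag"
    in := []byte(`[["t","hashtag"],["e","abcd"]]`)
    s := new(tag.S)
    rem, err := s.Unmarshal(in) // rem holds whatever follows the closing ']'
    if err != nil {
        // handle the parse error
    }
    out, _ := s.MarshalJSON() // re-encodes the same array; release with bufpool.PutBytes(out)
    _, _ = rem, out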

pkg/encoders/tag/tags_test.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package tag

import (
    "testing"

    "lol.mleku.dev/chk"
    "lukechampine.com/frand"
    "next.orly.dev/pkg/utils"
)

func TestSMarshalUnmarshal(t *testing.T) {
    for _ = range 100 {
        tgs := new(S)
        n := frand.Intn(8)
        for _ = range n {
            n := frand.Intn(8)
            tg := New()
            for _ = range n {
                b1 := make([]byte, frand.Intn(8))
                _, _ = frand.Read(b1)
                tg.T = append(tg.T, b1)
            }
            *tgs = append(*tgs, tg)
        }
        tgsb, _ := tgs.MarshalJSON()
        var tbc []byte
        tbc = append(tbc, tgsb...)
        tgs2 := new(S)
        if err := tgs2.UnmarshalJSON(tgsb); chk.E(err) {
            t.Fatal(err)
        }
        tgsb2, _ := tgs2.MarshalJSON()
        if !utils.FastEqual(tbc, tgsb2) {
            t.Fatalf("failed to re-marshal back original")
        }
    }
}

@@ -63,11 +63,12 @@ func NostrUnescape(dst []byte) (b []byte) {
    c := dst[r]
    switch {

    // nip-01 specifies the following single letter C-style escapes for control
    // codes under 0x20.
    // nip-01 specifies the following single letter C-style escapes for
    // control codes under 0x20.
    //
    // no others are specified but must be preserved, so only these can be
    // safely decoded at runtime as they must be re-encoded when marshalled.
    // no others are specified but must be preserved, so only these can
    // be safely decoded at runtime as they must be re-encoded when
    // marshalled.
    case c == '"':
        dst[w] = '"'
        w++
@@ -90,8 +91,8 @@ func NostrUnescape(dst []byte) (b []byte) {
        dst[w] = '\r'
        w++

    // special cases for non-nip-01 specified json escapes (must be preserved for ID
    // generation).
    // special cases for non-nip-01 specified json escapes (must be
    // preserved for ID generation).
    case c == 'u':
        dst[w] = '\\'
        w++
@@ -103,7 +104,8 @@ func NostrUnescape(dst []byte) (b []byte) {
        dst[w] = '/'
        w++

    // special case for octal escapes (must be preserved for ID generation).
    // special case for octal escapes (must be preserved for ID
    // generation).
    case c >= '0' && c <= '9':
        dst[w] = '\\'
        w++
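
The escaping rules described in the comments above are symmetric with NostrEscape; a sketch (assuming this text package, with NostrUnescape decoding in place):

    // assumes: import "next.orly.dev/pkg/encoders/text"
    escaped := text.NostrEscape(nil, []byte("line1\nline2 \"quoted\""))
    // escaped now contains backslash escapes for the newline and the quotes
    plain := text.NostrUnescape(escaped) // decodes in place, returns the shortened slice
    _ = plain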

pkg/encoders/text/helpers.go (new file, 255 lines)
@@ -0,0 +1,255 @@
package text

import (
    "io"

    "github.com/templexxx/xhex"
    "lol.mleku.dev/chk"
    "lol.mleku.dev/errorf"
    "next.orly.dev/pkg/encoders/hex"
    "next.orly.dev/pkg/utils"
)

// JSONKey generates the JSON format for an object key and terminates with the semicolon.
func JSONKey(dst, k []byte) (b []byte) {
    dst = append(dst, '"')
    dst = append(dst, k...)
    dst = append(dst, '"', ':')
    b = dst
    return
}

// UnmarshalHex takes a byte string that should contain a quoted hexadecimal
// encoded value, decodes it using a SIMD hex codec and returns the decoded
// bytes in a newly allocated buffer.
func UnmarshalHex(b []byte) (h []byte, rem []byte, err error) {
    rem = b[:]
    var inQuote bool
    var start int
    for i := 0; i < len(b); i++ {
        if !inQuote {
            if b[i] == '"' {
                inQuote = true
                start = i + 1
            }
        } else if b[i] == '"' {
            hexStr := b[start:i]
            rem = b[i+1:]
            l := len(hexStr)
            if l%2 != 0 {
                err = errorf.E(
                    "invalid length for hex: %d, %0x",
                    len(hexStr), hexStr,
                )
                return
            }
            // Allocate a new buffer for the decoded data
            h = make([]byte, l/2)
            if err = xhex.Decode(h, hexStr); chk.E(err) {
                return
            }
            return
        }
    }
    if !inQuote {
        err = io.EOF
        return
    }
    return
}

// UnmarshalQuoted performs an in-place unquoting of NIP-01 quoted byte string.
func UnmarshalQuoted(b []byte) (content, rem []byte, err error) {
    if len(b) == 0 {
        err = io.EOF
        return
    }
    rem = b[:]
    for ; len(rem) >= 0; rem = rem[1:] {
        // advance to open quotes
        if rem[0] == '"' {
            rem = rem[1:]
            content = rem
            break
        }
    }
    if len(rem) == 0 {
        err = io.EOF
        return
    }
    var escaping bool
    var contentLen int
    for len(rem) > 0 {
        if rem[0] == '\\' {
            if !escaping {
                escaping = true
                contentLen++
                rem = rem[1:]
            } else {
                escaping = false
                contentLen++
                rem = rem[1:]
            }
        } else if rem[0] == '"' {
            if !escaping {
                rem = rem[1:]
                content = content[:contentLen]
                content = NostrUnescape(content)
                return
            }
            contentLen++
            rem = rem[1:]
            escaping = false
        } else {
            escaping = false
            switch rem[0] {
            // none of these characters are allowed inside a JSON string:
            //
            // backspace, tab, newline, form feed or carriage return.
            case '\b', '\t', '\n', '\f', '\r':
                err = errorf.E(
                    "invalid character '%s' in quoted string",
                    NostrEscape(nil, rem[:1]),
                )
                return
            }
            contentLen++
            rem = rem[1:]
        }
    }
    return
}

func MarshalHexArray(dst []byte, ha [][]byte) (b []byte) {
    dst = append(dst, '[')
    for i := range ha {
        dst = AppendQuote(dst, ha[i], hex.EncAppend)
        if i != len(ha)-1 {
            dst = append(dst, ',')
        }
    }
    dst = append(dst, ']')
    b = dst
    return
}

// UnmarshalHexArray unpacks a JSON array containing strings with hexadecimal, and checks all
// values have the specified byte size.
func UnmarshalHexArray(b []byte, size int) (t [][]byte, rem []byte, err error) {
    rem = b
    var openBracket bool
    for ; len(rem) > 0; rem = rem[1:] {
        if rem[0] == '[' {
            openBracket = true
        } else if openBracket {
            if rem[0] == ',' {
                continue
            } else if rem[0] == ']' {
                rem = rem[1:]
                return
            } else if rem[0] == '"' {
                var h []byte
                if h, rem, err = UnmarshalHex(rem); chk.E(err) {
                    return
                }
                if len(h) != size {
                    err = errorf.E(
                        "invalid hex array size, got %d expect %d",
                        2*len(h), 2*size,
                    )
                    return
                }
                t = append(t, h)
                if rem[0] == ']' {
                    rem = rem[1:]
                    // done
                    return
                }
            }
        }
    }
    return
}

// UnmarshalStringArray unpacks a JSON array containing strings.
func UnmarshalStringArray(b []byte) (t [][]byte, rem []byte, err error) {
    rem = b
    var openBracket bool
    for ; len(rem) > 0; rem = rem[1:] {
        if rem[0] == '[' {
            openBracket = true
        } else if openBracket {
            if rem[0] == ',' {
                continue
            } else if rem[0] == ']' {
                rem = rem[1:]
                return
            } else if rem[0] == '"' {
                var h []byte
                if h, rem, err = UnmarshalQuoted(rem); chk.E(err) {
                    return
                }
                t = append(t, h)
                if rem[0] == ']' {
                    rem = rem[1:]
                    // done
                    return
                }
            }
        }
    }
    return
}

func True() []byte { return []byte("true") }
func False() []byte { return []byte("false") }

func MarshalBool(src []byte, truth bool) []byte {
    if truth {
        return append(src, True()...)
    }
    return append(src, False()...)
}

func UnmarshalBool(src []byte) (rem []byte, truth bool, err error) {
    rem = src
    t, f := True(), False()
    for i := range rem {
        if rem[i] == t[0] {
            if len(rem) < i+len(t) {
                err = io.EOF
                return
            }
            if utils.FastEqual(t, rem[i:i+len(t)]) {
                truth = true
                rem = rem[i+len(t):]
                return
            }
        }
        if rem[i] == f[0] {
            if len(rem) < i+len(f) {
                err = io.EOF
                return
            }
            if utils.FastEqual(f, rem[i:i+len(f)]) {
                rem = rem[i+len(f):]
                return
            }
        }
    }
    // if a truth value is not found in the string it will run to the end
    err = io.EOF
    return
}

func Comma(b []byte) (rem []byte, err error) {
    rem = b
    for i := range rem {
        if rem[i] == ',' {
            rem = rem[i:]
            return
        }
    }
    err = io.EOF
    return
}
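
A small sketch of the quoted-string and hex helpers above (assuming this package; the inputs are placeholders and the expected results are shown in comments):

    // assumes: imports "fmt" and "next.orly.dev/pkg/encoders/text"
    h, rem, err := text.UnmarshalHex([]byte(`"0badcafe",`))
    fmt.Printf("%x %q %v\n", h, rem, err) // 0badcafe "," <nil>

    content, rem2, err := text.UnmarshalQuoted([]byte(`"hello\nworld"`))
    fmt.Printf("%q %q %v\n", content, rem2, err) // "hello\nworld" "" <nil>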

pkg/encoders/text/helpers_test.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package text

import (
    "testing"

    "lol.mleku.dev/chk"
    "lukechampine.com/frand"
    "next.orly.dev/pkg/crypto/sha256"
    "next.orly.dev/pkg/encoders/hex"
    "next.orly.dev/pkg/utils"
)

func TestUnmarshalHexArray(t *testing.T) {
    var ha [][]byte
    h := make([]byte, sha256.Size)
    frand.Read(h)
    var dst []byte
    for _ = range 20 {
        hh := sha256.Sum256(h)
        h = hh[:]
        ha = append(ha, h)
    }
    dst = append(dst, '[')
    for i := range ha {
        dst = AppendQuote(dst, ha[i], hex.EncAppend)
        if i != len(ha)-1 {
            dst = append(dst, ',')
        }
    }
    dst = append(dst, ']')
    var ha2 [][]byte
    var rem []byte
    var err error
    if ha2, rem, err = UnmarshalHexArray(dst, sha256.Size); chk.E(err) {
        t.Fatal(err)
    }
    if len(ha2) != len(ha) {
        t.Fatalf(
            "failed to unmarshal, got %d fields, expected %d", len(ha2),
            len(ha),
        )
    }
    if len(rem) > 0 {
        t.Fatalf("failed to unmarshal, remnant afterwards '%s'", rem)
    }
    for i := range ha2 {
        if !utils.FastEqual(ha[i], ha2[i]) {
            t.Fatalf(
                "failed to unmarshal at element %d; got %x, expected %x",
                i, ha[i], ha2[i],
            )
        }
    }
}

pkg/encoders/text/wrap.go (new file, 88 lines)
@@ -0,0 +1,88 @@
package text

// AppendBytesClosure is a function type for appending data from a source to a destination and
// returning the appended-to slice.
type AppendBytesClosure func(dst, src []byte) []byte

// AppendClosure is a simple append where the caller appends to the destination and returns the
// appended-to slice.
type AppendClosure func(dst []byte) []byte

// Unquote removes the quotes around a slice of bytes.
func Unquote(b []byte) []byte { return b[1 : len(b)-1] }

// Noop simply appends the source to the destination slice and returns it.
func Noop(dst, src []byte) []byte { return append(dst, src...) }

// AppendQuote appends a source of bytes, that have been processed by an AppendBytesClosure and
// returns the appended-to slice.
func AppendQuote(dst, src []byte, ac AppendBytesClosure) []byte {
    dst = append(dst, '"')
    dst = ac(dst, src)
    dst = append(dst, '"')
    return dst
}

// Quote simply quotes a provided source and attaches it to the provided destination slice.
func Quote(dst, src []byte) []byte { return AppendQuote(dst, src, Noop) }

// AppendSingleQuote appends a provided AppendBytesClosure's output from a given source of
// bytes, wrapped in single quotes ''.
func AppendSingleQuote(dst, src []byte, ac AppendBytesClosure) []byte {
    dst = append(dst, '\'')
    dst = ac(dst, src)
    dst = append(dst, '\'')
    return dst
}

// AppendBackticks appends a provided AppendBytesClosure's output from a given source of
// bytes, wrapped in backticks ``.
func AppendBackticks(dst, src []byte, ac AppendBytesClosure) []byte {
    dst = append(dst, '`')
    dst = ac(dst, src)
    dst = append(dst, '`')
    return dst
}

// AppendBrace appends a provided AppendBytesClosure's output from a given source of
// bytes, wrapped in braces ().
func AppendBrace(dst, src []byte, ac AppendBytesClosure) []byte {
    dst = append(dst, '(')
    dst = ac(dst, src)
    dst = append(dst, ')')
    return dst
}

// AppendParenthesis appends a provided AppendBytesClosure's output from a given source of
// bytes, wrapped in parentheses {}.
func AppendParenthesis(dst, src []byte, ac AppendBytesClosure) []byte {
    dst = append(dst, '{')
    dst = ac(dst, src)
    dst = append(dst, '}')
    return dst
}

// AppendBracket appends a provided AppendBytesClosure's output from a given source of
// bytes, wrapped in brackets [].
func AppendBracket(dst, src []byte, ac AppendBytesClosure) []byte {
    dst = append(dst, '[')
    dst = ac(dst, src)
    dst = append(dst, ']')
    return dst
}

// AppendList appends an input source bytes processed by an AppendBytesClosure and separates
// elements with the given separator byte.
func AppendList(
    dst []byte, src [][]byte, separator byte,
    ac AppendBytesClosure,
) []byte {
    last := len(src) - 1
    for i := range src {
        dst = append(dst, ac(dst, src[i])...)
        if i < last {
            dst = append(dst, separator)
        }
    }
    return dst
}
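
A one-liner showing how the wrappers above compose with an encoder closure (a sketch assuming this package and the hex encoder used elsewhere in the diff):

    // assumes: imports "next.orly.dev/pkg/encoders/text" and "next.orly.dev/pkg/encoders/hex"
    quoted := text.AppendQuote(nil, []byte("content"), text.NostrEscape) // `"content"`
    hexed := text.AppendQuote(nil, []byte{0xca, 0xfe}, hex.EncAppend)    // `"cafe"`
    _, _ = quoted, hexed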

pkg/utils/bufpool/bufpool.go (new file, 78 lines)
@@ -0,0 +1,78 @@
package bufpool

import (
    "fmt"
    "sync"
    "unsafe"

    "lol.mleku.dev/log"
    "next.orly.dev/pkg/utils/units"
)

const (
    // BufferSize is the size of each buffer in the pool (1kb)
    BufferSize = units.Kb / 2
)

type B []byte

func (b B) ToBytes() []byte { return b }

var Pool = sync.Pool{
    New: func() interface{} {
        // Create a new buffer when the pool is empty
        b := make([]byte, 0, BufferSize)
        log.T.C(
            func() string {
                ptr := unsafe.SliceData(b)
                return fmt.Sprintf("creating buffer at: %p", ptr)
            },
        )
        return B(b)
    },
}

// Get returns a buffer from the pool or creates a new one if the pool is empty.
//
// Example usage:
//
// buf := bufpool.Get()
// defer bufpool.Put(buf)
// // Use buf...
func Get() B {
    b := Pool.Get().(B)
    log.T.C(
        func() string {
            ptr := unsafe.SliceData(b)
            return fmt.Sprintf("getting buffer at: %p", ptr)
        },
    )
    return b
}

// Put returns a buffer to the pool.
// Buffers should be returned to the pool when no longer needed to allow reuse.
func Put(b B) {
    for i := range b {
        (b)[i] = 0
    }
    b = b[:0]
    log.T.C(
        func() string {
            ptr := unsafe.SliceData(b)
            return fmt.Sprintf("returning to buffer: %p", ptr)
        },
    )
    Pool.Put(b)
}

// PutBytes returns a buffer that was not necessarily created by Get().
func PutBytes(b []byte) {
    log.T.C(
        func() string {
            ptr := unsafe.SliceData(b)
            return fmt.Sprintf("returning bytes to buffer: %p", ptr)
        },
    )
    Put(b)
}
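
The intended checkout/return pattern for the pool above (a sketch; buffers come back from Get with zero length and BufferSize capacity, and PutBytes accepts any []byte, zeroing it before pooling):

    // assumes: import "next.orly.dev/pkg/utils/bufpool"
    buf := bufpool.Get() // len 0, cap BufferSize
    buf = append(buf, "some payload"...)
    // ... use buf ...
    bufpool.PutBytes(buf) // zeroed and returned for reuse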

pkg/utils/bufpool/bufpool_test.go (new file, 71 lines)
@@ -0,0 +1,71 @@
package bufpool

import (
    "testing"
)

func TestBufferPoolGetPut(t *testing.T) {
    // Get a buffer from the pool
    buf1 := Get()

    // Verify the buffer is the correct size
    if len(*buf1) != BufferSize {
        t.Errorf("Expected buffer size of %d, got %d", BufferSize, len(*buf1))
    }

    // Write some data to the buffer
    (*buf1)[0] = 42

    // Return the buffer to the pool
    Put(buf1)

    // Get another buffer, which should be the same one we just returned
    buf2 := Get()

    // Buffer may or may not be cleared, but we should be able to use it
    // Let's check if we have the expected buffer size
    if len(*buf2) != BufferSize {
        t.Errorf("Expected buffer size of %d, got %d", BufferSize, len(*buf2))
    }
}

func TestMultipleBuffers(t *testing.T) {
    // Get multiple buffers at once to ensure the pool can handle it
    const numBuffers = 10
    buffers := make([]B, numBuffers)

    // Get buffers from the pool
    for i := 0; i < numBuffers; i++ {
        buffers[i] = Get()
        // Verify each buffer is the correct size
        if len(*buffers[i]) != BufferSize {
            t.Errorf(
                "Buffer %d: Expected size of %d, got %d", i, BufferSize,
                len(*buffers[i]),
            )
        }
    }

    // Return all buffers to the pool
    for i := 0; i < numBuffers; i++ {
        Put(buffers[i])
    }
}

func BenchmarkGetPut(b *testing.B) {
    for i := 0; i < b.N; i++ {
        buf := Get()
        Put(buf)
    }
}

func BenchmarkGetPutParallel(b *testing.B) {
    b.RunParallel(
        func(pb *testing.PB) {
            for pb.Next() {
                buf := Get()
                Put(buf)
            }
        },
    )
}