diff --git a/.idea/workspace.xml b/.idea/workspace.xml
index 9932710..8595069 100644
--- a/.idea/workspace.xml
+++ b/.idea/workspace.xml
@@ -19,10 +19,401 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -114,7 +505,7 @@
"go.import.settings.migrated": "true",
"go.sdk.automatically.set": "true",
"junie.onboarding.icon.badge.shown": "true",
- "last_opened_file_path": "/home/david/src/orly.dev",
+ "last_opened_file_path": "/home/david/src/orly.dev/interfaces/store",
"node.js.detected.package.eslint": "true",
"node.js.selected.package.eslint": "(autodetect)",
"nodejs_package_manager_path": "npm",
@@ -127,17 +518,18 @@
+
+
+
-
-
diff --git a/LICENSE b/LICENSE
index 0e259d4..fdddb29 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,121 +1,24 @@
-Creative Commons Legal Code
+This is free and unencumbered software released into the public domain.
-CC0 1.0 Universal
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
- CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
- LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
- ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
- INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
- REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
- PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
- THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
- HEREUNDER.
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
-Statement of Purpose
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
-The laws of most jurisdictions throughout the world automatically confer
-exclusive Copyright and Related Rights (defined below) upon the creator
-and subsequent owner(s) (each and all, an "owner") of an original work of
-authorship and/or a database (each, a "Work").
-
-Certain owners wish to permanently relinquish those rights to a Work for
-the purpose of contributing to a commons of creative, cultural and
-scientific works ("Commons") that the public can reliably and without fear
-of later claims of infringement build upon, modify, incorporate in other
-works, reuse and redistribute as freely as possible in any form whatsoever
-and for any purposes, including without limitation commercial purposes.
-These owners may contribute to the Commons to promote the ideal of a free
-culture and the further production of creative, cultural and scientific
-works, or to gain reputation or greater distribution for their Work in
-part through the use and efforts of others.
-
-For these and/or other purposes and motivations, and without any
-expectation of additional consideration or compensation, the person
-associating CC0 with a Work (the "Affirmer"), to the extent that he or she
-is an owner of Copyright and Related Rights in the Work, voluntarily
-elects to apply CC0 to the Work and publicly distribute the Work under its
-terms, with knowledge of his or her Copyright and Related Rights in the
-Work and the meaning and intended legal effect of CC0 on those rights.
-
-1. Copyright and Related Rights. A Work made available under CC0 may be
-protected by copyright and related or neighboring rights ("Copyright and
-Related Rights"). Copyright and Related Rights include, but are not
-limited to, the following:
-
- i. the right to reproduce, adapt, distribute, perform, display,
- communicate, and translate a Work;
- ii. moral rights retained by the original author(s) and/or performer(s);
-iii. publicity and privacy rights pertaining to a person's image or
- likeness depicted in a Work;
- iv. rights protecting against unfair competition in regards to a Work,
- subject to the limitations in paragraph 4(a), below;
- v. rights protecting the extraction, dissemination, use and reuse of data
- in a Work;
- vi. database rights (such as those arising under Directive 96/9/EC of the
- European Parliament and of the Council of 11 March 1996 on the legal
- protection of databases, and under any national implementation
- thereof, including any amended or successor version of such
- directive); and
-vii. other similar, equivalent or corresponding rights throughout the
- world based on applicable law or treaty, and any national
- implementations thereof.
-
-2. Waiver. To the greatest extent permitted by, but not in contravention
-of, applicable law, Affirmer hereby overtly, fully, permanently,
-irrevocably and unconditionally waives, abandons, and surrenders all of
-Affirmer's Copyright and Related Rights and associated claims and causes
-of action, whether now known or unknown (including existing as well as
-future claims and causes of action), in the Work (i) in all territories
-worldwide, (ii) for the maximum duration provided by applicable law or
-treaty (including future time extensions), (iii) in any current or future
-medium and for any number of copies, and (iv) for any purpose whatsoever,
-including without limitation commercial, advertising or promotional
-purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
-member of the public at large and to the detriment of Affirmer's heirs and
-successors, fully intending that such Waiver shall not be subject to
-revocation, rescission, cancellation, termination, or any other legal or
-equitable action to disrupt the quiet enjoyment of the Work by the public
-as contemplated by Affirmer's express Statement of Purpose.
-
-3. Public License Fallback. Should any part of the Waiver for any reason
-be judged legally invalid or ineffective under applicable law, then the
-Waiver shall be preserved to the maximum extent permitted taking into
-account Affirmer's express Statement of Purpose. In addition, to the
-extent the Waiver is so judged Affirmer hereby grants to each affected
-person a royalty-free, non transferable, non sublicensable, non exclusive,
-irrevocable and unconditional license to exercise Affirmer's Copyright and
-Related Rights in the Work (i) in all territories worldwide, (ii) for the
-maximum duration provided by applicable law or treaty (including future
-time extensions), (iii) in any current or future medium and for any number
-of copies, and (iv) for any purpose whatsoever, including without
-limitation commercial, advertising or promotional purposes (the
-"License"). The License shall be deemed effective as of the date CC0 was
-applied by Affirmer to the Work. Should any part of the License for any
-reason be judged legally invalid or ineffective under applicable law, such
-partial invalidity or ineffectiveness shall not invalidate the remainder
-of the License, and in such case Affirmer hereby affirms that he or she
-will not (i) exercise any of his or her remaining Copyright and Related
-Rights in the Work or (ii) assert any associated claims and causes of
-action with respect to the Work, in either case contrary to Affirmer's
-express Statement of Purpose.
-
-4. Limitations and Disclaimers.
-
- a. No trademark or patent rights held by Affirmer are waived, abandoned,
- surrendered, licensed or otherwise affected by this document.
- b. Affirmer offers the Work as-is and makes no representations or
- warranties of any kind concerning the Work, express, implied,
- statutory or otherwise, including without limitation warranties of
- title, merchantability, fitness for a particular purpose, non
- infringement, or the absence of latent or other defects, accuracy, or
- the present or absence of errors, whether or not discoverable, all to
- the greatest extent permissible under applicable law.
- c. Affirmer disclaims responsibility for clearing rights of other persons
- that may apply to the Work or any use thereof, including without
- limitation any person's Copyright and Related Rights in the Work.
- Further, Affirmer disclaims responsibility for obtaining any necessary
- consents, permissions or other rights required for any use of the
- Work.
- d. Affirmer understands and acknowledges that Creative Commons is not a
- party to this document and has no duty or obligation with respect to
- this CC0 or use of the Work.
+For more information, please refer to <https://unlicense.org>
diff --git a/addresstag/addresstag.go b/addresstag/addresstag.go
new file mode 100644
index 0000000..66d36bf
--- /dev/null
+++ b/addresstag/addresstag.go
@@ -0,0 +1,21 @@
+package addresstag
+
+import (
+ "strconv"
+ "strings"
+
+ "orly.dev/hex"
+)
+
+// DecodeAddressTag unpacks the contents of an `a` tag.
+func DecodeAddressTag(tagValue string) (k uint16, pkb []byte, d string) {
+ split := strings.Split(tagValue, ":")
+ if len(split) == 3 {
+ if pkb, _ = hex.Dec(split[1]); len(pkb) == 32 {
+ if key, err := strconv.ParseUint(split[0], 10, 16); err == nil {
+ return uint16(key), pkb, split[2]
+ }
+ }
+ }
+ return
+}
diff --git a/app/main.go b/app/main.go
new file mode 100644
index 0000000..2f88bc8
--- /dev/null
+++ b/app/main.go
@@ -0,0 +1,81 @@
+// Package app implements the realy nostr relay with a simple follow/mute list authentication scheme and the new HTTP REST based protocol.
+package app
+
+import (
+ "net/http"
+ "sync"
+
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/filter"
+ "orly.dev/filters"
+ "orly.dev/interfaces/store"
+ "orly.dev/realy/config"
+)
+
+type List map[string]struct{}
+
+type Relay struct {
+ sync.Mutex
+ *config.C
+ Store store.I
+}
+
+func (r *Relay) Name() string { return r.C.AppName }
+
+func (r *Relay) Storage() store.I { return r.Store }
+
+func (r *Relay) Init() (err error) {
+ // for _, src := range r.C.Owners {
+ // if len(src) < 1 {
+ // continue
+ // }
+ // dst := make([]byte, len(src)/2)
+ // if _, err = hex.DecBytes(dst, []byte(src)); chk.E(err) {
+ // if dst, err = bech32encoding.NpubToBytes([]byte(src)); chk.E(err) {
+ // continue
+ // }
+ // }
+ // r.owners = append(r.owners, dst)
+ // }
+ // if len(r.owners) > 0 {
+ // log.F.C(func() string {
+ // ownerIds := make([]string, len(r.owners))
+ // for i, npub := range r.owners {
+ // ownerIds[i] = hex.Enc(npub)
+ // }
+ // owners := strings.Join(ownerIds, ",")
+ // return fmt.Sprintf("owners %s", owners)
+ // })
+ // r.ZeroLists()
+ // r.CheckOwnerLists(context.Bg())
+ // }
+ return nil
+}
+
+func (r *Relay) AcceptEvent(
+ c context.T, evt *event.E, hr *http.Request,
+ origin string, authedPubkey []byte,
+) (accept bool, notice string, afterSave func()) {
+ accept = true
+ return
+}
+
+func (r *Relay) AcceptFilter(
+ c context.T, hr *http.Request, f *filter.S,
+ authedPubkey []byte,
+) (allowed *filter.S, ok bool, modified bool) {
+ allowed = f
+ ok = true
+ return
+}
+
+func (r *Relay) AcceptReq(
+ c context.T, hr *http.Request, id []byte,
+ ff *filters.T, authedPubkey []byte,
+) (allowed *filters.T, ok bool, modified bool) {
+
+ allowed = ff
+ ok = true
+ return
+}
diff --git a/app/resources.go b/app/resources.go
new file mode 100644
index 0000000..39b41d2
--- /dev/null
+++ b/app/resources.go
@@ -0,0 +1,30 @@
+package app
+
+import (
+ "orly.dev/log"
+ "os"
+ "runtime"
+ "time"
+
+ "orly.dev/context"
+)
+
+func MonitorResources(c context.T) {
+ tick := time.NewTicker(time.Minute * 15)
+ log.I.Ln("running process", os.Args[0], os.Getpid())
+ // memStats := &runtime.MemStats{}
+ for {
+ select {
+ case <-c.Done():
+ log.D.Ln("shutting down resource monitor")
+ return
+ case <-tick.C:
+ // runtime.ReadMemStats(memStats)
+ log.D.Ln(
+ "# goroutines", runtime.NumGoroutine(), "# cgo calls",
+ runtime.NumCgoCall(),
+ )
+ // log.D.S(memStats)
+ }
+ }
+}
diff --git a/apputil/doc.go b/apputil/doc.go
new file mode 100644
index 0000000..0cde52a
--- /dev/null
+++ b/apputil/doc.go
@@ -0,0 +1,2 @@
+// Package apputil provides some simple filesystem functions
+package apputil
diff --git a/atomic/.codecov.yml b/atomic/.codecov.yml
new file mode 100644
index 0000000..571116c
--- /dev/null
+++ b/atomic/.codecov.yml
@@ -0,0 +1,19 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
+# Also update COVER_IGNORE_PKGS in the Makefile.
+ignore:
+ - /internal/gen-atomicint/
+ - /internal/gen-valuewrapper/
diff --git a/atomic/CHANGELOG.md b/atomic/CHANGELOG.md
new file mode 100644
index 0000000..71db542
--- /dev/null
+++ b/atomic/CHANGELOG.md
@@ -0,0 +1,130 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## Unreleased
+- No changes yet.
+
+## [1.11.0] - 2023-05-02
+### Fixed
+- Fix `Swap` and `CompareAndSwap` for `Value` wrappers without initialization.
+
+### Added
+- Add `String` method to `atomic.Pointer[T]` type allowing users to safely print
+underlying values of pointers.
+
+[1.11.0]: https://github.com/uber-go/atomic/compare/v1.10.0...v1.11.0
+
+## [1.10.0] - 2022-08-11
+### Added
+- Add `atomic.Float32` type for atomic operations on `float32`.
+- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`,
+ and `atomic.Value`.
+- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any
+ type. This is present only for Go 1.18 or higher, and is a drop-in for
+ replacement for the standard library's `sync/atomic.Pointer` type.
+
+### Changed
+- Deprecate `CAS` methods on all types in favor of corresponding
+ `CompareAndSwap` methods.
+
+Thanks to @eNV25 and @icpd for their contributions to this release.
+
+[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0
+
+## [1.9.0] - 2021-07-15
+### Added
+- Add `Float64.Swap` to match int atomic operations.
+- Add `atomic.Time` type for atomic operations on `time.Time` values.
+
+[1.9.0]: https://github.com/uber-go/atomic/compare/v1.8.0...v1.9.0
+
+## [1.8.0] - 2021-06-09
+### Added
+- Add `atomic.Uintptr` type for atomic operations on `uintptr` values.
+- Add `atomic.UnsafePointer` type for atomic operations on `unsafe.Pointer` values.
+
+[1.8.0]: https://github.com/uber-go/atomic/compare/v1.7.0...v1.8.0
+
+## [1.7.0] - 2020-09-14
+### Added
+- Support JSON serialization and deserialization of primitive atomic types.
+- Support Text marshalling and unmarshalling for string atomics.
+
+### Changed
+- Disallow incorrect comparison of atomic values in a non-atomic way.
+
+### Removed
+- Remove dependency on `golang.org/x/{lint, tools}`.
+
+[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
+
+## [1.6.0] - 2020-02-24
+### Changed
+- Drop library dependency on `golang.org/x/{lint, tools}`.
+
+[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
+
+## [1.5.1] - 2019-11-19
+- Fix bug where `Bool.CAS` and `Bool.Toggle` do not work correctly together,
+  causing `CAS` to fail even though the old value matches.
+
+[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
+
+## [1.5.0] - 2019-10-29
+### Changed
+- With Go modules, only the `go.uber.org/atomic` import path is supported now.
+ If you need to use the old import path, please add a `replace` directive to
+ your `go.mod`.
+
+[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
+
+## [1.4.0] - 2019-05-01
+### Added
+ - Add `atomic.Error` type for atomic operations on `error` values.
+
+[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
+
+## [1.3.2] - 2018-05-02
+### Added
+- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
+
+[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
+
+## [1.3.1] - 2017-11-14
+### Fixed
+- Revert optimization for `atomic.String.Store("")` which caused data races.
+
+[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
+
+## [1.3.0] - 2017-11-13
+### Added
+- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
+
+### Changed
+- Optimize `atomic.String.Store("")` by avoiding an allocation.
+
+[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
+
+## [1.2.0] - 2017-04-12
+### Added
+- Shadow `atomic.Value` from `sync/atomic`.
+
+[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
+
+## [1.1.0] - 2017-03-10
+### Added
+- Add atomic `Float64` type.
+
+### Changed
+- Support new `go.uber.org/atomic` import path.
+
+[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
+
+## [1.0.0] - 2016-07-18
+
+- Initial release.
+
+[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/atomic/LICENSE b/atomic/LICENSE
new file mode 100644
index 0000000..8765c9f
--- /dev/null
+++ b/atomic/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/atomic/Makefile b/atomic/Makefile
new file mode 100644
index 0000000..53432ab
--- /dev/null
+++ b/atomic/Makefile
@@ -0,0 +1,79 @@
+# Directory to place `go install`ed binaries into.
+export GOBIN ?= $(shell pwd)/bin
+
+GOLINT = $(GOBIN)/golint
+GEN_ATOMICINT = $(GOBIN)/gen-atomicint
+GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper
+STATICCHECK = $(GOBIN)/staticcheck
+
+GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print)
+
+# Also update ignore section in .codecov.yml.
+COVER_IGNORE_PKGS = \
+ github.com/p9ds/atomic/internal/gen-atomicint \
+ github.com/p9ds/atomic/internal/gen-atomicwrapper
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: test
+test:
+ go test -race ./...
+
+.PHONY: gofmt
+gofmt:
+ $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+ gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+ @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
+
+$(GOLINT):
+ cd tools && go install golang.org/x/lint/golint
+
+$(STATICCHECK):
+ cd tools && go install honnef.co/go/tools/cmd/staticcheck
+
+$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*)
+ go build -o $@ ./internal/gen-atomicwrapper
+
+$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*)
+ go build -o $@ ./internal/gen-atomicint
+
+.PHONY: golint
+golint: $(GOLINT)
+ $(GOLINT) ./...
+
+.PHONY: staticcheck
+staticcheck: $(STATICCHECK)
+ $(STATICCHECK) ./...
+
+.PHONY: lint
+lint: gofmt golint staticcheck generatenodirty
+
+# comma separated list of packages to consider for code coverage.
+COVER_PKG = $(shell \
+ go list -find ./... | \
+ grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \
+ paste -sd, -)
+
+.PHONY: cover
+cover:
+ go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./...
+ go tool cover -html=cover.out -o cover.html
+
+.PHONY: generate
+generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
+ go generate ./...
+
+.PHONY: generatenodirty
+generatenodirty:
+ @[ -z "$$(git status --porcelain)" ] || ( \
+ echo "Working tree is dirty. Commit your changes first."; \
+ git status; \
+ exit 1 )
+ @make generate
+ @status=$$(git status --porcelain); \
+ [ -z "$$status" ] || ( \
+ echo "Working tree is dirty after `make generate`:"; \
+ echo "$$status"; \
+ echo "Please ensure that the generated code is up-to-date." )
diff --git a/atomic/README.md b/atomic/README.md
new file mode 100644
index 0000000..3eed44a
--- /dev/null
+++ b/atomic/README.md
@@ -0,0 +1,33 @@
+# atomic
+
+Simple wrappers for primitive types to enforce atomic access.
+
+## Installation
+
+```shell
+$ go get -u orly.dev/atomic@latest
+```
+
+## Usage
+
+The standard library's `sync/atomic` is powerful, but it's easy to forget which
+variables must be accessed atomically. `orly.dev/atomic` preserves all the
+functionality of the standard library, but wraps the primitive types to
+provide a safer, more convenient API.
+
+```go
+var atom atomic.Uint32
+atom.Store(42)
+atom.Sub(2)
+atom.CompareAndSwap(40, 11)
+```
+
+See the [documentation][doc] for a complete API specification.
+
+## Development Status
+
+Stable.
+
+---
+
+Released under the [MIT License](LICENSE).
\ No newline at end of file
diff --git a/atomic/assert_test.go b/atomic/assert_test.go
new file mode 100644
index 0000000..47cfbf2
--- /dev/null
+++ b/atomic/assert_test.go
@@ -0,0 +1,45 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// Marks the test as failed if the error cannot be cast into the provided type
+// with errors.As.
+//
+// assertErrorAsType(t, err, new(ErrFoo))
+func assertErrorAsType(t *testing.T, err error, typ interface{}, msgAndArgs ...interface{}) bool {
+ t.Helper()
+
+ return assert.True(t, errors.As(err, typ), msgAndArgs...)
+}
+
+func assertErrorJSONUnmarshalType(t *testing.T, err error, msgAndArgs ...interface{}) bool {
+ t.Helper()
+
+ return assertErrorAsType(t, err, new(*json.UnmarshalTypeError), msgAndArgs...)
+}
diff --git a/atomic/bool.go b/atomic/bool.go
new file mode 100644
index 0000000..f0a2ddd
--- /dev/null
+++ b/atomic/bool.go
@@ -0,0 +1,88 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+)
+
+// Bool is an atomic type-safe wrapper for bool values.
+type Bool struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint32
+}
+
+var _zeroBool bool
+
+// NewBool creates a new Bool.
+func NewBool(val bool) *Bool {
+ x := &Bool{}
+ if val != _zeroBool {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped bool.
+func (x *Bool) Load() bool {
+ return truthy(x.v.Load())
+}
+
+// Store atomically stores the passed bool.
+func (x *Bool) Store(val bool) {
+ x.v.Store(boolToInt(val))
+}
+
+// CAS is an atomic compare-and-swap for bool values.
+//
+// Deprecated: Use CompareAndSwap.
+func (x *Bool) CAS(old, new bool) (swapped bool) {
+ return x.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for bool values.
+func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) {
+ return x.v.CompareAndSwap(boolToInt(old), boolToInt(new))
+}
+
+// Swap atomically stores the given bool and returns the old
+// value.
+func (x *Bool) Swap(val bool) (old bool) {
+ return truthy(x.v.Swap(boolToInt(val)))
+}
+
+// MarshalJSON encodes the wrapped bool into JSON.
+func (x *Bool) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a bool from JSON.
+func (x *Bool) UnmarshalJSON(b []byte) error {
+ var v bool
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/atomic/bool_ext.go b/atomic/bool_ext.go
new file mode 100644
index 0000000..a2e60e9
--- /dev/null
+++ b/atomic/bool_ext.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go
+
+func truthy(n uint32) bool {
+ return n == 1
+}
+
+func boolToInt(b bool) uint32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// Toggle atomically negates the Boolean and returns the previous value.
+func (b *Bool) Toggle() (old bool) {
+ for {
+ old := b.Load()
+ if b.CAS(old, !old) {
+ return old
+ }
+ }
+}
+
+// String encodes the wrapped value as a string.
+func (b *Bool) String() string {
+ return strconv.FormatBool(b.Load())
+}
diff --git a/atomic/bool_test.go b/atomic/bool_test.go
new file mode 100644
index 0000000..6753ebd
--- /dev/null
+++ b/atomic/bool_test.go
@@ -0,0 +1,150 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBool(t *testing.T) {
+ atom := NewBool(false)
+ require.False(t, atom.Toggle(), "Expected Toggle to return previous value.")
+ require.True(t, atom.Toggle(), "Expected Toggle to return previous value.")
+ require.False(t, atom.Toggle(), "Expected Toggle to return previous value.")
+ require.True(t, atom.Load(), "Unexpected state after swap.")
+
+ require.True(t, atom.CAS(true, true), "CAS should swap when old matches")
+ require.True(t, atom.Load(), "CAS should have no effect")
+ require.True(t, atom.CAS(true, false), "CAS should swap when old matches")
+ require.False(t, atom.Load(), "CAS should have modified the value")
+ require.False(t, atom.CAS(true, false), "CAS should fail on old mismatch")
+ require.False(t, atom.Load(), "CAS should not have modified the value")
+
+ atom.Store(false)
+ require.False(t, atom.Load(), "Unexpected state after store.")
+
+ prev := atom.Swap(false)
+ require.False(t, prev, "Expected Swap to return previous value.")
+
+ prev = atom.Swap(true)
+ require.False(t, prev, "Expected Swap to return previous value.")
+
+ t.Run("JSON/Marshal", func(t *testing.T) {
+ atom.Store(true)
+ bytes, err := json.Marshal(atom)
+ require.NoError(t, err, "json.Marshal errored unexpectedly.")
+ require.Equal(t, []byte("true"), bytes, "json.Marshal encoded the wrong bytes.")
+ })
+
+ t.Run("JSON/Unmarshal", func(t *testing.T) {
+ err := json.Unmarshal([]byte("false"), &atom)
+ require.NoError(t, err, "json.Unmarshal errored unexpectedly.")
+ require.False(t, atom.Load(), "json.Unmarshal didn't set the correct value.")
+ })
+
+ t.Run("JSON/Unmarshal/Error", func(t *testing.T) {
+ err := json.Unmarshal([]byte("42"), &atom)
+ require.Error(t, err, "json.Unmarshal didn't error as expected.")
+ assertErrorJSONUnmarshalType(t, err,
+ "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err)
+ })
+
+ t.Run("String", func(t *testing.T) {
+ t.Run("true", func(t *testing.T) {
+ assert.Equal(t, "true", NewBool(true).String(),
+ "String() returned an unexpected value.")
+ })
+
+ t.Run("false", func(t *testing.T) {
+ var b Bool
+ assert.Equal(t, "false", b.String(),
+ "String() returned an unexpected value.")
+ })
+ })
+}
+
+func TestBool_InitializeDefaults(t *testing.T) {
+ tests := []struct {
+ msg string
+ newBool func() *Bool
+ }{
+ {
+ msg: "Uninitialized",
+ newBool: func() *Bool {
+ var b Bool
+ return &b
+ },
+ },
+ {
+ msg: "NewBool with default",
+ newBool: func() *Bool {
+ return NewBool(false)
+ },
+ },
+ {
+ msg: "Bool swapped with default",
+ newBool: func() *Bool {
+ b := NewBool(true)
+ b.Swap(false)
+ return b
+ },
+ },
+ {
+ msg: "Bool CAS'd with default",
+ newBool: func() *Bool {
+ b := NewBool(true)
+ b.CompareAndSwap(true, false)
+ return b
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.msg, func(t *testing.T) {
+ t.Run("Marshal", func(t *testing.T) {
+ b := tt.newBool()
+ marshalled, err := b.MarshalJSON()
+ require.NoError(t, err)
+ assert.Equal(t, "false", string(marshalled))
+ })
+
+ t.Run("String", func(t *testing.T) {
+ b := tt.newBool()
+ assert.Equal(t, "false", b.String())
+ })
+
+ t.Run("CompareAndSwap", func(t *testing.T) {
+ b := tt.newBool()
+ require.True(t, b.CompareAndSwap(false, true))
+ assert.Equal(t, true, b.Load())
+ })
+
+ t.Run("Swap", func(t *testing.T) {
+ b := tt.newBool()
+ assert.Equal(t, false, b.Swap(true))
+ })
+ })
+ }
+}
diff --git a/atomic/doc.go b/atomic/doc.go
new file mode 100644
index 0000000..ae7390e
--- /dev/null
+++ b/atomic/doc.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package atomic provides simple wrappers around numerics to enforce atomic
+// access.
+package atomic
diff --git a/atomic/duration.go b/atomic/duration.go
new file mode 100644
index 0000000..7c23868
--- /dev/null
+++ b/atomic/duration.go
@@ -0,0 +1,89 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// Duration is an atomic type-safe wrapper for time.Duration values.
+type Duration struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Int64
+}
+
+var _zeroDuration time.Duration
+
+// NewDuration creates a new Duration.
+func NewDuration(val time.Duration) *Duration {
+ x := &Duration{}
+ if val != _zeroDuration {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped time.Duration.
+func (x *Duration) Load() time.Duration {
+ return time.Duration(x.v.Load())
+}
+
+// Store atomically stores the passed time.Duration.
+func (x *Duration) Store(val time.Duration) {
+ x.v.Store(int64(val))
+}
+
+// CAS is an atomic compare-and-swap for time.Duration values.
+//
+// Deprecated: Use CompareAndSwap.
+func (x *Duration) CAS(old, new time.Duration) (swapped bool) {
+ return x.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for time.Duration values.
+func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) {
+ return x.v.CompareAndSwap(int64(old), int64(new))
+}
+
+// Swap atomically stores the given time.Duration and returns the old
+// value.
+func (x *Duration) Swap(val time.Duration) (old time.Duration) {
+ return time.Duration(x.v.Swap(int64(val)))
+}
+
+// MarshalJSON encodes the wrapped time.Duration into JSON.
+func (x *Duration) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a time.Duration from JSON.
+func (x *Duration) UnmarshalJSON(b []byte) error {
+ var v time.Duration
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/atomic/duration_ext.go b/atomic/duration_ext.go
new file mode 100644
index 0000000..62a45b3
--- /dev/null
+++ b/atomic/duration_ext.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "time"
+
+//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
+
+// Add atomically adds to the wrapped time.Duration and returns the new value.
+func (x *Duration) Add(delta time.Duration) time.Duration {
+ return time.Duration(x.v.Add(int64(delta)))
+}
+
+// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
+func (x *Duration) Sub(delta time.Duration) time.Duration {
+ return time.Duration(x.v.Sub(int64(delta)))
+}
+
+// String encodes the wrapped value as a string.
+func (x *Duration) String() string {
+ return x.Load().String()
+}
diff --git a/atomic/duration_test.go b/atomic/duration_test.go
new file mode 100644
index 0000000..f5779fe
--- /dev/null
+++ b/atomic/duration_test.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDuration(t *testing.T) {
+ atom := NewDuration(5 * time.Minute)
+
+ require.Equal(t, 5*time.Minute, atom.Load(), "Load didn't work.")
+ require.Equal(t, 6*time.Minute, atom.Add(time.Minute), "Add didn't work.")
+ require.Equal(t, 4*time.Minute, atom.Sub(2*time.Minute), "Sub didn't work.")
+
+ require.True(t, atom.CAS(4*time.Minute, time.Minute), "CAS didn't report a swap.")
+ require.Equal(t, time.Minute, atom.Load(), "CAS didn't set the correct value.")
+
+ require.Equal(t, time.Minute, atom.Swap(2*time.Minute), "Swap didn't return the old value.")
+ require.Equal(t, 2*time.Minute, atom.Load(), "Swap didn't set the correct value.")
+
+ atom.Store(10 * time.Minute)
+ require.Equal(t, 10*time.Minute, atom.Load(), "Store didn't set the correct value.")
+
+ t.Run("JSON/Marshal", func(t *testing.T) {
+ atom.Store(time.Second)
+ bytes, err := json.Marshal(atom)
+ require.NoError(t, err, "json.Marshal errored unexpectedly.")
+ require.Equal(t, []byte("1000000000"), bytes, "json.Marshal encoded the wrong bytes.")
+ })
+
+ t.Run("JSON/Unmarshal", func(t *testing.T) {
+ err := json.Unmarshal([]byte("1000000000"), &atom)
+ require.NoError(t, err, "json.Unmarshal errored unexpectedly.")
+ require.Equal(t, time.Second, atom.Load(),
+ "json.Unmarshal didn't set the correct value.")
+ })
+
+ t.Run("JSON/Unmarshal/Error", func(t *testing.T) {
+ err := json.Unmarshal([]byte("\"1000000000\""), &atom)
+ require.Error(t, err, "json.Unmarshal didn't error as expected.")
+ assertErrorJSONUnmarshalType(t, err,
+ "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err)
+ })
+
+ t.Run("String", func(t *testing.T) {
+ assert.Equal(t, "42s", NewDuration(42*time.Second).String(),
+ "String() returned an unexpected value.")
+ })
+}
diff --git a/atomic/error.go b/atomic/error.go
new file mode 100644
index 0000000..b7e3f12
--- /dev/null
+++ b/atomic/error.go
@@ -0,0 +1,72 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// Error is an atomic type-safe wrapper for error values.
+type Error struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroError error
+
+// NewError creates a new Error.
+func NewError(val error) *Error {
+ x := &Error{}
+ if val != _zeroError {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped error.
+func (x *Error) Load() error {
+ return unpackError(x.v.Load())
+}
+
+// Store atomically stores the passed error.
+func (x *Error) Store(val error) {
+ x.v.Store(packError(val))
+}
+
+// CompareAndSwap is an atomic compare-and-swap for error values.
+func (x *Error) CompareAndSwap(old, new error) (swapped bool) {
+ if x.v.CompareAndSwap(packError(old), packError(new)) {
+ return true
+ }
+
+ if old == _zeroError {
+ // If the old value is the empty value, then it's possible the
+ // underlying Value hasn't been set and is nil, so retry with nil.
+ return x.v.CompareAndSwap(nil, packError(new))
+ }
+
+ return false
+}
+
+// Swap atomically stores the given error and returns the old
+// value.
+func (x *Error) Swap(val error) (old error) {
+ return unpackError(x.v.Swap(packError(val)))
+}
diff --git a/atomic/error_ext.go b/atomic/error_ext.go
new file mode 100644
index 0000000..d31fb63
--- /dev/null
+++ b/atomic/error_ext.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// atomic.Value panics on nil inputs, or if the underlying type changes.
+// Stabilize by always storing a custom struct that we control.
+
+//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go
+
+type packedError struct{ Value error }
+
+func packError(v error) interface{} {
+ return packedError{v}
+}
+
+func unpackError(v interface{}) error {
+ if err, ok := v.(packedError); ok {
+ return err.Value
+ }
+ return nil
+}
diff --git a/atomic/error_test.go b/atomic/error_test.go
new file mode 100644
index 0000000..1f02e6d
--- /dev/null
+++ b/atomic/error_test.go
@@ -0,0 +1,136 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestErrorByValue(t *testing.T) {
+ err := &Error{}
+ require.Nil(t, err.Load(), "Initial value shall be nil")
+}
+
+func TestNewErrorWithNilArgument(t *testing.T) {
+ err := NewError(nil)
+ require.Nil(t, err.Load(), "Initial value shall be nil")
+}
+
+func TestErrorCanStoreNil(t *testing.T) {
+ err := NewError(errors.New("hello"))
+ err.Store(nil)
+ require.Nil(t, err.Load(), "Stored value shall be nil")
+}
+
+func TestNewErrorWithError(t *testing.T) {
+ err1 := errors.New("hello1")
+ err2 := errors.New("hello2")
+
+ atom := NewError(err1)
+ require.Equal(t, err1, atom.Load(), "Expected Load to return initialized value")
+
+ atom.Store(err2)
+ require.Equal(t, err2, atom.Load(), "Expected Load to return overridden value")
+}
+
+func TestErrorSwap(t *testing.T) {
+ err1 := errors.New("hello1")
+ err2 := errors.New("hello2")
+
+ atom := NewError(err1)
+ require.Equal(t, err1, atom.Load(), "Expected Load to return initialized value")
+
+ old := atom.Swap(err2)
+ require.Equal(t, err2, atom.Load(), "Expected Load to return overridden value")
+ require.Equal(t, err1, old, "Expected old to be initial value")
+}
+
+func TestErrorCompareAndSwap(t *testing.T) {
+ err1 := errors.New("hello1")
+ err2 := errors.New("hello2")
+
+ atom := NewError(err1)
+ require.Equal(t, err1, atom.Load(), "Expected Load to return initialized value")
+
+ swapped := atom.CompareAndSwap(err2, err2)
+ require.False(t, swapped, "Expected swapped to be false")
+ require.Equal(t, err1, atom.Load(), "Expected Load to return initial value")
+
+ swapped = atom.CompareAndSwap(err1, err2)
+ require.True(t, swapped, "Expected swapped to be true")
+ require.Equal(t, err2, atom.Load(), "Expected Load to return overridden value")
+}
+
+func TestError_InitializeDefaults(t *testing.T) {
+ tests := []struct {
+ msg string
+ newError func() *Error
+ }{
+ {
+ msg: "Uninitialized",
+ newError: func() *Error {
+ var e Error
+ return &e
+ },
+ },
+ {
+ msg: "NewError with default",
+ newError: func() *Error {
+ return NewError(nil)
+ },
+ },
+ {
+ msg: "Error swapped with default",
+ newError: func() *Error {
+ e := NewError(assert.AnError)
+ e.Swap(nil)
+ return e
+ },
+ },
+ {
+ msg: "Error CAS'd with default",
+ newError: func() *Error {
+ e := NewError(assert.AnError)
+ e.CompareAndSwap(assert.AnError, nil)
+ return e
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.msg, func(t *testing.T) {
+ t.Run("CompareAndSwap", func(t *testing.T) {
+ e := tt.newError()
+ require.True(t, e.CompareAndSwap(nil, assert.AnError))
+ assert.Equal(t, assert.AnError, e.Load())
+ })
+
+ t.Run("Swap", func(t *testing.T) {
+ e := tt.newError()
+ assert.Equal(t, nil, e.Swap(assert.AnError))
+ })
+ })
+ }
+}
diff --git a/atomic/example_test.go b/atomic/example_test.go
new file mode 100644
index 0000000..9b344eb
--- /dev/null
+++ b/atomic/example_test.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic_test
+
+import (
+ "fmt"
+
+ "orly.dev/atomic"
+)
+
+func Example() {
+ // Uint32 is a thin wrapper around the primitive uint32 type.
+ var atom atomic.Uint32
+
+ // The wrapper ensures that all operations are atomic.
+ atom.Store(42)
+ fmt.Println(atom.Inc())
+ fmt.Println(atom.CompareAndSwap(43, 0))
+ fmt.Println(atom.Load())
+
+ // Output:
+ // 43
+ // true
+ // 0
+}
diff --git a/atomic/float32.go b/atomic/float32.go
new file mode 100644
index 0000000..62c3633
--- /dev/null
+++ b/atomic/float32.go
@@ -0,0 +1,77 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+)
+
+// Float32 is an atomic type-safe wrapper for float32 values.
+type Float32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint32
+}
+
+var _zeroFloat32 float32
+
+// NewFloat32 creates a new Float32.
+func NewFloat32(val float32) *Float32 {
+ x := &Float32{}
+ if val != _zeroFloat32 {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped float32.
+func (x *Float32) Load() float32 {
+ return math.Float32frombits(x.v.Load())
+}
+
+// Store atomically stores the passed float32.
+func (x *Float32) Store(val float32) {
+ x.v.Store(math.Float32bits(val))
+}
+
+// Swap atomically stores the given float32 and returns the old
+// value.
+func (x *Float32) Swap(val float32) (old float32) {
+ return math.Float32frombits(x.v.Swap(math.Float32bits(val)))
+}
+
+// MarshalJSON encodes the wrapped float32 into JSON.
+func (x *Float32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a float32 from JSON.
+func (x *Float32) UnmarshalJSON(b []byte) error {
+ var v float32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/atomic/float32_ext.go b/atomic/float32_ext.go
new file mode 100644
index 0000000..b0cd8d9
--- /dev/null
+++ b/atomic/float32_ext.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "math"
+ "strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go
+
+// Add atomically adds to the wrapped float32 and returns the new value.
+func (f *Float32) Add(delta float32) float32 {
+ for {
+ old := f.Load()
+ new := old + delta
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float32 and returns the new value.
+func (f *Float32) Sub(delta float32) float32 {
+ return f.Add(-delta)
+}
+
+// CAS is an atomic compare-and-swap for float32 values.
+//
+// Deprecated: Use CompareAndSwap
+func (f *Float32) CAS(old, new float32) (swapped bool) {
+ return f.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap for float32 values.
+//
+// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
+// but CompareAndSwap allows a stored NaN to compare equal to a passed-in NaN.
+// This avoids typical CompareAndSwap loops from blocking forever, e.g.,
+//
+// for {
+// old := atom.Load()
+// new = f(old)
+// if atom.CompareAndSwap(old, new) {
+// break
+// }
+// }
+//
+// If CompareAndSwap did not treat a stored NaN as equal to a passed-in NaN, the above would loop forever.
+func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) {
+ return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new))
+}
+
+// String encodes the wrapped value as a string.
+func (f *Float32) String() string {
+ // 'g' is the behavior for floats with %v.
+ return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32)
+}
diff --git a/atomic/float32_test.go b/atomic/float32_test.go
new file mode 100644
index 0000000..5b7fd51
--- /dev/null
+++ b/atomic/float32_test.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFloat32(t *testing.T) {
+ atom := NewFloat32(4.2)
+
+ require.Equal(t, float32(4.2), atom.Load(), "Load didn't work.")
+
+ require.True(t, atom.CAS(4.2, 0.5), "CAS didn't report a swap.")
+ require.Equal(t, float32(0.5), atom.Load(), "CAS didn't set the correct value.")
+ require.False(t, atom.CAS(0.0, 1.5), "CAS reported a swap.")
+
+ atom.Store(42.0)
+ require.Equal(t, float32(42.0), atom.Load(), "Store didn't set the correct value.")
+ require.Equal(t, float32(42.5), atom.Add(0.5), "Add didn't work.")
+ require.Equal(t, float32(42.0), atom.Sub(0.5), "Sub didn't work.")
+
+ require.Equal(t, float32(42.0), atom.Swap(45.0), "Swap didn't return the old value.")
+ require.Equal(t, float32(45.0), atom.Load(), "Swap didn't set the correct value.")
+
+ t.Run("JSON/Marshal", func(t *testing.T) {
+ atom.Store(42.5)
+ bytes, err := json.Marshal(atom)
+ require.NoError(t, err, "json.Marshal errored unexpectedly.")
+ require.Equal(t, []byte("42.5"), bytes, "json.Marshal encoded the wrong bytes.")
+ })
+
+ t.Run("JSON/Unmarshal", func(t *testing.T) {
+ err := json.Unmarshal([]byte("40.5"), &atom)
+ require.NoError(t, err, "json.Unmarshal errored unexpectedly.")
+ require.Equal(t, float32(40.5), atom.Load(),
+ "json.Unmarshal didn't set the correct value.")
+ })
+
+ t.Run("JSON/Unmarshal/Error", func(t *testing.T) {
+ err := json.Unmarshal([]byte("\"40.5\""), &atom)
+ require.Error(t, err, "json.Unmarshal didn't error as expected.")
+ assertErrorJSONUnmarshalType(t, err,
+ "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err)
+ })
+
+ t.Run("String", func(t *testing.T) {
+ assert.Equal(t, "42.5", NewFloat32(42.5).String(),
+ "String() returned an unexpected value.")
+ })
+}
diff --git a/atomic/float64.go b/atomic/float64.go
new file mode 100644
index 0000000..5bc11ca
--- /dev/null
+++ b/atomic/float64.go
@@ -0,0 +1,77 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+)
+
// Float64 is an atomic type-safe wrapper for float64 values.
//
// NOTE: this file is @generated by gen-atomicwrapper; change the
// generator template rather than editing this file by hand.
type Float64 struct {
	_ nocmp // disallow non-atomic comparison

	// The float's bit pattern is kept in a Uint64 because sync/atomic
	// only operates on integer words.
	v Uint64
}

var _zeroFloat64 float64

// NewFloat64 creates a new Float64.
func NewFloat64(val float64) *Float64 {
	x := &Float64{}
	// The zero-valued wrapper already holds 0.0, so a Store is only
	// needed for nonzero initial values.
	if val != _zeroFloat64 {
		x.Store(val)
	}
	return x
}

// Load atomically loads the wrapped float64.
func (x *Float64) Load() float64 {
	return math.Float64frombits(x.v.Load())
}

// Store atomically stores the passed float64.
func (x *Float64) Store(val float64) {
	x.v.Store(math.Float64bits(val))
}

// Swap atomically stores the given float64 and returns the old
// value.
func (x *Float64) Swap(val float64) (old float64) {
	return math.Float64frombits(x.v.Swap(math.Float64bits(val)))
}

// MarshalJSON encodes the wrapped float64 into JSON.
func (x *Float64) MarshalJSON() ([]byte, error) {
	return json.Marshal(x.Load())
}

// UnmarshalJSON decodes a float64 from JSON.
func (x *Float64) UnmarshalJSON(b []byte) error {
	var v float64
	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}
	x.Store(v)
	return nil
}
diff --git a/atomic/float64_ext.go b/atomic/float64_ext.go
new file mode 100644
index 0000000..48c52b0
--- /dev/null
+++ b/atomic/float64_ext.go
@@ -0,0 +1,76 @@
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "math"
+ "strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -swap -json -imports math -file=float64.go
+
+// Add atomically adds to the wrapped float64 and returns the new value.
+func (f *Float64) Add(delta float64) float64 {
+ for {
+ old := f.Load()
+ new := old + delta
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float64 and returns the new value.
+func (f *Float64) Sub(delta float64) float64 {
+ return f.Add(-delta)
+}
+
// CAS is an atomic compare-and-swap for float64 values.
//
// Deprecated: Use CompareAndSwap.
func (f *Float64) CAS(old, new float64) (swapped bool) {
	return f.CompareAndSwap(old, new)
}
+
// CompareAndSwap is an atomic compare-and-swap for float64 values.
//
// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN.
// (Equality is performed on the raw bit patterns, not with the float
// comparison operators.) This avoids typical CompareAndSwap loops from
// blocking forever, e.g.,
//
//	for {
//		old := atom.Load()
//		new = f(old)
//		if atom.CompareAndSwap(old, new) {
//			break
//		}
//	}
//
// If CompareAndSwap treated a stored NaN as unequal to a passed-in NaN
// (as Go's == operator does), the loop above would never terminate once
// the stored value became NaN.
func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) {
	return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new))
}
+
+// String encodes the wrapped value as a string.
+func (f *Float64) String() string {
+ // 'g' is the behavior for floats with %v.
+ return strconv.FormatFloat(f.Load(), 'g', -1, 64)
+}
diff --git a/atomic/float64_test.go b/atomic/float64_test.go
new file mode 100644
index 0000000..32fbc58
--- /dev/null
+++ b/atomic/float64_test.go
@@ -0,0 +1,73 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// TestFloat64 exercises the full Float64 API in one sequence. Later
// assertions (and the subtests) reuse the same atom, so the statement
// order here is load-bearing and must not be changed.
func TestFloat64(t *testing.T) {
	atom := NewFloat64(4.2)

	require.Equal(t, float64(4.2), atom.Load(), "Load didn't work.")

	require.True(t, atom.CAS(4.2, 0.5), "CAS didn't report a swap.")
	require.Equal(t, float64(0.5), atom.Load(), "CAS didn't set the correct value.")
	require.False(t, atom.CAS(0.0, 1.5), "CAS reported a swap.")

	atom.Store(42.0)
	require.Equal(t, float64(42.0), atom.Load(), "Store didn't set the correct value.")
	require.Equal(t, float64(42.5), atom.Add(0.5), "Add didn't work.")
	require.Equal(t, float64(42.0), atom.Sub(0.5), "Sub didn't work.")

	require.Equal(t, float64(42.0), atom.Swap(45.0), "Swap didn't return the old value.")
	require.Equal(t, float64(45.0), atom.Load(), "Swap didn't set the correct value.")

	// The JSON subtests share atom with the sequence above; Marshal
	// re-stores 42.5 first so the expected encoding is deterministic.
	t.Run("JSON/Marshal", func(t *testing.T) {
		atom.Store(42.5)
		bytes, err := json.Marshal(atom)
		require.NoError(t, err, "json.Marshal errored unexpectedly.")
		require.Equal(t, []byte("42.5"), bytes, "json.Marshal encoded the wrong bytes.")
	})

	t.Run("JSON/Unmarshal", func(t *testing.T) {
		err := json.Unmarshal([]byte("40.5"), &atom)
		require.NoError(t, err, "json.Unmarshal errored unexpectedly.")
		require.Equal(t, float64(40.5), atom.Load(),
			"json.Unmarshal didn't set the correct value.")
	})

	// A JSON string is the wrong type for a float wrapper and must
	// surface an *json.UnmarshalTypeError.
	t.Run("JSON/Unmarshal/Error", func(t *testing.T) {
		err := json.Unmarshal([]byte("\"40.5\""), &atom)
		require.Error(t, err, "json.Unmarshal didn't error as expected.")
		assertErrorJSONUnmarshalType(t, err,
			"json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err)
	})

	t.Run("String", func(t *testing.T) {
		assert.Equal(t, "42.5", NewFloat64(42.5).String(),
			"String() returned an unexpected value.")
	})
}
diff --git a/atomic/gen.go b/atomic/gen.go
new file mode 100644
index 0000000..1e9ef4f
--- /dev/null
+++ b/atomic/gen.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go
+//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
+//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
+//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go
+//go:generate bin/gen-atomicint -name=Uintptr -wrapped=uintptr -unsigned -file=uintptr.go
diff --git a/atomic/int32.go b/atomic/int32.go
new file mode 100644
index 0000000..5320eac
--- /dev/null
+++ b/atomic/int32.go
@@ -0,0 +1,109 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
// Int32 is an atomic wrapper around int32.
//
// NOTE: this file is @generated by gen-atomicint; change the generator
// template rather than editing this file by hand.
type Int32 struct {
	_ nocmp // disallow non-atomic comparison

	v int32
}

// NewInt32 creates a new Int32.
func NewInt32(val int32) *Int32 {
	return &Int32{v: val}
}

// Load atomically loads the wrapped value.
func (i *Int32) Load() int32 {
	return atomic.LoadInt32(&i.v)
}

// Add atomically adds to the wrapped int32 and returns the new value.
func (i *Int32) Add(delta int32) int32 {
	return atomic.AddInt32(&i.v, delta)
}

// Sub atomically subtracts from the wrapped int32 and returns the new value.
func (i *Int32) Sub(delta int32) int32 {
	// Negating the delta turns AddInt32 into a subtraction.
	return atomic.AddInt32(&i.v, -delta)
}

// Inc atomically increments the wrapped int32 and returns the new value.
func (i *Int32) Inc() int32 {
	return i.Add(1)
}

// Dec atomically decrements the wrapped int32 and returns the new value.
func (i *Int32) Dec() int32 {
	return i.Sub(1)
}

// CAS is an atomic compare-and-swap.
//
// Deprecated: Use CompareAndSwap.
func (i *Int32) CAS(old, new int32) (swapped bool) {
	return i.CompareAndSwap(old, new)
}

// CompareAndSwap is an atomic compare-and-swap.
func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) {
	return atomic.CompareAndSwapInt32(&i.v, old, new)
}

// Store atomically stores the passed value.
func (i *Int32) Store(val int32) {
	atomic.StoreInt32(&i.v, val)
}

// Swap atomically swaps the wrapped int32 and returns the old value.
func (i *Int32) Swap(val int32) (old int32) {
	return atomic.SwapInt32(&i.v, val)
}

// MarshalJSON encodes the wrapped int32 into JSON.
func (i *Int32) MarshalJSON() ([]byte, error) {
	return json.Marshal(i.Load())
}

// UnmarshalJSON decodes JSON into the wrapped int32.
func (i *Int32) UnmarshalJSON(b []byte) error {
	var v int32
	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}
	i.Store(v)
	return nil
}

// String encodes the wrapped value as a string.
func (i *Int32) String() string {
	v := i.Load()
	// Widen to int64 for FormatInt, which has no 32-bit variant.
	return strconv.FormatInt(int64(v), 10)
}
diff --git a/atomic/int32_test.go b/atomic/int32_test.go
new file mode 100644
index 0000000..9992251
--- /dev/null
+++ b/atomic/int32_test.go
@@ -0,0 +1,82 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// TestInt32 exercises the full Int32 API in one sequence. The
// assertions and the JSON subtests reuse the same atom, so statement
// order must not be changed.
func TestInt32(t *testing.T) {
	atom := NewInt32(42)

	require.Equal(t, int32(42), atom.Load(), "Load didn't work.")
	require.Equal(t, int32(46), atom.Add(4), "Add didn't work.")
	require.Equal(t, int32(44), atom.Sub(2), "Sub didn't work.")
	require.Equal(t, int32(45), atom.Inc(), "Inc didn't work.")
	require.Equal(t, int32(44), atom.Dec(), "Dec didn't work.")

	require.True(t, atom.CAS(44, 0), "CAS didn't report a swap.")
	require.Equal(t, int32(0), atom.Load(), "CAS didn't set the correct value.")

	require.Equal(t, int32(0), atom.Swap(1), "Swap didn't return the old value.")
	require.Equal(t, int32(1), atom.Load(), "Swap didn't set the correct value.")

	atom.Store(42)
	require.Equal(t, int32(42), atom.Load(), "Store didn't set the correct value.")

	t.Run("JSON/Marshal", func(t *testing.T) {
		bytes, err := json.Marshal(atom)
		require.NoError(t, err, "json.Marshal errored unexpectedly.")
		require.Equal(t, []byte("42"), bytes, "json.Marshal encoded the wrong bytes.")
	})

	t.Run("JSON/Unmarshal", func(t *testing.T) {
		err := json.Unmarshal([]byte("40"), &atom)
		require.NoError(t, err, "json.Unmarshal errored unexpectedly.")
		require.Equal(t, int32(40), atom.Load(), "json.Unmarshal didn't set the correct value.")
	})

	// A JSON string is the wrong type for an int wrapper and must
	// surface an *json.UnmarshalTypeError.
	t.Run("JSON/Unmarshal/Error", func(t *testing.T) {
		err := json.Unmarshal([]byte(`"40"`), &atom)
		require.Error(t, err, "json.Unmarshal didn't error as expected.")
		assertErrorJSONUnmarshalType(t, err,
			"json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err)
	})

	// Pin String() at both extremes of the int32 range.
	t.Run("String", func(t *testing.T) {
		t.Run("positive", func(t *testing.T) {
			atom := NewInt32(math.MaxInt32)
			assert.Equal(t, "2147483647", atom.String(),
				"String() returned an unexpected value.")
		})

		t.Run("negative", func(t *testing.T) {
			atom := NewInt32(math.MinInt32)
			assert.Equal(t, "-2147483648", atom.String(),
				"String() returned an unexpected value.")
		})
	})
}
diff --git a/atomic/int64.go b/atomic/int64.go
new file mode 100644
index 0000000..460821d
--- /dev/null
+++ b/atomic/int64.go
@@ -0,0 +1,109 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
// Int64 is an atomic wrapper around int64.
//
// NOTE: this file is @generated by gen-atomicint; change the generator
// template rather than editing this file by hand.
type Int64 struct {
	_ nocmp // disallow non-atomic comparison

	v int64
}

// NewInt64 creates a new Int64.
func NewInt64(val int64) *Int64 {
	return &Int64{v: val}
}

// Load atomically loads the wrapped value.
func (i *Int64) Load() int64 {
	return atomic.LoadInt64(&i.v)
}

// Add atomically adds to the wrapped int64 and returns the new value.
func (i *Int64) Add(delta int64) int64 {
	return atomic.AddInt64(&i.v, delta)
}

// Sub atomically subtracts from the wrapped int64 and returns the new value.
func (i *Int64) Sub(delta int64) int64 {
	// Negating the delta turns AddInt64 into a subtraction.
	return atomic.AddInt64(&i.v, -delta)
}

// Inc atomically increments the wrapped int64 and returns the new value.
func (i *Int64) Inc() int64 {
	return i.Add(1)
}

// Dec atomically decrements the wrapped int64 and returns the new value.
func (i *Int64) Dec() int64 {
	return i.Sub(1)
}

// CAS is an atomic compare-and-swap.
//
// Deprecated: Use CompareAndSwap.
func (i *Int64) CAS(old, new int64) (swapped bool) {
	return i.CompareAndSwap(old, new)
}

// CompareAndSwap is an atomic compare-and-swap.
func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) {
	return atomic.CompareAndSwapInt64(&i.v, old, new)
}

// Store atomically stores the passed value.
func (i *Int64) Store(val int64) {
	atomic.StoreInt64(&i.v, val)
}

// Swap atomically swaps the wrapped int64 and returns the old value.
func (i *Int64) Swap(val int64) (old int64) {
	return atomic.SwapInt64(&i.v, val)
}

// MarshalJSON encodes the wrapped int64 into JSON.
func (i *Int64) MarshalJSON() ([]byte, error) {
	return json.Marshal(i.Load())
}

// UnmarshalJSON decodes JSON into the wrapped int64.
func (i *Int64) UnmarshalJSON(b []byte) error {
	var v int64
	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}
	i.Store(v)
	return nil
}

// String encodes the wrapped value as a string.
func (i *Int64) String() string {
	v := i.Load()
	// int64(v) is a no-op here; the generator shares this line with
	// the 32-bit types.
	return strconv.FormatInt(int64(v), 10)
}
diff --git a/atomic/int64_test.go b/atomic/int64_test.go
new file mode 100644
index 0000000..ed5a104
--- /dev/null
+++ b/atomic/int64_test.go
@@ -0,0 +1,82 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// TestInt64 exercises the full Int64 API in one sequence. The
// assertions and the JSON subtests reuse the same atom, so statement
// order must not be changed.
func TestInt64(t *testing.T) {
	atom := NewInt64(42)

	require.Equal(t, int64(42), atom.Load(), "Load didn't work.")
	require.Equal(t, int64(46), atom.Add(4), "Add didn't work.")
	require.Equal(t, int64(44), atom.Sub(2), "Sub didn't work.")
	require.Equal(t, int64(45), atom.Inc(), "Inc didn't work.")
	require.Equal(t, int64(44), atom.Dec(), "Dec didn't work.")

	require.True(t, atom.CAS(44, 0), "CAS didn't report a swap.")
	require.Equal(t, int64(0), atom.Load(), "CAS didn't set the correct value.")

	require.Equal(t, int64(0), atom.Swap(1), "Swap didn't return the old value.")
	require.Equal(t, int64(1), atom.Load(), "Swap didn't set the correct value.")

	atom.Store(42)
	require.Equal(t, int64(42), atom.Load(), "Store didn't set the correct value.")

	t.Run("JSON/Marshal", func(t *testing.T) {
		bytes, err := json.Marshal(atom)
		require.NoError(t, err, "json.Marshal errored unexpectedly.")
		require.Equal(t, []byte("42"), bytes, "json.Marshal encoded the wrong bytes.")
	})

	t.Run("JSON/Unmarshal", func(t *testing.T) {
		err := json.Unmarshal([]byte("40"), &atom)
		require.NoError(t, err, "json.Unmarshal errored unexpectedly.")
		require.Equal(t, int64(40), atom.Load(), "json.Unmarshal didn't set the correct value.")
	})

	// A JSON string is the wrong type for an int wrapper and must
	// surface an *json.UnmarshalTypeError.
	t.Run("JSON/Unmarshal/Error", func(t *testing.T) {
		err := json.Unmarshal([]byte(`"40"`), &atom)
		require.Error(t, err, "json.Unmarshal didn't error as expected.")
		assertErrorJSONUnmarshalType(t, err,
			"json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err)
	})

	// Pin String() at both extremes of the int64 range.
	t.Run("String", func(t *testing.T) {
		t.Run("positive", func(t *testing.T) {
			atom := NewInt64(math.MaxInt64)
			assert.Equal(t, "9223372036854775807", atom.String(),
				"String() returned an unexpected value.")
		})

		t.Run("negative", func(t *testing.T) {
			atom := NewInt64(math.MinInt64)
			assert.Equal(t, "-9223372036854775808", atom.String(),
				"String() returned an unexpected value.")
		})
	})
}
diff --git a/atomic/internal/gen-atomicint/main.go b/atomic/internal/gen-atomicint/main.go
new file mode 100644
index 0000000..719fe9c
--- /dev/null
+++ b/atomic/internal/gen-atomicint/main.go
@@ -0,0 +1,116 @@
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// gen-atomicint generates an atomic wrapper around an integer type.
+//
+// gen-atomicint -name Int32 -wrapped int32 -file out.go
+//
+// The generated wrapper will use the functions in the sync/atomic package
+// named after the generated type.
+package main
+
+import (
+ "bytes"
+ "embed"
+ "errors"
+ "flag"
+ "fmt"
+ "go/format"
+ "io"
+ "log"
+ "os"
+ "text/template"
+ "time"
+)
+
+func main() {
+ log.SetFlags(0)
+ if err := run(os.Args[1:]); err != nil {
+ log.Fatalf("%+v", err)
+ }
+}
+
+func run(args []string) error {
+ var opts struct {
+ Name string
+ Wrapped string
+ File string
+ Unsigned bool
+ }
+
+ flag := flag.NewFlagSet("gen-atomicint", flag.ContinueOnError)
+
+ flag.StringVar(&opts.Name, "name", "", "name of the generated type (e.g. Int32)")
+ flag.StringVar(&opts.Wrapped, "wrapped", "", "name of the wrapped type (e.g. int32)")
+ flag.StringVar(&opts.File, "file", "", "output file path (default: stdout)")
+ flag.BoolVar(&opts.Unsigned, "unsigned", false, "whether the type is unsigned")
+
+ if err := flag.Parse(args); err != nil {
+ return err
+ }
+
+ if len(opts.Name) == 0 || len(opts.Wrapped) == 0 {
+ return errors.New("flags -name and -wrapped are required")
+ }
+
+ var w io.Writer = os.Stdout
+ if file := opts.File; len(file) > 0 {
+ f, err := os.Create(file)
+ if err != nil {
+ return fmt.Errorf("create %q: %v", file, err)
+ }
+ defer f.Close()
+
+ w = f
+ }
+
+ data := struct {
+ Name string
+ Wrapped string
+ Unsigned bool
+ ToYear int
+ }{
+ Name: opts.Name,
+ Wrapped: opts.Wrapped,
+ Unsigned: opts.Unsigned,
+ ToYear: time.Now().Year(),
+ }
+
+ var buff bytes.Buffer
+ if err := _tmpl.ExecuteTemplate(&buff, "wrapper.tmpl", data); err != nil {
+ return fmt.Errorf("render template: %v", err)
+ }
+
+ bs, err := format.Source(buff.Bytes())
+ if err != nil {
+ return fmt.Errorf("reformat source: %v", err)
+ }
+
+ io.WriteString(w, "// @generated Code generated by gen-atomicint.\n\n")
+ _, err = w.Write(bs)
+ return err
+}
+
var (
	// _tmplFS embeds every .tmpl file alongside this source file into
	// the binary. (The //go:embed directive must directly precede the
	// variable declaration.)
	//go:embed *.tmpl
	_tmplFS embed.FS

	// _tmpl parses the embedded templates once at init; Must panics on
	// a malformed template, which is acceptable for a code generator.
	_tmpl = template.Must(template.New("atomicint").ParseFS(_tmplFS, "*.tmpl"))
)
diff --git a/atomic/internal/gen-atomicint/wrapper.tmpl b/atomic/internal/gen-atomicint/wrapper.tmpl
new file mode 100644
index 0000000..502fadc
--- /dev/null
+++ b/atomic/internal/gen-atomicint/wrapper.tmpl
@@ -0,0 +1,117 @@
// Copyright (c) 2020-{{.ToYear}} Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package atomic

import (
	"encoding/json"
	"strconv"
	"sync/atomic"
)

// {{ .Name }} is an atomic wrapper around {{ .Wrapped }}.
type {{ .Name }} struct {
	_ nocmp // disallow non-atomic comparison

	v {{ .Wrapped }}
}

// New{{ .Name }} creates a new {{ .Name }}.
func New{{ .Name }}(val {{ .Wrapped }}) *{{ .Name }} {
	return &{{ .Name }}{v: val}
}

// Load atomically loads the wrapped value.
func (i *{{ .Name }}) Load() {{ .Wrapped }} {
	return atomic.Load{{ .Name }}(&i.v)
}

// Add atomically adds to the wrapped {{ .Wrapped }} and returns the new value.
func (i *{{ .Name }}) Add(delta {{ .Wrapped }}) {{ .Wrapped }} {
	return atomic.Add{{ .Name }}(&i.v, delta)
}

// Sub atomically subtracts from the wrapped {{ .Wrapped }} and returns the new value.
func (i *{{ .Name }}) Sub(delta {{ .Wrapped }}) {{ .Wrapped }} {
	return atomic.Add{{ .Name }}(&i.v,
	{{- if .Unsigned -}}
	^(delta - 1)
	{{- else -}}
	-delta
	{{- end -}}
	)
}

// Inc atomically increments the wrapped {{ .Wrapped }} and returns the new value.
func (i *{{ .Name }}) Inc() {{ .Wrapped }} {
	return i.Add(1)
}

// Dec atomically decrements the wrapped {{ .Wrapped }} and returns the new value.
func (i *{{ .Name }}) Dec() {{ .Wrapped }} {
	return i.Sub(1)
}

// CAS is an atomic compare-and-swap.
//
// Deprecated: Use CompareAndSwap.
func (i *{{ .Name }}) CAS(old, new {{ .Wrapped }}) (swapped bool) {
	return i.CompareAndSwap(old, new)
}

// CompareAndSwap is an atomic compare-and-swap.
func (i *{{ .Name }}) CompareAndSwap(old, new {{ .Wrapped }}) (swapped bool) {
	return atomic.CompareAndSwap{{ .Name }}(&i.v, old, new)
}

// Store atomically stores the passed value.
func (i *{{ .Name }}) Store(val {{ .Wrapped }}) {
	atomic.Store{{ .Name }}(&i.v, val)
}

// Swap atomically swaps the wrapped {{ .Wrapped }} and returns the old value.
func (i *{{ .Name }}) Swap(val {{ .Wrapped }}) (old {{ .Wrapped }}) {
	return atomic.Swap{{ .Name }}(&i.v, val)
}

// MarshalJSON encodes the wrapped {{ .Wrapped }} into JSON.
func (i *{{ .Name }}) MarshalJSON() ([]byte, error) {
	return json.Marshal(i.Load())
}

// UnmarshalJSON decodes JSON into the wrapped {{ .Wrapped }}.
func (i *{{ .Name }}) UnmarshalJSON(b []byte) error {
	var v {{ .Wrapped }}
	if err := json.Unmarshal(b, &v); err != nil {
		return err
	}
	i.Store(v)
	return nil
}

// String encodes the wrapped value as a string.
func (i *{{ .Name }}) String() string {
	v := i.Load()
	{{ if .Unsigned -}}
	return strconv.FormatUint(uint64(v), 10)
	{{- else -}}
	return strconv.FormatInt(int64(v), 10)
	{{- end }}
}
diff --git a/atomic/internal/gen-atomicwrapper/main.go b/atomic/internal/gen-atomicwrapper/main.go
new file mode 100644
index 0000000..26683cd
--- /dev/null
+++ b/atomic/internal/gen-atomicwrapper/main.go
@@ -0,0 +1,203 @@
+// Copyright (c) 2020-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// gen-atomicwrapper generates wrapper types around other atomic types.
+//
+// It supports plugging in functions which convert the value inside the atomic
+// type to the user-facing value. For example,
+//
+// Given, atomic.Value and the functions,
+//
+//	func packString(string) interface{}
+//	func unpackString(interface{}) string
+//
+// We can run the following command:
+//
+// gen-atomicwrapper -name String -wrapped Value \
+// -type string -pack fromString -unpack tostring
+//
+// This will generate approximately,
+//
+// type String struct{ v Value }
+//
+// func (s *String) Load() string {
+//	  return unpackString(s.v.Load())
+// }
+//
+//	func (s *String) Store(val string) {
+//	  s.v.Store(packString(val))
+// }
+//
+// The packing/unpacking logic allows the stored value to be different from
+// the user-facing value.
+package main
+
+import (
+ "bytes"
+ "embed"
+ "errors"
+ "flag"
+ "fmt"
+ "go/format"
+ "io"
+ "log"
+ "os"
+ "sort"
+ "strings"
+ "text/template"
+ "time"
+)
+
+func main() {
+ log.SetFlags(0)
+ if err := run(os.Args[1:]); err != nil {
+ log.Fatalf("%+v", err)
+ }
+}
+
+type stringList []string
+
+func (sl *stringList) String() string {
+ return strings.Join(*sl, ",")
+}
+
+func (sl *stringList) Set(s string) error {
+ for _, i := range strings.Split(s, ",") {
+ *sl = append(*sl, strings.TrimSpace(i))
+ }
+ return nil
+}
+
+func run(args []string) error {
+ var opts struct {
+ Name string
+ Wrapped string
+ Type string
+
+ Imports stringList
+ Pack, Unpack string
+
+ CAS bool
+ CompareAndSwap bool
+ Swap bool
+ JSON bool
+
+ File string
+ ToYear int
+ }
+
+ opts.ToYear = time.Now().Year()
+
+ fl := flag.NewFlagSet("gen-atomicwrapper", flag.ContinueOnError)
+
+ // Required flags
+ fl.StringVar(&opts.Name, "name", "",
+ "name of the generated type (e.g. Duration)")
+ fl.StringVar(&opts.Wrapped, "wrapped", "",
+ "name of the wrapped atomic (e.g. Int64)")
+ fl.StringVar(&opts.Type, "type", "",
+ "name of the type exposed by the atomic (e.g. time.Duration)")
+
+ // Optional flags
+ fl.Var(&opts.Imports, "imports",
+ "comma separated list of imports to add")
+ fl.StringVar(&opts.Pack, "pack", "",
+ "function to transform values with before storage")
+ fl.StringVar(&opts.Unpack, "unpack", "",
+ "function to reverse packing on loading")
+ fl.StringVar(&opts.File, "file", "",
+ "output file path (default: stdout)")
+
+ // Switches for individual methods. Underlying atomics must support
+ // these.
+ fl.BoolVar(&opts.CAS, "cas", false,
+ "generate a deprecated `CAS(old, new) bool` method; requires -pack")
+ fl.BoolVar(&opts.CompareAndSwap, "compareandswap", false,
+ "generate a `CompareAndSwap(old, new) bool` method; requires -pack")
+ fl.BoolVar(&opts.Swap, "swap", false,
+ "generate a `Swap(new) old` method; requires -pack and -unpack")
+ fl.BoolVar(&opts.JSON, "json", false,
+		"generate `MarshalJSON/UnmarshalJSON` methods")
+
+ if err := fl.Parse(args); err != nil {
+ return err
+ }
+
+ if len(opts.Name) == 0 ||
+ len(opts.Wrapped) == 0 ||
+ len(opts.Type) == 0 ||
+ len(opts.Pack) == 0 ||
+ len(opts.Unpack) == 0 {
+ return errors.New("flags -name, -wrapped, -pack, -unpack and -type are required")
+ }
+
+ if opts.CAS {
+ opts.CompareAndSwap = true
+ }
+
+ var w io.Writer = os.Stdout
+ if file := opts.File; len(file) > 0 {
+ f, err := os.Create(file)
+ if err != nil {
+ return fmt.Errorf("create %q: %v", file, err)
+ }
+ defer f.Close()
+
+ w = f
+ }
+
+ // Import encoding/json if needed.
+ if opts.JSON {
+ found := false
+ for _, imp := range opts.Imports {
+ if imp == "encoding/json" {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ opts.Imports = append(opts.Imports, "encoding/json")
+ }
+ }
+
+ sort.Strings(opts.Imports)
+
+ var buff bytes.Buffer
+ if err := _tmpl.ExecuteTemplate(&buff, "wrapper.tmpl", opts); err != nil {
+ return fmt.Errorf("render template: %v", err)
+ }
+
+ bs, err := format.Source(buff.Bytes())
+ if err != nil {
+ return fmt.Errorf("reformat source: %v", err)
+ }
+
+ io.WriteString(w, "// @generated Code generated by gen-atomicwrapper.\n\n")
+ _, err = w.Write(bs)
+ return err
+}
+
+var (
+ //go:embed *.tmpl
+ _tmplFS embed.FS
+
+ _tmpl = template.Must(template.New("atomicwrapper").ParseFS(_tmplFS, "*.tmpl"))
+)
diff --git a/atomic/internal/gen-atomicwrapper/wrapper.tmpl b/atomic/internal/gen-atomicwrapper/wrapper.tmpl
new file mode 100644
index 0000000..6ed6a9e
--- /dev/null
+++ b/atomic/internal/gen-atomicwrapper/wrapper.tmpl
@@ -0,0 +1,120 @@
+// Copyright (c) 2020-{{.ToYear}} Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+{{ with .Imports }}
+import (
+ {{ range . -}}
+ {{ printf "%q" . }}
+ {{ end }}
+)
+{{ end }}
+
+// {{ .Name }} is an atomic type-safe wrapper for {{ .Type }} values.
+type {{ .Name }} struct{
+ _ nocmp // disallow non-atomic comparison
+
+ v {{ .Wrapped }}
+}
+
+var _zero{{ .Name }} {{ .Type }}
+
+
+// New{{ .Name }} creates a new {{ .Name }}.
+func New{{ .Name }}(val {{ .Type }}) *{{ .Name }} {
+ x := &{{ .Name }}{}
+ if val != _zero{{ .Name }} {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped {{ .Type }}.
+func (x *{{ .Name }}) Load() {{ .Type }} {
+ {{ if .Unpack -}}
+ return {{ .Unpack }}(x.v.Load())
+ {{- else -}}
+ if v := x.v.Load(); v != nil {
+ return v.({{ .Type }})
+ }
+ return _zero{{ .Name }}
+ {{- end }}
+}
+
+// Store atomically stores the passed {{ .Type }}.
+func (x *{{ .Name }}) Store(val {{ .Type }}) {
+ x.v.Store({{ .Pack }}(val))
+}
+
+{{ if .CAS -}}
+ // CAS is an atomic compare-and-swap for {{ .Type }} values.
+ //
+ // Deprecated: Use CompareAndSwap.
+ func (x *{{ .Name }}) CAS(old, new {{ .Type }}) (swapped bool) {
+ return x.CompareAndSwap(old, new)
+ }
+{{- end }}
+
+{{ if .CompareAndSwap -}}
+ // CompareAndSwap is an atomic compare-and-swap for {{ .Type }} values.
+ func (x *{{ .Name }}) CompareAndSwap(old, new {{ .Type }}) (swapped bool) {
+ {{ if eq .Wrapped "Value" -}}
+ if x.v.CompareAndSwap({{ .Pack }}(old), {{ .Pack }}(new)) {
+ return true
+ }
+
+ if old == _zero{{ .Name }} {
+ // If the old value is the empty value, then it's possible the
+ // underlying Value hasn't been set and is nil, so retry with nil.
+ return x.v.CompareAndSwap(nil, {{ .Pack }}(new))
+ }
+
+ return false
+ {{- else -}}
+ return x.v.CompareAndSwap({{ .Pack }}(old), {{ .Pack }}(new))
+ {{- end }}
+ }
+{{- end }}
+
+{{ if .Swap -}}
+ // Swap atomically stores the given {{ .Type }} and returns the old
+ // value.
+ func (x *{{ .Name }}) Swap(val {{ .Type }}) (old {{ .Type }}) {
+ return {{ .Unpack }}(x.v.Swap({{ .Pack }}(val)))
+ }
+{{- end }}
+
+{{ if .JSON -}}
+ // MarshalJSON encodes the wrapped {{ .Type }} into JSON.
+ func (x *{{ .Name }}) MarshalJSON() (by, er) {
+ return json.Marshal(x.Load())
+ }
+
+ // UnmarshalJSON decodes a {{ .Type }} from JSON.
+ func (x *{{ .Name }}) UnmarshalJSON(b by) er {
+ var v {{ .Type }}
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+ }
+{{- end }}
diff --git a/atomic/nocmp.go b/atomic/nocmp.go
new file mode 100644
index 0000000..54b7417
--- /dev/null
+++ b/atomic/nocmp.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// nocmp is an uncomparable struct. Embed this inside another struct to make
+// it uncomparable.
+//
+// type Foo struct {
+// nocmp
+// // ...
+// }
+//
+// This DOES NOT:
+//
+// - Disallow shallow copies of structs
+// - Disallow comparison of pointers to uncomparable structs
+type nocmp [0]func()
diff --git a/atomic/nocmp_test.go b/atomic/nocmp_test.go
new file mode 100644
index 0000000..8719421
--- /dev/null
+++ b/atomic/nocmp_test.go
@@ -0,0 +1,164 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "bytes"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNocmpComparability(t *testing.T) {
+ tests := []struct {
+ desc string
+ give interface{}
+ comparable bool
+ }{
+ {
+ desc: "nocmp struct",
+ give: nocmp{},
+ },
+ {
+ desc: "struct with nocmp embedded",
+ give: struct{ nocmp }{},
+ },
+ {
+ desc: "pointer to struct with nocmp embedded",
+ give: &struct{ nocmp }{},
+ comparable: true,
+ },
+
+ // All exported types must be uncomparable.
+ {desc: "Bool", give: Bool{}},
+ {desc: "Duration", give: Duration{}},
+ {desc: "Error", give: Error{}},
+ {desc: "Float64", give: Float64{}},
+ {desc: "Int32", give: Int32{}},
+ {desc: "Int64", give: Int64{}},
+ {desc: "String", give: String{}},
+ {desc: "Uint32", give: Uint32{}},
+ {desc: "Uint64", give: Uint64{}},
+ {desc: "Value", give: Value{}},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.desc, func(t *testing.T) {
+ typ := reflect.TypeOf(tt.give)
+ assert.Equalf(t, tt.comparable, typ.Comparable(),
+ "type %v comparablity mismatch", typ)
+ })
+ }
+}
+
+// nocmp must not add to the size of a struct in-memory.
+func TestNocmpSize(t *testing.T) {
+ type x struct{ _ int }
+
+ before := reflect.TypeOf(x{}).Size()
+
+ type y struct {
+ _ nocmp
+ _ x
+ }
+
+ after := reflect.TypeOf(y{}).Size()
+
+ assert.Equal(t, before, after,
+ "expected nocmp to have no effect on struct size")
+}
+
+// This test will fail to compile if we disallow copying of nocmp.
+//
+// We need to allow this so that users can do,
+//
+// var x atomic.Int32
+// x = atomic.NewInt32(1)
+func TestNocmpCopy(t *testing.T) {
+ type foo struct{ _ nocmp }
+
+ t.Run("struct copy", func(t *testing.T) {
+ a := foo{}
+ b := a
+ _ = b // unused
+ })
+
+ t.Run("pointer copy", func(t *testing.T) {
+ a := &foo{}
+ b := *a
+ _ = b // unused
+ })
+}
+
+// Fake go.mod with no dependencies.
+const _exampleGoMod = `module example.com/nocmp`
+
+const _badFile = `package atomic
+
+import "fmt"
+
+type Int64 struct {
+ nocmp
+
+ v int64
+}
+
+func shouldNotCompile() {
+ var x, y Int64
+ fmt.Println(x == y)
+}
+`
+
+func TestNocmpIntegration(t *testing.T) {
+ tempdir := t.TempDir()
+
+ nocmp, err := os.ReadFile("nocmp.go")
+ require.NoError(t, err, "unable to read nocmp.go")
+
+ require.NoError(t,
+ os.WriteFile(filepath.Join(tempdir, "go.mod"), []byte(_exampleGoMod), 0o644),
+ "unable to write go.mod")
+
+ require.NoError(t,
+ os.WriteFile(filepath.Join(tempdir, "nocmp.go"), nocmp, 0o644),
+ "unable to write nocmp.go")
+
+ require.NoError(t,
+ os.WriteFile(filepath.Join(tempdir, "bad.go"), []byte(_badFile), 0o644),
+ "unable to write bad.go")
+
+ var stderr bytes.Buffer
+ cmd := exec.Command("go", "build")
+ cmd.Dir = tempdir
+ // Create a minimal build environment with only HOME set so that "go
+ // build" has somewhere to put the cache and other Go files in.
+ cmd.Env = []string{"HOME=" + filepath.Join(tempdir, "home")}
+ cmd.Stderr = &stderr
+ require.Error(t, cmd.Run(), "bad.go must not compile")
+
+ assert.Contains(t, stderr.String(),
+ "struct containing nocmp cannot be compared")
+}
diff --git a/atomic/pointer_test.go b/atomic/pointer_test.go
new file mode 100644
index 0000000..837bd45
--- /dev/null
+++ b/atomic/pointer_test.go
@@ -0,0 +1,100 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.18
+// +build go1.18
+
+package atomic
+
+//
+// import (
+// "fmt"
+// "testing"
+//
+// "github.com/stretchr/testify/require"
+// )
+//
+// func TestPointer(t *testing.T) {
+// type foo struct{ v int }
+//
+// i := foo{42}
+// j := foo{0}
+// k := foo{1}
+//
+// tests := []struct {
+// desc string
+// newAtomic func() *Pointer[foo]
+// initial *foo
+// }{
+// {
+// desc: "New",
+// newAtomic: func() *Pointer[foo] {
+// return NewPointer(&i)
+// },
+// initial: &i,
+// },
+// {
+// desc: "New/nil",
+// newAtomic: func() *Pointer[foo] {
+// return NewPointer[foo](nil)
+// },
+// initial: nil,
+// },
+// {
+// desc: "zero value",
+// newAtomic: func() *Pointer[foo] {
+// var p Pointer[foo]
+// return &p
+// },
+// initial: nil,
+// },
+// }
+//
+// for _, tt := range tests {
+// t.Run(tt.desc, func(t *testing.T) {
+// t.Run("Load", func(t *testing.T) {
+// atom := tt.newAtomic()
+// require.Equal(t, tt.initial, atom.Load(), "Load should report nil.")
+// })
+//
+// t.Run("Swap", func(t *testing.T) {
+// atom := tt.newAtomic()
+// require.Equal(t, tt.initial, atom.Swap(&k), "Swap didn't return the old value.")
+// require.Equal(t, &k, atom.Load(), "Swap didn't set the correct value.")
+// })
+//
+// t.Run("CAS", func(t *testing.T) {
+// atom := tt.newAtomic()
+// require.True(t, atom.CompareAndSwap(tt.initial, &j), "CAS didn't report a swap.")
+// require.Equal(t, &j, atom.Load(), "CAS didn't set the correct value.")
+// })
+//
+// t.Run("Store", func(t *testing.T) {
+// atom := tt.newAtomic()
+// atom.Store(&i)
+// require.Equal(t, &i, atom.Load(), "Store didn't set the correct value.")
+// })
+// t.Run("String", func(t *testing.T) {
+// atom := tt.newAtomic()
+// require.Equal(t, fmt.Sprint(tt.initial), atom.String(), "String did not return the correct value.")
+// })
+// })
+// }
+// }
diff --git a/atomic/stress_test.go b/atomic/stress_test.go
new file mode 100644
index 0000000..0ac7ac5
--- /dev/null
+++ b/atomic/stress_test.go
@@ -0,0 +1,289 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "errors"
+ "math"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+const (
+ _parallelism = 4
+ _iterations = 1000
+)
+
+var _stressTests = map[string]func() func(){
+ "i32/std": stressStdInt32,
+ "i32": stressInt32,
+ "i64/std": stressStdInt64,
+ "i64": stressInt64,
+ "u32/std": stressStdUint32,
+ "u32": stressUint32,
+ "u64/std": stressStdUint64,
+ "u64": stressUint64,
+ "f64": stressFloat64,
+ "bool": stressBool,
+ "string": stressString,
+ "duration": stressDuration,
+ "error": stressError,
+ "time": stressTime,
+}
+
+func TestStress(t *testing.T) {
+ for name, ff := range _stressTests {
+ t.Run(name, func(t *testing.T) {
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(_parallelism))
+
+ start := make(chan struct{})
+ var wg sync.WaitGroup
+ wg.Add(_parallelism)
+ f := ff()
+ for i := 0; i < _parallelism; i++ {
+ go func() {
+ defer wg.Done()
+ <-start
+ for j := 0; j < _iterations; j++ {
+ f()
+ }
+ }()
+ }
+ close(start)
+ wg.Wait()
+ })
+ }
+}
+
+func BenchmarkStress(b *testing.B) {
+ for name, ff := range _stressTests {
+ b.Run(name, func(b *testing.B) {
+ f := ff()
+
+ b.Run("serial", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ f()
+ }
+ })
+
+ b.Run("parallel", func(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ f()
+ }
+ })
+ })
+ })
+ }
+}
+
+func stressStdInt32() func() {
+ var atom int32
+ return func() {
+ atomic.LoadInt32(&atom)
+ atomic.AddInt32(&atom, 1)
+ atomic.AddInt32(&atom, -2)
+ atomic.AddInt32(&atom, 1)
+ atomic.AddInt32(&atom, -1)
+ atomic.CompareAndSwapInt32(&atom, 1, 0)
+ atomic.SwapInt32(&atom, 5)
+ atomic.StoreInt32(&atom, 1)
+ }
+}
+
+func stressInt32() func() {
+ var atom Int32
+ return func() {
+ atom.Load()
+ atom.Add(1)
+ atom.Sub(2)
+ atom.Inc()
+ atom.Dec()
+ atom.CAS(1, 0)
+ atom.Swap(5)
+ atom.Store(1)
+ }
+}
+
+func stressStdInt64() func() {
+ var atom int64
+ return func() {
+ atomic.LoadInt64(&atom)
+ atomic.AddInt64(&atom, 1)
+ atomic.AddInt64(&atom, -2)
+ atomic.AddInt64(&atom, 1)
+ atomic.AddInt64(&atom, -1)
+ atomic.CompareAndSwapInt64(&atom, 1, 0)
+ atomic.SwapInt64(&atom, 5)
+ atomic.StoreInt64(&atom, 1)
+ }
+}
+
+func stressInt64() func() {
+ var atom Int64
+ return func() {
+ atom.Load()
+ atom.Add(1)
+ atom.Sub(2)
+ atom.Inc()
+ atom.Dec()
+ atom.CAS(1, 0)
+ atom.Swap(5)
+ atom.Store(1)
+ }
+}
+
+func stressStdUint32() func() {
+ var atom uint32
+ return func() {
+ atomic.LoadUint32(&atom)
+ atomic.AddUint32(&atom, 1)
+ // Adding `MaxUint32` is the same as subtracting 1
+ atomic.AddUint32(&atom, math.MaxUint32-1)
+ atomic.AddUint32(&atom, 1)
+ atomic.AddUint32(&atom, math.MaxUint32)
+ atomic.CompareAndSwapUint32(&atom, 1, 0)
+ atomic.SwapUint32(&atom, 5)
+ atomic.StoreUint32(&atom, 1)
+ }
+}
+
+func stressUint32() func() {
+ var atom Uint32
+ return func() {
+ atom.Load()
+ atom.Add(1)
+ atom.Sub(2)
+ atom.Inc()
+ atom.Dec()
+ atom.CAS(1, 0)
+ atom.Swap(5)
+ atom.Store(1)
+ }
+}
+
+func stressStdUint64() func() {
+ var atom uint64
+ return func() {
+ atomic.LoadUint64(&atom)
+ atomic.AddUint64(&atom, 1)
+ // Adding `MaxUint64` is the same as subtracting 1
+ atomic.AddUint64(&atom, math.MaxUint64-1)
+ atomic.AddUint64(&atom, 1)
+ atomic.AddUint64(&atom, math.MaxUint64)
+ atomic.CompareAndSwapUint64(&atom, 1, 0)
+ atomic.SwapUint64(&atom, 5)
+ atomic.StoreUint64(&atom, 1)
+ }
+}
+
+func stressUint64() func() {
+ var atom Uint64
+ return func() {
+ atom.Load()
+ atom.Add(1)
+ atom.Sub(2)
+ atom.Inc()
+ atom.Dec()
+ atom.CAS(1, 0)
+ atom.Swap(5)
+ atom.Store(1)
+ }
+}
+
+func stressFloat64() func() {
+ var atom Float64
+ return func() {
+ atom.Load()
+ atom.CAS(1.0, 0.1)
+ atom.Add(1.1)
+ atom.Sub(0.2)
+ atom.Store(1.0)
+ }
+}
+
+func stressBool() func() {
+ var atom Bool
+ return func() {
+ atom.Load()
+ atom.Store(false)
+ atom.Swap(true)
+ atom.CAS(true, false)
+ atom.CAS(true, false)
+ atom.Load()
+ atom.Toggle()
+ atom.Toggle()
+ }
+}
+
+func stressString() func() {
+ var atom String
+ return func() {
+ atom.Load()
+ atom.Store("abc")
+ atom.Load()
+ atom.Store("def")
+ atom.Load()
+ atom.Store("")
+ }
+}
+
+func stressDuration() func() {
+ var atom = NewDuration(0)
+ return func() {
+ atom.Load()
+ atom.Add(1)
+ atom.Sub(2)
+ atom.CAS(1, 0)
+ atom.Swap(5)
+ atom.Store(1)
+ }
+}
+
+func stressError() func() {
+ var atom = NewError(nil)
+ var err1 = errors.New("err1")
+ var err2 = errors.New("err2")
+ return func() {
+ atom.Load()
+ atom.Store(err1)
+ atom.Load()
+ atom.Store(err2)
+ atom.Load()
+ atom.Store(nil)
+ }
+}
+
+func stressTime() func() {
+ var atom = NewTime(time.Date(2021, 6, 17, 9, 0, 0, 0, time.UTC))
+ var dayAgo = time.Date(2021, 6, 16, 9, 0, 0, 0, time.UTC)
+ var weekAgo = time.Date(2021, 6, 10, 9, 0, 0, 0, time.UTC)
+ return func() {
+ atom.Load()
+ atom.Store(dayAgo)
+ atom.Load()
+ atom.Store(weekAgo)
+ atom.Store(time.Time{})
+ }
+}
diff --git a/atomic/string.go b/atomic/string.go
new file mode 100644
index 0000000..061466c
--- /dev/null
+++ b/atomic/string.go
@@ -0,0 +1,72 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// String is an atomic type-safe wrapper for string values.
+type String struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroString string
+
+// NewString creates a new String.
+func NewString(val string) *String {
+ x := &String{}
+ if val != _zeroString {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped string.
+func (x *String) Load() string {
+ return unpackString(x.v.Load())
+}
+
+// Store atomically stores the passed string.
+func (x *String) Store(val string) {
+ x.v.Store(packString(val))
+}
+
+// CompareAndSwap is an atomic compare-and-swap for string values.
+func (x *String) CompareAndSwap(old, new string) (swapped bool) {
+ if x.v.CompareAndSwap(packString(old), packString(new)) {
+ return true
+ }
+
+ if old == _zeroString {
+ // If the old value is the empty value, then it's possible the
+ // underlying Value hasn't been set and is nil, so retry with nil.
+ return x.v.CompareAndSwap(nil, packString(new))
+ }
+
+ return false
+}
+
+// Swap atomically stores the given string and returns the old
+// value.
+func (x *String) Swap(val string) (old string) {
+ return unpackString(x.v.Swap(packString(val)))
+}
diff --git a/atomic/string_ext.go b/atomic/string_ext.go
new file mode 100644
index 0000000..019109c
--- /dev/null
+++ b/atomic/string_ext.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped Value -pack packString -unpack unpackString -compareandswap -swap -file=string.go
+
+func packString(s string) interface{} {
+ return s
+}
+
+func unpackString(v interface{}) string {
+ if s, ok := v.(string); ok {
+ return s
+ }
+ return ""
+}
+
+// String returns the wrapped value.
+func (s *String) String() string {
+ return s.Load()
+}
+
+// MarshalText encodes the wrapped string into a textual form.
+//
+// This makes it encodable as JSON, YAML, XML, and more.
+func (s *String) MarshalText() ([]byte, error) {
+ return []byte(s.Load()), nil
+}
+
+// UnmarshalText decodes text and replaces the wrapped string with it.
+//
+// This makes it decodable from JSON, YAML, XML, and more.
+func (s *String) UnmarshalText(b []byte) error {
+ s.Store(string(b))
+ return nil
+}
diff --git a/atomic/string_test.go b/atomic/string_test.go
new file mode 100644
index 0000000..6163113
--- /dev/null
+++ b/atomic/string_test.go
@@ -0,0 +1,170 @@
+// Copyright (c) 2016-2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "encoding/xml"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestStringNoInitialValue(t *testing.T) {
+ atom := &String{}
+ require.Equal(t, "", atom.Load(), "Initial value should be blank string")
+}
+
// TestString exercises the String wrapper end to end. The assertions
// form an order-dependent sequence: atom is reassigned and mutated as
// the test progresses, and later subtests rely on the value left by
// earlier steps.
func TestString(t *testing.T) {
	atom := NewString("")
	require.Equal(t, "", atom.Load(), "Expected Load to return initialized value")

	atom.Store("abc")
	require.Equal(t, "abc", atom.Load(), "Unexpected value after Store")

	atom = NewString("bcd")
	require.Equal(t, "bcd", atom.Load(), "Expected Load to return initialized value")

	// JSON round-tripping; atom currently holds "bcd".
	t.Run("JSON/Marshal", func(t *testing.T) {
		bytes, err := json.Marshal(atom)
		require.NoError(t, err, "json.Marshal errored unexpectedly.")
		require.Equal(t, []byte(`"bcd"`), bytes, "json.Marshal encoded the wrong bytes.")
	})

	t.Run("JSON/Unmarshal", func(t *testing.T) {
		err := json.Unmarshal([]byte(`"abc"`), &atom)
		require.NoError(t, err, "json.Unmarshal errored unexpectedly.")
		require.Equal(t, "abc", atom.Load(), "json.Unmarshal didn't set the correct value.")
	})

	t.Run("JSON/Unmarshal/Error", func(t *testing.T) {
		// A bare number is not a JSON string, so decoding must fail.
		err := json.Unmarshal([]byte("42"), &atom)
		require.Error(t, err, "json.Unmarshal didn't error as expected.")
		assertErrorJSONUnmarshalType(t, err,
			"json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err)
	})

	atom = NewString("foo")

	// XML round-tripping goes through MarshalText/UnmarshalText; atom
	// currently holds "foo".
	t.Run("XML/Marshal", func(t *testing.T) {
		bytes, err := xml.Marshal(atom)
		require.NoError(t, err, "xml.Marshal errored unexpectedly.")
		require.Equal(t, []byte("foo"), bytes,
			"xml.Marshal encoded the wrong bytes.")
	})

	t.Run("XML/Unmarshal", func(t *testing.T) {
		err := xml.Unmarshal([]byte("bar"), &atom)
		require.NoError(t, err, "xml.Unmarshal errored unexpectedly.")
		require.Equal(t, "bar", atom.Load(), "xml.Unmarshal didn't set the correct value.")
	})

	t.Run("String", func(t *testing.T) {
		atom := NewString("foo")
		assert.Equal(t, "foo", atom.String(),
			"String() returned an unexpected value.")
	})

	t.Run("CompareAndSwap", func(t *testing.T) {
		atom := NewString("foo")

		// CAS with a non-matching old value must leave the value intact.
		swapped := atom.CompareAndSwap("bar", "bar")
		require.False(t, swapped, "swapped isn't false")
		require.Equal(t, atom.Load(), "foo", "Load returned wrong value")

		swapped = atom.CompareAndSwap("foo", "bar")
		require.True(t, swapped, "swapped isn't true")
		require.Equal(t, atom.Load(), "bar", "Load returned wrong value")
	})

	t.Run("Swap", func(t *testing.T) {
		atom := NewString("foo")

		old := atom.Swap("bar")
		require.Equal(t, old, "foo", "Swap returned wrong value")
		require.Equal(t, atom.Load(), "bar", "Load returned wrong value")
	})
}
+
+func TestString_InitializeDefault(t *testing.T) {
+ tests := []struct {
+ msg string
+ newStr func() *String
+ }{
+ {
+ msg: "Uninitialized",
+ newStr: func() *String {
+ var s String
+ return &s
+ },
+ },
+ {
+ msg: "NewString with default",
+ newStr: func() *String {
+ return NewString("")
+ },
+ },
+ {
+ msg: "String swapped with default",
+ newStr: func() *String {
+ s := NewString("initial")
+ s.Swap("")
+ return s
+ },
+ },
+ {
+ msg: "String CAS'd with default",
+ newStr: func() *String {
+ s := NewString("initial")
+ s.CompareAndSwap("initial", "")
+ return s
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.msg, func(t *testing.T) {
+ t.Run("MarshalText", func(t *testing.T) {
+ str := tt.newStr()
+ text, err := str.MarshalText()
+ require.NoError(t, err)
+ assert.Equal(t, "", string(text), "")
+ })
+
+ t.Run("String", func(t *testing.T) {
+ str := tt.newStr()
+ assert.Equal(t, "", str.String())
+ })
+
+ t.Run("CompareAndSwap", func(t *testing.T) {
+ str := tt.newStr()
+ require.True(t, str.CompareAndSwap("", "new"))
+ assert.Equal(t, "new", str.Load())
+ })
+
+ t.Run("Swap", func(t *testing.T) {
+ str := tt.newStr()
+ assert.Equal(t, "", str.Swap("new"))
+ })
+ })
+ }
+}
diff --git a/atomic/time.go b/atomic/time.go
new file mode 100644
index 0000000..cc2a230
--- /dev/null
+++ b/atomic/time.go
@@ -0,0 +1,55 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "time"
+)
+
+// Time is an atomic type-safe wrapper for time.Time values.
+type Time struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroTime time.Time
+
+// NewTime creates a new Time.
+func NewTime(val time.Time) *Time {
+ x := &Time{}
+ if val != _zeroTime {
+ x.Store(val)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped time.Time.
+func (x *Time) Load() time.Time {
+ return unpackTime(x.v.Load())
+}
+
+// Store atomically stores the passed time.Time.
+func (x *Time) Store(val time.Time) {
+ x.v.Store(packTime(val))
+}
diff --git a/atomic/time_ext.go b/atomic/time_ext.go
new file mode 100644
index 0000000..1e3dc97
--- /dev/null
+++ b/atomic/time_ext.go
@@ -0,0 +1,36 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "time"
+
+//go:generate bin/gen-atomicwrapper -name=Time -type=time.Time -wrapped=Value -pack=packTime -unpack=unpackTime -imports time -file=time.go
+
+func packTime(t time.Time) interface{} {
+ return t
+}
+
+func unpackTime(v interface{}) time.Time {
+ if t, ok := v.(time.Time); ok {
+ return t
+ }
+ return time.Time{}
+}
diff --git a/atomic/time_test.go b/atomic/time_test.go
new file mode 100644
index 0000000..83ac022
--- /dev/null
+++ b/atomic/time_test.go
@@ -0,0 +1,86 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTime(t *testing.T) {
+ start := time.Date(2021, 6, 17, 9, 10, 0, 0, time.UTC)
+ atom := NewTime(start)
+
+ require.Equal(t, start, atom.Load(), "Load didn't work")
+ require.Equal(t, time.Time{}, NewTime(time.Time{}).Load(), "Default time value is wrong")
+}
+
+func TestTimeLocation(t *testing.T) {
+ // Check TZ data hasn't been lost from load/store.
+ ny, err := time.LoadLocation("America/New_York")
+ require.NoError(t, err, "Failed to load location")
+ nyTime := NewTime(time.Date(2021, 1, 1, 0, 0, 0, 0, ny))
+
+ var atom Time
+ atom.Store(nyTime.Load())
+
+ assert.Equal(t, ny, atom.Load().Location(), "Location information is wrong")
+}
+
+func TestLargeTime(t *testing.T) {
+ // Check "large/small" time that are beyond int64 ns
+ // representation (< year 1678 or > year 2262) can be
+ // correctly load/store'd.
+ t.Parallel()
+
+ t.Run("future", func(t *testing.T) {
+ future := time.Date(2262, 12, 31, 0, 0, 0, 0, time.UTC)
+ atom := NewTime(future)
+ dayAfterFuture := atom.Load().AddDate(0, 1, 0)
+
+ atom.Store(dayAfterFuture)
+ assert.Equal(t, 2263, atom.Load().Year())
+ })
+
+ t.Run("past", func(t *testing.T) {
+ past := time.Date(1678, 1, 1, 0, 0, 0, 0, time.UTC)
+ atom := NewTime(past)
+ dayBeforePast := atom.Load().AddDate(0, -1, 0)
+
+ atom.Store(dayBeforePast)
+ assert.Equal(t, 1677, atom.Load().Year())
+ })
+}
+
+func TestMonotonic(t *testing.T) {
+ before := NewTime(time.Now())
+ time.Sleep(15 * time.Millisecond)
+ after := NewTime(time.Now())
+
+ // try loading/storing before and test monotonic clock value hasn't been lost
+ bt := before.Load()
+ before.Store(bt)
+ d := after.Load().Sub(before.Load())
+ assert.True(t, 15 <= d.Milliseconds())
+}
diff --git a/atomic/tools/tools.go b/atomic/tools/tools.go
new file mode 100644
index 0000000..6c8e7e8
--- /dev/null
+++ b/atomic/tools/tools.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
//go:build tools
// +build tools

// Package tools pins development-tool dependencies. The "tools" build
// tag is never set in normal builds, so these blank imports exist only
// so the module tracks the tools' versions — presumably for `go mod
// tidy` / `go install`; confirm against the project's Makefile.
package tools

import (
	// Tools used during development.
	_ "golang.org/x/lint/golint"
	_ "honnef.co/go/tools/cmd/staticcheck"
)
diff --git a/atomic/uint32.go b/atomic/uint32.go
new file mode 100644
index 0000000..4adc294
--- /dev/null
+++ b/atomic/uint32.go
@@ -0,0 +1,109 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uint32 is an atomic wrapper around uint32.
+type Uint32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uint32
+}
+
+// NewUint32 creates a new Uint32.
+func NewUint32(val uint32) *Uint32 {
+ return &Uint32{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint32) Load() uint32 {
+ return atomic.LoadUint32(&i.v)
+}
+
+// Add atomically adds to the wrapped uint32 and returns the new value.
+func (i *Uint32) Add(delta uint32) uint32 {
+ return atomic.AddUint32(&i.v, delta)
+}
+
+// Sub atomically subtracts from the wrapped uint32 and returns the new value.
+func (i *Uint32) Sub(delta uint32) uint32 {
+ return atomic.AddUint32(&i.v, ^(delta - 1))
+}
+
+// Inc atomically increments the wrapped uint32 and returns the new value.
+func (i *Uint32) Inc() uint32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint32 and returns the new value.
+func (i *Uint32) Dec() uint32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Uint32) CAS(old, new uint32) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
+ return atomic.CompareAndSwapUint32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint32) Store(val uint32) {
+ atomic.StoreUint32(&i.v, val)
+}
+
+// Swap atomically swaps the wrapped uint32 and returns the old value.
+func (i *Uint32) Swap(val uint32) (old uint32) {
+ return atomic.SwapUint32(&i.v, val)
+}
+
+// MarshalJSON encodes the wrapped uint32 into JSON.
+func (i *Uint32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uint32.
+func (i *Uint32) UnmarshalJSON(b []byte) error {
+ var v uint32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uint32) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/atomic/uint32_test.go b/atomic/uint32_test.go
new file mode 100644
index 0000000..8bfcda2
--- /dev/null
+++ b/atomic/uint32_test.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// TestUint32 exercises the full Uint32 API. The require calls form an
// order-dependent chain: each assertion checks the value left behind by
// the previous operation.
func TestUint32(t *testing.T) {
	atom := NewUint32(42)

	// Arithmetic chain: 42 +4 -2 +1 -1 => 44.
	require.Equal(t, uint32(42), atom.Load(), "Load didn't work.")
	require.Equal(t, uint32(46), atom.Add(4), "Add didn't work.")
	require.Equal(t, uint32(44), atom.Sub(2), "Sub didn't work.")
	require.Equal(t, uint32(45), atom.Inc(), "Inc didn't work.")
	require.Equal(t, uint32(44), atom.Dec(), "Dec didn't work.")

	require.True(t, atom.CAS(44, 0), "CAS didn't report a swap.")
	require.Equal(t, uint32(0), atom.Load(), "CAS didn't set the correct value.")

	require.Equal(t, uint32(0), atom.Swap(1), "Swap didn't return the old value.")
	require.Equal(t, uint32(1), atom.Load(), "Swap didn't set the correct value.")

	atom.Store(42)
	require.Equal(t, uint32(42), atom.Load(), "Store didn't set the correct value.")

	// JSON round-tripping; atom currently holds 42.
	t.Run("JSON/Marshal", func(t *testing.T) {
		bytes, err := json.Marshal(atom)
		require.NoError(t, err, "json.Marshal errored unexpectedly.")
		require.Equal(t, []byte("42"), bytes, "json.Marshal encoded the wrong bytes.")
	})

	t.Run("JSON/Unmarshal", func(t *testing.T) {
		err := json.Unmarshal([]byte("40"), &atom)
		require.NoError(t, err, "json.Unmarshal errored unexpectedly.")
		require.Equal(t, uint32(40), atom.Load(),
			"json.Unmarshal didn't set the correct value.")
	})

	t.Run("JSON/Unmarshal/Error", func(t *testing.T) {
		// A quoted number is a JSON string and must fail to decode.
		err := json.Unmarshal([]byte(`"40"`), &atom)
		require.Error(t, err, "json.Unmarshal didn't error as expected.")
		assertErrorJSONUnmarshalType(t, err,
			"json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err)
	})

	t.Run("String", func(t *testing.T) {
		// Use an integer with the signed bit set. If we're converting
		// incorrectly, we'll get a negative value here.
		atom := NewUint32(math.MaxUint32)
		assert.Equal(t, "4294967295", atom.String(),
			"String() returned an unexpected value.")
	})
}
diff --git a/atomic/uint64.go b/atomic/uint64.go
new file mode 100644
index 0000000..0e2eddb
--- /dev/null
+++ b/atomic/uint64.go
@@ -0,0 +1,109 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uint64 is an atomic wrapper around uint64.
+type Uint64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uint64
+}
+
+// NewUint64 creates a new Uint64.
+func NewUint64(val uint64) *Uint64 {
+ return &Uint64{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint64) Load() uint64 {
+ return atomic.LoadUint64(&i.v)
+}
+
+// Add atomically adds to the wrapped uint64 and returns the new value.
+func (i *Uint64) Add(delta uint64) uint64 {
+ return atomic.AddUint64(&i.v, delta)
+}
+
+// Sub atomically subtracts from the wrapped uint64 and returns the new value.
+func (i *Uint64) Sub(delta uint64) uint64 {
+ return atomic.AddUint64(&i.v, ^(delta - 1))
+}
+
+// Inc atomically increments the wrapped uint64 and returns the new value.
+func (i *Uint64) Inc() uint64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint64 and returns the new value.
+func (i *Uint64) Dec() uint64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Uint64) CAS(old, new uint64) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
+ return atomic.CompareAndSwapUint64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint64) Store(val uint64) {
+ atomic.StoreUint64(&i.v, val)
+}
+
+// Swap atomically swaps the wrapped uint64 and returns the old value.
+func (i *Uint64) Swap(val uint64) (old uint64) {
+ return atomic.SwapUint64(&i.v, val)
+}
+
+// MarshalJSON encodes the wrapped uint64 into JSON.
+func (i *Uint64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uint64.
+func (i *Uint64) UnmarshalJSON(b []byte) error {
+ var v uint64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uint64) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/atomic/uint64_test.go b/atomic/uint64_test.go
new file mode 100644
index 0000000..1141e5a
--- /dev/null
+++ b/atomic/uint64_test.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// TestUint64 exercises the full Uint64 API. The require calls form an
// order-dependent chain: each assertion checks the value left behind by
// the previous operation.
func TestUint64(t *testing.T) {
	atom := NewUint64(42)

	// Arithmetic chain: 42 +4 -2 +1 -1 => 44.
	require.Equal(t, uint64(42), atom.Load(), "Load didn't work.")
	require.Equal(t, uint64(46), atom.Add(4), "Add didn't work.")
	require.Equal(t, uint64(44), atom.Sub(2), "Sub didn't work.")
	require.Equal(t, uint64(45), atom.Inc(), "Inc didn't work.")
	require.Equal(t, uint64(44), atom.Dec(), "Dec didn't work.")

	require.True(t, atom.CAS(44, 0), "CAS didn't report a swap.")
	require.Equal(t, uint64(0), atom.Load(), "CAS didn't set the correct value.")

	require.Equal(t, uint64(0), atom.Swap(1), "Swap didn't return the old value.")
	require.Equal(t, uint64(1), atom.Load(), "Swap didn't set the correct value.")

	atom.Store(42)
	require.Equal(t, uint64(42), atom.Load(), "Store didn't set the correct value.")

	// JSON round-tripping; atom currently holds 42.
	t.Run("JSON/Marshal", func(t *testing.T) {
		bytes, err := json.Marshal(atom)
		require.NoError(t, err, "json.Marshal errored unexpectedly.")
		require.Equal(t, []byte("42"), bytes, "json.Marshal encoded the wrong bytes.")
	})

	t.Run("JSON/Unmarshal", func(t *testing.T) {
		err := json.Unmarshal([]byte("40"), &atom)
		require.NoError(t, err, "json.Unmarshal errored unexpectedly.")
		require.Equal(t, uint64(40), atom.Load(),
			"json.Unmarshal didn't set the correct value.")
	})

	t.Run("JSON/Unmarshal/Error", func(t *testing.T) {
		// A quoted number is a JSON string and must fail to decode.
		err := json.Unmarshal([]byte(`"40"`), &atom)
		require.Error(t, err, "json.Unmarshal didn't error as expected.")
		assertErrorJSONUnmarshalType(t, err,
			"json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err)
	})

	t.Run("String", func(t *testing.T) {
		// Use an integer with the signed bit set. If we're converting
		// incorrectly, we'll get a negative value here.
		atom := NewUint64(math.MaxUint64)
		assert.Equal(t, "18446744073709551615", atom.String(),
			"String() returned an unexpected value.")
	})
}
diff --git a/atomic/uintptr.go b/atomic/uintptr.go
new file mode 100644
index 0000000..7d5b000
--- /dev/null
+++ b/atomic/uintptr.go
@@ -0,0 +1,109 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uintptr is an atomic wrapper around uintptr.
+type Uintptr struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uintptr
+}
+
+// NewUintptr creates a new Uintptr.
+func NewUintptr(val uintptr) *Uintptr {
+ return &Uintptr{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uintptr) Load() uintptr {
+ return atomic.LoadUintptr(&i.v)
+}
+
+// Add atomically adds to the wrapped uintptr and returns the new value.
+func (i *Uintptr) Add(delta uintptr) uintptr {
+ return atomic.AddUintptr(&i.v, delta)
+}
+
+// Sub atomically subtracts from the wrapped uintptr and returns the new value.
+func (i *Uintptr) Sub(delta uintptr) uintptr {
+ return atomic.AddUintptr(&i.v, ^(delta - 1))
+}
+
+// Inc atomically increments the wrapped uintptr and returns the new value.
+func (i *Uintptr) Inc() uintptr {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uintptr and returns the new value.
+func (i *Uintptr) Dec() uintptr {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap.
+func (i *Uintptr) CAS(old, new uintptr) (swapped bool) {
+ return i.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
+ return atomic.CompareAndSwapUintptr(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uintptr) Store(val uintptr) {
+ atomic.StoreUintptr(&i.v, val)
+}
+
+// Swap atomically swaps the wrapped uintptr and returns the old value.
+func (i *Uintptr) Swap(val uintptr) (old uintptr) {
+ return atomic.SwapUintptr(&i.v, val)
+}
+
+// MarshalJSON encodes the wrapped uintptr into JSON.
+func (i *Uintptr) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uintptr.
+func (i *Uintptr) UnmarshalJSON(b []byte) error {
+ var v uintptr
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uintptr) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/atomic/uintptr_test.go b/atomic/uintptr_test.go
new file mode 100644
index 0000000..7d8ac39
--- /dev/null
+++ b/atomic/uintptr_test.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestUintptr(t *testing.T) {
+ atom := NewUintptr(42)
+
+ require.Equal(t, uintptr(42), atom.Load(), "Load didn't work.")
+ require.Equal(t, uintptr(46), atom.Add(4), "Add didn't work.")
+ require.Equal(t, uintptr(44), atom.Sub(2), "Sub didn't work.")
+ require.Equal(t, uintptr(45), atom.Inc(), "Inc didn't work.")
+ require.Equal(t, uintptr(44), atom.Dec(), "Dec didn't work.")
+
+ require.True(t, atom.CAS(44, 0), "CAS didn't report a swap.")
+ require.Equal(t, uintptr(0), atom.Load(), "CAS didn't set the correct value.")
+
+ require.Equal(t, uintptr(0), atom.Swap(1), "Swap didn't return the old value.")
+ require.Equal(t, uintptr(1), atom.Load(), "Swap didn't set the correct value.")
+
+ atom.Store(42)
+ require.Equal(t, uintptr(42), atom.Load(), "Store didn't set the correct value.")
+
+ t.Run("JSON/Marshal", func(t *testing.T) {
+ bytes, err := json.Marshal(atom)
+ require.NoError(t, err, "json.Marshal errored unexpectedly.")
+ require.Equal(t, []byte("42"), bytes, "json.Marshal encoded the wrong bytes.")
+ })
+
+ t.Run("JSON/Unmarshal", func(t *testing.T) {
+ err := json.Unmarshal([]byte("40"), &atom)
+ require.NoError(t, err, "json.Unmarshal errored unexpectedly.")
+ require.Equal(t, uintptr(40), atom.Load(),
+ "json.Unmarshal didn't set the correct value.")
+ })
+
+ t.Run("JSON/Unmarshal/Error", func(t *testing.T) {
+ err := json.Unmarshal([]byte(`"40"`), &atom)
+ require.Error(t, err, "json.Unmarshal didn't error as expected.")
+ assertErrorJSONUnmarshalType(t, err,
+ "json.Unmarshal failed with unexpected error %v, want UnmarshalTypeError.", err)
+ })
+
+ t.Run("String", func(t *testing.T) {
+ // Use an integer with the signed bit set. If we're converting
+ // incorrectly, we'll get a negative value here.
+ // Use an int variable, as constants cause compile-time overflows.
+ negative := -1
+ atom := NewUintptr(uintptr(negative))
+ want := fmt.Sprint(uintptr(negative))
+ assert.Equal(t, want, atom.String(),
+ "String() returned an unexpected value.")
+ })
+}
diff --git a/atomic/unsafe_pointer.go b/atomic/unsafe_pointer.go
new file mode 100644
index 0000000..34868ba
--- /dev/null
+++ b/atomic/unsafe_pointer.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2021-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// UnsafePointer is an atomic wrapper around unsafe.Pointer.
+type UnsafePointer struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v unsafe.Pointer
+}
+
+// NewUnsafePointer creates a new UnsafePointer.
+func NewUnsafePointer(val unsafe.Pointer) *UnsafePointer {
+ return &UnsafePointer{v: val}
+}
+
+// Load atomically loads the wrapped value.
+func (p *UnsafePointer) Load() unsafe.Pointer {
+ return atomic.LoadPointer(&p.v)
+}
+
+// Store atomically stores the passed value.
+func (p *UnsafePointer) Store(val unsafe.Pointer) {
+ atomic.StorePointer(&p.v, val)
+}
+
+// Swap atomically swaps the wrapped unsafe.Pointer and returns the old value.
+func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) {
+ return atomic.SwapPointer(&p.v, val)
+}
+
+// CAS is an atomic compare-and-swap.
+//
+// Deprecated: Use CompareAndSwap
+func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) {
+ return p.CompareAndSwap(old, new)
+}
+
+// CompareAndSwap is an atomic compare-and-swap.
+func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) {
+ return atomic.CompareAndSwapPointer(&p.v, old, new)
+}
diff --git a/atomic/unsafe_pointer_test.go b/atomic/unsafe_pointer_test.go
new file mode 100644
index 0000000..f0193df
--- /dev/null
+++ b/atomic/unsafe_pointer_test.go
@@ -0,0 +1,83 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "testing"
+ "unsafe"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestUnsafePointer(t *testing.T) {
+ i := int64(42)
+ j := int64(0)
+ k := int64(1)
+
+ tests := []struct {
+ desc string
+ newAtomic func() *UnsafePointer
+ initial unsafe.Pointer
+ }{
+ {
+ desc: "non-empty",
+ newAtomic: func() *UnsafePointer {
+ return NewUnsafePointer(unsafe.Pointer(&i))
+ },
+ initial: unsafe.Pointer(&i),
+ },
+ {
+ desc: "nil",
+ newAtomic: func() *UnsafePointer {
+ var p UnsafePointer
+ return &p
+ },
+ initial: unsafe.Pointer(nil),
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.desc, func(t *testing.T) {
+ t.Run("Load", func(t *testing.T) {
+ atom := tt.newAtomic()
+ require.Equal(t, tt.initial, atom.Load(), "Load should report nil.")
+ })
+
+ t.Run("Swap", func(t *testing.T) {
+ atom := tt.newAtomic()
+ require.Equal(t, tt.initial, atom.Swap(unsafe.Pointer(&k)), "Swap didn't return the old value.")
+ require.Equal(t, unsafe.Pointer(&k), atom.Load(), "Swap didn't set the correct value.")
+ })
+
+ t.Run("CAS", func(t *testing.T) {
+ atom := tt.newAtomic()
+ require.True(t, atom.CAS(tt.initial, unsafe.Pointer(&j)), "CAS didn't report a swap.")
+ require.Equal(t, unsafe.Pointer(&j), atom.Load(), "CAS didn't set the correct value.")
+ })
+
+ t.Run("Store", func(t *testing.T) {
+ atom := tt.newAtomic()
+ atom.Store(unsafe.Pointer(&i))
+ require.Equal(t, unsafe.Pointer(&i), atom.Load(), "Store didn't set the correct value.")
+ })
+ })
+ }
+}
diff --git a/atomic/value.go b/atomic/value.go
new file mode 100644
index 0000000..52caedb
--- /dev/null
+++ b/atomic/value.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "sync/atomic"
+
+// Value shadows the type of the same name from sync/atomic
+// https://godoc.org/sync/atomic#Value
+type Value struct {
+ _ nocmp // disallow non-atomic comparison
+
+ atomic.Value
+}
diff --git a/atomic/value_test.go b/atomic/value_test.go
new file mode 100644
index 0000000..bb9f301
--- /dev/null
+++ b/atomic/value_test.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestValue(t *testing.T) {
+ var v Value
+ assert.Nil(t, v.Load(), "initial Value is not nil")
+
+ v.Store(42)
+ assert.Equal(t, 42, v.Load())
+
+ v.Store(84)
+ assert.Equal(t, 84, v.Load())
+
+ assert.Panics(t, func() { v.Store("foo") })
+}
diff --git a/auth/nip42.go b/auth/nip42.go
index 8b0a7c7..6a22e17 100644
--- a/auth/nip42.go
+++ b/auth/nip42.go
@@ -4,13 +4,13 @@ import (
"crypto/rand"
"encoding/base64"
"net/url"
+ "orly.dev/chk"
+ "orly.dev/log"
"strings"
"time"
- "orly.dev/chk"
"orly.dev/event"
"orly.dev/kind"
- "orly.dev/log"
"orly.dev/tag"
"orly.dev/tags"
"orly.dev/timestamp"
@@ -26,7 +26,7 @@ func GenerateChallenge() (b []byte) {
}
// CreateUnsigned creates an event which should be sent via an "AUTH" command.
-// If the authentication succeeds, the user will be authenticated as pubkey.
+// If the authentication succeeds, the user will be authenticated as a pubkey.
func CreateUnsigned(pubkey, challenge []byte, relayURL string) (ev *event.E) {
return &event.E{
Pubkey: pubkey,
@@ -41,13 +41,19 @@ func CreateUnsigned(pubkey, challenge []byte, relayURL string) (ev *event.E) {
// helper function for ValidateAuthEvent.
func parseURL(input string) (*url.URL, error) {
- return url.Parse(strings.ToLower(strings.TrimSuffix(input, "/")))
+ return url.Parse(
+ strings.ToLower(
+ strings.TrimSuffix(input, "/"),
+ ),
+ )
}
var (
- // ChallengeTag is the tag for the challenge in a NIP-42 auth event (prevents relay attacks).
+ // ChallengeTag is the tag for the challenge in a NIP-42 auth event
+ // (prevents relay attacks).
ChallengeTag = []byte("challenge")
- // RelayTag is is the relay tag for a NIP-42 auth event (prevents cross-server attacks).
+ // RelayTag is the relay tag for a NIP-42 auth event (prevents cross-server
+ // attacks).
RelayTag = []byte("relay")
)
diff --git a/auth/nip42_test.go b/auth/nip42_test.go
index 2e7f137..51e1507 100644
--- a/auth/nip42_test.go
+++ b/auth/nip42_test.go
@@ -1,9 +1,9 @@
package auth
import (
+ "orly.dev/chk"
"testing"
- "orly.dev/chk"
"orly.dev/p256k"
)
diff --git a/bech32encoding/keys.go b/bech32encoding/keys.go
index 1fc56a3..31d6d64 100644
--- a/bech32encoding/keys.go
+++ b/bech32encoding/keys.go
@@ -2,14 +2,14 @@ package bech32encoding
import (
"bytes"
-
"orly.dev/chk"
+ "orly.dev/log"
+
btcec "orly.dev/ec"
"orly.dev/ec/bech32"
"orly.dev/ec/schnorr"
"orly.dev/ec/secp256k1"
"orly.dev/hex"
- "orly.dev/log"
)
const (
diff --git a/bech32encoding/keys_test.go b/bech32encoding/keys_test.go
index 2e7d5e9..55d914c 100644
--- a/bech32encoding/keys_test.go
+++ b/bech32encoding/keys_test.go
@@ -4,9 +4,9 @@ import (
"bytes"
"crypto/rand"
"encoding/hex"
+ "orly.dev/chk"
"testing"
- "orly.dev/chk"
"orly.dev/ec/schnorr"
"orly.dev/ec/secp256k1"
)
diff --git a/bech32encoding/nip19.go b/bech32encoding/nip19.go
index 74a6327..74bfe8b 100644
--- a/bech32encoding/nip19.go
+++ b/bech32encoding/nip19.go
@@ -3,18 +3,18 @@ package bech32encoding
import (
"bytes"
"encoding/binary"
+ "orly.dev/chk"
+ "orly.dev/errorf"
+ "orly.dev/log"
- "github.com/minio/sha256-simd"
"orly.dev/bech32encoding/pointers"
"orly.dev/bech32encoding/tlv"
- "orly.dev/chk"
"orly.dev/ec/bech32"
"orly.dev/ec/schnorr"
- "orly.dev/errorf"
"orly.dev/eventid"
"orly.dev/hex"
"orly.dev/kind"
- "orly.dev/log"
+ "orly.dev/sha256"
)
var (
@@ -223,8 +223,6 @@ func EncodeEvent(
pubkey := make([]byte, schnorr.PubKeyBytesLen)
if _, err = hex.DecBytes(pubkey, author); len(pubkey) == 32 {
tlv.WriteEntry(buf, tlv.Author, pubkey)
- } else if chk.E(err) {
- return
}
var bits5 []byte
if bits5, err = bech32.ConvertBits(buf.Bytes(), 8, 5, true); chk.D(err) {
diff --git a/bech32encoding/nip19_test.go b/bech32encoding/nip19_test.go
index 39fb5b0..7c2ffdd 100644
--- a/bech32encoding/nip19_test.go
+++ b/bech32encoding/nip19_test.go
@@ -2,15 +2,15 @@ package bech32encoding
import (
"bytes"
+ "orly.dev/chk"
+ "orly.dev/log"
"reflect"
"testing"
"orly.dev/bech32encoding/pointers"
- "orly.dev/chk"
"orly.dev/eventid"
"orly.dev/hex"
"orly.dev/kind"
- "orly.dev/log"
)
func TestEncodeNpub(t *testing.T) {
diff --git a/bech32encoding/tlv/tlv.go b/bech32encoding/tlv/tlv.go
index 95e068e..838a877 100644
--- a/bech32encoding/tlv/tlv.go
+++ b/bech32encoding/tlv/tlv.go
@@ -5,8 +5,6 @@ package tlv
import (
"io"
-
- "orly.dev/chk"
)
const (
@@ -20,17 +18,17 @@ const (
func ReadEntry(buf io.Reader) (typ uint8, value []byte) {
var err error
t := make([]byte, 1)
- if _, err = buf.Read(t); chk.E(err) {
+ if _, err = buf.Read(t); err != nil {
return
}
typ = t[0]
l := make([]byte, 1)
- if _, err = buf.Read(l); chk.E(err) {
+ if _, err = buf.Read(l); err != nil {
return
}
length := int(l[0])
value = make([]byte, length)
- if _, err = buf.Read(value); chk.E(err) {
+ if _, err = buf.Read(value); err != nil {
// nil value signals end of data or error
value = nil
}
diff --git a/bin/binary.go b/bin/binary.go
new file mode 100644
index 0000000..52b0f91
--- /dev/null
+++ b/bin/binary.go
@@ -0,0 +1,40 @@
+package bin
+
+import (
+ "encoding/binary"
+ "orly.dev/errorf"
+)
+
+// Append is a straight append with length prefix.
+func Append(dst, src []byte) (b []byte) {
+ // if an allocation or two may occur, do it all in one immediately.
+ minLen := len(src) + len(dst) + binary.MaxVarintLen32
+ if cap(dst) < minLen {
+ tmp := make([]byte, 0, minLen)
+ dst = append(tmp, dst...)
+ }
+ dst = binary.AppendUvarint(dst, uint64(len(src)))
+ dst = append(dst, src...)
+ b = dst
+ return
+}
+
+// Extract decodes the length-prefixed value at the front of b and returns it
+// along with the remaining data from the provided slice.
+func Extract(b []byte) (str, rem []byte, err error) {
+	l, read := binary.Uvarint(b)
+	if read < 1 {
+		err = errorf.E("failed to read uvarint length prefix")
+		return
+	}
+	if len(b) < int(l)+read {
+		err = errorf.E(
+			"insufficient data in buffer, require %d have %d",
+			int(l)+read, len(b),
+		)
+		return
+	}
+	str = b[read : read+int(l)]
+	rem = b[read+int(l):]
+	return
+}
diff --git a/cmd/doc.go b/cmd/doc.go
new file mode 100644
index 0000000..0fac62d
--- /dev/null
+++ b/cmd/doc.go
@@ -0,0 +1,2 @@
+// Package cmd contains the executable applications of the realy suite.
+package cmd
diff --git a/cmd/lerproxy/LICENSE b/cmd/lerproxy/LICENSE
new file mode 100644
index 0000000..3b0fd64
--- /dev/null
+++ b/cmd/lerproxy/LICENSE
@@ -0,0 +1,22 @@
+MIT License
+
+Copyright (c) 2016 Artyom Pervukhin
+Copyright (c) 2024 mleku npub1fjqqy4a93z5zsjwsfxqhc2764kvykfdyttvldkkkdera8dr78vhsmmleku
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/cmd/lerproxy/README.md b/cmd/lerproxy/README.md
new file mode 100644
index 0000000..cce1ceb
--- /dev/null
+++ b/cmd/lerproxy/README.md
@@ -0,0 +1,125 @@
+# lerproxy
+
+Command lerproxy implements https reverse proxy with automatic LetsEncrypt and your own TLS
+certificates for multiple hostnames/backends including a static filesystem directory, nostr
+DNS verification [NIP-05](https://github.com/nostr-protocol/nips/blob/master/05.md) hosting.
+
+## Install
+
+ go install lerproxy.mleku.dev@latest
+
+## Run
+
+```
+Usage: lerproxy.mleku.dev [--listen LISTEN] [--map MAP] [--rewrites REWRITES] [--cachedir CACHEDIR] [--hsts] [--email EMAIL] [--http HTTP] [--rto RTO] [--wto WTO] [--idle IDLE] [--cert CERT]
+
+Options:
+ --listen LISTEN, -l LISTEN
+ address to listen at [default: :https]
+ --map MAP, -m MAP file with host/backend mapping [default: mapping.txt]
+ --rewrites REWRITES, -r REWRITES [default: rewrites.txt]
+ --cachedir CACHEDIR, -c CACHEDIR
+ path to directory to cache key and certificates [default: /var/cache/letsencrypt]
+ --hsts, -h add Strict-Transport-Security header
+ --email EMAIL, -e EMAIL
+ contact email address presented to letsencrypt CA
+ --http HTTP optional address to serve http-to-https redirects and ACME http-01 challenge responses [default: :http]
+ --rto RTO, -r RTO maximum duration before timing out read of the request [default: 1m]
+ --wto WTO, -w WTO maximum duration before timing out write of the response [default: 5m]
+ --idle IDLE, -i IDLE how long idle connection is kept before closing (set rto, wto to 0 to use this)
+ --cert CERT certificates and the domain they match: eg: mleku.dev:/path/to/cert - this will indicate to load two, one with extension .key and one with .crt, each expected to be PEM encoded TLS private and public keys, respectively
+ --help, -h display this help and exit
+```
+
+`mapping.txt` contains host-to-backend mapping, where backend can be specified
+as:
+
+* http/https url for http(s) connections to backend *without* passing "Host"
+ header from request;
+* host:port for http over TCP connections to backend;
+* absolute path for http over unix socket connections;
+* @name for http over abstract unix socket connections (linux only);
+* absolute path with a trailing slash to serve files from a given directory;
+* path to a nostr.json file containing a
+ [nip-05](https://github.com/nostr-protocol/nips/blob/master/05.md) and
+ hosting it at `https://example.com/.well-known/nostr.json`
+* using the prefix `git+` and a full web address path after it, generate html
+ with the necessary meta tags that indicate to the `go` tool when fetching
+ dependencies from the address found after the `+`.
+* in the launch parameters for `lerproxy` you can now add any number of `--cert` parameters with
+ the domain (including for wildcards), and the path to the `.crt`/`.key` files:
+
+ lerproxy.mleku.dev --cert :/path/to/TLS_cert
+
+ this will then, if found, load and parse the TLS certificate and secret key if the suffix of
+ the domain matches. The certificate path is expanded to two files with the above filename
+ extensions and become active in place of the LetsEncrypt certificates
+
+ > Note that the match is greedy, so you can explicitly separately give a subdomain
+ certificate and it will be selected even if there is a wildcard that also matches.
+
+# IMPORTANT
+
+With Comodo SSL (sectigo RSA) certificates you also need to append the intermediate certificate
+to the `.crt` file in order to get it to work properly with openssl library based tools like
+wget, curl and the go tool, which is quite important if you want to do subdomains on a wildcard
+certificate.
+
+Probably the same applies to some of the other certificate authorities. If you sometimes get
+issues with CLI tools refusing to accept these certificates on your web server or other, this
+may be the problem.
+
+## example mapping.txt
+
+ nostr.example.com: /path/to/nostr.json
+ subdomain1.example.com: 127.0.0.1:8080
+ subdomain2.example.com: /var/run/http.socket
+ subdomain3.example.com: @abstractUnixSocket
+ uploads.example.com: https://uploads-bucket.s3.amazonaws.com
+ # this is a comment, it can only start on a new line
+ static.example.com: /var/www/
+ awesome-go-project.example.com: git+https://github.com/crappy-name/crappy-go-project-name
+
+Note that when `@name` backend is specified, connection to abstract unix socket
+is made in a manner compatible with some other implementations like uWSGI, that
+calculate addrlen including trailing zero byte despite [documentation not
+requiring that](http://man7.org/linux/man-pages/man7/unix.7.html). It won't
+work with other implementations that calculate addrlen differently (i.e. by
+taking into account only `strlen(addr)` like Go, or even `UNIX_PATH_MAX`).
+
+## systemd service file
+
+```
+[Unit]
+Description=lerproxy
+
+[Service]
+Type=simple
+User=username
+ExecStart=/usr/local/bin/lerproxy.mleku.dev -m /path/to/mapping.txt -l xxx.xxx.xxx.xxx:443 --http xxx.xxx.xxx.6:80 -m /path/to/mapping.txt -e email@example.com -c /path/to/letsencrypt/cache --cert example.com:/path/to/tls/certs
+Restart=on-failure
+Wants=network-online.target
+After=network.target network-online.target wg-quick@wg0.service
+
+[Install]
+WantedBy=multi-user.target
+```
+
+If your VPS has wireguard running and you want to be able to host services from the other end of
+a tunnel, such as your dev machine (something I do for nostr relay development) add the
+`wg-quick@wg0` or whatever wg-quick configuration you are using to ensure when it boots,
+`lerproxy` does not run until the tunnel is active.
+
+## privileged port binding
+
+The simplest way to allow `lerproxy` to bind to port 80 and 443 is as follows:
+
+ setcap 'cap_net_bind_service=+ep' /path/to/lerproxy.mleku.dev
+
+## todo
+
+- add url rewriting such as flipping addresses such as a gitea instance
+ `example.com/gituser/reponame` to `reponame.example.com` by funneling all
+ `example.com/gituser` into be rewritten to be the only accessible user account on the gitea
+ instance. or for other things like a dynamic subscription based hosting service subdomain
+ instead of path
\ No newline at end of file
diff --git a/cmd/lerproxy/buf/bufpool.go b/cmd/lerproxy/buf/bufpool.go
new file mode 100644
index 0000000..95abf42
--- /dev/null
+++ b/cmd/lerproxy/buf/bufpool.go
@@ -0,0 +1,16 @@
+// Package buf implements a simple concurrent safe buffer pool for raw bytes.
+package buf
+
+import "sync"
+
+var bufferPool = &sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, 32*1024)
+ return &buf
+ },
+}
+
+type Pool struct{}
+
+func (bp Pool) Get() []byte { return *(bufferPool.Get().(*[]byte)) }
+func (bp Pool) Put(b []byte) { bufferPool.Put(&b) }
diff --git a/cmd/lerproxy/hsts/proxy.go b/cmd/lerproxy/hsts/proxy.go
new file mode 100644
index 0000000..e26f9f9
--- /dev/null
+++ b/cmd/lerproxy/hsts/proxy.go
@@ -0,0 +1,15 @@
+// Package hsts implements an HTTP handler that enforces HSTS.
+package hsts
+
+import "net/http"
+
+type Proxy struct {
+	http.Handler
+}
+
+// ServeHTTP sets the HSTS header, then delegates to the embedded Handler.
+func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	w.Header().
+		Set("Strict-Transport-Security",
+			"max-age=31536000; includeSubDomains; preload")
+	p.Handler.ServeHTTP(w, r)
+}
diff --git a/cmd/lerproxy/main.go b/cmd/lerproxy/main.go
new file mode 100644
index 0000000..f9540e3
--- /dev/null
+++ b/cmd/lerproxy/main.go
@@ -0,0 +1,403 @@
+// Command lerproxy implements https reverse proxy with automatic LetsEncrypt
+// usage for multiple hostnames/backends, your own SSL certificates, nostr NIP-05
+// DNS verification hosting and Go vanity redirects.
+package main
+
+import (
+ "bufio"
+ "crypto/tls"
+ "encoding/json"
+ "fmt"
+ "io"
+ stdLog "log"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "orly.dev/chk"
+ "orly.dev/log"
+ "os"
+ "os/signal"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/alexflint/go-arg"
+ "golang.org/x/crypto/acme/autocert"
+ "golang.org/x/sync/errgroup"
+
+ "orly.dev/cmd/lerproxy/buf"
+ "orly.dev/cmd/lerproxy/hsts"
+ "orly.dev/cmd/lerproxy/reverse"
+ "orly.dev/cmd/lerproxy/tcpkeepalive"
+ "orly.dev/cmd/lerproxy/util"
+ "orly.dev/context"
+)
+
+type runArgs struct {
+ Addr string `arg:"-l,--listen" default:":https" help:"address to listen at"`
+ Conf string `arg:"-m,--map" default:"mapping.txt" help:"file with host/backend mapping"`
+ Cache string `arg:"-c,--cachedir" default:"/var/cache/letsencrypt" help:"path to directory to cache key and certificates"`
+ HSTS bool `arg:"-h,--hsts" help:"add Strict-Transport-Security header"`
+ Email string `arg:"-e,--email" help:"contact email address presented to letsencrypt CA"`
+ HTTP string `arg:"--http" default:":http" help:"optional address to serve http-to-https redirects and ACME http-01 challenge responses"`
+ RTO time.Duration `arg:"-r,--rto" default:"1m" help:"maximum duration before timing out read of the request"`
+ WTO time.Duration `arg:"-w,--wto" default:"5m" help:"maximum duration before timing out write of the response"`
+ Idle time.Duration `arg:"-i,--idle" help:"how long idle connection is kept before closing (set rto, wto to 0 to use this)"`
+ Certs []string `arg:"--cert,separate" help:"certificates and the domain they match: eg: orly.dev:/path/to/cert - this will indicate to load two, one with extension .key and one with .crt, each expected to be PEM encoded TLS private and public keys, respectively"`
+ // Rewrites string `arg:"-r,--rewrites" default:"rewrites.txt"`
+}
+
+var args runArgs
+
+func main() {
+ arg.MustParse(&args)
+ ctx, cancel := signal.NotifyContext(context.Bg(), os.Interrupt)
+ defer cancel()
+ if err := run(ctx, args); chk.T(err) {
+ log.F.Ln(err)
+ }
+}
+
+func run(c context.T, args runArgs) (err error) {
+
+ if args.Cache == "" {
+ err = log.E.Err("no cache specified")
+ return
+ }
+
+ var srv *http.Server
+ var httpHandler http.Handler
+ if srv, httpHandler, err = setupServer(args); chk.E(err) {
+ return
+ }
+ srv.ReadHeaderTimeout = 5 * time.Second
+ if args.RTO > 0 {
+ srv.ReadTimeout = args.RTO
+ }
+ if args.WTO > 0 {
+ srv.WriteTimeout = args.WTO
+ }
+ group, ctx := errgroup.WithContext(c)
+ if args.HTTP != "" {
+ httpServer := http.Server{
+ Addr: args.HTTP,
+ Handler: httpHandler,
+ ReadTimeout: 10 * time.Second,
+ WriteTimeout: 10 * time.Second,
+ }
+ group.Go(
+ func() (err error) {
+ chk.E(httpServer.ListenAndServe())
+ return
+ },
+ )
+ group.Go(
+ func() error {
+ <-ctx.Done()
+ ctx, cancel := context.Timeout(
+ context.Bg(),
+ time.Second,
+ )
+ defer cancel()
+ return httpServer.Shutdown(ctx)
+ },
+ )
+ }
+ if srv.ReadTimeout != 0 || srv.WriteTimeout != 0 || args.Idle == 0 {
+ group.Go(
+ func() (err error) {
+ chk.E(srv.ListenAndServeTLS("", ""))
+ return
+ },
+ )
+ } else {
+ group.Go(
+ func() (err error) {
+ var ln net.Listener
+ if ln, err = net.Listen("tcp", srv.Addr); chk.E(err) {
+ return
+ }
+ defer ln.Close()
+ ln = tcpkeepalive.Listener{
+ Duration: args.Idle,
+ TCPListener: ln.(*net.TCPListener),
+ }
+ err = srv.ServeTLS(ln, "", "")
+ chk.E(err)
+ return
+ },
+ )
+ }
+ group.Go(
+ func() error {
+ <-ctx.Done()
+ ctx, cancel := context.Timeout(context.Bg(), time.Second)
+ defer cancel()
+ return srv.Shutdown(ctx)
+ },
+ )
+ return group.Wait()
+}
+
+// TLSConfig returns a TLSConfig that works with a LetsEncrypt automatic SSL cert issuer as well
+// as any provided .pem certificates from providers.
+//
+// The certs are provided in the form "example.com:/path/to/cert.pem"
+func TLSConfig(m *autocert.Manager, certs ...string) (tc *tls.Config) {
+ certMap := make(map[string]*tls.Certificate)
+ var mx sync.Mutex
+ for _, cert := range certs {
+ split := strings.Split(cert, ":")
+ if len(split) != 2 {
+ log.E.F("invalid certificate parameter format: `%s`", cert)
+ continue
+ }
+ var err error
+ var c tls.Certificate
+ if c, err = tls.LoadX509KeyPair(
+ split[1]+".crt", split[1]+".key",
+ ); chk.E(err) {
+ continue
+ }
+ certMap[split[0]] = &c
+ }
+ tc = m.TLSConfig()
+ tc.GetCertificate = func(helo *tls.ClientHelloInfo) (
+ cert *tls.Certificate, err error,
+ ) {
+ mx.Lock()
+ var own string
+ for i := range certMap {
+ // to also handle explicit subdomain certs, prioritize over a root wildcard.
+ if helo.ServerName == i {
+ own = i
+ break
+ }
+ // if it got to us and ends in the same name dot tld assume the subdomain was
+ // redirected or it's a wildcard certificate, thus only the ending needs to match.
+ if strings.HasSuffix(helo.ServerName, i) {
+ own = i
+ break
+ }
+ }
+ if own != "" {
+ defer mx.Unlock()
+ return certMap[own], nil
+ }
+ mx.Unlock()
+ return m.GetCertificate(helo)
+ }
+ return
+}
+
+func setupServer(a runArgs) (s *http.Server, h http.Handler, err error) {
+ var mapping map[string]string
+ if mapping, err = readMapping(a.Conf); chk.E(err) {
+ return
+ }
+ var proxy http.Handler
+ if proxy, err = setProxy(mapping); chk.E(err) {
+ return
+ }
+ if a.HSTS {
+ proxy = &hsts.Proxy{Handler: proxy}
+ }
+ if err = os.MkdirAll(a.Cache, 0700); chk.E(err) {
+ err = fmt.Errorf(
+ "cannot create cache directory %q: %v",
+ a.Cache, err,
+ )
+ chk.E(err)
+ return
+ }
+ m := autocert.Manager{
+ Prompt: autocert.AcceptTOS,
+ Cache: autocert.DirCache(a.Cache),
+ HostPolicy: autocert.HostWhitelist(util.GetKeys(mapping)...),
+ Email: a.Email,
+ }
+ s = &http.Server{
+ Handler: proxy,
+ Addr: a.Addr,
+ TLSConfig: TLSConfig(&m, a.Certs...),
+ }
+ h = m.HTTPHandler(nil)
+ return
+}
+
// NostrJSON is the document shape served at /.well-known/nostr.json for
// NIP-05 identifier verification: a map of names to pubkeys and a map of
// pubkeys to relay URL lists (presumably hex pubkeys — confirm against the
// files being served).
type NostrJSON struct {
	Names  map[string]string   `json:"names"`
	Relays map[string][]string `json:"relays"`
}
+
+func setProxy(mapping map[string]string) (h http.Handler, err error) {
+ if len(mapping) == 0 {
+ return nil, fmt.Errorf("empty mapping")
+ }
+ mux := http.NewServeMux()
+ for hostname, backendAddr := range mapping {
+ hn, ba := hostname, backendAddr
+ if strings.ContainsRune(hn, os.PathSeparator) {
+ err = log.E.Err("invalid hostname: %q", hn)
+ return
+ }
+ network := "tcp"
+ if ba != "" && ba[0] == '@' && runtime.GOOS == "linux" {
+ // append \0 to address so addrlen for connect(2) is calculated in a
+ // way compatible with some other implementations (i.e. uwsgi)
+ network, ba = "unix", ba+string(byte(0))
+ } else if strings.HasPrefix(ba, "git+") {
+ split := strings.Split(ba, "git+")
+ if len(split) != 2 {
+ log.E.Ln("invalid go vanity redirect: %s: %s", hn, ba)
+ continue
+ }
+ redirector := fmt.Sprintf(
+ `redirecting to %s`,
+ hn, split[1], split[1], split[1], split[1],
+ )
+ mux.HandleFunc(
+ hn+"/",
+ func(writer http.ResponseWriter, request *http.Request) {
+ writer.Header().Set(
+ "Access-Control-Allow-Methods",
+ "GET,HEAD,PUT,PATCH,POST,DELETE",
+ )
+ writer.Header().Set("Access-Control-Allow-Origin", "*")
+ writer.Header().Set("Content-Type", "text/html")
+ writer.Header().Set(
+ "Content-Length", fmt.Sprint(len(redirector)),
+ )
+ writer.Header().Set(
+ "strict-transport-security",
+ "max-age=0; includeSubDomains",
+ )
+ fmt.Fprint(writer, redirector)
+ },
+ )
+ continue
+ } else if filepath.IsAbs(ba) {
+ network = "unix"
+ switch {
+ case strings.HasSuffix(ba, string(os.PathSeparator)):
+ // path specified as directory with explicit trailing slash; add
+ // this path as static site
+ fs := http.FileServer(http.Dir(ba))
+ mux.Handle(hn+"/", fs)
+ continue
+ case strings.HasSuffix(ba, "nostr.json"):
+ log.I.Ln(hn, ba)
+ var fb []byte
+ if fb, err = os.ReadFile(ba); chk.E(err) {
+ continue
+ }
+ var v NostrJSON
+ if err = json.Unmarshal(fb, &v); chk.E(err) {
+ continue
+ }
+ var jb []byte
+ if jb, err = json.Marshal(v); chk.E(err) {
+ continue
+ }
+ nostrJSON := string(jb)
+ mux.HandleFunc(
+ hn+"/.well-known/nostr.json",
+ func(writer http.ResponseWriter, request *http.Request) {
+ log.I.Ln("serving nostr json to", hn)
+ writer.Header().Set(
+ "Access-Control-Allow-Methods",
+ "GET,HEAD,PUT,PATCH,POST,DELETE",
+ )
+ writer.Header().Set("Access-Control-Allow-Origin", "*")
+ writer.Header().Set("Content-Type", "application/json")
+ writer.Header().Set(
+ "Content-Length", fmt.Sprint(len(nostrJSON)),
+ )
+ writer.Header().Set(
+ "strict-transport-security",
+ "max-age=0; includeSubDomains",
+ )
+ fmt.Fprint(writer, nostrJSON)
+ },
+ )
+ continue
+ }
+ } else if u, err := url.Parse(ba); err == nil {
+ switch u.Scheme {
+ case "http", "https":
+ rp := reverse.NewSingleHostReverseProxy(u)
+ modifyCORSResponse := func(res *http.Response) error {
+ res.Header.Set(
+ "Access-Control-Allow-Methods",
+ "GET,HEAD,PUT,PATCH,POST,DELETE",
+ )
+ // res.Header.Set("Access-Control-Allow-Credentials", "true")
+ res.Header.Set("Access-Control-Allow-Origin", "*")
+ return nil
+ }
+ rp.ModifyResponse = modifyCORSResponse
+ rp.ErrorLog = stdLog.New(
+ os.Stderr, "lerproxy", stdLog.Llongfile,
+ )
+ rp.BufferPool = buf.Pool{}
+ mux.Handle(hn+"/", rp)
+ continue
+ }
+ }
+ rp := &httputil.ReverseProxy{
+ Director: func(req *http.Request) {
+ req.URL.Scheme = "http"
+ req.URL.Host = req.Host
+ req.Header.Set("X-Forwarded-Proto", "https")
+ req.Header.Set("X-Forwarded-For", req.RemoteAddr)
+ req.Header.Set(
+ "Access-Control-Allow-Methods",
+ "GET,HEAD,PUT,PATCH,POST,DELETE",
+ )
+ // req.Header.Set("Access-Control-Allow-Credentials", "true")
+ req.Header.Set("Access-Control-Allow-Origin", "*")
+ log.D.Ln(req.URL, req.RemoteAddr)
+ },
+ Transport: &http.Transport{
+ DialContext: func(c context.T, n, addr string) (
+ net.Conn, error,
+ ) {
+ return net.DialTimeout(network, ba, 5*time.Second)
+ },
+ },
+ ErrorLog: stdLog.New(io.Discard, "", 0),
+ BufferPool: buf.Pool{},
+ }
+ mux.Handle(hn+"/", rp)
+ }
+ return mux, nil
+}
+
+func readMapping(file string) (m map[string]string, err error) {
+ var f *os.File
+ if f, err = os.Open(file); chk.E(err) {
+ return
+ }
+ m = make(map[string]string)
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ if b := sc.Bytes(); len(b) == 0 || b[0] == '#' {
+ continue
+ }
+ s := strings.SplitN(sc.Text(), ":", 2)
+ if len(s) != 2 {
+ err = fmt.Errorf("invalid line: %q", sc.Text())
+ log.E.Ln(err)
+ chk.E(f.Close())
+ return
+ }
+ m[strings.TrimSpace(s[0])] = strings.TrimSpace(s[1])
+ }
+ err = sc.Err()
+ chk.E(err)
+ chk.E(f.Close())
+ return
+}
diff --git a/cmd/lerproxy/reverse/proxy.go b/cmd/lerproxy/reverse/proxy.go
new file mode 100644
index 0000000..acfbe75
--- /dev/null
+++ b/cmd/lerproxy/reverse/proxy.go
@@ -0,0 +1,35 @@
+// Package reverse is a copy of httputil.NewSingleHostReverseProxy with addition
+// of "X-Forwarded-Proto" header.
+package reverse
+
+import (
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "orly.dev/log"
+
+ "orly.dev/cmd/lerproxy/util"
+)
+
// NewSingleHostReverseProxy is a copy of httputil.NewSingleHostReverseProxy
// with addition of "X-Forwarded-Proto" header.
func NewSingleHostReverseProxy(target *url.URL) (rp *httputil.ReverseProxy) {
	targetQuery := target.RawQuery
	director := func(req *http.Request) {
		// NOTE(review): dumps every inbound request at debug level; this is
		// noisy and may log sensitive headers — confirm it is intentional.
		log.D.S(req)
		// rewrite the request to point at the configured backend
		req.URL.Scheme = target.Scheme
		req.URL.Host = target.Host
		req.URL.Path = util.SingleJoiningSlash(target.Path, req.URL.Path)
		// merge the target's fixed query string with the request's own
		if targetQuery == "" || req.URL.RawQuery == "" {
			req.URL.RawQuery = targetQuery + req.URL.RawQuery
		} else {
			req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
		}
		// explicitly disable the default Go user agent when none was sent
		if _, ok := req.Header["User-Agent"]; !ok {
			req.Header.Set("User-Agent", "")
		}
		// the proxy terminates TLS, so tell the backend the original
		// scheme was https
		req.Header.Set("X-Forwarded-Proto", "https")
	}
	rp = &httputil.ReverseProxy{Director: director}
	return
}
diff --git a/cmd/lerproxy/tcpkeepalive/listener.go b/cmd/lerproxy/tcpkeepalive/listener.go
new file mode 100644
index 0000000..abca982
--- /dev/null
+++ b/cmd/lerproxy/tcpkeepalive/listener.go
@@ -0,0 +1,40 @@
+// Package tcpkeepalive implements a net.TCPListener with a singleton set period
+// for a default 3 minute keep-alive.
+package tcpkeepalive
+
+import (
+ "net"
+ "orly.dev/chk"
+ "time"
+
+ "orly.dev/cmd/lerproxy/timeout"
+)
+
// Period can be changed prior to opening a Listener to alter its
// KeepAlivePeriod.
var Period = 3 * time.Minute

// Listener sets TCP keep-alive timeouts on accepted connections.
// It's used by ListenAndServe and ListenAndServeTLS so dead TCP connections
// (e.g. closing laptop mid-download) eventually go away.
type Listener struct {
	// Duration, when non-zero, wraps each accepted connection in a
	// timeout.Conn so its deadline extends after each successful I/O.
	time.Duration
	*net.TCPListener
}

// Accept waits for the next TCP connection, enables keep-alive probes on it
// using the package-level Period, and (when Duration is set) wraps it in a
// timeout.Conn carrying that rolling deadline.
func (ln Listener) Accept() (conn net.Conn, e error) {
	var tc *net.TCPConn
	if tc, e = ln.AcceptTCP(); chk.E(e) {
		return
	}
	if e = tc.SetKeepAlive(true); chk.E(e) {
		return
	}
	if e = tc.SetKeepAlivePeriod(Period); chk.E(e) {
		return
	}
	if ln.Duration != 0 {
		return timeout.Conn{Duration: ln.Duration, TCPConn: tc}, nil
	}
	return tc, nil
}
diff --git a/cmd/lerproxy/timeout/conn.go b/cmd/lerproxy/timeout/conn.go
new file mode 100644
index 0000000..88988b4
--- /dev/null
+++ b/cmd/lerproxy/timeout/conn.go
@@ -0,0 +1,33 @@
+// Package timeout provides a simple extension of a net.TCPConn with a
+// configurable read/write deadline.
+package timeout
+
+import (
+ "net"
+ "orly.dev/chk"
+ "time"
+)
+
// Conn extends deadline after successful read or write operations
type Conn struct {
	// Duration is the idle window: each successful Read/Write pushes the
	// connection deadline this far into the future.
	time.Duration
	*net.TCPConn
}
+
+func (c Conn) Read(b []byte) (n int, e error) {
+ if n, e = c.TCPConn.Read(b); !chk.E(e) {
+ if e = c.SetDeadline(c.getTimeout()); chk.E(e) {
+ }
+ }
+ return
+}
+
+func (c Conn) Write(b []byte) (n int, e error) {
+ if n, e = c.TCPConn.Write(b); !chk.E(e) {
+ if e = c.SetDeadline(c.getTimeout()); chk.E(e) {
+ }
+ }
+ return
+}
+
+func (c Conn) getTimeout() (t time.Time) { return time.Now().Add(c.Duration) }
diff --git a/cmd/lerproxy/util/u.go b/cmd/lerproxy/util/u.go
new file mode 100644
index 0000000..9a333ce
--- /dev/null
+++ b/cmd/lerproxy/util/u.go
@@ -0,0 +1,26 @@
+// Package util provides some helpers for lerproxy, a tool to convert maps of
+// strings to slices of the same strings, and a helper to avoid putting two / in
+// a URL.
+package util
+
+import "strings"
+
// GetKeys returns the keys of m as a slice, in unspecified (map iteration)
// order. A nil or empty map yields an empty slice.
func GetKeys(m map[string]string) []string {
	keys := make([]string, 0, len(m))
	for key := range m {
		keys = append(keys, key)
	}
	return keys
}
+
// SingleJoiningSlash concatenates a and b ensuring exactly one slash sits
// between them: a duplicate slash is collapsed and a missing one is added.
func SingleJoiningSlash(a, b string) string {
	aSlash := strings.HasSuffix(a, "/")
	bSlash := strings.HasPrefix(b, "/")
	if aSlash && bSlash {
		return a + b[1:]
	}
	if !aSlash && !bSlash {
		return a + "/" + b
	}
	return a + b
}
diff --git a/cmd/nauth/main.go b/cmd/nauth/main.go
new file mode 100644
index 0000000..999c7a9
--- /dev/null
+++ b/cmd/nauth/main.go
@@ -0,0 +1,89 @@
+package main
+
+import (
+ "encoding/base64"
+ "fmt"
+ "orly.dev/chk"
+ "orly.dev/errorf"
+ "orly.dev/log"
+ "os"
+ "time"
+
+ "orly.dev/bech32encoding"
+ "orly.dev/httpauth"
+ "orly.dev/p256k"
+ "orly.dev/signer"
+)
+
+const secEnv = "NOSTR_SECRET_KEY"
+
+func fail(format string, a ...any) {
+ _, _ = fmt.Fprintf(os.Stderr, format+"\n", a...)
+ os.Exit(1)
+}
+
// main generates an extended-expiration NIP-98 token for the URL given in
// os.Args[1], valid for the duration given in os.Args[2] (Go
// time.ParseDuration format, e.g. "72h"), and prints it to stdout as an
// Authorization header value ("Nostr <base64url(event)>").
func main() {
	// lol.SetLogLevel("trace")
	if len(os.Args) > 1 && os.Args[1] == "help" {
		fmt.Printf(
			`nauth help:

for generating extended expiration NIP-98 tokens:

 nauth

 * NIP-98 secret will be expected in the environment variable "%s" - if absent, will not be added to the header. Endpoint is assumed to not require it if absent. An error will be returned if it was needed.

 output will be rendered to stdout

`, secEnv,
		)
		os.Exit(0)
	}
	if len(os.Args) < 3 {
		fail(
			`error: nauth requires minimum 2 args:

 signing nsec (in bech32 format) is expected to be found in %s environment variable.

 use "help" to get usage information
`, secEnv,
		)
	}
	// parse the token validity duration
	ex, err := time.ParseDuration(os.Args[2])
	if err != nil {
		fail(err.Error())
	}
	// NOTE(review): unlike the help text ("if absent, will not be added"),
	// a signer is mandatory here — a missing/invalid NOSTR_SECRET_KEY
	// aborts the run. Confirm which behavior is intended.
	var sign signer.I
	if sign, err = GetNIP98Signer(); err != nil {
		fail(err.Error())
	}
	exp := time.Now().Add(ex).Unix()
	// build and sign the NIP-98 event for the target URL with the extended
	// expiration timestamp
	ev := httpauth.MakeNIP98Event(os.Args[1], "", "", exp)
	if err = ev.Sign(sign); err != nil {
		fail(err.Error())
	}
	log.T.F("nip-98 http auth event:\n%s\n", ev.SerializeIndented())
	// emit in HTTP Authorization header form
	b64 := base64.URLEncoding.EncodeToString(ev.Serialize())
	fmt.Println("Nostr " + b64)
}
+
// GetNIP98Signer reads a bech32 nsec from the NOSTR_SECRET_KEY environment
// variable, decodes it, and returns an initialized signer for producing
// NIP-98 auth events. It errors when the variable is unset, the nsec fails
// to decode, or the signer fails to initialize.
//
// NOTE(review): duplicated in cmd/nurl — consider extracting a shared helper.
func GetNIP98Signer() (sign signer.I, err error) {
	nsex := os.Getenv(secEnv)
	var sk []byte
	if len(nsex) == 0 {
		err = errorf.E(
			"no bech32 secret key found in environment variable %s", secEnv,
		)
		return
	} else if sk, err = bech32encoding.NsecToBytes([]byte(nsex)); chk.E(err) {
		err = errorf.E("failed to decode nsec: '%s'", err.Error())
		return
	}
	sign = &p256k.Signer{}
	if err = sign.InitSec(sk); chk.E(err) {
		err = errorf.E("failed to init signer: '%s'", err.Error())
		return
	}
	return
}
diff --git a/cmd/nurl/main.go b/cmd/nurl/main.go
new file mode 100644
index 0000000..2d34e55
--- /dev/null
+++ b/cmd/nurl/main.go
@@ -0,0 +1,196 @@
+// Package main is a simple implementation of a cURL like tool that can do
+// simple GET/POST operations on a HTTP server that understands NIP-98
+// authentication, with the signing key found in an environment variable.
+package main
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "orly.dev/chk"
+ "orly.dev/errorf"
+ "orly.dev/log"
+ realy_lol "orly.dev/version"
+ "os"
+
+ "orly.dev/bech32encoding"
+ "orly.dev/hex"
+ "orly.dev/httpauth"
+ "orly.dev/p256k"
+ "orly.dev/sha256"
+ "orly.dev/signer"
+)
+
+const secEnv = "NOSTR_SECRET_KEY"
+
+var userAgent = fmt.Sprintf("nurl/%s", realy_lol.V)
+
+func fail(format string, a ...any) {
+ _, _ = fmt.Fprintf(os.Stderr, format+"\n", a...)
+ os.Exit(1)
+}
+
+func main() {
+ // lol.SetLogLevel("trace")
+ if len(os.Args) > 1 && os.Args[1] == "help" {
+ fmt.Printf(
+ `nurl help:
+
+for nostr http using NIP-98 HTTP authentication:
+
+ nurl
+
+ if no file is given, the request will be processed as a HTTP GET (if relevant there can be request parameters).
+
+ * NIP-98 secret will be expected in the environment variable "%s" - if absent, will not be added to the header. Endpoint is assumed to not require it if absent. An error will be returned if it was needed.
+
+ output will be rendered to stdout
+
+`, secEnv,
+ )
+ os.Exit(0)
+ }
+ if len(os.Args) < 2 {
+ fail(
+ `error: nurl requires minimum 1 arg:
+
+ signing nsec (in bech32 format) is expected to be found in %s environment variable.
+
+ use "help" to get usage information
+`, secEnv,
+ )
+ }
+ var err error
+ var sign signer.I
+ if sign, err = GetNIP98Signer(); err != nil {
+ }
+ var ur *url.URL
+ if ur, err = url.Parse(os.Args[1]); chk.E(err) {
+ fail("invalid URL: `%s` error: `%s`", os.Args[2], err.Error())
+ }
+ log.T.S(ur)
+ if len(os.Args) == 2 {
+ if err = Get(ur, sign); chk.E(err) {
+ fail(err.Error())
+ }
+ return
+ }
+ if err = Post(os.Args[2], ur, sign); chk.E(err) {
+ fail(err.Error())
+ }
+}
+
// GetNIP98Signer reads a bech32 nsec from the NOSTR_SECRET_KEY environment
// variable, decodes it, and returns an initialized signer for producing
// NIP-98 auth events. It errors when the variable is unset, the nsec fails
// to decode, or the signer fails to initialize.
//
// NOTE(review): duplicated in cmd/nauth — consider extracting a shared helper.
func GetNIP98Signer() (sign signer.I, err error) {
	nsex := os.Getenv(secEnv)
	var sk []byte
	if len(nsex) == 0 {
		err = errorf.E(
			"no bech32 secret key found in environment variable %s", secEnv,
		)
		return
	} else if sk, err = bech32encoding.NsecToBytes([]byte(nsex)); chk.E(err) {
		err = errorf.E("failed to decode nsec: '%s'", err.Error())
		return
	}
	sign = &p256k.Signer{}
	if err = sign.InitSec(sk); chk.E(err) {
		err = errorf.E("failed to init signer: '%s'", err.Error())
		return
	}
	return
}
+
+func Get(ur *url.URL, sign signer.I) (err error) {
+ log.T.F("GET")
+ var r *http.Request
+ if r, err = http.NewRequest("GET", ur.String(), nil); chk.E(err) {
+ return
+ }
+ r.Header.Add("User-Agent", userAgent)
+ if sign != nil {
+ if err = httpauth.AddNIP98Header(
+ r, ur, "GET", "", sign, 0,
+ ); chk.E(err) {
+ fail(err.Error())
+ }
+ }
+ client := &http.Client{
+ CheckRedirect: func(
+ req *http.Request,
+ via []*http.Request,
+ ) error {
+ return http.ErrUseLastResponse
+ },
+ }
+ var res *http.Response
+ if res, err = client.Do(r); chk.E(err) {
+ err = errorf.E("request failed: %w", err)
+ return
+ }
+ if _, err = io.Copy(os.Stdout, res.Body); chk.E(err) {
+ res.Body.Close()
+ return
+ }
+ res.Body.Close()
+ return
+}
+
+func Post(f string, ur *url.URL, sign signer.I) (err error) {
+ log.T.F("POST")
+ var contentLength int64
+ var payload io.ReadCloser
+ // get the file path parameters and optional hash
+ var fi os.FileInfo
+ if fi, err = os.Stat(f); chk.E(err) {
+ return
+ }
+ var b []byte
+ if b, err = os.ReadFile(f); chk.E(err) {
+ return
+ }
+ hb := sha256.Sum256(b)
+ h := hex.Enc(hb[:])
+ contentLength = fi.Size()
+ if payload, err = os.Open(f); chk.E(err) {
+ return
+ }
+ log.T.F("opened file %s hash %s", f, h)
+ var r *http.Request
+ r = &http.Request{
+ Method: "POST",
+ URL: ur,
+ Proto: "HTTP/1.1",
+ ProtoMajor: 1,
+ ProtoMinor: 1,
+ Header: make(http.Header),
+ Body: payload,
+ ContentLength: contentLength,
+ Host: ur.Host,
+ }
+ r.Header.Add("User-Agent", userAgent)
+ if sign != nil {
+ if err = httpauth.AddNIP98Header(
+ r, ur, "POST", h, sign, 0,
+ ); chk.E(err) {
+ fail(err.Error())
+ }
+ }
+ r.GetBody = func() (rc io.ReadCloser, err error) {
+ rc = payload
+ return
+ }
+ // log.I.S(r)
+ client := &http.Client{}
+ var res *http.Response
+ if res, err = client.Do(r); chk.E(err) {
+ return
+ }
+ // log.I.S(res)
+ defer res.Body.Close()
+ if io.Copy(os.Stdout, res.Body); chk.E(err) {
+ return
+ }
+ fmt.Println()
+ return
+}
diff --git a/cmd/vainstr/LICENSE b/cmd/vainstr/LICENSE
new file mode 100644
index 0000000..0e259d4
--- /dev/null
+++ b/cmd/vainstr/LICENSE
@@ -0,0 +1,121 @@
+Creative Commons Legal Code
+
+CC0 1.0 Universal
+
+ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
+ LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
+ ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
+ INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
+ REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
+ PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
+ THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
+ HEREUNDER.
+
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer
+exclusive Copyright and Related Rights (defined below) upon the creator
+and subsequent owner(s) (each and all, an "owner") of an original work of
+authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for
+the purpose of contributing to a commons of creative, cultural and
+scientific works ("Commons") that the public can reliably and without fear
+of later claims of infringement build upon, modify, incorporate in other
+works, reuse and redistribute as freely as possible in any form whatsoever
+and for any purposes, including without limitation commercial purposes.
+These owners may contribute to the Commons to promote the ideal of a free
+culture and the further production of creative, cultural and scientific
+works, or to gain reputation or greater distribution for their Work in
+part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any
+expectation of additional consideration or compensation, the person
+associating CC0 with a Work (the "Affirmer"), to the extent that he or she
+is an owner of Copyright and Related Rights in the Work, voluntarily
+elects to apply CC0 to the Work and publicly distribute the Work under its
+terms, with knowledge of his or her Copyright and Related Rights in the
+Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be
+protected by copyright and related or neighboring rights ("Copyright and
+Related Rights"). Copyright and Related Rights include, but are not
+limited to, the following:
+
+ i. the right to reproduce, adapt, distribute, perform, display,
+ communicate, and translate a Work;
+ ii. moral rights retained by the original author(s) and/or performer(s);
+iii. publicity and privacy rights pertaining to a person's image or
+ likeness depicted in a Work;
+ iv. rights protecting against unfair competition in regards to a Work,
+ subject to the limitations in paragraph 4(a), below;
+ v. rights protecting the extraction, dissemination, use and reuse of data
+ in a Work;
+ vi. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation
+ thereof, including any amended or successor version of such
+ directive); and
+vii. other similar, equivalent or corresponding rights throughout the
+ world based on applicable law or treaty, and any national
+ implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention
+of, applicable law, Affirmer hereby overtly, fully, permanently,
+irrevocably and unconditionally waives, abandons, and surrenders all of
+Affirmer's Copyright and Related Rights and associated claims and causes
+of action, whether now known or unknown (including existing as well as
+future claims and causes of action), in the Work (i) in all territories
+worldwide, (ii) for the maximum duration provided by applicable law or
+treaty (including future time extensions), (iii) in any current or future
+medium and for any number of copies, and (iv) for any purpose whatsoever,
+including without limitation commercial, advertising or promotional
+purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
+member of the public at large and to the detriment of Affirmer's heirs and
+successors, fully intending that such Waiver shall not be subject to
+revocation, rescission, cancellation, termination, or any other legal or
+equitable action to disrupt the quiet enjoyment of the Work by the public
+as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason
+be judged legally invalid or ineffective under applicable law, then the
+Waiver shall be preserved to the maximum extent permitted taking into
+account Affirmer's express Statement of Purpose. In addition, to the
+extent the Waiver is so judged Affirmer hereby grants to each affected
+person a royalty-free, non transferable, non sublicensable, non exclusive,
+irrevocable and unconditional license to exercise Affirmer's Copyright and
+Related Rights in the Work (i) in all territories worldwide, (ii) for the
+maximum duration provided by applicable law or treaty (including future
+time extensions), (iii) in any current or future medium and for any number
+of copies, and (iv) for any purpose whatsoever, including without
+limitation commercial, advertising or promotional purposes (the
+"License"). The License shall be deemed effective as of the date CC0 was
+applied by Affirmer to the Work. Should any part of the License for any
+reason be judged legally invalid or ineffective under applicable law, such
+partial invalidity or ineffectiveness shall not invalidate the remainder
+of the License, and in such case Affirmer hereby affirms that he or she
+will not (i) exercise any of his or her remaining Copyright and Related
+Rights in the Work or (ii) assert any associated claims and causes of
+action with respect to the Work, in either case contrary to Affirmer's
+express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+ surrendered, licensed or otherwise affected by this document.
+ b. Affirmer offers the Work as-is and makes no representations or
+ warranties of any kind concerning the Work, express, implied,
+ statutory or otherwise, including without limitation warranties of
+ title, merchantability, fitness for a particular purpose, non
+ infringement, or the absence of latent or other defects, accuracy, or
+ the present or absence of errors, whether or not discoverable, all to
+ the greatest extent permissible under applicable law.
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+ that may apply to the Work or any use thereof, including without
+ limitation any person's Copyright and Related Rights in the Work.
+ Further, Affirmer disclaims responsibility for obtaining any necessary
+ consents, permissions or other rights required for any use of the
+ Work.
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+ party to this document and has no duty or obligation with respect to
+ this CC0 or use of the Work.
diff --git a/cmd/vainstr/README.md b/cmd/vainstr/README.md
new file mode 100644
index 0000000..2b3e7b4
--- /dev/null
+++ b/cmd/vainstr/README.md
@@ -0,0 +1,16 @@
+# vainstr
+nostr vanity key miner
+
+## usage
+
+```
+Usage: vainstr [--threads THREADS] [STRING [POSITION]]
+
+Positional arguments:
+ STRING
+ POSITION [begin|contain|end]
+
+Options:
+ --threads THREADS number of threads to mine with - defaults to using all CPU threads available
+ --help, -h display this help and exit
+```
\ No newline at end of file
diff --git a/cmd/vainstr/main.go b/cmd/vainstr/main.go
new file mode 100644
index 0000000..adb9d23
--- /dev/null
+++ b/cmd/vainstr/main.go
@@ -0,0 +1,233 @@
+// Package main is a simple nostr key miner that uses the fast bitcoin secp256k1
+// C library to derive npubs with specified prefix/infix/suffix strings present.
+package main
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "orly.dev/chk"
+ "orly.dev/log"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/alexflint/go-arg"
+
+ "orly.dev/atomic"
+ "orly.dev/bech32encoding"
+ "orly.dev/ec/bech32"
+ "orly.dev/ec/schnorr"
+ "orly.dev/ec/secp256k1"
+ "orly.dev/interrupt"
+ "orly.dev/qu"
+)
+
+var prefix = append(bech32encoding.PubHRP, '1')
+
+const (
+ PositionBeginning = iota
+ PositionContains
+ PositionEnding
+)
+
+type Result struct {
+ sec *secp256k1.SecretKey
+ npub []byte
+ pub *secp256k1.PublicKey
+}
+
+var args struct {
+ String string `arg:"positional" help:"the string you want to appear in the npub"`
+ Position string `arg:"positional" default:"end" help:"[begin|contain|end] default: end"`
+ Threads int `help:"number of threads to mine with - defaults to using all CPU threads available"`
+}
+
// main parses CLI arguments, resolves the requested match position and
// thread count, and runs the vanity miner.
func main() {
	arg.MustParse(&args)
	if args.String == "" {
		_, _ = fmt.Fprintln(
			os.Stderr,
			`Usage: vainstr [--threads THREADS] [STRING [POSITION]]

Positional arguments:
 STRING the string you want to appear in the npub
 POSITION [begin|contain|end] default: end

Options:
 --threads THREADS number of threads to mine with - defaults to using all CPU threads available
 --help, -h display this help and exit`,
		)
		os.Exit(0)
	}
	var where int
	canonical := strings.ToLower(args.Position)
	// map the position word onto the Position* constants; input matching
	// none of the cases leaves the zero value (PositionBeginning)
	switch {
	case strings.HasPrefix(canonical, "begin"):
		where = PositionBeginning
	case strings.Contains(canonical, "contain"):
		where = PositionContains
	case strings.HasSuffix(canonical, "end"):
		where = PositionEnding
	}
	if args.Threads == 0 {
		// default to all available CPU threads
		args.Threads = runtime.NumCPU()
	}
	if err := Vanity(args.String, where, args.Threads); chk.T(err) {
		log.F.F("error: %s", err)
	}
}
+
+func Vanity(str string, where int, threads int) (e error) {
+
+ // check the string has valid bech32 ciphers
+ for i := range str {
+ wrong := true
+ for j := range bech32.Charset {
+ if str[i] == bech32.Charset[j] {
+ wrong = false
+ break
+ }
+ }
+ if wrong {
+ return fmt.Errorf(
+ "found invalid character '%c' only ones from '%s' allowed\n",
+ str[i], bech32.Charset,
+ )
+ }
+ }
+ started := time.Now()
+ quit, shutdown := qu.T(), qu.T()
+ resC := make(chan Result)
+ interrupt.AddHandler(
+ func() {
+ // this will stop work if CTRL-C or Interrupt signal from OS.
+ shutdown.Q()
+ },
+ )
+ var wg sync.WaitGroup
+ counter := atomic.NewInt64(0)
+ for i := 0; i < threads; i++ {
+ log.D.F("starting up worker %d", i)
+ go mine(str, where, quit, resC, &wg, counter)
+ }
+ tick := time.NewTicker(time.Second * 5)
+ var res Result
+out:
+ for {
+ select {
+ case <-tick.C:
+ workingFor := time.Now().Sub(started)
+ wm := workingFor % time.Second
+ workingFor -= wm
+ fmt.Printf(
+ "working for %v, attempts %d\n",
+ workingFor, counter.Load(),
+ )
+ case r := <-resC:
+ // one of the workers found the solution
+ res = r
+ // tell the others to stop
+ quit.Q()
+ break out
+ case <-shutdown.Wait():
+ quit.Q()
+ log.I.Ln("\rinterrupt signal received")
+ os.Exit(0)
+ }
+ }
+
+ // wait for all workers to stop
+ wg.Wait()
+
+ fmt.Printf(
+ "generated in %d attempts using %d threads, taking %v\n",
+ counter.Load(), args.Threads, time.Now().Sub(started),
+ )
+ secBytes := res.sec.Serialize()
+ log.D.Ln(
+ "generated key pair:\n"+
+ "\nhex:\n"+
+ "\tsecret: %s\n"+
+ "\tpublic: %s\n\n",
+ hex.EncodeToString(secBytes),
+ hex.EncodeToString(schnorr.SerializePubKey(res.pub)),
+ )
+ nsec, _ := bech32encoding.SecretKeyToNsec(res.sec)
+ fmt.Printf("\nNSEC = %s\nNPUB = %s\n\n", nsec, res.npub)
+ return
+}
+
// mine is one vanity-mining worker: it generates key pairs in a loop,
// counts attempts in the shared counter, and when the npub matches str at
// the requested position it signals quit and sends the result on resC.
//
// NOTE(review): wg.Add(1) runs inside the worker goroutine, so the caller's
// wg.Wait() can race a worker that has not been scheduled yet — Add should
// happen in the caller before `go mine(...)`; confirm and fix alongside
// Vanity.
func mine(
	str string, where int, quit qu.C, resC chan Result, wg *sync.WaitGroup,
	counter *atomic.Int64,
) {

	wg.Add(1)
	var r Result
	var e error
	found := false
out:
	for {
		// stop when any worker (including this one, after a find) signals
		// quit; the finder then hands its result back on resC
		select {
		case <-quit:
			wg.Done()
			if found {
				// send back the result
				log.D.Ln("sending back result\n")
				resC <- r
				log.D.Ln("sent\n")
			} else {
				log.D.Ln("other thread found it\n")
			}
			break out
		default:
		}
		counter.Inc()
		r.sec, r.pub, e = GenKeyPair()
		if e != nil {
			log.E.Ln("error generating key: '%v' worker stopping", e)
			break out
		}
		r.npub, e = bech32encoding.PublicKeyToNpub(r.pub)
		if e != nil {
			log.E.Ln("fatal error generating npub: %s\n", e)
			break out
		}
		// test the npub for the wanted string at the requested position
		switch where {
		case PositionBeginning:
			if bytes.HasPrefix(r.npub, append(prefix, []byte(str)...)) {
				found = true
				quit.Q()
			}
		case PositionEnding:
			if bytes.HasSuffix(r.npub, []byte(str)) {
				found = true
				quit.Q()
			}
		case PositionContains:
			if bytes.Contains(r.npub, []byte(str)) {
				found = true
				quit.Q()
			}
		}
	}
}
+
+// GenKeyPair creates a fresh new key pair using the entropy source used by
+// crypto/rand (ie, /dev/random on posix systems).
+func GenKeyPair() (
+ sec *secp256k1.SecretKey,
+ pub *secp256k1.PublicKey, err error,
+) {
+
+ sec, err = secp256k1.GenerateSecretKey()
+ if err != nil {
+ err = fmt.Errorf("error generating key: %s", err)
+ return
+ }
+ pub = sec.PubKey()
+ return
+}
diff --git a/codec/codec.go b/codec/codec.go
new file mode 100644
index 0000000..d6b9e5f
--- /dev/null
+++ b/codec/codec.go
@@ -0,0 +1,42 @@
+// Package codec is a set of interfaces for nostr messages and message elements.
+package codec
+
+import (
+ "io"
+)
+
+// Envelope is an interface for the nostr "envelope" message formats, a JSON
+// array with the first field an upper case string that provides type
+// information, in combination with the context of the side sending it (relay or
+// client).
+type Envelope interface {
+ // Label returns the (uppercase) string that signifies the type of message.
+ Label() string
+ // Write outputs the envelope to an io.Writer
+ Write(w io.Writer) (err error)
+ // JSON is a somewhat simplified version of the json.Marshaler/json.Unmarshaler
+ // that has no error for the Marshal side of the operation.
+ JSON
+}
+
+// JSON is a somewhat simplified version of the json.Marshaler/json.Unmarshaler
+// that has no error for the Marshal side of the operation.
+type JSON interface {
+ // Marshal converts the data of the type into JSON, appending it to the provided
+ // slice and returning the extended slice.
+ Marshal(dst []byte) (b []byte)
+ // Unmarshal decodes a JSON form of a type back into the runtime form, and
+ // returns whatever remains after the type has been decoded out.
+ Unmarshal(b []byte) (r []byte, err error)
+}
+
+// Binary is a similarly simplified form of the stdlib binary Marshal/Unmarshal
+// interfaces. Same as JSON it does not have an error for the MarshalBinary.
+type Binary interface {
+ // MarshalBinary converts the data of the type into binary form, appending it to
+ // the provided slice.
+ MarshalBinary(dst []byte) (b []byte)
+ // UnmarshalBinary decodes a binary form of a type back into the runtime form,
+ // and returns whatever remains after the type has been decoded out.
+ UnmarshalBinary(b []byte) (r []byte, err error)
+}
diff --git a/database/fetch-event-by-serial_test.go b/database/fetch-event-by-serial_test.go
index 1af7640..38fdeb6 100644
--- a/database/fetch-event-by-serial_test.go
+++ b/database/fetch-event-by-serial_test.go
@@ -81,7 +81,7 @@ func TestFetchEventBySerial(t *testing.T) {
},
)
if err != nil {
- t.Fatalf("Failed to query for IDs: %v", err)
+ t.Fatalf("Failed to query for Ids: %v", err)
}
// Verify we got exactly one result
@@ -108,24 +108,32 @@ func TestFetchEventBySerial(t *testing.T) {
// Verify the fetched event has the same ID as the original event
if !bytes.Equal(fetchedEvent.Id, testEvent.Id) {
- t.Fatalf("Fetched event ID doesn't match original event ID. Got %x, expected %x",
- fetchedEvent.Id, testEvent.Id)
+ t.Fatalf(
+ "Fetched event ID doesn't match original event ID. Got %x, expected %x",
+ fetchedEvent.Id, testEvent.Id,
+ )
}
// Verify other event properties match
if fetchedEvent.Kind.K != testEvent.Kind.K {
- t.Fatalf("Fetched event kind doesn't match. Got %d, expected %d",
- fetchedEvent.Kind.K, testEvent.Kind.K)
+ t.Fatalf(
+ "Fetched event kind doesn't match. Got %d, expected %d",
+ fetchedEvent.Kind.K, testEvent.Kind.K,
+ )
}
if !bytes.Equal(fetchedEvent.Pubkey, testEvent.Pubkey) {
- t.Fatalf("Fetched event pubkey doesn't match. Got %x, expected %x",
- fetchedEvent.Pubkey, testEvent.Pubkey)
+ t.Fatalf(
+ "Fetched event pubkey doesn't match. Got %x, expected %x",
+ fetchedEvent.Pubkey, testEvent.Pubkey,
+ )
}
if fetchedEvent.CreatedAt.V != testEvent.CreatedAt.V {
- t.Fatalf("Fetched event created_at doesn't match. Got %d, expected %d",
- fetchedEvent.CreatedAt.V, testEvent.CreatedAt.V)
+ t.Fatalf(
+ "Fetched event created_at doesn't match. Got %d, expected %d",
+ fetchedEvent.CreatedAt.V, testEvent.CreatedAt.V,
+ )
}
// Test with a non-existent serial
@@ -143,6 +151,9 @@ func TestFetchEventBySerial(t *testing.T) {
// The fetched event should be nil
if fetchedEvent != nil {
- t.Fatalf("Expected nil event for non-existent serial, but got: %v", fetchedEvent)
+ t.Fatalf(
+ "Expected nil event for non-existent serial, but got: %v",
+ fetchedEvent,
+ )
}
}
diff --git a/database/get-serial-by-id_test.go b/database/get-serial-by-id_test.go
index d41316f..5f67419 100644
--- a/database/get-serial-by-id_test.go
+++ b/database/get-serial-by-id_test.go
@@ -69,32 +69,32 @@ func TestGetSerialById(t *testing.T) {
// Test GetSerialById with a known event ID
testEvent := events[3] // Using the same event as in QueryForIds test
-
+
// Get the serial by ID
serial, err := db.GetSerialById(testEvent.Id)
if err != nil {
t.Fatalf("Failed to get serial by ID: %v", err)
}
-
+
// Verify the serial is not nil
if serial == nil {
t.Fatal("Expected serial to be non-nil, but got nil")
}
-
+
// Test with a non-existent ID
nonExistentId := make([]byte, len(testEvent.Id))
// Ensure it's different from any real ID
for i := range nonExistentId {
nonExistentId[i] = ^testEvent.Id[i]
}
-
+
serial, err = db.GetSerialById(nonExistentId)
if err != nil {
t.Fatalf("Expected no error for non-existent ID, but got: %v", err)
}
-
- // For non-existent IDs, the function should return nil serial
+
+ // For non-existent Ids, the function should return nil serial
if serial != nil {
t.Fatalf("Expected nil serial for non-existent ID, but got: %v", serial)
}
-}
\ No newline at end of file
+}
diff --git a/database/indexes/keys_test.go b/database/indexes/keys_test.go
index e377cd7..ab49cde 100644
--- a/database/indexes/keys_test.go
+++ b/database/indexes/keys_test.go
@@ -625,6 +625,7 @@ func TestPubkeyTagFunctions(t *testing.T) {
// TestTagFunctions tests the Tag-related functions
func TestTagFunctions(t *testing.T) {
+ var err error
// Test TagVars
k, v, ca, ser := TagVars()
if k == nil || v == nil || ca == nil || ser == nil {
@@ -752,6 +753,7 @@ func TestKindFunctions(t *testing.T) {
// TestKindTagFunctions tests the TagKind-related functions
func TestKindTagFunctions(t *testing.T) {
+ var err error
// Test TagKindVars
k, v, ki, ca, ser := TagKindVars()
if ki == nil || k == nil || v == nil || ca == nil || ser == nil {
diff --git a/database/indexes/types/identhash_test.go b/database/indexes/types/identhash_test.go
index 647c579..a18b583 100644
--- a/database/indexes/types/identhash_test.go
+++ b/database/indexes/types/identhash_test.go
@@ -10,6 +10,7 @@ import (
)
func TestFromIdent(t *testing.T) {
+ var err error
// Create a test identity
testIdent := []byte("test-identity")
@@ -34,6 +35,7 @@ func TestFromIdent(t *testing.T) {
}
func TestIdent_MarshalWriteUnmarshalRead(t *testing.T) {
+ var err error
// Create a Ident with a known value
i1 := &Ident{}
testIdent := []byte("test-identity")
@@ -68,6 +70,7 @@ func TestIdent_MarshalWriteUnmarshalRead(t *testing.T) {
}
func TestIdent_UnmarshalReadWithCorruptedData(t *testing.T) {
+ var err error
// Create a Ident with a known value
i1 := &Ident{}
testIdent1 := []byte("test-identity-1")
diff --git a/database/query-events.go b/database/query-events.go
index 75f18bc..3fc9141 100644
--- a/database/query-events.go
+++ b/database/query-events.go
@@ -12,7 +12,7 @@ import (
)
// QueryEvents retrieves events based on the provided filter.
-// If the filter contains IDs, it fetches events by those IDs directly,
+// If the filter contains Ids, it fetches events by those Ids directly,
// overriding other filter criteria. Otherwise, it queries by other filter
// criteria and fetches matching events. Results are returned in reverse
// chronological order of their creation timestamps.
@@ -62,7 +62,7 @@ func (d *D) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
if bytes.Equal(
ev.Pubkey, e.Pubkey,
) && ev.Kind.K == e.Kind.K {
-
+
}
}
// } else if ev.Kind.IsParameterizedReplaceable(){
diff --git a/database/query-for-ids.go b/database/query-for-ids.go
index ac52f63..96d942b 100644
--- a/database/query-for-ids.go
+++ b/database/query-for-ids.go
@@ -12,9 +12,9 @@ import (
)
// QueryForIds retrieves a list of IdPkTs based on the provided filter.
-// It supports filtering by ranges and tags but disallows filtering by IDs.
+// It supports filtering by ranges and tags but disallows filtering by Ids.
// Results are sorted by timestamp in reverse chronological order.
-// Returns an error if the filter contains IDs or if any operation fails.
+// Returns an error if the filter contains Ids or if any operation fails.
func (d *D) QueryForIds(c context.T, f *filter.F) (
idPkTs []store.IdPkTs, err error,
) {
diff --git a/database/save-event.go b/database/save-event.go
index dd7ddc8..8c213cf 100644
--- a/database/save-event.go
+++ b/database/save-event.go
@@ -34,8 +34,10 @@ func (d *D) SaveEvent(c context.T, ev *event.E) (kc, vc int, err error) {
// If there are previous events, log that we're replacing one
if len(prevEvents) > 0 {
- d.Logger.Infof("Saving new version of replaceable event kind %d from pubkey %s",
- ev.Kind.K, hex.Enc(ev.Pubkey))
+ d.Logger.Infof(
+ "Saving new version of replaceable event kind %d from pubkey %s",
+ ev.Kind.K, hex.Enc(ev.Pubkey),
+ )
}
}
@@ -89,6 +91,6 @@ func (d *D) SaveEvent(c context.T, ev *event.E) (kc, vc int, err error) {
return
},
)
- // log.T.F("total data written: %d bytes keys %d bytes values", kc, vc)
+ // log.F.F("total data written: %d bytes keys %d bytes values", kc, vc)
return
}
diff --git a/dns/nip05.go b/dns/nip05.go
new file mode 100644
index 0000000..046384f
--- /dev/null
+++ b/dns/nip05.go
@@ -0,0 +1,157 @@
+// Package dns is an implementation of the specification of NIP-05, providing
+// DNS based verification for nostr identities.
+package dns
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "orly.dev/chk"
+ "orly.dev/errorf"
+ "regexp"
+ "strings"
+
+ "orly.dev/bech32encoding/pointers"
+ "orly.dev/context"
+ "orly.dev/keys"
+)
+
+// Nip05Regex is a regular expression that matches the same pattern as an
+// email address.
+var Nip05Regex = regexp.MustCompile(`^(?:([\w.+-]+)@)?([\w_-]+(\.[\w_-]+)+)$`)
+
+// WellKnownResponse is the structure of the JSON to be found at
+// /.well-known/nostr.json
+type WellKnownResponse struct {
+	// Names maps usernames (the part before the @) to their hex-encoded public keys.
+ Names map[string]string `json:"names"`
+ // Relays associates one of the public keys from Names to a list of relay URLs
+ // that are recommended for that user.
+ Relays map[string][]string `json:"relays,omitempty"`
+ NIP46 map[string][]string `json:"nip46,omitempty"` // todo: is this obsolete?
+}
+
+// NewWellKnownResponse creates a new WellKnownResponse; it is required because
+// all of the fields are maps and need to be allocated.
+func NewWellKnownResponse() *WellKnownResponse {
+ return &WellKnownResponse{
+ Names: make(map[string]string),
+ Relays: make(map[string][]string),
+ NIP46: make(map[string][]string),
+ }
+}
+
+// IsValidIdentifier verifies that an identifier matches a correct NIP-05
+// username@domain
+func IsValidIdentifier(input string) bool {
+ return Nip05Regex.MatchString(input)
+}
+
+// ParseIdentifier searches a string for a valid NIP-05 username@domain
+func ParseIdentifier(account string) (name, domain string, err error) {
+ res := Nip05Regex.FindStringSubmatch(account)
+ if len(res) == 0 {
+ return "", "", errorf.E("invalid identifier")
+ }
+ if res[1] == "" {
+ res[1] = "_"
+ }
+ return res[1], res[2], nil
+}
+
+// QueryIdentifier queries a web server from the domain of a NIP-05 DNS
+// identifier
+func QueryIdentifier(c context.T, account string) (
+ prf *pointers.Profile,
+ err error,
+) {
+
+ var result *WellKnownResponse
+ var name string
+ if result, name, err = Fetch(c, account); chk.E(err) {
+ return
+ }
+ pubkey, ok := result.Names[name]
+ if !ok {
+ err = errorf.E("no entry for name '%s'", name)
+ return
+ }
+ if !keys.IsValidPublicKey(pubkey) {
+ return nil, errorf.E("got an invalid public key '%s'", pubkey)
+ }
+ var pkb []byte
+ if pkb, err = keys.HexPubkeyToBytes(pubkey); chk.E(err) {
+ return
+ }
+ relays, _ := result.Relays[pubkey]
+ return &pointers.Profile{
+ PublicKey: pkb,
+ Relays: StringSliceToByteSlice(relays),
+ }, nil
+}
+
+// Fetch parses a DNS identity to find the URL to query for a NIP-05 identity
+// verification document.
+func Fetch(c context.T, account string) (
+ resp *WellKnownResponse,
+ name string, err error,
+) {
+
+ var domain string
+ if name, domain, err = ParseIdentifier(account); chk.E(err) {
+ err = errorf.E("failed to parse '%s': %w", account, err)
+ return
+ }
+ var req *http.Request
+ if req, err = http.NewRequestWithContext(
+ c, "GET",
+ fmt.Sprintf("https://%s/.well-known/nostr.json?name=%s", domain, name),
+ nil,
+ ); chk.E(err) {
+
+ return resp, name, errorf.E("failed to create a request: %w", err)
+ }
+ client := &http.Client{
+ CheckRedirect: func(
+ req *http.Request,
+ via []*http.Request,
+ ) error {
+ return http.ErrUseLastResponse
+ },
+ }
+ var res *http.Response
+ if res, err = client.Do(req); chk.E(err) {
+ err = errorf.E("request failed: %w", err)
+ return
+ }
+ defer res.Body.Close()
+ resp = NewWellKnownResponse()
+ b := make([]byte, 65535)
+ var n int
+ if n, err = res.Body.Read(b); chk.E(err) {
+ return
+ }
+ b = b[:n]
+ if err = json.Unmarshal(b, resp); chk.E(err) {
+ err = errorf.E("failed to decode json response: %w", err)
+ }
+ return
+}
+
+// NormalizeIdentifier mainly removes the `_@` from the base username so that
+// only the domain remains.
+func NormalizeIdentifier(account string) string {
+ if strings.HasPrefix(account, "_@") {
+ return account[2:]
+ }
+ return account
+}
+
+// StringSliceToByteSlice converts a slice of strings to a slice of slices of
+// bytes.
+func StringSliceToByteSlice(ss []string) (bs [][]byte) {
+ for _, s := range ss {
+ bs = append(bs, []byte(s))
+ }
+ return
+}
diff --git a/dns/nip05_test.go b/dns/nip05_test.go
new file mode 100644
index 0000000..60bda91
--- /dev/null
+++ b/dns/nip05_test.go
@@ -0,0 +1,71 @@
+package dns
+
+import (
+ "bytes"
+ "context"
+ "orly.dev/chk"
+ "testing"
+
+ "orly.dev/bech32encoding/pointers"
+ "orly.dev/keys"
+)
+
+func TestParse(t *testing.T) {
+ name, domain, _ := ParseIdentifier("saknd@yyq.com")
+ if name != "saknd" || domain != "yyq.com" {
+ t.Fatalf("wrong parsing")
+ }
+
+ name, domain, _ = ParseIdentifier("287354gkj+asbdfo8gw3rlicbsopifbcp3iougb5piseubfdikswub5ks@yyq.com")
+ if name != "287354gkj+asbdfo8gw3rlicbsopifbcp3iougb5piseubfdikswub5ks" || domain != "yyq.com" {
+ t.Fatalf("wrong parsing")
+ }
+
+ name, domain, _ = ParseIdentifier("asdn.com")
+ if name != "_" || domain != "asdn.com" {
+ t.Fatalf("wrong parsing")
+ }
+
+ name, domain, _ = ParseIdentifier("_@uxux.com.br")
+ if name != "_" || domain != "uxux.com.br" {
+ t.Fatalf("wrong parsing")
+ }
+
+ _, _, err := ParseIdentifier("821yh498ig21")
+ if err == nil {
+ t.Fatalf("should have errored")
+ }
+
+ _, _, err = ParseIdentifier("////")
+ if err == nil {
+ t.Fatalf("should have errored")
+ }
+}
+
+func TestQuery(t *testing.T) {
+ var pkb []byte
+ var err error
+ var pp *pointers.Profile
+ acct := "fiatjaf.com"
+ if pp, err = QueryIdentifier(context.Background(), acct); chk.E(err) {
+ t.Fatal(err)
+ }
+ if pkb, err = keys.HexPubkeyToBytes(
+ "3bf0c63fcb93463407af97a5e5ee64fa883d107ef9e558472c4eb9aaaefa459d",
+ ); chk.E(err) {
+ t.Fatal(err)
+ }
+ if err != nil || !bytes.Equal(pp.PublicKey, pkb) {
+ t.Fatalf("invalid query for fiatjaf.com")
+ }
+
+ pp, err = QueryIdentifier(context.Background(), "htlc@fiatjaf.com")
+ if pkb, err = keys.HexPubkeyToBytes(
+ "f9dd6a762506260b38a2d3e5b464213c2e47fa3877429fe9ee60e071a31a07d7",
+ ); chk.E(err) {
+ t.Fatal(err)
+ }
+ if err != nil || !bytes.Equal(pp.PublicKey, pkb) {
+ t.Fatalf("invalid query for htlc@fiatjaf.com")
+ }
+}
diff --git a/ec/base58/base58_test.go b/ec/base58/base58_test.go
index 9f2f525..220c9ab 100644
--- a/ec/base58/base58_test.go
+++ b/ec/base58/base58_test.go
@@ -9,7 +9,6 @@ import (
"encoding/hex"
"testing"
- "orly.dev/chk"
"orly.dev/ec/base58"
)
@@ -99,7 +98,7 @@ func TestBase58(t *testing.T) {
// Decode tests
for x, test := range hexTests {
b, err := hex.DecodeString(test.in)
- if chk.E(err) {
+ if err != nil {
t.Errorf("hex.DecodeString failed failed #%d: got: %s", x, test.in)
continue
}
diff --git a/ec/base58/base58check.go b/ec/base58/base58check.go
index 6ecb7c1..21de493 100644
--- a/ec/base58/base58check.go
+++ b/ec/base58/base58check.go
@@ -7,7 +7,7 @@ package base58
import (
"errors"
- "github.com/minio/sha256-simd"
+ "orly.dev/sha256"
)
// ErrChecksum indicates that the checksum of a check-encoded string does not verify against
diff --git a/ec/base58/base58check_test.go b/ec/base58/base58check_test.go
index acdabab..d9a8e9c 100644
--- a/ec/base58/base58check_test.go
+++ b/ec/base58/base58check_test.go
@@ -7,7 +7,6 @@ package base58_test
import (
"testing"
- "orly.dev/chk"
"orly.dev/ec/base58"
)
@@ -51,7 +50,7 @@ func TestBase58Check(t *testing.T) {
// test decoding
res, version, err := base58.CheckDecode(test.out)
switch {
- case chk.E(err):
+ case err != nil:
t.Errorf("CheckDecode test #%d failed with err: %v", x, err)
case version != test.version:
diff --git a/ec/base58/example_test.go b/ec/base58/example_test.go
index bc43416..0076ecb 100644
--- a/ec/base58/example_test.go
+++ b/ec/base58/example_test.go
@@ -7,7 +7,6 @@ package base58_test
import (
"fmt"
- "orly.dev/chk"
"orly.dev/ec/base58"
)
@@ -43,7 +42,7 @@ func ExampleCheckDecode() {
// Decode an example Base58Check encoded data.
encoded := "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa"
decoded, version, err := base58.CheckDecode(encoded)
- if chk.E(err) {
+ if err != nil {
fmt.Println(err)
return
}
diff --git a/ec/base58/genalphabet.go b/ec/base58/genalphabet.go
index 90f7330..9cb8702 100644
--- a/ec/base58/genalphabet.go
+++ b/ec/base58/genalphabet.go
@@ -13,8 +13,6 @@ import (
"log"
"os"
"strconv"
-
- "orly.dev/chk"
)
var (
@@ -47,14 +45,14 @@ var b58 = [256]byte{`)
func write(w io.Writer, b []byte) {
_, err := w.Write(b)
- if chk.E(err) {
+ if err != nil {
log.Fatal(err)
}
}
func main() {
fi, err := os.Create("alphabet.go")
- if chk.E(err) {
+ if err != nil {
log.Fatal(err)
}
defer fi.Close()
diff --git a/ec/base58/util_test.go b/ec/base58/util_test.go
new file mode 100644
index 0000000..64b0b18
--- /dev/null
+++ b/ec/base58/util_test.go
@@ -0,0 +1,9 @@
+package base58_test
+
+import (
+ "orly.dev/lol"
+)
+
+var (
+ log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf
+)
diff --git a/ec/bech32/bech32.go b/ec/bech32/bech32.go
index 33a89a9..c27003b 100644
--- a/ec/bech32/bech32.go
+++ b/ec/bech32/bech32.go
@@ -8,8 +8,6 @@ package bech32
import (
"bytes"
"strings"
-
- "orly.dev/chk"
)
// Charset is the set of characters used in the data section of bech32 strings.
@@ -122,10 +120,8 @@ func bech32Polymod(hrp []byte, values, checksum []byte) int {
// and 126), otherwise the results are undefined.
//
// For more details on the checksum calculation, please refer to BIP 173.
-func writeBech32Checksum(
- hrp []byte, data []byte, bldr *bytes.Buffer,
- version Version,
-) {
+func writeBech32Checksum(hrp []byte, data []byte, bldr *bytes.Buffer,
+ version Version) {
bech32Const := int(VersionToConsts[version])
polymod := bech32Polymod(hrp, data, nil) ^ bech32Const
@@ -205,7 +201,7 @@ func decodeNoLimit(bech []byte) ([]byte, []byte, Version, error) {
// Each character corresponds to the byte with value of the index in
// 'charset'.
decoded, err := toBytes(data)
- if chk.E(err) {
+ if err != nil {
return nil, nil, VersionUnknown, err
}
// Verify if the checksum (stored inside decoded[:]) is valid, given the
@@ -319,10 +315,8 @@ func EncodeM(hrp, data []byte) ([]byte, error) {
// ConvertBits converts a byte slice where each byte is encoding fromBits bits,
// to a byte slice where each byte is encoding toBits bits.
-func ConvertBits(data []byte, fromBits, toBits uint8, pad bool) (
- []byte,
- error,
-) {
+func ConvertBits(data []byte, fromBits, toBits uint8, pad bool) ([]byte,
+ error) {
if fromBits < 1 || fromBits > 8 || toBits < 1 || toBits > 8 {
return nil, ErrInvalidBitGroups{}
@@ -391,7 +385,7 @@ func ConvertBits(data []byte, fromBits, toBits uint8, pad bool) (
// checksum purposes.
func EncodeFromBase256(hrp, data []byte) ([]byte, error) {
converted, err := ConvertBits(data, 8, 5, true)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
return Encode(hrp, converted)
@@ -402,11 +396,11 @@ func EncodeFromBase256(hrp, data []byte) ([]byte, error) {
// base256-encoded byte slice and returns it along with the lowercase HRP.
func DecodeToBase256(bech []byte) ([]byte, []byte, error) {
hrp, data, err := Decode(bech)
- if chk.E(err) {
+ if err != nil {
return nil, nil, err
}
converted, err := ConvertBits(data, 5, 8, false)
- if chk.E(err) {
+ if err != nil {
return nil, nil, err
}
return hrp, converted, nil
diff --git a/ec/bech32/bech32_test.go b/ec/bech32/bech32_test.go
index 0bd59a7..dce7dbf 100644
--- a/ec/bech32/bech32_test.go
+++ b/ec/bech32/bech32_test.go
@@ -12,8 +12,6 @@ import (
"fmt"
"strings"
"testing"
-
- "orly.dev/chk"
)
// TestBech32 tests whether decoding and re-encoding the valid BIP-173 test
@@ -26,52 +24,32 @@ func TestBech32(t *testing.T) {
}{
{"A12UEL5L", nil},
{"a12uel5l", nil},
- {
- "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs",
- nil,
- },
+ {"an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs",
+ nil},
{"abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", nil},
- {
- "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
- nil,
- },
+ {"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
+ nil},
{"split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", nil},
- {
- "split1checkupstagehandshakeupstreamerranterredcaperred2y9e2w",
- ErrInvalidChecksum{
- "2y9e3w", "2y9e3wlc445v",
- "2y9e2w",
- },
- }, // invalid checksum
- {
- "s lit1checkupstagehandshakeupstreamerranterredcaperredp8hs2p",
- ErrInvalidCharacter(' '),
- }, // invalid character (space) in hrp
- {
- "spl\x7Ft1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
- ErrInvalidCharacter(127),
- }, // invalid character (DEL) in hrp
- {
- "split1cheo2y9e2w",
- ErrNonCharsetChar('o'),
- }, // invalid character (o) in data part
+ {"split1checkupstagehandshakeupstreamerranterredcaperred2y9e2w",
+ ErrInvalidChecksum{"2y9e3w", "2y9e3wlc445v",
+ "2y9e2w"}}, // invalid checksum
+ {"s lit1checkupstagehandshakeupstreamerranterredcaperredp8hs2p",
+ ErrInvalidCharacter(' ')}, // invalid character (space) in hrp
+ {"spl\x7Ft1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
+ ErrInvalidCharacter(127)}, // invalid character (DEL) in hrp
+ {"split1cheo2y9e2w",
+ ErrNonCharsetChar('o')}, // invalid character (o) in data part
{"split1a2y9w", ErrInvalidSeparatorIndex(5)}, // too short data part
- {
- "1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
- ErrInvalidSeparatorIndex(0),
- }, // empty hrp
- {
- "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
- ErrInvalidLength(91),
- }, // too long
+ {"1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
+ ErrInvalidSeparatorIndex(0)}, // empty hrp
+ {"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
+ ErrInvalidLength(91)}, // too long
// Additional test vectors used in bitcoin core
{" 1nwldj5", ErrInvalidCharacter(' ')},
{"\x7f" + "1axkwrx", ErrInvalidCharacter(0x7f)},
{"\x801eym55h", ErrInvalidCharacter(0x80)},
- {
- "an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx",
- ErrInvalidLength(91),
- },
+ {"an84characterslonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1569pvx",
+ ErrInvalidLength(91)},
{"pzry9x0s0muk", ErrInvalidSeparatorIndex(-1)},
{"1pzry9x0s0muk", ErrInvalidSeparatorIndex(0)},
{"x1b4n0q5v", ErrNonCharsetChar(98)},
@@ -87,32 +65,28 @@ func TestBech32(t *testing.T) {
str := []byte(test.str)
hrp, decoded, err := Decode([]byte(str))
if !errors.Is(err, test.expectedError) {
- t.Errorf(
- "%d: expected decoding error %v "+
- "instead got %v", i, test.expectedError, err,
- )
+ t.Errorf("%d: expected decoding error %v "+
+ "instead got %v", i, test.expectedError, err)
continue
}
- if chk.E(err) {
+ if err != nil {
// End test case here if a decoding error was expected.
continue
}
// Check that it encodes to the same string
encoded, err := Encode(hrp, decoded)
- if chk.E(err) {
+ if err != nil {
t.Errorf("encoding failed: %v", err)
}
if !bytes.Equal(encoded, bytes.ToLower([]byte(str))) {
- t.Errorf(
- "expected data to encode to %v, but got %v",
- str, encoded,
- )
+ t.Errorf("expected data to encode to %v, but got %v",
+ str, encoded)
}
// Flip a bit in the string an make sure it is caught.
pos := bytes.LastIndexAny(str, "1")
flipped := []byte(string(str[:pos+1]) + string(str[pos+1]^1) + string(str[pos+2:]))
_, _, err = Decode(flipped)
- if !chk.E(err) {
+ if err == nil {
t.Error("expected decoding to fail")
}
}
@@ -129,25 +103,19 @@ func TestBech32M(t *testing.T) {
}{
{"A1LQFN3A", nil},
{"a1lqfn3a", nil},
- {
- "an83characterlonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11sg7hg6",
- nil,
- },
+ {"an83characterlonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11sg7hg6",
+ nil},
{"abcdef1l7aum6echk45nj3s0wdvt2fg8x9yrzpqzd3ryx", nil},
- {
- "11llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllludsr8",
- nil,
- },
+ {"11llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllludsr8",
+ nil},
{"split1checkupstagehandshakeupstreamerranterredcaperredlc445v", nil},
{"?1v759aa", nil},
// Additional test vectors used in bitcoin core
{"\x201xj0phk", ErrInvalidCharacter('\x20')},
{"\x7f1g6xzxy", ErrInvalidCharacter('\x7f')},
{"\x801vctc34", ErrInvalidCharacter('\x80')},
- {
- "an84characterslonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11d6pts4",
- ErrInvalidLength(91),
- },
+ {"an84characterslonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11d6pts4",
+ ErrInvalidLength(91)},
{"qyrz8wqd2c9m", ErrInvalidSeparatorIndex(-1)},
{"1qyrz8wqd2c9m", ErrInvalidSeparatorIndex(0)},
{"y1b0jsk6g", ErrNonCharsetChar(98)},
@@ -167,34 +135,30 @@ func TestBech32M(t *testing.T) {
str := []byte(test.str)
hrp, decoded, err := Decode(str)
if test.expectedError != err {
- t.Errorf(
- "%d: (%v) expected decoding error %v "+
- "instead got %v", i, str, test.expectedError,
- err,
- )
+ t.Errorf("%d: (%v) expected decoding error %v "+
+ "instead got %v", i, str, test.expectedError,
+ err)
continue
}
- if chk.E(err) {
+ if err != nil {
// End test case here if a decoding error was expected.
continue
}
// Check that it encodes to the same string, using bech32 m.
encoded, err := EncodeM(hrp, decoded)
- if chk.E(err) {
+ if err != nil {
t.Errorf("encoding failed: %v", err)
}
if !bytes.Equal(encoded, bytes.ToLower(str)) {
- t.Errorf(
- "expected data to encode to %v, but got %v",
- str, encoded,
- )
+ t.Errorf("expected data to encode to %v, but got %v",
+ str, encoded)
}
// Flip a bit in the string an make sure it is caught.
pos := bytes.LastIndexAny(str, "1")
flipped := []byte(string(str[:pos+1]) + string(str[pos+1]^1) + string(str[pos+2:]))
_, _, err = Decode(flipped)
- if !chk.E(err) {
+ if err == nil {
t.Error("expected decoding to fail")
}
}
@@ -210,73 +174,47 @@ func TestBech32DecodeGeneric(t *testing.T) {
}{
{"A1LQFN3A", VersionM},
{"a1lqfn3a", VersionM},
- {
- "an83characterlonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11sg7hg6",
- VersionM,
- },
+ {"an83characterlonghumanreadablepartthatcontainsthetheexcludedcharactersbioandnumber11sg7hg6",
+ VersionM},
{"abcdef1l7aum6echk45nj3s0wdvt2fg8x9yrzpqzd3ryx", VersionM},
- {
- "11llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllludsr8",
- VersionM,
- },
- {
- "split1checkupstagehandshakeupstreamerranterredcaperredlc445v",
- VersionM,
- },
+ {"11llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllludsr8",
+ VersionM},
+ {"split1checkupstagehandshakeupstreamerranterredcaperredlc445v",
+ VersionM},
{"?1v759aa", VersionM},
{"A12UEL5L", Version0},
{"a12uel5l", Version0},
- {
- "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs",
- Version0,
- },
+ {"an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs",
+ Version0},
{"abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", Version0},
- {
- "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
- Version0,
- },
- {
- "split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
- Version0,
- },
+ {"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
+ Version0},
+ {"split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
+ Version0},
{"BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4", Version0},
- {
- "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
- Version0,
- },
- {
- "bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kt5nd6y",
- VersionM,
- },
+ {"tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
+ Version0},
+ {"bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kt5nd6y",
+ VersionM},
{"BC1SW50QGDZ25J", VersionM},
{"bc1zw508d6qejxtdg4y5r3zarvaryvaxxpcs", VersionM},
- {
- "tb1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesrxh6hy",
- Version0,
- },
- {
- "tb1pqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesf3hn0c",
- VersionM,
- },
- {
- "bc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqzk5jj0",
- VersionM,
- },
+ {"tb1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesrxh6hy",
+ Version0},
+ {"tb1pqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesf3hn0c",
+ VersionM},
+ {"bc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqzk5jj0",
+ VersionM},
}
for i, test := range tests {
_, _, version, err := DecodeGeneric([]byte(test.str))
- if chk.E(err) {
- t.Errorf(
- "%d: (%v) unexpected error during "+
- "decoding: %v", i, test.str, err,
- )
+ if err != nil {
+ t.Errorf("%d: (%v) unexpected error during "+
+ "decoding: %v", i, test.str, err)
continue
}
if version != test.version {
- t.Errorf(
- "(%v): invalid version: expected %v, got %v",
- test.str, test.version, version,
- )
+ t.Errorf("(%v): invalid version: expected %v, got %v",
+ test.str, test.version, version)
}
}
}
@@ -290,91 +228,79 @@ func TestMixedCaseEncode(t *testing.T) {
hrp string
data string
encoded string
- }{
- {
- name: "all uppercase HRP with no data",
- hrp: "A",
- data: "",
- encoded: "a12uel5l",
- }, {
- name: "all uppercase HRP with data",
- hrp: "UPPERCASE",
- data: "787878",
- encoded: "uppercase10pu8sss7kmp",
- }, {
- name: "mixed case HRP even offsets uppercase",
- hrp: "AbCdEf",
- data: "00443214c74254b635cf84653a56d7c675be77df",
- encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
- }, {
- name: "mixed case HRP odd offsets uppercase ",
- hrp: "aBcDeF",
- data: "00443214c74254b635cf84653a56d7c675be77df",
- encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
- }, {
- name: "all lowercase HRP",
- hrp: "abcdef",
- data: "00443214c74254b635cf84653a56d7c675be77df",
- encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
- },
- }
+ }{{
+ name: "all uppercase HRP with no data",
+ hrp: "A",
+ data: "",
+ encoded: "a12uel5l",
+ }, {
+ name: "all uppercase HRP with data",
+ hrp: "UPPERCASE",
+ data: "787878",
+ encoded: "uppercase10pu8sss7kmp",
+ }, {
+ name: "mixed case HRP even offsets uppercase",
+ hrp: "AbCdEf",
+ data: "00443214c74254b635cf84653a56d7c675be77df",
+ encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
+ }, {
+ name: "mixed case HRP odd offsets uppercase ",
+ hrp: "aBcDeF",
+ data: "00443214c74254b635cf84653a56d7c675be77df",
+ encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
+ }, {
+ name: "all lowercase HRP",
+ hrp: "abcdef",
+ data: "00443214c74254b635cf84653a56d7c675be77df",
+ encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
+ }}
for _, test := range tests {
// Convert the text hex to bytes, convert those bytes from base256 to
// base32, then ensure the encoded result with the HRP provided in the
// test data is as expected.
data, err := hex.DecodeString(test.data)
- if chk.E(err) {
+ if err != nil {
t.Errorf("%q: invalid hex %q: %v", test.name, test.data, err)
continue
}
convertedData, err := ConvertBits(data, 8, 5, true)
- if chk.E(err) {
- t.Errorf(
- "%q: unexpected convert bits error: %v", test.name,
- err,
- )
+ if err != nil {
+ t.Errorf("%q: unexpected convert bits error: %v", test.name,
+ err)
continue
}
gotEncoded, err := Encode([]byte(test.hrp), convertedData)
- if chk.E(err) {
+ if err != nil {
t.Errorf("%q: unexpected encode error: %v", test.name, err)
continue
}
if !bytes.Equal(gotEncoded, []byte(test.encoded)) {
- t.Errorf(
- "%q: mismatched encoding -- got %q, want %q", test.name,
- gotEncoded, test.encoded,
- )
+ t.Errorf("%q: mismatched encoding -- got %q, want %q", test.name,
+ gotEncoded, test.encoded)
continue
}
// Ensure the decoding the expected lowercase encoding converted to all
// uppercase produces the lowercase HRP and original data.
gotHRP, gotData, err := Decode(bytes.ToUpper([]byte(test.encoded)))
- if chk.E(err) {
+ if err != nil {
t.Errorf("%q: unexpected decode error: %v", test.name, err)
continue
}
wantHRP := strings.ToLower(test.hrp)
if !bytes.Equal(gotHRP, []byte(wantHRP)) {
- t.Errorf(
- "%q: mismatched decoded HRP -- got %q, want %q", test.name,
- gotHRP, wantHRP,
- )
+ t.Errorf("%q: mismatched decoded HRP -- got %q, want %q", test.name,
+ gotHRP, wantHRP)
continue
}
convertedGotData, err := ConvertBits(gotData, 5, 8, false)
- if chk.E(err) {
- t.Errorf(
- "%q: unexpected convert bits error: %v", test.name,
- err,
- )
+ if err != nil {
+ t.Errorf("%q: unexpected convert bits error: %v", test.name,
+ err)
continue
}
if !bytes.Equal(convertedGotData, data) {
- t.Errorf(
- "%q: mismatched data -- got %x, want %x", test.name,
- convertedGotData, data,
- )
+ t.Errorf("%q: mismatched data -- got %x, want %x", test.name,
+ convertedGotData, data)
continue
}
}
@@ -386,16 +312,14 @@ func TestCanDecodeUnlimtedBech32(t *testing.T) {
input := "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq5kx0yd"
// Sanity check that an input of this length errors on regular Decode()
_, _, err := Decode([]byte(input))
- if !chk.E(err) {
+ if err == nil {
t.Fatalf("Test vector not appropriate")
}
// Try and decode it.
hrp, data, err := DecodeNoLimit([]byte(input))
- if chk.E(err) {
- t.Fatalf(
- "Expected decoding of large string to work. Got error: %v",
- err,
- )
+ if err != nil {
+ t.Fatalf("Expected decoding of large string to work. Got error: %v",
+ err)
}
// Verify data for correctness.
if !bytes.Equal(hrp, []byte("1")) {
@@ -419,145 +343,125 @@ func TestBech32Base256(t *testing.T) {
hrp string // expected human-readable part
data string // expected hex-encoded data
err error // expected error
- }{
- {
- name: "all uppercase, no data",
- encoded: "A12UEL5L",
- hrp: "a",
- data: "",
- }, {
- name: "long hrp with separator and excluded chars, no data",
- encoded: "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs",
- hrp: "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio",
- data: "",
- }, {
- name: "6 char hrp with data with leading zero",
- encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
- hrp: "abcdef",
- data: "00443214c74254b635cf84653a56d7c675be77df",
- }, {
- name: "hrp same as separator and max length encoded string",
- encoded: "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
- hrp: "1",
- data: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
- }, {
- name: "5 char hrp with data chosen to produce human-readable data part",
- encoded: "split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
- hrp: "split",
- data: "c5f38b70305f519bf66d85fb6cf03058f3dde463ecd7918f2dc743918f2d",
- }, {
- name: "same as previous but with checksum invalidated",
- encoded: "split1checkupstagehandshakeupstreamerranterredcaperred2y9e2w",
- err: ErrInvalidChecksum{"2y9e3w", "2y9e3wlc445v", "2y9e2w"},
- }, {
- name: "hrp with invalid character (space)",
- encoded: "s lit1checkupstagehandshakeupstreamerranterredcaperredp8hs2p",
- err: ErrInvalidCharacter(' '),
- }, {
- name: "hrp with invalid character (DEL)",
- encoded: "spl\x7ft1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
- err: ErrInvalidCharacter(127),
- }, {
- name: "data part with invalid character (o)",
- encoded: "split1cheo2y9e2w",
- err: ErrNonCharsetChar('o'),
- }, {
- name: "data part too short",
- encoded: "split1a2y9w",
- err: ErrInvalidSeparatorIndex(5),
- }, {
- name: "empty hrp",
- encoded: "1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
- err: ErrInvalidSeparatorIndex(0),
- }, {
- name: "no separator",
- encoded: "pzry9x0s0muk",
- err: ErrInvalidSeparatorIndex(-1),
- }, {
- name: "too long by one char",
- encoded: "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
- err: ErrInvalidLength(91),
- }, {
- name: "invalid due to mixed case in hrp",
- encoded: "aBcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
- err: ErrMixedCase{},
- }, {
- name: "invalid due to mixed case in data part",
- encoded: "abcdef1Qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
- err: ErrMixedCase{},
- },
- }
+ }{{
+ name: "all uppercase, no data",
+ encoded: "A12UEL5L",
+ hrp: "a",
+ data: "",
+ }, {
+ name: "long hrp with separator and excluded chars, no data",
+ encoded: "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs",
+ hrp: "an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio",
+ data: "",
+ }, {
+ name: "6 char hrp with data with leading zero",
+ encoded: "abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
+ hrp: "abcdef",
+ data: "00443214c74254b635cf84653a56d7c675be77df",
+ }, {
+ name: "hrp same as separator and max length encoded string",
+ encoded: "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
+ hrp: "1",
+ data: "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+ }, {
+ name: "5 char hrp with data chosen to produce human-readable data part",
+ encoded: "split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
+ hrp: "split",
+ data: "c5f38b70305f519bf66d85fb6cf03058f3dde463ecd7918f2dc743918f2d",
+ }, {
+ name: "same as previous but with checksum invalidated",
+ encoded: "split1checkupstagehandshakeupstreamerranterredcaperred2y9e2w",
+ err: ErrInvalidChecksum{"2y9e3w", "2y9e3wlc445v", "2y9e2w"},
+ }, {
+ name: "hrp with invalid character (space)",
+ encoded: "s lit1checkupstagehandshakeupstreamerranterredcaperredp8hs2p",
+ err: ErrInvalidCharacter(' '),
+ }, {
+ name: "hrp with invalid character (DEL)",
+ encoded: "spl\x7ft1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
+ err: ErrInvalidCharacter(127),
+ }, {
+ name: "data part with invalid character (o)",
+ encoded: "split1cheo2y9e2w",
+ err: ErrNonCharsetChar('o'),
+ }, {
+ name: "data part too short",
+ encoded: "split1a2y9w",
+ err: ErrInvalidSeparatorIndex(5),
+ }, {
+ name: "empty hrp",
+ encoded: "1checkupstagehandshakeupstreamerranterredcaperred2y9e3w",
+ err: ErrInvalidSeparatorIndex(0),
+ }, {
+ name: "no separator",
+ encoded: "pzry9x0s0muk",
+ err: ErrInvalidSeparatorIndex(-1),
+ }, {
+ name: "too long by one char",
+ encoded: "11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j",
+ err: ErrInvalidLength(91),
+ }, {
+ name: "invalid due to mixed case in hrp",
+ encoded: "aBcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
+ err: ErrMixedCase{},
+ }, {
+ name: "invalid due to mixed case in data part",
+ encoded: "abcdef1Qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw",
+ err: ErrMixedCase{},
+ }}
for _, test := range tests {
// Ensure the decode either produces an error or not as expected.
str := test.encoded
gotHRP, gotData, err := DecodeToBase256([]byte(str))
if test.err != err {
- t.Errorf(
- "%q: unexpected decode error -- got %v, want %v",
- test.name, err, test.err,
- )
+ t.Errorf("%q: unexpected decode error -- got %v, want %v",
+ test.name, err, test.err)
continue
}
- if chk.E(err) {
+ if err != nil {
// End test case here if a decoding error was expected.
continue
}
// Ensure the expected HRP and original data are as expected.
if !bytes.Equal(gotHRP, []byte(test.hrp)) {
- t.Errorf(
- "%q: mismatched decoded HRP -- got %q, want %q", test.name,
- gotHRP, test.hrp,
- )
+ t.Errorf("%q: mismatched decoded HRP -- got %q, want %q", test.name,
+ gotHRP, test.hrp)
continue
}
data, err := hex.DecodeString(test.data)
- if chk.E(err) {
+ if err != nil {
t.Errorf("%q: invalid hex %q: %v", test.name, test.data, err)
continue
}
if !bytes.Equal(gotData, data) {
- t.Errorf(
- "%q: mismatched data -- got %x, want %x", test.name,
- gotData, data,
- )
+ t.Errorf("%q: mismatched data -- got %x, want %x", test.name,
+ gotData, data)
continue
}
// Encode the same data with the HRP converted to all uppercase and
// ensure the result is the lowercase version of the original encoded
// bech32 string.
- gotEncoded, err := EncodeFromBase256(
- bytes.ToUpper([]byte(test.hrp)), data,
- )
- if chk.E(err) {
- t.Errorf(
- "%q: unexpected uppercase HRP encode error: %v", test.name,
- err,
- )
+ gotEncoded, err := EncodeFromBase256(bytes.ToUpper([]byte(test.hrp)), data)
+ if err != nil {
+ t.Errorf("%q: unexpected uppercase HRP encode error: %v", test.name,
+ err)
}
wantEncoded := bytes.ToLower([]byte(str))
if !bytes.Equal(gotEncoded, wantEncoded) {
- t.Errorf(
- "%q: mismatched encoding -- got %q, want %q", test.name,
- gotEncoded, wantEncoded,
- )
+ t.Errorf("%q: mismatched encoding -- got %q, want %q", test.name,
+ gotEncoded, wantEncoded)
}
// Encode the same data with the HRP converted to all lowercase and
// ensure the result is the lowercase version of the original encoded
// bech32 string.
- gotEncoded, err = EncodeFromBase256(
- bytes.ToLower([]byte(test.hrp)), data,
- )
- if chk.E(err) {
- t.Errorf(
- "%q: unexpected lowercase HRP encode error: %v", test.name,
- err,
- )
+ gotEncoded, err = EncodeFromBase256(bytes.ToLower([]byte(test.hrp)), data)
+ if err != nil {
+ t.Errorf("%q: unexpected lowercase HRP encode error: %v", test.name,
+ err)
}
if !bytes.Equal(gotEncoded, wantEncoded) {
- t.Errorf(
- "%q: mismatched encoding -- got %q, want %q", test.name,
- gotEncoded, wantEncoded,
- )
+ t.Errorf("%q: mismatched encoding -- got %q, want %q", test.name,
+ gotEncoded, wantEncoded)
}
// Encode the same data with the HRP converted to mixed upper and
// lowercase and ensure the result is the lowercase version of the
@@ -571,23 +475,19 @@ func TestBech32Base256(t *testing.T) {
mixedHRPBuilder.WriteRune(r)
}
gotEncoded, err = EncodeFromBase256(mixedHRPBuilder.Bytes(), data)
- if chk.E(err) {
- t.Errorf(
- "%q: unexpected lowercase HRP encode error: %v", test.name,
- err,
- )
+ if err != nil {
+ t.Errorf("%q: unexpected lowercase HRP encode error: %v", test.name,
+ err)
}
if !bytes.Equal(gotEncoded, wantEncoded) {
- t.Errorf(
- "%q: mismatched encoding -- got %q, want %q", test.name,
- gotEncoded, wantEncoded,
- )
+ t.Errorf("%q: mismatched encoding -- got %q, want %q", test.name,
+ gotEncoded, wantEncoded)
}
// Ensure a bit flip in the string is caught.
pos := strings.LastIndexAny(test.encoded, "1")
flipped := str[:pos+1] + string(str[pos+1]^1) + str[pos+2:]
_, _, err = DecodeToBase256([]byte(flipped))
- if !chk.E(err) {
+ if err == nil {
t.Error("expected decoding to fail")
}
}
@@ -599,12 +499,12 @@ func TestBech32Base256(t *testing.T) {
func BenchmarkEncodeDecodeCycle(b *testing.B) {
// Use a fixed, 49-byte raw data for testing.
inputData, err := hex.DecodeString("cbe6365ddbcda9a9915422c3f091c13f8c7b2f263b8d34067bd12c274408473fa764871c9dd51b1bb34873b3473b633ed1")
- if chk.E(err) {
+ if err != nil {
b.Fatalf("failed to initialize input data: %v", err)
}
// Convert this into a 79-byte, base 32 byte slice.
base32Input, err := ConvertBits(inputData, 8, 5, true)
- if chk.E(err) {
+ if err != nil {
b.Fatalf("failed to convert input to 32 bits-per-element: %v", err)
}
// Use a fixed hrp for the tests. This should generate an encoded bech32
@@ -617,11 +517,11 @@ func BenchmarkEncodeDecodeCycle(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
str, err := Encode([]byte(hrp), base32Input)
- if chk.E(err) {
+ if err != nil {
b.Fatalf("failed to encode input: %v", err)
}
_, _, err = Decode(str)
- if chk.E(err) {
+ if err != nil {
b.Fatalf("failed to decode string: %v", err)
}
}
@@ -679,22 +579,20 @@ func TestConvertBits(t *testing.T) {
}
for i, tc := range tests {
input, err := hex.DecodeString(tc.input)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("invalid test input data: %v", err)
}
expected, err := hex.DecodeString(tc.output)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("invalid test output data: %v", err)
}
actual, err := ConvertBits(input, tc.fromBits, tc.toBits, tc.pad)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("test case %d failed: %v", i, err)
}
if !bytes.Equal(actual, expected) {
- t.Fatalf(
- "test case %d has wrong output; expected=%x actual=%x",
- i, expected, actual,
- )
+ t.Fatalf("test case %d has wrong output; expected=%x actual=%x",
+ i, expected, actual)
}
}
}
@@ -720,15 +618,13 @@ func TestConvertBitsFailures(t *testing.T) {
}
for i, tc := range tests {
input, err := hex.DecodeString(tc.input)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("invalid test input data: %v", err)
}
_, err = ConvertBits(input, tc.fromBits, tc.toBits, tc.pad)
if err != tc.err {
- t.Fatalf(
- "test case %d failure: expected '%v' got '%v'", i,
- tc.err, err,
- )
+ t.Fatalf("test case %d failure: expected '%v' got '%v'", i,
+ tc.err, err)
}
}
}
@@ -741,14 +637,14 @@ func TestConvertBitsFailures(t *testing.T) {
func BenchmarkConvertBitsDown(b *testing.B) {
// Use a fixed, 49-byte raw data for testing.
inputData, err := hex.DecodeString("cbe6365ddbcda9a9915422c3f091c13f8c7b2f263b8d34067bd12c274408473fa764871c9dd51b1bb34873b3473b633ed1")
- if chk.E(err) {
+ if err != nil {
b.Fatalf("failed to initialize input data: %v", err)
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := ConvertBits(inputData, 8, 5, true)
- if chk.E(err) {
+ if err != nil {
b.Fatalf("error converting bits: %v", err)
}
}
@@ -762,14 +658,14 @@ func BenchmarkConvertBitsDown(b *testing.B) {
func BenchmarkConvertBitsUp(b *testing.B) {
// Use a fixed, 79-byte raw data for testing.
inputData, err := hex.DecodeString("190f13030c170e1b1916141a13040a14040b011f01040e01071e0607160b1906070e06130801131b1a0416020e110008081c1f1a0e19040703120e1d0a06181b160d0407070c1a07070d11131d1408")
- if chk.E(err) {
+ if err != nil {
b.Fatalf("failed to initialize input data: %v", err)
}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := ConvertBits(inputData, 8, 5, true)
- if chk.E(err) {
+ if err != nil {
b.Fatalf("error converting bits: %v", err)
}
}
diff --git a/ec/bech32/example_test.go b/ec/bech32/example_test.go
index 0984b3a..ae15651 100644
--- a/ec/bech32/example_test.go
+++ b/ec/bech32/example_test.go
@@ -7,15 +7,13 @@ package bech32
import (
"encoding/hex"
"fmt"
-
- "orly.dev/chk"
)
// This example demonstrates how to decode a bech32 encoded string.
func ExampleDecode() {
encoded := "bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx"
hrp, decoded, err := Decode([]byte(encoded))
- if chk.E(err) {
+ if err != nil {
fmt.Println("Error:", err)
}
// Show the decoded data.
@@ -31,11 +29,11 @@ func ExampleEncode() {
data := []byte("Test data")
// Convert test data to base32:
conv, err := ConvertBits(data, 8, 5, true)
- if chk.E(err) {
+ if err != nil {
fmt.Println("Error:", err)
}
encoded, err := Encode([]byte("customHrp!11111q"), conv)
- if chk.E(err) {
+ if err != nil {
fmt.Println("Error:", err)
}
// Show the encoded data.
diff --git a/ec/bench_test.go b/ec/bench_test.go
index aee35a5..c4d8625 100644
--- a/ec/bench_test.go
+++ b/ec/bench_test.go
@@ -8,7 +8,6 @@ import (
"math/big"
"testing"
- "orly.dev/chk"
"orly.dev/ec/secp256k1"
"orly.dev/hex"
)
@@ -36,7 +35,7 @@ func setHex(hexString string) *FieldVal {
// called with hard-coded values.
func hexToFieldVal(s string) *FieldVal {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
var f FieldVal
@@ -150,7 +149,7 @@ func BenchmarkScalarMult(b *testing.B) {
// must only) be called with hard-coded values.
func hexToModNScalar(s string) *ModNScalar {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
var scalar ModNScalar
diff --git a/ec/btcec_test.go b/ec/btcec_test.go
index 807fafb..3d80d79 100644
--- a/ec/btcec_test.go
+++ b/ec/btcec_test.go
@@ -11,8 +11,6 @@ import (
"fmt"
"math/big"
"testing"
-
- "orly.dev/chk"
)
// isJacobianOnS256Curve returns boolean if the point (x,y,z) is on the
@@ -231,24 +229,18 @@ func TestAddJacobian(t *testing.T) {
// Ensure the test data is using points that are actually on
// the curve (or the point at infinity).
if !p1.Z.IsZero() && !isJacobianOnS256Curve(&p1) {
- t.Errorf(
- "#%d first point is not on the curve -- "+
- "invalid test data", i,
- )
+ t.Errorf("#%d first point is not on the curve -- "+
+ "invalid test data", i)
continue
}
if !p2.Z.IsZero() && !isJacobianOnS256Curve(&p2) {
- t.Errorf(
- "#%d second point is not on the curve -- "+
- "invalid test data", i,
- )
+ t.Errorf("#%d second point is not on the curve -- "+
+ "invalid test data", i)
continue
}
if !want.Z.IsZero() && !isJacobianOnS256Curve(&want) {
- t.Errorf(
- "#%d expected point is not on the curve -- "+
- "invalid test data", i,
- )
+ t.Errorf("#%d expected point is not on the curve -- "+
+ "invalid test data", i)
continue
}
// Add the two points.
@@ -257,11 +249,8 @@ func TestAddJacobian(t *testing.T) {
// Ensure result matches expected.
if !r.X.Equals(&want.X) || !r.Y.Equals(&want.Y) || !r.Z.Equals(&want.Z) {
- t.Errorf(
- "#%d wrong result\ngot: (%v, %v, %v)\n"+
- "want: (%v, %v, %v)", i, r.X, r.Y, r.Z, want.X, want.Y,
- want.Z,
- )
+ t.Errorf("#%d wrong result\ngot: (%v, %v, %v)\n"+
+ "want: (%v, %v, %v)", i, r.X, r.Y, r.Z, want.X, want.Y, want.Z)
continue
}
}
@@ -334,24 +323,18 @@ func TestAddAffine(t *testing.T) {
// Ensure the test data is using points that are actually on
// the curve (or the point at infinity).
if !(x1.Sign() == 0 && y1.Sign() == 0) && !S256().IsOnCurve(x1, y1) {
- t.Errorf(
- "#%d first point is not on the curve -- "+
- "invalid test data", i,
- )
+ t.Errorf("#%d first point is not on the curve -- "+
+ "invalid test data", i)
continue
}
if !(x2.Sign() == 0 && y2.Sign() == 0) && !S256().IsOnCurve(x2, y2) {
- t.Errorf(
- "#%d second point is not on the curve -- "+
- "invalid test data", i,
- )
+ t.Errorf("#%d second point is not on the curve -- "+
+ "invalid test data", i)
continue
}
if !(x3.Sign() == 0 && y3.Sign() == 0) && !S256().IsOnCurve(x3, y3) {
- t.Errorf(
- "#%d expected point is not on the curve -- "+
- "invalid test data", i,
- )
+ t.Errorf("#%d expected point is not on the curve -- "+
+ "invalid test data", i)
continue
}
// Add the two points.
@@ -359,10 +342,8 @@ func TestAddAffine(t *testing.T) {
// Ensure result matches expected.
if rx.Cmp(x3) != 00 || ry.Cmp(y3) != 0 {
- t.Errorf(
- "#%d wrong result\ngot: (%x, %x)\n"+
- "want: (%x, %x)", i, rx, ry, x3, y3,
- )
+ t.Errorf("#%d wrong result\ngot: (%x, %x)\n"+
+ "want: (%x, %x)", i, rx, ry, x3, y3)
continue
}
}
@@ -429,17 +410,13 @@ func TestDoubleJacobian(t *testing.T) {
// Ensure the test data is using points that are actually on
// the curve (or the point at infinity).
if !p1.Z.IsZero() && !isJacobianOnS256Curve(&p1) {
- t.Errorf(
- "#%d first point is not on the curve -- "+
- "invalid test data", i,
- )
+ t.Errorf("#%d first point is not on the curve -- "+
+ "invalid test data", i)
continue
}
if !want.Z.IsZero() && !isJacobianOnS256Curve(&want) {
- t.Errorf(
- "#%d expected point is not on the curve -- "+
- "invalid test data", i,
- )
+ t.Errorf("#%d expected point is not on the curve -- "+
+ "invalid test data", i)
continue
}
// Double the point.
@@ -447,11 +424,9 @@ func TestDoubleJacobian(t *testing.T) {
DoubleNonConst(&p1, &result)
// Ensure result matches expected.
if !isStrictlyEqual(&result, &want) {
- t.Errorf(
- "#%d wrong result\ngot: (%v, %v, %v)\n"+
- "want: (%v, %v, %v)", i, result.X, result.Y, result.Z,
- want.X, want.Y, want.Z,
- )
+ t.Errorf("#%d wrong result\ngot: (%v, %v, %v)\n"+
+ "want: (%v, %v, %v)", i, result.X, result.Y, result.Z,
+ want.X, want.Y, want.Z)
continue
}
}
@@ -506,17 +481,13 @@ func TestDoubleAffine(t *testing.T) {
// Ensure the test data is using points that are actually on
// the curve (or the point at infinity).
if !(x1.Sign() == 0 && y1.Sign() == 0) && !S256().IsOnCurve(x1, y1) {
- t.Errorf(
- "#%d first point is not on the curve -- "+
- "invalid test data", i,
- )
+ t.Errorf("#%d first point is not on the curve -- "+
+ "invalid test data", i)
continue
}
if !(x3.Sign() == 0 && y3.Sign() == 0) && !S256().IsOnCurve(x3, y3) {
- t.Errorf(
- "#%d expected point is not on the curve -- "+
- "invalid test data", i,
- )
+ t.Errorf("#%d expected point is not on the curve -- "+
+ "invalid test data", i)
continue
}
// Double the point.
@@ -524,10 +495,8 @@ func TestDoubleAffine(t *testing.T) {
// Ensure result matches expected.
if rx.Cmp(x3) != 00 || ry.Cmp(y3) != 0 {
- t.Errorf(
- "#%d wrong result\ngot: (%x, %x)\n"+
- "want: (%x, %x)", i, rx, ry, x3, y3,
- )
+ t.Errorf("#%d wrong result\ngot: (%x, %x)\n"+
+ "want: (%x, %x)", i, rx, ry, x3, y3)
continue
}
}
@@ -584,10 +553,8 @@ func TestBaseMult(t *testing.T) {
}
x, y := s256.ScalarBaseMult(k.Bytes())
if fmt.Sprintf("%X", x) != e.x || fmt.Sprintf("%X", y) != e.y {
- t.Errorf(
- "%d: bad output for k=%s: got (%X, %X), want (%s, %s)", i,
- e.k, x, y, e.x, e.y,
- )
+ t.Errorf("%d: bad output for k=%s: got (%X, %X), want (%s, %s)", i,
+ e.k, x, y, e.x, e.y)
}
if testing.Short() && i > 5 {
break
@@ -601,17 +568,15 @@ func TestBaseMultVerify(t *testing.T) {
for i := 0; i < 30; i++ {
data := make([]byte, bytes)
_, err := rand.Read(data)
- if chk.E(err) {
+ if err != nil {
t.Errorf("failed to read random data for %d", i)
continue
}
x, y := s256.ScalarBaseMult(data)
xWant, yWant := s256.ScalarMult(s256.Gx, s256.Gy, data)
if x.Cmp(xWant) != 0 || y.Cmp(yWant) != 0 {
- t.Errorf(
- "%d: bad output for %X: got (%X, %X), want (%X, %X)",
- i, data, x, y, xWant, yWant,
- )
+ t.Errorf("%d: bad output for %X: got (%X, %X), want (%X, %X)",
+ i, data, x, y, xWant, yWant)
}
if testing.Short() && i > 2 {
break
@@ -654,10 +619,8 @@ func TestScalarMult(t *testing.T) {
yWant, _ := new(big.Int).SetString(test.ry, 16)
xGot, yGot := s256.ScalarMult(x, y, k.Bytes())
if xGot.Cmp(xWant) != 0 || yGot.Cmp(yWant) != 0 {
- t.Fatalf(
- "%d: bad output: got (%X, %X), want (%X, %X)", i, xGot,
- yGot, xWant, yWant,
- )
+ t.Fatalf("%d: bad output: got (%X, %X), want (%X, %X)", i, xGot,
+ yGot, xWant, yWant)
}
}
}
@@ -675,7 +638,7 @@ func TestScalarMultRand(t *testing.T) {
for i := 0; i < 1024; i++ {
data := make([]byte, 32)
_, err := rand.Read(data)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("failed to read random data at %d", i)
break
}
@@ -683,10 +646,8 @@ func TestScalarMultRand(t *testing.T) {
exponent.Mul(exponent, new(big.Int).SetBytes(data))
xWant, yWant := s256.ScalarBaseMult(exponent.Bytes())
if x.Cmp(xWant) != 0 || y.Cmp(yWant) != 0 {
- t.Fatalf(
- "%d: bad output for %X: got (%X, %X), want (%X, %X)", i,
- data, x, y, xWant, yWant,
- )
+ t.Fatalf("%d: bad output for %X: got (%X, %X), want (%X, %X)", i,
+ data, x, y, xWant, yWant)
break
}
}
@@ -838,7 +799,7 @@ func TestSplitKRand(t *testing.T) {
for i := 0; i < 1024; i++ {
bytesK := make([]byte, 32)
_, err := rand.Read(bytesK)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("failed to read random data at %d", i)
break
}
@@ -863,7 +824,7 @@ func TestSplitKRand(t *testing.T) {
func testKeyGeneration(t *testing.T, c *KoblitzCurve, tag string) {
priv, err := NewSecretKey()
- if chk.E(err) {
+ if err != nil {
t.Errorf("%s: error: %s", tag, err)
return
}
@@ -887,10 +848,8 @@ func checkNAFEncoding(pos, neg []byte, origValue *big.Int) error {
return fmt.Errorf("positive has leading zero -- got %x", pos)
}
if len(neg) > len(pos) {
- return fmt.Errorf(
- "negative has len %d > pos len %d", len(neg),
- len(pos),
- )
+ return fmt.Errorf("negative has len %d > pos len %d", len(neg),
+ len(pos))
}
// Ensure the result doesn't have any adjacent non-zero digits.
gotPos := new(big.Int).SetBytes(pos)
@@ -900,10 +859,8 @@ func checkNAFEncoding(pos, neg []byte, origValue *big.Int) error {
for bit := 1; bit < posOrNeg.BitLen(); bit++ {
thisBit := posOrNeg.Bit(bit)
if prevBit == 1 && thisBit == 1 {
- return fmt.Errorf(
- "adjacent non-zero digits found at bit pos %d",
- bit-1,
- )
+ return fmt.Errorf("adjacent non-zero digits found at bit pos %d",
+ bit-1)
}
prevBit = thisBit
}
@@ -911,10 +868,8 @@ func checkNAFEncoding(pos, neg []byte, origValue *big.Int) error {
// NAF representation sum back to the original value.
gotValue := new(big.Int).Sub(gotPos, gotNeg)
if origValue.Cmp(gotValue) != 0 {
- return fmt.Errorf(
- "pos-neg is not original value: got %x, want %x",
- gotValue, origValue,
- )
+ return fmt.Errorf("pos-neg is not original value: got %x, want %x",
+ gotValue, origValue)
}
return nil
}
diff --git a/ec/chaincfg/deployment_time_frame.go b/ec/chaincfg/deployment_time_frame.go
index 42253c4..3d8da00 100644
--- a/ec/chaincfg/deployment_time_frame.go
+++ b/ec/chaincfg/deployment_time_frame.go
@@ -4,13 +4,12 @@ import (
"fmt"
"time"
- "orly.dev/chk"
"orly.dev/ec/wire"
)
var (
// ErrNoBlockClock is returned when an operation fails due to lack of
- // synchronization with the current up-to-date block clock.
 + // synchronization with the current up-to-date block clock.
ErrNoBlockClock = fmt.Errorf("no block clock synchronized")
)
@@ -89,7 +88,7 @@ func (m *MedianTimeDeploymentStarter) HasStarted(blkHeader *wire.BlockHeader) (
return true, nil
}
medianTime, err := m.blockClock.PastMedianTime(blkHeader)
- if chk.E(err) {
+ if err != nil {
return false, err
}
// We check both after and equal here as after will fail for equivalent
@@ -130,7 +129,7 @@ func (m *MedianTimeDeploymentEnder) HasEnded(blkHeader *wire.BlockHeader) (
return false, nil
}
medianTime, err := m.blockClock.PastMedianTime(blkHeader)
- if chk.E(err) {
+ if err != nil {
return false, err
}
// We check both after and equal here as after will fail for equivalent
diff --git a/ec/chaincfg/params.go b/ec/chaincfg/params.go
index 4121fbd..dfa5b63 100644
--- a/ec/chaincfg/params.go
+++ b/ec/chaincfg/params.go
@@ -5,7 +5,6 @@ import (
"math/big"
"time"
- "orly.dev/chk"
"orly.dev/ec/chainhash"
"orly.dev/ec/wire"
)
@@ -480,7 +479,7 @@ var MainNetParams = Params{
// hard-coded, and therefore known good, hashes.
func newHashFromStr(hexStr string) *chainhash.Hash {
hash, err := chainhash.NewHashFromStr(hexStr)
- if chk.E(err) {
+ if err != nil {
// Ordinarily I don't like panics in library code since it
// can take applications down without them having a chance to
// recover which is extremely annoying, however an exception is
diff --git a/ec/chainhash/hash.go b/ec/chainhash/hash.go
index c188834..5b1f0b0 100644
--- a/ec/chainhash/hash.go
+++ b/ec/chainhash/hash.go
@@ -9,10 +9,8 @@ import (
"encoding/json"
"fmt"
- "github.com/minio/sha256-simd"
- "orly.dev/chk"
-
"orly.dev/hex"
+ "orly.dev/sha256"
)
const (
@@ -125,11 +123,11 @@ func (hash *Hash) UnmarshalJSON(input []byte) error {
}
var sh string
err := json.Unmarshal(input, &sh)
- if chk.E(err) {
+ if err != nil {
return err
}
newHash, err := NewHashFromStr(sh)
- if chk.E(err) {
+ if err != nil {
return err
}
return hash.SetBytes(newHash[:])
@@ -140,7 +138,7 @@ func (hash *Hash) UnmarshalJSON(input []byte) error {
func NewHash(newHash []byte) (*Hash, error) {
var sh Hash
err := sh.SetBytes(newHash)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
return &sh, err
@@ -176,7 +174,7 @@ func TaggedHash(tag []byte, msgs ...[]byte) *Hash {
func NewHashFromStr(hash string) (*Hash, error) {
ret := new(Hash)
err := Decode(ret, hash)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
return ret, nil
@@ -205,7 +203,7 @@ func Decode(dst *Hash, src string) error {
reversedHash[HashSize-hex.DecLen(len(srcBytes)):],
srcBytes,
)
- if chk.E(err) {
+ if err != nil {
return err
}
// Reverse copy from the temporary hash to destination. Because the
@@ -221,7 +219,7 @@ func Decode(dst *Hash, src string) error {
func decodeLegacy(dst *Hash, src []byte) error {
var hashBytes []byte
err := json.Unmarshal(src, &hashBytes)
- if chk.E(err) {
+ if err != nil {
return err
}
if len(hashBytes) != HashSize {
diff --git a/ec/chainhash/hash_test.go b/ec/chainhash/hash_test.go
index b5e47d8..92a4456 100644
--- a/ec/chainhash/hash_test.go
+++ b/ec/chainhash/hash_test.go
@@ -7,28 +7,23 @@ package chainhash
import (
"bytes"
"testing"
-
- "orly.dev/chk"
)
// mainNetGenesisHash is the hash of the first block in the block chain for the
// main network (genesis block).
-var mainNetGenesisHash = Hash(
- [HashSize]byte{
- // Make go vet happy.
- 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
- 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
- 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
- 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
-)
+var mainNetGenesisHash = Hash([HashSize]byte{ // Make go vet happy.
+ 0x6f, 0xe2, 0x8c, 0x0a, 0xb6, 0xf1, 0xb3, 0x72,
+ 0xc1, 0xa6, 0xa2, 0x46, 0xae, 0x63, 0xf7, 0x4f,
+ 0x93, 0x1e, 0x83, 0x65, 0xe1, 0x5a, 0x08, 0x9c,
+ 0x68, 0xd6, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00,
+})
// TestHash tests the Hash API.
func TestHash(t *testing.T) {
// Hash of block 234439.
blockHashStr := "14a0810ac680a3eb3f82edc878cea25ec41d6b790744e5daeef"
blockHash, err := NewHashFromStr(blockHashStr)
- if chk.E(err) {
+ if err != nil {
t.Errorf("NewHashFromStr: %v", err)
}
// Hash of block 234440 as byte slice.
@@ -39,40 +34,32 @@ func TestHash(t *testing.T) {
0xa6, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
}
hash, err := NewHash(buf)
- if chk.E(err) {
+ if err != nil {
t.Errorf("NewHash: unexpected error %v", err)
}
// Ensure proper size.
if len(hash) != HashSize {
- t.Errorf(
- "NewHash: hash length mismatch - got: %v, want: %v",
- len(hash), HashSize,
- )
+ t.Errorf("NewHash: hash length mismatch - got: %v, want: %v",
+ len(hash), HashSize)
}
// Ensure contents match.
if !bytes.Equal(hash[:], buf) {
- t.Errorf(
- "NewHash: hash contents mismatch - got: %v, want: %v",
- hash[:], buf,
- )
+ t.Errorf("NewHash: hash contents mismatch - got: %v, want: %v",
+ hash[:], buf)
}
// Ensure contents of hash of block 234440 don't match 234439.
if hash.IsEqual(blockHash) {
- t.Errorf(
- "IsEqual: hash contents should not match - got: %v, want: %v",
- hash, blockHash,
- )
+ t.Errorf("IsEqual: hash contents should not match - got: %v, want: %v",
+ hash, blockHash)
}
// Set hash from byte slice and ensure contents match.
err = hash.SetBytes(blockHash.CloneBytes())
- if chk.E(err) {
+ if err != nil {
t.Errorf("SetBytes: %v", err)
}
if !hash.IsEqual(blockHash) {
- t.Errorf(
- "IsEqual: hash contents mismatch - got: %v, want: %v",
- hash, blockHash,
- )
+ t.Errorf("IsEqual: hash contents mismatch - got: %v, want: %v",
+ hash, blockHash)
}
// Ensure nil hashes are handled properly.
if !(*Hash)(nil).IsEqual(nil) {
@@ -83,13 +70,13 @@ func TestHash(t *testing.T) {
}
// Invalid size for SetBytes.
err = hash.SetBytes([]byte{0x00})
- if !chk.E(err) {
+ if err == nil {
t.Errorf("SetBytes: failed to received expected err - got: nil")
}
// Invalid size for NewHash.
invalidHash := make([]byte, HashSize+1)
_, err = NewHash(invalidHash)
- if !chk.E(err) {
+ if err == nil {
t.Errorf("NewHash: failed to received expected err - got: nil")
}
}
@@ -98,21 +85,16 @@ func TestHash(t *testing.T) {
func TestHashString(t *testing.T) {
// Block 100000 hash.
wantStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
- hash := Hash(
- [HashSize]byte{
- // Make go vet happy.
- 0x06, 0xe5, 0x33, 0xfd, 0x1a, 0xda, 0x86, 0x39,
- 0x1f, 0x3f, 0x6c, 0x34, 0x32, 0x04, 0xb0, 0xd2,
- 0x78, 0xd4, 0xaa, 0xec, 0x1c, 0x0b, 0x20, 0xaa,
- 0x27, 0xba, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- )
+ hash := Hash([HashSize]byte{ // Make go vet happy.
+ 0x06, 0xe5, 0x33, 0xfd, 0x1a, 0xda, 0x86, 0x39,
+ 0x1f, 0x3f, 0x6c, 0x34, 0x32, 0x04, 0xb0, 0xd2,
+ 0x78, 0xd4, 0xaa, 0xec, 0x1c, 0x0b, 0x20, 0xaa,
+ 0x27, 0xba, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+ })
hashStr := hash.String()
if hashStr != wantStr {
- t.Errorf(
- "String: wrong hash string - got %v, want %v",
- hashStr, wantStr,
- )
+ t.Errorf("String: wrong hash string - got %v, want %v",
+ hashStr, wantStr)
}
}
@@ -186,7 +168,7 @@ func TestHashString(t *testing.T) {
// if err != test.err {
// t.Errorf(unexpectedErrStr, i, err, test.err)
// continue
-// } else if chk.E(err) {
+// } else if err != nil {
// // Got expected error. Move on to the next test.
// continue
// }
@@ -202,16 +184,16 @@ func TestHashString(t *testing.T) {
// hashStr := "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
// legacyHashStr := []byte("[6,229,51,253,26,218,134,57,31,63,108,52,50,4,176,210,120,212,170,236,28,11,32,170,39,186,3,0,0,0,0,0]")
// hash, err := NewHashFromStr(hashStr)
-// if chk.E(err) {
+// if err != nil {
// t.Errorf("NewHashFromStr error:%v, hashStr:%s", err, hashStr)
// }
// hashBytes, err := json.Marshal(hash)
-// if chk.E(err) {
+// if err != nil {
// t.Errorf("Marshal json error:%v, hash:%v", err, hashBytes)
// }
// var newHash Hash
// err = json.Unmarshal(hashBytes, &newHash)
-// if chk.E(err) {
+// if err != nil {
// t.Errorf("Unmarshal json error:%v, hash:%v", err, hashBytes)
// }
// if !hash.IsEqual(&newHash) {
@@ -219,7 +201,7 @@ func TestHashString(t *testing.T) {
// newHash.String(), hashStr)
// }
// err = newHash.Unmarshal(legacyHashStr)
-// if chk.E(err) {
+// if err != nil {
// t.Errorf("Unmarshal legacy json error:%v, hash:%v", err, legacyHashStr)
// }
// if !hash.IsEqual(&newHash) {
diff --git a/ec/chainhash/hashfuncs.go b/ec/chainhash/hashfuncs.go
index c4321d1..a21b719 100644
--- a/ec/chainhash/hashfuncs.go
+++ b/ec/chainhash/hashfuncs.go
@@ -5,7 +5,7 @@
package chainhash
-import "github.com/minio/sha256-simd"
+import "orly.dev/sha256"
// HashB calculates hash(b) and returns the resulting bytes.
func HashB(b []byte) []byte {
diff --git a/ec/ciphering_test.go b/ec/ciphering_test.go
index 24ea84a..b2a07c2 100644
--- a/ec/ciphering_test.go
+++ b/ec/ciphering_test.go
@@ -7,27 +7,23 @@ package btcec
import (
"bytes"
"testing"
-
- "orly.dev/chk"
)
func TestGenerateSharedSecret(t *testing.T) {
privKey1, err := NewSecretKey()
- if chk.E(err) {
+ if err != nil {
t.Errorf("secret key generation error: %s", err)
return
}
privKey2, err := NewSecretKey()
- if chk.E(err) {
+ if err != nil {
t.Errorf("secret key generation error: %s", err)
return
}
secret1 := GenerateSharedSecret(privKey1, privKey2.PubKey())
secret2 := GenerateSharedSecret(privKey2, privKey1.PubKey())
if !bytes.Equal(secret1, secret2) {
- t.Errorf(
- "ECDH failed, secrets mismatch - first: %x, second: %x",
- secret1, secret2,
- )
+ t.Errorf("ECDH failed, secrets mismatch - first: %x, second: %x",
+ secret1, secret2)
}
}
diff --git a/ec/curve.go b/ec/curve.go
index e17aee9..4423f32 100644
--- a/ec/curve.go
+++ b/ec/curve.go
@@ -6,7 +6,6 @@ package btcec
import (
"fmt"
- "orly.dev/chk"
"orly.dev/ec/secp256k1"
)
@@ -84,7 +83,7 @@ func ParseJacobian(point []byte) (JacobianPoint, error) {
return infinityPoint, nil
}
noncePk, err := secp256k1.ParsePubKey(point)
- if chk.E(err) {
+ if err != nil {
return JacobianPoint{}, err
}
noncePk.AsJacobian(&result)
diff --git a/ec/ecdsa/bench_test.go b/ec/ecdsa/bench_test.go
index 0ddd996..afb2e17 100644
--- a/ec/ecdsa/bench_test.go
+++ b/ec/ecdsa/bench_test.go
@@ -8,7 +8,6 @@ package ecdsa
import (
"testing"
- "orly.dev/chk"
"orly.dev/ec/secp256k1"
"orly.dev/hex"
)
@@ -19,7 +18,7 @@ import (
// must only) be called with hard-coded values.
func hexToModNScalar(s string) *secp256k1.ModNScalar {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
var scalar secp256k1.ModNScalar
@@ -35,7 +34,7 @@ func hexToModNScalar(s string) *secp256k1.ModNScalar {
// called with hard-coded values.
func hexToFieldVal(s string) *secp256k1.FieldVal {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
var f secp256k1.FieldVal
@@ -153,7 +152,7 @@ func BenchmarkRecoverCompact(b *testing.B) {
msgHash := hexToBytes("c301ba9de5d6053caad9f5eb46523f007702add2c62fa39de03146a36b8026b7")
// Ensure a valid compact signature is being benchmarked.
pubKey, wasCompressed, err := RecoverCompact(compactSig, msgHash)
- if chk.E(err) {
+ if err != nil {
b.Fatalf("unexpected err: %v", err)
}
if !wasCompressed {
diff --git a/ec/ecdsa/example_test.go b/ec/ecdsa/example_test.go
index 30152f3..3b4ca8b 100644
--- a/ec/ecdsa/example_test.go
+++ b/ec/ecdsa/example_test.go
@@ -13,7 +13,7 @@ package ecdsa_test
// // Decode a hex-encoded secret key.
// pkBytes, err := hex.Dec("22a47fa09a223f2aa079edf85a7c2d4f87" +
// "20ee63e502ee2869afab7de234b80c")
-// if chk.E(err) {
+// if err != nil {
// fmt.Println(err)
// return
// }
@@ -44,12 +44,12 @@ package ecdsa_test
// // Decode hex-encoded serialized public key.
// pubKeyBytes, err := hex.Dec("02a673638cb9587cb68ea08dbef685c" +
// "6f2d2a751a8b3c6f2a7e9a4999e6e4bfaf5")
-// if chk.E(err) {
+// if err != nil {
// fmt.Println(err)
// return
// }
// pubKey, err := secp256k1.ParsePubKey(pubKeyBytes)
-// if chk.E(err) {
+// if err != nil {
// fmt.Println(err)
// return
// }
@@ -58,12 +58,12 @@ package ecdsa_test
// sigBytes, err := hex.Dec("3045022100fcc0a8768cfbcefcf2cadd7cfb0" +
// "fb18ed08dd2e2ae84bef1a474a3d351b26f0302200fc1a350b45f46fa0010139130" +
// "2818d748c2b22615511a3ffd5bb638bd777207")
-// if chk.E(err) {
+// if err != nil {
// fmt.Println(err)
// return
// }
// signature, err := ecdsa.ParseDERSignature(sigBytes)
-// if chk.E(err) {
+// if err != nil {
// fmt.Println(err)
// return
// }
diff --git a/ec/ecdsa/signature_test.go b/ec/ecdsa/signature_test.go
index 4121723..355f57c 100644
--- a/ec/ecdsa/signature_test.go
+++ b/ec/ecdsa/signature_test.go
@@ -12,10 +12,10 @@ import (
"bytes"
"errors"
"math/rand"
+ "orly.dev/chk"
"testing"
"time"
- "orly.dev/chk"
"orly.dev/ec/secp256k1"
"orly.dev/hex"
)
@@ -26,7 +26,7 @@ import (
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
return b
@@ -880,7 +880,7 @@ func TestSignatureIsEqual(t *testing.T) {
// // or not the signature was for a compressed public key are the
// // expected values.
// gotPubKey, gotCompressed, err := RecoverCompact(gotSig, hash)
-// if chk.E(err) {
+// if err != nil {
// t.Errorf("%s: unexpected error when recovering: %v", test.name,
// err)
// continue
@@ -1092,7 +1092,7 @@ func TestSignAndRecoverCompactRandom(t *testing.T) {
gotSig := SignCompact(secKey, hash[:], compressed)
gotPubKey, gotCompressed, err := RecoverCompact(gotSig, hash[:])
- if chk.E(err) {
+ if err != nil {
t.Fatalf(
"unexpected err: %v\nsig: %x\nhash: %x\nsecret key: %x",
err, gotSig, hash, secKey.Serialize(),
@@ -1120,7 +1120,7 @@ func TestSignAndRecoverCompactRandom(t *testing.T) {
randBit := rng.Intn(7)
badSig[randByte] ^= 1 << randBit
badPubKey, _, err := RecoverCompact(badSig, hash[:])
- if !chk.E(err) && badPubKey.IsEqual(wantPubKey) {
+ if err == nil && badPubKey.IsEqual(wantPubKey) {
t.Fatalf(
"recovered public key for bad sig: %x\nhash: %x\n"+
"secret key: %x", badSig, hash, secKey.Serialize(),
@@ -1135,7 +1135,7 @@ func TestSignAndRecoverCompactRandom(t *testing.T) {
randBit = rng.Intn(7)
badHash[randByte] ^= 1 << randBit
badPubKey, _, err = RecoverCompact(gotSig, badHash[:])
- if !chk.E(err) && badPubKey.IsEqual(wantPubKey) {
+ if err == nil && badPubKey.IsEqual(wantPubKey) {
t.Fatalf(
"recovered public key for bad hash: %x\nsig: %x\n"+
"secret key: %x", badHash, gotSig, secKey.Serialize(),
diff --git a/ec/ecdsa/util_test.go b/ec/ecdsa/util_test.go
new file mode 100644
index 0000000..444d26b
--- /dev/null
+++ b/ec/ecdsa/util_test.go
@@ -0,0 +1,9 @@
+package ecdsa_test
+
+import (
+ "orly.dev/lol"
+)
+
+var (
+ log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf
+)
diff --git a/ec/field_test.go b/ec/field_test.go
index 6c3b545..9e5d164 100644
--- a/ec/field_test.go
+++ b/ec/field_test.go
@@ -7,9 +7,9 @@ package btcec
import (
"math/rand"
+ "orly.dev/chk"
"testing"
- "orly.dev/chk"
"orly.dev/hex"
)
@@ -1189,7 +1189,7 @@ func TestFieldSquareRoot(t *testing.T) {
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
return b
diff --git a/ec/fuzz_test.go b/ec/fuzz_test.go
index cf94010..df143e1 100644
--- a/ec/fuzz_test.go
+++ b/ec/fuzz_test.go
@@ -11,7 +11,6 @@ package btcec
import (
"testing"
- "orly.dev/chk"
"orly.dev/hex"
)
@@ -29,7 +28,7 @@ func FuzzParsePubKey(f *testing.F) {
}
for _, pubKey := range recoveryTestPubKeys {
seed, err := hex.Dec(pubKey)
- if chk.E(err) {
+ if err != nil {
f.Fatal(err)
}
f.Add(seed)
@@ -38,10 +37,10 @@ func FuzzParsePubKey(f *testing.F) {
f.Fuzz(
func(t *testing.T, input []byte) {
key, err := ParsePubKey(input)
- if key == nil && !chk.E(err) {
+ if key == nil && err == nil {
panic("key==nil && err==nil")
}
- if key != nil && chk.E(err) {
+ if key != nil && err != nil {
panic("key!=nil yet err!=nil")
}
},
diff --git a/ec/musig2/bench_test.go b/ec/musig2/bench_test.go
index b4b06f6..8cadcbd 100644
--- a/ec/musig2/bench_test.go
+++ b/ec/musig2/bench_test.go
@@ -8,7 +8,6 @@ import (
"fmt"
"testing"
- "orly.dev/chk"
"orly.dev/ec"
"orly.dev/ec/schnorr"
"orly.dev/hex"
@@ -21,7 +20,7 @@ var (
func hexToBytes(s string) []byte {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
return b
@@ -29,7 +28,7 @@ func hexToBytes(s string) []byte {
func hexToModNScalar(s string) *btcec.ModNScalar {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
var scalar btcec.ModNScalar
@@ -41,12 +40,12 @@ func hexToModNScalar(s string) *btcec.ModNScalar {
func genSigner(t *testing.B) signer {
privKey, err := btcec.NewSecretKey()
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to gen priv key: %v", err)
}
pubKey := privKey.PubKey()
nonces, err := GenNonces(WithPublicKey(pubKey))
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to gen nonces: %v", err)
}
return signer{
@@ -77,7 +76,7 @@ func BenchmarkPartialSign(b *testing.B) {
signers[i] = genSigner(b)
}
combinedNonce, err := AggregateNonces(signers.pubNonces())
- if chk.E(err) {
+ if err != nil {
b.Fatalf("unable to generate combined nonce: %v", err)
}
var sig *PartialSignature
@@ -100,7 +99,7 @@ func BenchmarkPartialSign(b *testing.B) {
signers[0].nonces.SecNonce, signers[0].privKey,
combinedNonce, keys, msg, signOpts...,
)
- if chk.E(err) {
+ if err != nil {
b.Fatalf("unable to generate sig: %v", err)
}
}
@@ -132,7 +131,7 @@ func BenchmarkPartialSign(b *testing.B) {
// combinedNonce, err := AggregateNonces(
// signers.pubNonces(),
// )
-// if chk.E(err) {
+// if err != nil {
// b.Fatalf("unable to generate combined "+
// "nonce: %v", err)
// }
@@ -145,7 +144,7 @@ func BenchmarkPartialSign(b *testing.B) {
// signers[0].nonces.SecNonce, signers[0].privKey,
// combinedNonce, signers.keys(), msg,
// )
-// if chk.E(err) {
+// if err != nil {
// b.Fatalf("unable to generate sig: %v", err)
// }
// keys := signers.keys()
@@ -187,7 +186,7 @@ func BenchmarkCombineSigs(b *testing.B) {
signers[i] = genSigner(b)
}
combinedNonce, err := AggregateNonces(signers.pubNonces())
- if chk.E(err) {
+ if err != nil {
b.Fatalf("unable to generate combined nonce: %v", err)
}
var msg [32]byte
@@ -199,7 +198,7 @@ func BenchmarkCombineSigs(b *testing.B) {
signer.nonces.SecNonce, signer.privKey,
combinedNonce, signers.keys(), msg,
)
- if chk.E(err) {
+ if err != nil {
b.Fatalf(
"unable to generate partial sig: %v",
err,
@@ -239,7 +238,7 @@ func BenchmarkAggregateNonces(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
pubNonce, err := AggregateNonces(nonces)
- if chk.E(err) {
+ if err != nil {
b.Fatalf("unable to generate nonces: %v", err)
}
testNonce = pubNonce
diff --git a/ec/musig2/context.go b/ec/musig2/context.go
index 6b8cc29..4c2d5f1 100644
--- a/ec/musig2/context.go
+++ b/ec/musig2/context.go
@@ -4,8 +4,8 @@ package musig2
import (
"fmt"
-
"orly.dev/chk"
+
"orly.dev/ec"
"orly.dev/ec/schnorr"
)
@@ -214,7 +214,7 @@ func NewContext(
WithPublicKey(ctx.pubKey),
WithNonceSecretKeyAux(signingKey),
)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
}
@@ -266,7 +266,7 @@ func (c *Context) combineSignerKeys() error {
c.combinedKey, _, _, err = AggregateKeys(
c.opts.keySet, c.shouldSort, keyAggOpts...,
)
- if chk.E(err) {
+ if err != nil {
return err
}
return nil
@@ -425,7 +425,7 @@ func (c *Context) NewSession(options ...SessionOption) (*Session, error) {
WithNonceSecretKeyAux(c.signingKey),
WithNonceCombinedKeyAux(c.combinedKey.FinalKey),
)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
}
@@ -469,7 +469,7 @@ func (s *Session) RegisterPubNonce(nonce [PubNonceSize]byte) (bool, error) {
// now.
if haveAllNonces {
combinedNonce, err := AggregateNonces(s.pubNonces)
- if chk.E(err) {
+ if err != nil {
return false, err
}
s.combinedNonce = &combinedNonce
@@ -514,7 +514,7 @@ func (s *Session) Sign(
// Now that we've generated our signature, we'll make sure to blank out
// our signing nonce.
s.localNonces = nil
- if chk.E(err) {
+ if err != nil {
return nil, err
}
s.msg = msg
diff --git a/ec/musig2/keys.go b/ec/musig2/keys.go
index 61c9a7f..9a76ed2 100644
--- a/ec/musig2/keys.go
+++ b/ec/musig2/keys.go
@@ -7,7 +7,6 @@ import (
"fmt"
"sort"
- "orly.dev/chk"
"orly.dev/ec"
"orly.dev/ec/chainhash"
"orly.dev/ec/schnorr"
@@ -400,7 +399,7 @@ func AggregateKeys(
finalKeyJ, parityAcc, opts.tweaks[i-1].Tweak, tweakAcc,
opts.tweaks[i-1].IsXOnly,
)
- if chk.E(err) {
+ if err != nil {
return nil, nil, nil, err
}
}
diff --git a/ec/musig2/keys_test.go b/ec/musig2/keys_test.go
index ee15be2..bde52f0 100644
--- a/ec/musig2/keys_test.go
+++ b/ec/musig2/keys_test.go
@@ -11,7 +11,6 @@ import (
"testing"
"github.com/stretchr/testify/require"
- "orly.dev/chk"
"orly.dev/ec"
"orly.dev/ec/schnorr"
@@ -93,7 +92,7 @@ func keysFromIndices(
inputKeys[i], err = btcec.ParsePubKey(
mustParseHex(pubKeys[keyIdx]),
)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
}
@@ -175,7 +174,7 @@ func TestMuSig2KeyAggTestVectors(t *testing.T) {
)
// In this set of test cases, we should only get this
// for the very first vector.
- if chk.E(err) {
+ if err != nil {
switch testCase.Comment {
case "Invalid public key":
require.ErrorIs(
diff --git a/ec/musig2/musig2_test.go b/ec/musig2/musig2_test.go
index 9fd6d66..2ad309d 100644
--- a/ec/musig2/musig2_test.go
+++ b/ec/musig2/musig2_test.go
@@ -8,11 +8,9 @@ import (
"sync"
"testing"
- "github.com/minio/sha256-simd"
- "orly.dev/chk"
-
"orly.dev/ec"
"orly.dev/hex"
+ "orly.dev/sha256"
)
const (
@@ -21,7 +19,7 @@ const (
func mustParseHex(str string) []byte {
b, err := hex.Dec(str)
- if chk.E(err) {
+ if err != nil {
panic(fmt.Errorf("unable to parse hex: %v", err))
}
return b
@@ -80,7 +78,7 @@ func testMultiPartySign(
signSet := make([]*btcec.PublicKey, numSigners)
for i := 0; i < numSigners; i++ {
privKey, err := btcec.NewSecretKey()
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to gen priv key: %v", err)
}
pubKey := privKey.PubKey()
@@ -106,17 +104,17 @@ func testMultiPartySign(
signCtx, err := NewContext(
signerKey, false, ctxOpts...,
)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to generate context: %v", err)
}
if combinedKey == nil {
combinedKey, err = signCtx.CombinedKey()
- if chk.E(err) {
+ if err != nil {
t.Fatalf("combined key not available: %v", err)
}
}
session, err := signCtx.NewSession()
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to generate new session: %v", err)
}
signers[i] = session
@@ -135,7 +133,7 @@ func testMultiPartySign(
}
nonce := otherCtx.PublicNonce()
haveAll, err := signer.RegisterPubNonce(nonce)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to add public nonce")
}
if j == len(signers)-1 && !haveAll {
@@ -153,14 +151,14 @@ func testMultiPartySign(
for i := range signers {
signer := signers[i]
partialSig, err := signer.Sign(msg)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to generate partial sig: %v", err)
}
// We don't need to combine the signature for the very first
// signer, as it already has that partial signature.
if i != 0 {
haveAll, err := combiner.CombineSig(partialSig)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to combine sigs: %v", err)
}
@@ -248,11 +246,11 @@ func TestMuSigMultiParty(t *testing.T) {
func TestMuSigEarlyNonce(t *testing.T) {
t.Parallel()
privKey1, err := btcec.NewSecretKey()
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to gen priv key: %v", err)
}
privKey2, err := btcec.NewSecretKey()
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to gen priv key: %v", err)
}
// If we try to make a context, with just the secret key and sorting
@@ -266,14 +264,14 @@ func TestMuSigEarlyNonce(t *testing.T) {
ctx1, err := NewContext(
privKey1, true, WithNumSigners(numSigners), WithEarlyNonceGen(),
)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to make ctx: %v", err)
}
pubKey1 := ctx1.PubKey()
ctx2, err := NewContext(
privKey2, true, WithKnownSigners(signers), WithEarlyNonceGen(),
)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to make ctx: %v", err)
}
pubKey2 := ctx2.PubKey()
@@ -283,16 +281,16 @@ func TestMuSigEarlyNonce(t *testing.T) {
t.Fatalf("unepxected error: %v", err)
}
_, err = ctx2.CombinedKey()
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to get combined key: %v", err)
}
// The early nonces _should_ be available at this point.
nonce1, err := ctx1.EarlySessionNonce()
- if chk.E(err) {
+ if err != nil {
t.Fatalf("session nonce not available: %v", err)
}
nonce2, err := ctx2.EarlySessionNonce()
- if chk.E(err) {
+ if err != nil {
t.Fatalf("session nonce not available: %v", err)
}
// The number of registered signers should still be 1 for both parties.
@@ -319,7 +317,7 @@ func TestMuSigEarlyNonce(t *testing.T) {
}
// We'll now register the other signer for party 1.
done, err := ctx1.RegisterSigner(&pubKey2)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to register signer: %v", err)
}
if !done {
@@ -332,11 +330,11 @@ func TestMuSigEarlyNonce(t *testing.T) {
}
// We should be able to create the session at this point.
session1, err := ctx1.NewSession()
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to create new session: %v", err)
}
session2, err := ctx2.NewSession()
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to create new session: %v", err)
}
msg := sha256.Sum256([]byte("let's get taprooty, LN style"))
@@ -349,14 +347,14 @@ func TestMuSigEarlyNonce(t *testing.T) {
// Now we can exchange nonces to continue with the rest of the signing
// process as normal.
done, err = session1.RegisterPubNonce(nonce2.PubNonce)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to register nonce: %v", err)
}
if !done {
t.Fatalf("signer 1 doesn't have all nonces")
}
done, err = session2.RegisterPubNonce(nonce1.PubNonce)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to register nonce: %v", err)
}
if !done {
@@ -369,15 +367,15 @@ func TestMuSigEarlyNonce(t *testing.T) {
}
// Sign the message and combine the two partial sigs into one.
_, err = session1.Sign(msg)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to gen sig: %v", err)
}
sig2, err := session2.Sign(msg)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to gen sig: %v", err)
}
done, err = session1.CombineSig(sig2)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to combine sig: %v", err)
}
if !done {
@@ -390,7 +388,7 @@ func TestMuSigEarlyNonce(t *testing.T) {
}
// Finally, verify that the final signature is valid.
combinedKey, err := ctx1.CombinedKey()
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unexpected combined key error: %v", err)
}
finalSig := session1.FinalSig()
diff --git a/ec/musig2/nonces.go b/ec/musig2/nonces.go
index 55ace36..cdb06d7 100644
--- a/ec/musig2/nonces.go
+++ b/ec/musig2/nonces.go
@@ -8,8 +8,8 @@ import (
"encoding/binary"
"errors"
"io"
-
"orly.dev/chk"
+
"orly.dev/ec"
"orly.dev/ec/chainhash"
"orly.dev/ec/schnorr"
@@ -244,12 +244,12 @@ func genNonceAuxBytes(
}
// Next, we'll write out: len(pk) || pk
err := writeBytesPrefix(&w, pubkey, uint8Writer)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
// Next, we'll write out: len(aggpk) || aggpk.
err = writeBytesPrefix(&w, opts.combinedKey, uint8Writer)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
switch {
@@ -269,13 +269,13 @@ func genNonceAuxBytes(
return nil, err
}
err = writeBytesPrefix(&w, opts.msg, uint64Writer)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
}
// Finally we'll write out the auxiliary input.
err = writeBytesPrefix(&w, opts.auxInput, uint32Writer)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
// Next we'll write out the interaction/index number which will
@@ -318,11 +318,11 @@ func GenNonces(options ...NonceGenOption) (*Nonces, error) {
// Using our randomness, pubkey and the set of optional params, generate our
// two secret nonces: k1 and k2.
k1, err := genNonceAuxBytes(randBytes[:], opts.publicKey, 0, opts)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
k2, err := genNonceAuxBytes(randBytes[:], opts.publicKey, 1, opts)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
var k1Mod, k2Mod btcec.ModNScalar
@@ -362,7 +362,7 @@ func AggregateNonces(pubNonces [][PubNonceSize]byte) (
// decode.
var nonceJ btcec.JacobianPoint
nonceJ, err := btcec.ParseJacobian(slicer(pubNonceBytes))
- if chk.E(err) {
+ if err != nil {
return btcec.JacobianPoint{}, err
}
pubNonceJs[i] = &nonceJ
@@ -387,7 +387,7 @@ func AggregateNonces(pubNonces [][PubNonceSize]byte) (
return n[:btcec.PubKeyBytesLenCompressed]
},
)
- if chk.E(err) {
+ if err != nil {
return finalNonce, err
}
combinedNonce2, err := combineNonces(
@@ -395,7 +395,7 @@ func AggregateNonces(pubNonces [][PubNonceSize]byte) (
return n[btcec.PubKeyBytesLenCompressed:]
},
)
- if chk.E(err) {
+ if err != nil {
return finalNonce, err
}
copy(finalNonce[:], btcec.JacobianToByteSlice(combinedNonce1))
diff --git a/ec/musig2/nonces_test.go b/ec/musig2/nonces_test.go
index 7aadd5e..574d876 100644
--- a/ec/musig2/nonces_test.go
+++ b/ec/musig2/nonces_test.go
@@ -11,7 +11,6 @@ import (
"testing"
"github.com/stretchr/testify/require"
- "orly.dev/chk"
"orly.dev/hex"
)
@@ -61,7 +60,7 @@ func TestMusig2NonceGenTestVectors(t *testing.T) {
t.Run(
fmt.Sprintf("test_case=%v", i), func(t *testing.T) {
nonce, err := GenNonces(withCustomOptions(customOpts))
- if chk.E(err) {
+ if err != nil {
t.Fatalf("err gen nonce aux bytes %v", err)
}
expectedBytes, _ := hex.Dec(testCase.Expected)
@@ -143,7 +142,7 @@ func TestMusig2AggregateNoncesTestVectors(t *testing.T) {
t.Run(
fmt.Sprintf("invalid_case=%v", i), func(t *testing.T) {
_, err := AggregateNonces(testNonces)
- require.True(t, chk.E(err))
+ require.True(t, err != nil)
require.Equal(t, testCase.ExpectedErr, err.Error())
},
)
diff --git a/ec/musig2/sign.go b/ec/musig2/sign.go
index 624d9da..6b48329 100644
--- a/ec/musig2/sign.go
+++ b/ec/musig2/sign.go
@@ -6,8 +6,8 @@ import (
"bytes"
"fmt"
"io"
-
"orly.dev/chk"
+
"orly.dev/ec"
"orly.dev/ec/chainhash"
"orly.dev/ec/schnorr"
@@ -228,13 +228,13 @@ func computeSigningNonce(
r1J, err := btcec.ParseJacobian(
combinedNonce[:btcec.PubKeyBytesLenCompressed],
)
- if chk.E(err) {
+ if err != nil {
return nil, nil, err
}
r2J, err := btcec.ParseJacobian(
combinedNonce[btcec.PubKeyBytesLenCompressed:],
)
- if chk.E(err) {
+ if err != nil {
return nil, nil, err
}
@@ -318,7 +318,7 @@ func Sign(
combinedKey, parityAcc, _, err := AggregateKeys(
pubKeys, opts.sortKeys, keyAggOpts...,
)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
@@ -328,7 +328,7 @@ func Sign(
nonce, nonceBlinder, err := computeSigningNonce(
combinedNonce, combinedKey.FinalKey, msg,
)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
@@ -479,7 +479,7 @@ func verifyPartialSig(
combinedKey, parityAcc, _, err := AggregateKeys(
keySet, opts.sortKeys, keyAggOpts...,
)
- if chk.E(err) {
+ if err != nil {
return err
}
@@ -499,13 +499,13 @@ func verifyPartialSig(
r1J, err := btcec.ParseJacobian(
combinedNonce[:btcec.PubKeyBytesLenCompressed],
)
- if chk.E(err) {
+ if err != nil {
return err
}
r2J, err := btcec.ParseJacobian(
combinedNonce[btcec.PubKeyBytesLenCompressed:],
)
- if chk.E(err) {
+ if err != nil {
return err
}
@@ -521,13 +521,13 @@ func verifyPartialSig(
pubNonce1J, err := btcec.ParseJacobian(
pubNonce[:btcec.PubKeyBytesLenCompressed],
)
- if chk.E(err) {
+ if err != nil {
return err
}
pubNonce2J, err := btcec.ParseJacobian(
pubNonce[btcec.PubKeyBytesLenCompressed:],
)
- if chk.E(err) {
+ if err != nil {
return err
}
@@ -574,7 +574,7 @@ func verifyPartialSig(
e.SetByteSlice(challengeBytes[:])
signingKey, err := btcec.ParsePubKey(pubKey)
- if chk.E(err) {
+ if err != nil {
return err
}
diff --git a/ec/musig2/sign_test.go b/ec/musig2/sign_test.go
index 6e8fd06..29e5196 100644
--- a/ec/musig2/sign_test.go
+++ b/ec/musig2/sign_test.go
@@ -12,7 +12,6 @@ import (
"testing"
"github.com/stretchr/testify/require"
- "orly.dev/chk"
"orly.dev/ec"
"orly.dev/ec/secp256k1"
@@ -125,7 +124,7 @@ func TestMusig2SignVerify(t *testing.T) {
pubKeys, err := keysFromIndices(
t, testCase.Indices, testCases.PubKeys,
)
- if chk.E(err) {
+ if err != nil {
require.ErrorIs(t, err, secp256k1.ErrPubKeyNotOnCurve)
return
}
@@ -182,7 +181,7 @@ func TestMusig2SignVerify(t *testing.T) {
err = partialSig.Decode(
bytes.NewReader(mustParseHex(testCase.Sig)),
)
- if chk.E(err) && strings.Contains(
+ if err != nil && strings.Contains(
testCase.Comment, "group size",
) {
require.ErrorIs(t, err, ErrPartialSigInvalid)
diff --git a/ec/pubkey_test.go b/ec/pubkey_test.go
index 8760a14..e2cc519 100644
--- a/ec/pubkey_test.go
+++ b/ec/pubkey_test.go
@@ -9,7 +9,6 @@ import (
"testing"
"github.com/davecgh/go-spew/spew"
- "orly.dev/chk"
)
type pubKeyTest struct {
@@ -24,8 +23,7 @@ var pubKeyTests = []pubKeyTest{
// 0437cd7f8525ceed2324359c2d0ba26006d92d85
{
name: "uncompressed ok",
- key: []byte{
- 0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
+ key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
@@ -39,8 +37,7 @@ var pubKeyTests = []pubKeyTest{
},
{
name: "uncompressed x changed",
- key: []byte{
- 0x04, 0x15, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
+ key: []byte{0x04, 0x15, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
@@ -53,8 +50,7 @@ var pubKeyTests = []pubKeyTest{
},
{
name: "uncompressed y changed",
- key: []byte{
- 0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
+ key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
@@ -67,8 +63,7 @@ var pubKeyTests = []pubKeyTest{
},
{
name: "uncompressed claims compressed",
- key: []byte{
- 0x03, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
+ key: []byte{0x03, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
@@ -81,8 +76,7 @@ var pubKeyTests = []pubKeyTest{
},
{
name: "uncompressed as hybrid ok",
- key: []byte{
- 0x07, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
+ key: []byte{0x07, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
@@ -96,8 +90,7 @@ var pubKeyTests = []pubKeyTest{
},
{
name: "uncompressed as hybrid wrong",
- key: []byte{
- 0x06, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
+ key: []byte{0x06, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
@@ -111,8 +104,7 @@ var pubKeyTests = []pubKeyTest{
// from tx 0b09c51c51ff762f00fb26217269d2a18e77a4fa87d69b3c363ab4df16543f20
{
name: "compressed ok (ybit = 0)",
- key: []byte{
- 0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
+ key: []byte{0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
@@ -123,8 +115,7 @@ var pubKeyTests = []pubKeyTest{
// from tx fdeb8e72524e8dab0da507ddbaf5f88fe4a933eb10a66bc4745bb0aa11ea393c
{
name: "compressed ok (ybit = 1)",
- key: []byte{
- 0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33,
+ key: []byte{0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33,
0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34,
0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4,
0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e,
@@ -134,8 +125,7 @@ var pubKeyTests = []pubKeyTest{
},
{
name: "compressed claims uncompressed (ybit = 0)",
- key: []byte{
- 0x04, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
+ key: []byte{0x04, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
@@ -144,8 +134,7 @@ var pubKeyTests = []pubKeyTest{
},
{
name: "compressed claims uncompressed (ybit = 1)",
- key: []byte{
- 0x05, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33,
+ key: []byte{0x05, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33,
0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34,
0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4,
0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e,
@@ -159,8 +148,7 @@ var pubKeyTests = []pubKeyTest{
},
{
name: "X == P",
- key: []byte{
- 0x04, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ key: []byte{0x04, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFC, 0x2F, 0xb2, 0xe0,
@@ -173,8 +161,7 @@ var pubKeyTests = []pubKeyTest{
},
{
name: "X > P",
- key: []byte{
- 0x04, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ key: []byte{0x04, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFD, 0x2F, 0xb2, 0xe0,
@@ -187,8 +174,7 @@ var pubKeyTests = []pubKeyTest{
},
{
name: "Y == P",
- key: []byte{
- 0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
+ key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xFF, 0xFF,
@@ -201,8 +187,7 @@ var pubKeyTests = []pubKeyTest{
},
{
name: "Y > P",
- key: []byte{
- 0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
+ key: []byte{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xFF, 0xFF,
@@ -215,8 +200,7 @@ var pubKeyTests = []pubKeyTest{
},
{
name: "hybrid",
- key: []byte{
- 0x06, 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb,
+ key: []byte{0x06, 0x79, 0xbe, 0x66, 0x7e, 0xf9, 0xdc, 0xbb,
0xac, 0x55, 0xa0, 0x62, 0x95, 0xce, 0x87, 0x0b, 0x07,
0x02, 0x9b, 0xfc, 0xdb, 0x2d, 0xce, 0x28, 0xd9, 0x59,
0xf2, 0x81, 0x5b, 0x16, 0xf8, 0x17, 0x98, 0x48, 0x3a,
@@ -233,20 +217,16 @@ var pubKeyTests = []pubKeyTest{
func TestPubKeys(t *testing.T) {
for _, test := range pubKeyTests {
pk, err := ParsePubKey(test.key)
- if chk.E(err) {
+ if err != nil {
if test.isValid {
- t.Errorf(
- "%s pubkey failed when shouldn't %v",
- test.name, err,
- )
+ t.Errorf("%s pubkey failed when shouldn't %v",
+ test.name, err)
}
continue
}
if !test.isValid {
- t.Errorf(
- "%s counted as valid when it should fail",
- test.name,
- )
+ t.Errorf("%s counted as valid when it should fail",
+ test.name)
continue
}
var pkStr []byte
@@ -259,10 +239,8 @@ func TestPubKeys(t *testing.T) {
pkStr = test.key
}
if !bytes.Equal(test.key, pkStr) {
- t.Errorf(
- "%s pubkey: serialized keys do not match.",
- test.name,
- )
+ t.Errorf("%s pubkey: serialized keys do not match.",
+ test.name)
spew.Dump(test.key)
spew.Dump(pkStr)
}
@@ -271,38 +249,32 @@ func TestPubKeys(t *testing.T) {
func TestPublicKeyIsEqual(t *testing.T) {
pubKey1, err := ParsePubKey(
- []byte{
- 0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33,
+ []byte{0x03, 0x26, 0x89, 0xc7, 0xc2, 0xda, 0xb1, 0x33,
0x09, 0xfb, 0x14, 0x3e, 0x0e, 0x8f, 0xe3, 0x96, 0x34,
0x25, 0x21, 0x88, 0x7e, 0x97, 0x66, 0x90, 0xb6, 0xb4,
0x7f, 0x5b, 0x2a, 0x4b, 0x7d, 0x44, 0x8e,
},
)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("failed to parse raw bytes for pubKey1: %v", err)
}
pubKey2, err := ParsePubKey(
- []byte{
- 0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
+ []byte{0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
},
)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("failed to parse raw bytes for pubKey2: %v", err)
}
if !pubKey1.IsEqual(pubKey1) {
- t.Fatalf(
- "value of IsEqual is incorrect, %v is "+
- "equal to %v", pubKey1, pubKey1,
- )
+ t.Fatalf("value of IsEqual is incorrect, %v is "+
+ "equal to %v", pubKey1, pubKey1)
}
if pubKey1.IsEqual(pubKey2) {
- t.Fatalf(
- "value of IsEqual is incorrect, %v is not "+
- "equal to %v", pubKey1, pubKey2,
- )
+ t.Fatalf("value of IsEqual is incorrect, %v is not "+
+ "equal to %v", pubKey1, pubKey2)
}
}
@@ -311,11 +283,9 @@ func TestIsCompressed(t *testing.T) {
isCompressed := IsCompressedPubKey(test.key)
wantCompressed := (test.format == pubkeyCompressed)
if isCompressed != wantCompressed {
- t.Fatalf(
- "%s (%x) pubkey: unexpected compressed result, "+
- "got %v, want %v", test.name, test.key,
- isCompressed, wantCompressed,
- )
+ t.Fatalf("%s (%x) pubkey: unexpected compressed result, "+
+ "got %v, want %v", test.name, test.key,
+ isCompressed, wantCompressed)
}
}
}
diff --git a/ec/schnorr/bench_test.go b/ec/schnorr/bench_test.go
index fef6100..bb05584 100644
--- a/ec/schnorr/bench_test.go
+++ b/ec/schnorr/bench_test.go
@@ -9,12 +9,10 @@ import (
"math/big"
"testing"
- "github.com/minio/sha256-simd"
- "orly.dev/chk"
-
"orly.dev/ec"
"orly.dev/ec/secp256k1"
"orly.dev/hex"
+ "orly.dev/sha256"
)
// hexToBytes converts the passed hex string into bytes and will panic if there
@@ -23,7 +21,7 @@ import (
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
return b
@@ -35,7 +33,7 @@ func hexToBytes(s string) []byte {
// must only) be called with hard-coded values.
func hexToModNScalar(s string) *btcec.ModNScalar {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
var scalar btcec.ModNScalar
@@ -51,7 +49,7 @@ func hexToModNScalar(s string) *btcec.ModNScalar {
// called with hard-coded values.
func hexToFieldVal(s string) *btcec.FieldVal {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
var f btcec.FieldVal
@@ -113,7 +111,7 @@ func BenchmarkSigVerify(b *testing.B) {
// Double sha256 of by{0x01, 0x02, 0x03, 0x04}
msgHash := sha256.Sum256([]byte("benchmark"))
sig, err := Sign(privKey, msgHash[:])
- if chk.E(err) {
+ if err != nil {
b.Fatalf("unable to sign: %v", err)
}
if !sig.Verify(msgHash[:], pubKey) {
diff --git a/ec/schnorr/signature.go b/ec/schnorr/signature.go
index 670f386..2b4abfc 100644
--- a/ec/schnorr/signature.go
+++ b/ec/schnorr/signature.go
@@ -4,8 +4,8 @@ package schnorr
import (
"fmt"
-
"orly.dev/chk"
+
"orly.dev/ec"
"orly.dev/ec/chainhash"
"orly.dev/ec/secp256k1"
@@ -143,7 +143,7 @@ func schnorrVerify(sig *Signature, hash []byte, pubKeyBytes []byte) error {
//
// Fail if P is not a point on the curve
pubKey, err := ParsePubKey(pubKeyBytes)
- if chk.E(err) {
+ if err != nil {
return err
}
if !pubKey.IsOnCurve() {
@@ -482,7 +482,7 @@ func Sign(
}
sig, err := schnorrSign(&privKeyScalar, &kPrime, pub, hash, opts)
kPrime.Zero()
- if chk.E(err) {
+ if err != nil {
return nil, err
}
return sig, nil
@@ -503,7 +503,7 @@ func Sign(
// Steps 10-15.
sig, err := schnorrSign(&privKeyScalar, k, pub, hash, opts)
k.Zero()
- if chk.E(err) {
+ if err != nil {
// Try again with a new nonce.
continue
}
diff --git a/ec/schnorr/signature_test.go b/ec/schnorr/signature_test.go
index 49a7d14..36dafb4 100644
--- a/ec/schnorr/signature_test.go
+++ b/ec/schnorr/signature_test.go
@@ -7,13 +7,13 @@ package schnorr
import (
"errors"
+ "orly.dev/chk"
"strings"
"testing"
"testing/quick"
"github.com/davecgh/go-spew/spew"
- "orly.dev/chk"
"orly.dev/ec"
"orly.dev/ec/secp256k1"
"orly.dev/hex"
@@ -192,7 +192,7 @@ var bip340TestVectors = []bip340Test{
// the only way it can fail is if there is an error in the test source code.
func decodeHex(hexStr string) []byte {
b, err := hex.Dec(hexStr)
- if chk.E(err) {
+ if err != nil {
panic(
"invalid hex string in test source: err " + err.Error() +
", hex: " + hexStr,
@@ -218,7 +218,7 @@ func TestSchnorrSign(t *testing.T) {
signOpts = []SignOption{CustomNonce(auxBytes)}
}
sig, err := Sign(privKey, msg, signOpts...)
- if chk.E(err) {
+ if err != nil {
t.Fatalf("test #%v: sig generation failed: %v", i+1, err)
}
if strings.ToUpper(hex.Enc(sig.Serialize())) != test.signature {
@@ -229,10 +229,10 @@ func TestSchnorrSign(t *testing.T) {
}
pubKeyBytes := decodeHex(test.publicKey)
err = schnorrVerify(sig, msg, pubKeyBytes)
- if chk.E(err) {
+ if err != nil {
t.Fail()
}
- verify := !chk.E(err)
+ verify := err == nil
if test.verifyResult != verify {
t.Fatalf(
"test #%v: verification mismatch: "+
@@ -248,7 +248,7 @@ func TestSchnorrVerify(t *testing.T) {
pubKeyBytes := decodeHex(test.publicKey)
_, err := ParsePubKey(pubKeyBytes)
switch {
- case !test.validPubKey && chk.E(err):
+ case !test.validPubKey && err != nil:
if !errors.Is(err, test.expectErr) {
t.Fatalf(
"test #%v: pubkey validation should "+
@@ -257,22 +257,22 @@ func TestSchnorrVerify(t *testing.T) {
)
}
continue
- case chk.E(err):
+ case err != nil:
t.Fatalf("test #%v: unable to parse pubkey: %v", i, err)
}
msg := decodeHex(test.message)
sig, err := ParseSignature(decodeHex(test.signature))
- if chk.E(err) {
+ if err != nil {
t.Fatalf("unable to parse sig: %v", err)
}
err = schnorrVerify(sig, msg, pubKeyBytes)
- if chk.E(err) && test.verifyResult {
+ if err != nil && test.verifyResult {
t.Fatalf(
"test #%v: verification shouldn't have failed: %v", i+1,
err,
)
}
- verify := !chk.E(err)
+ verify := err == nil
if test.verifyResult != verify {
t.Fatalf(
"test #%v: verificaiton mismatch: expected "+
@@ -301,7 +301,7 @@ func TestSchnorrSignNoMutate(t *testing.T) {
privKey, _ := btcec.SecKeyFromBytes(privBytesCopy[:])
// Generate a signature for secret key with our message.
_, err := Sign(privKey, msg[:])
- if chk.E(err) {
+ if err != nil {
t.Logf("unable to gen sig: %v", err)
return false
}
diff --git a/ec/secp256k1/bench_test.go b/ec/secp256k1/bench_test.go
index 893a100..6862204 100644
--- a/ec/secp256k1/bench_test.go
+++ b/ec/secp256k1/bench_test.go
@@ -158,7 +158,7 @@ func BenchmarkParsePubKeyCompressed(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
- _, _ = ParsePubKey(pubKeyBytes)
+ ParsePubKey(pubKeyBytes)
}
}
@@ -172,6 +172,6 @@ func BenchmarkParsePubKeyUncompressed(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
- _, _ = ParsePubKey(pubKeyBytes)
+ ParsePubKey(pubKeyBytes)
}
}
diff --git a/ec/secp256k1/curve.go b/ec/secp256k1/curve.go
index 19c9323..ff808d0 100644
--- a/ec/secp256k1/curve.go
+++ b/ec/secp256k1/curve.go
@@ -8,7 +8,6 @@ package secp256k1
import (
"math/bits"
- "orly.dev/chk"
"orly.dev/hex"
)
@@ -34,7 +33,7 @@ import (
// called with hard-coded values.
func hexToFieldVal(s string) *FieldVal {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
var f FieldVal
@@ -58,7 +57,7 @@ func hexToModNScalar(s string) *ModNScalar {
s = "0" + s
}
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
var scalar ModNScalar
diff --git a/ec/secp256k1/curve_test.go b/ec/secp256k1/curve_test.go
index 1910068..a09dc8b 100644
--- a/ec/secp256k1/curve_test.go
+++ b/ec/secp256k1/curve_test.go
@@ -10,10 +10,9 @@ import (
"math/big"
"math/bits"
"math/rand"
+ "orly.dev/chk"
"testing"
"time"
-
- "orly.dev/chk"
)
var (
diff --git a/ec/secp256k1/ecdh_test.go b/ec/secp256k1/ecdh_test.go
index 03a0954..eb40f2b 100644
--- a/ec/secp256k1/ecdh_test.go
+++ b/ec/secp256k1/ecdh_test.go
@@ -8,18 +8,16 @@ package secp256k1
import (
"bytes"
"testing"
-
- "orly.dev/chk"
)
func TestGenerateSharedSecret(t *testing.T) {
secKey1, err := GenerateSecretKey()
- if chk.E(err) {
+ if err != nil {
t.Errorf("secret key generation error: %s", err)
return
}
secKey2, err := GenerateSecretKey()
- if chk.E(err) {
+ if err != nil {
t.Errorf("secret key generation error: %s", err)
return
}
@@ -28,9 +26,7 @@ func TestGenerateSharedSecret(t *testing.T) {
secret1 := GenerateSharedSecret(secKey1, pubKey2)
secret2 := GenerateSharedSecret(secKey2, pubKey1)
if !bytes.Equal(secret1, secret2) {
- t.Errorf(
- "ECDH failed, secrets mismatch - first: %x, second: %x",
- secret1, secret2,
- )
+ t.Errorf("ECDH failed, secrets mismatch - first: %x, second: %x",
+ secret1, secret2)
}
}
diff --git a/ec/secp256k1/ellipticadaptor_test.go b/ec/secp256k1/ellipticadaptor_test.go
index 2ab7c5f..b494501 100644
--- a/ec/secp256k1/ellipticadaptor_test.go
+++ b/ec/secp256k1/ellipticadaptor_test.go
@@ -7,10 +7,9 @@ package secp256k1
import (
"math/big"
"math/rand"
+ "orly.dev/chk"
"testing"
"time"
-
- "orly.dev/chk"
)
// randBytes returns a byte slice of the required size created from a random
diff --git a/ec/secp256k1/example_test.go b/ec/secp256k1/example_test.go
index 0471f68..a5e05bb 100644
--- a/ec/secp256k1/example_test.go
+++ b/ec/secp256k1/example_test.go
@@ -11,11 +11,9 @@ import (
"encoding/binary"
"fmt"
- "github.com/minio/sha256-simd"
- "orly.dev/chk"
-
"orly.dev/ec/secp256k1"
"orly.dev/hex"
+ "orly.dev/sha256"
)
// This example demonstrates use of GenerateSharedSecret to encrypt a message
@@ -24,7 +22,7 @@ import (
func Example_encryptDecryptMessage() {
newAEAD := func(key []byte) (cipher.AEAD, error) {
block, err := aes.NewCipher(key)
- if chk.E(err) {
+ if err != nil {
return nil, err
}
return cipher.NewGCM(block)
@@ -34,19 +32,19 @@ func Example_encryptDecryptMessage() {
"04115c42e757b2efb7671c578530ec191a1359381e6a71127a9d37c486fd30da" +
"e57e76dc58f693bd7e7010358ce6b165e483a2921010db67ac11b1b51b651953d2",
) // uncompressed pubkey
- if chk.E(err) {
+ if err != nil {
fmt.Println(err)
return
}
pubKey, err := secp256k1.ParsePubKey(pubKeyBytes)
- if chk.E(err) {
+ if err != nil {
fmt.Println(err)
return
}
// Derive an ephemeral public/secret keypair for performing ECDHE with
// the recipient.
ephemeralSecKey, err := secp256k1.GenerateSecretKey()
- if chk.E(err) {
+ if err != nil {
fmt.Println(err)
return
}
@@ -74,7 +72,7 @@ func Example_encryptDecryptMessage() {
// first (and only) use of a counter.
plaintext := []byte("test message")
aead, err := newAEAD(cipherKey[:])
- if chk.E(err) {
+ if err != nil {
fmt.Println(err)
return
}
@@ -90,7 +88,7 @@ func Example_encryptDecryptMessage() {
pkBytes, err := hex.Dec(
"a11b0a4e1a132305652ee7a8eb7848f6ad5ea381e3ce20a2c086a2e388230811",
)
- if chk.E(err) {
+ if err != nil {
fmt.Println(err)
return
}
@@ -101,7 +99,7 @@ func Example_encryptDecryptMessage() {
pubKeyLen := binary.LittleEndian.Uint32(ciphertext[:4])
senderPubKeyBytes := ciphertext[4 : 4+pubKeyLen]
senderPubKey, err := secp256k1.ParsePubKey(senderPubKeyBytes)
- if chk.E(err) {
+ if err != nil {
fmt.Println(err)
return
}
@@ -115,7 +113,7 @@ func Example_encryptDecryptMessage() {
)
// Open the sealed message.
aead, err = newAEAD(recoveredCipherKey[:])
- if chk.E(err) {
+ if err != nil {
fmt.Println(err)
return
}
@@ -124,7 +122,7 @@ func Example_encryptDecryptMessage() {
nil, nonce, ciphertext[4+pubKeyLen:],
senderPubKeyBytes,
)
- if chk.E(err) {
+ if err != nil {
fmt.Println(err)
return
}
diff --git a/ec/secp256k1/field_test.go b/ec/secp256k1/field_test.go
index 5507682..55a04d3 100644
--- a/ec/secp256k1/field_test.go
+++ b/ec/secp256k1/field_test.go
@@ -11,11 +11,11 @@ import (
"fmt"
"math/big"
"math/rand"
+ "orly.dev/chk"
"reflect"
"testing"
"time"
- "orly.dev/chk"
"orly.dev/hex"
)
diff --git a/ec/secp256k1/loadprecomputed.go b/ec/secp256k1/loadprecomputed.go
index acbfdb1..fe35a3e 100644
--- a/ec/secp256k1/loadprecomputed.go
+++ b/ec/secp256k1/loadprecomputed.go
@@ -11,8 +11,6 @@ import (
"io"
"strings"
"sync"
-
- "orly.dev/chk"
)
//go:generate go run genprecomps.go
@@ -52,11 +50,11 @@ var s256BytePoints = func() func() *bytePointTable {
// multiplication.
decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(bp))
r, err := zlib.NewReader(decoder)
- if chk.E(err) {
+ if err != nil {
panic(err)
}
serialized, err := io.ReadAll(r)
- if chk.E(err) {
+ if err != nil {
panic(err)
}
// Deserialize the precomputed byte points and set the memory table to
diff --git a/ec/secp256k1/modnscalar_test.go b/ec/secp256k1/modnscalar_test.go
index 67cf190..ff5a9fe 100644
--- a/ec/secp256k1/modnscalar_test.go
+++ b/ec/secp256k1/modnscalar_test.go
@@ -9,11 +9,11 @@ import (
"fmt"
"math/big"
"math/rand"
+ "orly.dev/chk"
"reflect"
"testing"
"time"
- "orly.dev/chk"
"orly.dev/hex"
)
diff --git a/ec/secp256k1/nonce.go b/ec/secp256k1/nonce.go
index b30dd30..e838d11 100644
--- a/ec/secp256k1/nonce.go
+++ b/ec/secp256k1/nonce.go
@@ -9,7 +9,7 @@ import (
"bytes"
"hash"
- "github.com/minio/sha256-simd"
+ "orly.dev/sha256"
)
// References:
diff --git a/ec/secp256k1/nonce_test.go b/ec/secp256k1/nonce_test.go
index 64998a2..c0eccc0 100644
--- a/ec/secp256k1/nonce_test.go
+++ b/ec/secp256k1/nonce_test.go
@@ -9,10 +9,8 @@ import (
"bytes"
"testing"
- "github.com/minio/sha256-simd"
- "orly.dev/chk"
-
"orly.dev/hex"
+ "orly.dev/sha256"
)
// hexToBytes converts the passed hex string into bytes and will panic if there
@@ -21,7 +19,7 @@ import (
// hard-coded values.
func hexToBytes(s string) []byte {
b, err := hex.Dec(s)
- if chk.E(err) {
+ if err != nil {
panic("invalid hex in source file: " + s)
}
return b
diff --git a/ec/secp256k1/precomps/genprecomps.go b/ec/secp256k1/precomps/genprecomps.go
index 733d129..7099704 100644
--- a/ec/secp256k1/precomps/genprecomps.go
+++ b/ec/secp256k1/precomps/genprecomps.go
@@ -11,11 +11,11 @@ package main
import (
"fmt"
"math/big"
+ "orly.dev/chk"
+ "orly.dev/log"
"os"
- "orly.dev/chk"
"orly.dev/ec/secp256k1"
- "orly.dev/log"
)
// curveParams houses the secp256k1 curve parameters for convenient access.
@@ -192,7 +192,7 @@ func endomorphismVectors(lambda *big.Int) (a1, b1, a2, b2 *big.Int) {
}
// deriveEndomorphismParams calculates and returns parameters needed to make use
-// of the secp256k1 endomorphism. TODO: this is unused
+// of the secp256k1 endomorphism.
func deriveEndomorphismParams() [2]endomorphismParams {
// roots returns the solutions of the characteristic polynomial of the
// secp256k1 endomorphism.
@@ -321,12 +321,12 @@ func main() {
}
serialized := serializedBytePoints()
embedded, err := os.Create("secp256k1/rawbytepoints.bin")
- if chk.E(err) {
+ if err != nil {
log.F.Ln(err)
os.Exit(1)
}
n, err := embedded.Write(serialized)
- if chk.E(err) {
+ if err != nil {
panic(err)
}
if n != len(serialized) {
diff --git a/ec/secp256k1/pubkey_test.go b/ec/secp256k1/pubkey_test.go
index df83000..6480d57 100644
--- a/ec/secp256k1/pubkey_test.go
+++ b/ec/secp256k1/pubkey_test.go
@@ -9,8 +9,6 @@ import (
"bytes"
"errors"
"testing"
-
- "orly.dev/chk"
)
// TestParsePubKey ensures that public keys are properly parsed according
@@ -22,217 +20,209 @@ func TestParsePubKey(t *testing.T) {
err error // expected error
wantX string // expected x coordinate
wantY string // expected y coordinate
- }{
- {
- name: "uncompressed ok",
- key: "04" +
- "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- err: nil,
- wantX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
- wantY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- }, {
- name: "uncompressed x changed (not on curve)",
- key: "04" +
- "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- err: ErrPubKeyNotOnCurve,
- }, {
- name: "uncompressed y changed (not on curve)",
- key: "04" +
- "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4",
- err: ErrPubKeyNotOnCurve,
- }, {
- name: "uncompressed claims compressed",
- key: "03" +
- "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- err: ErrPubKeyInvalidFormat,
- }, {
- name: "uncompressed as hybrid ok (ybit = 0)",
- key: "06" +
- "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c",
- err: nil,
- wantX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
- wantY: "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c",
- }, {
- name: "uncompressed as hybrid ok (ybit = 1)",
- key: "07" +
- "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- err: nil,
- wantX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
- wantY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- }, {
- name: "uncompressed as hybrid wrong oddness",
- key: "06" +
- "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- err: ErrPubKeyMismatchedOddness,
- }, {
- name: "compressed ok (ybit = 0)",
- key: "02" +
- "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d",
- err: nil,
- wantX: "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d",
- wantY: "0890ff84d7999d878a57bee170e19ef4b4803b4bdede64503a6ac352b03c8032",
- }, {
- name: "compressed ok (ybit = 1)",
- key: "03" +
- "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e",
- err: nil,
- wantX: "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e",
- wantY: "499dd7852849a38aa23ed9f306f07794063fe7904e0f347bc209fdddaf37691f",
- }, {
- name: "compressed claims uncompressed (ybit = 0)",
- key: "04" +
- "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d",
- err: ErrPubKeyInvalidFormat,
- }, {
- name: "compressed claims uncompressed (ybit = 1)",
- key: "04" +
- "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e",
- err: ErrPubKeyInvalidFormat,
- }, {
- name: "compressed claims hybrid (ybit = 0)",
- key: "06" +
- "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d",
- err: ErrPubKeyInvalidFormat,
- }, {
- name: "compressed claims hybrid (ybit = 1)",
- key: "07" +
- "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e",
- err: ErrPubKeyInvalidFormat,
- }, {
- name: "compressed with invalid x coord (ybit = 0)",
- key: "03" +
- "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4c",
- err: ErrPubKeyNotOnCurve,
- }, {
- name: "compressed with invalid x coord (ybit = 1)",
- key: "03" +
- "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448d",
- err: ErrPubKeyNotOnCurve,
- }, {
- name: "empty",
- key: "",
- err: ErrPubKeyInvalidLen,
- }, {
- name: "wrong length",
- key: "05",
- err: ErrPubKeyInvalidLen,
- }, {
- name: "uncompressed x == p",
- key: "04" +
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f" +
- "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- err: ErrPubKeyXTooBig,
- }, {
- // The y coordinate produces a valid point for x == 1 (mod p), but it
- // should fail to parse instead of wrapping around.
- name: "uncompressed x > p (p + 1 -- aka 1)",
- key: "04" +
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30" +
- "bde70df51939b94c9c24979fa7dd04ebd9b3572da7802290438af2a681895441",
- err: ErrPubKeyXTooBig,
- }, {
- name: "uncompressed y == p",
- key: "04" +
- "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
- err: ErrPubKeyYTooBig,
- }, {
- // The x coordinate produces a valid point for y == 1 (mod p), but it
- // should fail to parse instead of wrapping around.
- name: "uncompressed y > p (p + 1 -- aka 1)",
- key: "04" +
- "1fe1e5ef3fceb5c135ab7741333ce5a6e80d68167653f6b2b24bcbcfaaaff507" +
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30",
- err: ErrPubKeyYTooBig,
- }, {
- name: "compressed x == p (ybit = 0)",
- key: "02" +
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
- err: ErrPubKeyXTooBig,
- }, {
- name: "compressed x == p (ybit = 1)",
- key: "03" +
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
- err: ErrPubKeyXTooBig,
- }, {
- // This would be valid for x == 2 (mod p), but it should fail to parse
- // instead of wrapping around.
- name: "compressed x > p (p + 2 -- aka 2) (ybit = 0)",
- key: "02" +
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc31",
- err: ErrPubKeyXTooBig,
- }, {
- // This would be valid for x == 1 (mod p), but it should fail to parse
- // instead of wrapping around.
- name: "compressed x > p (p + 1 -- aka 1) (ybit = 1)",
- key: "03" +
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30",
- err: ErrPubKeyXTooBig,
- }, {
- name: "hybrid x == p (ybit = 1)",
- key: "07" +
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f" +
- "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- err: ErrPubKeyXTooBig,
- }, {
- // The y coordinate produces a valid point for x == 1 (mod p), but it
- // should fail to parse instead of wrapping around.
- name: "hybrid x > p (p + 1 -- aka 1) (ybit = 0)",
- key: "06" +
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30" +
- "bde70df51939b94c9c24979fa7dd04ebd9b3572da7802290438af2a681895441",
- err: ErrPubKeyXTooBig,
- }, {
- name: "hybrid y == p (ybit = 0 when mod p)",
- key: "06" +
- "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
- err: ErrPubKeyYTooBig,
- }, {
- // The x coordinate produces a valid point for y == 1 (mod p), but it
- // should fail to parse instead of wrapping around.
- name: "hybrid y > p (p + 1 -- aka 1) (ybit = 1 when mod p)",
- key: "07" +
- "1fe1e5ef3fceb5c135ab7741333ce5a6e80d68167653f6b2b24bcbcfaaaff507" +
- "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30",
- err: ErrPubKeyYTooBig,
- },
- }
+ }{{
+ name: "uncompressed ok",
+ key: "04" +
+ "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ err: nil,
+ wantX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
+ wantY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ }, {
+ name: "uncompressed x changed (not on curve)",
+ key: "04" +
+ "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ err: ErrPubKeyNotOnCurve,
+ }, {
+ name: "uncompressed y changed (not on curve)",
+ key: "04" +
+ "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4",
+ err: ErrPubKeyNotOnCurve,
+ }, {
+ name: "uncompressed claims compressed",
+ key: "03" +
+ "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ err: ErrPubKeyInvalidFormat,
+ }, {
+ name: "uncompressed as hybrid ok (ybit = 0)",
+ key: "06" +
+ "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c",
+ err: nil,
+ wantX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
+ wantY: "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c",
+ }, {
+ name: "uncompressed as hybrid ok (ybit = 1)",
+ key: "07" +
+ "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ err: nil,
+ wantX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
+ wantY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ }, {
+ name: "uncompressed as hybrid wrong oddness",
+ key: "06" +
+ "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ err: ErrPubKeyMismatchedOddness,
+ }, {
+ name: "compressed ok (ybit = 0)",
+ key: "02" +
+ "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d",
+ err: nil,
+ wantX: "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d",
+ wantY: "0890ff84d7999d878a57bee170e19ef4b4803b4bdede64503a6ac352b03c8032",
+ }, {
+ name: "compressed ok (ybit = 1)",
+ key: "03" +
+ "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e",
+ err: nil,
+ wantX: "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e",
+ wantY: "499dd7852849a38aa23ed9f306f07794063fe7904e0f347bc209fdddaf37691f",
+ }, {
+ name: "compressed claims uncompressed (ybit = 0)",
+ key: "04" +
+ "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d",
+ err: ErrPubKeyInvalidFormat,
+ }, {
+ name: "compressed claims uncompressed (ybit = 1)",
+ key: "04" +
+ "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e",
+ err: ErrPubKeyInvalidFormat,
+ }, {
+ name: "compressed claims hybrid (ybit = 0)",
+ key: "06" +
+ "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d",
+ err: ErrPubKeyInvalidFormat,
+ }, {
+ name: "compressed claims hybrid (ybit = 1)",
+ key: "07" +
+ "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e",
+ err: ErrPubKeyInvalidFormat,
+ }, {
+ name: "compressed with invalid x coord (ybit = 0)",
+ key: "03" +
+ "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4c",
+ err: ErrPubKeyNotOnCurve,
+ }, {
+ name: "compressed with invalid x coord (ybit = 1)",
+ key: "03" +
+ "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448d",
+ err: ErrPubKeyNotOnCurve,
+ }, {
+ name: "empty",
+ key: "",
+ err: ErrPubKeyInvalidLen,
+ }, {
+ name: "wrong length",
+ key: "05",
+ err: ErrPubKeyInvalidLen,
+ }, {
+ name: "uncompressed x == p",
+ key: "04" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f" +
+ "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ err: ErrPubKeyXTooBig,
+ }, {
+ // The y coordinate produces a valid point for x == 1 (mod p), but it
+ // should fail to parse instead of wrapping around.
+ name: "uncompressed x > p (p + 1 -- aka 1)",
+ key: "04" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30" +
+ "bde70df51939b94c9c24979fa7dd04ebd9b3572da7802290438af2a681895441",
+ err: ErrPubKeyXTooBig,
+ }, {
+ name: "uncompressed y == p",
+ key: "04" +
+ "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
+ err: ErrPubKeyYTooBig,
+ }, {
+ // The x coordinate produces a valid point for y == 1 (mod p), but it
+ // should fail to parse instead of wrapping around.
+ name: "uncompressed y > p (p + 1 -- aka 1)",
+ key: "04" +
+ "1fe1e5ef3fceb5c135ab7741333ce5a6e80d68167653f6b2b24bcbcfaaaff507" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30",
+ err: ErrPubKeyYTooBig,
+ }, {
+ name: "compressed x == p (ybit = 0)",
+ key: "02" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
+ err: ErrPubKeyXTooBig,
+ }, {
+ name: "compressed x == p (ybit = 1)",
+ key: "03" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
+ err: ErrPubKeyXTooBig,
+ }, {
+ // This would be valid for x == 2 (mod p), but it should fail to parse
+ // instead of wrapping around.
+ name: "compressed x > p (p + 2 -- aka 2) (ybit = 0)",
+ key: "02" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc31",
+ err: ErrPubKeyXTooBig,
+ }, {
+ // This would be valid for x == 1 (mod p), but it should fail to parse
+ // instead of wrapping around.
+ name: "compressed x > p (p + 1 -- aka 1) (ybit = 1)",
+ key: "03" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30",
+ err: ErrPubKeyXTooBig,
+ }, {
+ name: "hybrid x == p (ybit = 1)",
+ key: "07" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f" +
+ "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ err: ErrPubKeyXTooBig,
+ }, {
+ // The y coordinate produces a valid point for x == 1 (mod p), but it
+ // should fail to parse instead of wrapping around.
+ name: "hybrid x > p (p + 1 -- aka 1) (ybit = 0)",
+ key: "06" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30" +
+ "bde70df51939b94c9c24979fa7dd04ebd9b3572da7802290438af2a681895441",
+ err: ErrPubKeyXTooBig,
+ }, {
+ name: "hybrid y == p (ybit = 0 when mod p)",
+ key: "06" +
+ "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f",
+ err: ErrPubKeyYTooBig,
+ }, {
+ // The x coordinate produces a valid point for y == 1 (mod p), but it
+ // should fail to parse instead of wrapping around.
+ name: "hybrid y > p (p + 1 -- aka 1) (ybit = 1 when mod p)",
+ key: "07" +
+ "1fe1e5ef3fceb5c135ab7741333ce5a6e80d68167653f6b2b24bcbcfaaaff507" +
+ "fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc30",
+ err: ErrPubKeyYTooBig,
+ }}
for _, test := range tests {
pubKeyBytes := hexToBytes(test.key)
pubKey, err := ParsePubKey(pubKeyBytes)
if !errors.Is(err, test.err) {
- t.Errorf(
- "%s mismatched e -- got %v, want %v", test.name, err,
- test.err,
- )
+ t.Errorf("%s mismatched e -- got %v, want %v", test.name, err,
+ test.err)
continue
}
- if chk.E(err) {
+ if err != nil {
continue
}
// Ensure the x and y coordinates match the expected values upon
// successful parse.
wantX, wantY := hexToFieldVal(test.wantX), hexToFieldVal(test.wantY)
if !pubKey.x.Equals(wantX) {
- t.Errorf(
- "%s: mismatched x coordinate -- got %v, want %v",
- test.name, pubKey.x, wantX,
- )
+ t.Errorf("%s: mismatched x coordinate -- got %v, want %v",
+ test.name, pubKey.x, wantX)
continue
}
if !pubKey.y.Equals(wantY) {
- t.Errorf(
- "%s: mismatched y coordinate -- got %v, want %v",
- test.name, pubKey.y, wantY,
- )
+ t.Errorf("%s: mismatched y coordinate -- got %v, want %v",
+ test.name, pubKey.y, wantY)
continue
}
}
@@ -247,81 +237,79 @@ func TestPubKeySerialize(t *testing.T) {
pubY string // hex encoded y coordinate for pubkey to serialize
compress bool // whether to serialize compressed or uncompressed
expected string // hex encoded expected pubkey serialization
- }{
- {
- name: "uncompressed (ybit = 0)",
- pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
- pubY: "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c",
- compress: false,
- expected: "04" +
- "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c",
- }, {
- name: "uncompressed (ybit = 1)",
- pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
- pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- compress: false,
- expected: "04" +
- "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- }, {
- // It's invalid to parse pubkeys that are not on the curve, however it
- // is possible to manually create them and they should serialize
- // correctly.
- name: "uncompressed not on the curve due to x coord",
- pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
- pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- compress: false,
- expected: "04" +
- "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- }, {
- // It's invalid to parse pubkeys that are not on the curve, however it
- // is possible to manually create them and they should serialize
- // correctly.
- name: "uncompressed not on the curve due to y coord",
- pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
- pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4",
- compress: false,
- expected: "04" +
- "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
- "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4",
- }, {
- name: "compressed (ybit = 0)",
- pubX: "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d",
- pubY: "0890ff84d7999d878a57bee170e19ef4b4803b4bdede64503a6ac352b03c8032",
- compress: true,
- expected: "02" +
- "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d",
- }, {
- name: "compressed (ybit = 1)",
- pubX: "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e",
- pubY: "499dd7852849a38aa23ed9f306f07794063fe7904e0f347bc209fdddaf37691f",
- compress: true,
- expected: "03" +
- "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e",
- }, {
- // It's invalid to parse pubkeys that are not on the curve, however it
- // is possible to manually create them and they should serialize
- // correctly.
- name: "compressed not on curve (ybit = 0)",
- pubX: "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4c",
- pubY: "0890ff84d7999d878a57bee170e19ef4b4803b4bdede64503a6ac352b03c8032",
- compress: true,
- expected: "02" +
- "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4c",
- }, {
- // It's invalid to parse pubkeys that are not on the curve, however it
- // is possible to manually create them and they should serialize
- // correctly.
- name: "compressed not on curve (ybit = 1)",
- pubX: "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448d",
- pubY: "499dd7852849a38aa23ed9f306f07794063fe7904e0f347bc209fdddaf37691f",
- compress: true,
- expected: "03" +
- "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448d",
- },
- }
+ }{{
+ name: "uncompressed (ybit = 0)",
+ pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
+ pubY: "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c",
+ compress: false,
+ expected: "04" +
+ "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c",
+ }, {
+ name: "uncompressed (ybit = 1)",
+ pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
+ pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ compress: false,
+ expected: "04" +
+ "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ }, {
+ // It's invalid to parse pubkeys that are not on the curve, however it
+ // is possible to manually create them and they should serialize
+ // correctly.
+ name: "uncompressed not on the curve due to x coord",
+ pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
+ pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ compress: false,
+ expected: "04" +
+ "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ }, {
+ // It's invalid to parse pubkeys that are not on the curve, however it
+ // is possible to manually create them and they should serialize
+ // correctly.
+ name: "uncompressed not on the curve due to y coord",
+ pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
+ pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4",
+ compress: false,
+ expected: "04" +
+ "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c" +
+ "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4",
+ }, {
+ name: "compressed (ybit = 0)",
+ pubX: "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d",
+ pubY: "0890ff84d7999d878a57bee170e19ef4b4803b4bdede64503a6ac352b03c8032",
+ compress: true,
+ expected: "02" +
+ "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4d",
+ }, {
+ name: "compressed (ybit = 1)",
+ pubX: "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e",
+ pubY: "499dd7852849a38aa23ed9f306f07794063fe7904e0f347bc209fdddaf37691f",
+ compress: true,
+ expected: "03" +
+ "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448e",
+ }, {
+ // It's invalid to parse pubkeys that are not on the curve, however it
+ // is possible to manually create them and they should serialize
+ // correctly.
+ name: "compressed not on curve (ybit = 0)",
+ pubX: "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4c",
+ pubY: "0890ff84d7999d878a57bee170e19ef4b4803b4bdede64503a6ac352b03c8032",
+ compress: true,
+ expected: "02" +
+ "ce0b14fb842b1ba549fdd675c98075f12e9c510f8ef52bd021a9a1f4809d3b4c",
+ }, {
+ // It's invalid to parse pubkeys that are not on the curve, however it
+ // is possible to manually create them and they should serialize
+ // correctly.
+ name: "compressed not on curve (ybit = 1)",
+ pubX: "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448d",
+ pubY: "499dd7852849a38aa23ed9f306f07794063fe7904e0f347bc209fdddaf37691f",
+ compress: true,
+ expected: "03" +
+ "2689c7c2dab13309fb143e0e8fe396342521887e976690b6b47f5b2a4b7d448d",
+ }}
for _, test := range tests {
// Parse the test data.
x, y := hexToFieldVal(test.pubX), hexToFieldVal(test.pubY)
@@ -336,10 +324,8 @@ func TestPubKeySerialize(t *testing.T) {
}
expected := hexToBytes(test.expected)
if !bytes.Equal(serialized, expected) {
- t.Errorf(
- "%s: mismatched serialized public key -- got %x, want %x",
- test.name, serialized, expected,
- )
+ t.Errorf("%s: mismatched serialized public key -- got %x, want %x",
+ test.name, serialized, expected)
continue
}
}
@@ -362,23 +348,17 @@ func TestPublicKeyIsEqual(t *testing.T) {
}
if !pubKey1.IsEqual(pubKey1) {
- t.Fatalf(
- "bad self public key equality check: (%v, %v)", pubKey1.x,
- pubKey1.y,
- )
+ t.Fatalf("bad self public key equality check: (%v, %v)", pubKey1.x,
+ pubKey1.y)
}
if !pubKey1.IsEqual(pubKey1Copy) {
- t.Fatalf(
- "bad public key equality check: (%v, %v) == (%v, %v)",
- pubKey1.x, pubKey1.y, pubKey1Copy.x, pubKey1Copy.y,
- )
+ t.Fatalf("bad public key equality check: (%v, %v) == (%v, %v)",
+ pubKey1.x, pubKey1.y, pubKey1Copy.x, pubKey1Copy.y)
}
if pubKey1.IsEqual(pubKey2) {
- t.Fatalf(
- "bad public key equality check: (%v, %v) != (%v, %v)",
- pubKey1.x, pubKey1.y, pubKey2.x, pubKey2.y,
- )
+ t.Fatalf("bad public key equality check: (%v, %v) != (%v, %v)",
+ pubKey1.x, pubKey1.y, pubKey2.x, pubKey2.y)
}
}
@@ -390,31 +370,29 @@ func TestPublicKeyAsJacobian(t *testing.T) {
pubKey string // hex encoded serialized compressed pubkey
wantX string // hex encoded expected X coordinate
wantY string // hex encoded expected Y coordinate
- }{
- {
- name: "public key for secret key 0x01",
- pubKey: "0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
- wantX: "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
- wantY: "483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8",
- }, {
- name: "public for secret key 0x03",
- pubKey: "02f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9",
- wantX: "f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9",
- wantY: "388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672",
- }, {
- name: "public for secret key 0x06",
- pubKey: "03fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556",
- wantX: "fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556",
- wantY: "ae12777aacfbb620f3be96017f45c560de80f0f6518fe4a03c870c36b075f297",
- },
- }
+ }{{
+ name: "public key for secret key 0x01",
+ pubKey: "0279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
+ wantX: "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
+ wantY: "483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8",
+ }, {
+ name: "public for secret key 0x03",
+ pubKey: "02f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9",
+ wantX: "f9308a019258c31049344f85f89d5229b531c845836f99b08601f113bce036f9",
+ wantY: "388f7b0f632de8140fe337e62a37f3566500a99934c2231b6cb9fd7584b8e672",
+ }, {
+ name: "public for secret key 0x06",
+ pubKey: "03fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556",
+ wantX: "fff97bd5755eeea420453a14355235d382f6472f8568a18b2f057a1460297556",
+ wantY: "ae12777aacfbb620f3be96017f45c560de80f0f6518fe4a03c870c36b075f297",
+ }}
for _, test := range tests {
// Parse the test data.
pubKeyBytes := hexToBytes(test.pubKey)
wantX := hexToFieldVal(test.wantX)
wantY := hexToFieldVal(test.wantY)
pubKey, err := ParsePubKey(pubKeyBytes)
- if chk.E(err) {
+ if err != nil {
t.Errorf("%s: failed to parse public key: %v", test.name, err)
continue
}
@@ -423,24 +401,18 @@ func TestPublicKeyAsJacobian(t *testing.T) {
var point JacobianPoint
pubKey.AsJacobian(&point)
if !point.Z.IsOne() {
- t.Errorf(
- "%s: invalid Z coordinate -- got %v, want 1", test.name,
- point.Z,
- )
+ t.Errorf("%s: invalid Z coordinate -- got %v, want 1", test.name,
+ point.Z)
continue
}
if !point.X.Equals(wantX) {
- t.Errorf(
- "%s: invalid X coordinate - got %v, want %v", test.name,
- point.X, wantX,
- )
+ t.Errorf("%s: invalid X coordinate - got %v, want %v", test.name,
+ point.X, wantX)
continue
}
if !point.Y.Equals(wantY) {
- t.Errorf(
- "%s: invalid Y coordinate - got %v, want %v", test.name,
- point.Y, wantY,
- )
+ t.Errorf("%s: invalid Y coordinate - got %v, want %v", test.name,
+ point.Y, wantY)
continue
}
}
@@ -454,29 +426,27 @@ func TestPublicKeyIsOnCurve(t *testing.T) {
pubX string // hex encoded x coordinate for pubkey to serialize
pubY string // hex encoded y coordinate for pubkey to serialize
want bool // expected result
- }{
- {
- name: "valid with even y",
- pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
- pubY: "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c",
- want: true,
- }, {
- name: "valid with odd y",
- pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
- pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- want: true,
- }, {
- name: "invalid due to x coord",
- pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
- pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
- want: false,
- }, {
- name: "invalid due to y coord",
- pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
- pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4",
- want: false,
- },
- }
+ }{{
+ name: "valid with even y",
+ pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
+ pubY: "4d1f1522047b33068bbb9b07d1e9f40564749b062b3fc0666479bc08a94be98c",
+ want: true,
+ }, {
+ name: "valid with odd y",
+ pubX: "11db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
+ pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ want: true,
+ }, {
+ name: "invalid due to x coord",
+ pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
+ pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a3",
+ want: false,
+ }, {
+ name: "invalid due to y coord",
+ pubX: "15db93e1dcdb8a016b49840f8c53bc1eb68a382e97b1482ecad7b148a6909a5c",
+ pubY: "b2e0eaddfb84ccf9744464f82e160bfa9b8b64f9d4c03f999b8643f656b412a4",
+ want: false,
+ }}
for _, test := range tests {
// Parse the test data.
x, y := hexToFieldVal(test.pubX), hexToFieldVal(test.pubY)
@@ -484,10 +454,8 @@ func TestPublicKeyIsOnCurve(t *testing.T) {
result := pubKey.IsOnCurve()
if result != test.want {
- t.Errorf(
- "%s: mismatched is on curve result -- got %v, want %v",
- test.name, result, test.want,
- )
+ t.Errorf("%s: mismatched is on curve result -- got %v, want %v",
+ test.name, result, test.want)
continue
}
}
diff --git a/ec/secp256k1/seckey.go b/ec/secp256k1/seckey.go
index f340ea4..5c628a8 100644
--- a/ec/secp256k1/seckey.go
+++ b/ec/secp256k1/seckey.go
@@ -8,7 +8,6 @@ package secp256k1
import (
"crypto/rand"
"io"
-
"orly.dev/chk"
)
diff --git a/ec/secp256k1/seckey_bench_test.go b/ec/secp256k1/seckey_bench_test.go
index 2895576..bbede9a 100644
--- a/ec/secp256k1/seckey_bench_test.go
+++ b/ec/secp256k1/seckey_bench_test.go
@@ -6,8 +6,6 @@ package secp256k1
import (
"testing"
-
- "orly.dev/chk"
)
// BenchmarkSecretKeyGenerate benchmarks generating new cryptographically
@@ -17,7 +15,7 @@ func BenchmarkSecretKeyGenerate(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := GenerateSecretKey()
- if chk.E(err) {
+ if err != nil {
b.Fatal(err)
}
}
diff --git a/ec/secp256k1/seckey_test.go b/ec/secp256k1/seckey_test.go
index 4899116..08cc3ad 100644
--- a/ec/secp256k1/seckey_test.go
+++ b/ec/secp256k1/seckey_test.go
@@ -11,14 +11,12 @@ import (
"errors"
"math/big"
"testing"
-
- "orly.dev/chk"
)
// TestGenerateSecretKey ensures the key generation works as expected.
func TestGenerateSecretKey(t *testing.T) {
sec, err := GenerateSecretKey()
- if chk.E(err) {
+ if err != nil {
t.Errorf("failed to generate secret key: %s", err)
return
}
@@ -32,7 +30,7 @@ func TestGenerateSecretKey(t *testing.T) {
// entropy source works as expected.
func TestGenerateSecretKeyFromRand(t *testing.T) {
sec, err := GenerateSecretKeyFromRand(rand.Reader)
- if chk.E(err) {
+ if err != nil {
t.Errorf("failed to generate secret key: %s", err)
return
}
@@ -63,35 +61,31 @@ func TestGenerateSecretKeyCorners(t *testing.T) {
// 4th invocation: 1 (32-byte big endian)
oneModN := hexToModNScalar("01")
var numReads int
- mockReader := mockSecretKeyReaderFunc(
- func(p []byte) (int, error) {
- numReads++
- switch numReads {
- case 1:
- return copy(p, bytes.Repeat([]byte{0x00}, len(p))), nil
- case 2:
- return copy(p, curveParams.N.Bytes()), nil
- case 3:
- nPlusOne := new(big.Int).Add(curveParams.N, big.NewInt(1))
- return copy(p, nPlusOne.Bytes()), nil
- }
- oneModNBytes := oneModN.Bytes()
- return copy(p, oneModNBytes[:]), nil
- },
- )
+ mockReader := mockSecretKeyReaderFunc(func(p []byte) (int, error) {
+ numReads++
+ switch numReads {
+ case 1:
+ return copy(p, bytes.Repeat([]byte{0x00}, len(p))), nil
+ case 2:
+ return copy(p, curveParams.N.Bytes()), nil
+ case 3:
+ nPlusOne := new(big.Int).Add(curveParams.N, big.NewInt(1))
+ return copy(p, nPlusOne.Bytes()), nil
+ }
+ oneModNBytes := oneModN.Bytes()
+ return copy(p, oneModNBytes[:]), nil
+ })
// Generate a secret key using the mock reader and ensure the resulting key
// is the expected one. It should be the value "1" since the other values
// the sequence produces are invalid and thus should be rejected.
sec, err := GenerateSecretKeyFromRand(mockReader)
- if chk.E(err) {
+ if err != nil {
t.Errorf("failed to generate secret key: %s", err)
return
}
if !sec.Key.Equals(oneModN) {
- t.Fatalf(
- "unexpected secret key -- got: %x, want %x", sec.Serialize(),
- oneModN.Bytes(),
- )
+ t.Fatalf("unexpected secret key -- got: %x, want %x", sec.Serialize(),
+ oneModN.Bytes())
}
}
@@ -100,11 +94,9 @@ func TestGenerateSecretKeyCorners(t *testing.T) {
func TestGenerateSecretKeyError(t *testing.T) {
// Create a mock reader that returns an error.
errDisabled := errors.New("disabled")
- mockReader := mockSecretKeyReaderFunc(
- func(p []byte) (int, error) {
- return 0, errDisabled
- },
- )
+ mockReader := mockSecretKeyReaderFunc(func(p []byte) (int, error) {
+ return 0, errDisabled
+ })
// Generate a secret key using the mock reader and ensure the expected
// error is returned.
_, err := GenerateSecretKeyFromRand(mockReader)
@@ -121,17 +113,15 @@ func TestSecKeys(t *testing.T) {
name string
sec string // hex encoded secret key to test
pub string // expected hex encoded serialized compressed public key
- }{
- {
- name: "random secret key 1",
- sec: "eaf02ca348c524e6392655ba4d29603cd1a7347d9d65cfe93ce1ebffdca22694",
- pub: "025ceeba2ab4a635df2c0301a3d773da06ac5a18a7c3e0d09a795d7e57d233edf1",
- }, {
- name: "random secret key 2",
- sec: "24b860d0651db83feba821e7a94ba8b87162665509cefef0cbde6a8fbbedfe7c",
- pub: "032a6e51bf218085647d330eac2fafaeee07617a777ad9e8e7141b4cdae92cb637",
- },
- }
+ }{{
+ name: "random secret key 1",
+ sec: "eaf02ca348c524e6392655ba4d29603cd1a7347d9d65cfe93ce1ebffdca22694",
+ pub: "025ceeba2ab4a635df2c0301a3d773da06ac5a18a7c3e0d09a795d7e57d233edf1",
+ }, {
+ name: "random secret key 2",
+ sec: "24b860d0651db83feba821e7a94ba8b87162665509cefef0cbde6a8fbbedfe7c",
+ pub: "032a6e51bf218085647d330eac2fafaeee07617a777ad9e8e7141b4cdae92cb637",
+ }}
for _, test := range tests {
// Parse test data.
@@ -143,18 +133,14 @@ func TestSecKeys(t *testing.T) {
serializedPubKey := pub.SerializeCompressed()
if !bytes.Equal(serializedPubKey, wantPubKeyBytes) {
- t.Errorf(
- "%s unexpected serialized public key - got: %x, want: %x",
- test.name, serializedPubKey, wantPubKeyBytes,
- )
+ t.Errorf("%s unexpected serialized public key - got: %x, want: %x",
+ test.name, serializedPubKey, wantPubKeyBytes)
}
serializedSecKey := sec.Serialize()
if !bytes.Equal(serializedSecKey, secKeyBytes) {
- t.Errorf(
- "%s unexpected serialized secret key - got: %x, want: %x",
- test.name, serializedSecKey, secKeyBytes,
- )
+ t.Errorf("%s unexpected serialized secret key - got: %x, want: %x",
+ test.name, serializedSecKey, secKeyBytes)
}
}
}
diff --git a/ec/secp256k1/util_test.go b/ec/secp256k1/util_test.go
new file mode 100644
index 0000000..8ab9b0b
--- /dev/null
+++ b/ec/secp256k1/util_test.go
@@ -0,0 +1,9 @@
+package secp256k1_test
+
+import (
+ "orly.dev/lol"
+)
+
+var (
+ log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf
+)
diff --git a/ec/taproot/taproot.go b/ec/taproot/taproot.go
index 2909902..032a837 100644
--- a/ec/taproot/taproot.go
+++ b/ec/taproot/taproot.go
@@ -6,8 +6,8 @@ import (
"bytes"
"errors"
"fmt"
-
"orly.dev/chk"
+
"orly.dev/ec/bech32"
"orly.dev/ec/chaincfg"
)
diff --git a/ec/util_test.go b/ec/util_test.go
new file mode 100644
index 0000000..bcbd11a
--- /dev/null
+++ b/ec/util_test.go
@@ -0,0 +1,9 @@
+package btcec_test
+
+import (
+ "orly.dev/lol"
+)
+
+var (
+ log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf
+)
diff --git a/encryption/README.md b/encryption/README.md
new file mode 100644
index 0000000..2ca8f06
--- /dev/null
+++ b/encryption/README.md
@@ -0,0 +1 @@
+Code copied from https://github.com/paulmillr/nip44/tree/e7aed61aaf77240ac10c325683eed14b22e7950f/go.
diff --git a/encryption/doc.go b/encryption/doc.go
new file mode 100644
index 0000000..07bdbbb
--- /dev/null
+++ b/encryption/doc.go
@@ -0,0 +1,3 @@
+// Package encryption contains the message encryption schemes defined in NIP-04
+// and NIP-44, used for encrypting the content of nostr messages.
+package encryption
diff --git a/encryption/nip4.go b/encryption/nip4.go
new file mode 100644
index 0000000..1ab585d
--- /dev/null
+++ b/encryption/nip4.go
@@ -0,0 +1,116 @@
+package encryption
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/cipher"
+ "encoding/base64"
+ "orly.dev/chk"
+ "orly.dev/errorf"
+ "strings"
+
+ "lukechampine.com/frand"
+
+ "orly.dev/hex"
+ "orly.dev/p256k"
+)
+
+// ComputeSharedSecret returns a shared secret key used to encrypt messages. The private and public keys should be hex
+// encoded. Uses the Diffie-Hellman key exchange (ECDH) (RFC 4753).
+func ComputeSharedSecret(pkh, skh string) (sharedSecret []byte, err error) {
+ var skb, pkb []byte
+ if skb, err = hex.Dec(skh); chk.E(err) {
+ return
+ }
+ if pkb, err = hex.Dec(pkh); chk.E(err) {
+ return
+ }
+ signer := new(p256k.Signer)
+ if err = signer.InitSec(skb); chk.E(err) {
+ return
+ }
+ if sharedSecret, err = signer.ECDH(pkb); chk.E(err) {
+ return
+ }
+ return
+}
+
+// EncryptNip4 encrypts message with key using aes-256-cbc. key should be the shared secret generated by
+// ComputeSharedSecret.
+//
+// Returns: base64(encrypted_bytes) + "?iv=" + base64(initialization_vector).
+//
+// Deprecated: upgrade to using Decrypt with the NIP-44 algorithm.
+func EncryptNip4(msg string, key []byte) (ct []byte, err error) {
+ // block size is 16 bytes
+ iv := make([]byte, 16)
+ if _, err = frand.Read(iv); chk.E(err) {
+ err = errorf.E("error creating initialization vector: %w", err)
+ return
+ }
+ // automatically picks aes-256 based on key length (32 bytes)
+ var block cipher.Block
+ if block, err = aes.NewCipher(key); chk.E(err) {
+ err = errorf.E("error creating block cipher: %w", err)
+ return
+ }
+ mode := cipher.NewCBCEncrypter(block, iv)
+ plaintext := []byte(msg)
+ // add padding
+ base := len(plaintext)
+ // this will be a number between 1 and 16 (inclusive), never 0
+ bs := block.BlockSize()
+ padding := bs - base%bs
+ // encode the padding in all the padding bytes themselves
+ padText := bytes.Repeat([]byte{byte(padding)}, padding)
+ paddedMsgBytes := append(plaintext, padText...)
+ ciphertext := make([]byte, len(paddedMsgBytes))
+ mode.CryptBlocks(ciphertext, paddedMsgBytes)
+ return []byte(base64.StdEncoding.EncodeToString(ciphertext) + "?iv=" +
+ base64.StdEncoding.EncodeToString(iv)), nil
+}
+
+// DecryptNip4 decrypts a content string using the shared secret key. The inverse operation to message ->
+// EncryptNip4(message, key).
+//
+// Deprecated: upgrade to using Decrypt with the NIP-44 algorithm.
+func DecryptNip4(content string, key []byte) (msg []byte, err error) {
+ parts := strings.Split(content, "?iv=")
+ if len(parts) < 2 {
+ return nil, errorf.E(
+ "error parsing encrypted message: no initialization vector",
+ )
+ }
+ var ciphertext []byte
+ if ciphertext, err = base64.StdEncoding.DecodeString(parts[0]); chk.E(err) {
+ err = errorf.E("error decoding ciphertext from base64: %w", err)
+ return
+ }
+ var iv []byte
+ if iv, err = base64.StdEncoding.DecodeString(parts[1]); chk.E(err) {
+ err = errorf.E("error decoding iv from base64: %w", err)
+ return
+ }
+ var block cipher.Block
+ if block, err = aes.NewCipher(key); chk.E(err) {
+ err = errorf.E("error creating block cipher: %w", err)
+ return
+ }
+ mode := cipher.NewCBCDecrypter(block, iv)
+ msg = make([]byte, len(ciphertext))
+ mode.CryptBlocks(msg, ciphertext)
+ // remove padding
+ var (
+ plaintextLen = len(msg)
+ )
+ if plaintextLen > 0 {
+ // the padding amount is encoded in the padding bytes themselves
+ padding := int(msg[plaintextLen-1])
+ if padding > plaintextLen {
+ err = errorf.E("invalid padding amount: %d", padding)
+ return
+ }
+ msg = msg[0 : plaintextLen-padding]
+ }
+ return msg, nil
+}
diff --git a/encryption/nip44.go b/encryption/nip44.go
new file mode 100644
index 0000000..0387a6d
--- /dev/null
+++ b/encryption/nip44.go
@@ -0,0 +1,241 @@
+package encryption
+
+import (
+ "bytes"
+ "crypto/hmac"
+ "crypto/rand"
+ "encoding/base64"
+ "encoding/binary"
+ "io"
+ "math"
+ "orly.dev/chk"
+ "orly.dev/errorf"
+
+ "golang.org/x/crypto/chacha20"
+ "golang.org/x/crypto/hkdf"
+
+ "orly.dev/sha256"
+)
+
+const (
+ version byte = 2
+ MinPlaintextSize = 0x0001 // 1b msg => padded to 32b
+ MaxPlaintextSize = 0xffff // 65535 (64kb-1) => padded to 64kb
+)
+
+type Opts struct {
+ err error
+ nonce []byte
+}
+
+// Deprecated: use WithCustomNonce instead of WithCustomSalt, so the naming is less confusing
+var WithCustomSalt = WithCustomNonce
+
+// WithCustomNonce enables using a custom nonce (salt) instead of using the
+// system crypto/rand entropy source.
+func WithCustomNonce(salt []byte) func(opts *Opts) {
+ return func(opts *Opts) {
+ if len(salt) != 32 {
+ opts.err = errorf.E("salt must be 32 bytes, got %d", len(salt))
+ }
+ opts.nonce = salt
+ }
+}
+
+// Encrypt data using a provided symmetric conversation key using NIP-44
+// encryption (chacha20 cipher stream and sha256 HMAC).
+func Encrypt(
+ plaintext string, conversationKey []byte,
+ applyOptions ...func(opts *Opts),
+) (
+ cipherString string,
+ err error,
+) {
+
+ var o Opts
+ for _, apply := range applyOptions {
+ apply(&o)
+ }
+ if chk.E(o.err) {
+ err = o.err
+ return
+ }
+ if o.nonce == nil {
+ o.nonce = make([]byte, 32)
+ if _, err = rand.Read(o.nonce); chk.E(err) {
+ return
+ }
+ }
+ var enc, cc20nonce, auth []byte
+ if enc, cc20nonce, auth, err = getKeys(
+ conversationKey, o.nonce,
+ ); chk.E(err) {
+ return
+ }
+ plain := []byte(plaintext)
+ size := len(plain)
+ if size < MinPlaintextSize || size > MaxPlaintextSize {
+ err = errorf.E("plaintext should be between 1b and 64kB")
+ return
+ }
+ padding := CalcPadding(size)
+ padded := make([]byte, 2+padding)
+ binary.BigEndian.PutUint16(padded, uint16(size))
+ copy(padded[2:], plain)
+ var cipher []byte
+ if cipher, err = encrypt(enc, cc20nonce, padded); chk.E(err) {
+ return
+ }
+ var mac []byte
+ if mac, err = sha256Hmac(auth, cipher, o.nonce); chk.E(err) {
+ return
+ }
+ ct := make([]byte, 0, 1+32+len(cipher)+32)
+ ct = append(ct, version)
+ ct = append(ct, o.nonce...)
+ ct = append(ct, cipher...)
+ ct = append(ct, mac...)
+ cipherString = base64.StdEncoding.EncodeToString(ct)
+ return
+}
+
+// Decrypt data that has been encoded using a provided symmetric conversation
+// key using NIP-44 encryption (chacha20 cipher stream and sha256 HMAC).
+func Decrypt(b64ciphertextWrapped string, conversationKey []byte) (
+ plaintext string,
+ err error,
+) {
+ cLen := len(b64ciphertextWrapped)
+ if cLen < 132 || cLen > 87472 {
+ err = errorf.E("invalid payload length: %d", cLen)
+ return
+ }
+ if b64ciphertextWrapped[:1] == "#" {
+ err = errorf.E("unknown version")
+ return
+ }
+ var decoded []byte
+ if decoded, err = base64.StdEncoding.DecodeString(b64ciphertextWrapped); chk.E(err) {
+ return
+ }
+ if decoded[0] != version {
+ err = errorf.E("unknown version %d", decoded[0])
+ return
+ }
+ dLen := len(decoded)
+ if dLen < 99 || dLen > 65603 {
+ err = errorf.E("invalid data length: %d", dLen)
+ return
+ }
+ nonce, ciphertext, givenMac := decoded[1:33], decoded[33:dLen-32], decoded[dLen-32:]
+ var enc, cc20nonce, auth []byte
+ if enc, cc20nonce, auth, err = getKeys(conversationKey, nonce); chk.E(err) {
+ return
+ }
+ var expectedMac []byte
+ if expectedMac, err = sha256Hmac(auth, ciphertext, nonce); chk.E(err) {
+ return
+ }
+ if !bytes.Equal(givenMac, expectedMac) {
+ err = errorf.E("invalid hmac")
+ return
+ }
+ var padded []byte
+ if padded, err = encrypt(enc, cc20nonce, ciphertext); chk.E(err) {
+ return
+ }
+ unpaddedLen := binary.BigEndian.Uint16(padded[0:2])
+ if unpaddedLen < uint16(MinPlaintextSize) || unpaddedLen > uint16(MaxPlaintextSize) ||
+ len(padded) != 2+CalcPadding(int(unpaddedLen)) {
+ err = errorf.E("invalid padding")
+ return
+ }
+ unpadded := padded[2:][:unpaddedLen]
+ if len(unpadded) == 0 || len(unpadded) != int(unpaddedLen) {
+ err = errorf.E("invalid padding")
+ return
+ }
+ plaintext = string(unpadded)
+ return
+}
+
+// GenerateConversationKey performs an ECDH key generation hashed with the nip-44-v2 using hkdf.
+func GenerateConversationKey(pkh, skh string) (ck []byte, err error) {
+ if skh >= "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141" ||
+ skh == "0000000000000000000000000000000000000000000000000000000000000000" {
+ err = errorf.E(
+ "invalid private key: x coordinate %s is not on the secp256k1 curve",
+ skh,
+ )
+ return
+ }
+ var shared []byte
+ if shared, err = ComputeSharedSecret(pkh, skh); chk.E(err) {
+ return
+ }
+ ck = hkdf.Extract(sha256.New, shared, []byte("nip44-v2"))
+ return
+}
+
+func encrypt(key, nonce, message []byte) (dst []byte, err error) {
+ var cipher *chacha20.Cipher
+ if cipher, err = chacha20.NewUnauthenticatedCipher(key, nonce); chk.E(err) {
+ return
+ }
+ dst = make([]byte, len(message))
+ cipher.XORKeyStream(dst, message)
+ return
+}
+
+func sha256Hmac(key, ciphertext, nonce []byte) (h []byte, err error) {
+ if len(nonce) != sha256.Size {
+ err = errorf.E("nonce aad must be 32 bytes")
+ return
+ }
+ hm := hmac.New(sha256.New, key)
+ hm.Write(nonce)
+ hm.Write(ciphertext)
+ h = hm.Sum(nil)
+ return
+}
+
+func getKeys(conversationKey, nonce []byte) (
+ enc, cc20nonce, auth []byte, err error,
+) {
+ if len(conversationKey) != 32 {
+ err = errorf.E("conversation key must be 32 bytes")
+ return
+ }
+ if len(nonce) != 32 {
+ err = errorf.E("nonce must be 32 bytes")
+ return
+ }
+ r := hkdf.Expand(sha256.New, conversationKey, nonce)
+ enc = make([]byte, 32)
+ if _, err = io.ReadFull(r, enc); chk.E(err) {
+ return
+ }
+ cc20nonce = make([]byte, 12)
+ if _, err = io.ReadFull(r, cc20nonce); chk.E(err) {
+ return
+ }
+ auth = make([]byte, 32)
+ if _, err = io.ReadFull(r, auth); chk.E(err) {
+ return
+ }
+ return
+}
+
+// CalcPadding creates padding for the message payload that is precisely a power
+// of two in order to reduce the chances of plaintext attack. This is plainly
+// retarded because it could blow out the message size a lot when just a random few
+// dozen bytes and a length prefix would achieve the same result.
+func CalcPadding(sLen int) (l int) {
+ if sLen <= 32 {
+ return 32
+ }
+ nextPower := 1 << int(math.Floor(math.Log2(float64(sLen-1)))+1)
+ chunk := int(math.Max(32, float64(nextPower/8)))
+ l = chunk * int(math.Floor(float64((sLen-1)/chunk))+1)
+ return
+}
diff --git a/encryption/nip44_test.go b/encryption/nip44_test.go
new file mode 100644
index 0000000..4b16ea5
--- /dev/null
+++ b/encryption/nip44_test.go
@@ -0,0 +1,1368 @@
+package encryption
+
+import (
+ "crypto/rand"
+ "fmt"
+ "hash"
+ "orly.dev/chk"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "orly.dev/hex"
+ "orly.dev/keys"
+ "orly.dev/sha256"
+)
+
+func assertCryptPriv(
+ t *testing.T,
+ sk1, sk2, conversationKey, salt, plaintext, expected string,
+) {
+ var (
+ k1, s []byte
+ actual, decrypted string
+ ok bool
+ err error
+ )
+ k1, err = hex.Dec(conversationKey)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for conversation key: %v", err,
+ ); !ok {
+ return
+ }
+ if ok = assertConversationKeyGenerationSec(
+ t, sk1, sk2, conversationKey,
+ ); !ok {
+ return
+ }
+ s, err = hex.Dec(salt)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for salt: %v", err,
+ ); !ok {
+ return
+ }
+ actual, err = Encrypt(plaintext, k1, WithCustomNonce(s))
+ if ok = assert.NoError(t, err, "encryption failed: %v", err); !ok {
+ return
+ }
+ if ok = assert.Equalf(t, expected, actual, "wrong encryption"); !ok {
+ return
+ }
+ decrypted, err = Decrypt(expected, k1)
+ if ok = assert.NoErrorf(t, err, "decryption failed: %v", err); !ok {
+ return
+ }
+ assert.Equal(t, decrypted, plaintext, "wrong decryption")
+}
+
+func assertDecryptFail(
+ t *testing.T, conversationKey, plaintext, ciphertext, msg string,
+) {
+ var (
+ k1 []byte
+ ok bool
+ err error
+ )
+ k1, err = hex.Dec(conversationKey)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for conversation key: %v", err,
+ ); !ok {
+ return
+ }
+ _, err = Decrypt(ciphertext, k1)
+ assert.ErrorContains(t, err, msg)
+}
+
+func assertConversationKeyFail(
+ t *testing.T, priv string, pub string, msg string,
+) {
+ _, err := GenerateConversationKey(pub, priv)
+ assert.ErrorContains(t, err, msg)
+}
+
+func assertConversationKeyGeneration(
+ t *testing.T, priv, pub, conversationKey string,
+) bool {
+ var (
+ actualConversationKey,
+ expectedConversationKey []byte
+ ok bool
+ err error
+ )
+ expectedConversationKey, err = hex.Dec(conversationKey)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for conversation key: %v", err,
+ ); !ok {
+ return false
+ }
+ actualConversationKey, err = GenerateConversationKey(pub, priv)
+ if ok = assert.NoErrorf(
+ t, err, "conversation key generation failed: %v", err,
+ ); !ok {
+ return false
+ }
+ if ok = assert.Equalf(
+ t, expectedConversationKey, actualConversationKey,
+ "wrong conversation key",
+ ); !ok {
+ return false
+ }
+ return true
+}
+
+func assertConversationKeyGenerationSec(
+ t *testing.T, sk1, sk2, conversationKey string,
+) bool {
+ pub2, err := keys.GetPublicKeyHex(sk2)
+ if ok := assert.NoErrorf(
+ t, err, "failed to derive pubkey from sk2: %v", err,
+ ); !ok {
+ return false
+ }
+ return assertConversationKeyGeneration(t, sk1, pub2, conversationKey)
+}
+
// assertConversationKeyGenerationPub checks conversation key derivation
// when the counterparty is already given as a public key; it is a thin
// alias of assertConversationKeyGeneration kept so pub-based vectors read
// symmetrically with the sec-based ones.
func assertConversationKeyGenerationPub(
	t *testing.T, sk, pub, conversationKey string,
) bool {
	return assertConversationKeyGeneration(t, sk, pub, conversationKey)
}
+
+func assertMessageKeyGeneration(
+ t *testing.T,
+ conversationKey, salt, chachaKey, chachaSalt, hmacKey string,
+) bool {
+ var (
+ convKey, convSalt, actualChaChaKey, expectedChaChaKey, actualChaChaNonce,
+ expectedChaChaNonce, actualHmacKey, expectedHmacKey []byte
+ ok bool
+ err error
+ )
+ convKey, err = hex.Dec(conversationKey)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for convKey: %v", err,
+ ); !ok {
+ return false
+ }
+ convSalt, err = hex.Dec(salt)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for salt: %v", err,
+ ); !ok {
+ return false
+ }
+ expectedChaChaKey, err = hex.Dec(chachaKey)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for encrypt key: %v", err,
+ ); !ok {
+ return false
+ }
+ expectedChaChaNonce, err = hex.Dec(chachaSalt)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for encrypt nonce: %v", err,
+ ); !ok {
+ return false
+ }
+ expectedHmacKey, err = hex.Dec(hmacKey)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for hmac key: %v", err,
+ ); !ok {
+ return false
+ }
+ actualChaChaKey, actualChaChaNonce, actualHmacKey, err = getKeys(
+ convKey, convSalt,
+ )
+ if ok = assert.NoErrorf(
+ t, err, "message key generation failed: %v", err,
+ ); !ok {
+ return false
+ }
+ if ok = assert.Equalf(
+ t, expectedChaChaKey, actualChaChaKey, "wrong encrypt key",
+ ); !ok {
+ return false
+ }
+ if ok = assert.Equalf(
+ t, expectedChaChaNonce, actualChaChaNonce,
+ "wrong encrypt nonce",
+ ); !ok {
+ return false
+ }
+ if ok = assert.Equalf(
+ t, expectedHmacKey, actualHmacKey, "wrong hmac key",
+ ); !ok {
+ return false
+ }
+ return true
+}
+
+func assertCryptLong(
+ t *testing.T, conversationKey, salt, pattern string, repeat int,
+ plaintextSha256, payloadSha256 string,
+) {
+ var (
+ convKey, convSalt []byte
+ plaintext, actualPlaintextSha256, actualPayload, actualPayloadSha256 string
+ h hash.Hash
+ ok bool
+ err error
+ )
+ convKey, err = hex.Dec(conversationKey)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for convKey: %v", err,
+ ); !ok {
+ return
+ }
+ convSalt, err = hex.Dec(salt)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for salt: %v", err,
+ ); !ok {
+ return
+ }
+ plaintext = ""
+ for i := 0; i < repeat; i++ {
+ plaintext += pattern
+ }
+ h = sha256.New()
+ h.Write([]byte(plaintext))
+ actualPlaintextSha256 = hex.Enc(h.Sum(nil))
+ if ok = assert.Equalf(
+ t, plaintextSha256, actualPlaintextSha256,
+ "invalid plaintext sha256 hash: %v", err,
+ ); !ok {
+ return
+ }
+ actualPayload, err = Encrypt(plaintext, convKey, WithCustomNonce(convSalt))
+ if ok = assert.NoErrorf(t, err, "encryption failed: %v", err); !ok {
+ return
+ }
+ h.Reset()
+ h.Write([]byte(actualPayload))
+ actualPayloadSha256 = hex.Enc(h.Sum(nil))
+ if ok = assert.Equalf(
+ t, payloadSha256, actualPayloadSha256,
+ "invalid payload sha256 hash: %v", err,
+ ); !ok {
+ return
+ }
+}
+
+func TestCryptPriv001(t *testing.T) {
+ assertCryptPriv(
+ t,
+ "0000000000000000000000000000000000000000000000000000000000000001",
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ "c41c775356fd92eadc63ff5a0dc1da211b268cbea22316767095b2871ea1412d",
+ "0000000000000000000000000000000000000000000000000000000000000001",
+ "a",
+ "AgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABee0G5VSK0/9YypIObAtDKfYEAjD35uVkHyB0F4DwrcNaCXlCWZKaArsGrY6M9wnuTMxWfp1RTN9Xga8no+kF5Vsb",
+ )
+}
+
+func TestCryptPriv002(t *testing.T) {
+ assertCryptPriv(
+ t,
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ "0000000000000000000000000000000000000000000000000000000000000001",
+ "c41c775356fd92eadc63ff5a0dc1da211b268cbea22316767095b2871ea1412d",
+ "f00000000000000000000000000000f00000000000000000000000000000000f",
+ "🍕🫃",
+ "AvAAAAAAAAAAAAAAAAAAAPAAAAAAAAAAAAAAAAAAAAAPSKSK6is9ngkX2+cSq85Th16oRTISAOfhStnixqZziKMDvB0QQzgFZdjLTPicCJaV8nDITO+QfaQ61+KbWQIOO2Yj",
+ )
+}
+
+func TestCryptPriv003(t *testing.T) {
+ assertCryptPriv(
+ t,
+ "5c0c523f52a5b6fad39ed2403092df8cebc36318b39383bca6c00808626fab3a",
+ "4b22aa260e4acb7021e32f38a6cdf4b673c6a277755bfce287e370c924dc936d",
+ "3e2b52a63be47d34fe0a80e34e73d436d6963bc8f39827f327057a9986c20a45",
+ "b635236c42db20f021bb8d1cdff5ca75dd1a0cc72ea742ad750f33010b24f73b",
+ "表ポあA鷗ŒéB逍Üߪąñ丂㐀𠀀",
+ "ArY1I2xC2yDwIbuNHN/1ynXdGgzHLqdCrXUPMwELJPc7s7JqlCMJBAIIjfkpHReBPXeoMCyuClwgbT419jUWU1PwaNl4FEQYKCDKVJz+97Mp3K+Q2YGa77B6gpxB/lr1QgoqpDf7wDVrDmOqGoiPjWDqy8KzLueKDcm9BVP8xeTJIxs=",
+ )
+}
+
+func TestCryptPriv004(t *testing.T) {
+ assertCryptPriv(
+ t,
+ "8f40e50a84a7462e2b8d24c28898ef1f23359fff50d8c509e6fb7ce06e142f9c",
+ "b9b0a1e9cc20100c5faa3bbe2777303d25950616c4c6a3fa2e3e046f936ec2ba",
+ "d5a2f879123145a4b291d767428870f5a8d9e5007193321795b40183d4ab8c2b",
+ "b20989adc3ddc41cd2c435952c0d59a91315d8c5218d5040573fc3749543acaf",
+ "ability🤝的 ȺȾ",
+ "ArIJia3D3cQc0sQ1lSwNWakTFdjFIY1QQFc/w3SVQ6yvbG2S0x4Yu86QGwPTy7mP3961I1XqB6SFFTzqDZZavhxoWMj7mEVGMQIsh2RLWI5EYQaQDIePSnXPlzf7CIt+voTD",
+ )
+}
+
+func TestCryptPriv005(t *testing.T) {
+ assertCryptPriv(
+ t,
+ "875adb475056aec0b4809bd2db9aa00cff53a649e7b59d8edcbf4e6330b0995c",
+ "9c05781112d5b0a2a7148a222e50e0bd891d6b60c5483f03456e982185944aae",
+ "3b15c977e20bfe4b8482991274635edd94f366595b1a3d2993515705ca3cedb8",
+ "8d4442713eb9d4791175cb040d98d6fc5be8864d6ec2f89cf0895a2b2b72d1b1",
+ "pepper👀їжак",
+ "Ao1EQnE+udR5EXXLBA2Y1vxb6IZNbsL4nPCJWisrctGxY3AduCS+jTUgAAnfvKafkmpy15+i9YMwCdccisRa8SvzW671T2JO4LFSPX31K4kYUKelSAdSPwe9NwO6LhOsnoJ+",
+ )
+}
+
+func TestCryptPriv006(t *testing.T) {
+ assertCryptPriv(
+ t,
+ "eba1687cab6a3101bfc68fd70f214aa4cc059e9ec1b79fdb9ad0a0a4e259829f",
+ "dff20d262bef9dfd94666548f556393085e6ea421c8af86e9d333fa8747e94b3",
+ "4f1538411098cf11c8af216836444787c462d47f97287f46cf7edb2c4915b8a5",
+ "2180b52ae645fcf9f5080d81b1f0b5d6f2cd77ff3c986882bb549158462f3407",
+ "( ͡° ͜ʖ ͡°)",
+ "AiGAtSrmRfz59QgNgbHwtdbyzXf/PJhogrtUkVhGLzQHv4qhKQwnFQ54OjVMgqCea/Vj0YqBSdhqNR777TJ4zIUk7R0fnizp6l1zwgzWv7+ee6u+0/89KIjY5q1wu6inyuiv",
+ )
+}
+
+func TestCryptPriv007(t *testing.T) {
+ assertCryptPriv(
+ t,
+ "d5633530f5bcfebceb5584cfbbf718a30df0751b729dd9a789b9f30c0587d74e",
+ "b74e6a341fb134127272b795a08b59250e5fa45a82a2eb4095e4ce9ed5f5e214",
+ "75fe686d21a035f0c7cd70da64ba307936e5ca0b20710496a6b6b5f573377bdd",
+ "e4cd5f7ce4eea024bc71b17ad456a986a74ac426c2c62b0a15eb5c5c8f888b68",
+ "مُنَاقَشَةُ سُبُلِ اِسْتِخْدَامِ اللُّغَةِ فِي النُّظُمِ الْقَائِمَةِ وَفِيم يَخُصَّ التَّطْبِيقَاتُ الْحاسُوبِيَّةُ،",
+ "AuTNX3zk7qAkvHGxetRWqYanSsQmwsYrChXrXFyPiItoIBsWu1CB+sStla2M4VeANASHxM78i1CfHQQH1YbBy24Tng7emYW44ol6QkFD6D8Zq7QPl+8L1c47lx8RoODEQMvNCbOk5ffUV3/AhONHBXnffrI+0025c+uRGzfqpYki4lBqm9iYU+k3Tvjczq9wU0mkVDEaM34WiQi30MfkJdRbeeYaq6kNvGPunLb3xdjjs5DL720d61Flc5ZfoZm+CBhADy9D9XiVZYLKAlkijALJur9dATYKci6OBOoc2SJS2Clai5hOVzR0yVeyHRgRfH9aLSlWW5dXcUxTo7qqRjNf8W5+J4jF4gNQp5f5d0YA4vPAzjBwSP/5bGzNDslKfcAH",
+ )
+}
+
+func TestCryptPriv008(t *testing.T) {
+ assertCryptPriv(
+ t,
+ "d5633530f5bcfebceb5584cfbbf718a30df0751b729dd9a789b9f30c0587d74e",
+ "b74e6a341fb134127272b795a08b59250e5fa45a82a2eb4095e4ce9ed5f5e214",
+ "75fe686d21a035f0c7cd70da64ba307936e5ca0b20710496a6b6b5f573377bdd",
+ "e4cd5f7ce4eea024bc71b17ad456a986a74ac426c2c62b0a15eb5c5c8f888b68",
+ "مُنَاقَشَةُ سُبُلِ اِسْتِخْدَامِ اللُّغَةِ فِي النُّظُمِ الْقَائِمَةِ وَفِيم يَخُصَّ التَّطْبِيقَاتُ الْحاسُوبِيَّةُ،",
+ "AuTNX3zk7qAkvHGxetRWqYanSsQmwsYrChXrXFyPiItoIBsWu1CB+sStla2M4VeANASHxM78i1CfHQQH1YbBy24Tng7emYW44ol6QkFD6D8Zq7QPl+8L1c47lx8RoODEQMvNCbOk5ffUV3/AhONHBXnffrI+0025c+uRGzfqpYki4lBqm9iYU+k3Tvjczq9wU0mkVDEaM34WiQi30MfkJdRbeeYaq6kNvGPunLb3xdjjs5DL720d61Flc5ZfoZm+CBhADy9D9XiVZYLKAlkijALJur9dATYKci6OBOoc2SJS2Clai5hOVzR0yVeyHRgRfH9aLSlWW5dXcUxTo7qqRjNf8W5+J4jF4gNQp5f5d0YA4vPAzjBwSP/5bGzNDslKfcAH",
+ )
+}
+
+func TestCryptPriv009X(t *testing.T) {
+ assertCryptPriv(
+ t,
+ "d5633530f5bcfebceb5584cfbbf718a30df0751b729dd9a789b9f30c0587d74e",
+ "b74e6a341fb134127272b795a08b59250e5fa45a82a2eb4095e4ce9ed5f5e214",
+ "75fe686d21a035f0c7cd70da64ba307936e5ca0b20710496a6b6b5f573377bdd",
+ "38d1ca0abef9e5f564e89761a86cee04574b6825d3ef2063b10ad75899e4b023",
+ "الكل في المجمو عة (5)",
+ "AjjRygq++eX1ZOiXYahs7gRXS2gl0+8gY7EK11iZ5LAjbOTrlfrxak5Lki42v2jMPpLSicy8eHjsWkkMtF0i925vOaKG/ZkMHh9ccQBdfTvgEGKzztedqDCAWb5TP1YwU1PsWaiiqG3+WgVvJiO4lUdMHXL7+zKKx8bgDtowzz4QAwI=",
+ )
+}
+
+func TestCryptPriv010(t *testing.T) {
+ assertCryptPriv(
+ t,
+ "d5633530f5bcfebceb5584cfbbf718a30df0751b729dd9a789b9f30c0587d74e",
+ "b74e6a341fb134127272b795a08b59250e5fa45a82a2eb4095e4ce9ed5f5e214",
+ "75fe686d21a035f0c7cd70da64ba307936e5ca0b20710496a6b6b5f573377bdd",
+ "4f1a31909f3483a9e69c8549a55bbc9af25fa5bbecf7bd32d9896f83ef2e12e0",
+ "𝖑𝖆𝖟𝖞 社會科學院語學研究所",
+ "Ak8aMZCfNIOp5pyFSaVbvJryX6W77Pe9MtmJb4PvLhLgh/TsxPLFSANcT67EC1t/qxjru5ZoADjKVEt2ejdx+xGvH49mcdfbc+l+L7gJtkH7GLKpE9pQNQWNHMAmj043PAXJZ++fiJObMRR2mye5VHEANzZWkZXMrXF7YjuG10S1pOU=",
+ )
+}
+
+func TestCryptPriv011(t *testing.T) {
+ assertCryptPriv(
+ t,
+ "d5633530f5bcfebceb5584cfbbf718a30df0751b729dd9a789b9f30c0587d74e",
+ "b74e6a341fb134127272b795a08b59250e5fa45a82a2eb4095e4ce9ed5f5e214",
+ "75fe686d21a035f0c7cd70da64ba307936e5ca0b20710496a6b6b5f573377bdd",
+ "a3e219242d85465e70adcd640b564b3feff57d2ef8745d5e7a0663b2dccceb54",
+ "🙈 🙉 🙊 0️⃣ 1️⃣ 2️⃣ 3️⃣ 4️⃣ 5️⃣ 6️⃣ 7️⃣ 8️⃣ 9️⃣ 🔟 Powerلُلُصّبُلُلصّبُررً ॣ ॣh ॣ ॣ冗",
+ "AqPiGSQthUZecK3NZAtWSz/v9X0u+HRdXnoGY7LczOtUf05aMF89q1FLwJvaFJYICZoMYgRJHFLwPiOHce7fuAc40kX0wXJvipyBJ9HzCOj7CgtnC1/cmPCHR3s5AIORmroBWglm1LiFMohv1FSPEbaBD51VXxJa4JyWpYhreSOEjn1wd0lMKC9b+osV2N2tpbs+rbpQem2tRen3sWflmCqjkG5VOVwRErCuXuPb5+hYwd8BoZbfCrsiAVLd7YT44dRtKNBx6rkabWfddKSLtreHLDysOhQUVOp/XkE7OzSkWl6sky0Hva6qJJ/V726hMlomvcLHjE41iKmW2CpcZfOedg==",
+ )
+}
+
// TestCryptLong001-003 exercise Encrypt on long plaintexts (up to 65535
// repetitions of a short pattern); results are verified via SHA-256
// digests. Vector layout: conversation key, nonce/salt, pattern, repeat
// count, plaintext digest, payload digest.
func TestCryptLong001(t *testing.T) {
	assertCryptLong(
		t,
		"8fc262099ce0d0bb9b89bac05bb9e04f9bc0090acc181fef6840ccee470371ed",
		"326bcb2c943cd6bb717588c9e5a7e738edf6ed14ec5f5344caa6ef56f0b9cff7",
		"x",
		65535,
		"09ab7495d3e61a76f0deb12cb0306f0696cbb17ffc12131368c7a939f12f56d3",
		"90714492225faba06310bff2f249ebdc2a5e609d65a629f1c87f2d4ffc55330a",
	)
}

func TestCryptLong002(t *testing.T) {
	assertCryptLong(
		t,
		"56adbe3720339363ab9c3b8526ffce9fd77600927488bfc4b59f7a68ffe5eae0",
		"ad68da81833c2a8ff609c3d2c0335fd44fe5954f85bb580c6a8d467aa9fc5dd0",
		"!",
		65535,
		"6af297793b72ae092c422e552c3bb3cbc310da274bd1cf9e31023a7fe4a2d75e",
		"8013e45a109fad3362133132b460a2d5bce235fe71c8b8f4014793fb52a49844",
	)
}

// Multi-byte UTF-8 pattern: repeat counts bytes of pattern repetitions,
// not runes.
func TestCryptLong003(t *testing.T) {
	assertCryptLong(
		t,
		"7fc540779979e472bb8d12480b443d1e5eb1098eae546ef2390bee499bbf46be",
		"34905e82105c20de9a2f6cd385a0d541e6bcc10601d12481ff3a7575dc622033",
		"🦄",
		16383,
		"a249558d161b77297bc0cb311dde7d77190f6571b25c7e4429cd19044634a61f",
		"b3348422471da1f3c59d79acfe2fe103f3cd24488109e5b18734cdb5953afd15",
	)
}
+
+func TestConversationKeyFail001(t *testing.T) {
+ // sec1 higher than curve.n
+ assertConversationKeyFail(
+ t,
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "invalid private key: x coordinate ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff is not on the secp256k1 curve",
+ )
+}
+
+func TestConversationKeyFail002(t *testing.T) {
+ // sec1 is 0
+ assertConversationKeyFail(
+ t,
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "invalid private key: x coordinate 0000000000000000000000000000000000000000000000000000000000000000 is not on the secp256k1 curve",
+ )
+}
+
+func TestConversationKeyFail003(t *testing.T) {
+ // pub2 is invalid, no sqrt, all-ff
+ assertConversationKeyFail(
+ t,
+ "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364139",
+ "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+ "invalid public key: x >= field prime",
+ // "invalid public key: x >= field prime",
+ )
+}
+
+func TestConversationKeyFail004(t *testing.T) {
+ // sec1 == curve.n
+ assertConversationKeyFail(
+ t,
+ "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141",
+ "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "invalid private key: x coordinate fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141 is not on the secp256k1 curve",
+ )
+}
+
+func TestConversationKeyFail005(t *testing.T) {
+ // pub2 is invalid, no sqrt
+ assertConversationKeyFail(
+ t,
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "invalid public key: x coordinate 1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef is not on the secp256k1 curve",
+ // "invalid public key: x coordinate 1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef is not on the secp256k1 curve",
+ )
+}
+
+func TestConversationKeyFail006(t *testing.T) {
+ // pub2 is point of order 3 on twist
+ assertConversationKeyFail(
+ t,
+ "0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "invalid public key: x coordinate 0000000000000000000000000000000000000000000000000000000000000000 is not on the secp256k1 curve",
+ // "invalid public key: x coordinate 0000000000000000000000000000000000000000000000000000000000000000 is not on the secp256k1 curve",
+ )
+}
+
+func TestConversationKeyFail007(t *testing.T) {
+ // pub2 is point of order 13 on twist
+ assertConversationKeyFail(
+ t,
+ "0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20",
+ "eb1f7200aecaa86682376fb1c13cd12b732221e774f553b0a0857f88fa20f86d",
+ "invalid public key: x coordinate eb1f7200aecaa86682376fb1c13cd12b732221e774f553b0a0857f88fa20f86d is not on the secp256k1 curve",
+ // "invalid public key: x coordinate eb1f7200aecaa86682376fb1c13cd12b732221e774f553b0a0857f88fa20f86d is not on the secp256k1 curve",
+ )
+}
+
+func TestConversationKeyFail008(t *testing.T) {
+ // pub2 is point of order 3319 on twist
+ assertConversationKeyFail(
+ t,
+ "0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20",
+ "709858a4c121e4a84eb59c0ded0261093c71e8ca29efeef21a6161c447bcaf9f",
+ "invalid public key: x coordinate 709858a4c121e4a84eb59c0ded0261093c71e8ca29efeef21a6161c447bcaf9f is not on the secp256k1 curve",
+ // "invalid public key: x coordinate 709858a4c121e4a84eb59c0ded0261093c71e8ca29efeef21a6161c447bcaf9f is not on the secp256k1 curve",
+ )
+}
+
+func TestDecryptFail001(t *testing.T) {
+ assertDecryptFail(
+ t,
+ "ca2527a037347b91bea0c8a30fc8d9600ffd81ec00038671e3a0f0cb0fc9f642",
+ // "daaea5ca345b268e5b62060ca72c870c48f713bc1e00ff3fc0ddb78e826f10db",
+ "n o b l e",
+ "#Atqupco0WyaOW2IGDKcshwxI9xO8HgD/P8Ddt46CbxDbrhdG8VmJdU0MIDf06CUvEvdnr1cp1fiMtlM/GrE92xAc1K5odTpCzUB+mjXgbaqtntBUbTToSUoT0ovrlPwzGjyp",
+ "unknown version",
+ )
+}
+
+func TestDecryptFail002(t *testing.T) {
+ assertDecryptFail(
+ t,
+ "36f04e558af246352dcf73b692fbd3646a2207bd8abd4b1cd26b234db84d9481",
+ // "ad408d4be8616dc84bb0bf046454a2a102edac937c35209c43cd7964c5feb781",
+ "⚠️",
+ "AK1AjUvoYW3IS7C/BGRUoqEC7ayTfDUgnEPNeWTF/reBZFaha6EAIRueE9D1B1RuoiuFScC0Q94yjIuxZD3JStQtE8JMNacWFs9rlYP+ZydtHhRucp+lxfdvFlaGV/sQlqZz",
+ "unknown version 0",
+ )
+}
+
+func TestDecryptFail003(t *testing.T) {
+ assertDecryptFail(
+ t,
+ "ca2527a037347b91bea0c8a30fc8d9600ffd81ec00038671e3a0f0cb0fc9f642",
+ // "daaea5ca345b268e5b62060ca72c870c48f713bc1e00ff3fc0ddb78e826f10db",
+ "n o s t r",
+ "Atфupco0WyaOW2IGDKcshwxI9xO8HgD/P8Ddt46CbxDbrhdG8VmJZE0UICD06CUvEvdnr1cp1fiMtlM/GrE92xAc1EwsVCQEgWEu2gsHUVf4JAa3TpgkmFc3TWsax0v6n/Wq",
+ "illegal base64 data at input byte 2",
+ )
+}
+
+func TestDecryptFail004(t *testing.T) {
+ assertDecryptFail(
+ t,
+ "cff7bd6a3e29a450fd27f6c125d5edeb0987c475fd1e8d97591e0d4d8a89763c",
+ // "09ff97750b084012e15ecb84614ce88180d7b8ec0d468508a86b6d70c0361a25",
+ "¯\\_(ツ)_/¯",
+ "Agn/l3ULCEAS4V7LhGFM6IGA17jsDUaFCKhrbXDANholyySBfeh+EN8wNB9gaLlg4j6wdBYh+3oK+mnxWu3NKRbSvQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
+ "invalid hmac",
+ )
+}
+
+func TestDecryptFail005(t *testing.T) {
+ assertDecryptFail(
+ t,
+ "cfcc9cf682dfb00b11357f65bdc45e29156b69db424d20b3596919074f5bf957",
+ // "65b14b0b949aaa7d52c417eb753b390e8ad6d84b23af4bec6d9bfa3e03a08af4",
+ "🥎",
+ "AmWxSwuUmqp9UsQX63U7OQ6K1thLI69L7G2b+j4DoIr0oRWQ8avl4OLqWZiTJ10vIgKrNqjoaX+fNhE9RqmR5g0f6BtUg1ijFMz71MO1D4lQLQfW7+UHva8PGYgQ1QpHlKgR",
+ "invalid hmac",
+ )
+}
+
+func TestDecryptFail006(t *testing.T) {
+ assertDecryptFail(
+ t,
+ "5254827d29177622d40a7b67cad014fe7137700c3c523903ebbe3e1b74d40214",
+ // "7ab65dbb8bbc2b8e35cafb5745314e1f050325a864d11d0475ef75b3660d91c1",
+ "elliptic-curve cryptography",
+ "Anq2XbuLvCuONcr7V0UxTh8FAyWoZNEdBHXvdbNmDZHB573MI7R7rrTYftpqmvUpahmBC2sngmI14/L0HjOZ7lWGJlzdh6luiOnGPc46cGxf08MRC4CIuxx3i2Lm0KqgJ7vA",
+ "invalid padding",
+ )
+}
+
+func TestDecryptFail007(t *testing.T) {
+ assertDecryptFail(
+ t,
+ "fea39aca9aa8340c3a78ae1f0902aa7e726946e4efcd7783379df8096029c496",
+ // "7d4283e3b54c885d6afee881f48e62f0a3f5d7a9e1cb71ccab594a7882c39330",
+ "noble",
+ "An1Cg+O1TIhdav7ogfSOYvCj9dep4ctxzKtZSniCw5MwRrrPJFyAQYZh5VpjC2QYzny5LIQ9v9lhqmZR4WBYRNJ0ognHVNMwiFV1SHpvUFT8HHZN/m/QarflbvDHAtO6pY16",
+ "invalid padding",
+ )
+}
+
+func TestDecryptFail008(t *testing.T) {
+ assertDecryptFail(
+ t,
+ "0c4cffb7a6f7e706ec94b2e879f1fc54ff8de38d8db87e11787694d5392d5b3f",
+ // "6f9fd72667c273acd23ca6653711a708434474dd9eb15c3edb01ce9a95743e9b",
+ "censorship-resistant and global social network",
+ "Am+f1yZnwnOs0jymZTcRpwhDRHTdnrFcPtsBzpqVdD6b2NZDaNm/TPkZGr75kbB6tCSoq7YRcbPiNfJXNch3Tf+o9+zZTMxwjgX/nm3yDKR2kHQMBhVleCB9uPuljl40AJ8kXRD0gjw+aYRJFUMK9gCETZAjjmrsCM+nGRZ1FfNsHr6Z",
+ "invalid padding",
+ )
+}
+
+func TestDecryptFail009(t *testing.T) {
+ assertDecryptFail(
+ t,
+ "5cd2d13b9e355aeb2452afbd3786870dbeecb9d355b12cb0a3b6e9da5744cd35",
+ // "b60036976a1ada277b948fd4caa065304b96964742b89d26f26a25263a5060bd",
+ "0",
+ "",
+ "invalid payload length: 0",
+ )
+}
+
+func TestDecryptFail010(t *testing.T) {
+ assertDecryptFail(
+ t,
+ "d61d3f09c7dfe1c0be91af7109b60a7d9d498920c90cbba1e137320fdd938853",
+ // "1a29d02c8b4527745a2ccb38bfa45655deb37bc338ab9289d756354cea1fd07c",
+ "1",
+ "Ag==",
+ "invalid payload length: 4",
+ )
+}
+
+func TestDecryptFail011(t *testing.T) {
+ assertDecryptFail(
+ t,
+ "873bb0fc665eb950a8e7d5971965539f6ebd645c83c08cd6a85aafbad0f0bc47",
+ // "c826d3c38e765ab8cc42060116cd1464b2a6ce01d33deba5dedfb48615306d4a",
+ "2",
+ "AqxgToSh3H7iLYRJjoWAM+vSv/Y1mgNlm6OWWjOYUClrFF8=",
+ "invalid payload length: 48",
+ )
+}
+
+func TestDecryptFail012(t *testing.T) {
+ assertDecryptFail(
+ t,
+ "9f2fef8f5401ac33f74641b568a7a30bb19409c76ffdc5eae2db6b39d2617fbe",
+ // "9ff6484642545221624eaac7b9ea27133a4cc2356682a6033aceeef043549861",
+ "3",
+ "Ap/2SEZCVFIhYk6qx7nqJxM6TMI1ZoKmAzrO7vBDVJhhuZXWiM20i/tIsbjT0KxkJs2MZjh1oXNYMO9ggfk7i47WQA==",
+ "invalid payload length: 92",
+ )
+}
+
+func TestConversationKey001(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "315e59ff51cb9209768cf7da80791ddcaae56ac9775eb25b6dee1234bc5d2268",
+ "c2f9d9948dc8c7c38321e4b85c8558872eafa0641cd269db76848a6073e69133",
+ "3dfef0ce2a4d80a25e7a328accf73448ef67096f65f79588e358d9a0eb9013f1",
+ )
+}
+
+func TestConversationKey002(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "a1e37752c9fdc1273be53f68c5f74be7c8905728e8de75800b94262f9497c86e",
+ "03bb7947065dde12ba991ea045132581d0954f042c84e06d8c00066e23c1a800",
+ "4d14f36e81b8452128da64fe6f1eae873baae2f444b02c950b90e43553f2178b",
+ )
+}
+
+func TestConversationKey003(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "98a5902fd67518a0c900f0fb62158f278f94a21d6f9d33d30cd3091195500311",
+ "aae65c15f98e5e677b5050de82e3aba47a6fe49b3dab7863cf35d9478ba9f7d1",
+ "9c00b769d5f54d02bf175b7284a1cbd28b6911b06cda6666b2243561ac96bad7",
+ )
+}
+
+func TestConversationKey004(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "86ae5ac8034eb2542ce23ec2f84375655dab7f836836bbd3c54cefe9fdc9c19f",
+ "59f90272378089d73f1339710c02e2be6db584e9cdbe86eed3578f0c67c23585",
+ "19f934aafd3324e8415299b64df42049afaa051c71c98d0aa10e1081f2e3e2ba",
+ )
+}
+
+func TestConversationKey005(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "2528c287fe822421bc0dc4c3615878eb98e8a8c31657616d08b29c00ce209e34",
+ "f66ea16104c01a1c532e03f166c5370a22a5505753005a566366097150c6df60",
+ "c833bbb292956c43366145326d53b955ffb5da4e4998a2d853611841903f5442",
+ )
+}
+
+func TestConversationKey006(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "49808637b2d21129478041813aceb6f2c9d4929cd1303cdaf4fbdbd690905ff2",
+ "74d2aab13e97827ea21baf253ad7e39b974bb2498cc747cdb168582a11847b65",
+ "4bf304d3c8c4608864c0fe03890b90279328cd24a018ffa9eb8f8ccec06b505d",
+ )
+}
+
+func TestConversationKey007(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "af67c382106242c5baabf856efdc0629cc1c5b4061f85b8ceaba52aa7e4b4082",
+ "bdaf0001d63e7ec994fad736eab178ee3c2d7cfc925ae29f37d19224486db57b",
+ "a3a575dd66d45e9379904047ebfb9a7873c471687d0535db00ef2daa24b391db",
+ )
+}
+
+func TestConversationKey008(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "0e44e2d1db3c1717b05ffa0f08d102a09c554a1cbbf678ab158b259a44e682f1",
+ "1ffa76c5cc7a836af6914b840483726207cb750889753d7499fb8b76aa8fe0de",
+ "a39970a667b7f861f100e3827f4adbf6f464e2697686fe1a81aeda817d6b8bdf",
+ )
+}
+
+func TestConversationKey009(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "5fc0070dbd0666dbddc21d788db04050b86ed8b456b080794c2a0c8e33287bb6",
+ "31990752f296dd22e146c9e6f152a269d84b241cc95bb3ff8ec341628a54caf0",
+ "72c21075f4b2349ce01a3e604e02a9ab9f07e35dd07eff746de348b4f3c6365e",
+ )
+}
+
+func TestConversationKey010(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "1b7de0d64d9b12ddbb52ef217a3a7c47c4362ce7ea837d760dad58ab313cba64",
+ "24383541dd8083b93d144b431679d70ef4eec10c98fceef1eff08b1d81d4b065",
+ "dd152a76b44e63d1afd4dfff0785fa07b3e494a9e8401aba31ff925caeb8f5b1",
+ )
+}
+
+func TestConversationKey011(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "df2f560e213ca5fb33b9ecde771c7c0cbd30f1cf43c2c24de54480069d9ab0af",
+ "eeea26e552fc8b5e377acaa03e47daa2d7b0c787fac1e0774c9504d9094c430e",
+ "770519e803b80f411c34aef59c3ca018608842ebf53909c48d35250bd9323af6",
+ )
+}
+
+func TestConversationKey012(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "cffff919fcc07b8003fdc63bc8a00c0f5dc81022c1c927c62c597352190d95b9",
+ "eb5c3cca1a968e26684e5b0eb733aecfc844f95a09ac4e126a9e58a4e4902f92",
+ "46a14ee7e80e439ec75c66f04ad824b53a632b8409a29bbb7c192e43c00bb795",
+ )
+}
+
+func TestConversationKey013(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "64ba5a685e443e881e9094647ddd32db14444bb21aa7986beeba3d1c4673ba0a",
+ "50e6a4339fac1f3bf86f2401dd797af43ad45bbf58e0801a7877a3984c77c3c4",
+ "968b9dbbfcede1664a4ca35a5d3379c064736e87aafbf0b5d114dff710b8a946",
+ )
+}
+
+func TestConversationKey014(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "dd0c31ccce4ec8083f9b75dbf23cc2878e6d1b6baa17713841a2428f69dee91a",
+ "b483e84c1339812bed25be55cff959778dfc6edde97ccd9e3649f442472c091b",
+ "09024503c7bde07eb7865505891c1ea672bf2d9e25e18dd7a7cea6c69bf44b5d",
+ )
+}
+
+func TestConversationKey015(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "af71313b0d95c41e968a172b33ba5ebd19d06cdf8a7a98df80ecf7af4f6f0358",
+ "2a5c25266695b461ee2af927a6c44a3c598b8095b0557e9bd7f787067435bc7c",
+ "fe5155b27c1c4b4e92a933edae23726a04802a7cc354a77ac273c85aa3c97a92",
+ )
+}
+
+func TestConversationKey016(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "6636e8a389f75fe068a03b3edb3ea4a785e2768e3f73f48ffb1fc5e7cb7289dc",
+ "514eb2064224b6a5829ea21b6e8f7d3ea15ff8e70e8555010f649eb6e09aec70",
+ "ff7afacd4d1a6856d37ca5b546890e46e922b508639214991cf8048ddbe9745c",
+ )
+}
+
+func TestConversationKey017(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "94b212f02a3cfb8ad147d52941d3f1dbe1753804458e6645af92c7b2ea791caa",
+ "f0cac333231367a04b652a77ab4f8d658b94e86b5a8a0c472c5c7b0d4c6a40cc",
+ "e292eaf873addfed0a457c6bd16c8effde33d6664265697f69f420ab16f6669b",
+ )
+}
+
+func TestConversationKey018(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "aa61f9734e69ae88e5d4ced5aae881c96f0d7f16cca603d3bed9eec391136da6",
+ "4303e5360a884c360221de8606b72dd316da49a37fe51e17ada4f35f671620a6",
+ "8e7d44fd4767456df1fb61f134092a52fcd6836ebab3b00766e16732683ed848",
+ )
+}
+
+func TestConversationKey019(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "5e914bdac54f3f8e2cba94ee898b33240019297b69e96e70c8a495943a72fc98",
+ "5bd097924f606695c59f18ff8fd53c174adbafaaa71b3c0b4144a3e0a474b198",
+ "f5a0aecf2984bf923c8cd5e7bb8be262d1a8353cb93959434b943a07cf5644bc",
+ )
+}
+
+func TestConversationKey020(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "8b275067add6312ddee064bcdbeb9d17e88aa1df36f430b2cea5cc0413d8278a",
+ "65bbbfca819c90c7579f7a82b750a18c858db1afbec8f35b3c1e0e7b5588e9b8",
+ "2c565e7027eb46038c2263563d7af681697107e975e9914b799d425effd248d6",
+ )
+}
+
+func TestConversationKey021(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "1ac848de312285f85e0f7ec208aac20142a1f453402af9b34ec2ec7a1f9c96fc",
+ "45f7318fe96034d23ee3ddc25b77f275cc1dd329664dd51b89f89c4963868e41",
+ "b56e970e5057a8fd929f8aad9248176b9af87819a708d9ddd56e41d1aec74088",
+ )
+}
+
+func TestConversationKey022(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "295a1cf621de401783d29d0e89036aa1c62d13d9ad307161b4ceb535ba1b40e6",
+ "840115ddc7f1034d3b21d8e2103f6cb5ab0b63cf613f4ea6e61ae3d016715cdd",
+ "b4ee9c0b9b9fef88975773394f0a6f981ca016076143a1bb575b9ff46e804753",
+ )
+}
+
+func TestConversationKey023(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "a28eed0fe977893856ab9667e06ace39f03abbcdb845c329a1981be438ba565d",
+ "b0f38b950a5013eba5ab4237f9ed29204a59f3625c71b7e210fec565edfa288c",
+ "9d3a802b45bc5aeeb3b303e8e18a92ddd353375710a31600d7f5fff8f3a7285b",
+ )
+}
+
+func TestConversationKey024(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "7ab65af72a478c05f5c651bdc4876c74b63d20d04cdbf71741e46978797cd5a4",
+ "f1112159161b568a9cb8c9dd6430b526c4204bcc8ce07464b0845b04c041beda",
+ "943884cddaca5a3fef355e9e7f08a3019b0b66aa63ec90278b0f9fdb64821e79",
+ )
+}
+
+func TestConversationKey025(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "95c79a7b75ba40f2229e85756884c138916f9d103fc8f18acc0877a7cceac9fe",
+ "cad76bcbd31ca7bbda184d20cc42f725ed0bb105b13580c41330e03023f0ffb3",
+ "81c0832a669eea13b4247c40be51ccfd15bb63fcd1bba5b4530ce0e2632f301b",
+ )
+}
+
+func TestConversationKey026(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "baf55cc2febd4d980b4b393972dfc1acf49541e336b56d33d429bce44fa12ec9",
+ "0c31cf87fe565766089b64b39460ebbfdedd4a2bc8379be73ad3c0718c912e18",
+ "37e2344da9ecdf60ae2205d81e89d34b280b0a3f111171af7e4391ded93b8ea6",
+ )
+}
+
+func TestConversationKey027(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "6eeec45acd2ed31693c5256026abf9f072f01c4abb61f51cf64e6956b6dc8907",
+ "e501b34ed11f13d816748c0369b0c728e540df3755bab59ed3327339e16ff828",
+ "afaa141b522ddb27bb880d768903a7f618bb8b6357728cae7fb03af639b946e6",
+ )
+}
+
+func TestConversationKey028(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "261a076a9702af1647fb343c55b3f9a4f1096273002287df0015ba81ce5294df",
+ "b2777c863878893ae100fb740c8fab4bebd2bf7be78c761a75593670380a6112",
+ "76f8d2853de0734e51189ced523c09427c3e46338b9522cd6f74ef5e5b475c74",
+ )
+}
+
+func TestConversationKey029(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "ed3ec71ca406552ea41faec53e19f44b8f90575eda4b7e96380f9cc73c26d6f3",
+ "86425951e61f94b62e20cae24184b42e8e17afcf55bafa58645efd0172624fae",
+ "f7ffc520a3a0e9e9b3c0967325c9bf12707f8e7a03f28b6cd69ae92cf33f7036",
+ )
+}
+
+func TestConversationKey030(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "5a788fc43378d1303ac78639c59a58cb88b08b3859df33193e63a5a3801c722e",
+ "a8cba2f87657d229db69bee07850fd6f7a2ed070171a06d006ec3a8ac562cf70",
+ "7d705a27feeedf78b5c07283362f8e361760d3e9f78adab83e3ae5ce7aeb6409",
+ )
+}
+
+func TestConversationKey031(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "63bffa986e382b0ac8ccc1aa93d18a7aa445116478be6f2453bad1f2d3af2344",
+ "b895c70a83e782c1cf84af558d1038e6b211c6f84ede60408f519a293201031d",
+ "3a3b8f00d4987fc6711d9be64d9c59cf9a709c6c6481c2cde404bcc7a28f174e",
+ )
+}
+
+func TestConversationKey032(t *testing.T) {
+ assertConversationKeyGenerationPub(
+ t,
+ "e4a8bcacbf445fd3721792b939ff58e691cdcba6a8ba67ac3467b45567a03e5c",
+ "b54053189e8c9252c6950059c783edb10675d06d20c7b342f73ec9fa6ed39c9d",
+ "7b3933b4ef8189d347169c7955589fc1cfc01da5239591a08a183ff6694c44ad",
+ )
+}
+
+func TestConversationKey033(t *testing.T) {
+ // sec1 = n-2, pub2: random, 0x02
+ assertConversationKeyGenerationPub(
+ t,
+ "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364139",
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ "8b6392dbf2ec6a2b2d5b1477fc2be84d63ef254b667cadd31bd3f444c44ae6ba",
+ )
+}
+
+func TestConversationKey034(t *testing.T) {
+ // sec1 = 2, pub2: rand
+ assertConversationKeyGenerationPub(
+ t,
+ "0000000000000000000000000000000000000000000000000000000000000002",
+ "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdeb",
+ "be234f46f60a250bef52a5ee34c758800c4ca8e5030bf4cc1a31d37ba2104d43",
+ )
+}
+
+func TestConversationKey035(t *testing.T) {
+ // sec1 == pub2
+ assertConversationKeyGenerationPub(
+ t,
+ "0000000000000000000000000000000000000000000000000000000000000001",
+ "79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
+ "3b4610cb7189beb9cc29eb3716ecc6102f1247e8f3101a03a1787d8908aeb54e",
+ )
+}
+
+func TestMessageKeyGeneration001(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "e1e6f880560d6d149ed83dcc7e5861ee62a5ee051f7fde9975fe5d25d2a02d72",
+ "f145f3bed47cb70dbeaac07f3a3fe683e822b3715edb7c4fe310829014ce7d76",
+ "c4ad129bb01180c0933a160c",
+ "027c1db445f05e2eee864a0975b0ddef5b7110583c8c192de3732571ca5838c4",
+ )
+}
+
+func TestMessageKeyGeneration002(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "e1d6d28c46de60168b43d79dacc519698512ec35e8ccb12640fc8e9f26121101",
+ "e35b88f8d4a8f1606c5082f7a64b100e5d85fcdb2e62aeafbec03fb9e860ad92",
+ "22925e920cee4a50a478be90",
+ "46a7c55d4283cb0df1d5e29540be67abfe709e3b2e14b7bf9976e6df994ded30",
+ )
+}
+
+func TestMessageKeyGeneration003(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "cfc13bef512ac9c15951ab00030dfaf2626fdca638dedb35f2993a9eeb85d650",
+ "020783eb35fdf5b80ef8c75377f4e937efb26bcbad0e61b4190e39939860c4bf",
+ "d3594987af769a52904656ac",
+ "237ec0ccb6ebd53d179fa8fd319e092acff599ef174c1fdafd499ef2b8dee745",
+ )
+}
+
+func TestMessageKeyGeneration004(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "ea6eb84cac23c5c1607c334e8bdf66f7977a7e374052327ec28c6906cbe25967",
+ "ff68db24b34fa62c78ac5ffeeaf19533afaedf651fb6a08384e46787f6ce94be",
+ "50bb859aa2dde938cc49ec7a",
+ "06ff32e1f7b29753a727d7927b25c2dd175aca47751462d37a2039023ec6b5a6",
+ )
+}
+
+func TestMessageKeyGeneration005(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "8c2e1dd3792802f1f9f7842e0323e5d52ad7472daf360f26e15f97290173605d",
+ "2f9daeda8683fdeede81adac247c63cc7671fa817a1fd47352e95d9487989d8b",
+ "400224ba67fc2f1b76736916",
+ "465c05302aeeb514e41c13ed6405297e261048cfb75a6f851ffa5b445b746e4b",
+ )
+}
+
+func TestMessageKeyGeneration006(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "05c28bf3d834fa4af8143bf5201a856fa5fac1a3aee58f4c93a764fc2f722367",
+ "1e3d45777025a035be566d80fd580def73ed6f7c043faec2c8c1c690ad31c110",
+ "021905b1ea3afc17cb9bf96f",
+ "74a6e481a89dcd130aaeb21060d7ec97ad30f0007d2cae7b1b11256cc70dfb81",
+ )
+}
+
+func TestMessageKeyGeneration007(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "5e043fb153227866e75a06d60185851bc90273bfb93342f6632a728e18a07a17",
+ "1ea72c9293841e7737c71567d8120145a58991aaa1c436ef77bf7adb83f882f1",
+ "72f69a5a5f795465cee59da8",
+ "e9daa1a1e9a266ecaa14e970a84bce3fbbf329079bbccda626582b4e66a0d4c9",
+ )
+}
+
+func TestMessageKeyGeneration009(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "7be7338eaf06a87e274244847fe7a97f5c6a91f44adc18fcc3e411ad6f786dbf",
+ "881e7968a1f0c2c80742ee03cd49ea587e13f22699730f1075ade01931582bf6",
+ "6e69be92d61c04a276021565",
+ "901afe79e74b19967c8829af23617d7d0ffbf1b57190c096855c6a03523a971b",
+ )
+}
+
+func TestMessageKeyGeneration010(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "94571c8d590905bad7becd892832b472f2aa5212894b6ce96e5ba719c178d976",
+ "f80873dd48466cb12d46364a97b8705c01b9b4230cb3ec3415a6b9551dc42eef",
+ "3dda53569cfcb7fac1805c35",
+ "e9fc264345e2839a181affebc27d2f528756e66a5f87b04bf6c5f1997047051e",
+ )
+}
+
+func TestMessageKeyGeneration011(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "13a6ee974b1fd759135a2c2010e3cdda47081c78e771125e4f0c382f0284a8cb",
+ "bc5fb403b0bed0d84cf1db872b6522072aece00363178c98ad52178d805fca85",
+ "65064239186e50304cc0f156",
+ "e872d320dde4ed3487958a8e43b48aabd3ced92bc24bb8ff1ccb57b590d9701a",
+ )
+}
+
+func TestMessageKeyGeneration012(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "082fecdb85f358367b049b08be0e82627ae1d8edb0f27327ccb593aa2613b814",
+ "1fbdb1cf6f6ea816349baf697932b36107803de98fcd805ebe9849b8ad0e6a45",
+ "2e605e1d825a3eaeb613db9c",
+ "fae910f591cf3c7eb538c598583abad33bc0a03085a96ca4ea3a08baf17c0eec",
+ )
+}
+
+func TestMessageKeyGeneration013(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "4c19020c74932c30ec6b2d8cd0d5bb80bd0fc87da3d8b4859d2fb003810afd03",
+ "1ab9905a0189e01cda82f843d226a82a03c4f5b6dbea9b22eb9bc953ba1370d4",
+ "cbb2530ea653766e5a37a83a",
+ "267f68acac01ac7b34b675e36c2cef5e7b7a6b697214add62a491bedd6efc178",
+ )
+}
+
+func TestMessageKeyGeneration014(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "67723a3381497b149ce24814eddd10c4c41a1e37e75af161930e6b9601afd0ff",
+ "9ecbd25e7e2e6c97b8c27d376dcc8c5679da96578557e4e21dba3a7ef4e4ac07",
+ "ef649fcf335583e8d45e3c2e",
+ "04dbbd812fa8226fdb45924c521a62e3d40a9e2b5806c1501efdeba75b006bf1",
+ )
+}
+
+func TestMessageKeyGeneration015(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "42063fe80b093e8619b1610972b4c3ab9e76c14fd908e642cd4997cafb30f36c",
+ "211c66531bbcc0efcdd0130f9f1ebc12a769105eb39608994bcb188fa6a73a4a",
+ "67803605a7e5010d0f63f8c8",
+ "e840e4e8921b57647369d121c5a19310648105dbdd008200ebf0d3b668704ff8",
+ )
+}
+
+func TestMessageKeyGeneration016(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "b5ac382a4be7ac03b554fe5f3043577b47ea2cd7cfc7e9ca010b1ffbb5cf1a58",
+ "b3b5f14f10074244ee42a3837a54309f33981c7232a8b16921e815e1f7d1bb77",
+ "4e62a0073087ed808be62469",
+ "c8efa10230b5ea11633816c1230ca05fa602ace80a7598916d83bae3d3d2ccd7",
+ )
+}
+
+func TestMessageKeyGeneration017(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "e9d1eba47dd7e6c1532dc782ff63125db83042bb32841db7eeafd528f3ea7af9",
+ "54241f68dc2e50e1db79e892c7c7a471856beeb8d51b7f4d16f16ab0645d2f1a",
+ "a963ed7dc29b7b1046820a1d",
+ "aba215c8634530dc21c70ddb3b3ee4291e0fa5fa79be0f85863747bde281c8b2",
+ )
+}
+
+func TestMessageKeyGeneration018(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "a94ecf8efeee9d7068de730fad8daf96694acb70901d762de39fa8a5039c3c49",
+ "c0565e9e201d2381a2368d7ffe60f555223874610d3d91fbbdf3076f7b1374dd",
+ "329bb3024461e84b2e1c489b",
+ "ac42445491f092481ce4fa33b1f2274700032db64e3a15014fbe8c28550f2fec",
+ )
+}
+
+func TestMessageKeyGeneration019(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "533605ea214e70c25e9a22f792f4b78b9f83a18ab2103687c8a0075919eaaa53",
+ "ab35a5e1e54d693ff023db8500d8d4e79ad8878c744e0eaec691e96e141d2325",
+ "653d759042b85194d4d8c0a7",
+ "b43628e37ba3c31ce80576f0a1f26d3a7c9361d29bb227433b66f49d44f167ba",
+ )
+}
+
+func TestMessageKeyGeneration020(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "7f38df30ceea1577cb60b355b4f5567ff4130c49e84fed34d779b764a9cc184c",
+ "a37d7f211b84a551a127ff40908974eb78415395d4f6f40324428e850e8c42a3",
+ "b822e2c959df32b3cb772a7c",
+ "1ba31764f01f69b5c89ded2d7c95828e8052c55f5d36f1cd535510d61ba77420",
+ )
+}
+
+func TestMessageKeyGeneration021(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "11b37f9dbc4d0185d1c26d5f4ed98637d7c9701fffa65a65839fa4126573a4e5",
+ "964f38d3a31158a5bfd28481247b18dd6e44d69f30ba2a40f6120c6d21d8a6ba",
+ "5f72c5b87c590bcd0f93b305",
+ "2fc4553e7cedc47f29690439890f9f19c1077ef3e9eaeef473d0711e04448918",
+ )
+}
+
+func TestMessageKeyGeneration022(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "8be790aa483d4cdd843189f71f135b3ec7e31f381312c8fe9f177aab2a48eafa",
+ "95c8c74d633721a131316309cf6daf0804d59eaa90ea998fc35bac3d2fbb7a94",
+ "409a7654c0e4bf8c2c6489be",
+ "21bb0b06eb2b460f8ab075f497efa9a01c9cf9146f1e3986c3bf9da5689b6dc4",
+ )
+}
+
+func TestMessageKeyGeneration023(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "19fd2a718ea084827d6bd73f509229ddf856732108b59fc01819f611419fd140",
+ "cc6714b9f5616c66143424e1413d520dae03b1a4bd202b82b0a89b0727f5cdc8",
+ "1b7fd2534f015a8f795d8f32",
+ "2bef39c4ce5c3c59b817e86351373d1554c98bc131c7e461ed19d96cfd6399a0",
+ )
+}
+
+func TestMessageKeyGeneration024(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "3c2acd893952b2f6d07d8aea76f545ca45961a93fe5757f6a5a80811d5e0255d",
+ "c8de6c878cb469278d0af894bc181deb6194053f73da5014c2b5d2c8db6f2056",
+ "6ffe4f1971b904a1b1a81b99",
+ "df1cd69dd3646fca15594284744d4211d70e7d8472e545d276421fbb79559fd4",
+ )
+}
+
+func TestMessageKeyGeneration025(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "7dbea4cead9ac91d4137f1c0a6eebb6ba0d1fb2cc46d829fbc75f8d86aca6301",
+ "c8e030f6aa680c3d0b597da9c92bb77c21c4285dd620c5889f9beba7446446b0",
+ "a9b5a67d081d3b42e737d16f",
+ "355a85f551bc3cce9a14461aa60994742c9bbb1c81a59ca102dc64e61726ab8e",
+ )
+}
+
+func TestMessageKeyGeneration026(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "45422e676cdae5f1071d3647d7a5f1f5adafb832668a578228aa1155a491f2f3",
+ "758437245f03a88e2c6a32807edfabff51a91c81ca2f389b0b46f2c97119ea90",
+ "263830a065af33d9c6c5aa1f",
+ "7c581cf3489e2de203a95106bfc0de3d4032e9d5b92b2b61fb444acd99037e17",
+ )
+}
+
+func TestMessageKeyGeneration027(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "babc0c03fad24107ad60678751f5db2678041ff0d28671ede8d65bdf7aa407e9",
+ "bd68a28bd48d9ffa3602db72c75662ac2848a0047a313d2ae2d6bc1ac153d7e9",
+ "d0f9d2a1ace6c758f594ffdd",
+ "eb435e3a642adfc9d59813051606fc21f81641afd58ea6641e2f5a9f123bb50a",
+ )
+}
+
+func TestMessageKeyGeneration028(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "7a1b8aac37d0d20b160291fad124ab697cfca53f82e326d78fef89b4b0ea8f83",
+ "9e97875b651a1d30d17d086d1e846778b7faad6fcbc12e08b3365d700f62e4fe",
+ "ccdaad5b3b7645be430992eb",
+ "6f2f55cf35174d75752f63c06cc7cbc8441759b142999ed2d5a6d09d263e1fc4",
+ )
+}
+
+func TestMessageKeyGeneration029(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "8370e4e32d7e680a83862cab0da6136ef607014d043e64cdf5ecc0c4e20b3d9a",
+ "1472bed5d19db9c546106de946e0649cd83cc9d4a66b087a65906e348dcf92e2",
+ "ed02dece5fc3a186f123420b",
+ "7b3f7739f49d30c6205a46b174f984bb6a9fc38e5ccfacef2dac04fcbd3b184e",
+ )
+}
+
+func TestMessageKeyGeneration030(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "9f1c5e8a29cd5677513c2e3a816551d6833ee54991eb3f00d5b68096fc8f0183",
+ "5e1a7544e4d4dafe55941fcbdf326f19b0ca37fc49c4d47e9eec7fb68cde4975",
+ "7d9acb0fdc174e3c220f40de",
+ "e265ab116fbbb86b2aefc089a0986a0f5b77eda50c7410404ad3b4f3f385c7a7",
+ )
+}
+
+func TestMessageKeyGeneration031(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "c385aa1c37c2bfd5cc35fcdbdf601034d39195e1cabff664ceb2b787c15d0225",
+ "06bf4e60677a13e54c4a38ab824d2ef79da22b690da2b82d0aa3e39a14ca7bdd",
+ "26b450612ca5e905b937e147",
+ "22208152be2b1f5f75e6bfcc1f87763d48bb7a74da1be3d102096f257207f8b3",
+ )
+}
+
+func TestMessageKeyGeneration032(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "3ff73528f88a50f9d35c0ddba4560bacee5b0462d0f4cb6e91caf41847040ce4",
+ "850c8a17a23aa761d279d9901015b2bbdfdff00adbf6bc5cf22bd44d24ecabc9",
+ "4a296a1fb0048e5020d3b129",
+ "b1bf49a533c4da9b1d629b7ff30882e12d37d49c19abd7b01b7807d75ee13806",
+ )
+}
+
+func TestMessageKeyGeneration033(t *testing.T) {
+ assertMessageKeyGeneration(
+ t,
+ "a1a3d60f3470a8612633924e91febf96dc5366ce130f658b1f0fc652c20b3b54",
+ "2dcf39b9d4c52f1cb9db2d516c43a7c6c3b8c401f6a4ac8f131a9e1059957036",
+ "17f8057e6156ba7cc5310d01eda8c40f9aa388f9fd1712deb9511f13ecc37d27",
+ "a8188daff807a1182200b39d",
+ "47b89da97f68d389867b5d8a2d7ba55715a30e3d88a3cc11f3646bc2af5580ef",
+ )
+}
+
+func TestMaxLength(t *testing.T) {
+ sk1 := keys.GeneratePrivateKey()
+ sk2 := keys.GeneratePrivateKey()
+ pub2, _ := keys.GetPublicKeyHex(string(sk2))
+ salt := make([]byte, 32)
+ rand.Read(salt)
+ conversationKey, _ := GenerateConversationKey(pub2, string(sk1))
+ plaintext := strings.Repeat("a", MaxPlaintextSize)
+ encrypted, err := Encrypt(plaintext, conversationKey, WithCustomNonce(salt))
+ if chk.E(err) {
+ t.Error(err)
+ }
+
+ assertCryptPub(
+ t,
+ string(sk1),
+ pub2,
+ fmt.Sprintf("%x", conversationKey),
+ fmt.Sprintf("%x", salt),
+ plaintext,
+ encrypted,
+ )
+}
+
+func assertCryptPub(
+ t *testing.T,
+ sk1, pub2, conversationKey, salt, plaintext, expected string,
+) {
+ var (
+ k1, s []byte
+ actual, decrypted string
+ ok bool
+ err error
+ )
+ k1, err = hex.Dec(conversationKey)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for conversation key: %v", err,
+ ); !ok {
+ return
+ }
+ if ok = assertConversationKeyGenerationPub(
+ t, sk1, pub2, conversationKey,
+ ); !ok {
+ return
+ }
+ s, err = hex.Dec(salt)
+ if ok = assert.NoErrorf(
+ t, err, "hex decode failed for salt: %v", err,
+ ); !ok {
+ return
+ }
+ actual, err = Encrypt(plaintext, k1, WithCustomNonce(s))
+ if ok = assert.NoError(t, err, "encryption failed: %v", err); !ok {
+ return
+ }
+ if ok = assert.Equalf(t, expected, actual, "wrong encryption"); !ok {
+ return
+ }
+ decrypted, err = Decrypt(expected, k1)
+ if ok = assert.NoErrorf(t, err, "decryption failed: %v", err); !ok {
+ return
+ }
+ assert.Equal(t, decrypted, plaintext, "wrong decryption")
+}
diff --git a/env/config.go b/env/config.go
index f075121..fa05de3 100644
--- a/env/config.go
+++ b/env/config.go
@@ -1,6 +1,6 @@
-// Package config is an implementation of the env.Source interface from
+// Package env is an implementation of the env.Source interface from
// go-simpler.org
-package config
+package env
import (
"orly.dev/chk"
diff --git a/envelopes/authenvelope/authenvelope.go b/envelopes/authenvelope/authenvelope.go
index 2e1719c..f5182c5 100644
--- a/envelopes/authenvelope/authenvelope.go
+++ b/envelopes/authenvelope/authenvelope.go
@@ -4,13 +4,13 @@ package authenvelope
import (
"io"
-
"orly.dev/chk"
- envs "orly.dev/envelopes"
"orly.dev/errorf"
- "orly.dev/event"
- "orly.dev/interfaces/codec"
"orly.dev/log"
+
+ "orly.dev/codec"
+ envs "orly.dev/envelopes"
+ "orly.dev/event"
"orly.dev/text"
)
@@ -105,8 +105,6 @@ func NewResponse() *Response { return &Response{} }
// NewResponseWith creates a new Response with a provided event.E.
func NewResponseWith(event *event.E) *Response { return &Response{Event: event} }
-func (en *Response) Id() []byte { return en.Event.Id }
-
// Label returns the label of a auth Response envelope.
func (en *Response) Label() string { return L }
diff --git a/envelopes/authenvelope/authenvelope_test.go b/envelopes/authenvelope/authenvelope_test.go
index a60ae35..8ca0e48 100644
--- a/envelopes/authenvelope/authenvelope_test.go
+++ b/envelopes/authenvelope/authenvelope_test.go
@@ -2,10 +2,10 @@ package authenvelope
import (
"bytes"
+ "orly.dev/chk"
"testing"
"orly.dev/auth"
- "orly.dev/chk"
"orly.dev/envelopes"
"orly.dev/p256k"
)
@@ -27,7 +27,7 @@ func TestAuth(t *testing.T) {
copy(oChal, b1)
var rem []byte
var l string
- if l, b1 = envelopes.Identify(b1); chk.E(err) {
+ if l, b1, err = envelopes.Identify(b1); chk.E(err) {
t.Fatal(err)
}
if l != L {
@@ -62,7 +62,7 @@ func TestAuth(t *testing.T) {
b3 = resp.Marshal(b3)
oResp := make([]byte, len(b3))
copy(oResp, b3)
- if l, b3 = envelopes.Identify(b3); chk.E(err) {
+ if l, b3, err = envelopes.Identify(b3); chk.E(err) {
t.Fatal(err)
}
if l != L {
diff --git a/envelopes/closedenvelope/closedenvelope.go b/envelopes/closedenvelope/closedenvelope.go
index 9695dd8..8baa5d9 100644
--- a/envelopes/closedenvelope/closedenvelope.go
+++ b/envelopes/closedenvelope/closedenvelope.go
@@ -5,10 +5,10 @@ package closedenvelope
import (
"io"
-
"orly.dev/chk"
+
+ "orly.dev/codec"
"orly.dev/envelopes"
- "orly.dev/interfaces/codec"
"orly.dev/subscription"
"orly.dev/text"
)
diff --git a/envelopes/closedenvelope/closedenvelope_test.go b/envelopes/closedenvelope/closedenvelope_test.go
index acfe1ad..5f684d1 100644
--- a/envelopes/closedenvelope/closedenvelope_test.go
+++ b/envelopes/closedenvelope/closedenvelope_test.go
@@ -2,11 +2,11 @@ package closedenvelope
import (
"bytes"
+ "orly.dev/chk"
"testing"
"lukechampine.com/frand"
- "orly.dev/chk"
"orly.dev/envelopes"
"orly.dev/subscription"
)
@@ -43,7 +43,7 @@ func TestMarshalUnmarshal(t *testing.T) {
copy(rb1, rb)
var rem []byte
var l string
- if l, rb = envelopes.Identify(rb); chk.E(err) {
+ if l, rb, err = envelopes.Identify(rb); chk.E(err) {
t.Fatal(err)
}
if l != L {
diff --git a/envelopes/closeenvelope/closeenvelope.go b/envelopes/closeenvelope/closeenvelope.go
index ff43164..1431c37 100644
--- a/envelopes/closeenvelope/closeenvelope.go
+++ b/envelopes/closeenvelope/closeenvelope.go
@@ -4,10 +4,10 @@ package closeenvelope
import (
"io"
-
"orly.dev/chk"
+
+ "orly.dev/codec"
"orly.dev/envelopes"
- "orly.dev/interfaces/codec"
"orly.dev/subscription"
)
diff --git a/envelopes/closeenvelope/closeenvelope_test.go b/envelopes/closeenvelope/closeenvelope_test.go
index 55cd6d7..e02d4b9 100644
--- a/envelopes/closeenvelope/closeenvelope_test.go
+++ b/envelopes/closeenvelope/closeenvelope_test.go
@@ -2,9 +2,9 @@ package closeenvelope
import (
"bytes"
+ "orly.dev/chk"
"testing"
- "orly.dev/chk"
"orly.dev/envelopes"
"orly.dev/subscription"
)
@@ -25,7 +25,7 @@ func TestMarshalUnmarshal(t *testing.T) {
copy(rb1, rb)
var rem []byte
var l string
- if l, rb = envelopes.Identify(rb); chk.E(err) {
+ if l, rb, err = envelopes.Identify(rb); chk.E(err) {
t.Fatal(err)
}
if l != L {
diff --git a/envelopes/countenvelope/countenvelope.go b/envelopes/countenvelope/countenvelope.go
index 5572855..d98ed05 100644
--- a/envelopes/countenvelope/countenvelope.go
+++ b/envelopes/countenvelope/countenvelope.go
@@ -5,12 +5,12 @@ package countenvelope
import (
"bytes"
"io"
-
"orly.dev/chk"
- "orly.dev/envelopes"
"orly.dev/errorf"
+
+ "orly.dev/codec"
+ "orly.dev/envelopes"
"orly.dev/filters"
- "orly.dev/interfaces/codec"
"orly.dev/ints"
"orly.dev/subscription"
"orly.dev/text"
diff --git a/envelopes/countenvelope/countenvelope_test.go b/envelopes/countenvelope/countenvelope_test.go
index f359d63..6f458c6 100644
--- a/envelopes/countenvelope/countenvelope_test.go
+++ b/envelopes/countenvelope/countenvelope_test.go
@@ -2,9 +2,9 @@ package countenvelope
import (
"bytes"
+ "orly.dev/chk"
"testing"
- "orly.dev/chk"
"orly.dev/envelopes"
"orly.dev/filters"
"orly.dev/subscription"
@@ -30,7 +30,7 @@ func TestRequest(t *testing.T) {
copy(rb1, rb)
var rem []byte
var l string
- if l, rb = envelopes.Identify(rb); chk.E(err) {
+ if l, rb, err = envelopes.Identify(rb); chk.E(err) {
t.Fatal(err)
}
if l != L {
diff --git a/envelopes/eid/eid.go b/envelopes/eid/eid.go
deleted file mode 100644
index 4ef65ac..0000000
--- a/envelopes/eid/eid.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package eid
-
-type Ider interface {
- Id() []byte
-}
diff --git a/envelopes/eoseenvelope/eoseenvelope.go b/envelopes/eoseenvelope/eoseenvelope.go
index cd820b9..954fac1 100644
--- a/envelopes/eoseenvelope/eoseenvelope.go
+++ b/envelopes/eoseenvelope/eoseenvelope.go
@@ -6,11 +6,10 @@ package eoseenvelope
import (
"io"
- "orly.dev/log"
-
"orly.dev/chk"
+
+ "orly.dev/codec"
"orly.dev/envelopes"
- "orly.dev/interfaces/codec"
"orly.dev/subscription"
)
@@ -40,7 +39,6 @@ func (en *T) Label() string { return L }
// Write the eoseenvelope.T to a provided io.Writer.
func (en *T) Write(w io.Writer) (err error) {
- log.I.F("writing EOSE to %s", en.Subscription.String())
_, err = w.Write(en.Marshal(nil))
return
}
diff --git a/envelopes/eoseenvelope/eoseenvelope_test.go b/envelopes/eoseenvelope/eoseenvelope_test.go
index c1d7c00..8854dec 100644
--- a/envelopes/eoseenvelope/eoseenvelope_test.go
+++ b/envelopes/eoseenvelope/eoseenvelope_test.go
@@ -2,9 +2,9 @@ package eoseenvelope
import (
"bytes"
+ "orly.dev/chk"
"testing"
- "orly.dev/chk"
"orly.dev/envelopes"
"orly.dev/subscription"
)
@@ -26,7 +26,7 @@ func TestMarshalUnmarshal(t *testing.T) {
copy(rb1, rb)
var rem []byte
var l string
- if l, rb = envelopes.Identify(rb); chk.E(err) {
+ if l, rb, err = envelopes.Identify(rb); chk.E(err) {
t.Fatal(err)
}
if l != L {
diff --git a/envelopes/eventenvelope/eventenvelope.go b/envelopes/eventenvelope/eventenvelope.go
index 817bfa3..8e59e5f 100644
--- a/envelopes/eventenvelope/eventenvelope.go
+++ b/envelopes/eventenvelope/eventenvelope.go
@@ -4,12 +4,12 @@ package eventenvelope
import (
"io"
-
"orly.dev/chk"
- "orly.dev/envelopes"
"orly.dev/errorf"
+
+ "orly.dev/codec"
+ "orly.dev/envelopes"
"orly.dev/event"
- "orly.dev/interfaces/codec"
"orly.dev/subscription"
)
@@ -29,8 +29,6 @@ func NewSubmission() *Submission { return &Submission{E: &event.E{}} }
// NewSubmissionWith creates a new eventenvelope.Submission with a provided event.E.
func NewSubmissionWith(ev *event.E) *Submission { return &Submission{E: ev} }
-func (en *Submission) Id() []byte { return en.E.Id }
-
// Label returns the label of a event eventenvelope.Submission envelope.
func (en *Submission) Label() string { return L }
@@ -105,8 +103,6 @@ func NewResultWith[V string | []byte](s V, ev *event.E) (
return &Result{subscription.MustNew(s), ev}, nil
}
-func (en *Result) Id() []byte { return en.Event.Id }
-
// Label returns the label of a event eventenvelope.Result envelope.
func (en *Result) Label() string { return L }
diff --git a/envelopes/eventenvelope/eventenvelope_test.go b/envelopes/eventenvelope/eventenvelope_test.go
index ac7a0cf..b1f420d 100644
--- a/envelopes/eventenvelope/eventenvelope_test.go
+++ b/envelopes/eventenvelope/eventenvelope_test.go
@@ -3,9 +3,9 @@ package eventenvelope
import (
"bufio"
"bytes"
+ "orly.dev/chk"
"testing"
- "orly.dev/chk"
"orly.dev/envelopes"
"orly.dev/event"
"orly.dev/event/examples"
@@ -14,7 +14,6 @@ import (
func TestSubmission(t *testing.T) {
scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
- scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
var c, rem, out []byte
var err error
for scanner.Scan() {
@@ -34,7 +33,7 @@ func TestSubmission(t *testing.T) {
rem = ea.Marshal(rem)
c = append(c, rem...)
var l string
- if l, rem = envelopes.Identify(rem); chk.E(err) {
+ if l, rem, err = envelopes.Identify(rem); chk.E(err) {
t.Fatal(err)
}
if l != L {
@@ -59,7 +58,6 @@ func TestSubmission(t *testing.T) {
func TestResult(t *testing.T) {
scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
- scanner.Buffer(make([]byte, 0, 1_000_000_000), 1_000_000_000)
var c, rem, out []byte
var err error
for scanner.Scan() {
@@ -83,7 +81,7 @@ func TestResult(t *testing.T) {
rem = ea.Marshal(rem)
c = append(c, rem...)
var l string
- if l, rem = envelopes.Identify(rem); chk.E(err) {
+ if l, rem, err = envelopes.Identify(rem); chk.E(err) {
t.Fatal(err)
}
if l != L {
diff --git a/envelopes/identify.go b/envelopes/identify.go
index 4f54b89..435aa81 100644
--- a/envelopes/identify.go
+++ b/envelopes/identify.go
@@ -1,12 +1,11 @@
package envelopes
// Identify handles determining what kind of codec.Envelope is, by the Label,
-// the first step in identifying the structure of the message.
-//
-// This first step is not enough because the same labels are used on several
-// codec.Envelope types in the nostr specification. The rest of the context is
-// in whether this is a client or a relay receiving it.
-func Identify(b []byte) (t string, rem []byte) {
+// the first step in identifying the structure of the message. This first step
+// is not sufficient because the same labels are used on several codec.Envelope
+// types in the nostr specification. The rest of the context is in whether this
+// is a client or a relay receiving it.
+func Identify(b []byte) (t string, rem []byte, err error) {
var openBrackets, openQuotes, afterQuotes bool
var label []byte
rem = b
diff --git a/envelopes/noticeenvelope/noticeenvelope.go b/envelopes/noticeenvelope/noticeenvelope.go
index 665bd19..0d4632f 100644
--- a/envelopes/noticeenvelope/noticeenvelope.go
+++ b/envelopes/noticeenvelope/noticeenvelope.go
@@ -5,10 +5,10 @@ package noticeenvelope
import (
"io"
-
"orly.dev/chk"
+
+ "orly.dev/codec"
"orly.dev/envelopes"
- "orly.dev/interfaces/codec"
"orly.dev/text"
)
diff --git a/envelopes/noticeenvelope/noticeenvelope_test.go b/envelopes/noticeenvelope/noticeenvelope_test.go
index 06c0c0c..fbe01d1 100644
--- a/envelopes/noticeenvelope/noticeenvelope_test.go
+++ b/envelopes/noticeenvelope/noticeenvelope_test.go
@@ -2,9 +2,9 @@ package noticeenvelope
import (
"bytes"
+ "orly.dev/chk"
"testing"
- "orly.dev/chk"
"orly.dev/envelopes"
"orly.dev/envelopes/messages"
)
@@ -21,7 +21,7 @@ func TestMarshalUnmarshal(t *testing.T) {
copy(rb1, rb)
var rem []byte
var l string
- if l, rb = envelopes.Identify(rb); chk.E(err) {
+ if l, rb, err = envelopes.Identify(rb); chk.E(err) {
t.Fatal(err)
}
if l != L {
diff --git a/envelopes/okenvelope/okenvelope.go b/envelopes/okenvelope/okenvelope.go
index d3b8bc5..e9d16de 100644
--- a/envelopes/okenvelope/okenvelope.go
+++ b/envelopes/okenvelope/okenvelope.go
@@ -1,19 +1,18 @@
// Package okenvelope is a codec for the OK message, which is an acknowledgement
// for an EVENT eventenvelope.Submission, containing true/false and if false a
-// message with a machine-readable error type as found in the 'messages'
-// package.
+// message with a machine-readable error type as found in the messages package.
package okenvelope
import (
"io"
-
- "github.com/minio/sha256-simd"
"orly.dev/chk"
- "orly.dev/envelopes"
"orly.dev/errorf"
- "orly.dev/eventid"
- "orly.dev/interfaces/codec"
"orly.dev/log"
+
+ "orly.dev/codec"
+ "orly.dev/envelopes"
+ "orly.dev/eventid"
+ "orly.dev/sha256"
"orly.dev/text"
)
@@ -57,9 +56,7 @@ func (en *T) ReasonString() string { return string(en.Reason) }
// Write the okenvelope.T to a provided io.Writer.
func (en *T) Write(w io.Writer) (err error) {
- msg := en.Marshal(nil)
- log.T.F("%s", msg)
- _, err = w.Write(msg)
+ _, err = w.Write(en.Marshal(nil))
return
}
diff --git a/envelopes/okenvelope/okenvelope_test.go b/envelopes/okenvelope/okenvelope_test.go
index f82777c..60f2348 100644
--- a/envelopes/okenvelope/okenvelope_test.go
+++ b/envelopes/okenvelope/okenvelope_test.go
@@ -2,9 +2,9 @@ package okenvelope
import (
"bytes"
+ "orly.dev/chk"
"testing"
- "orly.dev/chk"
"orly.dev/envelopes"
"orly.dev/envelopes/messages"
"orly.dev/eventid"
@@ -23,7 +23,7 @@ func TestMarshalUnmarshal(t *testing.T) {
copy(rb1, rb)
var rem []byte
var l string
- if l, rb = envelopes.Identify(rb); chk.E(err) {
+ if l, rb, err = envelopes.Identify(rb); chk.E(err) {
t.Fatal(err)
}
if l != L {
diff --git a/envelopes/reqenvelope/reqenvelope.go b/envelopes/reqenvelope/reqenvelope.go
index 705fa12..a4a06ab 100644
--- a/envelopes/reqenvelope/reqenvelope.go
+++ b/envelopes/reqenvelope/reqenvelope.go
@@ -4,11 +4,11 @@ package reqenvelope
import (
"io"
-
"orly.dev/chk"
+
+ "orly.dev/codec"
"orly.dev/envelopes"
"orly.dev/filters"
- "orly.dev/interfaces/codec"
"orly.dev/subscription"
"orly.dev/text"
)
@@ -102,7 +102,7 @@ func (en *T) Unmarshal(b []byte) (r []byte, err error) {
// Parse reads a REQ envelope from minified JSON into a newly allocated
// reqenvelope.T.
-func Parse(b []byte) (t *T, rem []byte, err error) {
+func (en *T) Parse(b []byte) (t *T, rem []byte, err error) {
t = New()
if rem, err = t.Unmarshal(b); chk.E(err) {
return
diff --git a/envelopes/reqenvelope/reqenvelope_test.go b/envelopes/reqenvelope/reqenvelope_test.go
index 04ef118..a00325e 100644
--- a/envelopes/reqenvelope/reqenvelope_test.go
+++ b/envelopes/reqenvelope/reqenvelope_test.go
@@ -2,9 +2,9 @@ package reqenvelope
import (
"bytes"
+ "orly.dev/chk"
"testing"
- "orly.dev/chk"
"orly.dev/envelopes"
"orly.dev/filters"
"orly.dev/subscription"
@@ -30,7 +30,7 @@ func TestMarshalUnmarshal(t *testing.T) {
copy(rb1, rb)
var rem []byte
var l string
- if l, rb = envelopes.Identify(rb); chk.E(err) {
+ if l, rb, err = envelopes.Identify(rb); chk.E(err) {
t.Fatal(err)
}
if l != L {
diff --git a/event/binary.go b/event/binary.go
index 2d64090..c5a834f 100644
--- a/event/binary.go
+++ b/event/binary.go
@@ -59,7 +59,7 @@ func (ev *E) UnmarshalBinary(r io.Reader) (err error) {
if ca, err = varint.Decode(r); chk.E(err) {
return
}
- ev.CreatedAt = timestamp.New(ca)
+ ev.CreatedAt = timestamp.New(int64(ca))
var k uint64
if k, err = varint.Decode(r); chk.E(err) {
return
@@ -69,13 +69,13 @@ func (ev *E) UnmarshalBinary(r io.Reader) (err error) {
if nTags, err = varint.Decode(r); chk.E(err) {
return
}
- ev.Tags = tags.NewWithCap(nTags)
+ ev.Tags = tags.NewWithCap(int(nTags))
for range nTags {
var nField uint64
if nField, err = varint.Decode(r); chk.E(err) {
return
}
- t := tag.NewWithCap(nField)
+ t := tag.NewWithCap(int(nField))
for range nField {
var lenField uint64
if lenField, err = varint.Decode(r); chk.E(err) {
diff --git a/event/json.go b/event/json.go
index 5801248..da54414 100644
--- a/event/json.go
+++ b/event/json.go
@@ -259,7 +259,7 @@ InVal:
if !bytes.Equal(jCreatedAt, key) {
goto invalid
}
- ev.CreatedAt = timestamp.New(uint(0))
+ ev.CreatedAt = timestamp.New(int64(0))
if r, err = ev.CreatedAt.Unmarshal(r); chk.T(err) {
return
}
diff --git a/eventid/eventid.go b/eventid/eventid.go
index 1190f85..4402974 100644
--- a/eventid/eventid.go
+++ b/eventid/eventid.go
@@ -4,12 +4,12 @@ package eventid
import (
"lukechampine.com/frand"
-
- "github.com/minio/sha256-simd"
"orly.dev/chk"
"orly.dev/errorf"
- "orly.dev/hex"
"orly.dev/log"
+
+ "orly.dev/hex"
+ "orly.dev/sha256"
)
// T is the SHA256 hash in hexadecimal of the canonical form of an event as
diff --git a/filter/filter.go b/filter/filter.go
index 3397062..2e0e5be 100644
--- a/filter/filter.go
+++ b/filter/filter.go
@@ -1,33 +1,27 @@
-// Package filter is a codec for the nostr filter (queries) and includes:
-//
-// - tools for matching them to events
-//
-// - a canonical format scheme to enable compactly
-//
-// - identifying subscription filters
-//
-// - a simplified filter that leaves out the IDs and Search fields for
-// use in the HTTP API.
+// Package filter is a codec for nostr filters (queries) and includes tools for
+// matching them to events, a canonical format scheme to enable compactly
+// identifying subscription filters, and a simplified filter that leaves out the
+// IDs and Search fields for use in the HTTP API.
package filter
import (
"bytes"
"encoding/binary"
+ "orly.dev/chk"
+ "orly.dev/errorf"
"sort"
"lukechampine.com/frand"
- "github.com/minio/sha256-simd"
- "orly.dev/chk"
"orly.dev/ec/schnorr"
"orly.dev/ec/secp256k1"
- "orly.dev/errorf"
"orly.dev/event"
"orly.dev/hex"
"orly.dev/ints"
"orly.dev/kind"
"orly.dev/kinds"
- "orly.dev/pointers"
+ "orly.dev/realy/pointers"
+ "orly.dev/sha256"
"orly.dev/tag"
"orly.dev/tags"
"orly.dev/text"
@@ -36,41 +30,36 @@ import (
// F is the primary query form for requesting events from a nostr relay.
//
-// The ordering of the fields of filters is not specified as in the protocol
-// there is no requirement to generate a hash for fast recognition of identical
-// filters.
-//
-// However, for internal use in a relay, by applying a consistent sort order,
-// this library will produce an identical JSON from the same *set* of fields no
-// matter what order they were provided.
+// The ordering of fields of filters is not specified as in the protocol there
+// is no requirement to generate a hash for fast recognition of identical
+// filters. However, for internal use in a relay, by applying a consistent sort
+// order, this library will produce an identical JSON from the same *set* of
+// fields no matter what order they were provided.
//
// This is to facilitate the deduplication of filters so an effective identical
// match is not performed on an identical filter.
type F struct {
- Ids *tag.T `json:"ids,omitempty"`
- Kinds *kinds.T `json:"kinds,omitempty"`
- Authors *tag.T `json:"authors,omitempty"`
- // Tags are internally stored with the key being prefixed with # and a-zA-Z
- // as the second character in the first field of a tag.T, but when marshaled
- // render as an object key that if not present is not rendered.
- Tags *tags.T `json:"-,omitempty"`
- Since *timestamp.T `json:"since,omitempty"`
- Until *timestamp.T `json:"until,omitempty"`
- Search []byte `json:"search,omitempty"`
- Limit *uint `json:"limit,omitempty"`
+ Ids *tag.T `json:"ids,omitempty"`
+ Kinds *kinds.T `json:"kinds,omitempty"`
+ Authors *tag.T `json:"authors,omitempty"`
+ Tags *tags.T `json:"-,omitempty"`
+ Since *timestamp.T `json:"since,omitempty"`
+ Until *timestamp.T `json:"until,omitempty"`
+ Search []byte `json:"search,omitempty"`
+ Limit *uint `json:"limit,omitempty"`
}
-// New creates a new, reasonably initialized filter that will be ready for most
-// uses without further allocations.
+// New creates a new, reasonably initialized filter that will be ready for most uses without
+// further allocations.
func New() (f *F) {
return &F{
Ids: tag.NewWithCap(10),
Kinds: kinds.NewWithCap(10),
Authors: tag.NewWithCap(10),
Tags: tags.New(),
- Since: new(timestamp.T),
- Until: new(timestamp.T),
- Search: nil,
+ // Since: timestamp.New(),
+ // Until: timestamp.New(),
+ Search: nil,
}
}
@@ -119,8 +108,8 @@ var (
Search = []byte("search")
)
-// Marshal a filter into raw JSON bytes, minified. The field ordering and sort
-// of fields is canonicalized so that a hash can identify the same filter.
+// Marshal a filter into raw JSON bytes, minified. The field ordering and sort of fields is
+// canonicalized so that a hash can identify the same filter.
func (f *F) Marshal(dst []byte) (b []byte) {
var err error
_ = err
@@ -159,9 +148,8 @@ func (f *F) Marshal(dst []byte) (b []byte) {
// } else {
// first = true
// }
- //
- // tags are stored as tags with the initial element the "#a" and the
- // rest the list in each element of the tags list. eg:
+ // tags are stored as tags with the initial element the "#a" and the rest the list in
+ // each element of the tags list. eg:
//
// [["#p",""," 'z' || tKey[1] < 'A' && tKey[1] > 'Z') {
- // the first "key" field must begin with '#' and the second be alpha
+ // first "key" field must begin with '#' and second be alpha
continue
}
values := tg.ToSliceOfBytes()[1:]
@@ -267,7 +254,7 @@ const (
// Unmarshal a filter from raw (minified) JSON bytes into the runtime format.
//
-// todo: this does not tolerate whitespace, but it's bleeding fast.
+// todo: this may tolerate whitespace, not certain currently.
func (f *F) Unmarshal(b []byte) (r []byte, err error) {
r = b[:]
var key []byte
@@ -316,7 +303,7 @@ func (f *F) Unmarshal(b []byte) (r []byte, err error) {
copy(k, key)
switch key[1] {
case 'e', 'p':
- // the tags must all be 64-character hexadecimal
+ // the tags must all be 64 character hexadecimal
var ff [][]byte
if ff, r, err = text.UnmarshalHexArray(
r,
@@ -326,7 +313,7 @@ func (f *F) Unmarshal(b []byte) (r []byte, err error) {
}
ff = append([][]byte{k}, ff...)
f.Tags = f.Tags.AppendTags(tag.New(ff...))
- // f.Tags.E = append(f.Tags.E, tag.New(ff...))
+ // f.Tags.F = append(f.Tags.F, tag.New(ff...))
default:
// other types of tags can be anything
var ff [][]byte
@@ -335,7 +322,7 @@ func (f *F) Unmarshal(b []byte) (r []byte, err error) {
}
ff = append([][]byte{k}, ff...)
f.Tags = f.Tags.AppendTags(tag.New(ff...))
- // f.Tags.E = append(f.Tags.E, tag.New(ff...))
+ // f.Tags.F = append(f.Tags.F, tag.New(ff...))
}
state = betweenKV
case IDs[0]:
@@ -431,10 +418,14 @@ func (f *F) Unmarshal(b []byte) (r []byte, err error) {
}
if r[0] == '}' {
state = afterClose
+ // log.I.Ln("afterClose")
+ // rem = rem[1:]
} else if r[0] == ',' {
state = openParen
+ // log.I.Ln("openParen")
} else if r[0] == '"' {
state = inKey
+ // log.I.Ln("inKey")
}
}
if len(r) == 0 {
@@ -453,35 +444,50 @@ invalid:
// Matches checks a filter against an event and determines if the event matches the filter.
func (f *F) Matches(ev *event.E) bool {
if ev == nil {
+ // log.F.ToSliceOfBytes("nil event")
return false
}
if f.Ids.Len() > 0 && !f.Ids.Contains(ev.Id) {
+ // log.F.ToSliceOfBytes("no ids in filter match event\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String())
return false
}
if f.Kinds.Len() > 0 && !f.Kinds.Contains(ev.Kind) {
+ // log.F.ToSliceOfBytes("no matching kinds in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String())
return false
}
if f.Authors.Len() > 0 && !f.Authors.Contains(ev.Pubkey) {
+ // log.F.ToSliceOfBytes("no matching authors in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String())
return false
}
if f.Tags.Len() > 0 && !ev.Tags.Intersects(f.Tags) {
return false
}
+ // if f.Tags.Len() > 0 {
+ // for _, v := range f.Tags.ToSliceOfTags() {
+ // tvs := v.ToSliceOfBytes()
+ // if !ev.Tags.ContainsAny(v.FilterKey(), tag.New(tvs...)) {
+ // return false
+ // }
+ // }
+ // return false
+ // }
if f.Since.Int() != 0 && ev.CreatedAt.I64() < f.Since.I64() {
+ // log.F.ToSliceOfBytes("event is older than since\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String())
return false
}
if f.Until.Int() != 0 && ev.CreatedAt.I64() > f.Until.I64() {
+ // log.F.ToSliceOfBytes("event is newer than until\nEVENT %s\nFILTER %s", ev.ToObject().String(), f.ToObject().String())
return false
}
return true
}
-// Fingerprint returns an 8 byte truncated sha256 hash of the filter in the
-// canonical form created by Marshal.
+// Fingerprint returns an 8 byte truncated sha256 hash of the filter in the canonical form
+// created by Marshal.
//
-// This hash is generated via the JSON encoded form of the filter, with the
-// Limit field removed. This value should be set to zero after all results from
-// a query of stored events, as per NIP-01.
+// This hash is generated via the JSON encoded form of the filter, with the Limit field removed.
+// This value should be set to zero after all results from a query of stored events, as per
+// NIP-01.
func (f *F) Fingerprint() (fp uint64, err error) {
lim := f.Limit
f.Limit = nil
@@ -494,8 +500,8 @@ func (f *F) Fingerprint() (fp uint64, err error) {
return
}
-// Sort the fields of a filter so a fingerprint on a filter that has the same
-// set of content produces the same fingerprint.
+// Sort the fields of a filter so a fingerprint on a filter that has the same set of content
+// produces the same fingerprint.
func (f *F) Sort() {
if f.Ids != nil {
sort.Sort(f.Ids)
@@ -521,8 +527,7 @@ func arePointerValuesEqual[V comparable](a *V, b *V) bool {
return false
}
-// Equal checks a filter against another filter to see if they are the same
-// filter.
+// Equal checks a filter against another filter to see if they are the same filter.
func (f *F) Equal(b *F) bool {
// sort the fields so they come out the same
f.Sort()
@@ -545,8 +550,9 @@ func GenFilter() (f *F, err error) {
n := frand.Intn(16)
for _ = range n {
id := make([]byte, sha256.Size)
- _, _ = frand.Read(id)
+ frand.Read(id)
f.Ids = f.Ids.Append(id)
+ // f.Ids.Field = append(f.Ids.Field, id)
}
n = frand.Intn(16)
for _ = range n {
@@ -560,6 +566,7 @@ func GenFilter() (f *F, err error) {
}
pk := sk.PubKey()
f.Authors = f.Authors.Append(schnorr.SerializePubKey(pk))
+ // f.Authors.Field = append(f.Authors.Field, schnorr.SerializePubKey(pk))
}
a := frand.Intn(16)
if a < n {
@@ -575,22 +582,24 @@ func GenFilter() (f *F, err error) {
var idb [][]byte
for range l {
id := make([]byte, sha256.Size)
- _, _ = frand.Read(id)
+ frand.Read(id)
idb = append(idb, id)
}
idb = append([][]byte{{'#', byte(b)}}, idb...)
f.Tags = f.Tags.AppendTags(tag.FromBytesSlice(idb...))
+ // f.Tags.F = append(f.Tags.F, tag.FromBytesSlice(idb...))
} else {
var idb [][]byte
for range l {
bb := make([]byte, frand.Intn(31)+1)
- _, _ = frand.Read(bb)
+ frand.Read(bb)
id := make([]byte, 0, len(bb)*2)
id = hex.EncAppend(id, bb)
idb = append(idb, id)
}
idb = append([][]byte{{'#', byte(b)}}, idb...)
f.Tags = f.Tags.AppendTags(tag.FromBytesSlice(idb...))
+ // f.Tags.F = append(f.Tags.F, tag.FromBytesSlice(idb...))
}
}
tn := int(timestamp.Now().I64())
diff --git a/filter/filter_test.go b/filter/filter_test.go
index 16e755e..60a5d01 100644
--- a/filter/filter_test.go
+++ b/filter/filter_test.go
@@ -2,9 +2,8 @@ package filter
import (
"bytes"
- "testing"
-
"orly.dev/chk"
+ "testing"
)
func TestT_MarshalUnmarshal(t *testing.T) {
diff --git a/filter/simple.go b/filter/simple.go
index 297a5e7..5f26489 100644
--- a/filter/simple.go
+++ b/filter/simple.go
@@ -2,17 +2,17 @@ package filter
import (
"encoding/binary"
+ "orly.dev/chk"
+ "orly.dev/errorf"
"sort"
- "github.com/minio/sha256-simd"
- "orly.dev/chk"
"orly.dev/ec/schnorr"
- "orly.dev/errorf"
"orly.dev/event"
"orly.dev/hex"
"orly.dev/ints"
"orly.dev/kinds"
- "orly.dev/pointers"
+ "orly.dev/realy/pointers"
+ "orly.dev/sha256"
"orly.dev/tag"
"orly.dev/tags"
"orly.dev/text"
@@ -253,7 +253,7 @@ func (f *S) Unmarshal(b []byte) (r []byte, err error) {
}
ff = append([][]byte{k}, ff...)
f.Tags = f.Tags.AppendTags(tag.New(ff...))
- // s.Tags.E = append(s.Tags.E, tag.New(ff...))
+ // s.Tags.F = append(s.Tags.F, tag.New(ff...))
default:
// other types of tags can be anything
var ff [][]byte
@@ -262,7 +262,7 @@ func (f *S) Unmarshal(b []byte) (r []byte, err error) {
}
ff = append([][]byte{k}, ff...)
f.Tags = f.Tags.AppendTags(tag.New(ff...))
- // s.Tags.E = append(s.Tags.E, tag.New(ff...))
+ // s.Tags.F = append(s.Tags.F, tag.New(ff...))
}
state = betweenKV
case Kinds[0]:
@@ -322,15 +322,15 @@ invalid:
// Matches checks if a filter.S matches an event.
func (f *S) Matches(ev *event.E) bool {
if ev == nil {
- // log.E.ToSliceOfBytes("nil event")
+ // log.F.ToSliceOfBytes("nil event")
return false
}
if f.Kinds.Len() > 0 && !f.Kinds.Contains(ev.Kind) {
- // log.E.ToSliceOfBytes("no matching kinds in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), s.ToObject().String())
+ // log.F.ToSliceOfBytes("no matching kinds in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), s.ToObject().String())
return false
}
if f.Authors.Len() > 0 && !f.Authors.Contains(ev.Pubkey) {
- // log.E.ToSliceOfBytes("no matching authors in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), s.ToObject().String())
+ // log.F.ToSliceOfBytes("no matching authors in filter\nEVENT %s\nFILTER %s", ev.ToObject().String(), s.ToObject().String())
return false
}
if f.Tags.Len() > 0 && !ev.Tags.Intersects(f.Tags) {
diff --git a/filters/filters.go b/filters/filters.go
index b373c1a..70a1fab 100644
--- a/filters/filters.go
+++ b/filters/filters.go
@@ -34,7 +34,7 @@ func (f *T) Len() int { return len(f.F) }
// New creates a new filters.T out of a variadic list of filter.F.
func New(ff ...*filter.F) (f *T) { return &T{F: ff} }
-// Match checks if a set of filters.T matches on an event.E.
+// Match checks if a set of filters.T matches on an event.E.
func (f *T) Match(event *event.E) bool {
for _, f := range f.F {
if f.Matches(event) {
diff --git a/filters/filters_test.go b/filters/filters_test.go
index 569f6ba..b9666e8 100644
--- a/filters/filters_test.go
+++ b/filters/filters_test.go
@@ -2,9 +2,8 @@ package filters
import (
"bytes"
- "testing"
-
"orly.dev/chk"
+ "testing"
)
func TestT_MarshalUnmarshal(t *testing.T) {
diff --git a/go.mod b/go.mod
index 653f88e..5c913f2 100644
--- a/go.mod
+++ b/go.mod
@@ -1,54 +1,66 @@
module orly.dev
-go 1.24.4
+go 1.24.2
require (
github.com/adrg/xdg v0.5.3
+ github.com/alexflint/go-arg v1.6.0
github.com/danielgtaylor/huma/v2 v2.34.1
- github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
+ github.com/davecgh/go-spew v1.1.1
github.com/dgraph-io/badger/v4 v4.7.0
github.com/fasthttp/websocket v1.5.12
github.com/fatih/color v1.18.0
github.com/gobwas/httphead v0.1.0
- github.com/gobwas/ws v1.2.1
+ github.com/gobwas/ws v1.4.0
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
+ github.com/klauspost/cpuid/v2 v2.2.11
github.com/minio/sha256-simd v1.0.1
github.com/pkg/profile v1.7.0
+ github.com/puzpuzpuz/xsync/v3 v3.5.1
github.com/rs/cors v1.11.1
github.com/stretchr/testify v1.10.0
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b
go-simpler.org/env v0.12.0
go.uber.org/atomic v1.11.0
- golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
+ golang.org/x/crypto v0.40.0
+ golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc
+ golang.org/x/lint v0.0.0-20241112194109-818c5a804067
+ golang.org/x/net v0.42.0
+ golang.org/x/sync v0.16.0
+ honnef.co/go/tools v0.6.1
lukechampine.com/frand v1.5.1
)
require (
- github.com/andybalholm/brotli v1.1.1 // indirect
+ github.com/BurntSushi/toml v1.5.0 // indirect
+ github.com/alexflint/go-scalar v1.2.0 // indirect
+ github.com/andybalholm/brotli v1.2.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/felixge/fgprof v0.9.5 // indirect
- github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/gobwas/pool v0.2.1 // indirect
github.com/google/flatbuffers v25.2.10+incompatible // indirect
github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect
github.com/klauspost/compress v1.18.0 // indirect
- github.com/klauspost/cpuid/v2 v2.2.11 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
- github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/savsgio/gotils v0.0.0-20250408102913-196191ec6287 // indirect
github.com/templexxx/cpu v0.1.1 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
- github.com/valyala/fasthttp v1.62.0 // indirect
+ github.com/valyala/fasthttp v1.63.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
- go.opentelemetry.io/otel v1.35.0 // indirect
- go.opentelemetry.io/otel/metric v1.35.0 // indirect
- go.opentelemetry.io/otel/trace v1.35.0 // indirect
- golang.org/x/net v0.40.0 // indirect
- golang.org/x/sys v0.33.0 // indirect
+ go.opentelemetry.io/otel v1.37.0 // indirect
+ go.opentelemetry.io/otel/metric v1.37.0 // indirect
+ go.opentelemetry.io/otel/trace v1.37.0 // indirect
+ golang.org/x/exp/typeparams v0.0.0-20250711185948-6ae5c78190dc // indirect
+ golang.org/x/mod v0.26.0 // indirect
+ golang.org/x/sys v0.34.0 // indirect
+ golang.org/x/text v0.27.0 // indirect
+ golang.org/x/tools v0.35.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index b51e204..743cffa 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,13 @@
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
github.com/adrg/xdg v0.5.3/go.mod h1:nlTsY+NNiCBGCK2tpm09vRqfVzrc2fLmXGpBLF0zlTQ=
-github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
-github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
+github.com/alexflint/go-arg v1.6.0 h1:wPP9TwTPO54fUVQl4nZoxbFfKCcy5E6HBCumj1XVRSo=
+github.com/alexflint/go-arg v1.6.0/go.mod h1:A7vTJzvjoaSTypg4biM5uYNTkJ27SkNTArtYXnlqVO8=
+github.com/alexflint/go-scalar v1.2.0 h1:WR7JPKkeNpnYIOfHRa7ivM21aWAdHD0gEWHCx+WQBRw=
+github.com/alexflint/go-scalar v1.2.0/go.mod h1:LoFvNMqS1CPrMVltza4LvnGKhaSpc3oyLEBUZVhhS2o=
+github.com/andybalholm/brotli v1.2.0 h1:ukwgCxwYrmACq68yiUqwIWnGY0cTPox/M94sVwToPjQ=
+github.com/andybalholm/brotli v1.2.0/go.mod h1:rzTDkvFWvIrjDXZHkuS16NPggd91W3kUSvPlQ1pLaKY=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
@@ -16,9 +22,8 @@ github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38
github.com/danielgtaylor/huma/v2 v2.34.1 h1:EmOJAbzEGfy0wAq/QMQ1YKfEMBEfE94xdBRLPBP0gwQ=
github.com/danielgtaylor/huma/v2 v2.34.1/go.mod h1:ynwJgLk8iGVgoaipi5tgwIQ5yoFNmiu+QdhU7CEEmhk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
-github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgraph-io/badger/v4 v4.7.0 h1:Q+J8HApYAY7UMpL8d9owqiB+odzEc0zn/aqOD9jhc6Y=
github.com/dgraph-io/badger/v4 v4.7.0/go.mod h1:He7TzG3YBy3j4f5baj5B7Zl2XyfNe5bl4Udl0aPemVA=
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
@@ -35,16 +40,17 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu
github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
-github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
+github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs=
+github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc=
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
@@ -79,17 +85,19 @@ github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5
github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0=
github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
+github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
-github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 h1:D0vL7YNisV2yqE55+q0lFuGse6U8lxlg7fYTctlT5Gc=
-github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38/go.mod h1:sM7Mt7uEoCeFSCBM+qBrqvEo+/9vdmj19wzp3yzUhmg=
+github.com/savsgio/gotils v0.0.0-20250408102913-196191ec6287 h1:qIQ0tWF9vxGtkJa24bR+2i53WBCz1nW/Pc47oVYauC4=
+github.com/savsgio/gotils v0.0.0-20250408102913-196191ec6287/go.mod h1:sM7Mt7uEoCeFSCBM+qBrqvEo+/9vdmj19wzp3yzUhmg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
@@ -101,31 +109,58 @@ github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b h1:XeDLE6c9mzHpdv3W
github.com/templexxx/xhex v0.0.0-20200614015412-aed53437177b/go.mod h1:7rwmCH0wC2fQvNEvPZ3sKXukhyCTyiaZ5VTZMQYpZKQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.62.0 h1:8dKRBX/y2rCzyc6903Zu1+3qN0H/d2MsxPPmVNamiH0=
-github.com/valyala/fasthttp v1.62.0/go.mod h1:FCINgr4GKdKqV8Q0xv8b+UxPV+H/O5nNFo3D+r54Htg=
+github.com/valyala/fasthttp v1.63.0 h1:DisIL8OjB7ul2d7cBaMRcKTQDYnrGy56R4FCiuDP0Ns=
+github.com/valyala/fasthttp v1.63.0/go.mod h1:REc4IeW+cAEyLrRPa5A81MIjvz0QE1laoTX2EaPHKJM=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
go-simpler.org/env v0.12.0 h1:kt/lBts0J1kjWJAnB740goNdvwNxt5emhYngL0Fzufs=
go-simpler.org/env v0.12.0/go.mod h1:cc/5Md9JCUM7LVLtN0HYjPTDcI3Q8TDaPlNTAlDU+WI=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
-go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
-go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
-go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
-go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
-go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
+go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
+go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
+go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
+go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
+go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
-golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
-golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
-golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
+golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
+golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc h1:TS73t7x3KarrNd5qAipmspBDS1rkMcgVG/fS1aRb4Rc=
+golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc=
+golang.org/x/exp/typeparams v0.0.0-20250711185948-6ae5c78190dc h1:mPO8OXAJgNBiEFwAG1Lh4pe7uxJgEWPk+io1+SzvMfk=
+golang.org/x/exp/typeparams v0.0.0-20250711185948-6ae5c78190dc/go.mod h1:LKZHyeOpPuZcMgxeHjJp4p5yvxrCX1xDvH10zYHhjjQ=
+golang.org/x/lint v0.0.0-20241112194109-818c5a804067 h1:adDmSQyFTCiv19j015EGKJBoaa7ElV0Q1Wovb/4G7NA=
+golang.org/x/lint v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
+golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
+golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
-golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
+golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
+golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
+golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
+golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY=
+golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -134,5 +169,7 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.6.1 h1:R094WgE8K4JirYjBaOpz/AvTyUu/3wbmAoskKN/pxTI=
+honnef.co/go/tools v0.6.1/go.mod h1:3puzxxljPCe8RGJX7BIy1plGbxEOZni5mR2aXe3/uk4=
lukechampine.com/frand v1.5.1 h1:fg0eRtdmGFIxhP5zQJzM1lFDbD6CUfu/f+7WgAZd5/w=
lukechampine.com/frand v1.5.1/go.mod h1:4VstaWc2plN4Mjr10chUD46RAVGWhpkZ5Nja8+Azp0Q=
diff --git a/helpers/helpers.go b/helpers/helpers.go
deleted file mode 100644
index a25980f..0000000
--- a/helpers/helpers.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package helpers
-
-import (
- "net/http"
- "strings"
-)
-
-func GenerateDescription(text string, scopes []string) string {
- if len(scopes) == 0 {
- return text
- }
- result := make([]string, 0)
- for _, value := range scopes {
- result = append(result, "`"+value+"`")
- }
- return text + "
**Scopes** " + strings.Join(result, ", ")
-}
-
-func GetRemoteFromReq(r *http.Request) (rr string) {
- // reverse proxy should populate this field so we see the remote not the
- // proxy
- remoteAddress := r.Header.Get("X-Forwarded-For")
- if remoteAddress == "" {
- remoteAddress = r.Header.Get("Forwarded")
- if remoteAddress == "" {
- rr = r.RemoteAddr
- return
- } else {
- splitted := strings.Split(remoteAddress, ", ")
- if len(splitted) >= 1 {
- forwarded := strings.Split(splitted[0], "=")
- if len(forwarded) == 2 {
- // by the standard this should be the address of the client.
- rr = splitted[1]
- }
- return
- }
- }
- }
- splitted := strings.Split(remoteAddress, " ")
- if len(splitted) == 1 {
- rr = splitted[0]
- }
- if len(splitted) == 2 {
- sp := strings.Split(splitted[0], ",")
- rr = sp[0]
- }
- return
-}
diff --git a/hex/aliases.go b/hex/aliases.go
index 43dbb39..5b87794 100644
--- a/hex/aliases.go
+++ b/hex/aliases.go
@@ -4,11 +4,10 @@ package hex
import (
"encoding/hex"
-
- "github.com/templexxx/xhex"
-
"orly.dev/chk"
"orly.dev/errorf"
+
+ "github.com/templexxx/xhex"
)
var Enc = hex.EncodeToString
@@ -23,7 +22,7 @@ var DecLen = hex.DecodedLen
type InvalidByteError = hex.InvalidByteError
-// EncAppend uses xhex to encode a slice of bytes and appends it to a provided destination slice.
+// EncAppend uses xhex to encode a slice of bytes and appends it to a provided destination slice.
func EncAppend(dst, src []byte) (b []byte) {
l := len(dst)
dst = append(dst, make([]byte, len(src)*2)...)
diff --git a/httpauth/nip98auth.go b/httpauth/nip98auth.go
index 2438d7b..3b2bf46 100644
--- a/httpauth/nip98auth.go
+++ b/httpauth/nip98auth.go
@@ -4,12 +4,12 @@ import (
"encoding/base64"
"net/http"
"net/url"
+ "orly.dev/chk"
+ "orly.dev/log"
"strings"
- "orly.dev/chk"
"orly.dev/event"
"orly.dev/kind"
- "orly.dev/log"
"orly.dev/signer"
"orly.dev/tag"
"orly.dev/tags"
diff --git a/httpauth/validate.go b/httpauth/validate.go
index 9492160..0457249 100644
--- a/httpauth/validate.go
+++ b/httpauth/validate.go
@@ -4,17 +4,16 @@ import (
"encoding/base64"
"fmt"
"net/http"
+ "orly.dev/chk"
+ "orly.dev/errorf"
+ "orly.dev/log"
"strings"
"time"
- "orly.dev/chk"
- "orly.dev/errorf"
"orly.dev/event"
"orly.dev/ints"
"orly.dev/kind"
"orly.dev/tag"
-
- "orly.dev/log"
)
var ErrMissingKey = fmt.Errorf(
@@ -29,7 +28,6 @@ func CheckAuth(r *http.Request, tolerance ...time.Duration) (
pubkey []byte, err error,
) {
val := r.Header.Get(HeaderKey)
- log.I.F(val)
if val == "" {
err = ErrMissingKey
valid = true
@@ -46,7 +44,6 @@ func CheckAuth(r *http.Request, tolerance ...time.Duration) (
log.I.F("validating auth '%s'", val)
switch {
case strings.HasPrefix(val, NIP98Prefix):
- log.T.F(val)
split := strings.Split(val, " ")
if len(split) == 1 {
err = errorf.E(
@@ -56,8 +53,7 @@ func CheckAuth(r *http.Request, tolerance ...time.Duration) (
}
if len(split) > 2 {
err = errorf.E(
- "extraneous content after second field space separated: %s",
- val,
+ "extraneous content after second field space separated: %s", val,
)
return
}
@@ -79,8 +75,7 @@ func CheckAuth(r *http.Request, tolerance ...time.Duration) (
if !ev.Kind.Equal(kind.HTTPAuth) {
err = errorf.E(
"invalid kind %d %s in nip-98 http auth event, require %d %s",
- ev.Kind.K, ev.Kind.Name(), kind.HTTPAuth.K,
- kind.HTTPAuth.Name(),
+ ev.Kind.K, ev.Kind.Name(), kind.HTTPAuth.K, kind.HTTPAuth.Name(),
)
return
}
@@ -180,15 +175,10 @@ func CheckAuth(r *http.Request, tolerance ...time.Duration) (
return
}
}
- log.T.F("%d %s", time.Now().Unix(), ev.Serialize())
if valid, err = ev.Verify(); chk.E(err) {
return
}
- if valid {
- log.I.F("event verified %0x", ev.Pubkey)
- }
if !valid {
- log.T.F("event not verified")
return
}
pubkey = ev.Pubkey
diff --git a/interfaces/store/store_interface.go b/interfaces/store/store_interface.go
index e2bc053..ba78cfd 100644
--- a/interfaces/store/store_interface.go
+++ b/interfaces/store/store_interface.go
@@ -33,6 +33,11 @@ type I interface {
Syncer
LogLeveler
EventIdSerialer
+ Initer
+}
+
+type Initer interface {
+ Init(path string) (err error)
}
type Pather interface {
diff --git a/interrupt/main.go b/interrupt/main.go
index ed02eeb..aa25e82 100644
--- a/interrupt/main.go
+++ b/interrupt/main.go
@@ -5,12 +5,13 @@ package interrupt
import (
"fmt"
+ "orly.dev/log"
"os"
"os/signal"
"runtime"
- "go.uber.org/atomic"
- "orly.dev/log"
+ "orly.dev/atomic"
+ "orly.dev/qu"
)
// HandlerWithSource is an interrupt handling closure and the source location that it was sent
@@ -32,7 +33,7 @@ var (
signals = []os.Signal{os.Interrupt}
// ShutdownRequestChan is a channel that can receive shutdown requests
- ShutdownRequestChan = make(chan struct{})
+ ShutdownRequestChan = qu.T()
// addHandlerChan is used to add an interrupt handler to the list of handlers to be invoked
// on SIGINT (Ctrl+C) signals.
@@ -40,7 +41,7 @@ var (
// HandlersDone is closed after all interrupt handlers run the first time an interrupt is
// signaled.
- HandlersDone = make(chan struct{})
+ HandlersDone = make(qu.C)
interruptCallbacks []func()
interruptCallbackSources []string
@@ -60,7 +61,7 @@ func Listener() {
interruptCallbacks[idx]()
}
log.D.Ln("interrupt handlers finished")
- close(HandlersDone)
+ HandlersDone.Q()
if RestartRequested {
Restart()
}
@@ -74,7 +75,7 @@ out:
invokeCallbacks()
break out
- case <-ShutdownRequestChan:
+ case <-ShutdownRequestChan.Wait():
log.W.Ln("received shutdown request - shutting down...")
requested.Store(true)
invokeCallbacks()
@@ -87,7 +88,7 @@ out:
handler.Source,
)
- case <-HandlersDone:
+ case <-HandlersDone.Wait():
break out
}
}
@@ -118,7 +119,7 @@ func Request() {
return
}
requested.Store(true)
- close(ShutdownRequestChan)
+ ShutdownRequestChan.Q()
var ok bool
select {
case _, ok = <-ShutdownRequestChan:
diff --git a/interrupt/restart.go b/interrupt/restart.go
index f02c9c6..6c4dde4 100644
--- a/interrupt/restart.go
+++ b/interrupt/restart.go
@@ -3,12 +3,11 @@
package interrupt
import (
+ "orly.dev/log"
"os"
"syscall"
"github.com/kardianos/osext"
-
- "orly.dev/log"
)
// Restart uses syscall.Exec to restart the process. MacOS and Windows are not implemented,
diff --git a/ints/gen/pregen.go b/ints/gen/pregen.go
index 2411ff8..62f9dc9 100644
--- a/ints/gen/pregen.go
+++ b/ints/gen/pregen.go
@@ -4,9 +4,8 @@ package main
import (
"fmt"
- "os"
-
"orly.dev/chk"
+ "os"
)
func main() {
diff --git a/ints/ints.go b/ints/ints.go
index 72d61ab..63578bf 100644
--- a/ints/ints.go
+++ b/ints/ints.go
@@ -7,10 +7,9 @@ package ints
import (
_ "embed"
"io"
+ "orly.dev/errorf"
"golang.org/x/exp/constraints"
-
- "orly.dev/errorf"
)
// run this to regenerate (pointlessly) the base 10 array of 4 places per entry
@@ -19,6 +18,8 @@ import (
//go:embed base10k.txt
var base10k []byte
+const base = 10000
+
// T is an integer with a fast codec to decimal ASCII.
type T struct {
N uint64
diff --git a/ints/ints_test.go b/ints/ints_test.go
index 1e02b3f..de0a457 100644
--- a/ints/ints_test.go
+++ b/ints/ints_test.go
@@ -2,12 +2,11 @@ package ints
import (
"math"
+ "orly.dev/chk"
"strconv"
"testing"
"lukechampine.com/frand"
-
- "orly.dev/chk"
)
func TestMarshalUnmarshal(t *testing.T) {
diff --git a/json/base64.go b/json/base64.go
index 37e5339..f50371f 100644
--- a/json/base64.go
+++ b/json/base64.go
@@ -3,9 +3,9 @@ package json
import (
"bytes"
"encoding/base64"
-
"orly.dev/chk"
"orly.dev/errorf"
+
"orly.dev/text"
)
diff --git a/json/bech32.go b/json/bech32.go
index 985e81d..f642c5d 100644
--- a/json/bech32.go
+++ b/json/bech32.go
@@ -2,11 +2,11 @@ package json
import (
"bytes"
+ "orly.dev/chk"
+ "orly.dev/errorf"
"orly.dev/bech32encoding"
- "orly.dev/chk"
"orly.dev/ec/bech32"
- "orly.dev/errorf"
"orly.dev/text"
)
diff --git a/json/examples_test.go b/json/examples_test.go
index 9560edb..de8a863 100644
--- a/json/examples_test.go
+++ b/json/examples_test.go
@@ -3,8 +3,8 @@ package json
import (
"bytes"
"fmt"
-
"orly.dev/chk"
+
"orly.dev/hex"
)
diff --git a/json/keyvalue.go b/json/keyvalue.go
index 9a50af0..bb24908 100644
--- a/json/keyvalue.go
+++ b/json/keyvalue.go
@@ -2,9 +2,9 @@ package json
import (
"io"
-
"orly.dev/chk"
- "orly.dev/interfaces/codec"
+
+ "orly.dev/codec"
)
// An Object is an (not necessarily) ordered list of KeyValue.
diff --git a/json/signed.go b/json/signed.go
index dfad759..3762970 100644
--- a/json/signed.go
+++ b/json/signed.go
@@ -2,8 +2,8 @@ package json
import (
"golang.org/x/exp/constraints"
-
"orly.dev/chk"
+
"orly.dev/ints"
)
diff --git a/json/unsigned.go b/json/unsigned.go
index c424593..e784006 100644
--- a/json/unsigned.go
+++ b/json/unsigned.go
@@ -2,8 +2,8 @@ package json
import (
"golang.org/x/exp/constraints"
-
"orly.dev/chk"
+
"orly.dev/ints"
)
diff --git a/keys/keys.go b/keys/keys.go
new file mode 100644
index 0000000..719ffc0
--- /dev/null
+++ b/keys/keys.go
@@ -0,0 +1,82 @@
+// Package keys is a set of helpers for generating and converting public/secret
+// keys to hex and back to binary.
+package keys
+
+import (
+ "bytes"
+ "orly.dev/chk"
+
+ "orly.dev/ec/schnorr"
+ "orly.dev/hex"
+ "orly.dev/p256k"
+)
+
+// GeneratePrivateKey - deprecated, use GenerateSecretKeyHex
+var GeneratePrivateKey = func() string { return GenerateSecretKeyHex() }
+
+// GenerateSecretKey creates a new secret key and returns the bytes of the secret.
+func GenerateSecretKey() (skb []byte, err error) {
+ signer := &p256k.Signer{}
+ if err = signer.Generate(); chk.E(err) {
+ return
+ }
+ skb = signer.Sec()
+ return
+}
+
+// GenerateSecretKeyHex generates a secret key and encodes the bytes as hex.
+func GenerateSecretKeyHex() (sks string) {
+ skb, err := GenerateSecretKey()
+ if chk.E(err) {
+ return
+ }
+ return hex.Enc(skb)
+}
+
+// GetPublicKeyHex generates a public key from a hex encoded secret key.
+func GetPublicKeyHex(sk string) (pk string, err error) {
+ var b []byte
+ if b, err = hex.Dec(sk); chk.E(err) {
+ return
+ }
+ signer := &p256k.Signer{}
+ if err = signer.InitSec(b); chk.E(err) {
+ return
+ }
+
+ return hex.Enc(signer.Pub()), nil
+}
+
+// SecretBytesToPubKeyHex generates a public key from secret key bytes.
+func SecretBytesToPubKeyHex(skb []byte) (pk string, err error) {
+ signer := &p256k.Signer{}
+ if err = signer.InitSec(skb); chk.E(err) {
+ return
+ }
+ return hex.Enc(signer.Pub()), nil
+}
+
+// IsValid32ByteHex checks that a hex string is a valid 32 bytes lower case hex encoded value as
+// per nostr NIP-01 spec.
+func IsValid32ByteHex[V []byte | string](pk V) bool {
+	if !bytes.Equal(bytes.ToLower([]byte(pk)), []byte(pk)) {
+		return false
+	}
+	dec := make([]byte, 32)
+	if _, err := hex.DecBytes(dec, []byte(pk)); chk.E(err) {
+		return false
+	}
+	return len(pk) == 64
+}
+
+// IsValidPublicKey checks that a hex encoded public key is a valid BIP-340 public key.
+func IsValidPublicKey[V []byte | string](pk V) bool {
+ v, _ := hex.Dec(string(pk))
+ _, err := schnorr.ParsePubKey(v)
+ return err == nil
+}
+
+// HexPubkeyToBytes decodes a pubkey from hex encoded string/bytes.
+func HexPubkeyToBytes[V []byte | string](hpk V) (pkb []byte, err error) {
+ return hex.DecAppend(nil, []byte(hpk))
+}
diff --git a/kind/kind.go b/kind/kind.go
index e6a0ba6..84a346f 100644
--- a/kind/kind.go
+++ b/kind/kind.go
@@ -4,11 +4,11 @@
package kind
import (
+ "orly.dev/chk"
"sync"
"golang.org/x/exp/constraints"
- "orly.dev/chk"
"orly.dev/ints"
)
diff --git a/kind/kind_test.go b/kind/kind_test.go
index 22730a5..72d38e2 100644
--- a/kind/kind_test.go
+++ b/kind/kind_test.go
@@ -1,11 +1,10 @@
package kind
import (
+ "orly.dev/chk"
"testing"
"lukechampine.com/frand"
-
- "orly.dev/chk"
)
func TestMarshalUnmarshal(t *testing.T) {
diff --git a/kinds/kinds_test.go b/kinds/kinds_test.go
index fec719d..21a43af 100644
--- a/kinds/kinds_test.go
+++ b/kinds/kinds_test.go
@@ -1,11 +1,11 @@
package kinds
import (
+ "orly.dev/chk"
"testing"
"lukechampine.com/frand"
- "orly.dev/chk"
"orly.dev/kind"
)
diff --git a/layer2/badgerbadger/badgerbadger.go b/layer2/badgerbadger/badgerbadger.go
new file mode 100644
index 0000000..3abefec
--- /dev/null
+++ b/layer2/badgerbadger/badgerbadger.go
@@ -0,0 +1,67 @@
+// Package badgerbadger is a test of the layer 2 that uses two instances of the
+// ratel event store, meant for testing the layer 2 protocol with two tiers of
+// the database a size limited cache and a large non-purging store.
+package badgerbadger
+
+import (
+ "sync"
+
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/eventid"
+ "orly.dev/filter"
+ "orly.dev/layer2"
+ "orly.dev/ratel"
+ "orly.dev/store"
+)
+
+// Backend is a hybrid badger/badger eventstore where L1 will have GC enabled
+// and L2 will not. This is mainly for testing, as both are local.
+type Backend struct {
+ *layer2.Backend
+}
+
+var _ store.I = (*Backend)(nil)
+
+// GetBackend returns a l2.Backend that combines two differently configured
+// backends... the settings need to be configured in the ratel.T data structure
+// before calling this.
+func GetBackend(c context.T, wg *sync.WaitGroup, L1, L2 *ratel.T) (es store.I) {
+ // log.I.S(L1, L2)
+ es = &layer2.Backend{Ctx: c, WG: wg, L1: L1, L2: L2}
+ return
+}
+
+// Init sets up both layers of the badger event store rooted at the given
+// path by delegating to the embedded layer2.Backend.
+//
+// The L1 and L2 ratel instances must be configured on the embedded
+// layer2.Backend before calling this.
+func (b *Backend) Init(path string) (err error) { return b.Backend.Init(path) }
+
+// Close shuts down both layers of the underlying event store by
+// delegating to the embedded layer2.Backend.
+func (b *Backend) Close() (err error) { return b.Backend.Close() }
+
+// // CountEvents returns the number of events found matching the filter.
+// func (b *Backend) CountEvents(c context.F, f *filter.F) (count int, approx bool, err error) {
+// return b.Backend.CountEvents(c, f)
+// }
+
+// DeleteEvent removes an event from the event store.
+func (b *Backend) DeleteEvent(c context.T, eid *eventid.T) (err error) {
+ return b.Backend.DeleteEvent(c, eid)
+}
+
+// QueryEvents searches for events that match a filter and returns them
+// asynchronously over a provided channel.
+func (b *Backend) QueryEvents(c context.T, f *filter.F) (
+ ch event.Ts, err error,
+) {
+ return b.Backend.QueryEvents(c, f)
+}
+
+// SaveEvent writes an event to the event store.
+func (b *Backend) SaveEvent(c context.T, ev *event.E) (err error) {
+ return b.Backend.SaveEvent(c, ev)
+}
diff --git a/layer2/badgerbadger/tester/badgerbadger.go b/layer2/badgerbadger/tester/badgerbadger.go
new file mode 100644
index 0000000..774f6c0
--- /dev/null
+++ b/layer2/badgerbadger/tester/badgerbadger.go
@@ -0,0 +1,216 @@
+// Package main is a tester for a layer2 database scheme with one ratel DB with
+// cache and the second not, testing the maintenance of the cache utilization
+// and second level being accessed to fetch events that have been pruned out of
+// the cache.
+package main
+
+import (
+ "orly.dev/chk"
+ "orly.dev/log"
+ "os"
+ "sync"
+ "time"
+
+ "lukechampine.com/frand"
+
+ "orly.dev/bech32encoding"
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/filter"
+ "orly.dev/interrupt"
+ "orly.dev/keys"
+ "orly.dev/layer2"
+ "orly.dev/lol"
+ "orly.dev/qu"
+ "orly.dev/ratel"
+ "orly.dev/tag"
+ "orly.dev/tests"
+ "orly.dev/units"
+)
+
+type Counter struct {
+ id []byte
+ size int
+ requested int
+}
+
+func main() {
+ lol.NoTimeStamp.Store(true)
+ lol.SetLogLevel(lol.LevelNames[lol.Debug])
+ var (
+ err error
+ sec []byte
+ mx sync.Mutex
+ counter []Counter
+ total int
+ MaxContentSize = units.Mb / 2
+ TotalSize = 1
+ MaxDelay = time.Second / 40
+ HW = 50
+ LW = 25
+ // fill rate capped to size of difference between high and low water mark
+ diff = TotalSize * units.Gb * (HW - LW) / 100
+ )
+ if sec, err = keys.GenerateSecretKey(); chk.E(err) {
+ panic(err)
+ }
+ var nsec []byte
+ if nsec, err = bech32encoding.HexToNsec(sec); chk.E(err) {
+ panic(err)
+ }
+ log.T.Ln("signing with", nsec)
+ c, cancel := context.Cancel(context.Bg())
+ var wg sync.WaitGroup
+ // defer cancel()
+ // create L1 with cache management settings enabled; we do it in the current dir
+ // because os.TempDir can point to a ramdisk which is very impractical for this
+ // test.
+ path := "./badgerbadgertest"
+ os.RemoveAll(path)
+ b1 := ratel.GetBackend(
+ c, &wg, true, true, units.Gb, lol.Error, 4*units.Mb, "none",
+ TotalSize, LW, HW, 2,
+ )
+ // create L2 with no cache management
+ b2 := ratel.GetBackend(
+ c, &wg, false, true, units.Gb, lol.Trace, 4*units.Mb, "none",
+ )
+ // Respond to interrupt signal and clean up after interrupt or end of test.
+ // defer chk.E(os.RemoveAll(path))
+ interrupt.AddHandler(
+ func() {
+ cancel()
+ chk.E(os.RemoveAll(path))
+ },
+ )
+ // now join them together in a 2 level eventstore
+ twoLevel := layer2.Backend{
+ Ctx: c,
+ WG: &wg,
+ L1: b1,
+ L2: b2,
+ }
+ if err = twoLevel.Init(path); chk.E(err) {
+ os.Exit(1)
+ }
+ // start GC
+ // go b1.GarbageCollector()
+end:
+ for {
+ select {
+ case <-c.Done():
+ log.I.Ln("context canceled")
+ return
+ default:
+ }
+ mx.Lock()
+ if total > TotalSize*10*units.Gb {
+ log.I.Ln(total, TotalSize*10*units.Gb)
+ mx.Unlock()
+ cancel()
+ return
+ }
+ mx.Unlock()
+ newEvent := qu.T()
+ go func() {
+ ticker := time.NewTicker(time.Second)
+ var fetchIDs [][]byte
+ // start fetching loop
+ for {
+ select {
+ case <-newEvent:
+ // make new request, not necessarily from existing... bias rng
+ // factor by request count
+ mx.Lock()
+ var sum int
+ for i := range counter {
+ rn := frand.Intn(256)
+ if sum > diff {
+ // don't overfill
+ break
+ }
+ // multiply this number by the number of accesses the event
+ // has and request every event that gets over 50% so that we
+ // create a bias towards already requested.
+ if counter[i].requested+rn > 216 {
+ log.T.Ln(
+ "counter", counter[i].requested, "+", rn,
+ "=",
+ counter[i].requested+rn,
+ )
+ // log.F.Ln("adding to fetchIDs")
+ counter[i].requested++
+ fetchIDs = append(fetchIDs, counter[i].id)
+ sum += counter[i].size
+ }
+ }
+ // if len(fetchIDs) > 0 {
+ // log.F.Ln("fetchIDs", len(fetchIDs), fetchIDs)
+ // }
+ mx.Unlock()
+ case <-ticker.C:
+ // copy out current list of events to request
+ mx.Lock()
+ log.T.Ln("ticker", len(fetchIDs))
+ ids := tag.NewWithCap(len(fetchIDs))
+ for i := range fetchIDs {
+ ids.Append(fetchIDs[i])
+ }
+ fetchIDs = fetchIDs[:0]
+ mx.Unlock()
+ if ids.Len() > 0 {
+ _, err = twoLevel.QueryEvents(c, &filter.F{Ids: ids})
+ }
+ case <-c.Done():
+ log.I.Ln("context canceled")
+ return
+ }
+ }
+ }()
+ var ev *event.E
+ var bs int
+ out:
+ for {
+ select {
+ case <-c.Done():
+ log.I.Ln("context canceled")
+ return
+ default:
+ }
+ if ev, bs, err = tests.GenerateEvent(MaxContentSize); chk.E(err) {
+ return
+ }
+ mx.Lock()
+ counter = append(
+ counter, Counter{id: ev.Id, size: bs, requested: 1},
+ )
+ total += bs
+ if total > TotalSize*10*units.Gb {
+			log.I.Ln(total, TotalSize*10*units.Gb)
+ mx.Unlock()
+ cancel()
+ break out
+ }
+ mx.Unlock()
+ newEvent.Signal()
+ sc, _ := context.Timeout(c, 2*time.Second)
+ if err = twoLevel.SaveEvent(sc, ev); chk.E(err) {
+ continue end
+ }
+ delay := frand.Intn(int(MaxDelay))
+ log.T.Ln("waiting between", delay, "ns")
+ if delay == 0 {
+ continue
+ }
+ select {
+ case <-c.Done():
+ log.I.Ln("context canceled")
+ return
+ case <-time.After(time.Duration(delay)):
+ }
+ }
+ select {
+ case <-c.Done():
+ }
+ }
+}
diff --git a/layer2/layer2.go b/layer2/layer2.go
new file mode 100644
index 0000000..9cadf30
--- /dev/null
+++ b/layer2/layer2.go
@@ -0,0 +1,279 @@
+// Package layer2 is a library for building nostr event stores with two separate
+// data storage systems, primarily for creating size limited caches with larger
+// stores backing them, to enable scaling providing access to an event store to
+// more users more quickly via a caching strategy.
+package layer2
+
+import (
+ "errors"
+ "io"
+ "orly.dev/chk"
+ "orly.dev/log"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/eventid"
+ "orly.dev/filter"
+ "orly.dev/store"
+ "orly.dev/tag"
+ "orly.dev/timestamp"
+)
+
+// Backend is a two level nostr event store. The first level is assumed to have a subset of all
+// events that the second level has. This is a mechanism for sharding nostr event data across
+// multiple relays which can then be failovers for each other or shards by geography or subject
+// matter.
+type Backend struct {
+ Ctx context.T
+ WG *sync.WaitGroup
+ path string
+ // L1 will store its state/configuration in path/layer1
+ L1 store.I
+ // L2 will store its state/configuration in path/layer2
+ L2 store.I
+ // PollFrequency is how often the L2 is queried for recent events. This is only
+ // relevant for shared layer2 stores, and will not apply for layer2
+ // implementations that are just two separate data store systems on the same
+ // server.
+ PollFrequency time.Duration
+ // PollOverlap is the multiple of the PollFrequency within which polling the L2
+ // is done to ensure any slow synchrony on the L2 is covered (2-4 usually).
+ PollOverlap int
+ // EventSignal triggers when the L1 saves a new event from the L2
+ //
+ // caller is responsible for populating this so that a signal can pass to all
+ // peers sharing the same L2 and enable cross-cluster subscription delivery.
+ EventSignal event.C
+}
+
+// Init a layer2.Backend setting up their configurations and polling frequencies and other
+// similar things.
+func (b *Backend) Init(path string) (err error) {
+ b.path = path
+ // each backend will have configuration files living in a subfolder of the same
+ // root, path/layer1 and path/layer2 - this may only be state/configuration, or
+ // it can be the site of the storage of data.
+ path1 := filepath.Join(path, "layer1")
+ path2 := filepath.Join(path, "layer2")
+ if err = b.L1.Init(path1); chk.E(err) {
+ return
+ }
+ if err = b.L2.Init(path2); chk.E(err) {
+ return
+ }
+ // if poll syncing is disabled don't start the ticker
+ if b.PollFrequency == 0 {
+ return
+ }
+ // Polling overlap should be 4x polling frequency, if less than 2x
+ if b.PollOverlap < 2 {
+ b.PollOverlap = 4
+ }
+ log.I.Ln(
+ "L2 polling frequency", b.PollFrequency, "overlap",
+ b.PollFrequency*time.Duration(b.PollOverlap),
+ )
+ go func() {
+ ticker := time.NewTicker(5 * time.Second)
+ last := timestamp.Now().I64()
+ out:
+ for {
+ select {
+ case <-b.Ctx.Done():
+ chk.E(b.Close())
+ return
+ case <-ticker.C:
+ until := timestamp.Now()
+ var evs []*event.E
+ if evs, err = b.L2.QueryEvents(
+ b.Ctx,
+ &filter.F{Since: timestamp.FromUnix(last), Until: until},
+ ); chk.E(err) {
+ continue out
+ }
+ // todo now wat
+ _ = evs
+ last = until.I64() - int64(time.Duration(b.PollOverlap)*b.PollFrequency/time.Second)
+ }
+ }
+ }()
+ return
+}
+
+// Path returns the filesystem path root of the layer2.Backend.
+func (b *Backend) Path() (s string) { return b.path }
+
+// Close the two layers of a layer2.Backend.
+func (b *Backend) Close() (err error) {
+ var e1, e2 error
+ if e1 = b.L1.Close(); chk.E(e1) {
+ err = e1
+ }
+ if e2 = b.L2.Close(); chk.E(e2) {
+ if err != nil {
+ err = errors.Join(err, e2)
+ } else {
+ err = e2
+ }
+ }
+ return
+}
+
+// Nuke wipes the both of the event stores in parallel and returns when both are complete.
+func (b *Backend) Nuke() (err error) {
+	var wg sync.WaitGroup
+	var err1, err2 error
+	wg.Add(2)
+	go func() {
+		if err1 = b.L1.Nuke(); chk.E(err1) {
+		}
+		wg.Done()
+	}()
+	go func() {
+		if err2 = b.L2.Nuke(); chk.E(err2) {
+		}
+		wg.Done()
+	}()
+ wg.Wait()
+ err = errors.Join(err1, err2)
+ return
+}
+
+// QueryEvents processes a filter.F search on the event store. The events found in the second
+// level will be saved into the first level so they become available from the first layer next
+// time they match.
+func (b *Backend) QueryEvents(c context.T, f *filter.F) (
+ evs event.Ts, err error,
+) {
+ if evs, err = b.L1.QueryEvents(c, f); chk.E(err) {
+ return
+ }
+	// if there are pruned events (having only an Id, no pubkey), they will also be in the
+ // L2 result, save these to the L1.
+ var revives [][]byte
+ var founds event.Ts
+ for _, ev := range evs {
+ if len(ev.Pubkey) == 0 {
+ // note the event Id to fetch
+ revives = append(revives, ev.Id)
+ } else {
+ founds = append(founds, ev)
+ }
+ }
+ evs = founds
+ go func(revives [][]byte) {
+ var err error
+ // construct the filter to fetch the missing events in the background that we
+ // know about, these will come in later on the subscription while it remains
+ // open.
+ l2filter := &filter.F{Ids: tag.New(revives...)}
+ var evs2 event.Ts
+ if evs2, err = b.L2.QueryEvents(c, l2filter); chk.E(err) {
+ return
+ }
+ for _, ev := range evs2 {
+ // saving the events here will trigger a match on the subscription
+ if err = b.L1.SaveEvent(c, ev); err != nil {
+ continue
+ }
+ }
+ // after fetching what we know exists of non pruned indexes that found stubs we
+ // want to run the query to the L2 anyway, and any matches that are found that
+ // were not locally available will now be available.
+ //
+ // if the subscription is still open the matches will be delivered later, the
+ // late events will be in descending (reverse chronological) order but the stream
+ // as a whole will not be. whatever.
+ var evs event.Ts
+ if evs, err = b.L2.QueryEvents(c, f); chk.E(err) {
+ return
+ }
+ for _, ev := range evs {
+ if err = b.L1.SaveEvent(c, ev); err != nil {
+ continue
+ }
+ }
+ }(revives)
+ return
+}
+
+// // CountEvents counts how many events match on a filter, providing an approximate flag if either
+// // of the layers return this, and the result is the maximum of the two layers results.
+// func (b *Backend) CountEvents(c context.F, f *filter.F) (count int, approx bool, err error) {
+// var wg sync.WaitGroup
+// var count1, count2 int
+// var approx1, approx2 bool
+// var err1, err2 error
+// go func() {
+// count1, approx1, err1 = b.L1.CountEvents(c, f)
+// wg.Done()
+// }()
+// // because this is a low-data query we will wait until the L2 also gets a count,
+// // which should be under a few hundred ms in most cases
+// go func() {
+// wg.Add(1)
+// count2, approx2, err2 = b.L2.CountEvents(c, f)
+// }()
+// wg.Wait()
+// // we return the maximum, it is assumed the L2 is authoritative, but it could be
+// // the L1 has more for whatever reason, so return the maximum of the two.
+// count = count1
+// approx = approx1
+// if count2 > count {
+// count = count2
+// // the approximate flag probably will be false if the L2 got more, and it is a
+// // very large, non GC store.
+// approx = approx2
+// }
+// err = errors.Join(err1, err2)
+// return
+// }
+
+// DeleteEvent deletes an event on both the layer1 and layer2.
+func (b *Backend) DeleteEvent(c context.T, ev *eventid.T) (err error) {
+ // delete the events from both stores.
+ err = errors.Join(
+ b.L1.DeleteEvent(c, ev),
+ b.L2.DeleteEvent(c, ev),
+ )
+ return
+}
+
+// SaveEvent stores an event on both layer1 and layer2.
+func (b *Backend) SaveEvent(c context.T, ev *event.E) (err error) {
+ // save to both event stores
+ err = errors.Join(
+ b.L1.SaveEvent(c, ev), // this will also send out to subscriptions
+ b.L2.SaveEvent(c, ev),
+ )
+ return
+}
+
+// Import events to the layer2, if the events come up in searches they will be propagated down
+// to the layer1.
+func (b *Backend) Import(r io.Reader) {
+ // we import up to the L2 directly, demanded data will be fetched from it by
+ // later queries.
+ b.L2.Import(r)
+}
+
+// Export from the layer2, which is assumed to be the most authoritative (and large) store of
+// events available to the relay.
+func (b *Backend) Export(c context.T, w io.Writer, pubkeys ...[]byte) {
+ // export only from the L2 as it is considered to be the authoritative event
+ // store of the two, and this is generally an administrative or infrequent action
+ // and latency will not matter as it usually will be a big bulky download.
+ b.L2.Export(c, w, pubkeys...)
+}
+
+// Sync triggers both layer1 and layer2 to flush their buffers and store any events in caches.
+func (b *Backend) Sync() (err error) {
+ err1 := b.L1.Sync()
+ // more than likely L2 sync is a noop.
+ err2 := b.L2.Sync()
+ err = errors.Join(err1, err2)
+ return
+}
diff --git a/list/list.go b/list/list.go
deleted file mode 100644
index 6133589..0000000
--- a/list/list.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package list
-
-type L map[string]struct{}
diff --git a/lol/README.md b/lol/README.md
index 896eaad..d9f49f8 100644
--- a/lol/README.md
+++ b/lol/README.md
@@ -7,41 +7,12 @@ main feature is printing source code locations to make debugging easier.
## terminals
-Due to how so few terminals actually support source location hyperlinks, pretty much tilix and intellij terminal are
-the only two that really provide adequate functionality; this logging library defaults to output format that works
-best with intellij. As such, the terminal is aware of the CWD and the code locations printed are relative, as
-required to get the hyperlinkization from this terminal.
-
-Handling support for Tilix requires more complications and
-due to advances with IntelliJ's handling it is not practical to support any other for this purpose. Users of this
+Due to how so few terminals actually support source location hyperlinks, pretty much tilix and intellij terminal are
+the only two that really provide adequate functionality; this logging library defaults to an output format that works
+best with intellij. As such, the terminal is aware of the CWD and the code locations printed are relative, as
+required to get the hyperlinkization from this terminal. Handling support for Tilix requires more complications and
+due to advances with IntelliJ's handling it is not practical to support any other for this purpose. Users of this
library can always fall back to manually interpreting and accessing the relative file path to find the source of a log.
-## using with tilix
-
-this enables us to remove the base of the path for a more compact code location string,
-this can be used with tilix custom hyperlinks feature
-
-create a script called `setcurrent` in your PATH ( eg ~/.local/bin/setcurrent )
-
- #!/usr/bin/bash
- echo $(pwd) > ~/.current
-
-make it executable
-
- chmod +x ~/.local/bin/setcurrent
-
-set the following environment variable in your ~/.bashrc
-
- export PROMPT_COMMAND='setcurrent'
-
-using the following regular expressions, replacing the path as necessary, and setting
-perhaps a different program than ide (this is for goland, i use an alias to the binary)
-
- ^((([a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+)) ide --line $5 $(cat /home/mleku/.current)/$2
- [ ]((([a-zA-Z@0-9-_./]+)+([a-zA-Z@0-9-_.]+)):([0-9]+)) ide --line $5 $(cat /home/mleku/.current)/$2
- ([/](([a-zA-Z@0-9-_.]+/)+([a-zA-Z@0-9-_.]+)):([0-9]+)) ide --line $5 /$2
-
-and so long as you use this with an app containing /lol/log.go as this one is, this finds
-that path and trims it off from the log line locations and in tilix you can click on the
-file locations that are relative to the CWD where you are running the relay from. if this
-is a remote machine, just go to the location where your source code is to make it work
\ No newline at end of file
+In addition, due to this terminal's slow rendering of long lines, long log strings are automatically broken into 80
+character lines, and if there are comma separators in the line, the line is broken at the comma instead of at column 80. This works perfectly for this purpose.
\ No newline at end of file
diff --git a/lol/log.go b/lol/log.go
index 2dfc143..d953186 100644
--- a/lol/log.go
+++ b/lol/log.go
@@ -1,20 +1,20 @@
-// Package lol (log of location) is a simple logging library the source location
-// of a log print to make tracing errors simpler.
-//
-// Includes a set of logging levels and the ability to filter out higher log
-// levels for a more quiet output.
+// Package lol (log of location) is a simple logging library that prints a high precision unix
+// timestamp and the source location of a log print to make tracing errors simpler. Includes a
+// set of logging levels and the ability to filter out higher log levels for a more quiet
+// output.
package lol
import (
"fmt"
- "github.com/fatih/color"
"io"
"os"
"runtime"
+ "strings"
"sync/atomic"
"time"
"github.com/davecgh/go-spew/spew"
+ "github.com/fatih/color"
)
const (
@@ -38,8 +38,8 @@ var LevelNames = []string{
}
type (
- // LevelPrinter defines a set of terminal printing primitives that output with
- // extra data, time, log logLevelList, and code location
+ // LevelPrinter defines a set of terminal printing primitives that output with extra data,
+ // time, log logLevelList, and code location
// Ln prints lists of interfaces with spaces in between
Ln func(a ...interface{})
@@ -47,10 +47,10 @@ type (
F func(format string, a ...interface{})
// S prints a spew.Sdump for an enveloper slice
S func(a ...interface{})
- // C accepts a function so that the extra computation can be avoided if it is
- // not being viewed
+ // C accepts a function so that the extra computation can be avoided if it is not being
+ // viewed
C func(closure func() string)
- // Chk is a shortcut for printing if there is an error or returning true
+ // Chk is a shortcut for printing if there is an error, or returning true
Chk func(e error) bool
// Err is a pass-through function that uses fmt.Errorf to construct an error and returns the
// error after printing it to the log
@@ -73,7 +73,7 @@ type (
Colorizer func(a ...any) string
}
- // Entry is a log entry to be printed as JSON to the log file
+	// Entry is a log entry to be printed as JSON to the log file
Entry struct {
Time time.Time
Level string
@@ -84,9 +84,8 @@ type (
)
var (
- // Writer can be swapped out for any io.*Writer* that you want to use instead
- // of stdout.
- Writer io.Writer = os.Stdout
+ // Writer can be swapped out for any io.*Writer* that you want to use instead of stdout.
+ Writer io.Writer = os.Stderr
// LevelSpecs specifies the id, string name and color-printing function
LevelSpecs = []LevelSpec{
@@ -98,6 +97,8 @@ var (
{Debug, "DBG", color.New(color.FgHiBlue).Sprint},
{Trace, "TRC", color.New(color.FgHiMagenta).Sprint},
}
+ NoTimeStamp atomic.Bool
+ ShortLoc atomic.Bool
)
// NoSprint is a noop for sprint (it returns nothing no matter what is given to it).
@@ -129,14 +130,11 @@ type Logger struct {
// Level is the level that the logger is printing at.
var Level atomic.Int32
-func GetLevel() string {
- return LevelNames[Level.Load()]
-}
-
// Main is the main logger.
var Main = &Logger{}
func init() {
+ // Main = &Logger{}
Main.Log, Main.Check, Main.Errorf = New(os.Stderr, 2)
ll := os.Getenv("LOG_LEVEL")
if ll == "" {
@@ -154,6 +152,7 @@ func init() {
// SetLoggers configures a log level.
func SetLoggers(level int) {
+ Main.Log.T.F("log level %s", LevelSpecs[level].Colorizer(LevelNames[level]))
Level.Store(int32(level))
if Level.Load() < Trace {
Tracer = noopTracer
@@ -183,8 +182,7 @@ func SetLogLevel(level string) {
SetLoggers(Trace)
}
-// JoinStrings joins together anything into a set of strings with space
-// separating the items.
+// JoinStrings joins together anything into a set of strings with space separating the items.
func JoinStrings(a ...any) (s string) {
for i := range a {
s += fmt.Sprint(a[i])
@@ -205,9 +203,8 @@ func getTracer() (fn func(funcName string, variables ...any)) {
for _, v := range variables {
vars += spew.Sdump(v)
}
- fmt.Fprintf(
- Writer, "%s %s %s\n%s",
- // TimeStamper(),
+ fmt.Fprintf(Writer, "%s %s %s\n%s",
+ //TimeStamper(),
LevelSpecs[Trace].Colorizer(LevelSpecs[Trace].Name),
funcName,
loc,
@@ -231,10 +228,9 @@ func GetPrinter(l int32, writer io.Writer, skip int) LevelPrinter {
if Level.Load() < l {
return
}
- fmt.Fprintf(
- writer,
+ fmt.Fprintf(writer,
"%s%s %s %s\n",
- TimeStamper(),
+ msgCol(TimeStamper()),
LevelSpecs[l].Colorizer(LevelSpecs[l].Name),
JoinStrings(a...),
msgCol(GetLoc(skip)),
@@ -244,10 +240,9 @@ func GetPrinter(l int32, writer io.Writer, skip int) LevelPrinter {
if Level.Load() < l {
return
}
- fmt.Fprintf(
- writer,
+ fmt.Fprintf(writer,
"%s%s %s %s\n",
- TimeStamper(),
+ msgCol(TimeStamper()),
LevelSpecs[l].Colorizer(LevelSpecs[l].Name),
fmt.Sprintf(format, a...),
msgCol(GetLoc(skip)),
@@ -257,10 +252,9 @@ func GetPrinter(l int32, writer io.Writer, skip int) LevelPrinter {
if Level.Load() < l {
return
}
- fmt.Fprintf(
- writer,
+ fmt.Fprintf(writer,
"%s%s %s %s\n",
- TimeStamper(),
+ msgCol(TimeStamper()),
LevelSpecs[l].Colorizer(LevelSpecs[l].Name),
spew.Sdump(a...),
msgCol(GetLoc(skip)),
@@ -270,10 +264,9 @@ func GetPrinter(l int32, writer io.Writer, skip int) LevelPrinter {
if Level.Load() < l {
return
}
- fmt.Fprintf(
- writer,
+ fmt.Fprintf(writer,
"%s%s %s %s\n",
- TimeStamper(),
+ msgCol(TimeStamper()),
LevelSpecs[l].Colorizer(LevelSpecs[l].Name),
closure(),
msgCol(GetLoc(skip)),
@@ -284,10 +277,9 @@ func GetPrinter(l int32, writer io.Writer, skip int) LevelPrinter {
return e != nil
}
if e != nil {
- fmt.Fprintf(
- writer,
+ fmt.Fprintf(writer,
"%s%s %s %s\n",
- TimeStamper(),
+ msgCol(TimeStamper()),
LevelSpecs[l].Colorizer(LevelSpecs[l].Name),
e.Error(),
msgCol(GetLoc(skip)),
@@ -298,10 +290,9 @@ func GetPrinter(l int32, writer io.Writer, skip int) LevelPrinter {
},
Err: func(format string, a ...interface{}) error {
if Level.Load() >= l {
- fmt.Fprintf(
- writer,
+ fmt.Fprintf(writer,
"%s%s %s %s\n",
- TimeStamper(),
+ msgCol(TimeStamper()),
LevelSpecs[l].Colorizer(LevelSpecs[l].Name),
fmt.Sprintf(format, a...),
msgCol(GetLoc(skip)),
@@ -320,11 +311,7 @@ func GetNullPrinter() LevelPrinter {
S: func(a ...interface{}) {},
C: func(closure func() string) {},
Chk: func(e error) bool { return e != nil },
- Err: func(format string, a ...interface{}) error {
- return fmt.Errorf(
- format, a...,
- )
- },
+ Err: func(format string, a ...interface{}) error { return fmt.Errorf(format, a...) },
}
}
@@ -362,18 +349,10 @@ func New(writer io.Writer, skip int) (l *Log, c *Check, errorf *Errorf) {
// TimeStamper generates the timestamp for logs.
func TimeStamper() (s string) {
- ts := time.Now().Format("150405.000000")
- ds := time.Now().Format("2006-01-02")
- s += color.New(color.FgBlue).Sprint(ds[0:4])
- s += color.New(color.FgHiBlue).Sprint(ds[5:7])
- s += color.New(color.FgBlue).Sprint(ds[8:])
- s += color.New(color.FgHiBlue).Sprint(ts[0:2])
- s += color.New(color.FgBlue).Sprint(ts[2:4])
- s += color.New(color.FgHiBlue).Sprint(ts[4:6])
- s += color.New(color.FgBlue).Sprint(ts[7:])
- // s = color.New(color.Faint).Sprint(s)
- s += " "
- return
+ if NoTimeStamp.Load() {
+ return
+ }
+ return time.Now().Format("2006-01-02T15:04:05Z07:00.000 ")
}
// var wd, _ = os.Getwd()
@@ -420,14 +399,14 @@ func init() {
// GetLoc returns the code location of the caller.
func GetLoc(skip int) (output string) {
_, file, line, _ := runtime.Caller(skip)
- // if strings.Contains(file, "pkg/mod/") {
- // } else {
- // var split []string
- // split = strings.Split(file, prefix)
- // if len(split) > 1 {
- // file = split[1]
- // }
- // }
+ if strings.Contains(file, "pkg/mod/") || !ShortLoc.Load() {
+ } else {
+ var split []string
+ split = strings.Split(file, prefix)
+ if len(split) > 1 {
+ file = split[1]
+ }
+ }
output = fmt.Sprintf("%s:%d", file, line)
return
}
diff --git a/lol/log_test.go b/lol/log_test.go
index aa5d180..ece2846 100644
--- a/lol/log_test.go
+++ b/lol/log_test.go
@@ -6,6 +6,7 @@ import (
"fmt"
"strings"
"testing"
+ "time"
)
func TestLogLevels(t *testing.T) {
@@ -15,9 +16,7 @@ func TestLogLevels(t *testing.T) {
}
// Test that LevelNames matches the constants
- expectedLevelNames := []string{
- "off", "fatal", "error", "warn", "info", "debug", "trace",
- }
+ expectedLevelNames := []string{"off", "fatal", "error", "warn", "info", "debug", "trace"}
for i, name := range expectedLevelNames {
if LevelNames[i] != name {
t.Errorf("LevelNames[%d] = %s, want %s", i, LevelNames[i], name)
@@ -41,17 +40,12 @@ func TestGetLogLevel(t *testing.T) {
}
for _, test := range tests {
- t.Run(
- test.level, func(t *testing.T) {
- result := GetLogLevel(test.level)
- if result != test.expected {
- t.Errorf(
- "GetLogLevel(%q) = %d, want %d", test.level, result,
- test.expected,
- )
- }
- },
- )
+ t.Run(test.level, func(t *testing.T) {
+ result := GetLogLevel(test.level)
+ if result != test.expected {
+ t.Errorf("GetLogLevel(%q) = %d, want %d", test.level, result, test.expected)
+ }
+ })
}
}
@@ -75,18 +69,13 @@ func TestSetLogLevel(t *testing.T) {
}
for _, test := range tests {
- t.Run(
- test.level, func(t *testing.T) {
- SetLogLevel(test.level)
- result := Level.Load()
- if result != test.expected {
- t.Errorf(
- "After SetLogLevel(%q), Level = %d, want %d",
- test.level, result, test.expected,
- )
- }
- },
- )
+ t.Run(test.level, func(t *testing.T) {
+ SetLogLevel(test.level)
+ result := Level.Load()
+ if result != test.expected {
+ t.Errorf("After SetLogLevel(%q), Level = %d, want %d", test.level, result, test.expected)
+ }
+ })
}
}
@@ -103,20 +92,69 @@ func TestJoinStrings(t *testing.T) {
}
for i, test := range tests {
- t.Run(
- fmt.Sprintf("case_%d", i), func(t *testing.T) {
- result := JoinStrings(test.args...)
- if result != test.expected {
- t.Errorf(
- "JoinStrings(%v) = %q, want %q", test.args, result,
- test.expected,
- )
- }
- },
- )
+ t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
+ result := JoinStrings(test.args...)
+ if result != test.expected {
+ t.Errorf("JoinStrings(%v) = %q, want %q", test.args, result, test.expected)
+ }
+ })
}
}
+func TestTimeStamper(t *testing.T) {
+ // Test with NoTimeStamp = false
+ NoTimeStamp.Store(false)
+ timestamp := TimeStamper()
+ if timestamp == "" {
+ t.Error("TimeStamper() returned empty string when NoTimeStamp = false")
+ }
+
+ // Check format (should be like "2006-01-02T15:04:05Z07:00.000 ")
+ _, err := time.Parse("2006-01-02T15:04:05Z07:00.000 ", timestamp)
+ if err != nil {
+ t.Errorf("TimeStamper() returned timestamp in unexpected format: %q, error: %v", timestamp, err)
+ }
+
+ // Test with NoTimeStamp = true
+ NoTimeStamp.Store(true)
+ timestamp = TimeStamper()
+ if timestamp != "" {
+ t.Errorf("TimeStamper() returned %q when NoTimeStamp = true, expected empty string", timestamp)
+ }
+
+ // Reset for other tests
+ NoTimeStamp.Store(false)
+}
+
+func TestGetLoc(t *testing.T) {
+ // Test with ShortLoc = false
+ ShortLoc.Store(false)
+ loc := GetLoc(1)
+ if !strings.Contains(loc, "log_test.go") {
+ t.Errorf("GetLoc(1) = %q, expected to contain 'log_test.go'", loc)
+ }
+
+ // Test with ShortLoc = true
+ ShortLoc.Store(true)
+ loc = GetLoc(1)
+ if !strings.Contains(loc, "log_test.go") {
+ t.Errorf("GetLoc(1) = %q, expected to contain 'log_test.go'", loc)
+ }
+
+ // Test edge case where file path doesn't contain prefix
+ originalPrefix := prefix
+ defer func() { prefix = originalPrefix }() // Restore original prefix after test
+
+ prefix = "non-existent-path"
+ loc = GetLoc(1)
+ if !strings.Contains(loc, "log_test.go") {
+ t.Errorf("GetLoc(1) with non-existent prefix = %q, expected to contain 'log_test.go'", loc)
+ }
+
+ // Reset for other tests
+ ShortLoc.Store(false)
+}
+
func TestGetPrinter(t *testing.T) {
// Create a buffer to capture output
var buf bytes.Buffer
@@ -133,9 +171,7 @@ func TestGetPrinter(t *testing.T) {
buf.Reset()
printer.Ln("test message")
if buf.String() != "" {
- t.Errorf(
- "printer.Ln() printed when level is too high: %q", buf.String(),
- )
+ t.Errorf("printer.Ln() printed when level is too high: %q", buf.String())
}
// Set log level to Debug
@@ -149,9 +185,7 @@ func TestGetPrinter(t *testing.T) {
t.Error("printer.Ln() did not print when it should have")
}
if !strings.Contains(output, "test message") {
- t.Errorf(
- "printer.Ln() output %q does not contain 'test message'", output,
- )
+ t.Errorf("printer.Ln() output %q does not contain 'test message'", output)
}
// Test F method
@@ -159,10 +193,7 @@ func TestGetPrinter(t *testing.T) {
printer.F("formatted %s", "message")
output = buf.String()
if !strings.Contains(output, "formatted message") {
- t.Errorf(
- "printer.F() output %q does not contain 'formatted message'",
- output,
- )
+ t.Errorf("printer.F() output %q does not contain 'formatted message'", output)
}
// Test S method
@@ -170,9 +201,7 @@ func TestGetPrinter(t *testing.T) {
printer.S("spew message")
output = buf.String()
if !strings.Contains(output, "spew message") {
- t.Errorf(
- "printer.S() output %q does not contain 'spew message'", output,
- )
+ t.Errorf("printer.S() output %q does not contain 'spew message'", output)
}
// Test C method
@@ -180,9 +209,7 @@ func TestGetPrinter(t *testing.T) {
printer.C(func() string { return "closure message" })
output = buf.String()
if !strings.Contains(output, "closure message") {
- t.Errorf(
- "printer.C() output %q does not contain 'closure message'", output,
- )
+ t.Errorf("printer.C() output %q does not contain 'closure message'", output)
}
// Test Chk method with nil error
@@ -203,10 +230,7 @@ func TestGetPrinter(t *testing.T) {
t.Error("printer.Chk(error) returned false, expected true")
}
if !strings.Contains(buf.String(), "test error") {
- t.Errorf(
- "printer.Chk(error) output %q does not contain 'test error'",
- buf.String(),
- )
+ t.Errorf("printer.Chk(error) output %q does not contain 'test error'", buf.String())
}
// Test Err method
@@ -216,17 +240,11 @@ func TestGetPrinter(t *testing.T) {
t.Error("printer.Err() returned nil error")
}
if err.Error() != "error message" {
- t.Errorf(
- "printer.Err() returned error with message %q, expected 'error message'",
- err.Error(),
- )
+ t.Errorf("printer.Err() returned error with message %q, expected 'error message'", err.Error())
}
// Check if the message was logged
if !strings.Contains(buf.String(), "error message") {
- t.Errorf(
- "printer.Err() output %q does not contain 'error message'",
- buf.String(),
- )
+ t.Errorf("printer.Err() output %q does not contain 'error message'", buf.String())
}
}
@@ -253,10 +271,7 @@ func TestGetNullPrinter(t *testing.T) {
t.Error("GetNullPrinter().Err() returned nil error")
}
if err.Error() != "test error" {
- t.Errorf(
- "GetNullPrinter().Err() returned error with message %q, expected 'test error'",
- err.Error(),
- )
+ t.Errorf("GetNullPrinter().Err() returned error with message %q, expected 'test error'", err.Error())
}
}
@@ -283,9 +298,6 @@ func TestNew(t *testing.T) {
buf.Reset()
log.D.Ln("test message")
if !strings.Contains(buf.String(), "test message") {
- t.Errorf(
- "log.D.Ln() output %q does not contain 'test message'",
- buf.String(),
- )
+ t.Errorf("log.D.Ln() output %q does not contain 'test message'", buf.String())
}
}
diff --git a/main.go b/main.go
index 513db3a..0f12818 100644
--- a/main.go
+++ b/main.go
@@ -1,27 +1,32 @@
+// Package main is a nostr relay with a simple follow/mute list authentication
+// scheme and the new HTTP REST based protocol. Configuration is via environment
+// variables or an optional .env file.
package main
import (
"fmt"
"github.com/pkg/profile"
- "net"
"net/http"
+ _ "net/http/pprof"
"orly.dev/chk"
- "orly.dev/config"
- "orly.dev/context"
- "orly.dev/database"
- "orly.dev/interrupt"
"orly.dev/log"
- "orly.dev/lol"
- "orly.dev/servemux"
- "orly.dev/server"
- "orly.dev/socketapi"
- "orly.dev/version"
+ realy_lol "orly.dev/version"
"os"
- "strconv"
"sync"
+
+ "orly.dev/app"
+ "orly.dev/context"
+ "orly.dev/interrupt"
+ "orly.dev/lol"
+ "orly.dev/ratel"
+ "orly.dev/realy"
+ "orly.dev/realy/config"
+ "orly.dev/realy/options"
+ "orly.dev/units"
)
func main() {
+ log.I.F("starting realy %s", realy_lol.V)
var err error
var cfg *config.C
if cfg, err = config.New(); chk.T(err) {
@@ -39,39 +44,44 @@ func main() {
config.PrintHelp(cfg, os.Stderr)
os.Exit(0)
}
+ log.I.Ln("log level", cfg.LogLevel)
+ lol.SetLogLevel(cfg.LogLevel)
if cfg.Pprof {
defer profile.Start(profile.MemProfile).Stop()
go func() {
chk.E(http.ListenAndServe("127.0.0.1:6060", nil))
}()
}
- log.I.F(
- "starting %s %s; log level: %s", version.Name, version.V,
- lol.GetLevel(),
- )
- wg := &sync.WaitGroup{}
+ var wg sync.WaitGroup
c, cancel := context.Cancel(context.Bg())
- interrupt.AddHandler(func() { cancel() })
- var sto *database.D
- if sto, err = database.New(
- c, cancel, cfg.DataDir, cfg.LogLevel,
- ); chk.E(err) {
- return
+ storage := ratel.New(
+ ratel.BackendParams{
+ Ctx: c,
+ WG: &wg,
+ BlockCacheSize: units.Gb,
+ LogLevel: lol.GetLogLevel(cfg.DbLogLevel),
+ MaxLimit: ratel.DefaultMaxLimit,
+ },
+ )
+ r := &app.Relay{C: cfg, Store: storage}
+ go app.MonitorResources(c)
+ var server *realy.Server
+ serverParams := &realy.ServerParams{
+ Ctx: c,
+ Cancel: cancel,
+ Rl: r,
+ DbPath: cfg.DataDir,
+ MaxLimit: ratel.DefaultMaxLimit,
}
- serveMux := servemux.New()
- s := &server.S{
- Ctx: c,
- Cancel: cancel,
- WG: wg,
- Addr: net.JoinHostPort(cfg.Listen, strconv.Itoa(cfg.Port)),
- Mux: serveMux,
- Cfg: cfg,
- Store: sto,
- }
- wg.Add(1)
- interrupt.AddHandler(func() { s.Shutdown() })
- socketapi.New(s, "/{$}", serveMux, socketapi.DefaultSocketParams())
- if err = s.Start(); chk.E(err) {
+ var opts []options.O
+ if server, err = realy.NewServer(serverParams, opts...); chk.E(err) {
os.Exit(1)
}
+ if err != nil {
+ log.F.F("failed to create server: %v", err)
+ }
+ interrupt.AddHandler(func() { server.Shutdown() })
+ if err = server.Start(cfg.Listen, cfg.Port); chk.E(err) {
+ log.F.F("server terminated: %v", err)
+ }
}
diff --git a/normalize/normalize.go b/normalize/normalize.go
index af1b8e3..0fdc9dd 100644
--- a/normalize/normalize.go
+++ b/normalize/normalize.go
@@ -4,11 +4,12 @@ package normalize
import (
"bytes"
+ "fmt"
"net/url"
-
"orly.dev/chk"
- "orly.dev/ints"
"orly.dev/log"
+
+ "orly.dev/ints"
)
var (
@@ -92,3 +93,48 @@ func URL[V string | []byte](v V) (b []byte) {
p.Path = string(bytes.TrimRight([]byte(p.Path), "/"))
return []byte(p.String())
}
+
+// Msg constructs a properly formatted message with a machine-readable prefix for OK and CLOSED
+// envelopes.
+func Msg(prefix Reason, format string, params ...any) []byte {
+ if len(prefix) < 1 {
+ prefix = Error
+ }
+ return []byte(fmt.Sprintf(prefix.S()+": "+format, params...))
+}
+
+// Reason is the machine-readable prefix before the colon in an OK or CLOSED envelope message.
+// Below are the most common kinds that are mentioned in NIP-01.
+type Reason []byte
+
+var (
+ AuthRequired = Reason("auth-required")
+ PoW = Reason("pow")
+ Duplicate = Reason("duplicate")
+ Blocked = Reason("blocked")
+ RateLimited = Reason("rate-limited")
+ Invalid = Reason("invalid")
+ Error = Reason("error")
+ Unsupported = Reason("unsupported")
+ Restricted = Reason("restricted")
+)
+
+// S returns the Reason as a string
+func (r Reason) S() string { return string(r) }
+
+// B returns the Reason as a byte slice.
+func (r Reason) B() []byte { return r }
+
+// IsPrefix returns whether a text contains the same Reason prefix.
+func (r Reason) IsPrefix(reason []byte) bool {
+ return bytes.HasPrefix(
+ reason, r.B(),
+ )
+}
+
+// F allows creation of a full Reason text with a printf style format.
+func (r Reason) F(format string, params ...any) []byte {
+ return Msg(
+ r, format, params...,
+ )
+}
diff --git a/nwc/doc.go b/nwc/doc.go
new file mode 100644
index 0000000..66f90cc
--- /dev/null
+++ b/nwc/doc.go
@@ -0,0 +1,4 @@
+// Package nwc is an implementation of the NWC Nostr Wallet Connect protocol for
+// communicating with lightning (and potentially other kinds of wallets) using
+// nostr ephemeral event messages.
+package nwc
diff --git a/nwc/error.go b/nwc/error.go
new file mode 100644
index 0000000..ff3df5a
--- /dev/null
+++ b/nwc/error.go
@@ -0,0 +1,6 @@
+package nwc
+
+type Error struct {
+ Code []byte
+ Message []byte
+}
diff --git a/nwc/get_balance.go b/nwc/get_balance.go
new file mode 100644
index 0000000..80c9813
--- /dev/null
+++ b/nwc/get_balance.go
@@ -0,0 +1,19 @@
+package nwc
+
+type GetBalanceRequest struct {
+ Request
+ // nothing to see here, move along
+}
+
+func NewGetBalanceRequest() *GetBalanceRequest {
+ return &GetBalanceRequest{Request{Methods.GetBalance}}
+}
+
+type GetBalanceResponse struct {
+ Response
+ Balance Msat
+}
+
+func NewGetBalanceResponse(balance Msat) *GetBalanceResponse {
+ return &GetBalanceResponse{Response{Type: Methods.GetBalance}, balance}
+}
diff --git a/nwc/get_info.go b/nwc/get_info.go
new file mode 100644
index 0000000..6d42a13
--- /dev/null
+++ b/nwc/get_info.go
@@ -0,0 +1,29 @@
+package nwc
+
+type GetInfoRequest struct {
+ Request
+ // nothing to see here, move along
+}
+
+func NewGetInfoRequest() GetInfoRequest {
+ return GetInfoRequest{Request{Methods.GetInfo}}
+}
+
+type GetInfo struct {
+ Alias []byte
+ Color []byte // Hex string
+ Pubkey []byte
+ Network []byte // mainnet/testnet/signet/regtest
+ BlockHeight uint64
+ BlockHash []byte
+ Methods []byte // pay_invoice, get_balance, make_invoice, lookup_invoice, list_transactions, get_info (list of methods)
+}
+
+type GetInfoResponse struct {
+ Response
+ GetInfo
+}
+
+func NewGetInfoResponse(gi GetInfo) GetInfoResponse {
+ return GetInfoResponse{Response{Type: Methods.GetInfo}, gi}
+}
diff --git a/nwc/lightning.go b/nwc/lightning.go
new file mode 100644
index 0000000..ef0968a
--- /dev/null
+++ b/nwc/lightning.go
@@ -0,0 +1,18 @@
+package nwc
+
+import (
+ "orly.dev/kind"
+)
+
+var Kinds = []*kind.T{
+ kind.WalletInfo,
+ kind.WalletRequest,
+ kind.WalletResponse,
+ kind.WalletNotification,
+}
+
+type Server struct {
+}
+
+type Client struct {
+}
diff --git a/nwc/list_transactions.go b/nwc/list_transactions.go
new file mode 100644
index 0000000..57cbab5
--- /dev/null
+++ b/nwc/list_transactions.go
@@ -0,0 +1,21 @@
+package nwc
+
+type ListTransactionsRequest struct {
+ Request
+ ListTransactions
+}
+
+func NewListTransactionsRequest(req ListTransactions) *ListTransactionsRequest {
+ return &ListTransactionsRequest{
+ Request{Methods.ListTransactions}, req,
+ }
+}
+
+type ListTransactionsResponse struct {
+ Response
+ Transactions []LookupInvoice
+}
+
+func NewListTransactionsResponse(txs []LookupInvoice) ListTransactionsResponse {
+ return ListTransactionsResponse{Response{Type: Methods.ListTransactions}, txs}
+}
diff --git a/nwc/lookup_invoice.go b/nwc/lookup_invoice.go
new file mode 100644
index 0000000..7385a95
--- /dev/null
+++ b/nwc/lookup_invoice.go
@@ -0,0 +1,26 @@
+package nwc
+
+type LookupInvoiceRequest struct {
+ Request
+ PaymentHash, Invoice []byte
+}
+
+func NewLookupInvoiceRequest(paymentHash, invoice []byte) *LookupInvoiceRequest {
+ return &LookupInvoiceRequest{
+ Request{Methods.LookupInvoice}, paymentHash, invoice,
+ }
+}
+
+type LookupInvoice struct {
+ Response
+ InvoiceResponse
+ SettledAt int64 // optional if unpaid
+}
+type LookupInvoiceResponse struct {
+ Response
+ LookupInvoice
+}
+
+func NewLookupInvoiceResponse(resp LookupInvoice) LookupInvoiceResponse {
+ return LookupInvoiceResponse{Response{Type: Methods.LookupInvoice}, resp}
+}
diff --git a/nwc/make_invoice_response.go b/nwc/make_invoice_response.go
new file mode 100644
index 0000000..e53fa7b
--- /dev/null
+++ b/nwc/make_invoice_response.go
@@ -0,0 +1,29 @@
+package nwc
+
+type MakeInvoiceRequest struct {
+ Request
+ Amount Msat
+ Description []byte // optional
+ DescriptionHash []byte // optional
+ Expiry int // optional
+}
+
+func NewMakeInvoiceRequest(amount Msat, description, descriptionHash []byte,
+ expiry int) MakeInvoiceRequest {
+ return MakeInvoiceRequest{
+ Request{Methods.MakeInvoice},
+ amount,
+ description,
+ descriptionHash,
+ expiry,
+ }
+}
+
+type MakeInvoiceResponse struct {
+ Response
+ InvoiceResponse
+}
+
+func NewMakeInvoiceResponse(resp InvoiceResponse) MakeInvoiceResponse {
+ return MakeInvoiceResponse{Response{Type: Methods.MakeInvoice}, resp}
+}
diff --git a/nwc/multi_pay_invoice.go b/nwc/multi_pay_invoice.go
new file mode 100644
index 0000000..73314a9
--- /dev/null
+++ b/nwc/multi_pay_invoice.go
@@ -0,0 +1,19 @@
+package nwc
+
+type MultiPayInvoiceRequest struct {
+ Request
+ Invoices []Invoice
+}
+
+func NewMultiPayInvoiceRequest(invoices []Invoice) MultiPayInvoiceRequest {
+ return MultiPayInvoiceRequest{
+ Request: Request{Methods.MultiPayInvoice},
+ Invoices: invoices,
+ }
+}
+
+type MultiPayInvoiceResponse = PayInvoiceResponse
+
+func NewMultiPayInvoiceResponse(preimage []byte, feesPaid Msat) MultiPayInvoiceResponse {
+ return MultiPayInvoiceResponse{Response{Type: Methods.MultiPayInvoice}, preimage, feesPaid}
+}
diff --git a/nwc/multi_pay_keysend.go b/nwc/multi_pay_keysend.go
new file mode 100644
index 0000000..5fe9fdd
--- /dev/null
+++ b/nwc/multi_pay_keysend.go
@@ -0,0 +1,18 @@
+package nwc
+
+type MultiPayKeysendRequest struct {
+ Request
+ Keysends []PayKeysendRequest
+}
+
+func NewMultiPayKeysendRequest(keysends []PayKeysendRequest) MultiPayKeysendRequest {
+ return MultiPayKeysendRequest{Request{Methods.MultiPayKeysend}, keysends}
+}
+
+type MultiPayKeysendResponse = PayKeysendResponse
+
+func NewMultiPayKKeysendResponse(preimage []byte, feesPaid Msat) MultiPayKeysendResponse {
+ return MultiPayKeysendResponse{
+ Response{Type: Methods.MultiPayKeysend}, preimage, feesPaid,
+ }
+}
diff --git a/nwc/names.go b/nwc/names.go
new file mode 100644
index 0000000..ce7fb8f
--- /dev/null
+++ b/nwc/names.go
@@ -0,0 +1,130 @@
+package nwc
+
+// Methods are the text of the value of the Method field of Request.Method and
+// Response.ResultType in a form that allows more convenient reference than using
+// a map or package scoped variable. These appear in the API Request and Response
+// types.
+var Methods = struct {
+ PayInvoice,
+ MultiPayInvoice,
+ PayKeysend,
+ MultiPayKeysend,
+ MakeInvoice,
+ LookupInvoice,
+ ListTransactions,
+ GetBalance,
+ GetInfo []byte
+}{
+ []byte("pay_invoice"),
+ []byte("multi_pay_invoice"),
+ []byte("pay_keysend"),
+ []byte("multi_pay_keysend"),
+ []byte("make_invoice"),
+ []byte("lookup_invoice"),
+ []byte("list_transactions"),
+ []byte("get_balance"),
+ []byte("get_info"),
+}
+
+// Keys are the proper JSON bytes for the JSON object keys of the structs of the
+// same-named type used lower in the following. Anonymous struct syntax is used
+// to make neater addressing of these fields as symbols.
+var Keys = struct {
+ Method,
+ Params,
+ ResultType,
+ Error,
+ Result,
+ Invoice,
+ Amount,
+ Preimage,
+ FeesPaid,
+ Id,
+ TLVRecords,
+ Type,
+ Value,
+ Pubkey,
+ Description,
+ DescriptionHash,
+ Expiry,
+ CreatedAt,
+ ExpiresAt,
+ Metadata,
+ SettledAt,
+ From,
+ Until,
+ Offset,
+ Unpaid,
+ Balance,
+ Notifications,
+ NotificationType,
+ Notification,
+ PaymentHash []byte
+}{
+ []byte("method"),
+ []byte("params"),
+ []byte("result_type"),
+ []byte("error"),
+ []byte("result"),
+ []byte("invoice"),
+ []byte("amount"),
+ []byte("preimage"),
+ []byte("fees_paid"),
+ []byte("id"),
+ []byte("tlv_records"),
+ []byte("type"),
+ []byte("value"),
+ []byte("pubkey"),
+ []byte("description"),
+ []byte("description_hash"),
+ []byte("expiry"),
+ []byte("created_at"),
+ []byte("expires_at"),
+ []byte("metadata"),
+ []byte("settled_at"),
+ []byte("from"),
+ []byte("until"),
+ []byte("offset"),
+ []byte("unpaid"),
+ []byte("balance"),
+ []byte("notifications"),
+ []byte("notification_type"),
+ []byte("notification"),
+ []byte("payment_hash"),
+}
+
+// Notifications are the proper strings for the Notification.NotificationType
+var Notifications = struct {
+ PaymentReceived, PaymentSent []byte
+}{
+ []byte("payment_received"),
+ []byte("payment_sent"),
+}
+
+var Errors = struct {
+// RateLimited - The client is sending commands too fast. It should retry in a few seconds.
+ RateLimited,
+ // NotImplemented - The command is not known or is intentionally not implemented.
+ NotImplemented,
+ // InsufficientBalance - The wallet does not have enough funds to cover a fee reserve or the payment amount.
+ InsufficientBalance,
+ // QuotaExceeded - The wallet has exceeded its spending quota.
+ QuotaExceeded,
+ // Restricted - This public key is not allowed to do this operation.
+ Restricted,
+ // Unauthorized - This public key has no wallet connected.
+ Unauthorized,
+ // Internal - An internal error.
+ Internal,
+ // Other - Other error.
+ Other []byte
+}{
+ []byte("RATE_LIMITED"),
+ []byte("NOT_IMPLEMENTED"),
+ []byte("INSUFFICIENT_BALANCE"),
+ []byte("QUOTA_EXCEEDED"),
+ []byte("RESTRICTED"),
+ []byte("UNAUTHORIZED"),
+ []byte("INTERNAL"),
+ []byte("OTHER"),
+}
diff --git a/nwc/payKeysend.go b/nwc/payKeysend.go
new file mode 100644
index 0000000..212bc2d
--- /dev/null
+++ b/nwc/payKeysend.go
@@ -0,0 +1 @@
+package nwc
diff --git a/nwc/pay_invoice.go b/nwc/pay_invoice.go
new file mode 100644
index 0000000..251f9c5
--- /dev/null
+++ b/nwc/pay_invoice.go
@@ -0,0 +1,91 @@
+package nwc
+
+import (
+ "orly.dev/text"
+)
+
+type PayInvoiceRequest struct {
+ Request
+ Invoice
+}
+
+func NewPayInvoiceRequest[V string | []byte](
+ invoice V, amount Msat,
+) PayInvoiceRequest {
+ return PayInvoiceRequest{
+ Request{Methods.PayInvoice}, Invoice{nil, []byte(invoice), amount},
+ }
+}
+
+func (p PayInvoiceRequest) Marshal(dst []byte) (b []byte) {
+ // open parentheses
+ dst = append(dst, '{')
+ // method
+ dst = text.JSONKey(dst, Keys.Method)
+ dst = text.Quote(dst, p.RequestType())
+ dst = append(dst, ',')
+ // Params
+ dst = text.JSONKey(dst, Keys.Params)
+ dst = append(dst, '{')
+ // Invoice
+ dst = text.JSONKey(dst, Keys.Invoice)
+ dst = text.AppendQuote(dst, p.Invoice.Invoice, text.Noop)
+ // Amount - optional (omit if zero)
+ if p.Amount > 0 {
+ dst = append(dst, ',')
+ dst = text.JSONKey(dst, Keys.Amount)
+ dst = p.Amount.Bytes(dst)
+ }
+ // close parentheses
+ dst = append(dst, '}')
+ dst = append(dst, '}')
+ b = dst
+ return
+}
+
+func (p PayInvoiceRequest) Unmarshal(b []byte) (r []byte, err error) {
+
+ return
+}
+
+type PayInvoiceResponse struct {
+ Response
+ Preimage []byte
+ FeesPaid Msat // optional, omitted if zero
+}
+
+func NewPayInvoiceResponse(preimage []byte, feesPaid Msat) PayInvoiceResponse {
+ return PayInvoiceResponse{
+ Response{Type: Methods.PayInvoice}, preimage, feesPaid,
+ }
+}
+
+func (p PayInvoiceResponse) Marshal(dst []byte) (b []byte) {
+ // open parentheses
+ dst = append(dst, '{')
+ // method
+ dst = text.JSONKey(dst, p.Response.Type)
+ dst = text.Quote(dst, p.ResultType())
+ // Params
+ dst = text.JSONKey(dst, Keys.Params)
+ // open parenthesis
+ dst = append(dst, '{')
+ // Invoice
+ dst = text.JSONKey(dst, Keys.Preimage)
+ dst = text.AppendQuote(dst, p.Preimage, text.Noop)
+ // Amount - optional (omit if zero)
+ if p.FeesPaid > 0 {
+ dst = append(dst, ',')
+ dst = text.JSONKey(dst, Keys.FeesPaid)
+ dst = p.FeesPaid.Bytes(dst)
+ }
+ // close parentheses
+ dst = append(dst, '}')
+ dst = append(dst, '}')
+ return
+}
+
+func (p PayInvoiceResponse) Unmarshal(b []byte) (r []byte, err error) {
+ // TODO implement me
+ panic("implement me")
+}
diff --git a/nwc/pay_invoice_test.go b/nwc/pay_invoice_test.go
new file mode 100644
index 0000000..148a5e9
--- /dev/null
+++ b/nwc/pay_invoice_test.go
@@ -0,0 +1,25 @@
+package nwc
+
+import (
+ "fmt"
+ "orly.dev/chk"
+)
+
+func ExamplePayInvoiceRequest_Marshal() {
+ ir := NewPayInvoiceRequest("lnbc50n1...", 0)
+ var b []byte
+ var err error
+ if b = ir.Marshal(b); chk.E(err) {
+ return
+ }
+ fmt.Printf("%s\n", b)
+ b = b[:0]
+ ir = NewPayInvoiceRequest("lnbc50n1...", 123)
+ if b = ir.Marshal(b); chk.E(err) {
+ return
+ }
+ fmt.Printf("%s\n", b)
+ // Output:
+ // {"method":"pay_invoice","params":{"invoice":"lnbc50n1..."}}
+ // {"method":"pay_invoice","params":{"invoice":"lnbc50n1...","amount":123}}
+}
diff --git a/nwc/pay_keysend.go b/nwc/pay_keysend.go
new file mode 100644
index 0000000..fce7b5f
--- /dev/null
+++ b/nwc/pay_keysend.go
@@ -0,0 +1,33 @@
+package nwc
+
+type TLV struct {
+ Type uint64
+ Value []byte
+}
+
+type PayKeysendRequest struct {
+ Request
+ Amount Msat
+ Pubkey []byte
+ Preimage []byte // optional
+ TLVRecords []TLV // optional
+}
+
+func NewPayKeysendRequest(amount Msat, pubkey, preimage []byte,
+ tlvRecords []TLV) PayKeysendRequest {
+ return PayKeysendRequest{
+ Request{Methods.PayKeysend},
+ amount,
+ pubkey,
+ preimage,
+ tlvRecords,
+ }
+}
+
+type PayKeysendResponse = PayInvoiceResponse
+
+func NewPayKeysendResponse(preimage []byte, feesPaid Msat) PayKeysendResponse {
+ return PayInvoiceResponse{
+ Response{Type: Methods.PayKeysend}, preimage, feesPaid,
+ }
+}
diff --git a/nwc/protocols.go b/nwc/protocols.go
new file mode 100644
index 0000000..4824ff1
--- /dev/null
+++ b/nwc/protocols.go
@@ -0,0 +1,101 @@
+package nwc
+
+import (
+ "orly.dev/ints"
+)
+
+// Interfaces
+//
+// By using these interfaces and embedding the following implementations it becomes simple to type check the specific
+// request, response or notification variable being used in a given place in the code, without using reflection.
+//
+// All request, responses and methods embed the implementations and their types then become easily checked.
+
+type Requester interface {
+ RequestType() []byte
+}
+
+type Resulter interface {
+ ResultType() []byte
+}
+
+type Notifier interface {
+ NotificationType() []byte
+}
+
+// Implementations
+//
+// By embedding the following types into the message structs and writing a constructor that loads the type name,
+// code can handle these without reflection, determine type via type assertion and introspect the message type via
+// the interface accessor method.
+
+type Request struct {
+ Method []byte
+}
+
+func (r Request) RequestType() []byte { return r.Method }
+
+type Response struct {
+ Type []byte
+ Error
+}
+
+func (r Response) ResultType() []byte { return r.Type }
+
+type Notification struct {
+ Type []byte
+}
+
+func (n Notification) NotificationType() []byte { return n.Type }
+
+// Msat is milli-sat, max possible value is 1000 x 21 x 100 000 000 (well, under 19 places of 64 bits in base 10)
+type Msat uint64
+
+func (m Msat) Bytes(dst []byte) (b []byte) { return ints.New(uint64(m)).Marshal(dst) }
+
+// Methods
+
+type Invoice struct {
+ Id []byte // nil for request, required for responses (omitted if nil)
+ Invoice []byte
+ Amount Msat // optional, omitted if zero
+}
+
+type InvoiceResponse struct {
+ Type []byte // incoming or outgoing
+ Invoice []byte // optional
+ Description []byte // optional
+ DescriptionHash []byte // optional
+ Preimage []byte // optional if unpaid
+ PaymentHash []byte
+ Amount Msat
+ FeesPaid Msat
+ CreatedAt int64
+ ExpiresAt int64 // optional if not applicable
+ Metadata []any // optional, free-form metadata; similar to tags, but numbers and floats are also allowed
+
+}
+
+type ListTransactions struct {
+ From int64 // optional
+ Until int64 // optional
+ Limit int // optional
+ Offset int // optional
+ Unpaid bool // optional default false
+ Type []byte // incoming/outgoing/empty for "both"
+}
+
+// Notifications
+
+var (
+ PaymentSent = []byte("payment_sent")
+ PaymentReceived = []byte("payment_received")
+)
+
+type PaymentSentNotification struct {
+ LookupInvoiceResponse
+}
+
+type PaymentReceivedNotification struct {
+ LookupInvoiceResponse
+}
diff --git a/openapi/common.go b/openapi/common.go
new file mode 100644
index 0000000..3d5469a
--- /dev/null
+++ b/openapi/common.go
@@ -0,0 +1,12 @@
+package openapi
+
+import (
+ "orly.dev/realy/interfaces"
+)
+
+type Operations struct{ interfaces.Server }
+
+// NewOperations creates a new openapi.Operations.
+func NewOperations(s interfaces.Server) (ep *Operations) {
+ return &Operations{Server: s}
+}
diff --git a/openapi/http-configuration.go b/openapi/http-configuration.go
new file mode 100644
index 0000000..2eff132
--- /dev/null
+++ b/openapi/http-configuration.go
@@ -0,0 +1,94 @@
+package openapi
+
+// import (
+// "net/http"
+//
+// "github.com/danielgtaylor/huma/v2"
+//
+// "orly.dev/context"
+// "orly.dev/realy/helpers"
+// "orly.dev/store"
+// )
+//
+// // ConfigurationSetInput is the parameters for HTTP API method to set Configuration.
+// type ConfigurationSetInput struct {
+// Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"`
+// Body *store.Configuration `doc:"the new configuration"`
+// }
+//
+// // ConfigurationGetInput is the parameters for HTTP API method to get Configuration.
+// type ConfigurationGetInput struct {
+// Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"`
+// Accept string `header:"Accept" default:"application/json" enum:"application/json" required:"true"`
+// }
+//
+// // ConfigurationGetOutput is the result of getting Configuration.
+// type ConfigurationGetOutput struct {
+// Body store.Configuration `doc:"the current configuration"`
+// }
+//
+// // RegisterConfigurationSet implements the HTTP API for setting Configuration.
+// func (x *Operations) RegisterConfigurationSet(api huma.API) {
+// name := "ConfigurationSet"
+// description := "Set the configuration"
+// path := "/configuration/set"
+// scopes := []string{"admin", "write"}
+// method := http.MethodPost
+// huma.Register(api, huma.Operation{
+// OperationID: name,
+// Summary: name,
+// Path: path,
+// Method: method,
+// Tags: []string{"admin"},
+// Description: helpers.GenerateDescription(description, scopes),
+// Security: []map[string][]string{{"auth": scopes}},
+// }, func(ctx context.T, input *ConfigurationSetInput) (wgh *struct{}, err error) {
+// log.I.S(input)
+// r := ctx.Value("http-request").(*http.Request)
+// // w := ctx.Value("http-response").(http.ResponseWriter)
+// // rr := GetRemoteFromReq(r)
+// authed, _ := x.AdminAuth(r)
+// if !authed {
+// // pubkey = ev.Pubkey
+// err = huma.Error401Unauthorized("authorization required")
+// return
+// }
+// sto := x.Storage()
+// if c, ok := sto.(store.Configurationer); ok {
+// if err = c.SetConfiguration(input.Body); chk.E(err) {
+// return
+// }
+// x.SetConfiguration(input.Body)
+// }
+// return
+// })
+// }
+//
+// // RegisterConfigurationGet implements the HTTP API for getting the Configuration.
+// func (x *Operations) RegisterConfigurationGet(api huma.API) {
+// name := "ConfigurationGet"
+// description := "Fetch the current configuration"
+// path := "/configuration/get"
+// scopes := []string{"admin", "read"}
+// method := http.MethodGet
+// huma.Register(api, huma.Operation{
+// OperationID: name,
+// Summary: name,
+// Path: path,
+// Method: method,
+// Tags: []string{"admin"},
+// Description: helpers.GenerateDescription(description, scopes),
+// Security: []map[string][]string{{"auth": scopes}},
+// }, func(ctx context.T, input *ConfigurationGetInput) (output *ConfigurationGetOutput,
+// err error) {
+// r := ctx.Value("http-request").(*http.Request)
+// authed, _ := x.AdminAuth(r)
+// if !authed {
+// err = huma.Error401Unauthorized("authorization required")
+// return
+// }
+// output = &ConfigurationGetOutput{Body: x.Configuration()}
+// // }
+// return
+// })
+// }
diff --git a/openapi/http-disconnect.go b/openapi/http-disconnect.go
new file mode 100644
index 0000000..2666e7b
--- /dev/null
+++ b/openapi/http-disconnect.go
@@ -0,0 +1,51 @@
+package openapi
+
+import (
+ "net/http"
+
+ "github.com/danielgtaylor/huma/v2"
+
+ "orly.dev/context"
+ "orly.dev/realy/helpers"
+)
+
+// DisconnectInput is the parameters for triggering the disconnection of all open websockets.
+type DisconnectInput struct {
+ Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"`
+}
+
+// DisconnectOutput is the result type for the Disconnect HTTP API method.
+type DisconnectOutput struct{}
+
+// RegisterDisconnect is the implementation of the HTTP API Disconnect method.
+func (x *Operations) RegisterDisconnect(api huma.API) {
+ name := "Disconnect"
+ description := "Close all open nip-01 websockets"
+ path := "/disconnect"
+ scopes := []string{"admin"}
+ method := http.MethodGet
+ huma.Register(
+ api, huma.Operation{
+ OperationID: name,
+ Summary: name,
+ Path: path,
+ Method: method,
+ Tags: []string{"admin"},
+ Description: helpers.GenerateDescription(description, scopes),
+ Security: []map[string][]string{{"auth": scopes}},
+ DefaultStatus: 204,
+ }, func(ctx context.T, input *DisconnectInput) (
+ wgh *DisconnectOutput, err error,
+ ) {
+ // r := ctx.Value("http-request").(*http.Request)
+ // authed, _ := x.AdminAuth(r)
+ // if !authed {
+ // // pubkey = ev.Pubkey
+ // err = huma.Error401Unauthorized("authorization required")
+ // return
+ // }
+ x.Disconnect()
+ return
+ },
+ )
+}
diff --git a/openapi/http-event.go b/openapi/http-event.go
new file mode 100644
index 0000000..800bb14
--- /dev/null
+++ b/openapi/http-event.go
@@ -0,0 +1,249 @@
+package openapi
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net/http"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "github.com/danielgtaylor/huma/v2"
+
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/filter"
+ "orly.dev/hex"
+ "orly.dev/httpauth"
+ "orly.dev/ints"
+ "orly.dev/kind"
+ "orly.dev/realy/helpers"
+ "orly.dev/sha256"
+ "orly.dev/tag"
+)
+
+// EventInput is the parameters for the Event HTTP API method.
+type EventInput struct {
+ Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"false"`
+ RawBody []byte
+}
+
+// EventOutput is the return parameters for the HTTP API Event method.
+type EventOutput struct{ Body string }
+
+// RegisterEvent is the implementation of the HTTP API Event method.
+func (x *Operations) RegisterEvent(api huma.API) {
+	name := "Event"
+	description := "Submit an event"
+	path := "/event"
+	scopes := []string{"user", "write"}
+	method := http.MethodPost
+	huma.Register(
+		api, huma.Operation{
+			OperationID: name,
+			Summary:     name,
+			Path:        path,
+			Method:      method,
+			Tags:        []string{"events"},
+			Description: helpers.GenerateDescription(description, scopes),
+			Security:    []map[string][]string{{"auth": scopes}},
+		}, func(ctx context.T, input *EventInput) (
+			output *EventOutput, err error,
+		) {
+			r := ctx.Value("http-request").(*http.Request)
+			// w := ctx.Value("http-response").(http.ResponseWriter)
+			rr := helpers.GetRemoteFromReq(r)
+			ev := &event.E{}
+			if _, err = ev.Unmarshal(input.RawBody); chk.E(err) {
+				err = huma.Error406NotAcceptable(err.Error())
+				return
+			}
+			var ok bool
+			sto := x.Storage()
+			if sto == nil {
+				panic("no event store has been set to store event")
+			}
+			// advancedDeleter, _ := sto.(relay.AdvancedDeleter)
+			var valid bool
+			var pubkey []byte
+			valid, pubkey, err = httpauth.CheckAuth(r)
+			// missing := !errors.Is(err, httpauth.ErrMissingKey)
+			// if there is an error but not that the token is missing, or there is no error
+			// but the signature is invalid, return error that request is unauthorized.
+			if err != nil && !errors.Is(err, httpauth.ErrMissingKey) {
+				err = huma.Error400BadRequest(err.Error())
+				return
+			}
+			err = nil
+			if !valid {
+				err = huma.Error401Unauthorized("Authorization header is invalid")
+				return
+			}
+			// if there was auth, or no auth, check the relay policy allows accepting the
+			// event (no auth with auth required or auth not valid for action can apply
+			// here).
+			// accept, notice, after := x.AcceptEvent(ctx, ev, r, rr, pubkey)
+			// if !accept {
+			// 	err = huma.Error401Unauthorized(notice)
+			// 	return
+			// }
+			if !bytes.Equal(ev.GetIDBytes(), ev.Id) {
+				err = huma.Error400BadRequest("event id is computed incorrectly")
+				return
+			}
+			if ok, err = ev.Verify(); chk.T(err) {
+				err = huma.Error400BadRequest("failed to verify signature")
+				return
+			} else if !ok {
+				err = huma.Error400BadRequest("signature is invalid")
+				return
+			}
+			if ev.Kind.K == kind.Deletion.K {
+				log.I.F("delete event\n%s", ev.Serialize())
+				for _, t := range ev.Tags.ToSliceOfTags() {
+					var res []*event.E
+					if t.Len() >= 2 {
+						switch {
+						case bytes.Equal(t.Key(), []byte("e")):
+							evId := make([]byte, sha256.Size)
+							if _, err = hex.DecBytes(
+								evId, t.Value(),
+							); chk.E(err) {
+								continue
+							}
+							res, err = sto.QueryEvents(
+								ctx, &filter.F{Ids: tag.New(evId)},
+							)
+							if err != nil {
+								err = huma.Error500InternalServerError(err.Error())
+								return
+							}
+							for i := range res {
+								if res[i].Kind.Equal(kind.Deletion) {
+									err = huma.Error409Conflict("not processing or storing delete event containing delete event references")
+									return
+								}
+								if !bytes.Equal(res[i].Pubkey, ev.Pubkey) {
+									err = huma.Error409Conflict("cannot delete other users' events (delete by e tag)")
+									return
+								}
+							}
+						case bytes.Equal(t.Key(), []byte("a")):
+							split := bytes.Split(t.Value(), []byte{':'})
+							if len(split) != 3 {
+								continue
+							}
+							var pk []byte
+							if pk, err = hex.DecAppend(
+								nil, split[1],
+							); chk.E(err) {
+								err = huma.Error400BadRequest(
+									fmt.Sprintf(
+										"delete event a tag pubkey value invalid: %s",
+										t.Value(),
+									),
+								)
+								return
+							}
+							kin := ints.New(uint16(0))
+							if _, err = kin.Unmarshal(split[0]); chk.E(err) {
+								err = huma.Error400BadRequest(
+									fmt.Sprintf(
+										"delete event a tag kind value invalid: %s",
+										t.Value(),
+									),
+								)
+								return
+							}
+							kk := kind.New(kin.Uint16())
+							if kk.Equal(kind.Deletion) {
+								err = huma.Error403Forbidden("delete event kind may not be deleted")
+								return
+							}
+							if !kk.IsParameterizedReplaceable() {
+								err = huma.Error403Forbidden("delete tags with a tags containing non-parameterized-replaceable events cannot be processed")
+								return
+							}
+							if !bytes.Equal(pk, ev.Pubkey) {
+								log.I.S(pk, ev.Pubkey, ev)
+								err = huma.Error403Forbidden("cannot delete other users' events (delete by a tag)")
+								return
+							}
+							f := filter.New()
+							f.Kinds.K = []*kind.T{kk}
+							f.Authors.Append(pk)
+							f.Tags.AppendTags(
+								tag.New(
+									[]byte{'#', 'd'}, split[2],
+								),
+							)
+							res, err = sto.QueryEvents(ctx, f)
+							if err != nil {
+								err = huma.Error500InternalServerError(err.Error())
+								return
+							}
+						}
+					}
+					if len(res) < 1 {
+						continue
+					}
+					var resTmp []*event.E
+					for _, v := range res {
+						if ev.CreatedAt.U64() >= v.CreatedAt.U64() {
+							resTmp = append(resTmp, v)
+						}
+					}
+					res = resTmp
+					for _, target := range res {
+						if target.Kind.K == kind.Deletion.K {
+							err = huma.Error403Forbidden(
+								fmt.Sprintf(
+									"cannot delete delete event %s", ev.Id,
+								),
+							)
+							return
+						}
+						if target.CreatedAt.Int() > ev.CreatedAt.Int() {
+							// todo: shouldn't this be an error?
+							log.I.F(
+								"not deleting\n%d\nbecause delete event is older\n%d",
+								target.CreatedAt.Int(), ev.CreatedAt.Int(),
+							)
+							continue
+						}
+						if !bytes.Equal(target.Pubkey, ev.Pubkey) {
+							err = huma.Error403Forbidden("only author can delete event")
+							return
+						}
+						// if advancedDeleter != nil {
+						// 	advancedDeleter.BeforeDelete(ctx, t.Value(), ev.Pubkey)
+						// }
+						if err = sto.DeleteEvent(
+							ctx, target.EventId(),
+						); chk.T(err) {
+							err = huma.Error500InternalServerError(err.Error())
+							return
+						}
+						// if advancedDeleter != nil {
+						// 	advancedDeleter.AfterDelete(t.Value(), ev.Pubkey)
+						// }
+					}
+					res = nil
+				}
+				return
+			}
+			var reason []byte
+			ok, reason = x.AddEvent(ctx, x.Relay(), ev, r, rr, pubkey)
+			// return an error carrying the reason when the event was not accepted
+			if !ok {
+				err = huma.Error500InternalServerError(string(reason))
+				return
+			}
+			// if after != nil {
+			// 	go after() // run in background, let the http response close
+			// }
+			output = &EventOutput{"event accepted"}
+			return
+		},
+	)
+}
diff --git a/openapi/http-events.go b/openapi/http-events.go
new file mode 100644
index 0000000..5bbf69e
--- /dev/null
+++ b/openapi/http-events.go
@@ -0,0 +1,124 @@
+package openapi
+
+import (
+ "fmt"
+ "net/http"
+ "orly.dev/chk"
+
+ "github.com/danielgtaylor/huma/v2"
+
+ "orly.dev/context"
+ "orly.dev/hex"
+ "orly.dev/interfaces/store"
+ "orly.dev/realy/helpers"
+ "orly.dev/sha256"
+ "orly.dev/tag"
+)
+
+// EventsInput is the parameters for an Events HTTP API method. Basically an array of eventid.T.
+type EventsInput struct {
+ Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"false"`
+ Body []string `doc:"list of event Ids"`
+}
+
+// RegisterEvents is the implementation of the HTTP API for Events.
+func (x *Operations) RegisterEvents(api huma.API) {
+	name := "Events"
+	description := "Returns the full events from a list of event Ids as a line structured JSON."
+	path := "/events"
+	scopes := []string{"user", "read"}
+	method := http.MethodPost
+	huma.Register(
+		api, huma.Operation{
+			OperationID:   name,
+			Summary:       name,
+			Path:          path,
+			Method:        method,
+			Tags:          []string{"events"},
+			Description:   helpers.GenerateDescription(description, scopes),
+			Security:      []map[string][]string{{"auth": scopes}},
+			DefaultStatus: 204,
+		}, func(ctx context.T, input *EventsInput) (
+			output *huma.StreamResponse, err error,
+		) {
+			// if len(input.Body) == 10000 {
+			// 	err = huma.Error400BadRequest(
+			// 		"cannot process more than 10000 events in a request")
+			// 	return
+			// }
+			// var authrequired bool
+			// if len(input.Body) > 1000 {
+			// 	authrequired = true
+			// }
+			// r := ctx.Value("http-request").(*http.Request)
+			// var valid bool
+			// var pubkey []byte
+			// valid, pubkey, err = httpauth.CheckAuth(r)
+			// // if there is an error but not that the token is missing, or there is no error
+			// // but the signature is invalid, return error that request is unauthorized.
+			// if err != nil && !errors.Is(err, httpauth.ErrMissingKey) {
+			// 	err = huma.Error400BadRequest(err.Error())
+			// 	return
+			// }
+			// err = nil
+			// if authrequired && len(pubkey) != schnorr.PubKeyBytesLen {
+			// 	err = huma.Error400BadRequest(
+			// 		"cannot process more than 1000 events in a request without being authenticated")
+			// 	return
+			// }
+			// if authrequired && valid {
+			// 	if len(x.Owners()) < 1 {
+			// 		err = huma.Error400BadRequest(
+			// 			"cannot process more than 1000 events in a request without auth enabled")
+			// 		return
+			// 	}
+			// 	if rl, ok := x.Relay().(*app.Relay); ok {
+			// 		rl.Lock()
+			// 		// we only allow the first level of the allowed users this kind of access
+			// 		if _, ok = rl.OwnersFollowed[string(pubkey)]; !ok {
+			// 			err = huma.Error403Forbidden(
+			// 				fmt.Sprintf(
+			// 					"authenticated user %0x does not have permission for this request (owners can use export)",
+			// 					pubkey))
+			// 			return
+			// 		}
+			// 	}
+			// }
+			// if !valid {
+			// 	err = huma.Error401Unauthorized("Authorization header is invalid")
+			// 	return
+			// }
+			sto := x.Storage()
+			var evIds [][]byte
+			for _, id := range input.Body {
+				var idb []byte
+				if idb, err = hex.Dec(id); chk.E(err) {
+					err = huma.Error422UnprocessableEntity(err.Error())
+					return
+				}
+				if len(idb) != sha256.Size {
+					err = huma.Error422UnprocessableEntity(
+						fmt.Sprintf(
+							"event Id must be 64 hex characters: '%s'", id,
+						),
+					)
+					return
+				}
+				evIds = append(evIds, idb)
+			}
+			if idsWriter, ok := sto.(store.GetIdsWriter); ok {
+				output = &huma.StreamResponse{
+					func(ctx huma.Context) {
+						if err = idsWriter.FetchIds(
+							x.Context(), tag.New(evIds...),
+							ctx.BodyWriter(),
+						); chk.E(err) {
+							return
+						}
+					},
+				}
+			}
+			return
+		},
+	)
+}
diff --git a/openapi/http-export.go b/openapi/http-export.go
new file mode 100644
index 0000000..614baf9
--- /dev/null
+++ b/openapi/http-export.go
@@ -0,0 +1,68 @@
+package openapi
+
+import (
+ "net/http"
+ "orly.dev/log"
+
+ "github.com/danielgtaylor/huma/v2"
+
+ "orly.dev/context"
+ "orly.dev/realy/helpers"
+)
+
+// ExportInput is the parameters for the HTTP API Export method.
+type ExportInput struct {
+ Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"`
+}
+
+// ExportOutput is the return value of Export. It usually will be line structured JSON. In
+// future there may be more output formats.
+type ExportOutput struct{ RawBody []byte }
+
+// RegisterExport implements the Export HTTP API method.
+func (x *Operations) RegisterExport(api huma.API) {
+ name := "Export"
+ description := "Export all events (only works with NIP-98/JWT capable client, will not work with UI)"
+ path := "/export"
+ scopes := []string{"admin", "read"}
+ method := http.MethodGet
+ huma.Register(
+ api, huma.Operation{
+ OperationID: name,
+ Summary: name,
+ Path: path,
+ Method: method,
+ Tags: []string{"admin"},
+ Description: helpers.GenerateDescription(description, scopes),
+ Security: []map[string][]string{{"auth": scopes}},
+ }, func(ctx context.T, input *ExportInput) (
+ resp *huma.StreamResponse, err error,
+ ) {
+ // r := ctx.Value("http-request").(*http.Request)
+ // rr := helpers.GetRemoteFromReq(r)
+ // log.I.F("processing export from %s", rr)
+ // // w := ctx.Value("http-response").(http.ResponseWriter)
+ // authed, pubkey := x.AdminAuth(r)
+ // if !authed {
+ // // pubkey = ev.Pubkey
+ // err = huma.Error401Unauthorized("Not Authorized")
+ // return
+ // }
+ // log.I.F("export of event data requested on admin port from %s pubkey %0x",
+ // rr, pubkey)
+ sto := x.Storage()
+ resp = &huma.StreamResponse{
+ func(ctx huma.Context) {
+ ctx.SetHeader("Content-Type", "application/nostr+jsonl")
+ sto.Export(x.Context(), ctx.BodyWriter())
+ if f, ok := ctx.BodyWriter().(http.Flusher); ok {
+ f.Flush()
+ } else {
+ log.W.F("error: unable to flush")
+ }
+ },
+ }
+ return
+ },
+ )
+}
diff --git a/openapi/http-filter.go b/openapi/http-filter.go
new file mode 100644
index 0000000..6e84bd8
--- /dev/null
+++ b/openapi/http-filter.go
@@ -0,0 +1,235 @@
+package openapi
+
+import (
+ "net/http"
+ "orly.dev/chk"
+ "orly.dev/log"
+ "sort"
+
+ "github.com/danielgtaylor/huma/v2"
+
+ "orly.dev/context"
+ "orly.dev/filter"
+ "orly.dev/filters"
+ "orly.dev/hex"
+ "orly.dev/interfaces/store"
+ "orly.dev/kind"
+ "orly.dev/kinds"
+ "orly.dev/realy/helpers"
+ "orly.dev/tag"
+ "orly.dev/tags"
+ "orly.dev/timestamp"
+)
+
+// SimpleFilter is the main parts of a filter.F that relate to event store indexes.
+type SimpleFilter struct {
+ Kinds []int `json:"kinds,omitempty" doc:"array of kind numbers to match on"`
+ Authors []string `json:"authors,omitempty" doc:"array of author pubkeys to match on (hex encoded)"`
+ Tags [][]string `json:"tags,omitempty" doc:"array of tags to match on (first key of each '#x' and terms to match from the second field of the event tag)"`
+}
+
+// FilterInput is the parameters for a Filter HTTP API call.
+type FilterInput struct {
+ Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"false"`
+ Since int64 `query:"since" doc:"timestamp of the oldest events to return (inclusive)"`
+ Until int64 `query:"until" doc:"timestamp of the newest events to return (inclusive)"`
+ Limit uint `query:"limit" doc:"maximum number of results to return"`
+ Sort string `query:"sort" enum:"asc,desc" default:"desc" doc:"sort order by created_at timestamp"`
+ Body SimpleFilter `body:"filter" doc:"filter criteria to match for events to return"`
+}
+
+// ToFilter converts a SimpleFilter input to a regular nostr filter.F.
+func (fi FilterInput) ToFilter() (f *filter.F, err error) {
+ f = filter.New()
+ var ks []*kind.T
+ for _, k := range fi.Body.Kinds {
+ ks = append(ks, kind.New(k))
+ }
+ f.Kinds = kinds.New(ks...)
+ var as [][]byte
+ for _, a := range fi.Body.Authors {
+ var b []byte
+ if b, err = hex.Dec(a); chk.E(err) {
+ return
+ }
+ as = append(as, b)
+ }
+ f.Authors = tag.New(as...)
+ var ts []*tag.T
+ for _, t := range fi.Body.Tags {
+ ts = append(ts, tag.New(t...))
+ }
+ f.Tags = tags.New(ts...)
+ if fi.Limit != 0 {
+ f.Limit = &fi.Limit
+ }
+ if fi.Since != 0 {
+ f.Since = timestamp.New(fi.Since)
+ }
+ if fi.Until != 0 {
+ f.Until = timestamp.New(fi.Until)
+ }
+ return
+}
+
+// FilterOutput is a list of event Ids that match the query in the sort order requested.
+type FilterOutput struct {
+	Body []string `doc:"list of event Ids that match the query in the sort order requested"`
+}
+
+// RegisterFilter is the implementation of the HTTP API Filter method.
+func (x *Operations) RegisterFilter(api huma.API) {
+	name := "Filter"
+	description := "Search for events and receive a sorted list of event Ids (one of authors, kinds or tags must be present)"
+	path := "/filter"
+	scopes := []string{"user", "read"}
+	method := http.MethodPost
+	huma.Register(
+		api, huma.Operation{
+			OperationID: name,
+			Summary:     name,
+			Path:        path,
+			Method:      method,
+			Tags:        []string{"events"},
+			Description: helpers.GenerateDescription(description, scopes),
+			Security:    []map[string][]string{{"auth": scopes}},
+		}, func(ctx context.T, input *FilterInput) (
+			output *FilterOutput, err error,
+		) {
+			var f *filter.F
+			if f, err = input.ToFilter(); chk.E(err) {
+				err = huma.Error422UnprocessableEntity(err.Error())
+				return
+			}
+			log.I.F("%s", f.Marshal(nil))
+			// r := ctx.Value("http-request").(*http.Request)
+			// rr := helpers.GetRemoteFromReq(r)
+			// if len(input.Body.Authors) < 1 && len(input.Body.Kinds) < 1 && len(input.Body.Tags) < 1 {
+			// 	err = huma.Error400BadRequest(
+			// 		"cannot process filter with none of Authors/Kinds/Tags")
+			// 	return
+			// }
+			// var valid bool
+			// var pubkey []byte
+			// valid, pubkey, err = httpauth.CheckAuth(r)
+			// if there is an error but not that the token is missing, or there is no error
+			// but the signature is invalid, return error that request is unauthorized.
+			// if err != nil && !errors.Is(err, httpauth.ErrMissingKey) {
+			// 	err = huma.Error400BadRequest(err.Error())
+			// 	return
+			// }
+			// err = nil
+			// if !valid {
+			// 	err = huma.Error401Unauthorized("Authorization header is invalid")
+			// 	return
+			// }
+			allowed := filters.New(f)
+			// if accepter, ok := x.Relay().(relay.ReqAcceptor); ok {
+			// 	var accepted, modified bool
+			// 	allowed, accepted, modified = accepter.AcceptReq(x.Context(), r, nil,
+			// 		filters.New(f), pubkey)
+			// 	if !accepted {
+			// 		err = huma.Error401Unauthorized("auth to get access for this filter")
+			// 		return
+			// 	} else if modified {
+			// 		log.D.F("filter modified %s", allowed.F[0])
+			// 	}
+			// }
+			// if len(allowed.F) == 0 {
+			// 	err = huma.Error401Unauthorized("all kinds in event restricted; auth to get access for this filter")
+			// 	return
+			// }
+			// if f.Kinds.IsPrivileged() {
+			// 	if auther, ok := x.Relay().(relay.Authenticator); ok && auther.AuthRequired() {
+			// 		log.F.F("privileged request\n%s", f.Serialize())
+			// 		senders := f.Authors
+			// 		receivers := f.Tags.GetAll(tag.New("#p"))
+			// 		switch {
+			// 		case len(pubkey) == 0:
+			// 			err = huma.Error401Unauthorized("auth required for processing request due to presence of privileged kinds (DMs, app specific data)")
+			// 			return
+			// 		case senders.Contains(pubkey) || receivers.ContainsAny([]byte("#p"),
+			// 			tag.New(pubkey)):
+			// 			log.F.F("user %0x from %s allowed to query for privileged event",
+			// 				pubkey, rr)
+			// 		default:
+			// 			err = huma.Error403Forbidden(fmt.Sprintf(
+			// 				"authenticated user %0x does not have authorization for "+
+			// 					"requested filters", pubkey))
+			// 		}
+			// 	}
+			// }
+			sto := x.Storage()
+			var ok bool
+			var quer store.Querier
+			if quer, ok = sto.(store.Querier); !ok {
+				err = huma.Error501NotImplemented("simple filter request not implemented")
+				return
+			}
+			var evs []store.IdPkTs
+			if evs, err = quer.QueryForIds(
+				x.Context(), allowed.F[0],
+			); chk.E(err) {
+				err = huma.Error500InternalServerError(
+					"error querying for events", err,
+				)
+				return
+			}
+			switch input.Sort {
+			case "asc":
+				sort.Slice(
+					evs, func(i, j int) bool {
+						return evs[i].Ts < evs[j].Ts
+					},
+				)
+			case "desc":
+				sort.Slice(
+					evs, func(i, j int) bool {
+						return evs[i].Ts > evs[j].Ts
+					},
+				)
+			}
+			// truncate after sorting so the kept events honour the requested order,
+			// and only when there are more results than the limit (avoids a panic)
+			if input.Limit > 0 && uint(len(evs)) > input.Limit {
+				evs = evs[:input.Limit]
+			}
+			// if len(pubkey) > 0 {
+			// 	// remove events from results if we find the user's mute list, that are present
+			// 	// on this list
+			// 	var mutes event.Ts
+			// 	if mutes, err = sto.QueryEvents(x.Context(), &filter.F{Authors: tag.New(pubkey),
+			// 		Kinds: kinds.New(kind.MuteList)}); !chk.E(err) {
+			// 		var mutePubs [][]byte
+			// 		for _, ev := range mutes {
+			// 			for _, t := range ev.Tags.ToSliceOfTags() {
+			// 				if bytes.Equal(t.Key(), []byte("p")) {
+			// 					var p []byte
+			// 					if p, err = hex.Dec(string(t.Value())); chk.E(err) {
+			// 						continue
+			// 					}
+			// 					mutePubs = append(mutePubs, p)
+			// 				}
+			// 			}
+			// 		}
+			// 		var tmp []store.IdTsPk
+			// 	next:
+			// 		for _, ev := range evs {
+			// 			for _, pk := range mutePubs {
+			// 				if bytes.Equal(ev.Pub, pk) {
+			// 					continue next
+			// 				}
+			// 			}
+			// 			tmp = append(tmp, ev)
+			// 		}
+			// 		// log.I.ToSliceOfBytes("done")
+			// 		evs = tmp
+			// 	}
+			// }
+			output = &FilterOutput{}
+			for _, ev := range evs {
+				output.Body = append(output.Body, hex.Enc(ev.Id))
+			}
+			return
+		},
+	)
+}
diff --git a/openapi/http-import.go b/openapi/http-import.go
new file mode 100644
index 0000000..4fc55b1
--- /dev/null
+++ b/openapi/http-import.go
@@ -0,0 +1,71 @@
+package openapi
+
+import (
+ "bytes"
+ "github.com/danielgtaylor/huma/v2"
+ "net/http"
+
+ "orly.dev/context"
+ "orly.dev/realy/helpers"
+)
+
+// ImportInput is the parameters of an import operation, authentication and the stream of line
+// structured JSON events.
+type ImportInput struct {
+ Auth string `header:"Authorization" doc:"nostr nip-98 token for authentication" required:"true"`
+ RawBody []byte
+}
+
+// ImportOutput is nothing, basically, a 204 or 200 status is expected.
+type ImportOutput struct{}
+
+// RegisterImport is the implementation of the Import operation.
+func (x *Operations) RegisterImport(api huma.API) {
+ name := "Import"
+ description := "Import events from line structured JSON (jsonl)"
+ path := "/import"
+ scopes := []string{"admin", "write"}
+ method := http.MethodPost
+ huma.Register(
+ api, huma.Operation{
+ OperationID: name,
+ Summary: name,
+ Path: path,
+ Method: method,
+ Tags: []string{"admin"},
+ Description: helpers.GenerateDescription(description, scopes),
+ Security: []map[string][]string{{"auth": scopes}},
+ DefaultStatus: 204,
+ },
+ func(ctx context.T, input *ImportInput) (wgh *ImportOutput, err error) {
+ // r := ctx.Value("http-request").(*http.Request)
+ // rr := helpers.GetRemoteFromReq(r)
+ // authed, pubkey := x.AdminAuth(r, time.Minute*10)
+ // if !authed {
+ // // pubkey = ev.Pubkey
+ // err = huma.Error401Unauthorized(
+ // fmt.Sprintf("user %0x not authorized for action", pubkey))
+ // return
+ // }
+ sto := x.Storage()
+ if len(input.RawBody) > 0 {
+ read := bytes.NewBuffer(input.RawBody)
+ sto.Import(read)
+ // if realy, ok := x.Relay().(*app.Relay); ok {
+ // realy.ZeroLists()
+ // realy.CheckOwnerLists(context.Bg())
+ // }
+ // } else {
+ // log.I.F("import of event data requested on admin port from %s pubkey %0x", rr,
+ // pubkey)
+ // read := io.LimitReader(r.Body, r.ContentLength)
+ // sto.Import(read)
+ // if realy, ok := x.Relay().(*app.Relay); ok {
+ // realy.ZeroLists()
+ // realy.CheckOwnerLists(context.Bg())
+ // }
+ }
+ return
+ },
+ )
+}
diff --git a/openapi/http-nuke.go b/openapi/http-nuke.go
new file mode 100644
index 0000000..fb0ce4b
--- /dev/null
+++ b/openapi/http-nuke.go
@@ -0,0 +1,71 @@
+package openapi
+
+import (
+ "net/http"
+ "orly.dev/chk"
+ "orly.dev/log"
+ "strings"
+
+ "github.com/danielgtaylor/huma/v2"
+
+ "orly.dev/context"
+ "orly.dev/interfaces/store"
+ "orly.dev/realy/helpers"
+)
+
+// NukeInput is the parameters for the HTTP API method nuke. Note that it has a confirmation
+// header that must be provided to prevent accidental invocation of this method.
+type NukeInput struct {
+ Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"`
+ Confirm string `header:"X-Confirm" doc:"must put 'Yes I Am Sure' in this field as confirmation"`
+}
+
+// NukeOutput is basically nothing, a 200 or 204 HTTP status response is normal.
+type NukeOutput struct{}
+
+// RegisterNuke is the implementation of the Wipe HTTP API method.
+func (x *Operations) RegisterNuke(api huma.API) {
+ name := "Wipe"
+ description := "Wipe all events in the database"
+ path := "/nuke"
+ scopes := []string{"admin", "write"}
+ method := http.MethodGet
+ huma.Register(
+ api, huma.Operation{
+ OperationID: name,
+ Summary: name,
+ Path: path,
+ Method: method,
+ Tags: []string{"admin"},
+ Description: helpers.GenerateDescription(description, scopes),
+ Security: []map[string][]string{{"auth": scopes}},
+ DefaultStatus: 204,
+ }, func(ctx context.T, input *NukeInput) (wgh *NukeOutput, err error) {
+ // r := ctx.Value("http-request").(*http.Request)
+ // // w := ctx.Value("http-response").(http.ResponseWriter)
+ // rr := helpers.GetRemoteFromReq(r)
+ // authed, pubkey := x.AdminAuth(r)
+ // if !authed {
+ // // pubkey = ev.Pubkey
+ // err = huma.Error401Unauthorized("user not authorized for action")
+ // return
+ // }
+ if input.Confirm != "Yes I Am Sure" {
+ err = huma.Error403Forbidden("Confirm missing or incorrect")
+ return
+ }
+ // log.I.F("database nuke request from %s pubkey %0x", rr, pubkey)
+ sto := x.Storage()
+ if nuke, ok := sto.(store.Wiper); ok {
+ log.I.F("rescanning")
+ if err = nuke.Wipe(); chk.E(err) {
+ if strings.HasPrefix(err.Error(), "Value log GC attempt") {
+ err = nil
+ }
+ return
+ }
+ }
+ return
+ },
+ )
+}
diff --git a/openapi/http-relay.go b/openapi/http-relay.go
new file mode 100644
index 0000000..e5693a2
--- /dev/null
+++ b/openapi/http-relay.go
@@ -0,0 +1,95 @@
+package openapi
+
+import (
+ "bytes"
+ "net/http"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "github.com/danielgtaylor/huma/v2"
+
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/realy/helpers"
+)
+
+// RelayInput is the parameters for the Event HTTP API method.
+type RelayInput struct {
+ Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"false"`
+ RawBody []byte
+}
+
+// RelayOutput is the return parameters for the HTTP API Relay method.
+type RelayOutput struct{ Body string }
+
+// RegisterRelay is the implementation of the HTTP API Relay method.
+func (x *Operations) RegisterRelay(api huma.API) {
+ name := "relay"
+ description := "relay an event, don't store it"
+ path := "/relay"
+ scopes := []string{"user"}
+ method := http.MethodPost
+ huma.Register(
+ api, huma.Operation{
+ OperationID: name,
+ Summary: name,
+ Path: path,
+ Method: method,
+ Tags: []string{"events"},
+ Description: helpers.GenerateDescription(description, scopes),
+ Security: []map[string][]string{{"auth": scopes}},
+ }, func(ctx context.T, input *RelayInput) (
+ output *RelayOutput, err error,
+ ) {
+ log.I.S(input)
+ // r := ctx.Value("http-request").(*http.Request)
+ // rr := helpers.GetRemoteFromReq(r)
+ // var valid bool
+ // var pubkey []byte
+ // valid, pubkey, err = httpauth.CheckAuth(r)
+ // // if there is an error but not that the token is missing, or there is no error
+ // // but the signature is invalid, return error that request is unauthorized.
+ // if err != nil && !errors.Is(err, httpauth.ErrMissingKey) {
+ // err = huma.Error400BadRequest(err.Error())
+ // return
+ // }
+ // err = nil
+ // if !valid {
+ // err = huma.Error401Unauthorized("Authorization header is invalid")
+ // return
+ // }
+ var ok bool
+ // if there was auth, or no auth, check the relay policy allows accepting the
+ // event (no auth with auth required or auth not valid for action can apply
+ // here).
+ ev := &event.E{}
+ if _, err = ev.Unmarshal(input.RawBody); chk.E(err) {
+ err = huma.Error406NotAcceptable(err.Error())
+ return
+ }
+ // accept, notice, _ := x.AcceptEvent(ctx, ev, r, rr, pubkey)
+ // if !accept {
+ // err = huma.Error401Unauthorized(notice)
+ // return
+ // }
+ if !bytes.Equal(ev.GetIDBytes(), ev.Id) {
+ err = huma.Error400BadRequest("event id is computed incorrectly")
+ return
+ }
+ if ok, err = ev.Verify(); chk.T(err) {
+ err = huma.Error400BadRequest("failed to verify signature")
+ return
+ } else if !ok {
+ err = huma.Error400BadRequest("signature is invalid")
+ return
+ }
+ // var authRequired bool
+ // var ar relay.Authenticator
+ // if ar, ok = x.Relay().(relay.Authenticator); ok {
+ // authRequired = ar.AuthRequired()
+ // }
+ x.Publisher().Deliver(ev)
+ return
+ },
+ )
+}
diff --git a/openapi/http-rescan.go b/openapi/http-rescan.go
new file mode 100644
index 0000000..7577c01
--- /dev/null
+++ b/openapi/http-rescan.go
@@ -0,0 +1,58 @@
+package openapi
+
+import (
+ "net/http"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "github.com/danielgtaylor/huma/v2"
+
+ "orly.dev/context"
+ "orly.dev/interfaces/store"
+ "orly.dev/realy/helpers"
+)
+
+type RescanInput struct {
+ Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"`
+}
+
+type RescanOutput struct{}
+
+func (x *Operations) RegisterRescan(api huma.API) {
+ name := "Rescan"
+ description := "Rescan all events and rewrite their indexes (to enable new indexes on old events)"
+ path := "/rescan"
+ scopes := []string{"admin"}
+ method := http.MethodGet
+ huma.Register(
+ api, huma.Operation{
+ OperationID: name,
+ Summary: name,
+ Path: path,
+ Method: method,
+ Tags: []string{"admin"},
+ Description: helpers.GenerateDescription(description, scopes),
+ Security: []map[string][]string{{"auth": scopes}},
+ DefaultStatus: 204,
+ },
+ func(ctx context.T, input *RescanInput) (wgh *RescanOutput, err error) {
+ // r := ctx.Value("http-request").(*http.Request)
+ // rr := helpers.GetRemoteFromReq(r)
+ // authed, pubkey := x.AdminAuth(r)
+ // if !authed {
+ // err = huma.Error401Unauthorized("not authorized")
+ // return
+ // }
+ // log.I.F("index rescan requested on admin port from %s pubkey %0x",
+ // rr, pubkey)
+ sto := x.Storage()
+ if rescanner, ok := sto.(store.Rescanner); ok {
+ log.I.F("rescanning")
+ if err = rescanner.Rescan(); chk.E(err) {
+ return
+ }
+ }
+ return
+ },
+ )
+}
diff --git a/openapi/http-shutdown.go b/openapi/http-shutdown.go
new file mode 100644
index 0000000..326477a
--- /dev/null
+++ b/openapi/http-shutdown.go
@@ -0,0 +1,51 @@
+package openapi
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/danielgtaylor/huma/v2"
+
+ "orly.dev/context"
+ "orly.dev/realy/helpers"
+)
+
+type ShutdownInput struct {
+ Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"true"`
+}
+
+type ShutdownOutput struct{}
+
+func (x *Operations) RegisterShutdown(api huma.API) {
+ name := "Shutdown"
+ description := "Shutdown relay"
+ path := "/shutdown"
+ scopes := []string{"admin"}
+ method := http.MethodGet
+ huma.Register(
+ api, huma.Operation{
+ OperationID: name,
+ Summary: name,
+ Path: path,
+ Method: method,
+ Tags: []string{"admin"},
+ Description: helpers.GenerateDescription(description, scopes),
+ Security: []map[string][]string{{"auth": scopes}},
+ DefaultStatus: 204,
+ }, func(ctx context.T, input *ShutdownInput) (
+ wgh *ShutdownOutput, err error,
+ ) {
+ // r := ctx.Value("http-request").(*http.Request)
+ // authed, _ := x.AdminAuth(r)
+ // if !authed {
+ // err = huma.Error401Unauthorized("authorization required")
+ // return
+ // }
+ go func() {
+ time.Sleep(time.Second)
+ x.Shutdown()
+ }()
+ return
+ },
+ )
+}
diff --git a/openapi/http-subscribe.go b/openapi/http-subscribe.go
new file mode 100644
index 0000000..48bb5ce
--- /dev/null
+++ b/openapi/http-subscribe.go
@@ -0,0 +1,158 @@
+package openapi
+
+import (
+ "net/http"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "github.com/danielgtaylor/huma/v2"
+ "github.com/danielgtaylor/huma/v2/sse"
+
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/filter"
+ "orly.dev/filters"
+ "orly.dev/hex"
+ "orly.dev/kind"
+ "orly.dev/kinds"
+ "orly.dev/realy/helpers"
+ "orly.dev/tag"
+ "orly.dev/tags"
+)
+
+type SubscribeInput struct {
+ Auth string `header:"Authorization" doc:"nostr nip-98 (and expiring variant)" required:"false"`
+ Accept string `header:"Accept" default:"text/event-stream" enum:"text/event-stream" required:"true"`
+ // ContentType string `header:"Content-Type" default:"text/event-stream" enum:"text/event-stream" required:"true"`
+ Body SimpleFilter `body:"filter" doc:"filter criteria to match for events to return"`
+}
+
+func (fi SubscribeInput) ToFilter() (f *filter.F, err error) {
+ f = filter.New()
+ var ks []*kind.T
+ for _, k := range fi.Body.Kinds {
+ ks = append(ks, kind.New(k))
+ }
+ f.Kinds = kinds.New(ks...)
+ var as [][]byte
+ for _, a := range fi.Body.Authors {
+ var b []byte
+ if b, err = hex.Dec(a); chk.E(err) {
+ return
+ }
+ as = append(as, b)
+ }
+ f.Authors = tag.New(as...)
+ var ts []*tag.T
+ for _, t := range fi.Body.Tags {
+ ts = append(ts, tag.New(t...))
+ }
+ f.Tags = tags.New(ts...)
+ return
+}
+
+func (x *Operations) RegisterSubscribe(api huma.API) {
+ name := "Subscribe"
+ description := "Subscribe for newly published events by author, kind or tags; empty also allowed, which just sends all incoming events - uses Server Sent Events format for compatibility with standard libraries."
+ path := "/subscribe"
+ scopes := []string{"user", "read"}
+ method := http.MethodPost
+ sse.Register(
+ api, huma.Operation{
+ OperationID: name,
+ Summary: name,
+ Path: path,
+ Method: method,
+ Tags: []string{"events"},
+ Description: helpers.GenerateDescription(description, scopes),
+ Security: []map[string][]string{{"auth": scopes}},
+ },
+ map[string]any{
+ "event": event.J{},
+ },
+ func(ctx context.T, input *SubscribeInput, send sse.Sender) {
+ log.I.S(input)
+ var err error
+ var f *filter.F
+ if f, err = input.ToFilter(); chk.E(err) {
+ err = huma.Error422UnprocessableEntity(err.Error())
+ return
+ }
+ log.I.F("%s", f.Marshal(nil))
+ r := ctx.Value("http-request").(*http.Request)
+ // rr := helpers.GetRemoteFromReq(r)
+ // var valid bool
+ // var pubkey []byte
+ // valid, pubkey, err = httpauth.CheckAuth(r)
+ // // if there is an error but not that the token is missing, or there is no error
+ // // but the signature is invalid, return error that request is unauthorized.
+ // if err != nil && !errors.Is(err, httpauth.ErrMissingKey) {
+ // err = huma.Error400BadRequest(err.Error())
+ // return
+ // }
+ // err = nil
+ // if !valid {
+ // err = huma.Error401Unauthorized("Authorization header is invalid")
+ // return
+ // }
+ allowed := filters.New(f)
+ // if accepter, ok := x.Relay().(relay.ReqAcceptor); ok {
+ // var accepted, modified bool
+ // allowed, accepted, modified = accepter.AcceptReq(x.Context(), r, nil,
+ // filters.New(f),
+ // pubkey)
+ // if !accepted {
+ // err = huma.Error401Unauthorized("auth to get access for this filter")
+ // return
+ // } else if modified {
+ // log.D.F("filter modified %s", allowed.F[0])
+ // }
+ // }
+ if len(allowed.F) == 0 {
+ err = huma.Error401Unauthorized("all kinds in event restricted; auth to get access for this filter")
+ return
+ }
+ // if f.Kinds.IsPrivileged() {
+ // if auther, ok := x.Relay().(relay.Authenticator); ok && auther.AuthRequired() {
+ // log.F.F("privileged request\n%s", f.Serialize())
+ // senders := f.Authors
+ // receivers := f.Tags.GetAll(tag.New("#p"))
+ // switch {
+ // case len(pubkey) == 0:
+ // err = huma.Error401Unauthorized("auth required for processing request due to presence of privileged kinds (DMs, app specific data)")
+ // return
+ // case senders.Contains(pubkey) || receivers.ContainsAny([]byte("#p"),
+ // tag.New(pubkey)):
+ // log.F.F("user %0x from %s allowed to query for privileged event",
+ // pubkey, rr)
+ // default:
+ // err = huma.Error403Forbidden(fmt.Sprintf(
+ // "authenticated user %0x does not have authorization for "+
+ // "requested filters", pubkey))
+ // }
+ // }
+ // }
+ // register the filter with the listeners
+ receiver := make(event.C, 32)
+ x.Publisher().Receive(
+ &H{
+ Ctx: r.Context(),
+ Receiver: receiver,
+ // Pubkey: pubkey,
+ Filter: f,
+ },
+ )
+ out:
+ for {
+ select {
+ case <-r.Context().Done():
+ break out
+ case ev := <-receiver:
+ if err = send.Data(ev.ToEventJ()); chk.E(err) {
+ }
+ }
+ }
+ return
+ },
+ )
+}
diff --git a/openapi/huma.go b/openapi/huma.go
new file mode 100644
index 0000000..5015343
--- /dev/null
+++ b/openapi/huma.go
@@ -0,0 +1,49 @@
+package openapi
+
+import (
+ "net/http"
+
+ "github.com/danielgtaylor/huma/v2"
+ "github.com/danielgtaylor/huma/v2/adapters/humago"
+)
+
+// ExposeMiddleware adds the http.Request and http.ResponseWriter to the context
+// for the Operations handler.
+func ExposeMiddleware(ctx huma.Context, next func(huma.Context)) {
+ // Unwrap the request and response objects.
+ r, w := humago.Unwrap(ctx)
+ ctx = huma.WithValue(ctx, "http-request", r)
+ ctx = huma.WithValue(ctx, "http-response", w)
+ next(ctx)
+}
+
+// NewHuma creates a new huma.API with a Scalar docs UI, and a middleware that allows methods to
+// access the http.Request and http.ResponseWriter.
+func NewHuma(router *ServeMux, name, version, description string) (api huma.API) {
+ config := huma.DefaultConfig(name, version)
+ config.Info.Description = description
+ config.DocsPath = ""
+ router.ServeMux.HandleFunc("/api", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/html")
+ w.Write([]byte(`
+
+
+ realy HTTP API UI
+
+
+
+
+
+
+
+`))
+ })
+
+ api = humago.New(router, config)
+ api.UseMiddleware(ExposeMiddleware)
+ return
+}
diff --git a/openapi/publisher-openapi.go b/openapi/publisher-openapi.go
new file mode 100644
index 0000000..490b1cb
--- /dev/null
+++ b/openapi/publisher-openapi.go
@@ -0,0 +1,96 @@
+package openapi
+
+import (
+ "sync"
+
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/filter"
+ "orly.dev/realy/publish/publisher"
+)
+
+const Type = "openapi"
+
+// H is the control structure for an HTTP SSE subscription, including the filter
+// and a channel to send the matching events to.
+type H struct {
+ // Ctx is the http.Request context of the subscriber, this enables garbage
+ // collecting the subscriptions from http.
+ Ctx context.T
+ // Receiver is a channel that the listener sends subscription events to for http
+ // subscribe endpoint.
+ Receiver event.C
+ // // Pubkey is the pubkey authed to this subscription
+ // Pubkey []byte
+ // Filter is the filter associated with the http subscription
+ Filter *filter.F
+}
+
+func (h *H) Type() string { return Type }
+
+// Map is a collection of H HTTP subscription handles.
+type Map map[*H]struct{}
+
+type S struct {
+ // Map is the map of subscriptions from the http api.
+ Map
+ // Mx is the mutex that locks the Map.
+ Mx sync.Mutex
+}
+
+var _ publisher.I = &S{}
+
+func New() *S { return &S{Map: make(Map)} }
+
+func (p *S) Type() string { return Type }
+
+func (p *S) Receive(msg publisher.Message) {
+ if m, ok := msg.(*H); ok {
+ p.Mx.Lock()
+ p.Map[m] = struct{}{}
+ p.Mx.Unlock()
+ }
+}
+
+func (p *S) Deliver(ev *event.E) {
+ p.Mx.Lock()
+ var subs []*H
+ for sub := range p.Map {
+ // check if the subscription's subscriber is still alive
+ select {
+ case <-sub.Ctx.Done():
+ subs = append(subs, sub)
+ default:
+ }
+ }
+ for _, sub := range subs {
+ delete(p.Map, sub)
+ }
+ subs = subs[:0]
+ for sub := range p.Map {
+ // if auth required, check the subscription pubkey matches
+ // if !publicReadable {
+ // if authRequired && len(sub.Pubkey) == 0 {
+ // continue
+ // }
+ // }
+ // if the filter doesn't match, skip
+ if !sub.Filter.Matches(ev) {
+ continue
+ }
+ // // if the filter is privileged and the user doesn't have matching auth, skip
+ // if ev.Kind.IsPrivileged() {
+ // ab := sub.Pubkey
+ // var containsPubkey bool
+ // if ev.Tags != nil {
+ // containsPubkey = ev.Tags.ContainsAny([]byte{'p'}, tag.New(ab))
+ // }
+ // if !bytes.Equal(ev.Pubkey, ab) || containsPubkey {
+ // continue
+ // }
+ // }
+ // send the event to the subscriber
+ sub.Receiver <- ev
+ }
+ p.Mx.Unlock()
+}
diff --git a/openapi/serveMux.go b/openapi/serveMux.go
new file mode 100644
index 0000000..50ab7f0
--- /dev/null
+++ b/openapi/serveMux.go
@@ -0,0 +1,21 @@
+package openapi
+
+import "net/http"
+
+type ServeMux struct {
+ *http.ServeMux
+}
+
+func NewServeMux() *ServeMux {
+ return &ServeMux{http.NewServeMux()}
+}
+
+func (c *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Access-Control-Allow-Origin", "*")
+ w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE")
+ w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
+ if r.Method == http.MethodOptions {
+ return
+ }
+ c.ServeMux.ServeHTTP(w, r)
+}
diff --git a/p256k/btcec.go b/p256k/btcec.go
index a8a3996..e3e6e6f 100644
--- a/p256k/btcec.go
+++ b/p256k/btcec.go
@@ -3,7 +3,6 @@
package p256k
import (
- "orly.dev/log"
"orly.dev/p256k/btcec"
)
diff --git a/p256k/btcec/btcec.go b/p256k/btcec/btcec.go
index 4be841e..4102b73 100644
--- a/p256k/btcec/btcec.go
+++ b/p256k/btcec/btcec.go
@@ -14,6 +14,7 @@ import (
type Signer struct {
SecretKey *secp256k1.SecretKey
PublicKey *secp256k1.PublicKey
+ BTCECSec *ec.SecretKey
pkb, skb []byte
}
@@ -25,22 +26,14 @@ func (s *Signer) Generate() (err error) {
return
}
s.skb = s.SecretKey.Serialize()
+ s.BTCECSec, _ = ec.PrivKeyFromBytes(s.skb)
s.PublicKey = s.SecretKey.PubKey()
s.pkb = schnorr.SerializePubKey(s.PublicKey)
return
}
-// GenerateForECDH creates a new Signer.
-func (s *Signer) GenerateForECDH() (err error) {
- return s.Generate()
-}
-
-func (s *Signer) InitECDH() {
- // noop because this isn't needed in this version
-}
-
// InitSec initialises a Signer using raw secret key bytes.
-func (s *Signer) InitSec(sec []byte, _ ...bool) (err error) {
+func (s *Signer) InitSec(sec []byte) (err error) {
if len(sec) != secp256k1.SecKeyBytesLen {
err = errorf.E("sec key must be %d bytes", secp256k1.SecKeyBytesLen)
return
@@ -48,6 +41,7 @@ func (s *Signer) InitSec(sec []byte, _ ...bool) (err error) {
s.SecretKey = secp256k1.SecKeyFromBytes(sec)
s.PublicKey = s.SecretKey.PubKey()
s.pkb = schnorr.SerializePubKey(s.PublicKey)
+ s.BTCECSec, _ = ec.PrivKeyFromBytes(s.skb)
return
}
@@ -112,7 +106,7 @@ func (s *Signer) ECDH(pubkeyBytes []byte) (secret []byte, err error) {
); chk.E(err) {
return
}
- secret = ec.GenerateSharedSecret(s.SecretKey, pub)
+ secret = ec.GenerateSharedSecret(s.BTCECSec, pub)
return
}
diff --git a/p256k/btcec/btcec_test.go b/p256k/btcec/btcec_test.go
index 3f46aa0..2b7bd52 100644
--- a/p256k/btcec/btcec_test.go
+++ b/p256k/btcec/btcec_test.go
@@ -1,13 +1,16 @@
package btcec_test
import (
+ "bufio"
"bytes"
"testing"
"time"
- "orly.dev/chk"
- "orly.dev/log"
+ "orly.dev/ec/schnorr"
+ "orly.dev/event"
+ "orly.dev/event/examples"
"orly.dev/p256k/btcec"
+ "orly.dev/sha256"
)
func TestSigner_Generate(t *testing.T) {
@@ -26,17 +29,96 @@ func TestSigner_Generate(t *testing.T) {
}
func TestBTCECSignerVerify(t *testing.T) {
+ evs := make([]*event.E, 0, 10000)
+ scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
+ buf := make([]byte, 1_000_000)
+ scanner.Buffer(buf, len(buf))
+ var err error
+ signer := &btcec.Signer{}
+ for scanner.Scan() {
+ var valid bool
+ b := scanner.Bytes()
+ ev := event.New()
+ if _, err = ev.Unmarshal(b); chk.E(err) {
+ t.Errorf("failed to marshal\n%s", b)
+ } else {
+ if valid, err = ev.Verify(); chk.E(err) || !valid {
+ t.Errorf("invalid signature\n%s", b)
+ continue
+ }
+ }
+ id := ev.GetIDBytes()
+ if len(id) != sha256.Size {
+ t.Errorf("id should be 32 bytes, got %d", len(id))
+ continue
+ }
+ if err = signer.InitPub(ev.Pubkey); chk.E(err) {
+ t.Errorf("failed to init pub key: %s\n%0x", err, b)
+ }
+ if valid, err = signer.Verify(id, ev.Sig); chk.E(err) {
+ t.Errorf("failed to verify: %s\n%0x", err, b)
+ }
+ if !valid {
+ t.Errorf(
+ "invalid signature for pub %0x %0x %0x", ev.Pubkey, id,
+ ev.Sig,
+ )
+ }
+ evs = append(evs, ev)
+ }
}
func TestBTCECSignerSign(t *testing.T) {
+ evs := make([]*event.E, 0, 10000)
+ scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
+ buf := make([]byte, 1_000_000)
+ scanner.Buffer(buf, len(buf))
+ var err error
+ signer := &btcec.Signer{}
+ var skb []byte
+ if err = signer.Generate(); chk.E(err) {
+ t.Fatal(err)
+ }
+ skb = signer.Sec()
+ if err = signer.InitSec(skb); chk.E(err) {
+ t.Fatal(err)
+ }
+ verifier := &btcec.Signer{}
+ pkb := signer.Pub()
+ if err = verifier.InitPub(pkb); chk.E(err) {
+ t.Fatal(err)
+ }
+ for scanner.Scan() {
+ b := scanner.Bytes()
+ ev := event.New()
+ if _, err = ev.Unmarshal(b); chk.E(err) {
+ t.Errorf("failed to marshal\n%s", b)
+ }
+ evs = append(evs, ev)
+ }
+ var valid bool
+ sig := make([]byte, schnorr.SignatureSize)
+ for _, ev := range evs {
+ ev.Pubkey = pkb
+ id := ev.GetIDBytes()
+ if sig, err = signer.Sign(id); chk.E(err) {
+ t.Errorf("failed to sign: %s\n%0x", err, id)
+ }
+ if valid, err = verifier.Verify(id, sig); chk.E(err) {
+ t.Errorf("failed to verify: %s\n%0x", err, id)
+ }
+ if !valid {
+ t.Errorf("invalid signature")
+ }
+ }
+ signer.Zero()
}
func TestBTCECECDH(t *testing.T) {
n := time.Now()
var err error
var counter int
- const total = 200
- var count int
+ const total = 100
for _ = range total {
s1 := new(btcec.Signer)
if err = s1.Generate(); chk.E(err) {
@@ -62,16 +144,13 @@ func TestBTCECECDH(t *testing.T) {
secret2,
)
}
- count++
}
}
a := time.Now()
duration := a.Sub(n)
log.I.Ln(
- "errors", counter,
- "total", count,
- "time", duration,
- "time/op", duration/time.Duration(count),
- "ops/sec", int(time.Second)/int(duration/time.Duration(count)),
+ "errors", counter, "total", total, "time", duration, "time/op",
+ int(duration/total),
+ "ops/sec", int(time.Second)/int(duration/total),
)
}
diff --git a/p256k/btcec/util_test.go b/p256k/btcec/util_test.go
new file mode 100644
index 0000000..bcbd11a
--- /dev/null
+++ b/p256k/btcec/util_test.go
@@ -0,0 +1,9 @@
+package btcec_test
+
+import (
+ "orly.dev/lol"
+)
+
+var (
+ log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf
+)
diff --git a/p256k/doc.go b/p256k/doc.go
index 990a438..d88e08e 100644
--- a/p256k/doc.go
+++ b/p256k/doc.go
@@ -2,5 +2,5 @@
// bitcoin/libsecp256k1 library for fast signature creation and verification of
// the BIP-340 nostr X-only signatures and public keys, and ECDH.
//
-// Currently, the ECDH is only implemented with the btcec library.
+// Currently the ECDH is only implemented with the btcec library.
package p256k
diff --git a/p256k/p256k.go b/p256k/p256k.go
index 9eea48e..da66883 100644
--- a/p256k/p256k.go
+++ b/p256k/p256k.go
@@ -37,18 +37,6 @@ var _ realy.I = &Signer{}
// Generate a new Signer key pair using the CGO bindings to libsecp256k1
func (s *Signer) Generate() (err error) {
- var cs *Sec
- var cx *XPublicKey
- if s.skb, s.pkb, cs, cx, err = Generate(); chk.E(err) {
- return
- }
- s.SecretKey = &cs.Key
- s.PublicKey = cx.Key
- return
-}
-
-// GenerateForECDH a new Signer key pair using the CGO bindings to libsecp256k1
-func (s *Signer) GenerateForECDH() (err error) {
var cs *Sec
var cx *XPublicKey
if s.skb, s.pkb, cs, cx, err = Generate(); chk.E(err) {
@@ -60,9 +48,10 @@ func (s *Signer) GenerateForECDH() (err error) {
return
}
-func (s *Signer) InitSec(skb []byte, nobtcec ...bool) (err error) {
+func (s *Signer) InitSec(skb []byte) (err error) {
var cs *Sec
var cx *XPublicKey
+ // var cp *PublicKey
if s.pkb, cs, cx, err = FromSecretBytes(skb); chk.E(err) {
if err.Error() != "provided secret generates a public key with odd Y coordinate, fixed version returned" {
log.E.Ln(err)
@@ -74,10 +63,7 @@ func (s *Signer) InitSec(skb []byte, nobtcec ...bool) (err error) {
s.PublicKey = cx.Key
// s.ECPublicKey = cp.Key
// needed for ecdh
- if len(nobtcec) > 0 && nobtcec[0] != true {
- } else {
- s.BTCECSec, _ = btcec.PrivKeyFromBytes(s.skb)
- }
+ s.BTCECSec, _ = btcec.PrivKeyFromBytes(s.skb)
return
}
@@ -94,6 +80,8 @@ func (s *Signer) InitPub(pub []byte) (err error) {
func (s *Signer) Sec() (b []byte) { return s.skb }
func (s *Signer) Pub() (b []byte) { return s.pkb }
+// func (s *Signer) ECPub() (b []byte) { return s.pkb }
+
func (s *Signer) Sign(msg []byte) (sig []byte, err error) {
if s.SecretKey == nil {
err = errorf.E("p256k: I secret not initialized")
@@ -125,18 +113,7 @@ func (s *Signer) Verify(msg, sig []byte) (valid bool, err error) {
return
}
-func (s *Signer) InitECDH() {
- s.BTCECSec, _ = btcec.PrivKeyFromBytes(s.skb)
-}
-
func (s *Signer) ECDH(pubkeyBytes []byte) (secret []byte, err error) {
- if s.BTCECSec == nil {
- if s.skb == nil {
- err = errorf.E("p256k: Secret key bytes not initialized")
- return
- }
- s.BTCECSec, _ = btcec.PrivKeyFromBytes(s.skb)
- }
var pub *secp256k1.PublicKey
if pub, err = secp256k1.ParsePubKey(
append(
diff --git a/p256k/p256k_test.go b/p256k/p256k_test.go
index 4bd9a7e..a094cc9 100644
--- a/p256k/p256k_test.go
+++ b/p256k/p256k_test.go
@@ -3,13 +3,15 @@
package p256k_test
import (
+ "bufio"
"bytes"
+ "crypto/sha256"
"testing"
"time"
- "github.com/minio/sha256-simd"
- "orly.dev/chk"
- "orly.dev/log"
+ "orly.dev/ec/schnorr"
+ "orly.dev/event"
+ "orly.dev/event/examples"
"orly.dev/p256k"
realy "orly.dev/signer"
)
@@ -30,43 +32,94 @@ func TestSigner_Generate(t *testing.T) {
}
func TestSignerVerify(t *testing.T) {
- // Initialize a new signer
+ // evs := make([]*event.E, 0, 10000)
+ scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
+ buf := make([]byte, 1_000_000)
+ scanner.Buffer(buf, len(buf))
+ var err error
signer := &p256k.Signer{}
- err := signer.Generate()
- if chk.E(err) {
- t.Fatalf("Failed to generate signer key pair: %v", err)
+ for scanner.Scan() {
+ var valid bool
+ b := scanner.Bytes()
+ bc := make([]byte, 0, len(b))
+ bc = append(bc, b...)
+ ev := event.New()
+ if _, err = ev.Unmarshal(b); chk.E(err) {
+ t.Errorf("failed to marshal\n%s", b)
+ } else {
+ if valid, err = ev.Verify(); chk.T(err) || !valid {
+ t.Errorf("invalid signature\n%s", bc)
+ continue
+ }
+ }
+ id := ev.GetIDBytes()
+ if len(id) != sha256.Size {
+ t.Errorf("id should be 32 bytes, got %d", len(id))
+ continue
+ }
+ if err = signer.InitPub(ev.Pubkey); chk.T(err) {
+ t.Errorf("failed to init pub key: %s\n%0x", err, ev.Pubkey)
+ continue
+ }
+ if valid, err = signer.Verify(id, ev.Sig); chk.E(err) {
+ t.Errorf("failed to verify: %s\n%0x", err, ev.Id)
+ continue
+ }
+ if !valid {
+ t.Errorf(
+ "invalid signature for\npub %0x\neid %0x\nsig %0x\n%s",
+ ev.Pubkey, id, ev.Sig, bc,
+ )
+ continue
+ }
+ // fmt.Printf("%s\n", bc)
+ // evs = append(evs, ev)
}
+}
- // Sample message to sign
- message := sha256.Sum256([]byte("Hello, world!"))
- // Sign the message
- signature, err := signer.Sign(message[:])
- if chk.E(err) {
- t.Fatalf("Failed to sign message: %v", err)
+func TestSignerSign(t *testing.T) {
+ evs := make([]*event.E, 0, 10000)
+ scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
+ buf := make([]byte, 1_000_000)
+ scanner.Buffer(buf, len(buf))
+ var err error
+ signer := &p256k.Signer{}
+ var skb, pkb []byte
+ if skb, pkb, _, _, err = p256k.Generate(); chk.E(err) {
+ t.Fatal(err)
}
-
- // Verify the signature
- valid, err := signer.Verify(message[:], signature)
- if chk.E(err) {
- t.Fatalf("Error verifying signature: %v", err)
+ log.I.S(skb, pkb)
+ if err = signer.InitSec(skb); chk.E(err) {
+ t.Fatal(err)
}
-
- // Check if the signature is valid
- if !valid {
- t.Error("Valid signature was rejected")
+ verifier := &p256k.Signer{}
+ if err = verifier.InitPub(pkb); chk.E(err) {
+ t.Fatal(err)
}
-
- // Modify the message and verify again
- tamperedMessage := sha256.Sum256([]byte("Hello, tampered world!"))
- valid, err = signer.Verify(tamperedMessage[:], signature)
- if !chk.E(err) {
- t.Fatalf("Error verifying tampered message: %v", err)
+ for scanner.Scan() {
+ b := scanner.Bytes()
+ ev := event.New()
+ if _, err = ev.Unmarshal(b); chk.E(err) {
+ t.Errorf("failed to marshal\n%s", b)
+ }
+ evs = append(evs, ev)
}
-
- // Expect the verification to fail
- if valid {
- t.Error("Invalid signature was accepted")
+ var valid bool
+ sig := make([]byte, schnorr.SignatureSize)
+ for _, ev := range evs {
+ ev.Pubkey = pkb
+ id := ev.GetIDBytes()
+ if sig, err = signer.Sign(id); chk.E(err) {
+ t.Errorf("failed to sign: %s\n%0x", err, id)
+ }
+ if valid, err = verifier.Verify(id, sig); chk.E(err) {
+ t.Errorf("failed to verify: %s\n%0x", err, id)
+ }
+ if !valid {
+ t.Errorf("invalid signature")
+ }
}
+ signer.Zero()
}
func TestECDH(t *testing.T) {
@@ -74,14 +127,14 @@ func TestECDH(t *testing.T) {
var err error
var s1, s2 realy.I
var counter int
- const total = 50
+ const total = 100
for _ = range total {
s1, s2 = &p256k.Signer{}, &p256k.Signer{}
- if err = s1.GenerateForECDH(); chk.E(err) {
+ if err = s1.Generate(); chk.E(err) {
t.Fatal(err)
}
for _ = range total {
- if err = s2.GenerateForECDH(); chk.E(err) {
+ if err = s2.Generate(); chk.E(err) {
t.Fatal(err)
}
var secret1, secret2 []byte
diff --git a/p256k/secp256k1.go b/p256k/secp256k1.go
index 59ad93f..9c276c4 100644
--- a/p256k/secp256k1.go
+++ b/p256k/secp256k1.go
@@ -4,14 +4,14 @@ package p256k
import (
"crypto/rand"
- "unsafe"
-
- "github.com/minio/sha256-simd"
"orly.dev/chk"
- "orly.dev/ec/schnorr"
- "orly.dev/ec/secp256k1"
"orly.dev/errorf"
"orly.dev/log"
+ "unsafe"
+
+ "orly.dev/ec/schnorr"
+ "orly.dev/ec/secp256k1"
+ "orly.dev/sha256"
)
/*
diff --git a/p256k/secp256k1_test.go b/p256k/secp256k1_test.go
index 79323bb..4237e76 100644
--- a/p256k/secp256k1_test.go
+++ b/p256k/secp256k1_test.go
@@ -3,11 +3,86 @@
package p256k_test
import (
+ "bufio"
+ "bytes"
"testing"
+
+ "orly.dev/ec/schnorr"
+ "orly.dev/event"
+ "orly.dev/event/examples"
+ "orly.dev/p256k"
+ "orly.dev/sha256"
)
func TestVerify(t *testing.T) {
+ evs := make([]*event.E, 0, 10000)
+ scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
+ buf := make([]byte, 1_000_000)
+ scanner.Buffer(buf, len(buf))
+ var err error
+ for scanner.Scan() {
+ var valid bool
+ b := scanner.Bytes()
+ ev := event.New()
+ if _, err = ev.Unmarshal(b); chk.E(err) {
+ t.Errorf("failed to marshal\n%s", b)
+ } else {
+ if valid, err = ev.Verify(); chk.E(err) || !valid {
+ t.Errorf("btcec: invalid signature\n%s", b)
+ continue
+ }
+ }
+ id := ev.GetIDBytes()
+ if len(id) != sha256.Size {
+ t.Errorf("id should be 32 bytes, got %d", len(id))
+ continue
+ }
+ if err = p256k.VerifyFromBytes(id, ev.Sig, ev.Pubkey); chk.E(err) {
+ t.Error(err)
+ continue
+ }
+ evs = append(evs, ev)
+ }
}
func TestSign(t *testing.T) {
+ evs := make([]*event.E, 0, 10000)
+ scanner := bufio.NewScanner(bytes.NewBuffer(examples.Cache))
+ buf := make([]byte, 1_000_000)
+ scanner.Buffer(buf, len(buf))
+ var err error
+ var sec1 *p256k.Sec
+ var pub1 *p256k.XPublicKey
+ var pb []byte
+ if _, pb, sec1, pub1, err = p256k.Generate(); chk.E(err) {
+ t.Fatal(err)
+ }
+ for scanner.Scan() {
+ b := scanner.Bytes()
+ ev := event.New()
+ if _, err = ev.Unmarshal(b); chk.E(err) {
+ t.Errorf("failed to marshal\n%s", b)
+ }
+ evs = append(evs, ev)
+ }
+ sig := make([]byte, schnorr.SignatureSize)
+ for _, ev := range evs {
+ ev.Pubkey = pb
+ var uid *p256k.Uchar
+ if uid, err = p256k.Msg(ev.GetIDBytes()); chk.E(err) {
+ t.Fatal(err)
+ }
+ if sig, err = p256k.Sign(uid, sec1.Sec()); chk.E(err) {
+ t.Fatal(err)
+ }
+ ev.Sig = sig
+ var usig *p256k.Uchar
+ if usig, err = p256k.Sig(sig); chk.E(err) {
+ t.Fatal(err)
+ }
+ if !p256k.Verify(uid, usig, pub1.Key) {
+ t.Errorf("invalid signature")
+ }
+ }
+ p256k.Zero(&sec1.Key)
}
diff --git a/p256k/util_test.go b/p256k/util_test.go
new file mode 100644
index 0000000..4399dc7
--- /dev/null
+++ b/p256k/util_test.go
@@ -0,0 +1,9 @@
+package p256k_test
+
+import (
+ "orly.dev/lol"
+)
+
+var (
+ log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf
+)
diff --git a/qu/README.adoc b/qu/README.adoc
new file mode 100644
index 0000000..0ad2e8a
--- /dev/null
+++ b/qu/README.adoc
@@ -0,0 +1,60 @@
+= qu
+
+===== observable signal channels
+
+simple channels that act as breakers or momentary one-shot triggers.
+
+can enable logging to get detailed information on channel state, and channels do
+not panic when an already-closed channel is closed or signalled again.
+
+provides a neat function based syntax for usage.
+
+the wait function requires the `<-` receive operator prefix when it is used in
+a select statement.
+
+== usage
+
+=== creating channels:
+
+==== unbuffered
+
+----
+newSigChan := qu.T()
+----
+
+==== buffered
+
+----
+newBufferedSigChan := qu.Ts(5)
+----
+
+==== closing
+
+----
+newSigChan.Q()
+----
+
+==== signalling
+
+----
+newBufferedSigChan.Signal()
+----
+
+==== logging features
+
+----
+numberOpenUnbufferedChannels := GetOpenUnbufferedChanCount()
+
+numberOpenBufferedChannels := GetOpenBufferedChanCount()
+----
+
+print a list of closed and open channels known by qu:
+
+----
+PrintChanState()
+----
+
+== garbage collection
+
+this library automatically cleans up closed channels once a minute to free
+resources that have become unused.
\ No newline at end of file
diff --git a/qu/qu.go b/qu/qu.go
new file mode 100644
index 0000000..2e277d1
--- /dev/null
+++ b/qu/qu.go
@@ -0,0 +1,242 @@
+// Package qu is a library for making handling signal (chan struct{}) channels
+// simpler, as well as monitoring the state of the signal channels in an
+// application.
+package qu
+
+import (
+ "fmt"
+ "orly.dev/log"
+ "strings"
+ "sync"
+ "time"
+
+ "orly.dev/atomic"
+ "orly.dev/lol"
+)
+
+// C is your basic empty struct signalling channel
+type C chan struct{}
+
+var (
+ createdList []string
+ createdChannels []C
+ createdChannelBufferCounts []int
+ mx sync.Mutex
+ logEnabled = atomic.NewBool(false)
+)
+
+// SetLogging switches on and off the channel logging
+func SetLogging(on bool) {
+ logEnabled.Store(on)
+}
+
+func l(a ...interface{}) {
+ if logEnabled.Load() {
+ log.D.Ln(a...)
+ }
+}
+
+func lc(cl func() string) {
+ if logEnabled.Load() {
+ log.D.Ln(cl())
+ }
+}
+
+// T creates an unbuffered chan struct{} for trigger and quit signalling (momentary and breaker
+// switches)
+func T() C {
+ mx.Lock()
+ defer mx.Unlock()
+ msg := fmt.Sprintf("chan from %s", lol.GetLoc(1))
+ l("created", msg)
+ createdList = append(createdList, msg)
+ o := make(C)
+ createdChannels = append(createdChannels, o)
+ createdChannelBufferCounts = append(createdChannelBufferCounts, 0)
+ return o
+}
+
+// Ts creates a buffered chan struct{} which is specifically intended for signalling without
+// blocking, generally one is the size of buffer to be used, though there might be conceivable
+// cases where the channel should accept more signals without blocking the caller
+func Ts(n int) C {
+ mx.Lock()
+ defer mx.Unlock()
+ msg := fmt.Sprintf("buffered chan (%d) from %s", n, lol.GetLoc(1))
+ l("created", msg)
+ createdList = append(createdList, msg)
+ o := make(C, n)
+ createdChannels = append(createdChannels, o)
+ createdChannelBufferCounts = append(createdChannelBufferCounts, n)
+ return o
+}
+
+// Q closes the channel, which makes it emit a nil every time it is selected.
+func (c C) Q() {
+ open := !testChanIsClosed(c)
+ lc(
+ func() (o string) {
+ lo := getLocForChan(c)
+ mx.Lock()
+ defer mx.Unlock()
+ if open {
+ return "closing chan from " + lo + "\n" + strings.Repeat(
+ " ",
+ 48,
+ ) + "from" + lol.GetLoc(1)
+ } else {
+ return "from" + lol.GetLoc(1) + "\n" + strings.Repeat(" ", 48) +
+ "channel " + lo + " was already closed"
+ }
+ },
+ )
+ if open {
+ close(c)
+ }
+}
+
+// Signal sends struct{}{} on the channel which functions as a momentary switch,
+// useful in pairs for stop/start
+func (c C) Signal() {
+ lc(func() (o string) { return "signalling " + getLocForChan(c) })
+ if !testChanIsClosed(c) {
+ c <- struct{}{}
+ }
+}
+
+// Wait should be placed with a `<-` in a select case in addition to the channel
+// variable name
+func (c C) Wait() <-chan struct{} {
+ lc(
+ func() (o string) {
+ return fmt.Sprint(
+ "waiting on "+getLocForChan(c)+"at",
+ lol.GetLoc(1),
+ )
+ },
+ )
+ return c
+}
+
+// IsClosed exposes a test to see if the channel is closed
+func (c C) IsClosed() bool {
+ return testChanIsClosed(c)
+}
+
+// testChanIsClosed allows you to see whether the channel has been closed so you
+// can avoid a panic by trying to close or signal on it
+func testChanIsClosed(ch C) (o bool) {
+ if ch == nil {
+ return true
+ }
+ select {
+ case <-ch:
+ o = true
+ default:
+ }
+ return
+}
+
+// getLocForChan finds which record connects to the channel in question
+func getLocForChan(c C) (s string) {
+ s = "not found"
+ mx.Lock()
+ for i := range createdList {
+ if i >= len(createdChannels) {
+ break
+ }
+ if createdChannels[i] == c {
+ s = createdList[i]
+ }
+ }
+ mx.Unlock()
+ return
+}
+
+// once a minute clean up the channel cache to remove closed channels no longer
+// in use
+func init() {
+ go func() {
+ for {
+ <-time.After(time.Minute)
+ l("cleaning up closed channels")
+ var c []C
+ var ll []string
+ mx.Lock()
+ for i := range createdChannels {
+ if i >= len(createdList) {
+ break
+ }
+ if testChanIsClosed(createdChannels[i]) {
+ } else {
+ c = append(c, createdChannels[i])
+ ll = append(ll, createdList[i])
+ }
+ }
+ createdChannels = c
+ createdList = ll
+ mx.Unlock()
+ }
+ }()
+}
+
+// PrintChanState creates an output showing the current state of the channels
+// being monitored This is a function for use by the programmer while debugging
+func PrintChanState() {
+ mx.Lock()
+ for i := range createdChannels {
+ if i >= len(createdList) {
+ break
+ }
+ if testChanIsClosed(createdChannels[i]) {
+ log.T.Ln(">>> closed", createdList[i])
+ } else {
+ log.T.Ln("<<< open", createdList[i])
+ }
+ }
+ mx.Unlock()
+}
+
+// GetOpenUnbufferedChanCount returns the number of unbuffered qu channels that are still open
+func GetOpenUnbufferedChanCount() (o int) {
+ mx.Lock()
+ var c int
+ for i := range createdChannels {
+ if i >= len(createdChannels) {
+ break
+ }
+ // skip buffered channels
+ if createdChannelBufferCounts[i] > 0 {
+ continue
+ }
+ if testChanIsClosed(createdChannels[i]) {
+ c++
+ } else {
+ o++
+ }
+ }
+ mx.Unlock()
+ return
+}
+
+// GetOpenBufferedChanCount returns the number of buffered qu channels that are still open
+func GetOpenBufferedChanCount() (o int) {
+ mx.Lock()
+ var c int
+ for i := range createdChannels {
+ if i >= len(createdChannels) {
+ break
+ }
+ // skip unbuffered channels
+ if createdChannelBufferCounts[i] < 1 {
+ continue
+ }
+ if testChanIsClosed(createdChannels[i]) {
+ c++
+ } else {
+ o++
+ }
+ }
+ mx.Unlock()
+ return
+}
diff --git a/ratel/close.go b/ratel/close.go
new file mode 100644
index 0000000..604ddbb
--- /dev/null
+++ b/ratel/close.go
@@ -0,0 +1,26 @@
+package ratel
+
+import (
+ "orly.dev/chk"
+ "orly.dev/log"
+)
+
// Close the database. If the Flatten flag was set, then trigger the flattening of tables before
// shutting down.
//
// NOTE(review): err is reassigned by each step below, so a failure from
// Flatten or seq.Release is logged by chk.E but then overwritten; only the
// result of the final DB.Close is returned — confirm this best-effort
// shutdown is intended.
func (r *T) Close() (err error) {
	// chk.E(r.DB.Sync())
	// wait for any in-flight work tracked by the store's WaitGroup before
	// tearing anything down
	r.WG.Wait()
	log.I.F("closing database %s", r.Path())
	if r.Flatten {
		// merge the LSM tree levels down before closing; the error is logged
		// by chk.E but does not abort the shutdown sequence
		if err = r.DB.Flatten(4); chk.E(err) {
		}
		log.D.F("database flattened")
	}
	// release the serial sequence allocator so unused serials are returned
	if err = r.seq.Release(); chk.E(err) {
	}
	log.D.F("database released")
	if err = r.DB.Close(); chk.E(err) {
	}
	log.I.F("database closed")
	return
}
diff --git a/ratel/compact.go b/ratel/compact.go
new file mode 100644
index 0000000..3ec89f1
--- /dev/null
+++ b/ratel/compact.go
@@ -0,0 +1,34 @@
+package ratel
+
+import (
+ "orly.dev/chk"
+ "orly.dev/event"
+)
+
+// Unmarshal an event from bytes, using compact encoding if configured.
+func (r *T) Unmarshal(ev *event.E, evb []byte) (rem []byte, err error) {
+ // if r.UseCompact {
+ // if rem, err = ev.UnmarshalCompact(evb); chk.E(err) {
+ // ev = nil
+ // evb = evb[:0]
+ // return
+ // }
+ // } else {
+ if rem, err = ev.Unmarshal(evb); chk.E(err) {
+ ev = nil
+ evb = evb[:0]
+ return
+ }
+ // }
+ return
+}
+
+// Marshal an event using compact encoding if configured.
+func (r *T) Marshal(ev *event.E, dst []byte) (b []byte) {
+ // if r.UseCompact {
+ // b = ev.MarshalCompact(dst)
+ // } else {
+ b = ev.Marshal(dst)
+ // }
+ return
+}
diff --git a/ratel/configuration.go b/ratel/configuration.go
new file mode 100644
index 0000000..53d8f3a
--- /dev/null
+++ b/ratel/configuration.go
@@ -0,0 +1,47 @@
+package ratel
+
+// import (
+// "encoding/json"
+//
+// "github.com/dgraph-io/badger/v4"
+//
+// "orly.dev/ratel/prefixes"
+// "orly.dev/store"
+// )
+//
+// // SetConfiguration stores the store.Configuration value to a provided setting.
+// func (r *T) SetConfiguration(c *store.Configuration) (err error) {
+// var b []byte
+// if b, err = json.Marshal(c); chk.E(err) {
+// return
+// }
+// log.I.F("%s", b)
+// err = r.Update(func(txn *badger.Txn) (err error) {
+// if err = txn.Set(prefixes.Configuration.Key(), b); chk.E(err) {
+// return
+// }
+// return
+// })
+// return
+// }
+//
+// // GetConfiguration returns the current store.Configuration stored in the database.
+// func (r *T) GetConfiguration() (c *store.Configuration, err error) {
+// err = r.View(func(txn *badger.Txn) (err error) {
+// c = &store.Configuration{BlockList: make([]string, 0)}
+// var it *badger.Item
+// if it, err = txn.Get(prefixes.Configuration.Key()); chk.E(err) {
+// err = nil
+// return
+// }
+// var b []byte
+// if b, err = it.ValueCopy(nil); chk.E(err) {
+// return
+// }
+// if err = json.Unmarshal(b, c); chk.E(err) {
+// return
+// }
+// return
+// })
+// return
+// }
diff --git a/ratel/countevents.go b/ratel/countevents.go
new file mode 100644
index 0000000..e560bb0
--- /dev/null
+++ b/ratel/countevents.go
@@ -0,0 +1,135 @@
+package ratel
+
+// func (r *T) CountEvents(c context.T, f *filter.T) (count int, approx bool, err error) {
+// log.T.ToSliceOfBytes("QueryEvents,%s", f.Serialize())
+// var queries []query
+// var extraFilter *filter.T
+// var since uint64
+// if queries, extraFilter, since, err = PrepareQueries(f); chk.E(err) {
+// return
+// }
+// var delEvs [][]byte
+// defer func() {
+// // after the count delete any events that are expired as per NIP-40
+// for _, d := range delEvs {
+// chk.E(r.DeleteEvent(r.Ctx, eventid.NewWith(d)))
+// }
+// }()
+// // search for the keys generated from the filter
+// for _, q := range queries {
+// select {
+// case <-c.Done():
+// return
+// default:
+// }
+// var eventKey []byte
+// err = r.View(func(txn *badger.Txn) (err error) {
+// // iterate only through keys and in reverse order
+// opts := badger.IteratorOptions{
+// Reverse: true,
+// }
+// it := txn.NewIterator(opts)
+// defer it.Close()
+// for it.Seek(q.start); it.ValidForPrefix(q.searchPrefix); it.Next() {
+// select {
+// case <-r.Ctx.Done():
+// return
+// case <-c.Done():
+// return
+// default:
+// }
+// item := it.Item()
+// k := item.KeyCopy(nil)
+// if !q.skipTS {
+// if len(k) < createdat.Len+serial.Len {
+// continue
+// }
+// createdAt := createdat.FromKey(k)
+// if createdAt.Val.U64() < since {
+// break
+// }
+// }
+// // todo: here we should get the kind field from the key and collate the
+// // todo: matches that are replaceable/parameterized replaceable ones to decode
+// // todo: to check for replacements so we can actually not set the approx flag.
+// ser := serial.FromKey(k)
+// eventKey = prefixes.Event.Key(ser)
+// // eventKeys = append(eventKeys, idx)
+// }
+// return
+// })
+// if chk.E(err) {
+// // this means shutdown, probably
+// if errors.Is(err, badger.ErrDBClosed) {
+// return
+// }
+// }
+// // todo: here we should decode replaceable events and discard the outdated versions
+// if extraFilter != nil {
+// // if there is an extra filter we need to fetch and decode the event to determine a
+// // match.
+// err = r.View(func(txn *badger.Txn) (err error) {
+// opts := badger.IteratorOptions{Reverse: true}
+// it := txn.NewIterator(opts)
+// defer it.Close()
+// for it.Seek(eventKey); it.ValidForPrefix(eventKey); it.Next() {
+// item := it.Item()
+// if r.HasL2 && item.ValueSize() == sha256.Size {
+// // we will count this though it may not match in fact. for general,
+// // simple filters there isn't likely to be an extrafilter anyway. the
+// // count result can have an "approximate" flag so we flip this now.
+// approx = true
+// return
+// }
+// ev := &event.E{}
+// var appr bool
+// if err = item.Value(func(eventValue []byte) (err error) {
+// var rem []byte
+// if rem, err = r.Unmarshal(ev, eventValue); chk.E(err) {
+// return
+// }
+// if len(rem) > 0 {
+// log.T.S(rem)
+// }
+// if et := ev.Tags.GetFirst(tag.New("expiration")); et != nil {
+// var exp uint64
+// if exp, err = strconv.ParseUint(string(et.Value()), 10, 64); chk.E(err) {
+// return
+// }
+// if int64(exp) > time.Now().Unix() {
+// // this needs to be deleted
+// delEvs = append(delEvs, ev.Id)
+// return
+// }
+// }
+// if ev.Kind.IsReplaceable() ||
+// (ev.Kind.IsParameterizedReplaceable() &&
+// ev.Tags.GetFirst(tag.New("d")) != nil) {
+// // we aren't going to spend this extra time so this just flips the
+// // approximate flag. generally clients are asking for counts to get
+// // an outside estimate anyway, to avoid exceeding MaxLimit
+// appr = true
+// }
+// return
+// }); chk.E(err) {
+// continue
+// }
+// if ev == nil {
+// continue
+// }
+// if extraFilter.Matches(ev) {
+// count++
+// if appr {
+// approx = true
+// }
+// return
+// }
+// }
+// return
+// })
+// } else {
+// count++
+// }
+// }
+// return
+// }
diff --git a/ratel/create-a-tag.go b/ratel/create-a-tag.go
new file mode 100644
index 0000000..91511fe
--- /dev/null
+++ b/ratel/create-a-tag.go
@@ -0,0 +1,94 @@
+package ratel
+
+import (
+ "orly.dev/chk"
+ "orly.dev/log"
+ "strings"
+
+ "orly.dev/ec/schnorr"
+ "orly.dev/hex"
+ "orly.dev/ratel/keys"
+ "orly.dev/ratel/keys/arb"
+ "orly.dev/ratel/keys/createdat"
+ "orly.dev/ratel/keys/index"
+ "orly.dev/ratel/keys/kinder"
+ "orly.dev/ratel/keys/pubkey"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/ratel/prefixes"
+ "orly.dev/tag/atag"
+)
+
// Create_a_Tag generates tag indexes from a tag key, tag value, created_at
// timestamp and the event serial.
//
// The returned prefix and key elements depend on the shape of the value:
//   - a value that is 64 hex characters and decodes cleanly is indexed as a
//     32-byte public key under prefixes.Tag32
//   - an "a" tag value of the form "kind:pubkey:d-tag" is indexed as an
//     address under prefixes.TagAddr
//   - anything else is indexed as arbitrary UTF-8 under prefixes.Tag
func Create_a_Tag(
	tagKey, tagValue string, CA *createdat.T,
	ser *serial.T,
) (prf index.P, elems []keys.Element, err error) {

	var pkb []byte
	// first check if it might be a public key, fastest test
	if len(tagValue) == 2*schnorr.PubKeyBytesLen {
		// this could be a pubkey
		pkb, err = hex.Dec(tagValue)
		if err == nil {
			// it's a pubkey
			var pkk keys.Element
			if pkk, err = pubkey.NewFromBytes(pkb); chk.E(err) {
				return
			}
			prf, elems = prefixes.Tag32, keys.Make(pkk, ser)
			return
		} else {
			// not valid hex after all; clear the decode error and fall
			// through to treat the value as an ordinary tag
			err = nil
		}
	}
	// check for `a` tag
	if tagKey == "a" && strings.Count(tagValue, ":") == 2 {
		a := &atag.T{}
		var rem []byte
		if rem, err = a.Unmarshal([]byte(tagValue)); chk.E(err) {
			return
		}
		if len(rem) > 0 {
			// trailing bytes after the parsed a-tag are logged for diagnosis
			// but not treated as an error
			log.I.S("remainder", tagKey, tagValue, rem)
		}
		prf = prefixes.TagAddr
		var pk *pubkey.T
		if pk, err = pubkey.NewFromBytes(a.PubKey); chk.E(err) {
			return
		}
		// address index key element order: kind, pubkey, d-tag, created_at,
		// serial — this order is what the query side expects
		elems = keys.Make(
			kinder.New(a.Kind.K), pk, arb.New(a.DTag), CA,
			ser,
		)
		return
		// todo: leaving this here in case bugz, note to remove this later
		// // this means we will get 3 pieces here
		// split := strings.Split(tagValue, ":")
		// // middle element should be a public key so must be 64 hex ciphers
		// if len(split[1]) != schnorr.PubKeyBytesLen*2 {
		// 	return
		// }
		// var k uint16
		// var d string
		// if pkb, err = hex.Dec(split[1]); !chk.E(err) {
		// 	var kin uint64
		// 	if kin, err = strconv.ParseUint(split[0], 10, 16); err == nil {
		// 		k = uint16(kin)
		// 		d = split[2]
		// 		var pk *pubkey.T
		// 		if pk, err = pubkey.NewFromBytes(pkb); chk.E(err) {
		// 			return
		// 		}
		// 		prf = prefixes.TagAddr
		// 		elems = keys.Make(kinder.New(k), pk, arb.NewFromString(d), CA,
		// 			ser)
		// 		return
		// 	}
		// }
	}
	// store whatever as utf-8
	prf = prefixes.Tag
	elems = keys.Make(arb.New(tagValue), CA, ser)
	return
}
diff --git a/ratel/del/del.go b/ratel/del/del.go
new file mode 100644
index 0000000..b5a6a80
--- /dev/null
+++ b/ratel/del/del.go
@@ -0,0 +1,13 @@
+// Package del is a simple sorted list for database keys, primarily used to
+// collect lists of events that need to be deleted either by expiration or for
+// the garbage collector.
+package del
+
+import "bytes"
+
+// Items is an array of bytes used for sorting and collating database index keys.
+type Items [][]byte
+
+func (c Items) Len() int { return len(c) }
+func (c Items) Less(i, j int) bool { return bytes.Compare(c[i], c[j]) < 0 }
+func (c Items) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
diff --git a/ratel/deleteevent.go b/ratel/deleteevent.go
new file mode 100644
index 0000000..adcda3e
--- /dev/null
+++ b/ratel/deleteevent.go
@@ -0,0 +1,120 @@
+package ratel
+
+import (
+ "github.com/dgraph-io/badger/v4"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/eventid"
+ "orly.dev/ratel/keys"
+ "orly.dev/ratel/keys/createdat"
+ "orly.dev/ratel/keys/id"
+ "orly.dev/ratel/keys/index"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/ratel/keys/tombstone"
+ "orly.dev/ratel/prefixes"
+ "orly.dev/timestamp"
+)
+
// DeleteEvent deletes an event if it exists and writes a tombstone for the event unless
// requested not to, so that the event can't be saved again.
//
// The deletion proceeds in three steps: resolve the event id to its serial
// via the Id index, regenerate all index keys from the stored event, then
// delete the event record and its indexes (and optionally write the
// tombstone) in a single update transaction.
func (r *T) DeleteEvent(
	c context.T, eid *eventid.T, noTombstone ...bool,
) (err error) {
	log.W.F("deleting event %0x", eid.Bytes())
	var foundSerial []byte
	seri := serial.New(nil)
	err = r.View(
		func(txn *badger.Txn) (err error) {
			// query event by id to ensure we don't try to save duplicates
			prf := prefixes.Id.Key(id.New(eid))
			it := txn.NewIterator(badger.IteratorOptions{})
			defer it.Close()
			it.Seek(prf)
			if it.ValidForPrefix(prf) {
				var k []byte
				// get the serial
				k = it.Item().Key()
				// copy serial out
				keys.Read(k, index.Empty(), id.New(&eventid.T{}), seri)
				// save into foundSerial
				foundSerial = seri.Val
			}
			return
		},
	)
	if chk.E(err) {
		return
	}
	if foundSerial == nil {
		// the event is not stored; nothing to delete
		return
	}
	var indexKeys [][]byte
	ev := event.New()
	var evKey, evb, tombstoneKey []byte
	// fetch the event to get its index keys
	err = r.View(
		func(txn *badger.Txn) (err error) {
			// retrieve the event record
			evKey = keys.Write(index.New(prefixes.Event), seri)
			it := txn.NewIterator(badger.IteratorOptions{})
			defer it.Close()
			it.Seek(evKey)
			if it.ValidForPrefix(evKey) {
				if evb, err = it.Item().ValueCopy(evb); chk.E(err) {
					return
				}
				// log.I.S(evb)
				var rem []byte
				if rem, err = r.Unmarshal(ev, evb); chk.E(err) {
					return
				}
				if len(rem) != 0 {
					log.I.S(rem)
				}
				// log.I.S(rem, ev, seri)
				// regenerate every index key the event produced when it was
				// saved, so they can all be removed together below
				indexKeys = GetIndexKeysForEvent(ev, seri)
				// // we don't make tombstones for replacements, but it is better to shift that
				// // logic outside of this closure.
				// if len(noTombstone) > 0 && !noTombstone[0] {
				// NOTE(review): a tombstone is only written when the caller
				// explicitly passes a first variadic value of false; calling
				// DeleteEvent with no variadic argument writes no tombstone,
				// which seems to contradict the doc comment above — confirm
				// the intended default.
				if len(noTombstone) > 0 && !noTombstone[0] {
					log.I.F("making tombstone")
					ts := tombstone.NewWith(ev.EventId())
					tombstoneKey = prefixes.Tombstone.Key(
						ts, createdat.New(timestamp.Now()),
					)
				}
				// }
				return
			}
			return
		},
	)
	if chk.E(err) {
		return
	}
	// remove the event record, all of its indexes, and write the tombstone
	// (if any) in one transaction
	err = r.Update(
		func(txn *badger.Txn) (err error) {
			// individual delete failures are logged by chk.E but do not abort
			// the remaining deletions
			if err = txn.Delete(evKey); chk.E(err) {
			}
			for _, key := range indexKeys {
				if err = txn.Delete(key); chk.E(err) {
				}
			}
			if len(tombstoneKey) > 0 {
				log.T.S("writing tombstone", tombstoneKey)
				// write tombstone
				log.W.F(
					"writing tombstone %0x for event %0x", tombstoneKey, ev.Id,
				)
				if err = txn.Set(tombstoneKey, nil); chk.E(err) {
					return
				}
			}
			return
		},
	)
	return
}
diff --git a/ratel/export.go b/ratel/export.go
new file mode 100644
index 0000000..067cb81
--- /dev/null
+++ b/ratel/export.go
@@ -0,0 +1,209 @@
+package ratel
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "github.com/dgraph-io/badger/v4"
+
+ "orly.dev/context"
+ "orly.dev/filter"
+ "orly.dev/hex"
+ "orly.dev/qu"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/ratel/prefixes"
+ "orly.dev/tag"
+ "orly.dev/tags"
+)
+
// Export the complete database of stored events to an io.Writer in line structured minified
// JSON.
//
// With no pubkeys, every stored event is streamed out directly. With pubkeys,
// two filters are prepared (events authored by the keys, and events tagging
// them) and matching event keys are fed through a buffered channel to a
// writer goroutine.
func (r *T) Export(c context.T, w io.Writer, pubkeys ...[]byte) {
	var counter int
	var err error
	if len(pubkeys) > 0 {
		// log the selected pubkeys as a bracketed hex list
		var pks []string
		for i := range pubkeys {
			pks = append(pks, hex.Enc(pubkeys[i]))
		}
		o := "["
		for _, pk := range pks {
			o += pk + ","
		}
		o += "]"
		log.I.F("exporting selected pubkeys:\n%s", o)
		keyChan := make(chan []byte, 256)
		// specific set of public keys, so we need to run a search
		fa := &filter.F{Authors: tag.New(pubkeys...)}
		var queries []query
		if queries, _, _, err = PrepareQueries(fa); chk.E(err) {
			return
		}
		// also match events that reference the pubkeys in tags
		// NOTE(review): the tag key here is "#b" — confirm this is intended
		// and not the usual "#p" pubkey-mention tag.
		pTag := [][]byte{[]byte("#b")}
		pTag = append(pTag, pubkeys...)
		fp := &filter.F{Tags: tags.New(tag.New(pTag...))}
		var queries2 []query
		if queries2, _, _, err = PrepareQueries(fp); chk.E(err) {
			return
		}
		queries = append(queries, queries2...)
		// start up writer loop
		// NOTE(review): err is shared between this goroutine and the query
		// loop below without synchronization, and keys still buffered in
		// keyChan when quit fires are dropped — confirm both are acceptable
		// for a best-effort export.
		quit := qu.T()
		go func() {
			for {
				select {
				case <-r.Ctx.Done():
					return
				case <-c.Done():
					return
				case <-quit:
					return
				case eventKey := <-keyChan:
					// look up the event record for this key and stream its
					// stored JSON straight to the writer
					err = r.View(
						func(txn *badger.Txn) (err error) {
							select {
							case <-r.Ctx.Done():
								return
							case <-c.Done():
								return
							case <-quit:
								return
							default:
							}
							opts := badger.IteratorOptions{Reverse: false}
							it := txn.NewIterator(opts)
							defer it.Close()
							var count int
							for it.Seek(eventKey); it.ValidForPrefix(eventKey); it.Next() {
								count++
								item := it.Item()
								// if r.HasL2 && item.ValueSize() == sha256.Size {
								// 	// we aren't fetching from L2 for export, so don't send this back.
								// 	return
								// }
								if err = item.Value(
									func(eventValue []byte) (err error) {
										// send the event to client (no need to re-encode it)
										if _, err = fmt.Fprintf(
											w, "%s\n", eventValue,
										); chk.E(err) {
											return
										}
										return
									},
								); chk.E(err) {
									return
								}
							}
							return
						},
					)
					// errors here are logged but the writer loop keeps going
					if chk.E(err) {
					}
				}
			}
		}()
		// stop the writer loop
		defer quit.Q()
		for _, q := range queries {
			select {
			case <-r.Ctx.Done():
				return
			case <-c.Done():
				return
			default:
			}
			// search for the keys generated from the filter
			err = r.View(
				func(txn *badger.Txn) (err error) {
					select {
					case <-r.Ctx.Done():
						return
					case <-c.Done():
						return
					default:
					}
					opts := badger.IteratorOptions{
						Reverse: true,
					}
					it := txn.NewIterator(opts)
					defer it.Close()
					for it.Seek(q.start); it.ValidForPrefix(q.searchPrefix); it.Next() {
						item := it.Item()
						k := item.KeyCopy(nil)
						// derive the event record key from the index entry's
						// serial and hand it to the writer goroutine
						evKey := prefixes.Event.Key(serial.FromKey(k))
						counter++
						if counter%1000 == 0 && counter > 0 {
							log.I.F("%d events exported", counter)
						}
						keyChan <- evKey
					}
					return
				},
			)
			if chk.E(err) {
				// this means shutdown, probably
				if errors.Is(err, badger.ErrDBClosed) {
					return
				}
			}
		}
	} else {
		// blanket download requested
		err = r.View(
			func(txn *badger.Txn) (err error) {
				it := txn.NewIterator(badger.IteratorOptions{Prefix: prefixes.Event.Key()})
				defer it.Close()
				for it.Rewind(); it.Valid(); it.Next() {
					select {
					case <-r.Ctx.Done():
						return
					case <-c.Done():
						return
					default:
					}
					item := it.Item()
					b, e := item.ValueCopy(nil)
					if chk.E(e) {
						// already isn't the same as the return value!
						// err = nil
						continue
					}
					// send the event to client
					// if r.UseCompact {
					// 	ev := &event.F{}
					// 	var rem []byte
					// 	rem, err = ev.UnmarshalCompact(b)
					// 	if chk.E(err) {
					// 		err = nil
					// 		continue
					// 	}
					// 	if len(rem) > 0 {
					// 		log.I.S(rem)
					// 	}
					// 	if _, err = fmt.Fprintf(w, "%s\n", ev.Marshal(nil)); chk.E(err) {
					// 		return
					// 	}
					//
					// } else {
					// the database stores correct JSON versions so no need to decode/encode.
					if _, err = fmt.Fprintf(w, "%s\n", b); chk.E(err) {
						return
					}
					// }
					counter++
					if counter%1000 == 0 && counter > 0 {
						log.I.F("%d events exported", counter)
					}
				}
				return
			},
		)
		chk.E(err)
	}
	log.I.Ln("exported", counter, "events")
	return
}
diff --git a/ratel/fetch-ids.go b/ratel/fetch-ids.go
new file mode 100644
index 0000000..204380f
--- /dev/null
+++ b/ratel/fetch-ids.go
@@ -0,0 +1,79 @@
+package ratel
+
+import (
+ "io"
+ "orly.dev/chk"
+ "orly.dev/eventidserial"
+
+ "github.com/dgraph-io/badger/v4"
+
+ "orly.dev/context"
+ "orly.dev/ratel/keys/id"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/ratel/prefixes"
+ "orly.dev/tag"
+)
+
+// FetchIds retrieves events based on a list of event Ids that have been provided.
+func (r *T) FetchIds(c context.T, evIds *tag.T, out io.Writer) (err error) {
+ // create an ample buffer for decoding events, 100kb should usually be enough, if
+ // it needs to get bigger it will be reallocated.
+ b := make([]byte, 0, 100000)
+ err = r.View(
+ func(txn *badger.Txn) (err error) {
+ for _, v := range evIds.ToSliceOfBytes() {
+ var evId *id.T
+ if evId, err = id.NewFromBytes(v); chk.E(err) {
+ return
+ }
+ k := prefixes.Id.Key(evId)
+ it := txn.NewIterator(badger.DefaultIteratorOptions)
+ var ser *serial.T
+ defer it.Close()
+ for it.Seek(k); it.ValidForPrefix(k); it.Next() {
+ key := it.Item().Key()
+ ser = serial.FromKey(key)
+ break
+ }
+ var item *badger.Item
+ if item, err = txn.Get(prefixes.Event.Key(ser)); chk.E(err) {
+ return
+ }
+ if b, err = item.ValueCopy(nil); chk.E(err) {
+ return
+ }
+ // if r.UseCompact {
+ // ev := &event.E{}
+ // var rem []byte
+ // if rem, err = ev.UnmarshalCompact(b); chk.E(err) {
+ // return
+ // }
+ // if len(rem) > 0 {
+ // log.I.S(rem)
+ // }
+ // if _, err = out.Write(ev.Serialize()); chk.E(err) {
+ // return
+ // }
+ // } else {
+ // if db isn't using compact encoding the bytes are already right
+ if _, err = out.Write(b); chk.E(err) {
+ return
+ }
+ // }
+ // add the new line after entries
+ if _, err = out.Write([]byte{'\n'}); chk.E(err) {
+ return
+ }
+ }
+ return
+ },
+ )
+ return
+}
+
// EventIdsBySerial returns up to count event ids starting from the given
// serial number.
//
// TODO: not yet implemented; calling this always panics.
func (r *T) EventIdsBySerial(start uint64, count int) (
	evs []eventidserial.E, err error,
) {
	// TODO implement me
	panic("implement me")
}
diff --git a/ratel/garbagecollector.go b/ratel/garbagecollector.go
new file mode 100644
index 0000000..6d0e4a6
--- /dev/null
+++ b/ratel/garbagecollector.go
@@ -0,0 +1,66 @@
+package ratel
+
+// import (
+// "time"
+//
+// "orly.dev/units"
+// )
+//
+// // GarbageCollector starts up a ticker that runs a check on space utilisation
+// // and when it exceeds the high-water mark, prunes back to the low-water mark.
+// //
+// // This function should be invoked as a goroutine, and will terminate when the
+// // backend context is canceled.
+// //
+// // TODO: this needs to be updated and set to actually run by default specifically just for
+// // TODO: pruning tombstones after they are a year or more old.
+// func (r *T) GarbageCollector() {
+// log.D.F("starting ratel back-end garbage collector,"+
+// "max size %0.3fGb,"+
+// "high water %0.3fGb,"+
+// "low water %0.3fGb,"+
+// "GC check frequency %v,%s",
+// float32(r.DBSizeLimit/units.Gb),
+// float32(r.DBHighWater*r.DBSizeLimit/100)/float32(units.Gb),
+// float32(r.DBLowWater*r.DBSizeLimit/100)/float32(units.Gb),
+// r.GCFrequency,
+// r.Path(),
+// )
+// var err error
+// if err = r.GCRun(); chk.E(err) {
+// }
+// GCticker := time.NewTicker(r.GCFrequency)
+// syncTicker := time.NewTicker(r.GCFrequency * 10)
+// out:
+// for {
+// select {
+// case <-r.Ctx.Done():
+// log.W.Ln("stopping event GC ticker")
+// GCticker.Stop()
+// break out
+// case <-GCticker.C:
+// // log.T.Ln("running GC", r.Path)
+// if err = r.GCRun(); chk.E(err) {
+// }
+// case <-syncTicker.C:
+// chk.E(r.DB.Sync())
+// }
+// }
+// log.I.Ln("closing badger event store garbage collector")
+// }
+//
+// func (r *T) GCRun() (err error) {
+// log.T.Ln("running GC", r.Path())
+// var pruneEvents, pruneIndexes DelItems
+// if pruneEvents, pruneIndexes, err = r.GCMark(); chk.E(err) {
+// return
+// }
+// if len(pruneEvents) < 1 && len(pruneIndexes) < 1 {
+// // log.I.Ln("GC sweep unnecessary")
+// return
+// }
+// if err = r.GCSweep(pruneEvents, pruneIndexes); chk.E(err) {
+// return
+// }
+// return
+// }
diff --git a/ratel/gccount.go b/ratel/gccount.go
new file mode 100644
index 0000000..0f04f8d
--- /dev/null
+++ b/ratel/gccount.go
@@ -0,0 +1,203 @@
+package ratel
+
+// import (
+// "encoding/binary"
+// "fmt"
+// "sort"
+// "sync"
+// "time"
+//
+// "github.com/dgraph-io/badger/v4"
+//
+// "orly.dev/ratel/keys/count"
+// "orly.dev/ratel/keys/createdat"
+// "orly.dev/ratel/keys/index"
+// "orly.dev/ratel/keys/serial"
+// "orly.dev/ratel/prefixes"
+// "orly.dev/sha256"
+// "orly.dev/timestamp"
+// "orly.dev/units"
+// )
+//
+// const KeyLen = serial.Len + 1
+// const PrunedLen = sha256.Size + KeyLen
+// const CounterLen = KeyLen + createdat.Len
+//
+// // GCCount performs a census of events in the event store. It counts the number
+// // of events and their size, and if there is a layer 2 enabled, it counts the
+// // number of events that have been pruned and thus have indexes to count.
+// //
+// // Both operations are more efficient combined together rather than separated,
+// // thus this is a fairly long function.
+// func (r *T) GCCount() (unpruned, pruned count.Items, unprunedTotal,
+// prunedTotal int, err error) {
+//
+// // log.D.Ln("running GC count", r.Path())
+// overallStart := time.Now()
+// prf := prefixes.Event.Key()
+// evStream := r.DB.NewStream()
+// evStream.Prefix = prf
+// var countMx sync.Mutex
+// var totalCounter int
+// evStream.ChooseKey = func(item *badger.Item) (b bool) {
+// if item.IsDeletedOrExpired() {
+// return
+// }
+// key := make([]byte, index.Len+serial.Len)
+// item.KeyCopy(key)
+// ser := serial.FromKey(key)
+// size := uint32(item.ValueSize())
+// totalCounter++
+// countMx.Lock()
+// if size == sha256.Size {
+// pruned = append(pruned, &count.Item{
+// Serial: ser.Uint64(),
+// Size: PrunedLen,
+// })
+// } else {
+// unpruned = append(unpruned, &count.Item{
+// Serial: ser.Uint64(),
+// Size: size + KeyLen,
+// })
+// }
+// countMx.Unlock()
+// return
+// }
+// // started := time.Now()
+// // run in a background thread to parallelise all the streams
+// if err = evStream.Orchestrate(r.Ctx); chk.E(err) {
+// return
+// }
+// log.T.F("counted %d events, %d pruned events in %v %s", len(unpruned),
+// len(pruned), time.Now().Sub(overallStart), r.Path())
+// var unprunedBySerial, prunedBySerial count.ItemsBySerial
+// unprunedBySerial = count.ItemsBySerial(unpruned)
+// sort.Sort(unprunedBySerial)
+// var countFresh count.Freshes
+// // pruneStarted := time.Now()
+// counterStream := r.DB.NewStream()
+// counterStream.Prefix = []byte{prefixes.Counter.B()}
+// v := make([]byte, createdat.Len)
+// countFresh = make(count.Freshes, 0, totalCounter)
+// counterStream.ChooseKey = func(item *badger.Item) (b bool) {
+// key := make([]byte, index.Len+serial.Len)
+// item.KeyCopy(key)
+// s64 := serial.FromKey(key).Uint64()
+// countMx.Lock()
+// countFresh = append(countFresh,
+// &count.Fresh{
+// Serial: s64,
+// Freshness: timestamp.FromUnix(int64(binary.BigEndian.Uint64(v))),
+// })
+// countMx.Unlock()
+// return
+// }
+// // run in a background thread to parallelise all the streams
+// if err = counterStream.Orchestrate(r.Ctx); chk.E(err) {
+// return
+// }
+// // wait until all the jobs are complete
+// sort.Sort(countFresh)
+// if r.HasL2 {
+// // if there is L2 we are marking pruned indexes as well
+// // log.I.ToSliceOfBytes("counted %d pruned events in %v %s", len(pruned),
+// // time.Now().Sub(pruneStarted), r.Path())
+// prunedBySerial = count.ItemsBySerial(pruned)
+// sort.Sort(prunedBySerial)
+// }
+// // both slices are now sorted by serial, so we can now iterate the freshness
+// // slice and write in the access timestamps to the unpruned
+// //
+// // this provides the least amount of iteration and computation to essentially
+// // zip two tables together
+// var unprunedCursor, prunedCursor int
+// // we also need to create a map of serials to their respective array index, and
+// // we know how big it has to be so we can avoid allocations during the iteration.
+// //
+// // if there is no L2 this will be an empty map and have nothing added to it.
+// prunedMap := make(map[uint64]int, len(prunedBySerial))
+// for i := range countFresh {
+// // populate freshness of unpruned item
+// if len(unprunedBySerial) > i && countFresh[i].Serial ==
+// unprunedBySerial[unprunedCursor].Serial {
+// // add the counter record to the size
+// unprunedBySerial[unprunedCursor].Size += CounterLen
+// unprunedBySerial[unprunedCursor].Freshness = countFresh[i].Freshness
+// unprunedCursor++
+// // if there is no L2 we should not see any here anyway
+// } else if r.HasL2 && len(prunedBySerial) > 0 && len(prunedBySerial) < prunedCursor {
+// if countFresh[i].Serial ==
+// prunedBySerial[prunedCursor].Serial {
+// // populate freshness of pruned item
+// ps := prunedBySerial[prunedCursor]
+// // add the counter record to the size
+// ps.Size += CounterLen
+// ps.Freshness = countFresh[i].Freshness
+// prunedMap[ps.Serial] = prunedCursor
+// prunedCursor++
+// }
+// }
+// }
+// if r.HasL2 {
+// // lastly, we need to count the size of all relevant transactions from the
+// // pruned set
+// for _, fp := range prefixes.FilterPrefixes {
+// // this can all be done concurrently
+// go func(fp []byte) {
+// evStream = r.DB.NewStream()
+// evStream.Prefix = fp
+// evStream.ChooseKey = func(item *badger.Item) (b bool) {
+// k := item.KeyCopy(nil)
+// ser := serial.FromKey(k)
+// uSer := ser.Uint64()
+// countMx.Lock()
+// // the pruned map allows us to (more) directly find the slice index relevant to
+// // the serial
+// pruned[prunedMap[uSer]].Size += uint32(len(k)) + uint32(item.ValueSize())
+// countMx.Unlock()
+// return
+// }
+// }(fp)
+// }
+// }
+// hw, _ := r.GetEventHeadroom()
+// unprunedTotal = unpruned.Total()
+// up := float64(unprunedTotal)
+// var o string
+// o += fmt.Sprintf("%8d complete,"+
+// "total %0.6f Gb,"+
+// "HW %0.6f Gb",
+// len(unpruned),
+// up/units.Gb,
+// float64(hw)/units.Gb,
+// )
+// if r.HasL2 {
+// l2hw, _ := r.GetIndexHeadroom()
+// prunedTotal = pruned.Total()
+// p := float64(prunedTotal)
+// if r.HasL2 {
+// o += fmt.Sprintf(",%8d pruned,"+
+// "total %0.6f Gb,"+
+// "pruned HW %0.6f Gb,computed in %v,%s",
+// len(pruned),
+// p/units.Gb,
+// float64(l2hw)/units.Gb,
+// time.Now().Sub(overallStart),
+// r.Path(),
+// )
+// }
+// }
+// log.D.Ln(o)
+// return
+// }
+//
+// func (r *T) GetIndexHeadroom() (hw, lw int) {
+// limit := r.DBSizeLimit - r.DBSizeLimit*r.DBHighWater/100
+// return limit * r.DBHighWater / 100,
+// limit * r.DBLowWater / 100
+// }
+//
+// func (r *T) GetEventHeadroom() (hw, lw int) {
+// return r.DBSizeLimit * r.DBHighWater / 100,
+// r.DBSizeLimit * r.DBLowWater / 100
+// }
diff --git a/ratel/gcmark.go b/ratel/gcmark.go
new file mode 100644
index 0000000..77ab9a3
--- /dev/null
+++ b/ratel/gcmark.go
@@ -0,0 +1,63 @@
+package ratel
+
+// import (
+// "sort"
+//
+// "orly.dev/ratel/keys/count"
+// "orly.dev/units"
+// )
+//
+// type DelItems []uint64
+//
+// // GCMark first gathers the serial, data size and last accessed information
+// // about all events and pruned events using GCCount then sorts the results of
+// // the events and indexes by least recently accessed and generates the set of
+// // serials of events that need to be deleted
+// func (r *T) GCMark() (pruneEvents, pruneIndexes DelItems, err error) {
+// var unpruned, pruned count.Items
+// var uTotal, pTotal int
+// if unpruned, pruned, uTotal, pTotal, err = r.GCCount(); chk.E(err) {
+// return
+// }
+// hw, lw := r.GetEventHeadroom()
+// if uTotal > hw {
+// // run event GC mark
+// sort.Sort(unpruned)
+// pruneOff := uTotal - lw
+// var cumulative, lastIndex int
+// for lastIndex = range unpruned {
+// if cumulative > pruneOff {
+// break
+// }
+// cumulative += int(unpruned[lastIndex].Size)
+// pruneEvents = append(pruneEvents, unpruned[lastIndex].Serial)
+// }
+// log.D.F("found %d events to prune,which will bring current "+
+// "utilization down to %0.6f Gb,%s",
+// lastIndex-1, float64(uTotal-cumulative)/units.Gb, r.Path())
+// }
+// l2hw, l2lw := r.GetIndexHeadroom()
+// if r.HasL2 && pTotal > l2hw {
+// // run index GC mark
+// sort.Sort(pruned)
+// var lastIndex int
+// // we want to remove the oldest indexes until at or below the index low water mark.
+// space := pTotal
+// // count the number of events until the low water mark
+// for lastIndex = range pruned {
+// if space < l2lw {
+// break
+// }
+// space -= int(pruned[lastIndex].Size)
+// }
+// log.D.F("deleting %d indexes using %d bytes to bring pruned index size to %d",
+// lastIndex+1, pTotal-l2lw, space)
+// for i := range pruned {
+// if i > lastIndex {
+// break
+// }
+// pruneIndexes = append(pruneIndexes, pruned[i].Serial)
+// }
+// }
+// return
+// }
diff --git a/ratel/gcsweep.go b/ratel/gcsweep.go
new file mode 100644
index 0000000..cc527ae
--- /dev/null
+++ b/ratel/gcsweep.go
@@ -0,0 +1,124 @@
+package ratel
+
+// // GCSweep runs the delete on all of the items that GCMark has determined should be deleted.
+// func (r *T) GCSweep(evs, idxs DelItems) (err error) {
+// // first we must gather all the indexes of the relevant events
+// started := time.Now()
+// batch := r.DB.NewWriteBatch()
+// defer func() {
+// log.I.Ln("flushing GC sweep batch")
+// if err = batch.Flush(); chk.E(err) {
+// return
+// }
+// if vlerr := r.DB.RunValueLogGC(0.5); vlerr == nil {
+// log.I.Ln("value log cleaned up")
+// }
+// chk.E(r.DB.Sync())
+// batch.Cancel()
+// log.I.Ln("completed sweep in", time.Now().Sub(started), r.Path())
+// }()
+// // var wg sync.WaitGroup
+// // go func() {
+// // wg.Add(1)
+// // defer wg.Done()
+// stream := r.DB.NewStream()
+// // get all the event indexes to delete/prune
+// stream.Prefix = prefixes.Event.Key()
+// stream.ChooseKey = func(item *badger.Item) (boo bool) {
+// if item.KeySize() != 1+serial.Len {
+// return
+// }
+// if item.IsDeletedOrExpired() {
+// return
+// }
+// key := item.KeyCopy(nil)
+// ser := serial.FromKey(key).Uint64()
+// var found bool
+// for i := range evs {
+// if evs[i] == ser {
+// found = true
+// break
+// }
+// }
+// if !found {
+// return
+// }
+// if r.HasL2 {
+// // if it's already pruned, skip
+// if item.ValueSize() == sha256.Size {
+// return
+// }
+// // if there is L2 we are only pruning (replacing event with the Id hash)
+// var evb []byte
+// if evb, err = item.ValueCopy(nil); chk.E(err) {
+// return
+// }
+// ev := &event.E{}
+// var rem []byte
+// if rem, err = r.Unmarshal(ev, evb); chk.E(err) {
+// return
+// }
+// if len(rem) != 0 {
+// log.I.S(rem)
+// }
+// // otherwise we are deleting
+// if err = batch.Delete(key); chk.E(err) {
+// return
+// }
+// if err = batch.Set(key, ev.Id); chk.E(err) {
+// return
+// }
+// return
+// } else {
+// // otherwise we are deleting
+// if err = batch.Delete(key); chk.E(err) {
+// return
+// }
+// }
+// return
+// }
+// // execute the event prune/delete
+// if err = stream.Orchestrate(r.Ctx); chk.E(err) {
+// return
+// }
+// // }()
+// // next delete all the indexes
+// if len(idxs) > 0 && r.HasL2 {
+// log.I.Ln("pruning indexes")
+// // we have to remove everything
+// prfs := [][]byte{prefixes.Event.Key()}
+// prfs = append(prfs, prefixes.FilterPrefixes...)
+// prfs = append(prfs, []byte{prefixes.Counter.B()})
+// for _, prf := range prfs {
+// stream = r.DB.NewStream()
+// stream.Prefix = prf
+// stream.ChooseKey = func(item *badger.Item) (boo bool) {
+// if item.IsDeletedOrExpired() || item.KeySize() < serial.Len+1 {
+// return
+// }
+// key := item.KeyCopy(nil)
+// ser := serial.FromKey(key).Uint64()
+// var found bool
+// for _, idx := range idxs {
+// if idx == ser {
+// found = true
+// break
+// }
+// }
+// if !found {
+// return
+// }
+// // log.I.ToSliceOfBytes("deleting index %x %d", prf, ser)
+// if err = batch.Delete(key); chk.E(err) {
+// return
+// }
+// return
+// }
+// if err = stream.Orchestrate(r.Ctx); chk.E(err) {
+// return
+// }
+// log.T.Ln("completed index prefix", prf)
+// }
+// }
+// return
+// }
diff --git a/ratel/getecounterkey.go b/ratel/getecounterkey.go
new file mode 100644
index 0000000..879cc38
--- /dev/null
+++ b/ratel/getecounterkey.go
@@ -0,0 +1,10 @@
+package ratel
+
+//// GetCounterKey returns the proper counter key for a given event Id. This needs
+//// a separate function because of what it does, but is generated in the general
+//// GetIndexKeysForEvent function.
+//func GetCounterKey(ser *serial.T) (key []byte) {
+// key = prefixes.Counter.Key(ser)
+// // log.T.ToSliceOfBytes("counter key %d %d", index.Counter, ser.Uint64())
+// return
+//}
diff --git a/ratel/getindexkeysforevent.go b/ratel/getindexkeysforevent.go
new file mode 100644
index 0000000..63c6959
--- /dev/null
+++ b/ratel/getindexkeysforevent.go
@@ -0,0 +1,115 @@
+package ratel
+
+import (
+ "bytes"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "orly.dev/event"
+ "orly.dev/eventid"
+ "orly.dev/ratel/keys"
+ "orly.dev/ratel/keys/createdat"
+ "orly.dev/ratel/keys/fullid"
+ "orly.dev/ratel/keys/fullpubkey"
+ "orly.dev/ratel/keys/id"
+ "orly.dev/ratel/keys/index"
+ "orly.dev/ratel/keys/kinder"
+ "orly.dev/ratel/keys/pubkey"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/ratel/prefixes"
+ "orly.dev/tag"
+)
+
+// GetIndexKeysForEvent generates all the index keys required to filter for
+// events. evtSerial should be the output of Serial() which gets a unique,
+// monotonic counter value for each new event.
+func GetIndexKeysForEvent(ev *event.E, ser *serial.T) (keyz [][]byte) {
+
+ var err error
+ keyz = make([][]byte, 0, 18)
+ ID := id.New(eventid.NewWith(ev.Id))
+ CA := createdat.New(ev.CreatedAt)
+ K := kinder.New(ev.Kind.ToU16())
+ PK, _ := pubkey.New(ev.Pubkey)
+ FID := fullid.New(eventid.NewWith(ev.Id))
+ FPK := fullpubkey.New(ev.Pubkey)
+ // indexes
+ { // ~ by id
+ k := prefixes.Id.Key(ID, ser)
+ // log.T.ToSliceOfBytes("id key: %x %0x %0x", k[0], k[1:9], k[9:])
+ keyz = append(keyz, k)
+ }
+ { // ~ by pubkey+date
+ k := prefixes.Pubkey.Key(PK, CA, ser)
+ // log.T.ToSliceOfBytes("pubkey + date key: %x %0x %0x %0x",
+ // k[0], k[1:9], k[9:17], k[17:])
+ keyz = append(keyz, k)
+ }
+ { // ~ by kind+date
+ k := prefixes.Kind.Key(K, CA, ser)
+ // log.T.ToSliceOfBytes("kind + date key: %x %0x %0x %0x",
+ // k[0], k[1:3], k[3:11], k[11:])
+ keyz = append(keyz, k)
+ }
+ { // ~ by pubkey+kind+date
+ k := prefixes.PubkeyKind.Key(PK, K, CA, ser)
+ // log.T.ToSliceOfBytes("pubkey + kind + date key: %x %0x %0x %0x %0x",
+ // k[0], k[1:9], k[9:11], k[11:19], k[19:])
+ keyz = append(keyz, k)
+ }
+ // ~ by tag value + date
+ for i, t := range ev.Tags.ToSliceOfTags() {
+ // there is no value field
+ if t.Len() < 2 ||
+ // the tag is not a-zA-Z probably (this would permit arbitrary other
+ // single byte chars)
+ len(t.ToSliceOfBytes()[0]) != 1 ||
+ // the second field is zero length
+ len(t.ToSliceOfBytes()[1]) == 0 ||
+ // the second field is more than 100 characters long
+ len(t.ToSliceOfBytes()[1]) > 100 {
+ // any of the above is true then the tag is not indexable
+ continue
+ }
+ var firstIndex int
+ var tt *tag.T
+ for firstIndex, tt = range ev.Tags.ToSliceOfTags() {
+ if tt.Len() >= 2 && bytes.Equal(tt.B(1), t.B(1)) {
+ break
+ }
+ }
+ if firstIndex != i {
+ // duplicate
+ continue
+ }
+ // get key prefix (with full length) and offset where to write the last
+ // parts
+ prf, elems := index.P(0), []keys.Element(nil)
+ if prf, elems, err = Create_a_Tag(
+ string(t.ToSliceOfBytes()[0]),
+ string(t.ToSliceOfBytes()[1]), CA,
+ ser,
+ ); chk.E(err) {
+ log.I.F("%v", t.ToStringSlice())
+ return
+ }
+ k := prf.Key(elems...)
+ // log.T.ToSliceOfBytes("tag '%s': %s key %0x", t.ToSliceOfBytes()[0], t.ToSliceOfBytes()[1:], k)
+ keyz = append(keyz, k)
+ }
+ { // ~ by date only
+ k := prefixes.CreatedAt.Key(CA, ser)
+ // log.T.ToSliceOfBytes("date key: %x %0x %0x", k[0], k[1:9], k[9:])
+ keyz = append(keyz, k)
+ }
+ // { // Counter index - for storing last access time of events.
+ // k := GetCounterKey(ser)
+ // keyz = append(keyz, k)
+ // }
+ { // - full Id index - enabling retrieving the event Id without unmarshalling the data
+ k := prefixes.FullIndex.Key(ser, FID, FPK, CA)
+ // log.T.ToSliceOfBytes("full id: %x %0x %0x", k[0], k[1:9], k[9:])
+ keyz = append(keyz, k)
+ }
+ return
+}
diff --git a/ratel/gettagkeyprefix.go b/ratel/gettagkeyprefix.go
new file mode 100644
index 0000000..9b79335
--- /dev/null
+++ b/ratel/gettagkeyprefix.go
@@ -0,0 +1,56 @@
+package ratel
+
+import (
+ eventstore "orly.dev/addresstag"
+ "orly.dev/chk"
+ "orly.dev/hex"
+ "orly.dev/ratel/keys"
+ "orly.dev/ratel/keys/arb"
+ "orly.dev/ratel/keys/kinder"
+ "orly.dev/ratel/keys/pubkey"
+ "orly.dev/ratel/prefixes"
+)
+
+// GetTagKeyPrefix returns tag index prefixes based on the initial field of a
+// tag.
+//
+// There are three types of index tag keys:
+//
+// - TagAddr: [ 8 ][ 2b Kind ][ 8b Pubkey ][ address/URL ][ 8b Serial ]
+//
+// - Tag32: [ 7 ][ 8b Pubkey ][ 8b Serial ]
+//
+// - Tag: [ 6 ][ address/URL ][ 8b Serial ]
+//
+// This function produces the initial bytes without the index.
+func GetTagKeyPrefix(tagValue string) (key []byte, err error) {
+ if k, pkb, d := eventstore.DecodeAddressTag(tagValue); len(pkb) == 32 {
+ // store value in the new special "a" tag index
+ var pk *pubkey.T
+ if pk, err = pubkey.NewFromBytes(pkb); chk.E(err) {
+ return
+ }
+ els := []keys.Element{kinder.New(k), pk}
+ if len(d) > 0 {
+ els = append(els, arb.New(d))
+ }
+ key = prefixes.TagAddr.Key(els...)
+ } else if pkb, _ := hex.Dec(tagValue); len(pkb) == 32 {
+ // store value as bytes
+ var pkk *pubkey.T
+ if pkk, err = pubkey.NewFromBytes(pkb); chk.E(err) {
+ return
+ }
+ key = prefixes.Tag32.Key(pkk)
+ } else {
+ // store whatever as utf-8
+ if len(tagValue) > 0 {
+ var a *arb.T
+ a = arb.New(tagValue)
+ key = prefixes.Tag.Key(a)
+ } else {
+ key = prefixes.Tag.Key()
+ }
+ }
+ return
+}
diff --git a/ratel/import.go b/ratel/import.go
new file mode 100644
index 0000000..2986170
--- /dev/null
+++ b/ratel/import.go
@@ -0,0 +1,49 @@
+package ratel
+
+import (
+ "bufio"
+ "io"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "orly.dev/event"
+)
+
+const maxLen = 500000000
+
+// Import a collection of events in line structured minified JSON format (JSONL).
+func (r *T) Import(rr io.Reader) {
+ r.Flatten = true
+ var err error
+ scan := bufio.NewScanner(rr)
+ buf := make([]byte, maxLen)
+ scan.Buffer(buf, maxLen)
+ var count, total int
+ for scan.Scan() {
+ b := scan.Bytes()
+ total += len(b) + 1
+ if len(b) < 1 {
+ continue
+ }
+ ev := &event.E{}
+ if _, err = ev.Unmarshal(b); err != nil {
+ continue
+ }
+ if _, _, err = r.SaveEvent(r.Ctx, ev); err != nil {
+ continue
+ }
+ count++
+ if count%1000 == 0 {
+ log.I.F("received %d events", count)
+ }
+ if count > 0 && count%10000 == 0 {
+ chk.T(r.DB.Sync())
+ chk.T(r.DB.RunValueLogGC(0.5))
+ }
+ }
+ log.I.F("read %d bytes and saved %d events", total, count)
+ err = scan.Err()
+ if chk.E(err) {
+ }
+ return
+}
diff --git a/ratel/init.go b/ratel/init.go
new file mode 100644
index 0000000..a76d4ea
--- /dev/null
+++ b/ratel/init.go
@@ -0,0 +1,114 @@
+package ratel
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "github.com/dgraph-io/badger/v4"
+ "orly.dev/ratel/prefixes"
+)
+
+// Init sets up the database with the loaded configuration.
+func (r *T) Init(path string) (err error) {
+ r.dataDir = path
+ log.I.Ln("opening ratel event store at", r.Path())
+ opts := badger.DefaultOptions(r.dataDir)
+ // opts.BlockCacheSize = int64(r.BlockCacheSize)
+ // opts.BlockSize = 128 * units.Mb
+ // opts.CompactL0OnClose = true
+ // opts.LmaxCompaction = true
+ // switch r.Compression {
+ // case "none":
+ // opts.Compression = options.None
+ // case "snappy":
+ // opts.Compression = options.Snappy
+ // case "zstd":
+ // opts.Compression = options.ZSTD
+ // }
+ r.Logger = NewLogger(r.InitLogLevel, r.dataDir)
+ opts.Logger = r.Logger
+ if r.DB, err = badger.Open(opts); chk.E(err) {
+ return err
+ }
+ log.T.Ln("getting event store sequence index", r.dataDir)
+ if r.seq, err = r.DB.GetSequence([]byte("events"), 1000); chk.E(err) {
+ return err
+ }
+ log.T.Ln("running migrations", r.dataDir)
+ if err = r.runMigrations(); chk.E(err) {
+ return log.E.Err("error running migrations: %w; %s", err, r.dataDir)
+ }
+ // if r.DBSizeLimit > 0 {
+ // go r.GarbageCollector()
+ // // } else {
+ // // go r.GCCount()
+ // }
+ return nil
+
+}
+
+const Version = 1
+
+func (r *T) runMigrations() (err error) {
+ return r.Update(
+ func(txn *badger.Txn) (err error) {
+ var version uint16
+ var item *badger.Item
+ item, err = txn.Get(prefixes.Version.Key())
+ if errors.Is(err, badger.ErrKeyNotFound) {
+ version = 0
+ } else if chk.E(err) {
+ return err
+ } else {
+ chk.E(
+ item.Value(
+ func(val []byte) (err error) {
+ version = binary.BigEndian.Uint16(val)
+ return
+ },
+ ),
+ )
+ }
+ // do the migrations in increasing steps (there is no rollback)
+ if version < Version {
+ // if there is any data in the relay we will stop and notify the user, otherwise we
+ // just set version to 1 and proceed
+ prefix := prefixes.Id.Key()
+ it := txn.NewIterator(
+ badger.IteratorOptions{
+ PrefetchValues: true,
+ PrefetchSize: 100,
+ Prefix: prefix,
+ },
+ )
+ defer it.Close()
+ hasAnyEntries := false
+ for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
+ hasAnyEntries = true
+ break
+ }
+ if hasAnyEntries {
+ return fmt.Errorf(
+ "your database is at version %d, but in order to migrate up "+
+ "to version 1 you must manually export all the events and then import "+
+ "again:\n"+
+ "run an old version of this software, export the data, then delete the "+
+ "database files, run the new version, import the data back in",
+ version,
+ )
+ }
+ chk.E(r.bumpVersion(txn, Version))
+ }
+ return nil
+ },
+ )
+}
+
+func (r *T) bumpVersion(txn *badger.Txn, version uint16) error {
+ buf := make([]byte, 2)
+ binary.BigEndian.PutUint16(buf, version)
+ return txn.Set(prefixes.Version.Key(), buf)
+}
diff --git a/ratel/keys/arb/arb.go b/ratel/keys/arb/arb.go
new file mode 100644
index 0000000..7c96d74
--- /dev/null
+++ b/ratel/keys/arb/arb.go
@@ -0,0 +1,94 @@
+// Package arb implements arbitrary length byte keys.Element. In any construction
+// there can only be one with arbitrary length. Custom lengths can be created by
+// calling New with the custom length in it, both for Read and Write operations.
+package arb
+
+import (
+ "bytes"
+ "io"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "orly.dev/ratel/keys"
+)
+
+// T is an arbitrary length byte string. In any construction there can only be one with arbitrary length. Custom lengths
+// can be created by calling New with the custom length in it, both for Read and Write operations.
+type T struct {
+ Val []byte
+}
+
+var _ keys.Element = &T{}
+
+// New creates a new arb.T. This must have the expected length for the provided byte slice as this is what the Read
+// method will aim to copy. In general this will be a bounded field, either the final or only arbitrary length field in
+// a key.
+func New[V []byte | string](s V) (p *T) {
+ b := []byte(s)
+ if len(b) == 0 {
+ log.T.Ln(
+ "empty or nil slice is the same as zero value, " +
+ "use keys.ReadWithArbElem",
+ )
+ return &T{}
+ }
+ return &T{Val: b}
+}
+
+// NewWithLen creates a new arb.T of a given size.
+func NewWithLen(l int) (p *T) { return &T{Val: make([]byte, l)} }
+
+// Write the contents of a bytes.Buffer
+func (p *T) Write(buf io.Writer) {
+ if len(p.Val) == 0 {
+ log.T.Ln("empty slice has no effect")
+ return
+ }
+ buf.Write(p.Val)
+}
+
+func (p *T) Read(buf io.Reader) (el keys.Element) {
+ if len(p.Val) < 1 {
+ log.T.Ln("empty slice has no effect")
+ return
+ }
+ if _, err := buf.Read(p.Val); chk.E(err) {
+ return nil
+ }
+ return p
+}
+
+func (p *T) Len() int {
+ if p == nil {
+ panic("uninitialized pointer to arb.T")
+ }
+ return len(p.Val)
+}
+
+// ReadWithArbElem is a variant of Read that recognises an arbitrary length element by its zero length and imputes its
+// actual length by the byte buffer size and the lengths of the fixed length fields.
+//
+// For reasons of space efficiency, it is not practical to use TLVs for badger database key fields, so this will panic
+// if there is more than one arbitrary length element.
+func ReadWithArbElem(b []byte, elems ...keys.Element) {
+ var arbEl int
+ var arbSet bool
+ l := len(b)
+ for i, el := range elems {
+ elLen := el.Len()
+ l -= elLen
+ if elLen == 0 {
+ if arbSet {
+ panic("cannot have more than one arbitrary length field in a key")
+ }
+ arbEl = i
+ arbSet = true
+ }
+ }
+ // now we can say that the remainder is the correct length for the arb element
+ elems[arbEl] = New(make([]byte, l))
+ buf := bytes.NewBuffer(b)
+ for _, el := range elems {
+ el.Read(buf)
+ }
+}
diff --git a/ratel/keys/arb/arb_test.go b/ratel/keys/arb/arb_test.go
new file mode 100644
index 0000000..d713c6e
--- /dev/null
+++ b/ratel/keys/arb/arb_test.go
@@ -0,0 +1,22 @@
+package arb
+
+import (
+ "bytes"
+ "testing"
+
+ "lukechampine.com/frand"
+)
+
+func TestT(t *testing.T) {
+ randomBytes := frand.Bytes(frand.Intn(128))
+ v := New(randomBytes)
+ buf := new(bytes.Buffer)
+ v.Write(buf)
+ randomCopy := make([]byte, len(randomBytes))
+ buf2 := bytes.NewBuffer(buf.Bytes())
+ v2 := New(randomCopy)
+ el := v2.Read(buf2).(*T)
+ if bytes.Compare(el.Val, v.Val) != 0 {
+ t.Fatalf("expected %x got %x", v.Val, el.Val)
+ }
+}
diff --git a/ratel/keys/count/count.go b/ratel/keys/count/count.go
new file mode 100644
index 0000000..40e12d4
--- /dev/null
+++ b/ratel/keys/count/count.go
@@ -0,0 +1,47 @@
+// Package count contains a series of data types for managing lists of indexes
+// for garbage collection.
+package count
+
+import (
+ "orly.dev/timestamp"
+)
+
+type Item struct {
+ Serial uint64
+ Size uint32
+ Freshness *timestamp.T
+}
+
+type Items []*Item
+
+func (c Items) Len() int { return len(c) }
+func (c Items) Less(i, j int) bool { return c[i].Freshness.I64() < c[j].Freshness.I64() }
+func (c Items) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+func (c Items) Total() (total int) {
+ for i := range c {
+ total += int(c[i].Size)
+ }
+ return
+}
+
+type ItemsBySerial []*Item
+
+func (c ItemsBySerial) Len() int { return len(c) }
+func (c ItemsBySerial) Less(i, j int) bool { return c[i].Serial < c[j].Serial }
+func (c ItemsBySerial) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
+func (c ItemsBySerial) Total() (total int) {
+ for i := range c {
+ total += int(c[i].Size)
+ }
+ return
+}
+
+type Fresh struct {
+ Serial uint64
+ Freshness *timestamp.T
+}
+type Freshes []*Fresh
+
+func (c Freshes) Len() int { return len(c) }
+func (c Freshes) Less(i, j int) bool { return c[i].Freshness.I64() < c[j].Freshness.I64() }
+func (c Freshes) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
diff --git a/ratel/keys/createdat/createdat.go b/ratel/keys/createdat/createdat.go
new file mode 100644
index 0000000..1350978
--- /dev/null
+++ b/ratel/keys/createdat/createdat.go
@@ -0,0 +1,49 @@
+// Package createdat implements a badger key index keys.Element for timestamps.
+package createdat
+
+import (
+ "encoding/binary"
+ "io"
+ "orly.dev/chk"
+ "orly.dev/errorf"
+
+ "orly.dev/ratel/keys"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/timestamp"
+)
+
+const Len = 8
+
+type T struct {
+ Val *timestamp.T
+}
+
+var _ keys.Element = &T{}
+
+func New(c *timestamp.T) (p *T) { return &T{Val: c} }
+
+func (c *T) Write(buf io.Writer) { buf.Write(c.Val.Bytes()) }
+
+func (c *T) Read(buf io.Reader) (el keys.Element) {
+ b := make([]byte, Len)
+ if n, err := buf.Read(b); chk.E(err) || n != Len {
+ return nil
+ }
+ c.Val = timestamp.FromUnix(int64(binary.BigEndian.Uint64(b)))
+ return c
+}
+
+func (c *T) Len() int { return Len }
+
+// FromKey expects to find a datestamp in the 8 bytes before a serial in a key.
+func FromKey(k []byte) (p *T) {
+ if len(k) < Len+serial.Len {
+ err := errorf.F(
+ "cannot get a created-at timestamp without at least %d bytes", Len+serial.Len,
+ )
+ panic(err)
+ }
+ key := make([]byte, 0, Len)
+ key = append(key, k[len(k)-Len-serial.Len:len(k)-serial.Len]...)
+ return &T{Val: timestamp.FromBytes(key)}
+}
diff --git a/ratel/keys/createdat/createdat_test.go b/ratel/keys/createdat/createdat_test.go
new file mode 100644
index 0000000..40f2d26
--- /dev/null
+++ b/ratel/keys/createdat/createdat_test.go
@@ -0,0 +1,26 @@
+package createdat
+
+import (
+ "bytes"
+ "math"
+ "testing"
+
+ "lukechampine.com/frand"
+
+ "orly.dev/timestamp"
+)
+
+func TestT(t *testing.T) {
+ for _ = range 1000000 {
+ n := timestamp.FromUnix(int64(frand.Intn(math.MaxInt64)))
+ v := New(n)
+ buf := new(bytes.Buffer)
+ v.Write(buf)
+ buf2 := bytes.NewBuffer(buf.Bytes())
+ v2 := New(timestamp.New())
+ el := v2.Read(buf2).(*T)
+ if el.Val.Int() != n.Int() {
+ t.Fatalf("expected %d got %d", n.Int(), el.Val.Int())
+ }
+ }
+}
diff --git a/ratel/keys/fullid/fullid.go b/ratel/keys/fullid/fullid.go
new file mode 100644
index 0000000..82ee878
--- /dev/null
+++ b/ratel/keys/fullid/fullid.go
@@ -0,0 +1,48 @@
+// Package fullid implements a keys.Element for complete 32 byte event Ids.
+package fullid
+
+import (
+ "fmt"
+ "io"
+ "orly.dev/chk"
+
+ "orly.dev/ratel/keys"
+ "orly.dev/sha256"
+
+ "orly.dev/eventid"
+)
+
+const Len = sha256.Size
+
+type T struct {
+ Val []byte
+}
+
+var _ keys.Element = &T{}
+
+func New(evID ...*eventid.T) (p *T) {
+ if len(evID) < 1 {
+ return &T{make([]byte, Len)}
+ }
+ return &T{Val: evID[0].Bytes()}
+}
+
+func (p *T) Write(buf io.Writer) {
+ if len(p.Val) != Len {
+ panic(fmt.Sprintln("must use New or initialize Val with len", Len))
+ }
+ buf.Write(p.Val)
+}
+
+func (p *T) Read(buf io.Reader) (el keys.Element) {
+ // allow uninitialized struct
+ if len(p.Val) != Len {
+ p.Val = make([]byte, Len)
+ }
+ if n, err := buf.Read(p.Val); chk.E(err) || n != Len {
+ return nil
+ }
+ return p
+}
+
+func (p *T) Len() int { return Len }
diff --git a/ratel/keys/fullid/fullid_test.go b/ratel/keys/fullid/fullid_test.go
new file mode 100644
index 0000000..6ffd221
--- /dev/null
+++ b/ratel/keys/fullid/fullid_test.go
@@ -0,0 +1,25 @@
+package fullid
+
+import (
+ "bytes"
+ "testing"
+
+ "lukechampine.com/frand"
+
+ "orly.dev/eventid"
+ "orly.dev/sha256"
+)
+
+func TestT(t *testing.T) {
+ fakeIdBytes := frand.Bytes(sha256.Size)
+ id := eventid.NewWith(fakeIdBytes)
+ v := New(id)
+ buf := new(bytes.Buffer)
+ v.Write(buf)
+ buf2 := bytes.NewBuffer(buf.Bytes())
+ v2 := New()
+ el := v2.Read(buf2).(*T)
+ if bytes.Compare(el.Val, v.Val) != 0 {
+ t.Fatalf("expected %x got %x", v.Val, el.Val)
+ }
+}
diff --git a/ratel/keys/fullpubkey/fullpubkey.go b/ratel/keys/fullpubkey/fullpubkey.go
new file mode 100644
index 0000000..b6d6e1e
--- /dev/null
+++ b/ratel/keys/fullpubkey/fullpubkey.go
@@ -0,0 +1,47 @@
+// Package fullpubkey implements a keys.Element for complete 32 byte nostr
+// pubkeys.
+package fullpubkey
+
+import (
+ "fmt"
+ "io"
+ "orly.dev/chk"
+
+ "orly.dev/ec/schnorr"
+ "orly.dev/ratel/keys"
+)
+
+const Len = schnorr.PubKeyBytesLen
+
+type T struct {
+ Val []byte
+}
+
+var _ keys.Element = &T{}
+
+func New(evID ...[]byte) (p *T) {
+ if len(evID) < 1 || len(evID[0]) < 1 {
+ return &T{make([]byte, Len)}
+ }
+ return &T{Val: evID[0]}
+}
+
+func (p *T) Write(buf io.Writer) {
+ if len(p.Val) != Len {
+ panic(fmt.Sprintln("must use New or initialize Val with len", Len))
+ }
+ buf.Write(p.Val)
+}
+
+func (p *T) Read(buf io.Reader) (el keys.Element) {
+ // allow uninitialized struct
+ if len(p.Val) != Len {
+ p.Val = make([]byte, Len)
+ }
+ if n, err := buf.Read(p.Val); chk.E(err) || n != Len {
+ return nil
+ }
+ return p
+}
+
+func (p *T) Len() int { return len(p.Val) }
diff --git a/ratel/keys/fullpubkey/fullpubkey_test.go b/ratel/keys/fullpubkey/fullpubkey_test.go
new file mode 100644
index 0000000..8740619
--- /dev/null
+++ b/ratel/keys/fullpubkey/fullpubkey_test.go
@@ -0,0 +1,23 @@
+package fullpubkey
+
+import (
+ "bytes"
+ "testing"
+
+ "lukechampine.com/frand"
+
+ "orly.dev/sha256"
+)
+
+func TestT(t *testing.T) {
+ pk := frand.Bytes(sha256.Size)
+ v := New(pk)
+ buf := new(bytes.Buffer)
+ v.Write(buf)
+ buf2 := bytes.NewBuffer(buf.Bytes())
+ v2 := New()
+ el := v2.Read(buf2).(*T)
+ if bytes.Compare(el.Val, v.Val) != 0 {
+ t.Fatalf("expected %x got %x", v.Val, el.Val)
+ }
+}
diff --git a/ratel/keys/id/id.go b/ratel/keys/id/id.go
new file mode 100644
index 0000000..c551503
--- /dev/null
+++ b/ratel/keys/id/id.go
@@ -0,0 +1,72 @@
+// Package id implements a keys.Element for truncated event Ids containing the
+// first 8 bytes of an eventid.T.
+package id
+
+import (
+ "fmt"
+ "io"
+ "orly.dev/chk"
+ "orly.dev/errorf"
+ "strings"
+
+ "orly.dev/ratel/keys"
+ "orly.dev/sha256"
+
+ "orly.dev/eventid"
+ "orly.dev/hex"
+)
+
+const Len = 8
+
+type T struct {
+ Val []byte
+}
+
+var _ keys.Element = &T{}
+
+func New(evID ...*eventid.T) (p *T) {
+ if len(evID) < 1 || len(evID[0].String()) < 1 {
+ return &T{make([]byte, Len)}
+ }
+ evid := evID[0].String()
+ if len(evid) < 64 {
+ evid = strings.Repeat("0", 64-len(evid)) + evid
+ }
+ if len(evid) > 64 {
+ evid = evid[:64]
+ }
+ b, err := hex.Dec(evid[:Len*2])
+ if chk.E(err) {
+ return
+ }
+ return &T{Val: b}
+}
+
+func NewFromBytes(b []byte) (p *T, err error) {
+ if len(b) != sha256.Size {
+ err = errorf.E("event Id must be 32 bytes got: %d %0x", len(b), b)
+ return
+ }
+ p = &T{Val: b[:Len]}
+ return
+}
+
+func (p *T) Write(buf io.Writer) {
+ if len(p.Val) != Len {
+ panic(fmt.Sprintln("must use New or initialize Val with len", Len))
+ }
+ buf.Write(p.Val)
+}
+
+func (p *T) Read(buf io.Reader) (el keys.Element) {
+ // allow uninitialized struct
+ if len(p.Val) != Len {
+ p.Val = make([]byte, Len)
+ }
+ if n, err := buf.Read(p.Val); chk.E(err) || n != Len {
+ return nil
+ }
+ return p
+}
+
+func (p *T) Len() int { return Len }
diff --git a/ratel/keys/id/id_test.go b/ratel/keys/id/id_test.go
new file mode 100644
index 0000000..ff4ab69
--- /dev/null
+++ b/ratel/keys/id/id_test.go
@@ -0,0 +1,24 @@
+package id
+
+import (
+ "bytes"
+ "testing"
+
+ "lukechampine.com/frand"
+ "orly.dev/eventid"
+ "orly.dev/sha256"
+)
+
+func TestT(t *testing.T) {
+ fakeIdBytes := frand.Bytes(sha256.Size)
+ id := eventid.NewWith(fakeIdBytes)
+ v := New(id)
+ buf := new(bytes.Buffer)
+ v.Write(buf)
+ buf2 := bytes.NewBuffer(buf.Bytes())
+ v2 := New()
+ el := v2.Read(buf2).(*T)
+ if bytes.Compare(el.Val, v.Val) != 0 {
+ t.Fatalf("expected %x got %x", v.Val, el.Val)
+ }
+}
diff --git a/ratel/keys/index/index.go b/ratel/keys/index/index.go
new file mode 100644
index 0000000..4a6909c
--- /dev/null
+++ b/ratel/keys/index/index.go
@@ -0,0 +1,52 @@
+// Package index implements the single byte prefix of the database keys. This
+// means a limit of 256 tables, which is plenty for a single purpose nostr event
+// store.
+package index
+
+import (
+ "fmt"
+ "io"
+ "orly.dev/chk"
+
+ "orly.dev/ratel/keys"
+)
+
+const Len = 1
+
+type T struct {
+ Val []byte
+}
+
+var _ keys.Element = &T{}
+
+func New[V byte | P | int](code ...V) (p *T) {
+ var cod []byte
+ switch len(code) {
+ case 0:
+ cod = []byte{0}
+ default:
+ cod = []byte{byte(code[0])}
+ }
+ return &T{Val: cod}
+}
+
+func Empty() (p *T) {
+ return &T{Val: []byte{0}}
+}
+
+func (p *T) Write(buf io.Writer) {
+ if len(p.Val) != Len {
+ panic(fmt.Sprintln("must use New or initialize Val with len", Len))
+ }
+ buf.Write(p.Val)
+}
+
+func (p *T) Read(buf io.Reader) (el keys.Element) {
+ p.Val = make([]byte, Len)
+ if n, err := buf.Read(p.Val); chk.E(err) || n != Len {
+ return nil
+ }
+ return p
+}
+
+func (p *T) Len() int { return Len }
diff --git a/ratel/keys/index/prefixes.go b/ratel/keys/index/prefixes.go
new file mode 100644
index 0000000..1308670
--- /dev/null
+++ b/ratel/keys/index/prefixes.go
@@ -0,0 +1,32 @@
+package index
+
+import (
+ "orly.dev/ratel/keys"
+)
+
+type P byte
+
+// Key writes a key with the P prefix byte and an arbitrary list of
+// keys.Element.
+func (p P) Key(element ...keys.Element) (b []byte) {
+ b = keys.Write(
+ append([]keys.Element{New(byte(p))}, element...)...,
+ )
+ // log.T.ToSliceOfBytes("key %x", b)
+ return
+}
+
+// B returns the index.P as a byte.
+func (p P) B() byte { return byte(p) }
+
+// I returns the index.P as an int (for use with the KeySizes).
+func (p P) I() int { return int(p) }
+
+// GetAsBytes converts a list of index prefixes into a slice of one-byte slices.
+func GetAsBytes(prf ...P) (b [][]byte) {
+ b = make([][]byte, len(prf))
+ for i := range prf {
+ b[i] = []byte{byte(prf[i])}
+ }
+ return
+}
diff --git a/ratel/keys/keys.go b/ratel/keys/keys.go
new file mode 100644
index 0000000..8182bb2
--- /dev/null
+++ b/ratel/keys/keys.go
@@ -0,0 +1,44 @@
+// Package keys is a composable framework for constructing badger keys from
+// fields of events.
+package keys
+
+import (
+ "bytes"
+ "io"
+)
+
+// Element is an enveloper for a type that can Read and Write its binary form.
+type Element interface {
+ // Write the binary form of the field into the given bytes.Buffer.
+ Write(buf io.Writer)
+ // Read accepts a bytes.Buffer and decodes a field from it.
+ Read(buf io.Reader) Element
+ // Len gives the length of the bytes output by the type.
+ Len() int
+}
+
+// Write the contents of each Element to a byte slice.
+func Write(elems ...Element) []byte {
+ // get the length of the buffer required
+ var length int
+ for _, el := range elems {
+ length += el.Len()
+ }
+ buf := bytes.NewBuffer(make([]byte, 0, length))
+ // write out the data from each element
+ for _, el := range elems {
+ el.Write(buf)
+ }
+ return buf.Bytes()
+}
+
+// Read the contents of a byte slice into the provided list of Element types.
+func Read(b []byte, elems ...Element) {
+ buf := bytes.NewBuffer(b)
+ for _, el := range elems {
+ el.Read(buf)
+ }
+}
+
+// Make is a convenience method to wrap a list of Element into a slice.
+func Make(elems ...Element) []Element { return elems }
diff --git a/ratel/keys/keys_test.go b/ratel/keys/keys_test.go
new file mode 100644
index 0000000..af34b4c
--- /dev/null
+++ b/ratel/keys/keys_test.go
@@ -0,0 +1,142 @@
+// Package keys_test must use a different package name, otherwise the imports
+// of the implementation types would be circular.
+package keys_test
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "testing"
+
+ "lukechampine.com/frand"
+
+ "orly.dev/ec/schnorr"
+ "orly.dev/eventid"
+ "orly.dev/kind"
+ "orly.dev/ratel/keys"
+ "orly.dev/ratel/keys/createdat"
+ "orly.dev/ratel/keys/id"
+ "orly.dev/ratel/keys/index"
+ "orly.dev/ratel/keys/kinder"
+ "orly.dev/ratel/keys/pubkey"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/ratel/prefixes"
+ "orly.dev/timestamp"
+)
+
+func TestElement(t *testing.T) {
+ for _ = range 100000 {
+ var failed bool
+ { // construct a typical key type of structure
+ // a prefix
+ np := prefixes.Version
+ vp := index.New(byte(np))
+ // an id
+ fakeIdBytes := frand.Bytes(sha256.Size)
+ i := eventid.NewWith(fakeIdBytes)
+ vid := id.New(i)
+ // a kinder
+ n := kind.New(1059)
+ vk := kinder.New(n.K)
+ // a pubkey
+ fakePubkeyBytes := frand.Bytes(schnorr.PubKeyBytesLen)
+ var vpk *pubkey.T
+ var err error
+ vpk, err = pubkey.NewFromBytes(fakePubkeyBytes)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // a createdat
+ ts := timestamp.Now()
+ vca := createdat.New(ts)
+ // a serial
+ fakeSerialBytes := frand.Bytes(serial.Len)
+ vs := serial.New(fakeSerialBytes)
+ // write Element list into buffer
+ b := keys.Write(vp, vid, vk, vpk, vca, vs)
+ // check that values decoded all correctly
+ // we expect the following types, so we must create them:
+ var vp2 = index.New(0)
+ var vid2 = id.New()
+ var vk2 = kinder.New(0)
+ var vpk2 *pubkey.T
+ vpk2, err = pubkey.New()
+ if err != nil {
+ t.Fatal(err)
+ }
+ var vca2 = createdat.New(timestamp.New())
+ var vs2 = serial.New(nil)
+ // read it in
+ keys.Read(b, vp2, vid2, vk2, vpk2, vca2, vs2)
+ // this is a lot of tests, so use switch syntax
+ switch {
+ case bytes.Compare(vp.Val, vp2.Val) != 0:
+ t.Logf(
+ "failed to decode correctly got %v expected %v", vp2.Val,
+ vp.Val,
+ )
+ failed = true
+ fallthrough
+ case bytes.Compare(vid.Val, vid2.Val) != 0:
+ t.Logf(
+ "failed to decode correctly got %v expected %v", vid2.Val,
+ vid.Val,
+ )
+ failed = true
+ fallthrough
+ case vk.Val.ToU16() != vk2.Val.ToU16():
+ t.Logf(
+ "failed to decode correctly got %v expected %v", vk2.Val,
+ vk.Val,
+ )
+ failed = true
+ fallthrough
+ case !bytes.Equal(vpk.Val, vpk2.Val):
+ t.Logf(
+ "failed to decode correctly got %v expected %v", vpk2.Val,
+ vpk.Val,
+ )
+ failed = true
+ fallthrough
+ case vca.Val.I64() != vca2.Val.I64():
+ t.Logf(
+ "failed to decode correctly got %v expected %v", vca2.Val,
+ vca.Val,
+ )
+ failed = true
+ fallthrough
+ case !bytes.Equal(vs.Val, vs2.Val):
+ t.Logf(
+ "failed to decode correctly got %v expected %v", vpk2.Val,
+ vpk.Val,
+ )
+ failed = true
+ }
+ }
+ { // construct a counter value
+ // a createdat
+ ts := timestamp.Now()
+ vca := createdat.New(ts)
+ // a sizer
+ // n := uint32(frand.Uint64n(math.MaxUint32))
+ // write out values
+ b := keys.Write(vca)
+ // check that values decoded all correctly
+ // we expect the following types, so we must create them:
+ var vca2 = createdat.New(timestamp.New())
+ // read it in
+ keys.Read(b, vca2)
+ // check they match
+
+ if vca.Val.I64() != vca2.Val.I64() {
+ t.Logf(
+ "failed to decode correctly got %v expected %v", vca2.Val,
+ vca.Val,
+ )
+ failed = true
+ }
+ }
+ if failed {
+ t.FailNow()
+ }
+ }
+}
diff --git a/ratel/keys/kinder/kind.go b/ratel/keys/kinder/kind.go
new file mode 100644
index 0000000..c205714
--- /dev/null
+++ b/ratel/keys/kinder/kind.go
@@ -0,0 +1,45 @@
+// Package kinder implements a keys.Element for the 16 bit nostr 'kind' value
+// for use in indexes.
+package kinder
+
+import (
+ "encoding/binary"
+ "io"
+ "orly.dev/chk"
+
+ "orly.dev/kind"
+ "orly.dev/ratel/keys"
+)
+
+const Len = 2
+
+type T struct {
+ Val *kind.T
+}
+
+var _ keys.Element = &T{}
+
+// New creates a new kinder.T for reading/writing kind.T values.
+func New[V uint16 | uint32 | int32 | uint64 | int64 | int](c V) (p *T) { return &T{Val: kind.New(c)} }
+
+func Make(c *kind.T) (v []byte) {
+ v = make([]byte, Len)
+ binary.BigEndian.PutUint16(v, c.K)
+ return
+}
+
+func (c *T) Write(buf io.Writer) {
+ buf.Write(Make(c.Val))
+}
+
+func (c *T) Read(buf io.Reader) (el keys.Element) {
+ b := make([]byte, Len)
+ if n, err := buf.Read(b); chk.E(err) || n != Len {
+ return nil
+ }
+ v := binary.BigEndian.Uint16(b)
+ c.Val = kind.New(v)
+ return c
+}
+
+func (c *T) Len() int { return Len }
diff --git a/ratel/keys/kinder/kind_test.go b/ratel/keys/kinder/kind_test.go
new file mode 100644
index 0000000..dce57cb
--- /dev/null
+++ b/ratel/keys/kinder/kind_test.go
@@ -0,0 +1,21 @@
+package kinder
+
+import (
+ "bytes"
+ "testing"
+
+ "orly.dev/kind"
+)
+
+func TestT(t *testing.T) {
+ n := kind.New(1059)
+ v := New(n.ToU16())
+ buf := new(bytes.Buffer)
+ v.Write(buf)
+ buf2 := bytes.NewBuffer(buf.Bytes())
+ v2 := New(0)
+ el := v2.Read(buf2).(*T)
+ if el.Val.ToU16() != n.ToU16() {
+ t.Fatalf("expected %d got %d", n, el.Val)
+ }
+}
diff --git a/ratel/keys/pubkey/pubkey.go b/ratel/keys/pubkey/pubkey.go
new file mode 100644
index 0000000..860bb43
--- /dev/null
+++ b/ratel/keys/pubkey/pubkey.go
@@ -0,0 +1,75 @@
+// Package pubkey implements an 8 byte truncated public key implementation of a
+// keys.Element.
+package pubkey
+
+import (
+ "fmt"
+ "io"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "orly.dev/ec/schnorr"
+ "orly.dev/ratel/keys"
+)
+
+const Len = 8
+
+type T struct {
+ Val []byte
+}
+
+var _ keys.Element = &T{}
+
+// New creates a new pubkey prefix. If the parameter is omitted, a new one is
+// allocated (for read); if more than one is given, only the first is used, and
+// if the first one is not the correct length of 32 bytes, an error is returned.
+func New(pk ...[]byte) (p *T, err error) {
+ if len(pk) < 1 {
+ // allows init with no parameter
+ return &T{make([]byte, Len)}, nil
+ }
+ // only the first pubkey will be used
+ if len(pk[0]) != schnorr.PubKeyBytesLen {
+ err = log.E.Err("pubkey must be 32 bytes, got", len(pk[0]))
+ return
+ }
+ return &T{Val: pk[0][:Len]}, nil
+}
+
+func NewFromBytes(pkb []byte) (p *T, err error) {
+ if len(pkb) != schnorr.PubKeyBytesLen {
+ err = log.E.Err(
+ "provided key not correct length, got %d expected %d",
+ len(pkb), schnorr.PubKeyBytesLen,
+ )
+ log.T.S(pkb)
+ return
+ }
+ b := make([]byte, Len)
+ copy(b, pkb[:Len])
+ p = &T{Val: b}
+ return
+}
+
+func (p *T) Write(buf io.Writer) {
+ if p == nil {
+ panic("nil pubkey")
+ }
+ if p.Val == nil || len(p.Val) != Len {
+ panic(fmt.Sprintln("must use New or initialize Val with len", Len))
+ }
+ buf.Write(p.Val)
+}
+
+func (p *T) Read(buf io.Reader) (el keys.Element) {
+ // allow uninitialized struct
+ if len(p.Val) != Len {
+ p.Val = make([]byte, Len)
+ }
+ if n, err := buf.Read(p.Val); chk.E(err) || n != Len {
+ return nil
+ }
+ return p
+}
+
+func (p *T) Len() int { return Len }
diff --git a/ratel/keys/pubkey/pubkey_test.go b/ratel/keys/pubkey/pubkey_test.go
new file mode 100644
index 0000000..bb6a46e
--- /dev/null
+++ b/ratel/keys/pubkey/pubkey_test.go
@@ -0,0 +1,29 @@
+package pubkey
+
+import (
+ "bytes"
+ "orly.dev/chk"
+ "testing"
+
+ "lukechampine.com/frand"
+
+ "orly.dev/ec/schnorr"
+)
+
+// TestT round-trips random 32-byte pubkeys through New/Write/Read and checks
+// the stored Len-byte prefix survives unchanged.
+//
+// NOTE(review): 10,000,000 iterations makes this test very slow; `for _ =
+// range` can be `for range`; `bytes.Compare(...) != 0` is more idiomatically
+// `!bytes.Equal(...)`.
+func TestT(t *testing.T) {
+	for _ = range 10000000 {
+		fakePubkeyBytes := frand.Bytes(schnorr.PubKeyBytesLen)
+		v, err := New(fakePubkeyBytes)
+		if chk.E(err) {
+			t.FailNow()
+		}
+		buf := new(bytes.Buffer)
+		v.Write(buf)
+		buf2 := bytes.NewBuffer(buf.Bytes())
+		v2, _ := New()
+		el := v2.Read(buf2).(*T)
+		if bytes.Compare(el.Val, v.Val) != 0 {
+			t.Fatalf("expected %x got %x", v.Val, el.Val)
+		}
+	}
+}
diff --git a/ratel/keys/serial/serial.go b/ratel/keys/serial/serial.go
new file mode 100644
index 0000000..3a2cc25
--- /dev/null
+++ b/ratel/keys/serial/serial.go
@@ -0,0 +1,85 @@
+// Package serial implements a keys.Element for encoding a serial (monotonic 64
+// bit counter) for stored events, used to link an index to the main data table.
+package serial
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "orly.dev/chk"
+
+ "orly.dev/ratel/keys"
+)
+
+// Len is the byte width of a serial: a big-endian uint64.
+const Len = 8
+
+// T is a badger DB serial number used for conflict free event record keys.
+type T struct {
+	Val []byte
+}
+
+// compile-time assertion that *T implements keys.Element
+var _ keys.Element = &T{}
+
+// New returns a new serial record key.Element - if nil or short slice is given,
+// initialize a fresh one with Len (for reading), otherwise if equal or longer,
+// trim if long and store into struct (for writing).
+//
+// NOTE(review): when len(ser) >= Len the input slice is aliased (not copied),
+// unlike FromKey which copies — confirm callers never mutate it afterwards.
+func New(ser []byte) (p *T) {
+	switch {
+	case len(ser) < Len:
+		// log.I.Ln("empty serial")
+		// allows use of nil to init
+		ser = make([]byte, Len)
+	default:
+		ser = ser[:Len]
+	}
+	return &T{Val: ser}
+}
+
+// FromKey expects the last Len bytes of the given slice to be the serial.
+// The serial is copied out, so the returned T does not alias k. Panics when
+// k is shorter than Len.
+func FromKey(k []byte) (p *T) {
+	if len(k) < Len {
+		panic(fmt.Sprintf("cannot get a serial without at least 8 bytes %x", k))
+	}
+	key := make([]byte, Len)
+	copy(key, k[len(k)-Len:])
+	return &T{Val: key}
+}
+
+// Make encodes a uint64 counter value as the big-endian Len-byte form used in
+// keys. NOTE(review): uses literal 8 rather than the Len constant.
+func Make(s uint64) (ser []byte) {
+	ser = make([]byte, 8)
+	binary.BigEndian.PutUint64(ser, s)
+	return
+}
+
+// Write emits the Len-byte serial to buf; panics if Val was not initialized
+// to exactly Len bytes. NOTE(review): buf.Write's error is discarded.
+func (p *T) Write(buf io.Writer) {
+	if len(p.Val) != Len {
+		panic(fmt.Sprintln("must use New or initialize Val with len", Len))
+	}
+	buf.Write(p.Val)
+}
+
+// Read fills Val with Len bytes from buf, (re)allocating when uninitialized,
+// and returns the receiver, or nil on error/short read.
+// NOTE(review): a single buf.Read may short-read without error; io.ReadFull
+// semantics may be intended — confirm (same pattern as pubkey.Read).
+func (p *T) Read(buf io.Reader) (el keys.Element) {
+	// allow uninitialized struct
+	if len(p.Val) != Len {
+		p.Val = make([]byte, Len)
+	}
+	if n, err := buf.Read(p.Val); chk.E(err) || n != Len {
+		return nil
+	}
+	return p
+}
+
+// Len returns the fixed encoded length; Uint64 decodes Val as big-endian.
+// NOTE(review): Uint64 panics if Val is shorter than 8 bytes.
+func (p *T) Len() int { return Len }
+func (p *T) Uint64() (u uint64) { return binary.BigEndian.Uint64(p.Val) }
+
+// Match compares a key bytes to a serial, all indexes have the serial at
+// the end indicating the event record they refer to, and if they match returns
+// true.
+//
+// NOTE(review): ser's length is not validated; `bytes.Compare(...) == 0` is
+// more idiomatically `bytes.Equal(...)`.
+func Match(index, ser []byte) bool {
+	l := len(index)
+	if l < Len {
+		return false
+	}
+	return bytes.Compare(index[l-Len:], ser) == 0
+}
diff --git a/ratel/keys/serial/serial_test.go b/ratel/keys/serial/serial_test.go
new file mode 100644
index 0000000..8606f5e
--- /dev/null
+++ b/ratel/keys/serial/serial_test.go
@@ -0,0 +1,23 @@
+package serial_test
+
+import (
+ "bytes"
+ "testing"
+
+ "orly.dev/ratel/keys/serial"
+
+ "lukechampine.com/frand"
+)
+
+// TestT round-trips one random serial through New/Write/Read and checks the
+// bytes survive. NOTE(review): `bytes.Compare(...) != 0` reads better as
+// `!bytes.Equal(...)`.
+func TestT(t *testing.T) {
+	fakeSerialBytes := frand.Bytes(serial.Len)
+	v := serial.New(fakeSerialBytes)
+	buf := new(bytes.Buffer)
+	v.Write(buf)
+	buf2 := bytes.NewBuffer(buf.Bytes())
+	v2 := &serial.T{} // or can use New(nil)
+	el := v2.Read(buf2).(*serial.T)
+	if bytes.Compare(el.Val, v.Val) != 0 {
+		t.Fatalf("expected %x got %x", v.Val, el.Val)
+	}
+}
diff --git a/ratel/keys/tombstone/tombstone.go b/ratel/keys/tombstone/tombstone.go
new file mode 100644
index 0000000..e7029ad
--- /dev/null
+++ b/ratel/keys/tombstone/tombstone.go
@@ -0,0 +1,49 @@
+// Package tombstone is a 16 byte truncated event Id for keys.Element used to
+// mark an event as being deleted so it isn't saved again.
+package tombstone
+
+import (
+ "io"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "orly.dev/eventid"
+ "orly.dev/ratel/keys"
+)
+
+// Len is the number of bytes of the truncated event Id kept in a tombstone.
+const Len = 16
+
+// T is a keys.Element holding the first Len bytes of a deleted event's Id.
+// The val field is unexported, unlike pubkey.T/serial.T whose Val is public.
+type T struct {
+	val []byte
+}
+
+// compile-time assertion that *T implements keys.Element
+var _ keys.Element = &T{}
+
+// Make copies the first Len (16) bytes of the event Id into a fresh slice.
+func Make(eid *eventid.T) (v []byte) {
+	v = make([]byte, Len)
+	copy(v, eid.Bytes())
+	return
+}
+
+// New returns an empty tombstone, suitable as a target for Read.
+func New() (t *T) { return new(T) }
+
+// NewWith returns a tombstone for the given event Id (truncated via Make).
+func NewWith(eid *eventid.T) (t *T) {
+	t = &T{val: Make(eid)}
+	return
+}
+
+// Write emits the truncated Id to buf.
+// NOTE(review): unlike pubkey.Write/serial.Write there is no length check
+// here — an uninitialized tombstone silently writes zero bytes; buf.Write's
+// error is also discarded.
+func (t *T) Write(buf io.Writer) {
+	buf.Write(t.val)
+}
+
+// Read reads Len bytes from buf into the receiver and returns the element, or
+// nil on error/short read.
+//
+// NOTE(review): this both assigns t.val AND returns a separate &T{val: b};
+// the other keys.Element implementations return the receiver — confirm which
+// is intended. A single buf.Read may also short-read without error.
+func (t *T) Read(buf io.Reader) (el keys.Element) {
+	b := make([]byte, Len)
+	if n, err := buf.Read(b); chk.E(err) || n < Len {
+		log.I.S(n, err)
+		return nil
+	}
+	t.val = b
+	return &T{val: b}
+}
+
+// Len returns the fixed encoded length of this element.
+func (t *T) Len() int { return Len }
diff --git a/ratel/keys/tombstone/tombstone_test.go b/ratel/keys/tombstone/tombstone_test.go
new file mode 100644
index 0000000..cc0049d
--- /dev/null
+++ b/ratel/keys/tombstone/tombstone_test.go
@@ -0,0 +1,23 @@
+package tombstone
+
+import (
+ "bytes"
+ "testing"
+
+ "lukechampine.com/frand"
+
+ "orly.dev/eventid"
+)
+
+// TestT round-trips a tombstone built from a random event Id through
+// Write/Read. It relies on Read mutating the receiver's val (Read also
+// returns a separate *T, which is ignored here).
+func TestT(t *testing.T) {
+	id := frand.Entropy256()
+	ts := NewWith(eventid.NewWith(id[:]))
+	buf := new(bytes.Buffer)
+	ts.Write(buf)
+	buf2 := bytes.NewBuffer(buf.Bytes())
+	ts2 := New()
+	ts2.Read(buf2)
+	if !bytes.Equal(ts.val, ts2.val) {
+		t.Errorf("expected %0x got %0x", ts.val, ts2.val)
+	}
+}
diff --git a/ratel/keys/util_test.go b/ratel/keys/util_test.go
new file mode 100644
index 0000000..63a0a44
--- /dev/null
+++ b/ratel/keys/util_test.go
@@ -0,0 +1,9 @@
+package keys_test
+
+import (
+ "orly.dev/lol"
+)
+
+// package-level logging/check helpers for tests, aliased from the lol main
+// logger (log for printing, chk for error checks, errorf for formatted errors)
+var (
+	log, chk, errorf = lol.Main.Log, lol.Main.Check, lol.Main.Errorf
+)
diff --git a/ratel/log.go b/ratel/log.go
new file mode 100644
index 0000000..c8248d6
--- /dev/null
+++ b/ratel/log.go
@@ -0,0 +1,69 @@
+package ratel
+
+import (
+ "fmt"
+ "orly.dev/log"
+ "runtime"
+ "strings"
+
+ "orly.dev/atomic"
+ "orly.dev/lol"
+)
+
+// NewLogger creates a new badger logger with the given initial log level and
+// a label that prefixes every message.
+func NewLogger(logLevel int, label string) (l *logger) {
+	log.T.Ln("getting logger for", label)
+	l = &logger{Label: label}
+	l.Level.Store(int32(logLevel))
+	return
+}
+
+// logger adapts this package's logging to badger's Logger interface
+// (Errorf/Warningf/Infof/Debugf), with an atomically adjustable level.
+type logger struct {
+	Level atomic.Int32
+	Label string
+}
+
+// SetLogLevel atomically adjusts the log level to the given log level code.
+func (l *logger) SetLogLevel(level int) {
+	l.Level.Store(int32(level))
+}
+
+// Errorf is a log printer for this level of message. The caller's file:line
+// is appended (Caller(2) skips this adapter and badger's call frame).
+func (l *logger) Errorf(s string, i ...interface{}) {
+	if l.Level.Load() >= lol.Error {
+		s = l.Label + ": " + s
+		txt := fmt.Sprintf(s, i...)
+		_, file, line, _ := runtime.Caller(2)
+		log.E.F("%s\n%s:%d", strings.TrimSpace(txt), file, line)
+	}
+}
+
+// Warningf is a log printer for this level of message.
+// NOTE(review): gated on lol.Warn but emitted through log.D (debug printer)
+// rather than a warn-level printer — confirm this downgrade is intentional.
+func (l *logger) Warningf(s string, i ...interface{}) {
+	if l.Level.Load() >= lol.Warn {
+		s = l.Label + ": " + s
+		txt := fmt.Sprintf(s, i...)
+		_, file, line, _ := runtime.Caller(2)
+		log.D.F("%s\n%s:%d", strings.TrimSpace(txt), file, line)
+	}
+}
+
+// Infof is a log printer for this level of message.
+// NOTE(review): gated on lol.Info but emitted through log.D — the badger
+// levels are deliberately shifted down one notch here, it appears; confirm.
+func (l *logger) Infof(s string, i ...interface{}) {
+	if l.Level.Load() >= lol.Info {
+		s = l.Label + ": " + s
+		txt := fmt.Sprintf(s, i...)
+		_, file, line, _ := runtime.Caller(2)
+		log.D.F("%s\n%s:%d", strings.TrimSpace(txt), file, line)
+	}
+}
+
+// Debugf is a log printer for this level of message.
+// NOTE(review): gated on lol.Debug but emitted through log.T (trace printer),
+// consistent with the one-notch downgrade in Warningf/Infof.
+func (l *logger) Debugf(s string, i ...interface{}) {
+	if l.Level.Load() >= lol.Debug {
+		s = l.Label + ": " + s
+		txt := fmt.Sprintf(s, i...)
+		_, file, line, _ := runtime.Caller(2)
+		log.T.F("%s\n%s:%d", strings.TrimSpace(txt), file, line)
+	}
+}
diff --git a/ratel/main.go b/ratel/main.go
new file mode 100644
index 0000000..abbbe2b
--- /dev/null
+++ b/ratel/main.go
@@ -0,0 +1,169 @@
+// Package ratel is a badger DB based event store with optional cache management
+// and capability to be used as a pruning cache along with a secondary larger
+// event store.
+package ratel
+
+import (
+ "encoding/binary"
+ "github.com/dgraph-io/badger/v4"
+ "orly.dev/chk"
+ "orly.dev/lol"
+ "sync"
+
+ "orly.dev/context"
+ "orly.dev/interfaces/store"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/ratel/prefixes"
+)
+
+// DefaultMaxLimit is set to a size that means the usual biggest batch of events sent to a
+// client usually is at most about 256kb or so.
+// NOTE(review): GetBackend hard-codes 512 instead of using this constant.
+const DefaultMaxLimit = 512
+
+// T is a badger event store database with layer2 and garbage collection.
+type T struct {
+	// Ctx is the backend's lifetime context; WG tracks background workers.
+	Ctx context.T
+	WG  *sync.WaitGroup
+	// dataDir is where the badger files live; exposed via Path().
+	dataDir string
+	// DBSizeLimit is the number of bytes we want to keep the data store from exceeding.
+	// DBSizeLimit int
+	// // DBLowWater is the percentage of DBSizeLimit a GC run will reduce the used storage down
+	// // to.
+	// DBLowWater int
+	// // DBHighWater is the trigger point at which a GC run should start if exceeded.
+	// DBHighWater int
+	// // GCFrequency is the frequency of checks of the current utilisation.
+	// GCFrequency time.Duration
+	// HasL2 bool
+	// BlockCacheSize int
+	// InitLogLevel is the level the badger logger starts at; Logger is the
+	// badger-compatible adapter (see log.go).
+	InitLogLevel int
+	Logger       *logger
+	// DB is the badger db
+	*badger.DB
+	// seq is the monotonic collision-free index for raw event storage.
+	seq *badger.Sequence
+	// Threads is how many CPU threads we dedicate to concurrent actions, flatten and GC mark
+	Threads int
+	// MaxLimit is a default limit that applies to a query without a limit, to avoid sending out
+	// too many events to a client from a malformed or excessively broad filter.
+	MaxLimit int
+	// // ActuallyDelete sets whether we actually delete or rewrite deleted entries with a modified
+	// // deleted prefix value (8th bit set)
+	// ActuallyDelete bool
+	// Flatten should be set to true to trigger a flatten at close... this is mainly
+	// triggered by running an import
+	Flatten bool
+	// // UseCompact uses a compact encoding based on the canonical format (generate
+	// // hash of it to get Id field with the signature in raw binary after.
+	// UseCompact bool
+	// // Compression sets the compression to use, none/snappy/zstd
+	// Compression string
+}
+
+// SetLogLevel adjusts the badger logger's level from a level name string.
+func (r *T) SetLogLevel(level string) {
+	r.Logger.SetLogLevel(lol.GetLogLevel(level))
+}
+
+// compile-time assertion that *T implements the store.I event store interface
+var _ store.I = (*T)(nil)
+
+// BackendParams is the configurations used in creating a new ratel.T.
+// The fields mirror GetBackend's parameters; Extra carries the optional
+// variadic ints (DBSizeLimit, DBLowWater, DBHighWater, GCFrequency).
+// NOTE(review): Extra is not forwarded by New (it passes params instead).
+type BackendParams struct {
+	Ctx                                context.T
+	WG                                 *sync.WaitGroup
+	HasL2, UseCompact                  bool
+	BlockCacheSize, LogLevel, MaxLimit int
+	Compression                        string // none,snappy,zstd
+	Extra                              []int
+}
+
+// New configures a new ratel.T event store from the given parameters,
+// delegating to GetBackend. The variadic params are the same optional ints
+// GetBackend documents.
+func New(p BackendParams, params ...int) *T {
+	return GetBackend(
+		p.Ctx, p.WG, p.HasL2, p.UseCompact, p.BlockCacheSize, p.LogLevel,
+		p.MaxLimit,
+		p.Compression, params...,
+	)
+}
+
+// GetBackend returns a reasonably configured badger.Backend.
+//
+// The variadic params correspond to DBSizeLimit, DBLowWater, DBHighWater and
+// GCFrequency as an integer multiplier of number of seconds.
+//
+// Note that the cancel function for the context needs to be managed by the
+// caller.
+//
+// NOTE(review): hasL2, useCompact, blockCacheSize, compression and params are
+// currently unused (the corresponding fields are commented out), and the 512
+// fallback duplicates the DefaultMaxLimit constant.
+//
+// Deprecated: use New instead.
+func GetBackend(
+	Ctx context.T, WG *sync.WaitGroup, hasL2, useCompact bool,
+	blockCacheSize, logLevel, maxLimit int, compression string, params ...int,
+) (b *T) {
+	// var sizeLimit, lw, hw, freq = 0, 50, 66, 3600
+	// switch len(params) {
+	// case 4:
+	// 	freq = params[3]
+	// 	fallthrough
+	// case 3:
+	// 	hw = params[2]
+	// 	fallthrough
+	// case 2:
+	// 	lw = params[1]
+	// 	fallthrough
+	// case 1:
+	// 	sizeLimit = params[0] * units.Gb
+	// }
+	// if unset, assume a safe maximum limit for unlimited filters.
+	if maxLimit == 0 {
+		maxLimit = 512
+	}
+	b = &T{
+		Ctx: Ctx,
+		WG:  WG,
+		// DBSizeLimit:    sizeLimit,
+		// DBLowWater:     lw,
+		// DBHighWater:    hw,
+		// GCFrequency:    time.Duration(freq) * time.Second,
+		// HasL2:          hasL2,
+		// BlockCacheSize: blockCacheSize,
+		InitLogLevel: logLevel,
+		MaxLimit:     maxLimit,
+		// UseCompact:     useCompact,
+		// Compression:    compression,
+	}
+	return
+}
+
+// Path returns the path where the database files are stored.
+func (r *T) Path() string { return r.dataDir }
+
+// SerialKey returns a key used for storing events, and the raw serial counter
+// bytes to copy into index keys. Panics if the badger sequence cannot produce
+// a next value (see SerialBytes).
+func (r *T) SerialKey() (idx []byte, ser *serial.T) {
+	var err error
+	var s []byte
+	if s, err = r.SerialBytes(); chk.E(err) {
+		panic(err)
+	}
+	ser = serial.New(s)
+	return prefixes.Event.Key(ser), ser
+}
+
+// Serial returns the next monotonic conflict free unique serial on the database.
+// NOTE(review): the empty if-body exists only so chk.E logs the error; the
+// error is still returned to the caller.
+func (r *T) Serial() (ser uint64, err error) {
+	if ser, err = r.seq.Next(); chk.E(err) {
+	}
+	// log.T.ToSliceOfBytes("serial %x", ser)
+	return
+}
+
+// SerialBytes returns a new serial value, used to store an event record with a
+// conflict-free unique code (it is a monotonic, atomic, ascending counter),
+// encoded big-endian into serial.Len bytes.
+//
+// NOTE(review): on a sequence error this panics, so the declared err return
+// is never non-nil for callers — confirm panic is the intended policy.
+func (r *T) SerialBytes() (ser []byte, err error) {
+	var serU64 uint64
+	if serU64, err = r.Serial(); chk.E(err) {
+		panic(err)
+	}
+	ser = make([]byte, serial.Len)
+	binary.BigEndian.PutUint64(ser, serU64)
+	return
+}
diff --git a/ratel/nuke.go b/ratel/nuke.go
new file mode 100644
index 0000000..414e4ab
--- /dev/null
+++ b/ratel/nuke.go
@@ -0,0 +1,19 @@
+package ratel
+
+import (
+ "orly.dev/chk"
+ "orly.dev/log"
+ "orly.dev/ratel/prefixes"
+)
+
+// Wipe drops every table listed in prefixes.AllPrefixes and then runs a value
+// log GC pass.
+//
+// NOTE(review): AllPrefixes omits the Tombstone and Configuration prefixes,
+// so those tables survive a Wipe — confirm whether that is intended. Also,
+// badger's RunValueLogGC returns ErrNoRewrite when there is nothing to
+// collect; chk.E would surface that as an error here — verify callers
+// tolerate it.
+func (r *T) Wipe() (err error) {
+	log.W.F("nuking database at %s", r.dataDir)
+	log.I.S(prefixes.AllPrefixes)
+	if err = r.DB.DropPrefix(prefixes.AllPrefixes...); chk.E(err) {
+		return
+	}
+	if err = r.DB.RunValueLogGC(0.8); chk.E(err) {
+		return
+	}
+	return
+}
diff --git a/ratel/prefixes/index_test.go b/ratel/prefixes/index_test.go
new file mode 100644
index 0000000..b937ba0
--- /dev/null
+++ b/ratel/prefixes/index_test.go
@@ -0,0 +1,21 @@
+package prefixes
+
+import (
+ "bytes"
+ "testing"
+
+ "orly.dev/ratel/keys/index"
+)
+
+// TestT checks that the Version prefix byte survives a round trip through an
+// index.T element's Read.
+// NOTE(review): the failure message formats el.Val (a slice) with %d while
+// comparing only el.Val[0] — the message and the comparison disagree.
+func TestT(t *testing.T) {
+	v := Version.Key()
+	// v := New(n)
+	// buf := new(bytes.Buffer)
+	// v.Write(buf)
+	buf2 := bytes.NewBuffer(v)
+	v2 := index.New(0)
+	el := v2.Read(buf2).(*index.T)
+	if el.Val[0] != v[0] {
+		t.Fatalf("expected %d got %d", v[0], el.Val)
+	}
+}
diff --git a/ratel/prefixes/prefixes.go b/ratel/prefixes/prefixes.go
new file mode 100644
index 0000000..f421f74
--- /dev/null
+++ b/ratel/prefixes/prefixes.go
@@ -0,0 +1,180 @@
+// Package prefixes provides a list of the index.P types that designate tables
+// in the ratel event store, as well as enabling a simple syntax to assemble and
+// decompose an index key into its keys.Element s.
+package prefixes
+
+import (
+ "orly.dev/ec/schnorr"
+ "orly.dev/ratel/keys/createdat"
+ "orly.dev/ratel/keys/fullid"
+ "orly.dev/ratel/keys/id"
+ "orly.dev/ratel/keys/index"
+ "orly.dev/ratel/keys/kinder"
+ "orly.dev/ratel/keys/pubkey"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/sha256"
+)
+
+// NOTE(review): Version occupies the first const spec, so iota gives Event=1,
+// CreatedAt=2, ... Configuration=14 — matching the bracketed first byte shown
+// in each comment below. Inserting a new prefix mid-list renumbers everything
+// after it, which would corrupt existing databases.
+const (
+	// Version is the key that stores the version number, the value is a 16-bit
+	// integer (2 bytes)
+	//
+	// [ 255 ][ 2 byte/16 bit version code ]
+	Version index.P = 255
+
+	// Event is the prefix used with a Serial counter value provided by badgerDB to
+	// provide conflict-free 8 byte 64-bit unique keys for event records, which
+	// follows the prefix.
+	//
+	// [ 1 ][ 8 bytes Serial ]
+	Event index.P = iota
+
+	// CreatedAt creates an index key that contains the unix
+	// timestamp of the event record serial.
+	//
+	// [ 2 ][ 8 bytes timestamp.T ][ 8 bytes Serial ]
+	CreatedAt
+
+	// Id contains the first 8 bytes of the Id of the event and the 8
+	// byte Serial of the event record.
+	//
+	// [ 3 ][ 8 bytes eventid.T prefix ][ 8 bytes Serial ]
+	Id
+
+	// Kind contains the kind and datestamp.
+	//
+	// [ 4 ][ 2 bytes kind.T ][ 8 bytes timestamp.T ][ 8 bytes Serial ]
+	Kind
+
+	// Pubkey contains pubkey prefix and timestamp.
+	//
+	// [ 5 ][ 8 bytes pubkey prefix ][ 8 bytes timestamp.T ][ 8 bytes Serial ]
+	Pubkey
+
+	// PubkeyKind contains pubkey prefix, kind and timestamp.
+	//
+	// [ 6 ][ 8 bytes pubkey prefix ][ 2 bytes kind.T ][ 8 bytes timestamp.T ][ 8 bytes Serial ]
+	PubkeyKind
+
+	// Tag is for miscellaneous arbitrary length tags, with timestamp and event
+	// serial after.
+	//
+	// [ 7 ][ tag string 1 <= 100 bytes ][ 8 bytes timestamp.T ][ 8 bytes Serial ]
+	Tag
+
+	// Tag32 contains the 8 byte pubkey prefix, timestamp and serial.
+	//
+	// [ 8 ][ 8 bytes pubkey prefix ][ 8 bytes timestamp.T ][ 8 bytes Serial ]
+	Tag32
+
+	// TagAddr contains the kind, pubkey prefix, value (index 2) of address tag (eg
+	// relay address), followed by timestamp and serial.
+	//
+	// [ 9 ][ 2 byte kind.T][ 8 byte pubkey prefix ][ network address ][ 8 byte timestamp.T ][ 8 byte Serial ]
+	TagAddr
+
+	// Counter is the eventid.T prefix, value stores the average time of access
+	// (average of all access timestamps) and the size of the record.
+	//
+	// [ 10 ][ 8 bytes Serial ] : value: [ 8 bytes timestamp ]
+	Counter
+
+	// Tombstone is an index that contains the left half of an event Id that has
+	// been deleted. The purpose of this event is to stop the event being
+	// republished, as a delete event may not be respected by other relays and
+	// eventually lead to a republication. The timestamp is added at the end to
+	// enable pruning the oldest tombstones.
+	//
+	// [ 11 ][ 16 bytes first/left half of event Id ][ 8 bytes timestamp ]
+	Tombstone
+
+	// PubkeyIndex is the prefix for an index that stores a mapping between pubkeys
+	// and a pubkey serial.
+	//
+	// todo: this is useful feature but rather than for saving space on pubkeys in
+	// events might have a more useful place in some kind of search API. eg just
+	// want pubkey from event id, combined with FullIndex.
+	//
+	// [ 12 ][ 32 bytes pubkey ][ 8 bytes pubkey serial ]
+	PubkeyIndex
+
+	// FullIndex is a secondary table for Ids that is used to fetch the full Id
+	// hash instead of fetching and unmarshalling the event. The Id index will
+	// ultimately be deprecated in favor of this because returning event Ids and
+	// letting the client handle pagination reduces relay complexity.
+	//
+	// In addition, as a mechanism of sorting, the event Id bears also a timestamp
+	// from its created_at field. The serial acts as a "first seen" ordering, then
+	// you also have the (claimed) chronological ordering.
+	//
+	// [ 13 ][ 8 bytes Serial ][ 32 bytes eventid.T ][ 32 bytes pubkey ][ 8 bytes timestamp.T ]
+	FullIndex
+
+	// Configuration is a free-form minified JSON object that contains a collection of
+	// configuration items.
+	//
+	// [ 14 ]
+	Configuration
+)
+
+// FilterPrefixes is a slice of the prefixes used by filter index to enable a loop
+// for pulling events matching a serial
+var FilterPrefixes = [][]byte{
+	{CreatedAt.B()},
+	{Id.B()},
+	{Kind.B()},
+	{Pubkey.B()},
+	{PubkeyKind.B()},
+	{Tag.B()},
+	{Tag32.B()},
+	{TagAddr.B()},
+	{FullIndex.B()},
+}
+
+// AllPrefixes is used to do a full database nuke.
+// NOTE(review): Tombstone and Configuration are NOT listed, so Wipe leaves
+// those tables in place — confirm this is intentional (keeping tombstones
+// across a wipe is plausible; keeping configuration likely is too).
+var AllPrefixes = [][]byte{
+	{Event.B()},
+	{CreatedAt.B()},
+	{Id.B()},
+	{Kind.B()},
+	{Pubkey.B()},
+	{PubkeyKind.B()},
+	{Tag.B()},
+	{Tag32.B()},
+	{TagAddr.B()},
+	{Counter.B()},
+	{PubkeyIndex.B()},
+	{FullIndex.B()},
+}
+
+// KeySizes are the byte size of keys of each type of key prefix. int(P) or call the P.I() method
+// corresponds to the index 1:1. For future index additions be sure to add the
+// relevant KeySizes sum as it describes the data for a programmer.
+//
+// NOTE(review): Version (255) has no entry, so only the iota-numbered
+// prefixes index into this slice; the Tombstone entry uses serial.Len for its
+// trailing 8 bytes although the prefix doc describes a timestamp (both are 8
+// bytes wide, so the sum is the same).
+var KeySizes = []int{
+	// Event
+	1 + serial.Len,
+	// CreatedAt
+	1 + createdat.Len + serial.Len,
+	// Id
+	1 + id.Len + serial.Len,
+	// Kind
+	1 + kinder.Len + createdat.Len + serial.Len,
+	// Pubkey
+	1 + pubkey.Len + createdat.Len + serial.Len,
+	// PubkeyKind
+	1 + pubkey.Len + kinder.Len + createdat.Len + serial.Len,
+	// Tag (worst case scenario)
+	1 + 100 + createdat.Len + serial.Len,
+	// Tag32
+	1 + pubkey.Len + createdat.Len + serial.Len,
+	// TagAddr
+	1 + kinder.Len + pubkey.Len + 100 + createdat.Len + serial.Len,
+	// Counter
+	1 + serial.Len,
+	// Tombstone
+	1 + sha256.Size/2 + serial.Len,
+	// PubkeyIndex
+	1 + schnorr.PubKeyBytesLen + serial.Len,
+	// FullIndex
+	1 + fullid.Len + createdat.Len + serial.Len,
+}
diff --git a/ratel/preparequeries.go b/ratel/preparequeries.go
new file mode 100644
index 0000000..e9057c6
--- /dev/null
+++ b/ratel/preparequeries.go
@@ -0,0 +1,202 @@
+package ratel
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "orly.dev/chk"
+ "orly.dev/errorf"
+ "orly.dev/log"
+
+ "orly.dev/event"
+ "orly.dev/eventid"
+ "orly.dev/filter"
+ "orly.dev/ratel/keys/id"
+ "orly.dev/ratel/keys/kinder"
+ "orly.dev/ratel/keys/pubkey"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/ratel/prefixes"
+ "orly.dev/timestamp"
+)
+
+// Results bundles a decoded event with its created-at timestamp and the
+// serial of its database record.
+type Results struct {
+	Ev  *event.E
+	TS  *timestamp.T
+	Ser *serial.T
+}
+
+// query is one index scan derived from a filter: the prefix to match,
+// the reverse-iteration start key (prefix + inverted/until timestamp), and
+// whether the key carries no timestamp to check (skipTS).
+type query struct {
+	index        int
+	queryFilter  *filter.F
+	searchPrefix []byte
+	start        []byte
+	skipTS       bool
+}
+
+// PrepareQueries analyses a filter and generates a set of query specs that produce
+// key prefixes to search for in the badger key indexes. It also returns an
+// extra filter (ext) to apply to decoded events where the index alone cannot
+// discriminate (tags alongside authors, kinds alongside tags), and the
+// `since` bound at which reverse iteration should stop.
+//
+// NOTE(review): when a bogus author/id is skipped with `continue`, the named
+// err set by that failure is never reset, so the function can return a
+// non-nil err together with usable queries; the skipped slots also remain
+// zero-value query structs (nil searchPrefix). Confirm callers tolerate both.
+func PrepareQueries(f *filter.F) (
+	qs []query,
+	ext *filter.F,
+	since uint64,
+	err error,
+) {
+	if f == nil {
+		err = errorf.E("filter cannot be nil")
+		return
+	}
+	switch {
+	// first if there is Ids, just search for them, this overrides all other filters
+	case f.Ids.Len() > 0:
+		qs = make([]query, f.Ids.Len())
+		for i, idB := range f.Ids.ToSliceOfBytes() {
+			ih := id.New(eventid.NewWith(idB))
+			if ih == nil {
+				log.E.F("failed to decode event Id: %s", idB)
+				// just ignore it, clients will be clients
+				continue
+			}
+			prf := prefixes.Id.Key(ih)
+			// log.F.ToSliceOfBytes("id prefix to search on %0x from key %0x", prf, ih.Val)
+			qs[i] = query{
+				index:        i,
+				queryFilter:  f,
+				searchPrefix: prf,
+				skipTS:       true,
+			}
+		}
+		// log.F.S("ids", qs)
+	// second we make a set of queries based on author pubkeys, optionally with kinds
+	case f.Authors.Len() > 0:
+		// if there are no kinds, we just make the queries based on the author pub keys
+		if f.Kinds.Len() == 0 {
+			qs = make([]query, f.Authors.Len())
+			for i, pubkeyHex := range f.Authors.ToSliceOfBytes() {
+				var pk *pubkey.T
+				if pk, err = pubkey.New(pubkeyHex); chk.E(err) {
+					// bogus filter, continue anyway
+					continue
+				}
+				sp := prefixes.Pubkey.Key(pk)
+				// log.I.ToSliceOfBytes("search only for authors %0x from pub key %0x", sp, pk.Val)
+				qs[i] = query{
+					index:        i,
+					queryFilter:  f,
+					searchPrefix: sp,
+				}
+			}
+			// log.I.S("authors", qs)
+		} else {
+			// if there is kinds as well, we are searching via the kind/pubkey prefixes
+			qs = make([]query, f.Authors.Len()*f.Kinds.Len())
+			i := 0
+		authors:
+			for _, pubkeyHex := range f.Authors.ToSliceOfBytes() {
+				for _, kind := range f.Kinds.K {
+					var pk *pubkey.T
+					if pk, err = pubkey.New(pubkeyHex); chk.E(err) {
+						// skip this dodgy thing
+						continue authors
+					}
+					ki := kinder.New(kind.K)
+					sp := prefixes.PubkeyKind.Key(pk, ki)
+					// log.F.ToSliceOfBytes("search for authors from pub key %0x and kind %0x", pk.Val, ki.Val)
+					qs[i] = query{index: i, queryFilter: f, searchPrefix: sp}
+					i++
+				}
+			}
+			// log.F.S("authors/kinds", qs)
+		}
+		if f.Tags.Len() > 0 {
+			// tags cannot be resolved by these indexes; match them against the
+			// decoded events afterwards
+			ext = &filter.F{Tags: f.Tags}
+			// log.F.S("extra filter", ext)
+		}
+	case f.Tags.Len() > 0:
+		// determine the size of the queries array by inspecting all tags sizes
+		size := 0
+		for _, values := range f.Tags.ToSliceOfTags() {
+			size += values.Len() - 1
+		}
+		if size == 0 {
+			return nil, nil, 0, fmt.Errorf("empty tag filters")
+		}
+		// we need a query for each tag search
+		qs = make([]query, size)
+		// and any kinds mentioned as well in extra filter
+		ext = &filter.F{Kinds: f.Kinds}
+		i := 0
+		for _, values := range f.Tags.ToSliceOfTags() {
+			for _, value := range values.ToSliceOfBytes()[1:] {
+				// get key prefix (with full length) and offset where to write the last parts
+				var prf []byte
+				if prf, err = GetTagKeyPrefix(string(value)); chk.E(err) {
+					continue
+				}
+				// remove the last part to get just the prefix we want here
+				qs[i] = query{index: i, queryFilter: f, searchPrefix: prf}
+				i++
+			}
+		}
+		// log.F.S("tags", qs)
+	case f.Kinds.Len() > 0:
+		// if there is no ids, pubs or tags, we are just searching for kinds
+		qs = make([]query, f.Kinds.Len())
+		for i, kind := range f.Kinds.K {
+			kk := kinder.New(kind.K)
+			ki := prefixes.Kind.Key(kk)
+			qs[i] = query{
+				index:        i,
+				queryFilter:  f,
+				searchPrefix: ki,
+			}
+		}
+		// log.F.S("kinds", qs)
+	default:
+		log.I.F("nothing in filter, returning latest events")
+		// NOTE(review): the preset start here is unconditionally overwritten
+		// by the `until` loop below (which appends until to searchPrefix), so
+		// the literal start value is effectively dead.
+		// if len(qs) > 0 {
+		qs = append(
+			qs, query{
+				index: 0, queryFilter: f, searchPrefix: []byte{1},
+				start: []byte{1, 255, 255, 255, 255, 255, 255, 255, 255},
+				// })
+				// qs = append(qs, query{index: 0, queryFilter: f,
+				// searchPrefix: prefixes.CreatedAt.Key(),
+				skipTS: true,
+			},
+		)
+		ext = nil
+		// }
+		// // log.F.S("other", qs)
+	}
+
+	// this is where we'll end the iteration
+	if f.Since != nil {
+		if fs := f.Since.U64(); fs > since {
+			since = fs
+		}
+	}
+	// log.I.ToSliceOfBytes("since %d", since)
+
+	// upper bound for reverse iteration; math.MaxInt64 (not MaxUint64) keeps
+	// the value representable as a signed timestamp
+	var until uint64 = math.MaxInt64
+	if f.Until != nil {
+		if fu := f.Until.U64(); fu < until {
+			// +1 because the iterator seeks to the first key <= start
+			until = fu + 1
+		}
+	}
+	// log.I.ToSliceOfBytes("until %d", until)
+	for i, q := range qs {
+		qs[i].start = binary.BigEndian.AppendUint64(
+			q.searchPrefix, uint64(until),
+		)
+	}
+	// if we got an empty filter, we still need a query for scraping the newest
+	if len(qs) == 0 {
+		qs = append(
+			qs, query{
+				index: 0, queryFilter: f, searchPrefix: []byte{1},
+				start: []byte{1, 255, 255, 255, 255, 255, 255, 255, 255},
+			},
+		)
+	}
+	return
+}
diff --git a/ratel/queryevents.go b/ratel/queryevents.go
new file mode 100644
index 0000000..117e7f2
--- /dev/null
+++ b/ratel/queryevents.go
@@ -0,0 +1,293 @@
+package ratel
+
+import (
+ "errors"
+ "github.com/dgraph-io/badger/v4"
+ "orly.dev/chk"
+ "orly.dev/log"
+ "sort"
+
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/eventid"
+ "orly.dev/filter"
+ "orly.dev/hex"
+ "orly.dev/ratel/keys/createdat"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/ratel/prefixes"
+)
+
+// QueryEvents runs the given filter against the badger indexes and returns
+// the matching events sorted newest-first, deduplicated by event id, and
+// capped by the filter's limit (defaulting to r.MaxLimit). Index scanning and
+// event fetching abort quietly when either the backend context or the caller
+// context is cancelled (the early returns inside the View closures return a
+// nil error and simply truncate the result set).
+//
+// NOTE(review): delEvs is declared and drained in the deferred loop below,
+// but nothing ever appends to it (the expiration check is commented out), so
+// the deferred DeleteEvent pass is currently a no-op.
+func (r *T) QueryEvents(c context.T, f *filter.F) (evs event.S, err error) {
+	log.T.F("QueryEvents %s\n", f.Serialize())
+	evMap := make(map[string]*event.E)
+	var queries []query
+	var ext *filter.F
+	var since uint64
+	if queries, ext, since, err = PrepareQueries(f); chk.E(err) {
+		return
+	}
+	// log.I.S(f, queries)
+	limit := r.MaxLimit
+	if f.Limit != nil {
+		limit = int(*f.Limit)
+	}
+	// search for the keys generated from the filter
+	var total int
+	eventKeys := make(map[string]struct{})
+	for _, q := range queries {
+		select {
+		case <-r.Ctx.Done():
+			return
+		case <-c.Done():
+			return
+		default:
+		}
+		err = r.View(
+			func(txn *badger.Txn) (err error) {
+				// iterate only through keys and in reverse order
+				opts := badger.IteratorOptions{
+					Reverse: true,
+				}
+				it := txn.NewIterator(opts)
+				defer it.Close()
+				for it.Seek(q.start); it.ValidForPrefix(q.searchPrefix); it.Next() {
+					select {
+					case <-r.Ctx.Done():
+						return
+					case <-c.Done():
+						return
+					default:
+					}
+					item := it.Item()
+					k := item.KeyCopy(nil)
+					if !q.skipTS {
+						if len(k) < createdat.Len+serial.Len {
+							continue
+						}
+						// keys are scanned newest-first; once below `since`
+						// nothing further in this index can match
+						createdAt := createdat.FromKey(k)
+						if createdAt.Val.U64() < since {
+							break
+						}
+					}
+					ser := serial.FromKey(k)
+					idx := prefixes.Event.Key(ser)
+					eventKeys[string(idx)] = struct{}{}
+					total++
+					// some queries just produce stupid amounts of matches, they are a resource
+					// exhaustion attack vector and only spiders make them
+					if total >= r.MaxLimit {
+						return
+					}
+				}
+				return
+			},
+		)
+		if chk.E(err) {
+			// this means shutdown, probably
+			if errors.Is(err, badger.ErrDBClosed) {
+				return
+			}
+		}
+	}
+	log.T.F(
+		"found %d event indexes from %d queries", len(eventKeys), len(queries),
+	)
+	select {
+	case <-r.Ctx.Done():
+		return
+	case <-c.Done():
+		return
+	default:
+	}
+	var delEvs [][]byte
+	defer func() {
+		for _, d := range delEvs {
+			// if events were found that should be deleted, delete them
+			chk.E(r.DeleteEvent(r.Ctx, eventid.NewWith(d)))
+		}
+	}()
+	// accessed := make(map[string]struct{})
+	for ek := range eventKeys {
+		eventKey := []byte(ek)
+		err = r.View(
+			func(txn *badger.Txn) (err error) {
+				select {
+				case <-r.Ctx.Done():
+					return
+				case <-c.Done():
+					return
+				default:
+				}
+				opts := badger.IteratorOptions{Reverse: true}
+				it := txn.NewIterator(opts)
+				defer it.Close()
+				for it.Seek(eventKey); it.ValidForPrefix(eventKey); it.Next() {
+					item := it.Item()
+					// if r.HasL2 && item.ValueSize() == sha256.Size {
+					// 	// todo: this isn't actually calling anything right now, it should be
+					// 	// accumulating to propagate the query (this means response lag also)
+					// 	//
+					// 	// this is a stub entry that indicates an L2 needs to be accessed for it, so we
+					// 	// populate only the event.F.Id and return the result, the caller will expect
+					// 	// this as a signal to query the L2 event store.
+					// 	var eventValue []byte
+					// 	ev := &event.F{}
+					// 	if eventValue, err = item.ValueCopy(nil); chk.E(err) {
+					// 		continue
+					// 	}
+					// 	log.F.F("found event stub %0x must seek in L2", eventValue)
+					// 	ev.Id = eventValue
+					// 	select {
+					// 	case <-c.Done():
+					// 		return
+					// 	case <-r.Ctx.Done():
+					// 		log.F.Ln("backend context canceled")
+					// 		return
+					// 	default:
+					// 	}
+					// 	evMap[hex.Enc(ev.Id)] = ev
+					// 	return
+					// }
+					ev := &event.E{}
+					if err = item.Value(
+						func(eventValue []byte) (err error) {
+							log.I.F("%s", eventValue)
+							var rem []byte
+							if rem, err = r.Unmarshal(
+								ev, eventValue,
+							); chk.E(err) {
+								return
+							}
+							if len(rem) > 0 {
+								log.T.S(rem)
+							}
+							// if et := ev.Tags.GetFirst(tag.New("expiration")); et != nil {
+							// 	var exp uint64
+							// 	if exp, err = strconv.ParseUint(string(et.Value()), 10,
+							// 		64); chk.E(err) {
+							// 		return
+							// 	}
+							// 	if int64(exp) > time.Now().Unix() {
+							// 		// this needs to be deleted
+							// 		delEvs = append(delEvs, ev.Id)
+							// 		ev = nil
+							// 		return
+							// 	}
+							// }
+							return
+						},
+					); chk.E(err) {
+						continue
+					}
+					if ev == nil {
+						continue
+					}
+					// if ext != nil {
+					// 	log.I.S(ext)
+					// 	log.I.S(ev)
+					// 	log.I.S(ext.Matches(ev))
+					// }
+					if ext == nil || ext.Matches(ev) {
+						evMap[hex.Enc(ev.Id)] = ev
+						// add event counter key to accessed
+						// ser := serial.FromKey(eventKey)
+						// accessed[string(ser.Val)] = struct{}{}
+						// if pointers.Present(f.Limit) {
+						// 	*f.Limit--
+						// 	if *f.Limit <= 0 {
+						// 		log.I.F("found events: %d", len(evMap))
+						// 		return
+						// 	}
+						// }
+						// if there is no limit, cap it at the MaxLimit, assume this was the
+						// intent or the client is erroneous, if any limit greater is
+						// requested this will be used instead as the previous clause.
+						if len(evMap) >= r.MaxLimit {
+							// log.F.ToSliceOfBytes("found MaxLimit events: %d", len(evMap))
+							return
+						}
+					}
+				}
+				return
+			},
+		)
+		if err != nil {
+			// this means shutdown, probably
+			if errors.Is(err, badger.ErrDBClosed) {
+				return
+			}
+		}
+		select {
+		case <-r.Ctx.Done():
+			return
+		case <-c.Done():
+			return
+		default:
+		}
+	}
+	// log.I.S(evMap)
+	if len(evMap) > 0 {
+		for i := range evMap {
+			// events with an empty pubkey failed to decode fully; skip them
+			if len(evMap[i].Pubkey) == 0 {
+				log.I.S(evMap[i])
+				continue
+			}
+			evs = append(evs, evMap[i])
+		}
+		log.I.S(len(evs))
+		sort.Sort(event.Descending(evs))
+		if len(evs) > limit {
+			evs = evs[:limit]
+		}
+		// NOTE(review): seen is keyed by kind only; replaceable events are
+		// normally unique per (pubkey, kind), so with multiple authors this
+		// keeps only the single newest event of each replaceable kind —
+		// confirm intended.
+		seen := make(map[uint16]struct{})
+		var tmp event.S
+		for _, ev := range evs {
+			log.I.F("%d", ev.CreatedAt.V)
+			if ev.Kind.IsReplaceable() {
+				// remove all but newest versions of replaceable
+				if _, ok := seen[ev.Kind.K]; ok {
+					// already seen this replaceable event, skip
+					continue
+				}
+				seen[ev.Kind.K] = struct{}{}
+			}
+			tmp = append(tmp, ev)
+		}
+		evs = tmp
+		// log.I.S(evs)
+		// log.F.C(func() string {
+		// 	evIds := make([]string, len(evs))
+		// 	for i, ev := range evs {
+		// 		evIds[i] = hex.Enc(ev.Id)
+		// 	}
+		// 	heading := fmt.Sprintf("query complete,%d events found,%s", len(evs),
+		// 		f.Serialize())
+		// 	return fmt.Sprintf("%s\nevents,%v", heading, evIds)
+		// })
+		// bump the access times on all retrieved events. do this in a goroutine so the
+		// user's events are delivered immediately
+		// go func() {
+		// 	for ser := range accessed {
+		// 		seri := serial.New([]byte(ser))
+		// 		now := timestamp.Now()
+		// 		err = r.Update(func(txn *badger.Txn) (err error) {
+		// 			key := GetCounterKey(seri)
+		// 			it := txn.NewIterator(badger.IteratorOptions{})
+		// 			defer it.Close()
+		// 			if it.Seek(key); it.ValidForPrefix(key) {
+		// 				// update access record
+		// 				if err = txn.Set(key, now.Bytes()); chk.E(err) {
+		// 					return
+		// 				}
+		// 			}
+		// 			// log.F.Ln("last access for", seri.Uint64(), now.U64())
+		// 			return nil
+		// 		})
+		// 	}
+		// }()
+	} else {
+		log.T.F("no events found,%s", f.Serialize())
+	}
+	// }
+	return
+}
diff --git a/ratel/queryforids.go b/ratel/queryforids.go
new file mode 100644
index 0000000..c184dd2
--- /dev/null
+++ b/ratel/queryforids.go
@@ -0,0 +1,209 @@
+package ratel
+
+import (
+ "errors"
+ "orly.dev/chk"
+ "orly.dev/log"
+ "strconv"
+ "time"
+
+ "github.com/dgraph-io/badger/v4"
+
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/eventid"
+ "orly.dev/filter"
+ "orly.dev/interfaces/store"
+ "orly.dev/ratel/keys"
+ "orly.dev/ratel/keys/createdat"
+ "orly.dev/ratel/keys/fullid"
+ "orly.dev/ratel/keys/fullpubkey"
+ "orly.dev/ratel/keys/index"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/ratel/prefixes"
+ "orly.dev/realy/pointers"
+ "orly.dev/tag"
+ "orly.dev/timestamp"
+)
+
+func (r *T) QueryForIds(c context.T, f *filter.F) (
+ founds []store.IdPkTs, err error,
+) {
+ log.T.F("QueryForIds %s\n", f.Serialize())
+ var queries []query
+ var ext *filter.F
+ var since uint64
+ if queries, ext, since, err = PrepareQueries(f); chk.E(err) {
+ return
+ }
+ // search for the keys generated from the filter
+ var total int
+ eventKeys := make(map[string]struct{})
+ var serials []*serial.T
+ for _, q := range queries {
+ err = r.View(
+ func(txn *badger.Txn) (err error) {
+ // iterate only through keys and in reverse order
+ opts := badger.IteratorOptions{
+ Reverse: true,
+ }
+ it := txn.NewIterator(opts)
+ defer it.Close()
+ for it.Seek(q.start); it.ValidForPrefix(q.searchPrefix); it.Next() {
+ item := it.Item()
+ k := item.KeyCopy(nil)
+ if !q.skipTS {
+ if len(k) < createdat.Len+serial.Len {
+ continue
+ }
+ createdAt := createdat.FromKey(k)
+ if createdAt.Val.U64() < since {
+ break
+ }
+ }
+ ser := serial.FromKey(k)
+ serials = append(serials, ser)
+ idx := prefixes.Event.Key(ser)
+ eventKeys[string(idx)] = struct{}{}
+ total++
+ // some queries just produce stupid amounts of matches, they are a resource
+ // exhaustion attack vector and only spiders make them
+ if total > 5000 {
+ return
+ }
+ }
+ return
+ },
+ )
+ if chk.E(err) {
+ // this means shutdown, probably
+ if errors.Is(err, badger.ErrDBClosed) {
+ return
+ }
+ }
+ }
+ log.T.F(
+ "found %d event indexes from %d queries", len(eventKeys), len(queries),
+ )
+ // l2Map := make(map[string]*event.F) // todo: this is not being used, it should be
+ var delEvs [][]byte
+ defer func() {
+ for _, d := range delEvs {
+ // if events were found that should be deleted, delete them
+ chk.E(r.DeleteEvent(r.Ctx, eventid.NewWith(d)))
+ }
+ }()
+ accessed := make(map[string]struct{})
+ if ext != nil {
+ // we have to fetch the event
+ for ek := range eventKeys {
+ eventKey := []byte(ek)
+ err = r.View(
+ func(txn *badger.Txn) (err error) {
+ opts := badger.IteratorOptions{Reverse: true}
+ it := txn.NewIterator(opts)
+ defer it.Close()
+ done:
+ for it.Seek(eventKey); it.ValidForPrefix(eventKey); it.Next() {
+ item := it.Item()
+ // if r.HasL2 && item.ValueSize() == sha256.Size {
+ // // this is a stub entry that indicates an L2 needs to be accessed for
+ // // it, so we populate only the event.F.Id and return the result, the
+ // // caller will expect this as a signal to query the L2 event store.
+ // var eventValue []byte
+ // ev := &event.F{}
+ // if eventValue, err = item.ValueCopy(nil); chk.E(err) {
+ // continue
+ // }
+ // log.F.F("found event stub %0x must seek in L2", eventValue)
+ // ev.Id = eventValue
+ // l2Map[hex.Enc(ev.Id)] = ev
+ // return
+ // }
+ ev := &event.E{}
+ if err = item.Value(
+ func(eventValue []byte) (err error) {
+ var rem []byte
+ if rem, err = r.Unmarshal(
+ ev, eventValue,
+ ); chk.E(err) {
+ return
+ }
+ if len(rem) > 0 {
+ log.T.S(rem)
+ }
+ if et := ev.Tags.GetFirst(tag.New("expiration")); et != nil {
+ var exp uint64
+ if exp, err = strconv.ParseUint(
+ string(et.Value()), 10,
+ 64,
+ ); chk.E(err) {
+ return
+ }
+ if int64(exp) <= time.Now().Unix() {
+ // the event has expired: schedule it for deletion
+ delEvs = append(delEvs, ev.Id)
+ return
+ }
+ }
+ return
+ },
+ ); chk.E(err) {
+ continue
+ }
+ if ev == nil {
+ continue
+ }
+ if ext.Matches(ev) {
+ // add event counter key to accessed
+ ser := serial.FromKey(eventKey)
+ serials = append(serials, ser)
+ accessed[string(ser.Val)] = struct{}{}
+ if pointers.Present(f.Limit) {
+ if *f.Limit < uint(len(serials)) {
+ // done
+ break done
+ }
+ }
+ }
+ }
+ return
+ },
+ )
+ if err != nil {
+ // this means shutdown, probably
+ if errors.Is(err, badger.ErrDBClosed) {
+ return
+ }
+ }
+ }
+ }
+ for _, ser := range serials {
+ err = r.View(
+ func(txn *badger.Txn) (err error) {
+ prf := prefixes.FullIndex.Key(ser)
+ opts := badger.IteratorOptions{Prefix: prf}
+ it := txn.NewIterator(opts)
+ defer it.Close()
+ it.Seek(prf)
+ if it.ValidForPrefix(prf) {
+ k := it.Item().KeyCopy(nil)
+ id := fullid.New()
+ ts := createdat.New(timestamp.New())
+ pk := fullpubkey.New()
+ keys.Read(k, index.New(0), serial.New(nil), id, pk, ts)
+ ff := store.IdPkTs{
+ Ts: ts.Val.I64(),
+ Id: id.Val,
+ Pub: pk.Val,
+ Ser: ser.Uint64(),
+ }
+ founds = append(founds, ff)
+ }
+ return
+ },
+ )
+ }
+ // log.I.S(founds)
+ return
+}
diff --git a/ratel/rescan.go b/ratel/rescan.go
new file mode 100644
index 0000000..3f6e662
--- /dev/null
+++ b/ratel/rescan.go
@@ -0,0 +1,82 @@
+package ratel
+
+import (
+ "github.com/dgraph-io/badger/v4"
+ "orly.dev/chk"
+ "orly.dev/log"
+
+ "orly.dev/event"
+ "orly.dev/ratel/keys"
+ "orly.dev/ratel/keys/createdat"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/ratel/prefixes"
+ "orly.dev/sha256"
+ "orly.dev/timestamp"
+)
+
+// Rescan regenerates all indexes of events to add new indexes in a new version.
+func (r *T) Rescan() (err error) {
+ var evKeys [][]byte
+ err = r.View(
+ func(txn *badger.Txn) (err error) {
+ prf := []byte{prefixes.Event.B()}
+ it := txn.NewIterator(badger.IteratorOptions{})
+ defer it.Close()
+ for it.Seek(prf); it.ValidForPrefix(prf); it.Next() {
+ item := it.Item()
+ if item.ValueSize() == sha256.Size {
+ continue
+ }
+ evKeys = append(evKeys, item.KeyCopy(nil))
+ }
+ return
+ },
+ )
+ var i int
+ var key []byte
+ for i, key = range evKeys {
+ err = r.Update(
+ func(txn *badger.Txn) (err error) {
+ it := txn.NewIterator(badger.IteratorOptions{})
+ defer it.Close()
+ it.Seek(key)
+ if it.Valid() {
+ item := it.Item()
+ var evB []byte
+ if evB, err = item.ValueCopy(nil); chk.E(err) {
+ return
+ }
+ ser := serial.FromKey(key)
+ var rem []byte
+ ev := &event.E{}
+ if rem, err = r.Unmarshal(ev, evB); chk.E(err) {
+ return
+ }
+ if len(rem) > 0 {
+ log.T.S(rem)
+ }
+ // add the indexes
+ var indexKeys [][]byte
+ indexKeys = GetIndexKeysForEvent(ev, ser)
+ // log.I.S(indexKeys)
+ for _, k := range indexKeys {
+ var val []byte
+ if k[0] == prefixes.Counter.B() {
+ val = keys.Write(createdat.New(timestamp.Now()))
+ }
+ if err = txn.Set(k, val); chk.E(err) {
+ return
+ }
+ }
+ if i%1000 == 0 {
+ log.I.F("rescanned %d events", i)
+ }
+ }
+ return
+ },
+ )
+ }
+ chk.E(err)
+ log.I.F("completed rescanning %d events", i)
+ return err
+}
diff --git a/ratel/saveevent.go b/ratel/saveevent.go
new file mode 100644
index 0000000..5e3b48b
--- /dev/null
+++ b/ratel/saveevent.go
@@ -0,0 +1,155 @@
+package ratel
+
+import (
+ "github.com/dgraph-io/badger/v4"
+ "orly.dev/chk"
+ "orly.dev/errorf"
+
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/eventid"
+ eventstore "orly.dev/interfaces/store"
+ "orly.dev/ratel/keys"
+ "orly.dev/ratel/keys/createdat"
+ "orly.dev/ratel/keys/id"
+ "orly.dev/ratel/keys/index"
+ "orly.dev/ratel/keys/serial"
+ "orly.dev/ratel/keys/tombstone"
+ "orly.dev/ratel/prefixes"
+ "orly.dev/sha256"
+ "orly.dev/timestamp"
+)
+
+func (r *T) SaveEvent(c context.T, ev *event.E) (
+ keySize, ValueSize int, err error,
+) {
+ if ev.Kind.IsEphemeral() {
+ // log.T.ToSliceOfBytes("not saving ephemeral event\n%s", ev.Serialize())
+ return
+ }
+ // make sure Close waits for this to complete
+ r.WG.Add(1)
+ defer r.WG.Done()
+ // first, search to see if the event Id already exists.
+ var foundSerial []byte
+ var deleted bool
+ seri := serial.New(nil)
+ var tsPrefixBytes []byte
+ err = r.View(
+ func(txn *badger.Txn) (err error) {
+ // query event by id to ensure we don't try to save duplicates
+ prf := prefixes.Id.Key(id.New(eventid.NewWith(ev.Id)))
+ it := txn.NewIterator(badger.IteratorOptions{})
+ defer it.Close()
+ it.Seek(prf)
+ if it.ValidForPrefix(prf) {
+ var k []byte
+ // get the serial
+ k = it.Item().Key()
+ // copy serial out
+ keys.Read(k, index.Empty(), id.New(&eventid.T{}), seri)
+ // save into foundSerial
+ foundSerial = seri.Val
+ }
+ // if the event was deleted we don't want to save it again
+ // In deleteevent.go, the tombstone key is created with:
+ // tombstoneKey = prefixes.Tombstone.Key(ts, createdat.New(timestamp.Now()))
+ // where ts is created with tombstone.NewWith(ev.EventId())
+ // We need to use just the prefix part (without the timestamp) to find any tombstone for this event
+ tsPrefixBytes = []byte{prefixes.Tombstone.B()}
+ tsBytes := tombstone.Make(eventid.NewWith(ev.Id))
+ tsPrefixBytes = append(tsPrefixBytes, tsBytes...)
+ it2 := txn.NewIterator(badger.IteratorOptions{})
+ defer it2.Close()
+ it2.Rewind()
+ it2.Seek(tsPrefixBytes)
+ if it2.ValidForPrefix(tsPrefixBytes) {
+ deleted = true
+ }
+ return
+ },
+ )
+ if chk.E(err) {
+ return
+ }
+ if deleted {
+ err = errorf.W(
+ "tombstone found %0x, event will not be saved", tsPrefixBytes,
+ )
+ return
+ }
+ if foundSerial != nil {
+ // log.D.ToSliceOfBytes("found possible duplicate or stub for %s", ev.Serialize())
+ err = r.Update(
+ func(txn *badger.Txn) (err error) {
+ // retrieve the event record
+ evKey := keys.Write(index.New(prefixes.Event), seri)
+ it := txn.NewIterator(badger.IteratorOptions{})
+ defer it.Close()
+ it.Seek(evKey)
+ if it.ValidForPrefix(evKey) {
+ if it.Item().ValueSize() != sha256.Size {
+ // not a stub, we already have it
+ // log.D.ToSliceOfBytes("duplicate event %0x", ev.Id)
+ return eventstore.ErrDupEvent
+ }
+ // we only need to restore the event binary and write the access counter key
+ // encode to binary
+ var bin []byte
+ bin = r.Marshal(ev, bin)
+ if err = txn.Set(it.Item().Key(), bin); chk.E(err) {
+ return
+ }
+ // // bump counter key
+ // counterKey := GetCounterKey(seri)
+ // val := keys.Write(createdat.New(timestamp.Now()))
+ // if err = txn.Set(counterKey, val); chk.E(err) {
+ // return
+ // }
+ return
+ }
+ return
+ },
+ )
+ // if it was a dupe, we are done.
+ if err != nil {
+ return
+ }
+ return
+ }
+ var bin []byte
+ bin = r.Marshal(ev, bin)
+ // otherwise, save new event record.
+ if err = r.Update(
+ func(txn *badger.Txn) (err error) {
+ var idx []byte
+ var ser *serial.T
+ idx, ser = r.SerialKey()
+ // encode to binary
+ // raw event store
+ if err = txn.Set(idx, bin); chk.E(err) {
+ return
+ }
+ // add the indexes
+ var indexKeys [][]byte
+ indexKeys = GetIndexKeysForEvent(ev, ser)
+ // log.I.S(indexKeys)
+ for _, k := range indexKeys {
+ var val []byte
+ if k[0] == prefixes.Counter.B() {
+ val = keys.Write(createdat.New(timestamp.Now()))
+ }
+ if err = txn.Set(k, val); chk.E(err) {
+ return
+ }
+ }
+ // log.D.ToSliceOfBytes("saved event to ratel %s:\n%s", r.dataDir, ev.Serialize())
+ return
+ },
+ ); chk.E(err) {
+ return
+ }
+ return
+}
+
+func (r *T) Sync() (err error) { return r.DB.Sync() }
diff --git a/readme.adoc b/readme.adoc
new file mode 100644
index 0000000..ffc2e53
--- /dev/null
+++ b/readme.adoc
@@ -0,0 +1,125 @@
+= realy.lol
+:toc:
+:note-caption: note 👉
+
+image:https://img.shields.io/badge/godoc-documentation-blue.svg[Documentation,link=https://pkg.go.dev/realy.lol]
+image:https://img.shields.io/badge/donate-geyser_crowdfunding_project_page-orange.svg[Support this project,link=https://geyser.fund/project/realy]
+zap me: ⚡️mleku@getalby.com
+
+image:./realy.png[realy.png]
+
+nostr relay built from a heavily modified fork of https://github.com/nbd-wtf/go-nostr[nbd-wtf/go-nostr]
+and https://github.com/fiatjaf/relayer[fiatjaf/relayer] aimed at maximum performance, simplicity and memory efficiency.
+
+== Features
+
+* new HTTP REST API available in addition to standard websocket access, simplifying writing applications and tools, and building a standard API method set for future extensions for more flexible features
+* a lot of other bits and pieces accumulated from nearly 8 years of working with Go, logging and run control, XDG user data directories (windows, mac, linux, android)
+* a cleaned up and unified fork of the btcd/dcred BIP-340 signatures, including the use of bitcoin core's BIP-340 implementation (more than 4x faster than btcd) (todo: ECDH from the C library tbd)
+* AVX/AVX2 optimized SHA256 and SIMD hex encoder
+* https://github.com/bitcoin/secp256k1[libsecp256k1]-enabled signature and signature verification (see link:p256k/README.md[here])
+* efficient, mutable byte slice based hash/pubkey/signature encoding in memory (zero allocation decode from wire, can tolerate whitespace, at a speed penalty)
+* custom badger based event store with an optional garbage collector that deletes the least recently accessed events once the store exceeds a specified size, and data encoded using a more space efficient format based on the nostr canonical json array event form
+* link:cmd/vainstr[vainstr] vanity npub generator that can mine a 5 letter suffix in around 15 minutes on a 6 core Ryzen 5 processor using the CGO bitcoin core signature library
+* reverse proxy tool link:cmd/lerproxy[lerproxy] with support for Go vanity imports and https://github.com/nostr-protocol/nips/blob/master/05.md[nip-05] npub DNS verification and own TLS certificates
+* link:https://github.com/nostr-protocol/nips/blob/master/98.md[nip-98] implementation with new expiring variant for vanilla HTTP tools and browsers.
+
+== Building
+
+If you just want to make it run from source, you should check out a tagged version.
+
+The commits on these tags will explain what state the commit is at.
+
+In general, the most stable versions are new minor tags, eg v1.2.0 or v1.23.0, and minor patch versions may not be stable and occasionally may not compile (not very often).
+
+Go 1.24 or better is recommended.
+Go 1.23.1 is minimum required.
+
+== Repository Policy
+
+In general, the main `dev` branch will build, but occasionally may not.
+It is where new commits are added once they are working, mostly, and allows people to easily see ongoing activity.
+
+WARNING: IT IS NOT GUARANTEED TO BE STABLE... but it is getting there.
+
+Use tags to pin to a specific version.
+Tags are in standard Go semver pattern `vX.X.X`
+
+== CGO and secp256k1 signatures library
+
+By default, Go will usually be configured with `CGO_ENABLED=1`.
+This selects the use of the C library from bitcoin core, which does signatures and verifications much faster (4x and better) but complicates the build process as you have to install the library beforehand.
+There are instructions in link:p256k/README.md[p256k/README.md] for doing this.
+
+=== Disabling CGO
+
+In order to disable the use of this, you must set the environment variable `CGO_ENABLED=0` and the Go compiler will automatically revert to using the btcec based secp256k1 signatures library.
+
+----
+export CGO_ENABLED=0
+cd cmd/realy
+go build .
+----
+
+This will build the binary and place it in cmd/realy and then you can move it where you like.
+
+=== Static build
+
+To produce a static binary, whether you use the CGO secp256k1 or disable CGO as above:
+
+----
+go build --ldflags '-extldflags "-static"' -o ~/bin/realy ./cmd/realy/.
+----
+
+will place it into your `~/bin/` directory, and it will work on any system of the same architecture with the same glibc major version (has been 2 for a long time).
+
+== Configuration
+
+The default will run the relay with default settings, which will not be what you want.
+
+=== Show Current Configuration
+
+To see the current active configuration:
+
+----
+realy env
+----
+
+=== Create Persistent Configuration
+
+This output can be directed to the profile location to make the settings editable without manually setting them on the commandline:
+
+----
+realy env > $HOME/.config/realy/.env
+----
+
+You can now edit this file to alter the configuration.
+
+Regarding the configuration system, this is an element of many servers that is absurdly complex, and for which reason Realy does not use a complicated scheme, a simple library that allows automatic configuration of a series of options, added a simple info print:
+
+----
+realy help
+----
+
+will show you the instructions, and the one simple extension of being able to use a standard formatted .env file to configure all the options for an instance.
+
+=== Database Storage Location
+
+The database is stored in `$HOME/.local/share/realy` and if need be you can stop `realy`, delete everything in this directory, and restart to "nuke" the database. Note that this is now available through the link:#_simplified_nostr[Simplified Nostr] HTTP OpenAPI endpoint on `/nuke`
+
+== API support
+
+=== Standard Nostr NIPs
+
+`realy` already accepts all the standard NIPs, mainly NIP-01, and many other message types are recognised, such as NIP-42 auth messages; it uses and parses relay lists, and all that other stuff.
+It has maybe the most faithful implementation of NIP-42 but most clients don't correctly implement it, or at all.
+Which is sad, but what can you do with stupid people?
+
+[#_simplified_nostr]
+=== Simplified Nostr
+
+Rather than write a text that will likely fall out of date very quickly, simply run `realy` and visit its listener address (eg link:http://localhost:3334/api[http://localhost:3334/api]) to see the full documentation.
+
+By default this presents you with a Scalar Docs page that lets you browse the available API methods and shows examples in many forms including cURL and most languages how to call and what data needs to go in headers, body, and parameters and what results will come back.
+
+There is even a subscription endpoint, also, which uses SSE format and does not require a websocket upgrade to work with.
\ No newline at end of file
diff --git a/readme.md b/readme.md
deleted file mode 100644
index 3efd0a7..0000000
--- a/readme.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# orly
-
-
-
-a super simple, fast nostr relay
\ No newline at end of file
diff --git a/orly.png b/realy.png
similarity index 100%
rename from orly.png
rename to realy.png
diff --git a/realy.service b/realy.service
new file mode 100644
index 0000000..c445493
--- /dev/null
+++ b/realy.service
@@ -0,0 +1,16 @@
+# systemd unit to run realy as a service
+[Unit]
+Description=realy
+
+[Service]
+Type=simple
+User=mleku
+ExecStart=/home/mleku/.local/bin/realy
+Restart=always
+Wants=network-online.target
+# waits for wireguard service to come up before starting, remove if running it directly on an
+# internet routeable connection
+After=network.target network-online.target wg-quick@wg0.service
+
+[Install]
+WantedBy=multi-user.target
diff --git a/realy/addEvent.go b/realy/addEvent.go
new file mode 100644
index 0000000..e32e397
--- /dev/null
+++ b/realy/addEvent.go
@@ -0,0 +1,75 @@
+package realy
+
+import (
+ "errors"
+ "net/http"
+ "orly.dev/log"
+ "strings"
+
+ "orly.dev/context"
+ "orly.dev/event"
+ "orly.dev/interfaces/store"
+ "orly.dev/normalize"
+ "orly.dev/relay"
+ "orly.dev/socketapi"
+)
+
+func (s *Server) addEvent(
+ c context.T, rl relay.I, ev *event.E,
+ hr *http.Request, origin string,
+ authedPubkey []byte,
+) (accepted bool, message []byte) {
+
+ if ev == nil {
+ return false, normalize.Invalid.F("empty event")
+ }
+ // sto := rl.Storage()
+ // advancedSaver, _ := sto.(relay.AdvancedSaver)
+ // don't allow storing event with protected marker as per nip-70 with auth enabled.
+ // if (s.authRequired || !s.publicReadable) && ev.Tags.ContainsProtectedMarker() {
+ // if len(authedPubkey) == 0 || !bytes.Equal(ev.Pubkey, authedPubkey) {
+ // return false,
+ // []byte(fmt.Sprintf("event with relay marker tag '-' (nip-70 protected event) "+
+ // "may only be published by matching npub: %0x is not %0x",
+ // authedPubkey, ev.Pubkey))
+ // }
+ // }
+ if ev.Kind.IsEphemeral() {
+ } else {
+ // if advancedSaver != nil {
+ // advancedSaver.BeforeSave(c, ev)
+ // }
+ if saveErr := s.Publish(c, ev); saveErr != nil {
+ if errors.Is(saveErr, store.ErrDupEvent) {
+ return false, normalize.Error.F(saveErr.Error())
+ }
+ errmsg := saveErr.Error()
+ if socketapi.NIP20prefixmatcher.MatchString(errmsg) {
+ if strings.Contains(errmsg, "tombstone") {
+ return false, normalize.Blocked.F("event was deleted, not storing it again")
+ }
+ if strings.HasPrefix(errmsg, string(normalize.Blocked)) {
+ return false, []byte(errmsg)
+ }
+ return false, normalize.Error.F(errmsg)
+ } else {
+ return false, normalize.Error.F("failed to save (%s)", errmsg)
+ }
+ }
+ log.I.F(
+ "event id %0x stored ephemeral: %v", ev.Id, ev.Kind.IsEphemeral(),
+ )
+ // if advancedSaver != nil {
+ // advancedSaver.AfterSave(ev)
+ // }
+ }
+ // var authRequired bool
+ // if ar, ok := rl.(relay.Authenticator); ok {
+ // authRequired = ar.AuthRequired()
+ // }
+ // notify subscribers
+ s.listeners.Deliver(ev)
+ accepted = true
+ log.I.S(ev)
+ return
+}
diff --git a/realy/auth.go b/realy/auth.go
new file mode 100644
index 0000000..ce72ae3
--- /dev/null
+++ b/realy/auth.go
@@ -0,0 +1,32 @@
+package realy
+
+//func (s *Server) adminAuth(r *http.Request,
+// tolerance ...time.Duration) (authed bool, pubkey []byte) {
+// var valid bool
+// var err error
+// var tolerate time.Duration
+// if len(tolerance) > 0 {
+// tolerate = tolerance[0]
+// }
+// if valid, pubkey, err = httpauth.CheckAuth(r, tolerate); chk.E(err) {
+// return
+// }
+// if !valid {
+// return
+// }
+// // check admins pubkey list
+// for _, v := range s.admins {
+// if bytes.Equal(v.Pub(), pubkey) {
+// authed = true
+// return
+// }
+// }
+// return
+//}
+
+//func (s *Server) unauthorized(w http.ResponseWriter, r *http.Request) {
+// w.Header().Set("WWW-Authenticate", `Basic realm="restricted", charset="UTF-8"`)
+// http.Error(w, "Unauthorized", http.StatusUnauthorized)
+// fmt.Fprintf(w,
+// "not authorized, either you did not provide an auth token or what you provided does not grant access\n")
+//}
diff --git a/realy/config/config.go b/realy/config/config.go
new file mode 100644
index 0000000..89e174e
--- /dev/null
+++ b/realy/config/config.go
@@ -0,0 +1,217 @@
+// Package config provides a go-simpler.org/env configuration table and helpers
+// for working with the list of key/value lists stored in .env files.
+package config
+
+import (
+ "fmt"
+ "io"
+ "orly.dev/chk"
+ "orly.dev/log"
+ "orly.dev/version"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/adrg/xdg"
+ "go-simpler.org/env"
+
+ "orly.dev/apputil"
+ env2 "orly.dev/env"
+)
+
+// C is the configuration for realy relay. These are read from the environment if present, or if
+// a .env file is found in ~/.config/realy/ that is read instead and overrides anything else.
+type C struct {
+ AppName string `env:"ORLY_APP_NAME" default:"realy"`
+ Config string `env:"ORLY_CONFIG_DIR" usage:"location for configuration file, which has the name '.env' to make it harder to delete, and is a standard environment KEY=value... style"`
+ State string `env:"ORLY_STATE_DATA_DIR" usage:"storage location for state data affected by dynamic interactive interfaces"`
+ DataDir string `env:"ORLY_DATA_DIR" usage:"storage location for the ratel event store"`
+ Listen string `env:"ORLY_LISTEN" default:"0.0.0.0" usage:"network listen address"`
+ Port int `env:"ORLY_PORT" default:"3334" usage:"port to listen on"`
+ // AdminNpubs string `env:"ORLY_ADMIN_NPUBS" usage:"comma separated lists of hex or bech32 format pubkeys of authorised administrators for the http admin endpoints"`
+ LogLevel string `env:"ORLY_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
+ DbLogLevel string `env:"ORLY_DB_LOG_LEVEL" default:"info" usage:"debug level: fatal error warn info debug trace"`
+ // AuthRequired bool `env:"ORLY_AUTH_REQUIRED" default:"false" usage:"requires auth for all access"`
+ // PublicReadable bool `env:"ORLY_PUBLIC_READABLE" default:"true" usage:"allows all read access, overriding read access limit from ORLY_AUTH_REQUIRED"`
+ // Owners []string `env:"ORLY_OWNERS" usage:"comma separated list of npubs of users in hex or bech32 format whose follow and mute list dictate accepting requests and events with AUTH_REQUIRED enabled - follows and follows follows are allowed to read/write, owners mutes events are rejected"`
+ // DBSizeLimit int `env:"ORLY_DB_SIZE_LIMIT" default:"0" usage:"the number of gigabytes (1,000,000,000 bytes) we want to keep the data store from exceeding, 0 means disabled"`
+ // DBLowWater int `env:"ORLY_DB_LOW_WATER" default:"60" usage:"the percentage of DBSizeLimit a GC run will reduce the used storage down to"`
+ // DBHighWater int `env:"ORLY_DB_HIGH_WATER" default:"80" usage:"the trigger point at which a GC run should start if exceeded"`
+ // GCFrequency int `env:"ORLY_GC_FREQUENCY" default:"3600" usage:"the frequency of checks of the current utilisation in minutes"`
+ Pprof bool `env:"ORLY_PPROF" default:"false" usage:"enable pprof on 127.0.0.1:6060"`
+ // MemLimit int `env:"ORLY_MEMLIMIT" default:"250000000" usage:"set memory limit, default is 250Mb"`
+ // UseCompact bool `env:"ORLY_USE_COMPACT" default:"false" usage:"use the compact database encoding for the ratel event store"`
+ // Compression string `env:"ORLY_COMPRESSION" default:"none" usage:"compress the database, [none|snappy|zstd]"`
+ // NWC st `env:"NWC" usage:"NWC connection string for relay to interact with an NWC enabled wallet"` // todo
+}
+
+// New creates a new config.C.
+func New() (cfg *C, err error) {
+ cfg = &C{}
+ if err = env.Load(cfg, &env.Options{SliceSep: ","}); chk.T(err) {
+ return
+ }
+ if cfg.Config == "" {
+ cfg.Config = filepath.Join(xdg.ConfigHome, cfg.AppName)
+ }
+ if cfg.State == "" {
+ cfg.State = filepath.Join(xdg.StateHome, cfg.AppName)
+ }
+ if cfg.DataDir == "" {
+ cfg.DataDir = filepath.Join(xdg.DataHome, cfg.AppName)
+ }
+ envPath := filepath.Join(cfg.Config, ".env")
+ if apputil.FileExists(envPath) {
+ log.I.F("loading config from %s", envPath)
+ var e env2.Env
+ if e, err = env2.GetEnv(envPath); chk.T(err) {
+ return
+ }
+ if err = env.Load(
+ cfg, &env.Options{SliceSep: ",", Source: e},
+ ); chk.E(err) {
+ return
+ }
+ // var owners []string
+ // // remove empties if any
+ // for _, o := range cfg.Owners {
+ // if len(o) > 0 {
+ // owners = append(owners, o)
+ // }
+ // }
+ // cfg.Owners = owners
+ }
+ return
+}
+
+// HelpRequested returns true if any of the common types of help invocation are
+// found as the first command line parameter/flag.
+func HelpRequested() (help bool) {
+ if len(os.Args) > 1 {
+ switch strings.ToLower(os.Args[1]) {
+ case "help", "-h", "--h", "-help", "--help", "?":
+ help = true
+ }
+ }
+ return
+}
+
+// GetEnv processes os.Args to detect a request for printing the current settings as a list of
+// environment variable key/values.
+func GetEnv() (requested bool) {
+ if len(os.Args) > 1 {
+ switch strings.ToLower(os.Args[1]) {
+ case "env":
+ requested = true
+ }
+ }
+ return
+}
+
+// KV is a key/value pair.
+type KV struct{ Key, Value string }
+
+// KVSlice is a collection of key/value pairs.
+type KVSlice []KV
+
+func (kv KVSlice) Len() int { return len(kv) }
+func (kv KVSlice) Less(i, j int) bool { return kv[i].Key < kv[j].Key }
+func (kv KVSlice) Swap(i, j int) { kv[i], kv[j] = kv[j], kv[i] }
+
+// Composit merges two KVSlice together, replacing the values of earlier keys with same named
+// KV items later in the slice (enabling compositing two together as a .env, as well as them
+// being composed as structs.
+func (kv KVSlice) Composit(kv2 KVSlice) (out KVSlice) {
+ // duplicate the initial KVSlice
+ for _, p := range kv {
+ out = append(out, p)
+ }
+out:
+ for i, p := range kv2 {
+ for j, q := range out {
+ // if the key is repeated, replace the value
+ if p.Key == q.Key {
+ out[j].Value = kv2[i].Value
+ continue out
+ }
+ }
+ out = append(out, p)
+ }
+ return
+}
+
+// EnvKV turns a struct with `env` keys (used with go-simpler/env) into a standard formatted
+// environment variable key/value pair list, one per line. Note you must dereference a pointer
+// type to use this. This allows the composition of the config in this file with an extended
+// form with a customized variant of realy to produce correct environment variables both read
+// and write.
+func EnvKV(cfg any) (m KVSlice) {
+ t := reflect.TypeOf(cfg)
+ for i := 0; i < t.NumField(); i++ {
+ k := t.Field(i).Tag.Get("env")
+ v := reflect.ValueOf(cfg).Field(i).Interface()
+ var val string
+ switch v.(type) {
+ case string:
+ val = v.(string)
+ case int, bool, time.Duration:
+ val = fmt.Sprint(v)
+ case []string:
+ arr := v.([]string)
+ if len(arr) > 0 {
+ val = strings.Join(arr, ",")
+ }
+ }
+ // this can happen with embedded structs
+ if k == "" {
+ continue
+ }
+ m = append(m, KV{k, val})
+ }
+ return
+}
+
+// PrintEnv renders the key/values of a config.C to a provided io.Writer.
+func PrintEnv(cfg *C, printer io.Writer) {
+ kvs := EnvKV(*cfg)
+ sort.Sort(kvs)
+ for _, v := range kvs {
+ _, _ = fmt.Fprintf(printer, "%s=%s\n", v.Key, v.Value)
+ }
+}
+
+// PrintHelp outputs a help text listing the configuration options and default
+// values to a provided io.Writer (usually os.Stderr or os.Stdout).
+func PrintHelp(cfg *C, printer io.Writer) {
+ _, _ = fmt.Fprintf(
+ printer,
+ "%s %s\n\n", cfg.AppName, version.V,
+ )
+
+ _, _ = fmt.Fprintf(
+ printer,
+ "Environment variables that configure %s:\n\n", cfg.AppName,
+ )
+
+ env.Usage(cfg, printer, &env.Options{SliceSep: ","})
+ _, _ = fmt.Fprintf(
+ printer,
+ "\nCLI parameter 'help' also prints this information\n"+
+ "\n.env file found at the path %s will be automatically "+
+ "loaded for configuration.\nset these two variables for a custom load path,"+
+ " this file will be created on first startup.\nenvironment overrides it and "+
+ "you can also edit the file to set configuration options\n\n"+
+ "use the parameter 'env' to print out the current configuration to the terminal\n\n"+
+ "set the environment using\n\n\t%s env > %s/.env\n", cfg.Config,
+ os.Args[0],
+ cfg.Config,
+ )
+
+ fmt.Fprintf(printer, "\ncurrent configuration:\n\n")
+ PrintEnv(cfg, printer)
+ fmt.Fprintln(printer)
+ return
+}
diff --git a/realy/disconnect.go b/realy/disconnect.go
new file mode 100644
index 0000000..3474180
--- /dev/null
+++ b/realy/disconnect.go
@@ -0,0 +1,10 @@
+package realy
+
+import "orly.dev/log"
+
+func (s *Server) disconnect() {
+ for client := range s.clients {
+ log.I.F("closing client %s", client.RemoteAddr())
+ client.Close()
+ }
+}
diff --git a/realy/doc.go b/realy/doc.go
new file mode 100644
index 0000000..626ac42
--- /dev/null
+++ b/realy/doc.go
@@ -0,0 +1,3 @@
+// Package realy implements a nostr relay including the new HTTP API built with
+// huma.
+package realy
diff --git a/realy/handleRelayinfo.go b/realy/handleRelayinfo.go
new file mode 100644
index 0000000..ea01f98
--- /dev/null
+++ b/realy/handleRelayinfo.go
@@ -0,0 +1,63 @@
+package realy
+
+import (
+ "encoding/json"
+ "net/http"
+ "orly.dev/chk"
+ "orly.dev/log"
+ "orly.dev/version"
+ "sort"
+
+ "orly.dev/relay"
+ "orly.dev/relayinfo"
+)
+
+func (s *Server) handleRelayInfo(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ log.I.Ln("handling relay information document")
+ var info *relayinfo.T
+ if informationer, ok := s.relay.(relay.Informationer); ok {
+ info = informationer.GetNIP11InformationDocument()
+ } else {
+ supportedNIPs := relayinfo.GetList(
+ relayinfo.BasicProtocol,
+ relayinfo.EncryptedDirectMessage,
+ relayinfo.EventDeletion,
+ relayinfo.RelayInformationDocument,
+ relayinfo.GenericTagQueries,
+ relayinfo.NostrMarketplace,
+ relayinfo.EventTreatment,
+ relayinfo.CommandResults,
+ relayinfo.ParameterizedReplaceableEvents,
+ relayinfo.ExpirationTimestamp,
+ relayinfo.ProtectedEvents,
+ relayinfo.RelayListMetadata,
+ )
+ // var auther relay.Authenticator
+ // if auther, ok = s.relay.(relay.Authenticator); ok && auther.ServiceUrl(r) != "" {
+ // supportedNIPs = append(supportedNIPs, relayinfo.Authentication.N())
+ // }
+ // var storage store.I
+ // if storage = s.relay.Storage(); storage != nil {
+ // if _, ok = storage.(relay.EventCounter); ok {
+ // supportedNIPs = append(supportedNIPs, relayinfo.CountingResults.N())
+ // }
+ // }
+ sort.Sort(supportedNIPs)
+ log.T.Ln("supported NIPs", supportedNIPs)
+ info = &relayinfo.T{
+ Name: s.relay.Name(),
+ Description: version.Description,
+ Nips: supportedNIPs, Software: version.URL,
+ Version: version.V,
+ Limitation: relayinfo.Limits{
+ // MaxLimit: s.maxLimit,
+ // AuthRequired: s.authRequired,
+ // RestrictedWrites: !s.publicReadable || s.authRequired || len(s.owners) > 0,
+ },
+ Icon: "https://cdn.satellite.earth/ac9778868fbf23b63c47c769a74e163377e6ea94d3f0f31711931663d035c4f6.png",
+ }
+ }
+ if err := json.NewEncoder(w).Encode(info); chk.E(err) {
+ }
+}
diff --git a/realy/handleWebsocket.go b/realy/handleWebsocket.go
new file mode 100644
index 0000000..9248011
--- /dev/null
+++ b/realy/handleWebsocket.go
@@ -0,0 +1,13 @@
+package realy
+
+import (
+ "net/http"
+
+ "orly.dev/socketapi"
+)
+
+func (s *Server) handleWebsocket(w http.ResponseWriter, r *http.Request) {
+ a := &socketapi.A{Server: s} // ClientsMu: &s.clientsMu, Clients: s.clients,
+
+ a.Serve(w, r, s)
+}
diff --git a/realy/helpers/helpers.go b/realy/helpers/helpers.go
new file mode 100644
index 0000000..21afb55
--- /dev/null
+++ b/realy/helpers/helpers.go
@@ -0,0 +1,34 @@
+package helpers
+
+import (
+ "net/http"
+ "strings"
+)
+
+func GenerateDescription(text string, scopes []string) string {
+ if len(scopes) == 0 {
+ return text
+ }
+ result := make([]string, 0)
+ for _, value := range scopes {
+ result = append(result, "`"+value+"`")
+ }
+ return text + "